Compare commits
9 Commits
48e63894eb
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 9ceb5fe92a | |||
|
|
50922641a9 | ||
|
|
904ea3d146 | ||
|
|
696919611c | ||
|
|
ea0f5d8765 | ||
| 0bc77f61e2 | |||
| 083142491f | |||
|
|
4bd8cef372 | ||
|
|
89b55edf9f |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -4,4 +4,5 @@
|
||||
*.cursorrules
|
||||
*png
|
||||
/upload
|
||||
document_ai
|
||||
texpixel
|
||||
/vendor
|
||||
|
||||
16
Dockerfile
16
Dockerfile
@@ -1,16 +1,22 @@
|
||||
# Build stage
|
||||
FROM registry.cn-beijing.aliyuncs.com/bitwsd/golang AS builder
|
||||
FROM golang:1.20-alpine AS builder
|
||||
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
|
||||
ENV GOPROXY=https://goproxy.cn,direct
|
||||
ENV GOSUMDB=off
|
||||
|
||||
|
||||
# Build binary
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -mod=vendor -o main ./main.go
|
||||
RUN go mod download && \
|
||||
CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o doc_ai ./main.go
|
||||
|
||||
# Runtime stage
|
||||
FROM registry.cn-beijing.aliyuncs.com/bitwsd/alpine
|
||||
FROM alpine:latest
|
||||
|
||||
# Set timezone
|
||||
RUN apk add --no-cache tzdata && \
|
||||
@@ -21,7 +27,7 @@ RUN apk add --no-cache tzdata && \
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder /app/main .
|
||||
COPY --from=builder /app/doc_ai .
|
||||
|
||||
# Copy config files
|
||||
COPY config/config_*.yaml ./config/
|
||||
@@ -34,7 +40,7 @@ RUN mkdir -p /data/formula && \
|
||||
EXPOSE 8024
|
||||
|
||||
# Set entrypoint
|
||||
ENTRYPOINT ["./main"]
|
||||
ENTRYPOINT ["./doc_ai"]
|
||||
|
||||
# Default command (can be overridden)
|
||||
CMD ["-env", "prod"]
|
||||
@@ -3,6 +3,7 @@ package formula
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gitea.com/bitwsd/document_ai/internal/model/formula"
|
||||
"gitea.com/bitwsd/document_ai/internal/service"
|
||||
@@ -46,7 +47,7 @@ func (endpoint *FormulaEndpoint) CreateTask(ctx *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
fileExt := filepath.Ext(req.FileName)
|
||||
fileExt := strings.ToLower(filepath.Ext(req.FileName))
|
||||
if !utils.InArray(fileExt, []string{".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".webp"}) {
|
||||
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid file type"))
|
||||
return
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitea.com/bitwsd/document_ai/config"
|
||||
@@ -59,7 +60,7 @@ func GetSignatureURL(ctx *gin.Context) {
|
||||
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, gin.H{"sign_url": "", "repeat": true, "path": task.FileURL}))
|
||||
return
|
||||
}
|
||||
extend := filepath.Ext(req.FileName)
|
||||
extend := strings.ToLower(filepath.Ext(req.FileName))
|
||||
if extend == "" {
|
||||
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "invalid file name"))
|
||||
return
|
||||
|
||||
@@ -3,7 +3,7 @@ package task
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"gitea.com/bitwsd/document_ai/internal/model/task"
|
||||
"gitea.com/bitwsd/document_ai/internal/service"
|
||||
"gitea.com/bitwsd/document_ai/pkg/common"
|
||||
|
||||
@@ -3,7 +3,7 @@ package user
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"gitea.com/bitwsd/document_ai/config"
|
||||
model "gitea.com/bitwsd/document_ai/internal/model/user"
|
||||
"gitea.com/bitwsd/document_ai/internal/service"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -4,16 +4,16 @@ server:
|
||||
|
||||
database:
|
||||
driver: mysql
|
||||
host: rm-bp1uh3e1qop18gz4wto.mysql.rds.aliyuncs.com
|
||||
port: 3306
|
||||
host: 172.31.134.12
|
||||
port: 3006
|
||||
username: root
|
||||
password: bitwsdttestESAadb12@3341
|
||||
password: yoge@coder%%%123321!
|
||||
dbname: doc_ai
|
||||
max_idle: 10
|
||||
max_open: 100
|
||||
max_open: 30
|
||||
|
||||
redis:
|
||||
addr: 172.31.32.138:6379
|
||||
addr: 172.31.134.12:6399
|
||||
password: bitwsd@8912WE!
|
||||
db: 0
|
||||
|
||||
@@ -40,6 +40,6 @@ aliyun:
|
||||
oss:
|
||||
endpoint: oss-cn-beijing.aliyuncs.com
|
||||
inner_endpoint: oss-cn-beijing-internal.aliyuncs.com
|
||||
access_key_id: LTAI5tKogxeiBb4gJGWEePWN
|
||||
access_key_secret: l4oCxtt5iLSQ1DAs40guTzKUfrxXwq
|
||||
bucket_name: bitwsd-doc-ai
|
||||
access_key_id: LTAI5t8qXhow6NCdYDtu1saF
|
||||
access_key_secret: qZ2SwYsNCEBckCVSOszH31yYwXU44A
|
||||
bucket_name: texpixel-doc
|
||||
|
||||
5
go.mod
5
go.mod
@@ -3,7 +3,6 @@ module gitea.com/bitwsd/document_ai
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
gitea.com/bitwsd/core v0.0.0-20241128075635-8d72a929b914
|
||||
github.com/alibabacloud-go/darabonba-openapi v0.2.1
|
||||
github.com/alibabacloud-go/dysmsapi-20170525/v2 v2.0.18
|
||||
github.com/alibabacloud-go/tea v1.1.19
|
||||
@@ -13,7 +12,9 @@ require (
|
||||
github.com/gin-gonic/gin v1.10.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/redis/go-redis/v9 v9.7.0
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/spf13/viper v1.19.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gorm.io/driver/mysql v1.5.7
|
||||
gorm.io/gorm v1.25.12
|
||||
)
|
||||
@@ -54,7 +55,6 @@ require (
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/rs/zerolog v1.33.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
@@ -76,6 +76,5 @@ require (
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
google.golang.org/protobuf v1.34.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
12
go.sum
12
go.sum
@@ -1,5 +1,3 @@
|
||||
gitea.com/bitwsd/core v0.0.0-20241128075635-8d72a929b914 h1:3aRCeiuq/PWMr2yjEN9Y5NusfmpdMKiO4i/5tM5qc34=
|
||||
gitea.com/bitwsd/core v0.0.0-20241128075635-8d72a929b914/go.mod h1:hbEUo3t/AFGCnQbxwdG4oiw2IHdlRgK02cqd0yicP1Y=
|
||||
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo=
|
||||
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
|
||||
github.com/alibabacloud-go/darabonba-openapi v0.1.18/go.mod h1:PB4HffMhJVmAgNKNq3wYbTUlFvPgxJpTzd1F5pTuUsc=
|
||||
@@ -33,9 +31,7 @@ github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9
|
||||
github.com/aliyun/credentials-go v1.1.2 h1:qU1vwGIBb3UJ8BwunHDRFtAhS6jnQLnde/yk0+Ih2GY=
|
||||
github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
|
||||
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
|
||||
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
||||
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
|
||||
@@ -53,13 +49,11 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
||||
@@ -69,7 +63,6 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
|
||||
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
@@ -82,7 +75,6 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -103,11 +95,9 @@ github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuV
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
@@ -134,11 +124,9 @@ github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
|
||||
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
|
||||
@@ -11,3 +11,8 @@ type GetFormulaTaskResponse struct {
|
||||
Count int `json:"count"`
|
||||
Latex string `json:"latex"`
|
||||
}
|
||||
|
||||
// FormulaRecognitionResponse 公式识别服务返回的响应
|
||||
type FormulaRecognitionResponse struct {
|
||||
Result string `json:"result"`
|
||||
}
|
||||
|
||||
@@ -11,11 +11,11 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/config"
|
||||
"gitea.com/bitwsd/document_ai/internal/model/formula"
|
||||
"gitea.com/bitwsd/document_ai/internal/storage/cache"
|
||||
"gitea.com/bitwsd/document_ai/internal/storage/dao"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
|
||||
"gitea.com/bitwsd/document_ai/pkg/common"
|
||||
"gitea.com/bitwsd/document_ai/pkg/constant"
|
||||
@@ -200,7 +200,7 @@ func (s *RecognitionService) processVLFormula(ctx context.Context, taskID int64)
|
||||
log.Info(ctx, "func", "processVLFormulaQueue", "msg", "获取任务成功", "task_id", taskID)
|
||||
|
||||
// 处理具体任务
|
||||
if err := s.processVLFormulaTask(ctx, taskID, task.FileURL); err != nil {
|
||||
if err := s.processVLFormulaTask(ctx, taskID, task.FileURL, utils.ModelVLQwen3VL32BInstruct); err != nil {
|
||||
log.Error(ctx, "func", "processVLFormulaQueue", "msg", "处理任务失败", "error", err)
|
||||
return
|
||||
}
|
||||
@@ -263,11 +263,11 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
|
||||
return err
|
||||
}
|
||||
|
||||
downloadURL, err := oss.GetDownloadURL(ctx, fileURL)
|
||||
if err != nil {
|
||||
log.Error(ctx, "func", "processFormulaTask", "msg", "获取下载URL失败", "error", err)
|
||||
return err
|
||||
}
|
||||
// downloadURL, err := oss.GetDownloadURL(ctx, fileURL)
|
||||
// if err != nil {
|
||||
// log.Error(ctx, "func", "processFormulaTask", "msg", "获取下载URL失败", "error", err)
|
||||
// return err
|
||||
// }
|
||||
|
||||
// 将图片转为base64编码
|
||||
base64Image := base64.StdEncoding.EncodeToString(imageData)
|
||||
@@ -275,7 +275,6 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
|
||||
// 创建JSON请求
|
||||
requestData := map[string]string{
|
||||
"image_base64": base64Image,
|
||||
"img_url": downloadURL,
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(requestData)
|
||||
@@ -288,7 +287,7 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
|
||||
headers := map[string]string{"Content-Type": "application/json", utils.RequestIDHeaderKey: utils.GetRequestIDFromContext(ctx)}
|
||||
|
||||
// 发送请求时会使用带超时的context
|
||||
resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, s.getURL(ctx), bytes.NewReader(jsonData), headers)
|
||||
resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, "http://cloud.texpixel.com:1080/formula/predict", bytes.NewReader(jsonData), headers)
|
||||
if err != nil {
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
log.Error(ctx, "func", "processFormulaTask", "msg", "请求超时")
|
||||
@@ -299,13 +298,21 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
log.Info(ctx, "func", "processFormulaTask", "msg", "请求成功", "resp", resp.Body)
|
||||
log.Info(ctx, "func", "processFormulaTask", "msg", "请求成功")
|
||||
body := &bytes.Buffer{}
|
||||
if _, err = body.ReadFrom(resp.Body); err != nil {
|
||||
log.Error(ctx, "func", "processFormulaTask", "msg", "读取响应体失败", "error", err)
|
||||
return err
|
||||
}
|
||||
katex := utils.ToKatex(body.String())
|
||||
log.Info(ctx, "func", "processFormulaTask", "msg", "响应内容", "body", body.String())
|
||||
|
||||
// 解析 JSON 响应
|
||||
var formulaResp formula.FormulaRecognitionResponse
|
||||
if err := json.Unmarshal(body.Bytes(), &formulaResp); err != nil {
|
||||
log.Error(ctx, "func", "processFormulaTask", "msg", "解析响应JSON失败", "error", err)
|
||||
return err
|
||||
}
|
||||
katex := utils.ToKatex(formulaResp.Result)
|
||||
content := &dao.FormulaRecognitionContent{Latex: katex}
|
||||
b, _ := json.Marshal(content)
|
||||
// Save recognition result
|
||||
@@ -322,7 +329,7 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *RecognitionService) processVLFormulaTask(ctx context.Context, taskID int64, fileURL string) error {
|
||||
func (s *RecognitionService) processVLFormulaTask(ctx context.Context, taskID int64, fileURL string, model string) error {
|
||||
isSuccess := false
|
||||
defer func() {
|
||||
if !isSuccess {
|
||||
@@ -349,28 +356,11 @@ func (s *RecognitionService) processVLFormulaTask(ctx context.Context, taskID in
|
||||
log.Error(ctx, "func", "processVLFormulaTask", "msg", "读取图片数据失败", "error", err)
|
||||
return err
|
||||
}
|
||||
prompt := `
|
||||
Please perform OCR on the image and output only LaTeX code.
|
||||
Important instructions:
|
||||
|
||||
* "The image contains mathematical formulas, no plain text."
|
||||
|
||||
* "Preserve all layout, symbols, subscripts, summations, parentheses, etc., exactly as shown."
|
||||
|
||||
* "Use \[ ... \] or align environments to represent multiline math expressions."
|
||||
|
||||
* "Use adaptive symbols such as \left and \right where applicable."
|
||||
|
||||
* "Do not include any extra commentary, template answers, or unrelated equations."
|
||||
|
||||
* "Only output valid LaTeX code based on the actual content of the image, and not change the original mathematical expression."
|
||||
|
||||
* "The output result must be can render by better-react-mathjax."
|
||||
`
|
||||
prompt := `Please perform OCR on the image and output only LaTeX code.`
|
||||
base64Image := base64.StdEncoding.EncodeToString(imageData)
|
||||
|
||||
requestBody := formula.VLFormulaRequest{
|
||||
Model: "Qwen/Qwen2.5-VL-32B-Instruct",
|
||||
Model: model,
|
||||
Stream: false,
|
||||
MaxTokens: 512,
|
||||
Temperature: 0.1,
|
||||
@@ -517,24 +507,12 @@ func (s *RecognitionService) processOneTask(ctx context.Context) {
|
||||
ctx = context.WithValue(ctx, utils.RequestIDKey, task.TaskUUID)
|
||||
log.Info(ctx, "func", "processFormulaQueue", "msg", "获取任务成功", "task_id", taskID)
|
||||
|
||||
// 处理具体任务
|
||||
if err := s.processFormulaTask(ctx, taskID, task.FileURL); err != nil {
|
||||
// 处理任务
|
||||
err = s.processFormulaTask(ctx, taskID, task.FileURL)
|
||||
if err != nil {
|
||||
log.Error(ctx, "func", "processFormulaQueue", "msg", "处理任务失败", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Info(ctx, "func", "processFormulaQueue", "msg", "处理任务成功", "task_id", taskID)
|
||||
}
|
||||
|
||||
func (s *RecognitionService) getURL(ctx context.Context) string {
|
||||
return "http://cloud.srcstar.com:8045/formula/predict"
|
||||
count, err := cache.IncrURLCount(ctx)
|
||||
if err != nil {
|
||||
log.Error(ctx, "func", "getURL", "msg", "获取URL计数失败", "error", err)
|
||||
return "http://cloud.srcstar.com:8026/formula/predict"
|
||||
}
|
||||
if count%2 == 0 {
|
||||
return "http://cloud.srcstar.com:8026/formula/predict"
|
||||
}
|
||||
return "https://cloud.texpixel.com:1080/formula/predict"
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"gitea.com/bitwsd/document_ai/internal/model/task"
|
||||
"gitea.com/bitwsd/document_ai/internal/storage/dao"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"gitea.com/bitwsd/document_ai/internal/storage/cache"
|
||||
"gitea.com/bitwsd/document_ai/internal/storage/dao"
|
||||
"gitea.com/bitwsd/document_ai/pkg/sms"
|
||||
|
||||
6
main.go
6
main.go
@@ -10,9 +10,9 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"gitea.com/bitwsd/core/common/cors"
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/core/common/middleware"
|
||||
"gitea.com/bitwsd/document_ai/pkg/cors"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/middleware"
|
||||
"gitea.com/bitwsd/document_ai/api"
|
||||
"gitea.com/bitwsd/document_ai/config"
|
||||
"gitea.com/bitwsd/document_ai/internal/storage/cache"
|
||||
|
||||
@@ -58,3 +58,4 @@ func Cors(config Config) gin.HandlerFunc {
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
)
|
||||
|
||||
// RetryConfig 重试配置
|
||||
|
||||
@@ -27,3 +27,4 @@ func DefaultLogConfig() *LogConfig {
|
||||
Compress: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,3 +149,4 @@ func Fatal(ctx context.Context, kv ...interface{}) {
|
||||
func Access(ctx context.Context, kv ...interface{}) {
|
||||
log(ctx, zerolog.InfoLevel, TypeAccess, kv...)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
@@ -72,3 +72,4 @@ func AccessLog() gin.HandlerFunc {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,3 +16,4 @@ func RequestID() gin.HandlerFunc {
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/config"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
)
|
||||
|
||||
@@ -125,9 +125,7 @@ func DownloadFile(ctx context.Context, ossPath string) (io.ReadCloser, error) {
|
||||
}
|
||||
|
||||
// Create OSS client
|
||||
client, err := oss.New(endpoint,
|
||||
config.GlobalConfig.Aliyun.OSS.AccessKeyID,
|
||||
config.GlobalConfig.Aliyun.OSS.AccessKeySecret)
|
||||
client, err := oss.New(endpoint, config.GlobalConfig.Aliyun.OSS.AccessKeyID, config.GlobalConfig.Aliyun.OSS.AccessKeySecret)
|
||||
if err != nil {
|
||||
log.Error(ctx, "func", "DownloadFile", "msg", "create oss client failed", "error", err)
|
||||
return nil, err
|
||||
|
||||
@@ -23,6 +23,8 @@ func rmDollarSurr(text string) string {
|
||||
func ToKatex(formula string) string {
|
||||
res := formula
|
||||
|
||||
res = strings.ReplaceAll(res, "\n", "")
|
||||
|
||||
// Remove mbox surrounding
|
||||
res = changeAll(res, `\mbox `, " ", "{", "}", "", "")
|
||||
res = changeAll(res, `\mbox`, " ", "{", "}", "", "")
|
||||
|
||||
6
pkg/utils/model.go
Normal file
6
pkg/utils/model.go
Normal file
@@ -0,0 +1,6 @@
|
||||
package utils
|
||||
|
||||
const (
|
||||
ModelVLDeepSeekOCR = "deepseek-ai/DeepSeek-OCR"
|
||||
ModelVLQwen3VL32BInstruct = "Qwen/Qwen3-VL-32B-Instruct"
|
||||
)
|
||||
@@ -3,7 +3,7 @@ package utils
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gitea.com/bitwsd/core/common/log"
|
||||
"gitea.com/bitwsd/document_ai/pkg/log"
|
||||
)
|
||||
|
||||
func SafeGo(fn func()) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
package utils
|
||||
|
||||
const (
|
||||
SiliconFlowToken = "Bearer sk-akbroznlbxikkbiouzasspbbzwgxubnjjtqlujxmxsnvpmhn"
|
||||
SiliconFlowToken = "Bearer sk-wiggxqscvjdveqvwcdywwpipcinglkzkewkcfjnrgjqbdbmc"
|
||||
)
|
||||
|
||||
201
vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
305
vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go
generated
vendored
305
vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go
generated
vendored
@@ -1,305 +0,0 @@
|
||||
// This file is auto-generated, don't edit it. Thanks.
|
||||
package client
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
credential "github.com/aliyun/credentials-go/credentials"
|
||||
)
|
||||
|
||||
type InterceptorContext struct {
|
||||
Request *InterceptorContextRequest `json:"request,omitempty" xml:"request,omitempty" require:"true" type:"Struct"`
|
||||
Configuration *InterceptorContextConfiguration `json:"configuration,omitempty" xml:"configuration,omitempty" require:"true" type:"Struct"`
|
||||
Response *InterceptorContextResponse `json:"response,omitempty" xml:"response,omitempty" require:"true" type:"Struct"`
|
||||
}
|
||||
|
||||
func (s InterceptorContext) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s InterceptorContext) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *InterceptorContext) SetRequest(v *InterceptorContextRequest) *InterceptorContext {
|
||||
s.Request = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContext) SetConfiguration(v *InterceptorContextConfiguration) *InterceptorContext {
|
||||
s.Configuration = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContext) SetResponse(v *InterceptorContextResponse) *InterceptorContext {
|
||||
s.Response = v
|
||||
return s
|
||||
}
|
||||
|
||||
type InterceptorContextRequest struct {
|
||||
Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"`
|
||||
Query map[string]*string `json:"query,omitempty" xml:"query,omitempty"`
|
||||
Body interface{} `json:"body,omitempty" xml:"body,omitempty"`
|
||||
Stream io.Reader `json:"stream,omitempty" xml:"stream,omitempty"`
|
||||
HostMap map[string]*string `json:"hostMap,omitempty" xml:"hostMap,omitempty"`
|
||||
Pathname *string `json:"pathname,omitempty" xml:"pathname,omitempty" require:"true"`
|
||||
ProductId *string `json:"productId,omitempty" xml:"productId,omitempty" require:"true"`
|
||||
Action *string `json:"action,omitempty" xml:"action,omitempty" require:"true"`
|
||||
Version *string `json:"version,omitempty" xml:"version,omitempty" require:"true"`
|
||||
Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty" require:"true"`
|
||||
Method *string `json:"method,omitempty" xml:"method,omitempty" require:"true"`
|
||||
AuthType *string `json:"authType,omitempty" xml:"authType,omitempty" require:"true"`
|
||||
BodyType *string `json:"bodyType,omitempty" xml:"bodyType,omitempty" require:"true"`
|
||||
ReqBodyType *string `json:"reqBodyType,omitempty" xml:"reqBodyType,omitempty" require:"true"`
|
||||
Style *string `json:"style,omitempty" xml:"style,omitempty"`
|
||||
Credential credential.Credential `json:"credential,omitempty" xml:"credential,omitempty" require:"true"`
|
||||
SignatureVersion *string `json:"signatureVersion,omitempty" xml:"signatureVersion,omitempty"`
|
||||
SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" xml:"signatureAlgorithm,omitempty"`
|
||||
UserAgent *string `json:"userAgent,omitempty" xml:"userAgent,omitempty" require:"true"`
|
||||
}
|
||||
|
||||
func (s InterceptorContextRequest) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s InterceptorContextRequest) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetHeaders(v map[string]*string) *InterceptorContextRequest {
|
||||
s.Headers = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetQuery(v map[string]*string) *InterceptorContextRequest {
|
||||
s.Query = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetBody(v interface{}) *InterceptorContextRequest {
|
||||
s.Body = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetStream(v io.Reader) *InterceptorContextRequest {
|
||||
s.Stream = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetHostMap(v map[string]*string) *InterceptorContextRequest {
|
||||
s.HostMap = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetPathname(v string) *InterceptorContextRequest {
|
||||
s.Pathname = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetProductId(v string) *InterceptorContextRequest {
|
||||
s.ProductId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetAction(v string) *InterceptorContextRequest {
|
||||
s.Action = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetVersion(v string) *InterceptorContextRequest {
|
||||
s.Version = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetProtocol(v string) *InterceptorContextRequest {
|
||||
s.Protocol = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetMethod(v string) *InterceptorContextRequest {
|
||||
s.Method = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetAuthType(v string) *InterceptorContextRequest {
|
||||
s.AuthType = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetBodyType(v string) *InterceptorContextRequest {
|
||||
s.BodyType = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetReqBodyType(v string) *InterceptorContextRequest {
|
||||
s.ReqBodyType = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetStyle(v string) *InterceptorContextRequest {
|
||||
s.Style = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetCredential(v credential.Credential) *InterceptorContextRequest {
|
||||
s.Credential = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetSignatureVersion(v string) *InterceptorContextRequest {
|
||||
s.SignatureVersion = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetSignatureAlgorithm(v string) *InterceptorContextRequest {
|
||||
s.SignatureAlgorithm = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextRequest) SetUserAgent(v string) *InterceptorContextRequest {
|
||||
s.UserAgent = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type InterceptorContextConfiguration struct {
|
||||
RegionId *string `json:"regionId,omitempty" xml:"regionId,omitempty" require:"true"`
|
||||
Endpoint *string `json:"endpoint,omitempty" xml:"endpoint,omitempty"`
|
||||
EndpointRule *string `json:"endpointRule,omitempty" xml:"endpointRule,omitempty"`
|
||||
EndpointMap map[string]*string `json:"endpointMap,omitempty" xml:"endpointMap,omitempty"`
|
||||
EndpointType *string `json:"endpointType,omitempty" xml:"endpointType,omitempty"`
|
||||
Network *string `json:"network,omitempty" xml:"network,omitempty"`
|
||||
Suffix *string `json:"suffix,omitempty" xml:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
func (s InterceptorContextConfiguration) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s InterceptorContextConfiguration) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetRegionId(v string) *InterceptorContextConfiguration {
|
||||
s.RegionId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetEndpoint(v string) *InterceptorContextConfiguration {
|
||||
s.Endpoint = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetEndpointRule(v string) *InterceptorContextConfiguration {
|
||||
s.EndpointRule = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetEndpointMap(v map[string]*string) *InterceptorContextConfiguration {
|
||||
s.EndpointMap = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetEndpointType(v string) *InterceptorContextConfiguration {
|
||||
s.EndpointType = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetNetwork(v string) *InterceptorContextConfiguration {
|
||||
s.Network = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextConfiguration) SetSuffix(v string) *InterceptorContextConfiguration {
|
||||
s.Suffix = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type InterceptorContextResponse struct {
|
||||
StatusCode *int `json:"statusCode,omitempty" xml:"statusCode,omitempty"`
|
||||
Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"`
|
||||
Body io.Reader `json:"body,omitempty" xml:"body,omitempty"`
|
||||
DeserializedBody interface{} `json:"deserializedBody,omitempty" xml:"deserializedBody,omitempty"`
|
||||
}
|
||||
|
||||
func (s InterceptorContextResponse) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s InterceptorContextResponse) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *InterceptorContextResponse) SetStatusCode(v int) *InterceptorContextResponse {
|
||||
s.StatusCode = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextResponse) SetHeaders(v map[string]*string) *InterceptorContextResponse {
|
||||
s.Headers = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextResponse) SetBody(v io.Reader) *InterceptorContextResponse {
|
||||
s.Body = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *InterceptorContextResponse) SetDeserializedBody(v interface{}) *InterceptorContextResponse {
|
||||
s.DeserializedBody = v
|
||||
return s
|
||||
}
|
||||
|
||||
type AttributeMap struct {
|
||||
Attributes map[string]interface{} `json:"attributes,omitempty" xml:"attributes,omitempty" require:"true"`
|
||||
Key map[string]*string `json:"key,omitempty" xml:"key,omitempty" require:"true"`
|
||||
}
|
||||
|
||||
func (s AttributeMap) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s AttributeMap) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *AttributeMap) SetAttributes(v map[string]interface{}) *AttributeMap {
|
||||
s.Attributes = v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *AttributeMap) SetKey(v map[string]*string) *AttributeMap {
|
||||
s.Key = v
|
||||
return s
|
||||
}
|
||||
|
||||
type ClientInterface interface {
|
||||
ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) error
|
||||
ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) error
|
||||
ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) error
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
}
|
||||
|
||||
func NewClient() (*Client, error) {
|
||||
client := new(Client)
|
||||
err := client.Init()
|
||||
return client, err
|
||||
}
|
||||
|
||||
func (client *Client) Init() (_err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *Client) ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
|
||||
panic("No Support!")
|
||||
}
|
||||
|
||||
func (client *Client) ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
|
||||
panic("No Support!")
|
||||
}
|
||||
|
||||
func (client *Client) ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
|
||||
panic("No Support!")
|
||||
}
|
||||
201
vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1694
vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go
generated
vendored
1694
vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go
generated
vendored
File diff suppressed because it is too large
Load Diff
201
vendor/github.com/alibabacloud-go/debug/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/debug/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
12
vendor/github.com/alibabacloud-go/debug/debug/assert.go
generated
vendored
12
vendor/github.com/alibabacloud-go/debug/debug/assert.go
generated
vendored
@@ -1,12 +0,0 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertEqual(t *testing.T, a, b interface{}) {
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("%v != %v", a, b)
|
||||
}
|
||||
}
|
||||
36
vendor/github.com/alibabacloud-go/debug/debug/debug.go
generated
vendored
36
vendor/github.com/alibabacloud-go/debug/debug/debug.go
generated
vendored
@@ -1,36 +0,0 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Debug func(format string, v ...interface{})
|
||||
|
||||
var hookGetEnv = func() string {
|
||||
return os.Getenv("DEBUG")
|
||||
}
|
||||
|
||||
var hookPrint = func(input string) {
|
||||
fmt.Println(input)
|
||||
}
|
||||
|
||||
func Init(flag string) Debug {
|
||||
enable := false
|
||||
|
||||
env := hookGetEnv()
|
||||
parts := strings.Split(env, ",")
|
||||
for _, part := range parts {
|
||||
if part == flag {
|
||||
enable = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return func(format string, v ...interface{}) {
|
||||
if enable {
|
||||
hookPrint(fmt.Sprintf(format, v...))
|
||||
}
|
||||
}
|
||||
}
|
||||
201
vendor/github.com/alibabacloud-go/dysmsapi-20170525/v2/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/dysmsapi-20170525/v2/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
6116
vendor/github.com/alibabacloud-go/dysmsapi-20170525/v2/client/client.go
generated
vendored
6116
vendor/github.com/alibabacloud-go/dysmsapi-20170525/v2/client/client.go
generated
vendored
File diff suppressed because it is too large
Load Diff
41
vendor/github.com/alibabacloud-go/endpoint-util/service/service.go
generated
vendored
41
vendor/github.com/alibabacloud-go/endpoint-util/service/service.go
generated
vendored
@@ -1,41 +0,0 @@
|
||||
// This file is auto-generated, don't edit it. Thanks.
|
||||
/**
|
||||
* Get endpoint
|
||||
* @return string
|
||||
*/
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
)
|
||||
|
||||
func GetEndpointRules(product, regionId, endpointType, network, suffix *string) (_result *string, _err error) {
|
||||
if tea.StringValue(endpointType) == "regional" {
|
||||
if tea.StringValue(regionId) == "" {
|
||||
_err = fmt.Errorf("RegionId is empty, please set a valid RegionId")
|
||||
return tea.String(""), _err
|
||||
}
|
||||
_result = tea.String(strings.Replace("<product><suffix><network>.<region_id>.aliyuncs.com",
|
||||
"<region_id>", tea.StringValue(regionId), 1))
|
||||
} else {
|
||||
_result = tea.String("<product><suffix><network>.aliyuncs.com")
|
||||
}
|
||||
_result = tea.String(strings.Replace(tea.StringValue(_result),
|
||||
"<product>", strings.ToLower(tea.StringValue(product)), 1))
|
||||
if tea.StringValue(network) == "" || tea.StringValue(network) == "public" {
|
||||
_result = tea.String(strings.Replace(tea.StringValue(_result), "<network>", "", 1))
|
||||
} else {
|
||||
_result = tea.String(strings.Replace(tea.StringValue(_result),
|
||||
"<network>", "-"+tea.StringValue(network), 1))
|
||||
}
|
||||
if tea.StringValue(suffix) == "" {
|
||||
_result = tea.String(strings.Replace(tea.StringValue(_result), "<suffix>", "", 1))
|
||||
} else {
|
||||
_result = tea.String(strings.Replace(tea.StringValue(_result),
|
||||
"<suffix>", "-"+tea.StringValue(suffix), 1))
|
||||
}
|
||||
return _result, nil
|
||||
}
|
||||
201
vendor/github.com/alibabacloud-go/openapi-util/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/openapi-util/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
635
vendor/github.com/alibabacloud-go/openapi-util/service/service.go
generated
vendored
635
vendor/github.com/alibabacloud-go/openapi-util/service/service.go
generated
vendored
@@ -1,635 +0,0 @@
|
||||
// This file is auto-generated, don't edit it. Thanks.
|
||||
/**
|
||||
* This is for OpenApi Util
|
||||
*/
|
||||
package service
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
util "github.com/alibabacloud-go/tea-utils/service"
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/tjfoc/gmsm/sm3"
|
||||
)
|
||||
|
||||
const (
|
||||
PEM_BEGIN = "-----BEGIN RSA PRIVATE KEY-----\n"
|
||||
PEM_END = "\n-----END RSA PRIVATE KEY-----"
|
||||
)
|
||||
|
||||
type Sorter struct {
|
||||
Keys []string
|
||||
Vals []string
|
||||
}
|
||||
|
||||
func newSorter(m map[string]string) *Sorter {
|
||||
hs := &Sorter{
|
||||
Keys: make([]string, 0, len(m)),
|
||||
Vals: make([]string, 0, len(m)),
|
||||
}
|
||||
|
||||
for k, v := range m {
|
||||
hs.Keys = append(hs.Keys, k)
|
||||
hs.Vals = append(hs.Vals, v)
|
||||
}
|
||||
return hs
|
||||
}
|
||||
|
||||
// Sort is an additional function for function SignHeader.
|
||||
func (hs *Sorter) Sort() {
|
||||
sort.Sort(hs)
|
||||
}
|
||||
|
||||
// Len is an additional function for function SignHeader.
|
||||
func (hs *Sorter) Len() int {
|
||||
return len(hs.Vals)
|
||||
}
|
||||
|
||||
// Less is an additional function for function SignHeader.
|
||||
func (hs *Sorter) Less(i, j int) bool {
|
||||
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
|
||||
}
|
||||
|
||||
// Swap is an additional function for function SignHeader.
|
||||
func (hs *Sorter) Swap(i, j int) {
|
||||
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
|
||||
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert all params of body other than type of readable into content
|
||||
* @param body source Model
|
||||
* @param content target Model
|
||||
* @return void
|
||||
*/
|
||||
func Convert(body interface{}, content interface{}) {
|
||||
res := make(map[string]interface{})
|
||||
val := reflect.ValueOf(body).Elem()
|
||||
dataType := val.Type()
|
||||
for i := 0; i < dataType.NumField(); i++ {
|
||||
field := dataType.Field(i)
|
||||
name, _ := field.Tag.Lookup("json")
|
||||
name = strings.Split(name, ",omitempty")[0]
|
||||
_, ok := val.Field(i).Interface().(io.Reader)
|
||||
if !ok {
|
||||
res[name] = val.Field(i).Interface()
|
||||
}
|
||||
}
|
||||
byt, _ := json.Marshal(res)
|
||||
json.Unmarshal(byt, content)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the string to be signed according to request
|
||||
* @param request which contains signed messages
|
||||
* @return the signed string
|
||||
*/
|
||||
func GetStringToSign(request *tea.Request) (_result *string) {
|
||||
return tea.String(getStringToSign(request))
|
||||
}
|
||||
|
||||
func getStringToSign(request *tea.Request) string {
|
||||
resource := tea.StringValue(request.Pathname)
|
||||
queryParams := request.Query
|
||||
// sort QueryParams by key
|
||||
var queryKeys []string
|
||||
for key := range queryParams {
|
||||
queryKeys = append(queryKeys, key)
|
||||
}
|
||||
sort.Strings(queryKeys)
|
||||
tmp := ""
|
||||
for i := 0; i < len(queryKeys); i++ {
|
||||
queryKey := queryKeys[i]
|
||||
v := tea.StringValue(queryParams[queryKey])
|
||||
if v != "" {
|
||||
tmp = tmp + "&" + queryKey + "=" + v
|
||||
} else {
|
||||
tmp = tmp + "&" + queryKey
|
||||
}
|
||||
}
|
||||
if tmp != "" {
|
||||
tmp = strings.TrimLeft(tmp, "&")
|
||||
resource = resource + "?" + tmp
|
||||
}
|
||||
return getSignedStr(request, resource)
|
||||
}
|
||||
|
||||
func getSignedStr(req *tea.Request, canonicalizedResource string) string {
|
||||
temp := make(map[string]string)
|
||||
|
||||
for k, v := range req.Headers {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-acs-") {
|
||||
temp[strings.ToLower(k)] = tea.StringValue(v)
|
||||
}
|
||||
}
|
||||
hs := newSorter(temp)
|
||||
|
||||
// Sort the temp by the ascending order
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
canonicalizedOSSHeaders := ""
|
||||
for i := range hs.Keys {
|
||||
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
|
||||
}
|
||||
|
||||
// Give other parameters values
|
||||
// when sign URL, date is expires
|
||||
date := tea.StringValue(req.Headers["date"])
|
||||
accept := tea.StringValue(req.Headers["accept"])
|
||||
contentType := tea.StringValue(req.Headers["content-type"])
|
||||
contentMd5 := tea.StringValue(req.Headers["content-md5"])
|
||||
|
||||
signStr := tea.StringValue(req.Method) + "\n" + accept + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
|
||||
return signStr
|
||||
}
|
||||
|
||||
/**
|
||||
* Get signature according to stringToSign, secret
|
||||
* @param stringToSign the signed string
|
||||
* @param secret accesskey secret
|
||||
* @return the signature
|
||||
*/
|
||||
func GetROASignature(stringToSign *string, secret *string) (_result *string) {
|
||||
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(tea.StringValue(secret)))
|
||||
io.WriteString(h, tea.StringValue(stringToSign))
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
return tea.String(signedStr)
|
||||
}
|
||||
|
||||
func GetEndpoint(endpoint *string, server *bool, endpointType *string) *string {
|
||||
if tea.StringValue(endpointType) == "internal" {
|
||||
strs := strings.Split(tea.StringValue(endpoint), ".")
|
||||
strs[0] += "-internal"
|
||||
endpoint = tea.String(strings.Join(strs, "."))
|
||||
}
|
||||
if tea.BoolValue(server) && tea.StringValue(endpointType) == "accelerate" {
|
||||
return tea.String("oss-accelerate.aliyuncs.com")
|
||||
}
|
||||
|
||||
return endpoint
|
||||
}
|
||||
|
||||
func HexEncode(raw []byte) *string {
|
||||
return tea.String(hex.EncodeToString(raw))
|
||||
}
|
||||
|
||||
func Hash(raw []byte, signatureAlgorithm *string) []byte {
|
||||
signType := tea.StringValue(signatureAlgorithm)
|
||||
if signType == "ACS3-HMAC-SHA256" || signType == "ACS3-RSA-SHA256" {
|
||||
h := sha256.New()
|
||||
h.Write(raw)
|
||||
return h.Sum(nil)
|
||||
} else if signType == "ACS3-HMAC-SM3" {
|
||||
h := sm3.New()
|
||||
h.Write(raw)
|
||||
return h.Sum(nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetEncodePath(path *string) *string {
|
||||
uri := tea.StringValue(path)
|
||||
strs := strings.Split(uri, "/")
|
||||
for i, v := range strs {
|
||||
strs[i] = url.QueryEscape(v)
|
||||
}
|
||||
uri = strings.Join(strs, "/")
|
||||
uri = strings.Replace(uri, "+", "%20", -1)
|
||||
uri = strings.Replace(uri, "*", "%2A", -1)
|
||||
uri = strings.Replace(uri, "%7E", "~", -1)
|
||||
return tea.String(uri)
|
||||
}
|
||||
|
||||
func GetEncodeParam(param *string) *string {
|
||||
uri := tea.StringValue(param)
|
||||
uri = url.QueryEscape(uri)
|
||||
uri = strings.Replace(uri, "+", "%20", -1)
|
||||
uri = strings.Replace(uri, "*", "%2A", -1)
|
||||
uri = strings.Replace(uri, "%7E", "~", -1)
|
||||
return tea.String(uri)
|
||||
}
|
||||
|
||||
func GetAuthorization(request *tea.Request, signatureAlgorithm, payload, acesskey, secret *string) *string {
|
||||
canonicalURI := tea.StringValue(request.Pathname)
|
||||
if canonicalURI == "" {
|
||||
canonicalURI = "/"
|
||||
}
|
||||
|
||||
canonicalURI = strings.Replace(canonicalURI, "+", "%20", -1)
|
||||
canonicalURI = strings.Replace(canonicalURI, "*", "%2A", -1)
|
||||
canonicalURI = strings.Replace(canonicalURI, "%7E", "~", -1)
|
||||
|
||||
method := tea.StringValue(request.Method)
|
||||
canonicalQueryString := getCanonicalQueryString(request.Query)
|
||||
canonicalheaders, signedHeaders := getCanonicalHeaders(request.Headers)
|
||||
|
||||
canonicalRequest := method + "\n" + canonicalURI + "\n" + canonicalQueryString + "\n" + canonicalheaders + "\n" +
|
||||
strings.Join(signedHeaders, ";") + "\n" + tea.StringValue(payload)
|
||||
signType := tea.StringValue(signatureAlgorithm)
|
||||
StringToSign := signType + "\n" + tea.StringValue(HexEncode(Hash([]byte(canonicalRequest), signatureAlgorithm)))
|
||||
signature := tea.StringValue(HexEncode(SignatureMethod(tea.StringValue(secret), StringToSign, signType)))
|
||||
auth := signType + " Credential=" + tea.StringValue(acesskey) + ",SignedHeaders=" +
|
||||
strings.Join(signedHeaders, ";") + ",Signature=" + signature
|
||||
return tea.String(auth)
|
||||
}
|
||||
|
||||
func SignatureMethod(secret, source, signatureAlgorithm string) []byte {
|
||||
if signatureAlgorithm == "ACS3-HMAC-SHA256" {
|
||||
h := hmac.New(sha256.New, []byte(secret))
|
||||
h.Write([]byte(source))
|
||||
return h.Sum(nil)
|
||||
} else if signatureAlgorithm == "ACS3-HMAC-SM3" {
|
||||
h := hmac.New(sm3.New, []byte(secret))
|
||||
h.Write([]byte(source))
|
||||
return h.Sum(nil)
|
||||
} else if signatureAlgorithm == "ACS3-RSA-SHA256" {
|
||||
return rsaSign(source, secret)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func rsaSign(content, secret string) []byte {
|
||||
h := crypto.SHA256.New()
|
||||
h.Write([]byte(content))
|
||||
hashed := h.Sum(nil)
|
||||
priv, err := parsePrivateKey(secret)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
sign, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, hashed)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return sign
|
||||
}
|
||||
|
||||
func parsePrivateKey(privateKey string) (*rsa.PrivateKey, error) {
|
||||
privateKey = formatPrivateKey(privateKey)
|
||||
block, _ := pem.Decode([]byte(privateKey))
|
||||
if block == nil {
|
||||
return nil, errors.New("PrivateKey is invalid")
|
||||
}
|
||||
priKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch priKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return priKey.(*rsa.PrivateKey), nil
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func formatPrivateKey(privateKey string) string {
|
||||
if !strings.HasPrefix(privateKey, PEM_BEGIN) {
|
||||
privateKey = PEM_BEGIN + privateKey
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(privateKey, PEM_END) {
|
||||
privateKey += PEM_END
|
||||
}
|
||||
return privateKey
|
||||
}
|
||||
|
||||
func getCanonicalHeaders(headers map[string]*string) (string, []string) {
|
||||
tmp := make(map[string]string)
|
||||
tmpHeader := http.Header{}
|
||||
for k, v := range headers {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-acs-") || strings.ToLower(k) == "host" ||
|
||||
strings.ToLower(k) == "content-type" {
|
||||
tmp[strings.ToLower(k)] = strings.TrimSpace(tea.StringValue(v))
|
||||
tmpHeader.Add(strings.ToLower(k), strings.TrimSpace(tea.StringValue(v)))
|
||||
}
|
||||
}
|
||||
hs := newSorter(tmp)
|
||||
|
||||
// Sort the temp by the ascending order
|
||||
hs.Sort()
|
||||
canonicalheaders := ""
|
||||
for _, key := range hs.Keys {
|
||||
vals := tmpHeader[textproto.CanonicalMIMEHeaderKey(key)]
|
||||
sort.Strings(vals)
|
||||
canonicalheaders += key + ":" + strings.Join(vals, ",") + "\n"
|
||||
}
|
||||
|
||||
return canonicalheaders, hs.Keys
|
||||
}
|
||||
|
||||
func getCanonicalQueryString(query map[string]*string) string {
|
||||
canonicalQueryString := ""
|
||||
if tea.BoolValue(util.IsUnset(query)) {
|
||||
return canonicalQueryString
|
||||
}
|
||||
tmp := make(map[string]string)
|
||||
for k, v := range query {
|
||||
tmp[k] = tea.StringValue(v)
|
||||
}
|
||||
|
||||
hs := newSorter(tmp)
|
||||
|
||||
// Sort the temp by the ascending order
|
||||
hs.Sort()
|
||||
for i := range hs.Keys {
|
||||
if hs.Vals[i] != "" {
|
||||
canonicalQueryString += "&" + hs.Keys[i] + "=" + url.QueryEscape(hs.Vals[i])
|
||||
} else {
|
||||
canonicalQueryString += "&" + hs.Keys[i] + "="
|
||||
}
|
||||
}
|
||||
canonicalQueryString = strings.Replace(canonicalQueryString, "+", "%20", -1)
|
||||
canonicalQueryString = strings.Replace(canonicalQueryString, "*", "%2A", -1)
|
||||
canonicalQueryString = strings.Replace(canonicalQueryString, "%7E", "~", -1)
|
||||
|
||||
if canonicalQueryString != "" {
|
||||
canonicalQueryString = strings.TrimLeft(canonicalQueryString, "&")
|
||||
}
|
||||
return canonicalQueryString
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse filter into a form string
|
||||
* @param filter object
|
||||
* @return the string
|
||||
*/
|
||||
func ToForm(filter map[string]interface{}) (_result *string) {
|
||||
tmp := make(map[string]interface{})
|
||||
byt, _ := json.Marshal(filter)
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
_ = d.Decode(&tmp)
|
||||
|
||||
result := make(map[string]*string)
|
||||
for key, value := range tmp {
|
||||
filterValue := reflect.ValueOf(value)
|
||||
flatRepeatedList(filterValue, result, key)
|
||||
}
|
||||
|
||||
m := util.AnyifyMapValue(result)
|
||||
return util.ToFormString(m)
|
||||
}
|
||||
|
||||
func flatRepeatedList(dataValue reflect.Value, result map[string]*string, prefix string) {
|
||||
if !dataValue.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
dataType := dataValue.Type()
|
||||
if dataType.Kind().String() == "slice" {
|
||||
handleRepeatedParams(dataValue, result, prefix)
|
||||
} else if dataType.Kind().String() == "map" {
|
||||
handleMap(dataValue, result, prefix)
|
||||
} else {
|
||||
result[prefix] = tea.String(fmt.Sprintf("%v", dataValue.Interface()))
|
||||
}
|
||||
}
|
||||
|
||||
func handleRepeatedParams(repeatedFieldValue reflect.Value, result map[string]*string, prefix string) {
|
||||
if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() {
|
||||
for m := 0; m < repeatedFieldValue.Len(); m++ {
|
||||
elementValue := repeatedFieldValue.Index(m)
|
||||
key := prefix + "." + strconv.Itoa(m+1)
|
||||
fieldValue := reflect.ValueOf(elementValue.Interface())
|
||||
if fieldValue.Kind().String() == "map" {
|
||||
handleMap(fieldValue, result, key)
|
||||
} else {
|
||||
result[key] = tea.String(fmt.Sprintf("%v", fieldValue.Interface()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleMap(valueField reflect.Value, result map[string]*string, prefix string) {
|
||||
if valueField.IsValid() && valueField.String() != "" {
|
||||
valueFieldType := valueField.Type()
|
||||
if valueFieldType.Kind().String() == "map" {
|
||||
var byt []byte
|
||||
byt, _ = json.Marshal(valueField.Interface())
|
||||
cache := make(map[string]interface{})
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
_ = d.Decode(&cache)
|
||||
for key, value := range cache {
|
||||
pre := ""
|
||||
if prefix != "" {
|
||||
pre = prefix + "." + key
|
||||
} else {
|
||||
pre = key
|
||||
}
|
||||
fieldValue := reflect.ValueOf(value)
|
||||
flatRepeatedList(fieldValue, result, pre)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get timestamp
|
||||
* @return the timestamp string
|
||||
*/
|
||||
func GetTimestamp() (_result *string) {
|
||||
gmt := time.FixedZone("GMT", 0)
|
||||
return tea.String(time.Now().In(gmt).Format("2006-01-02T15:04:05Z"))
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse filter into a object which's type is map[string]string
|
||||
* @param filter query param
|
||||
* @return the object
|
||||
*/
|
||||
func Query(filter interface{}) (_result map[string]*string) {
|
||||
tmp := make(map[string]interface{})
|
||||
byt, _ := json.Marshal(filter)
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
_ = d.Decode(&tmp)
|
||||
|
||||
result := make(map[string]*string)
|
||||
for key, value := range tmp {
|
||||
filterValue := reflect.ValueOf(value)
|
||||
flatRepeatedList(filterValue, result, key)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
/**
|
||||
* Get signature according to signedParams, method and secret
|
||||
* @param signedParams params which need to be signed
|
||||
* @param method http method e.g. GET
|
||||
* @param secret AccessKeySecret
|
||||
* @return the signature
|
||||
*/
|
||||
func GetRPCSignature(signedParams map[string]*string, method *string, secret *string) (_result *string) {
|
||||
stringToSign := buildRpcStringToSign(signedParams, tea.StringValue(method))
|
||||
signature := sign(stringToSign, tea.StringValue(secret), "&")
|
||||
return tea.String(signature)
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse array into a string with specified style
|
||||
* @param array the array
|
||||
* @param prefix the prefix string
|
||||
* @style specified style e.g. repeatList
|
||||
* @return the string
|
||||
*/
|
||||
func ArrayToStringWithSpecifiedStyle(array interface{}, prefix *string, style *string) (_result *string) {
|
||||
if tea.BoolValue(util.IsUnset(array)) {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
sty := tea.StringValue(style)
|
||||
if sty == "repeatList" {
|
||||
tmp := map[string]interface{}{
|
||||
tea.StringValue(prefix): array,
|
||||
}
|
||||
return flatRepeatList(tmp)
|
||||
} else if sty == "simple" || sty == "spaceDelimited" || sty == "pipeDelimited" {
|
||||
return flatArray(array, sty)
|
||||
} else if sty == "json" {
|
||||
return util.ToJSONString(array)
|
||||
}
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
func ParseToMap(in interface{}) map[string]interface{} {
|
||||
if tea.BoolValue(util.IsUnset(in)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
tmp := make(map[string]interface{})
|
||||
byt, _ := json.Marshal(in)
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
err := d.Decode(&tmp)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return tmp
|
||||
}
|
||||
|
||||
func flatRepeatList(filter map[string]interface{}) (_result *string) {
|
||||
tmp := make(map[string]interface{})
|
||||
byt, _ := json.Marshal(filter)
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
_ = d.Decode(&tmp)
|
||||
|
||||
result := make(map[string]*string)
|
||||
for key, value := range tmp {
|
||||
filterValue := reflect.ValueOf(value)
|
||||
flatRepeatedList(filterValue, result, key)
|
||||
}
|
||||
|
||||
res := make(map[string]string)
|
||||
for k, v := range result {
|
||||
res[k] = tea.StringValue(v)
|
||||
}
|
||||
hs := newSorter(res)
|
||||
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
t := ""
|
||||
for i := range hs.Keys {
|
||||
if i == len(hs.Keys)-1 {
|
||||
t += hs.Keys[i] + "=" + hs.Vals[i]
|
||||
} else {
|
||||
t += hs.Keys[i] + "=" + hs.Vals[i] + "&&"
|
||||
}
|
||||
}
|
||||
return tea.String(t)
|
||||
}
|
||||
|
||||
func flatArray(array interface{}, sty string) *string {
|
||||
t := reflect.ValueOf(array)
|
||||
strs := make([]string, 0)
|
||||
for i := 0; i < t.Len(); i++ {
|
||||
tmp := t.Index(i)
|
||||
if tmp.Kind() == reflect.Ptr || tmp.Kind() == reflect.Interface {
|
||||
tmp = tmp.Elem()
|
||||
}
|
||||
|
||||
if tmp.Kind() == reflect.Ptr {
|
||||
tmp = tmp.Elem()
|
||||
}
|
||||
if tmp.Kind() == reflect.String {
|
||||
strs = append(strs, tmp.String())
|
||||
} else {
|
||||
inter := tmp.Interface()
|
||||
byt, _ := json.Marshal(inter)
|
||||
strs = append(strs, string(byt))
|
||||
}
|
||||
}
|
||||
str := ""
|
||||
if sty == "simple" {
|
||||
str = strings.Join(strs, ",")
|
||||
} else if sty == "spaceDelimited" {
|
||||
str = strings.Join(strs, " ")
|
||||
} else if sty == "pipeDelimited" {
|
||||
str = strings.Join(strs, "|")
|
||||
}
|
||||
return tea.String(str)
|
||||
}
|
||||
|
||||
func buildRpcStringToSign(signedParam map[string]*string, method string) (stringToSign string) {
|
||||
signParams := make(map[string]string)
|
||||
for key, value := range signedParam {
|
||||
signParams[key] = tea.StringValue(value)
|
||||
}
|
||||
|
||||
stringToSign = getUrlFormedMap(signParams)
|
||||
stringToSign = strings.Replace(stringToSign, "+", "%20", -1)
|
||||
stringToSign = strings.Replace(stringToSign, "*", "%2A", -1)
|
||||
stringToSign = strings.Replace(stringToSign, "%7E", "~", -1)
|
||||
stringToSign = url.QueryEscape(stringToSign)
|
||||
stringToSign = method + "&%2F&" + stringToSign
|
||||
return
|
||||
}
|
||||
|
||||
func getUrlFormedMap(source map[string]string) (urlEncoded string) {
|
||||
urlEncoder := url.Values{}
|
||||
for key, value := range source {
|
||||
urlEncoder.Add(key, value)
|
||||
}
|
||||
urlEncoded = urlEncoder.Encode()
|
||||
return
|
||||
}
|
||||
|
||||
func sign(stringToSign, accessKeySecret, secretSuffix string) string {
|
||||
secret := accessKeySecret + secretSuffix
|
||||
signedBytes := shaHmac1(stringToSign, secret)
|
||||
signedString := base64.StdEncoding.EncodeToString(signedBytes)
|
||||
return signedString
|
||||
}
|
||||
|
||||
func shaHmac1(source, secret string) []byte {
|
||||
key := []byte(secret)
|
||||
hmac := hmac.New(sha1.New, key)
|
||||
hmac.Write([]byte(source))
|
||||
return hmac.Sum(nil)
|
||||
}
|
||||
201
vendor/github.com/alibabacloud-go/tea-utils/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/tea-utils/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
468
vendor/github.com/alibabacloud-go/tea-utils/service/service.go
generated
vendored
468
vendor/github.com/alibabacloud-go/tea-utils/service/service.go
generated
vendored
@@ -1,468 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
)
|
||||
|
||||
var defaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s TeaDSL/1", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), "0.01")
|
||||
|
||||
type RuntimeOptions struct {
|
||||
Autoretry *bool `json:"autoretry" xml:"autoretry"`
|
||||
IgnoreSSL *bool `json:"ignoreSSL" xml:"ignoreSSL"`
|
||||
MaxAttempts *int `json:"maxAttempts" xml:"maxAttempts"`
|
||||
BackoffPolicy *string `json:"backoffPolicy" xml:"backoffPolicy"`
|
||||
BackoffPeriod *int `json:"backoffPeriod" xml:"backoffPeriod"`
|
||||
ReadTimeout *int `json:"readTimeout" xml:"readTimeout"`
|
||||
ConnectTimeout *int `json:"connectTimeout" xml:"connectTimeout"`
|
||||
LocalAddr *string `json:"localAddr" xml:"localAddr"`
|
||||
HttpProxy *string `json:"httpProxy" xml:"httpProxy"`
|
||||
HttpsProxy *string `json:"httpsProxy" xml:"httpsProxy"`
|
||||
NoProxy *string `json:"noProxy" xml:"noProxy"`
|
||||
MaxIdleConns *int `json:"maxIdleConns" xml:"maxIdleConns"`
|
||||
Socks5Proxy *string `json:"socks5Proxy" xml:"socks5Proxy"`
|
||||
Socks5NetWork *string `json:"socks5NetWork" xml:"socks5NetWork"`
|
||||
KeepAlive *bool `json:"keepAlive" xml:"keepAlive"`
|
||||
}
|
||||
|
||||
func (s RuntimeOptions) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s RuntimeOptions) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetAutoretry(v bool) *RuntimeOptions {
|
||||
s.Autoretry = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetIgnoreSSL(v bool) *RuntimeOptions {
|
||||
s.IgnoreSSL = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetMaxAttempts(v int) *RuntimeOptions {
|
||||
s.MaxAttempts = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetBackoffPolicy(v string) *RuntimeOptions {
|
||||
s.BackoffPolicy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetBackoffPeriod(v int) *RuntimeOptions {
|
||||
s.BackoffPeriod = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetReadTimeout(v int) *RuntimeOptions {
|
||||
s.ReadTimeout = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetConnectTimeout(v int) *RuntimeOptions {
|
||||
s.ConnectTimeout = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetHttpProxy(v string) *RuntimeOptions {
|
||||
s.HttpProxy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetHttpsProxy(v string) *RuntimeOptions {
|
||||
s.HttpsProxy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetNoProxy(v string) *RuntimeOptions {
|
||||
s.NoProxy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetMaxIdleConns(v int) *RuntimeOptions {
|
||||
s.MaxIdleConns = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetLocalAddr(v string) *RuntimeOptions {
|
||||
s.LocalAddr = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetSocks5Proxy(v string) *RuntimeOptions {
|
||||
s.Socks5Proxy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetSocks5NetWork(v string) *RuntimeOptions {
|
||||
s.Socks5NetWork = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *RuntimeOptions) SetKeepAlive(v bool) *RuntimeOptions {
|
||||
s.KeepAlive = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func ReadAsString(body io.Reader) (*string, error) {
|
||||
byt, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
r, ok := body.(io.ReadCloser)
|
||||
if ok {
|
||||
r.Close()
|
||||
}
|
||||
return tea.String(string(byt)), nil
|
||||
}
|
||||
|
||||
func StringifyMapValue(a map[string]interface{}) map[string]*string {
|
||||
res := make(map[string]*string)
|
||||
for key, value := range a {
|
||||
if value != nil {
|
||||
switch value.(type) {
|
||||
case string:
|
||||
res[key] = tea.String(value.(string))
|
||||
default:
|
||||
byt, _ := json.Marshal(value)
|
||||
res[key] = tea.String(string(byt))
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func AnyifyMapValue(a map[string]*string) map[string]interface{} {
|
||||
res := make(map[string]interface{})
|
||||
for key, value := range a {
|
||||
res[key] = tea.StringValue(value)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func ReadAsBytes(body io.Reader) ([]byte, error) {
|
||||
byt, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, ok := body.(io.ReadCloser)
|
||||
if ok {
|
||||
r.Close()
|
||||
}
|
||||
return byt, nil
|
||||
}
|
||||
|
||||
func DefaultString(reaStr, defaultStr *string) *string {
|
||||
if reaStr == nil {
|
||||
return defaultStr
|
||||
}
|
||||
return reaStr
|
||||
}
|
||||
|
||||
func ToJSONString(a interface{}) *string {
|
||||
switch v := a.(type) {
|
||||
case *string:
|
||||
return v
|
||||
case string:
|
||||
return tea.String(v)
|
||||
case []byte:
|
||||
return tea.String(string(v))
|
||||
case io.Reader:
|
||||
byt, err := ioutil.ReadAll(v)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return tea.String(string(byt))
|
||||
}
|
||||
byt, err := json.Marshal(a)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return tea.String(string(byt))
|
||||
}
|
||||
|
||||
func DefaultNumber(reaNum, defaultNum *int) *int {
|
||||
if reaNum == nil {
|
||||
return defaultNum
|
||||
}
|
||||
return reaNum
|
||||
}
|
||||
|
||||
func ReadAsJSON(body io.Reader) (result interface{}, err error) {
|
||||
byt, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if string(byt) == "" {
|
||||
return
|
||||
}
|
||||
r, ok := body.(io.ReadCloser)
|
||||
if ok {
|
||||
r.Close()
|
||||
}
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
err = d.Decode(&result)
|
||||
return
|
||||
}
|
||||
|
||||
func GetNonce() *string {
|
||||
return tea.String(getUUID())
|
||||
}
|
||||
|
||||
func Empty(val *string) *bool {
|
||||
return tea.Bool(val == nil || tea.StringValue(val) == "")
|
||||
}
|
||||
|
||||
func ValidateModel(a interface{}) error {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
err := tea.Validate(a)
|
||||
return err
|
||||
}
|
||||
|
||||
func EqualString(val1, val2 *string) *bool {
|
||||
return tea.Bool(tea.StringValue(val1) == tea.StringValue(val2))
|
||||
}
|
||||
|
||||
func EqualNumber(val1, val2 *int) *bool {
|
||||
return tea.Bool(tea.IntValue(val1) == tea.IntValue(val2))
|
||||
}
|
||||
|
||||
func IsUnset(val interface{}) *bool {
|
||||
if val == nil {
|
||||
return tea.Bool(true)
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(val)
|
||||
if v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice || v.Kind() == reflect.Map {
|
||||
return tea.Bool(v.IsNil())
|
||||
}
|
||||
|
||||
valType := reflect.TypeOf(val)
|
||||
valZero := reflect.Zero(valType)
|
||||
return tea.Bool(valZero == v)
|
||||
}
|
||||
|
||||
func ToBytes(a *string) []byte {
|
||||
return []byte(tea.StringValue(a))
|
||||
}
|
||||
|
||||
func AssertAsMap(a interface{}) map[string]interface{} {
|
||||
r := reflect.ValueOf(a)
|
||||
if r.Kind().String() != "map" {
|
||||
panic(fmt.Sprintf("%v is not a map[string]interface{}", a))
|
||||
}
|
||||
|
||||
res := make(map[string]interface{})
|
||||
tmp := r.MapKeys()
|
||||
for _, key := range tmp {
|
||||
res[key.String()] = r.MapIndex(key).Interface()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func AssertAsNumber(a interface{}) *int {
|
||||
res := 0
|
||||
switch a.(type) {
|
||||
case int:
|
||||
tmp := a.(int)
|
||||
res = tmp
|
||||
case *int:
|
||||
tmp := a.(*int)
|
||||
res = tea.IntValue(tmp)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v is not a int", a))
|
||||
}
|
||||
|
||||
return tea.Int(res)
|
||||
}
|
||||
|
||||
func AssertAsBoolean(a interface{}) *bool {
|
||||
res := false
|
||||
switch a.(type) {
|
||||
case bool:
|
||||
tmp := a.(bool)
|
||||
res = tmp
|
||||
case *bool:
|
||||
tmp := a.(*bool)
|
||||
res = tea.BoolValue(tmp)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v is not a bool", a))
|
||||
}
|
||||
|
||||
return tea.Bool(res)
|
||||
}
|
||||
|
||||
func AssertAsString(a interface{}) *string {
|
||||
res := ""
|
||||
switch a.(type) {
|
||||
case string:
|
||||
tmp := a.(string)
|
||||
res = tmp
|
||||
case *string:
|
||||
tmp := a.(*string)
|
||||
res = tea.StringValue(tmp)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v is not a string", a))
|
||||
}
|
||||
|
||||
return tea.String(res)
|
||||
}
|
||||
|
||||
func AssertAsBytes(a interface{}) []byte {
|
||||
res, ok := a.([]byte)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("%v is not []byte", a))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func AssertAsReadable(a interface{}) io.Reader {
|
||||
res, ok := a.(io.Reader)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("%v is not reader", a))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func AssertAsArray(a interface{}) []interface{} {
|
||||
r := reflect.ValueOf(a)
|
||||
if r.Kind().String() != "array" && r.Kind().String() != "slice" {
|
||||
panic(fmt.Sprintf("%v is not a [x]interface{}", a))
|
||||
}
|
||||
aLen := r.Len()
|
||||
res := make([]interface{}, 0)
|
||||
for i := 0; i < aLen; i++ {
|
||||
res = append(res, r.Index(i).Interface())
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func ParseJSON(a *string) interface{} {
|
||||
mapTmp := make(map[string]interface{})
|
||||
d := json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a))))
|
||||
d.UseNumber()
|
||||
err := d.Decode(&mapTmp)
|
||||
if err == nil {
|
||||
return mapTmp
|
||||
}
|
||||
|
||||
sliceTmp := make([]interface{}, 0)
|
||||
d = json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a))))
|
||||
d.UseNumber()
|
||||
err = d.Decode(&sliceTmp)
|
||||
if err == nil {
|
||||
return sliceTmp
|
||||
}
|
||||
|
||||
if num, err := strconv.Atoi(tea.StringValue(a)); err == nil {
|
||||
return num
|
||||
}
|
||||
|
||||
if ok, err := strconv.ParseBool(tea.StringValue(a)); err == nil {
|
||||
return ok
|
||||
}
|
||||
|
||||
if floa64tVal, err := strconv.ParseFloat(tea.StringValue(a), 64); err == nil {
|
||||
return floa64tVal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ToString(a []byte) *string {
|
||||
return tea.String(string(a))
|
||||
}
|
||||
|
||||
func ToMap(in interface{}) map[string]interface{} {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
res := tea.ToMap(in)
|
||||
return res
|
||||
}
|
||||
|
||||
func ToFormString(a map[string]interface{}) *string {
|
||||
if a == nil {
|
||||
return tea.String("")
|
||||
}
|
||||
res := ""
|
||||
urlEncoder := url.Values{}
|
||||
for key, value := range a {
|
||||
v := fmt.Sprintf("%v", value)
|
||||
urlEncoder.Add(key, v)
|
||||
}
|
||||
res = urlEncoder.Encode()
|
||||
return tea.String(res)
|
||||
}
|
||||
|
||||
func GetDateUTCString() *string {
|
||||
return tea.String(time.Now().UTC().Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
func GetUserAgent(userAgent *string) *string {
|
||||
if userAgent != nil && tea.StringValue(userAgent) != "" {
|
||||
return tea.String(defaultUserAgent + " " + tea.StringValue(userAgent))
|
||||
}
|
||||
return tea.String(defaultUserAgent)
|
||||
}
|
||||
|
||||
func Is2xx(code *int) *bool {
|
||||
tmp := tea.IntValue(code)
|
||||
return tea.Bool(tmp >= 200 && tmp < 300)
|
||||
}
|
||||
|
||||
func Is3xx(code *int) *bool {
|
||||
tmp := tea.IntValue(code)
|
||||
return tea.Bool(tmp >= 300 && tmp < 400)
|
||||
}
|
||||
|
||||
func Is4xx(code *int) *bool {
|
||||
tmp := tea.IntValue(code)
|
||||
return tea.Bool(tmp >= 400 && tmp < 500)
|
||||
}
|
||||
|
||||
func Is5xx(code *int) *bool {
|
||||
tmp := tea.IntValue(code)
|
||||
return tea.Bool(tmp >= 500 && tmp < 600)
|
||||
}
|
||||
|
||||
func Sleep(millisecond *int) error {
|
||||
ms := tea.IntValue(millisecond)
|
||||
time.Sleep(time.Duration(ms) * time.Millisecond)
|
||||
return nil
|
||||
}
|
||||
|
||||
func ToArray(in interface{}) []map[string]interface{} {
|
||||
if tea.BoolValue(IsUnset(in)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
tmp := make([]map[string]interface{}, 0)
|
||||
byt, _ := json.Marshal(in)
|
||||
d := json.NewDecoder(bytes.NewReader(byt))
|
||||
d.UseNumber()
|
||||
err := d.Decode(&tmp)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return tmp
|
||||
}
|
||||
52
vendor/github.com/alibabacloud-go/tea-utils/service/util.go
generated
vendored
52
vendor/github.com/alibabacloud-go/tea-utils/service/util.go
generated
vendored
@@ -1,52 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
rand2 "math/rand"
|
||||
)
|
||||
|
||||
type UUID [16]byte
|
||||
|
||||
const numBytes = "1234567890"
|
||||
|
||||
func getUUID() (uuidHex string) {
|
||||
uuid := newUUID()
|
||||
uuidHex = hex.EncodeToString(uuid[:])
|
||||
return
|
||||
}
|
||||
|
||||
func randStringBytes(n int) string {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = numBytes[rand2.Intn(len(numBytes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func newUUID() UUID {
|
||||
ns := UUID{}
|
||||
safeRandom(ns[:])
|
||||
u := newFromHash(md5.New(), ns, randStringBytes(16))
|
||||
u[6] = (u[6] & 0x0f) | (byte(2) << 4)
|
||||
u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func newFromHash(h hash.Hash, ns UUID, name string) UUID {
|
||||
u := UUID{}
|
||||
h.Write(ns[:])
|
||||
h.Write([]byte(name))
|
||||
copy(u[:], h.Sum(nil))
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func safeRandom(dest []byte) {
|
||||
if _, err := rand.Read(dest); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
105
vendor/github.com/alibabacloud-go/tea-xml/service/service.go
generated
vendored
105
vendor/github.com/alibabacloud-go/tea-xml/service/service.go
generated
vendored
@@ -1,105 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
v2 "github.com/clbanning/mxj/v2"
|
||||
)
|
||||
|
||||
func ToXML(obj map[string]interface{}) *string {
|
||||
return tea.String(mapToXML(obj))
|
||||
}
|
||||
|
||||
func ParseXml(val *string, result interface{}) map[string]interface{} {
|
||||
resp := make(map[string]interface{})
|
||||
|
||||
start := getStartElement([]byte(tea.StringValue(val)))
|
||||
if result == nil {
|
||||
vm, err := v2.NewMapXml([]byte(tea.StringValue(val)))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return vm
|
||||
}
|
||||
out, err := xmlUnmarshal([]byte(tea.StringValue(val)), result)
|
||||
if err != nil {
|
||||
return resp
|
||||
}
|
||||
resp[start] = out
|
||||
return resp
|
||||
}
|
||||
|
||||
func mapToXML(val map[string]interface{}) string {
|
||||
res := ""
|
||||
for key, value := range val {
|
||||
switch value.(type) {
|
||||
case []interface{}:
|
||||
for _, v := range value.([]interface{}) {
|
||||
switch v.(type) {
|
||||
case map[string]interface{}:
|
||||
res += `<` + key + `>`
|
||||
res += mapToXML(v.(map[string]interface{}))
|
||||
res += `</` + key + `>`
|
||||
default:
|
||||
if fmt.Sprintf("%v", v) != `<nil>` {
|
||||
res += `<` + key + `>`
|
||||
res += fmt.Sprintf("%v", v)
|
||||
res += `</` + key + `>`
|
||||
}
|
||||
}
|
||||
}
|
||||
case map[string]interface{}:
|
||||
res += `<` + key + `>`
|
||||
res += mapToXML(value.(map[string]interface{}))
|
||||
res += `</` + key + `>`
|
||||
default:
|
||||
if fmt.Sprintf("%v", value) != `<nil>` {
|
||||
res += `<` + key + `>`
|
||||
res += fmt.Sprintf("%v", value)
|
||||
res += `</` + key + `>`
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func getStartElement(body []byte) string {
|
||||
d := xml.NewDecoder(bytes.NewReader(body))
|
||||
for {
|
||||
tok, err := d.Token()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if t, ok := tok.(xml.StartElement); ok {
|
||||
return t.Name.Local
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func xmlUnmarshal(body []byte, result interface{}) (interface{}, error) {
|
||||
start := getStartElement(body)
|
||||
dataValue := reflect.ValueOf(result).Elem()
|
||||
dataType := dataValue.Type()
|
||||
for i := 0; i < dataType.NumField(); i++ {
|
||||
field := dataType.Field(i)
|
||||
name, containsNameTag := field.Tag.Lookup("xml")
|
||||
name = strings.Replace(name, ",omitempty", "", -1)
|
||||
if containsNameTag {
|
||||
if name == start {
|
||||
realType := dataValue.Field(i).Type()
|
||||
realValue := reflect.New(realType).Interface()
|
||||
err := xml.Unmarshal(body, realValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return realValue, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
201
vendor/github.com/alibabacloud-go/tea/LICENSE
generated
vendored
201
vendor/github.com/alibabacloud-go/tea/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
333
vendor/github.com/alibabacloud-go/tea/tea/json_parser.go
generated
vendored
333
vendor/github.com/alibabacloud-go/tea/tea/json_parser.go
generated
vendored
@@ -1,333 +0,0 @@
|
||||
package tea
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/modern-go/reflect2"
|
||||
)
|
||||
|
||||
const maxUint = ^uint(0)
|
||||
const maxInt = int(maxUint >> 1)
|
||||
const minInt = -maxInt - 1
|
||||
|
||||
var jsonParser jsoniter.API
|
||||
|
||||
func init() {
|
||||
jsonParser = jsoniter.Config{
|
||||
EscapeHTML: true,
|
||||
SortMapKeys: true,
|
||||
ValidateJsonRawMessage: true,
|
||||
CaseSensitive: true,
|
||||
}.Froze()
|
||||
|
||||
jsonParser.RegisterExtension(newBetterFuzzyExtension())
|
||||
}
|
||||
|
||||
func newBetterFuzzyExtension() jsoniter.DecoderExtension {
|
||||
return jsoniter.DecoderExtension{
|
||||
reflect2.DefaultTypeOfKind(reflect.String): &nullableFuzzyStringDecoder{},
|
||||
reflect2.DefaultTypeOfKind(reflect.Bool): &fuzzyBoolDecoder{},
|
||||
reflect2.DefaultTypeOfKind(reflect.Float32): &nullableFuzzyFloat32Decoder{},
|
||||
reflect2.DefaultTypeOfKind(reflect.Float64): &nullableFuzzyFloat64Decoder{},
|
||||
reflect2.DefaultTypeOfKind(reflect.Int): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(maxInt) || val < float64(minInt) {
|
||||
iter.ReportError("fuzzy decode int", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*int)(ptr)) = int(val)
|
||||
} else {
|
||||
*((*int)(ptr)) = iter.ReadInt()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Uint): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(maxUint) || val < 0 {
|
||||
iter.ReportError("fuzzy decode uint", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*uint)(ptr)) = uint(val)
|
||||
} else {
|
||||
*((*uint)(ptr)) = iter.ReadUint()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Int8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxInt8) || val < float64(math.MinInt8) {
|
||||
iter.ReportError("fuzzy decode int8", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*int8)(ptr)) = int8(val)
|
||||
} else {
|
||||
*((*int8)(ptr)) = iter.ReadInt8()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Uint8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxUint8) || val < 0 {
|
||||
iter.ReportError("fuzzy decode uint8", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*uint8)(ptr)) = uint8(val)
|
||||
} else {
|
||||
*((*uint8)(ptr)) = iter.ReadUint8()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Int16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxInt16) || val < float64(math.MinInt16) {
|
||||
iter.ReportError("fuzzy decode int16", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*int16)(ptr)) = int16(val)
|
||||
} else {
|
||||
*((*int16)(ptr)) = iter.ReadInt16()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Uint16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxUint16) || val < 0 {
|
||||
iter.ReportError("fuzzy decode uint16", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*uint16)(ptr)) = uint16(val)
|
||||
} else {
|
||||
*((*uint16)(ptr)) = iter.ReadUint16()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Int32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxInt32) || val < float64(math.MinInt32) {
|
||||
iter.ReportError("fuzzy decode int32", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*int32)(ptr)) = int32(val)
|
||||
} else {
|
||||
*((*int32)(ptr)) = iter.ReadInt32()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Uint32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxUint32) || val < 0 {
|
||||
iter.ReportError("fuzzy decode uint32", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*uint32)(ptr)) = uint32(val)
|
||||
} else {
|
||||
*((*uint32)(ptr)) = iter.ReadUint32()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Int64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxInt64) || val < float64(math.MinInt64) {
|
||||
iter.ReportError("fuzzy decode int64", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*int64)(ptr)) = int64(val)
|
||||
} else {
|
||||
*((*int64)(ptr)) = iter.ReadInt64()
|
||||
}
|
||||
}},
|
||||
reflect2.DefaultTypeOfKind(reflect.Uint64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
if isFloat {
|
||||
val := iter.ReadFloat64()
|
||||
if val > float64(math.MaxUint64) || val < 0 {
|
||||
iter.ReportError("fuzzy decode uint64", "exceed range")
|
||||
return
|
||||
}
|
||||
*((*uint64)(ptr)) = uint64(val)
|
||||
} else {
|
||||
*((*uint64)(ptr)) = iter.ReadUint64()
|
||||
}
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
type nullableFuzzyStringDecoder struct {
|
||||
}
|
||||
|
||||
func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
valueType := iter.WhatIsNext()
|
||||
switch valueType {
|
||||
case jsoniter.NumberValue:
|
||||
var number json.Number
|
||||
iter.ReadVal(&number)
|
||||
*((*string)(ptr)) = string(number)
|
||||
case jsoniter.StringValue:
|
||||
*((*string)(ptr)) = iter.ReadString()
|
||||
case jsoniter.BoolValue:
|
||||
*((*string)(ptr)) = strconv.FormatBool(iter.ReadBool())
|
||||
case jsoniter.NilValue:
|
||||
iter.ReadNil()
|
||||
*((*string)(ptr)) = ""
|
||||
default:
|
||||
iter.ReportError("fuzzyStringDecoder", "not number or string or bool")
|
||||
}
|
||||
}
|
||||
|
||||
type fuzzyBoolDecoder struct {
|
||||
}
|
||||
|
||||
func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
valueType := iter.WhatIsNext()
|
||||
switch valueType {
|
||||
case jsoniter.BoolValue:
|
||||
*((*bool)(ptr)) = iter.ReadBool()
|
||||
case jsoniter.NumberValue:
|
||||
var number json.Number
|
||||
iter.ReadVal(&number)
|
||||
num, err := number.Int64()
|
||||
if err != nil {
|
||||
iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed")
|
||||
}
|
||||
if num == 0 {
|
||||
*((*bool)(ptr)) = false
|
||||
} else {
|
||||
*((*bool)(ptr)) = true
|
||||
}
|
||||
case jsoniter.StringValue:
|
||||
strValue := strings.ToLower(iter.ReadString())
|
||||
if strValue == "true" {
|
||||
*((*bool)(ptr)) = true
|
||||
} else if strValue == "false" || strValue == "" {
|
||||
*((*bool)(ptr)) = false
|
||||
} else {
|
||||
iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue)
|
||||
}
|
||||
case jsoniter.NilValue:
|
||||
iter.ReadNil()
|
||||
*((*bool)(ptr)) = false
|
||||
default:
|
||||
iter.ReportError("fuzzyBoolDecoder", "not number or string or nil")
|
||||
}
|
||||
}
|
||||
|
||||
type nullableFuzzyIntegerDecoder struct {
|
||||
fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator)
|
||||
}
|
||||
|
||||
func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
valueType := iter.WhatIsNext()
|
||||
var str string
|
||||
switch valueType {
|
||||
case jsoniter.NumberValue:
|
||||
var number json.Number
|
||||
iter.ReadVal(&number)
|
||||
str = string(number)
|
||||
case jsoniter.StringValue:
|
||||
str = iter.ReadString()
|
||||
// support empty string
|
||||
if str == "" {
|
||||
str = "0"
|
||||
}
|
||||
case jsoniter.BoolValue:
|
||||
if iter.ReadBool() {
|
||||
str = "1"
|
||||
} else {
|
||||
str = "0"
|
||||
}
|
||||
case jsoniter.NilValue:
|
||||
iter.ReadNil()
|
||||
str = "0"
|
||||
default:
|
||||
iter.ReportError("fuzzyIntegerDecoder", "not number or string")
|
||||
}
|
||||
newIter := iter.Pool().BorrowIterator([]byte(str))
|
||||
defer iter.Pool().ReturnIterator(newIter)
|
||||
isFloat := strings.IndexByte(str, '.') != -1
|
||||
decoder.fun(isFloat, ptr, newIter)
|
||||
if newIter.Error != nil && newIter.Error != io.EOF {
|
||||
iter.Error = newIter.Error
|
||||
}
|
||||
}
|
||||
|
||||
type nullableFuzzyFloat32Decoder struct {
|
||||
}
|
||||
|
||||
func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
valueType := iter.WhatIsNext()
|
||||
var str string
|
||||
switch valueType {
|
||||
case jsoniter.NumberValue:
|
||||
*((*float32)(ptr)) = iter.ReadFloat32()
|
||||
case jsoniter.StringValue:
|
||||
str = iter.ReadString()
|
||||
// support empty string
|
||||
if str == "" {
|
||||
*((*float32)(ptr)) = 0
|
||||
return
|
||||
}
|
||||
newIter := iter.Pool().BorrowIterator([]byte(str))
|
||||
defer iter.Pool().ReturnIterator(newIter)
|
||||
*((*float32)(ptr)) = newIter.ReadFloat32()
|
||||
if newIter.Error != nil && newIter.Error != io.EOF {
|
||||
iter.Error = newIter.Error
|
||||
}
|
||||
case jsoniter.BoolValue:
|
||||
// support bool to float32
|
||||
if iter.ReadBool() {
|
||||
*((*float32)(ptr)) = 1
|
||||
} else {
|
||||
*((*float32)(ptr)) = 0
|
||||
}
|
||||
case jsoniter.NilValue:
|
||||
iter.ReadNil()
|
||||
*((*float32)(ptr)) = 0
|
||||
default:
|
||||
iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string")
|
||||
}
|
||||
}
|
||||
|
||||
type nullableFuzzyFloat64Decoder struct {
|
||||
}
|
||||
|
||||
func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
valueType := iter.WhatIsNext()
|
||||
var str string
|
||||
switch valueType {
|
||||
case jsoniter.NumberValue:
|
||||
*((*float64)(ptr)) = iter.ReadFloat64()
|
||||
case jsoniter.StringValue:
|
||||
str = iter.ReadString()
|
||||
// support empty string
|
||||
if str == "" {
|
||||
*((*float64)(ptr)) = 0
|
||||
return
|
||||
}
|
||||
newIter := iter.Pool().BorrowIterator([]byte(str))
|
||||
defer iter.Pool().ReturnIterator(newIter)
|
||||
*((*float64)(ptr)) = newIter.ReadFloat64()
|
||||
if newIter.Error != nil && newIter.Error != io.EOF {
|
||||
iter.Error = newIter.Error
|
||||
}
|
||||
case jsoniter.BoolValue:
|
||||
// support bool to float64
|
||||
if iter.ReadBool() {
|
||||
*((*float64)(ptr)) = 1
|
||||
} else {
|
||||
*((*float64)(ptr)) = 0
|
||||
}
|
||||
case jsoniter.NilValue:
|
||||
// support empty string
|
||||
iter.ReadNil()
|
||||
*((*float64)(ptr)) = 0
|
||||
default:
|
||||
iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string")
|
||||
}
|
||||
}
|
||||
1142
vendor/github.com/alibabacloud-go/tea/tea/tea.go
generated
vendored
1142
vendor/github.com/alibabacloud-go/tea/tea/tea.go
generated
vendored
File diff suppressed because it is too large
Load Diff
491
vendor/github.com/alibabacloud-go/tea/tea/trans.go
generated
vendored
491
vendor/github.com/alibabacloud-go/tea/tea/trans.go
generated
vendored
@@ -1,491 +0,0 @@
|
||||
package tea
|
||||
|
||||
func String(a string) *string {
|
||||
return &a
|
||||
}
|
||||
|
||||
func StringValue(a *string) string {
|
||||
if a == nil {
|
||||
return ""
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Int(a int) *int {
|
||||
return &a
|
||||
}
|
||||
|
||||
func IntValue(a *int) int {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Int8(a int8) *int8 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Int8Value(a *int8) int8 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Int16(a int16) *int16 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Int16Value(a *int16) int16 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Int32(a int32) *int32 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Int32Value(a *int32) int32 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Int64(a int64) *int64 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Int64Value(a *int64) int64 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Bool(a bool) *bool {
|
||||
return &a
|
||||
}
|
||||
|
||||
func BoolValue(a *bool) bool {
|
||||
if a == nil {
|
||||
return false
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Uint(a uint) *uint {
|
||||
return &a
|
||||
}
|
||||
|
||||
func UintValue(a *uint) uint {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Uint8(a uint8) *uint8 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Uint8Value(a *uint8) uint8 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Uint16(a uint16) *uint16 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Uint16Value(a *uint16) uint16 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Uint32(a uint32) *uint32 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Uint32Value(a *uint32) uint32 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Uint64(a uint64) *uint64 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Uint64Value(a *uint64) uint64 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Float32(a float32) *float32 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Float32Value(a *float32) float32 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func Float64(a float64) *float64 {
|
||||
return &a
|
||||
}
|
||||
|
||||
func Float64Value(a *float64) float64 {
|
||||
if a == nil {
|
||||
return 0
|
||||
}
|
||||
return *a
|
||||
}
|
||||
|
||||
func IntSlice(a []int) []*int {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*int, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func IntValueSlice(a []*int) []int {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]int, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int8Slice(a []int8) []*int8 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*int8, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int8ValueSlice(a []*int8) []int8 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]int8, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int16Slice(a []int16) []*int16 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*int16, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int16ValueSlice(a []*int16) []int16 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]int16, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int32Slice(a []int32) []*int32 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*int32, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int32ValueSlice(a []*int32) []int32 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]int32, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int64Slice(a []int64) []*int64 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*int64, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Int64ValueSlice(a []*int64) []int64 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]int64, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func UintSlice(a []uint) []*uint {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*uint, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func UintValueSlice(a []*uint) []uint {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]uint, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint8Slice(a []uint8) []*uint8 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*uint8, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint8ValueSlice(a []*uint8) []uint8 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]uint8, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint16Slice(a []uint16) []*uint16 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*uint16, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint16ValueSlice(a []*uint16) []uint16 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]uint16, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint32Slice(a []uint32) []*uint32 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*uint32, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint32ValueSlice(a []*uint32) []uint32 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]uint32, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint64Slice(a []uint64) []*uint64 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*uint64, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Uint64ValueSlice(a []*uint64) []uint64 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]uint64, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Float32Slice(a []float32) []*float32 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*float32, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Float32ValueSlice(a []*float32) []float32 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]float32, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Float64Slice(a []float64) []*float64 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*float64, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func Float64ValueSlice(a []*float64) []float64 {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]float64, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func StringSlice(a []string) []*string {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*string, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func StringSliceValue(a []*string) []string {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]string, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func BoolSlice(a []bool) []*bool {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]*bool, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
res[i] = &a[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func BoolSliceValue(a []*bool) []bool {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
res := make([]bool, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != nil {
|
||||
res[i] = *a[i]
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
64
vendor/github.com/alibabacloud-go/tea/utils/assert.go
generated
vendored
64
vendor/github.com/alibabacloud-go/tea/utils/assert.go
generated
vendored
@@ -1,64 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func isNil(object interface{}) bool {
|
||||
if object == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(object)
|
||||
kind := value.Kind()
|
||||
isNilableKind := containsKind(
|
||||
[]reflect.Kind{
|
||||
reflect.Chan, reflect.Func,
|
||||
reflect.Interface, reflect.Map,
|
||||
reflect.Ptr, reflect.Slice},
|
||||
kind)
|
||||
|
||||
if isNilableKind && value.IsNil() {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
|
||||
for i := 0; i < len(kinds); i++ {
|
||||
if kind == kinds[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func AssertEqual(t *testing.T, a, b interface{}) {
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("%v != %v", a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func AssertNil(t *testing.T, object interface{}) {
|
||||
if !isNil(object) {
|
||||
t.Errorf("%v is not nil", object)
|
||||
}
|
||||
}
|
||||
|
||||
func AssertNotNil(t *testing.T, object interface{}) {
|
||||
if isNil(object) {
|
||||
t.Errorf("%v is nil", object)
|
||||
}
|
||||
}
|
||||
|
||||
func AssertContains(t *testing.T, contains string, msgAndArgs ...string) {
|
||||
for _, value := range msgAndArgs {
|
||||
if ok := strings.Contains(contains, value); !ok {
|
||||
t.Errorf("%s does not contain %s", contains, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
109
vendor/github.com/alibabacloud-go/tea/utils/logger.go
generated
vendored
109
vendor/github.com/alibabacloud-go/tea/utils/logger.go
generated
vendored
@@ -1,109 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
*log.Logger
|
||||
formatTemplate string
|
||||
isOpen bool
|
||||
lastLogMsg string
|
||||
}
|
||||
|
||||
var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}`
|
||||
var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"}
|
||||
var logChannel string
|
||||
|
||||
func InitLogMsg(fieldMap map[string]string) {
|
||||
for _, value := range loggerParam {
|
||||
fieldMap[value] = ""
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) SetFormatTemplate(template string) {
|
||||
logger.formatTemplate = template
|
||||
|
||||
}
|
||||
|
||||
func (logger *Logger) GetFormatTemplate() string {
|
||||
return logger.formatTemplate
|
||||
|
||||
}
|
||||
|
||||
func NewLogger(level string, channel string, out io.Writer, template string) *Logger {
|
||||
if level == "" {
|
||||
level = "info"
|
||||
}
|
||||
|
||||
logChannel = "AlibabaCloud"
|
||||
if channel != "" {
|
||||
logChannel = channel
|
||||
}
|
||||
log := log.New(out, "["+strings.ToUpper(level)+"]", log.Lshortfile)
|
||||
if template == "" {
|
||||
template = defaultLoggerTemplate
|
||||
}
|
||||
|
||||
return &Logger{
|
||||
Logger: log,
|
||||
formatTemplate: template,
|
||||
isOpen: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) OpenLogger() {
|
||||
logger.isOpen = true
|
||||
}
|
||||
|
||||
func (logger *Logger) CloseLogger() {
|
||||
logger.isOpen = false
|
||||
}
|
||||
|
||||
func (logger *Logger) SetIsopen(isopen bool) {
|
||||
logger.isOpen = isopen
|
||||
}
|
||||
|
||||
func (logger *Logger) GetIsopen() bool {
|
||||
return logger.isOpen
|
||||
}
|
||||
|
||||
func (logger *Logger) SetLastLogMsg(lastLogMsg string) {
|
||||
logger.lastLogMsg = lastLogMsg
|
||||
}
|
||||
|
||||
func (logger *Logger) GetLastLogMsg() string {
|
||||
return logger.lastLogMsg
|
||||
}
|
||||
|
||||
func SetLogChannel(channel string) {
|
||||
logChannel = channel
|
||||
}
|
||||
|
||||
func (logger *Logger) PrintLog(fieldMap map[string]string, err error) {
|
||||
if err != nil {
|
||||
fieldMap["{error}"] = err.Error()
|
||||
}
|
||||
fieldMap["{time}"] = time.Now().Format("2006-01-02 15:04:05")
|
||||
fieldMap["{ts}"] = getTimeInFormatISO8601()
|
||||
fieldMap["{channel}"] = logChannel
|
||||
if logger != nil {
|
||||
logMsg := logger.formatTemplate
|
||||
for key, value := range fieldMap {
|
||||
logMsg = strings.Replace(logMsg, key, value, -1)
|
||||
}
|
||||
logger.lastLogMsg = logMsg
|
||||
if logger.isOpen == true {
|
||||
logger.Output(2, logMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getTimeInFormatISO8601() (timeStr string) {
|
||||
gmt := time.FixedZone("GMT", 0)
|
||||
|
||||
return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
|
||||
}
|
||||
60
vendor/github.com/alibabacloud-go/tea/utils/progress.go
generated
vendored
60
vendor/github.com/alibabacloud-go/tea/utils/progress.go
generated
vendored
@@ -1,60 +0,0 @@
|
||||
package utils
|
||||
|
||||
// ProgressEventType defines transfer progress event type
|
||||
type ProgressEventType int
|
||||
|
||||
const (
|
||||
// TransferStartedEvent transfer started, set TotalBytes
|
||||
TransferStartedEvent ProgressEventType = 1 + iota
|
||||
// TransferDataEvent transfer data, set ConsumedBytes anmd TotalBytes
|
||||
TransferDataEvent
|
||||
// TransferCompletedEvent transfer completed
|
||||
TransferCompletedEvent
|
||||
// TransferFailedEvent transfer encounters an error
|
||||
TransferFailedEvent
|
||||
)
|
||||
|
||||
// ProgressEvent defines progress event
|
||||
type ProgressEvent struct {
|
||||
ConsumedBytes int64
|
||||
TotalBytes int64
|
||||
RwBytes int64
|
||||
EventType ProgressEventType
|
||||
}
|
||||
|
||||
// ProgressListener listens progress change
|
||||
type ProgressListener interface {
|
||||
ProgressChanged(event *ProgressEvent)
|
||||
}
|
||||
|
||||
// -------------------- Private --------------------
|
||||
|
||||
func NewProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
|
||||
return &ProgressEvent{
|
||||
ConsumedBytes: consumed,
|
||||
TotalBytes: total,
|
||||
RwBytes: rwBytes,
|
||||
EventType: eventType}
|
||||
}
|
||||
|
||||
// publishProgress
|
||||
func PublishProgress(listener ProgressListener, event *ProgressEvent) {
|
||||
if listener != nil && event != nil {
|
||||
listener.ProgressChanged(event)
|
||||
}
|
||||
}
|
||||
|
||||
func GetProgressListener(obj interface{}) ProgressListener {
|
||||
if obj == nil {
|
||||
return nil
|
||||
}
|
||||
listener, ok := obj.(ProgressListener)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return listener
|
||||
}
|
||||
|
||||
type ReaderTracker struct {
|
||||
CompletedBytes int64
|
||||
}
|
||||
14
vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
generated
vendored
14
vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
generated
vendored
@@ -1,14 +0,0 @@
|
||||
Copyright (c) 2015 aliyun.com
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
|
||||
Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
339
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
generated
vendored
339
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
generated
vendored
@@ -1,339 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
|
||||
type headerSorter struct {
|
||||
Keys []string
|
||||
Vals []string
|
||||
}
|
||||
|
||||
// getAdditionalHeaderKeys get exist key in http header
|
||||
func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
|
||||
var keysList []string
|
||||
keysMap := make(map[string]string)
|
||||
srcKeys := make(map[string]string)
|
||||
|
||||
for k := range req.Header {
|
||||
srcKeys[strings.ToLower(k)] = ""
|
||||
}
|
||||
|
||||
for _, v := range conn.config.AdditionalHeaders {
|
||||
if _, ok := srcKeys[strings.ToLower(v)]; ok {
|
||||
keysMap[strings.ToLower(v)] = ""
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keysMap {
|
||||
keysList = append(keysList, k)
|
||||
}
|
||||
sort.Strings(keysList)
|
||||
return keysList, keysMap
|
||||
}
|
||||
|
||||
// getAdditionalHeaderKeysV4 get exist key in http header
|
||||
func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
|
||||
var keysList []string
|
||||
keysMap := make(map[string]string)
|
||||
srcKeys := make(map[string]string)
|
||||
|
||||
for k := range req.Header {
|
||||
srcKeys[strings.ToLower(k)] = ""
|
||||
}
|
||||
|
||||
for _, v := range conn.config.AdditionalHeaders {
|
||||
if _, ok := srcKeys[strings.ToLower(v)]; ok {
|
||||
if !strings.EqualFold(v, HTTPHeaderContentMD5) && !strings.EqualFold(v, HTTPHeaderContentType) {
|
||||
keysMap[strings.ToLower(v)] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keysMap {
|
||||
keysList = append(keysList, k)
|
||||
}
|
||||
sort.Strings(keysList)
|
||||
return keysList, keysMap
|
||||
}
|
||||
|
||||
// signHeader signs the header and sets it as the authorization header.
|
||||
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string, credentials Credentials) {
|
||||
akIf := credentials
|
||||
authorizationStr := ""
|
||||
if conn.config.AuthVersion == AuthV4 {
|
||||
strDay := ""
|
||||
strDate := req.Header.Get(HttpHeaderOssDate)
|
||||
if strDate == "" {
|
||||
strDate = req.Header.Get(HTTPHeaderDate)
|
||||
t, _ := time.Parse(http.TimeFormat, strDate)
|
||||
strDay = t.Format("20060102")
|
||||
} else {
|
||||
t, _ := time.Parse(timeFormatV4, strDate)
|
||||
strDay = t.Format("20060102")
|
||||
}
|
||||
signHeaderProduct := conn.config.GetSignProduct()
|
||||
signHeaderRegion := conn.config.GetSignRegion()
|
||||
|
||||
additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
|
||||
if len(additionalList) > 0 {
|
||||
authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
|
||||
additionnalHeadersStr := strings.Join(additionalList, ";")
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionnalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret(), nil))
|
||||
} else {
|
||||
authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret(), nil))
|
||||
}
|
||||
} else if conn.config.AuthVersion == AuthV2 {
|
||||
additionalList, _ := conn.getAdditionalHeaderKeys(req)
|
||||
if len(additionalList) > 0 {
|
||||
authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
|
||||
additionnalHeadersStr := strings.Join(additionalList, ";")
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
|
||||
} else {
|
||||
authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
|
||||
}
|
||||
} else {
|
||||
// Get the final authorization string
|
||||
authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
|
||||
}
|
||||
|
||||
// Give the parameter "Authorization" value
|
||||
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
|
||||
}
|
||||
|
||||
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
|
||||
// Find out the "x-oss-"'s address in header of the request
|
||||
ossHeadersMap := make(map[string]string)
|
||||
additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
|
||||
for k, v := range req.Header {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
|
||||
ossHeadersMap[strings.ToLower(k)] = v[0]
|
||||
} else if conn.config.AuthVersion == AuthV2 {
|
||||
if _, ok := additionalMap[strings.ToLower(k)]; ok {
|
||||
ossHeadersMap[strings.ToLower(k)] = v[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
hs := newHeaderSorter(ossHeadersMap)
|
||||
|
||||
// Sort the ossHeadersMap by the ascending order
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
canonicalizedOSSHeaders := ""
|
||||
for i := range hs.Keys {
|
||||
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
|
||||
}
|
||||
|
||||
// Give other parameters values
|
||||
// when sign URL, date is expires
|
||||
date := req.Header.Get(HTTPHeaderDate)
|
||||
contentType := req.Header.Get(HTTPHeaderContentType)
|
||||
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
|
||||
|
||||
// default is v1 signature
|
||||
signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
|
||||
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
|
||||
|
||||
// v2 signature
|
||||
if conn.config.AuthVersion == AuthV2 {
|
||||
signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
|
||||
h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
|
||||
}
|
||||
|
||||
if conn.config.LogLevel >= Debug {
|
||||
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
|
||||
}
|
||||
|
||||
io.WriteString(h, signStr)
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
|
||||
return signedStr
|
||||
}
|
||||
|
||||
func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string, signingTime *time.Time) string {
|
||||
// Find out the "x-oss-"'s address in header of the request
|
||||
ossHeadersMap := make(map[string]string)
|
||||
additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
|
||||
for k, v := range req.Header {
|
||||
lowKey := strings.ToLower(k)
|
||||
if strings.EqualFold(lowKey, HTTPHeaderContentMD5) ||
|
||||
strings.EqualFold(lowKey, HTTPHeaderContentType) ||
|
||||
strings.HasPrefix(lowKey, "x-oss-") {
|
||||
ossHeadersMap[lowKey] = strings.Trim(v[0], " ")
|
||||
} else {
|
||||
if _, ok := additionalMap[lowKey]; ok {
|
||||
ossHeadersMap[lowKey] = strings.Trim(v[0], " ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get day,eg 20210914
|
||||
//signingTime
|
||||
signDate := ""
|
||||
strDay := ""
|
||||
if signingTime != nil {
|
||||
signDate = signingTime.Format(timeFormatV4)
|
||||
strDay = signingTime.Format(shortTimeFormatV4)
|
||||
} else {
|
||||
var t time.Time
|
||||
// Required parameters
|
||||
if date := req.Header.Get(HTTPHeaderDate); date != "" {
|
||||
signDate = date
|
||||
t, _ = time.Parse(http.TimeFormat, date)
|
||||
}
|
||||
|
||||
if ossDate := req.Header.Get(HttpHeaderOssDate); ossDate != "" {
|
||||
signDate = ossDate
|
||||
t, _ = time.Parse(timeFormatV4, ossDate)
|
||||
}
|
||||
|
||||
strDay = t.Format("20060102")
|
||||
}
|
||||
|
||||
hs := newHeaderSorter(ossHeadersMap)
|
||||
|
||||
// Sort the ossHeadersMap by the ascending order
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
canonicalizedOSSHeaders := ""
|
||||
for i := range hs.Keys {
|
||||
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
|
||||
}
|
||||
|
||||
signStr := ""
|
||||
|
||||
// v4 signature
|
||||
hashedPayload := DefaultContentSha256
|
||||
if val := req.Header.Get(HttpHeaderOssContentSha256); val != "" {
|
||||
hashedPayload = val
|
||||
}
|
||||
|
||||
// subResource
|
||||
resource := canonicalizedResource
|
||||
subResource := ""
|
||||
subPos := strings.LastIndex(canonicalizedResource, "?")
|
||||
if subPos != -1 {
|
||||
subResource = canonicalizedResource[subPos+1:]
|
||||
resource = canonicalizedResource[0:subPos]
|
||||
}
|
||||
|
||||
// get canonical request
|
||||
canonicalReuqest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
|
||||
rh := sha256.New()
|
||||
io.WriteString(rh, canonicalReuqest)
|
||||
hashedRequest := hex.EncodeToString(rh.Sum(nil))
|
||||
|
||||
if conn.config.LogLevel >= Debug {
|
||||
conn.config.WriteLog(Debug, "[Req:%p]CanonicalRequest:%s\n", req, EscapeLFString(canonicalReuqest))
|
||||
}
|
||||
|
||||
// Product & Region
|
||||
signedStrV4Product := conn.config.GetSignProduct()
|
||||
signedStrV4Region := conn.config.GetSignRegion()
|
||||
|
||||
signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
|
||||
if conn.config.LogLevel >= Debug {
|
||||
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
|
||||
}
|
||||
|
||||
h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
|
||||
io.WriteString(h1, strDay)
|
||||
h1Key := h1.Sum(nil)
|
||||
|
||||
h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
|
||||
io.WriteString(h2, signedStrV4Region)
|
||||
h2Key := h2.Sum(nil)
|
||||
|
||||
h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
|
||||
io.WriteString(h3, signedStrV4Product)
|
||||
h3Key := h3.Sum(nil)
|
||||
|
||||
h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
|
||||
io.WriteString(h4, "aliyun_v4_request")
|
||||
h4Key := h4.Sum(nil)
|
||||
|
||||
h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
|
||||
io.WriteString(h, signStr)
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
|
||||
if params[HTTPParamAccessKeyID] == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
|
||||
canonParamsKeys := []string{}
|
||||
for key := range params {
|
||||
if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
|
||||
canonParamsKeys = append(canonParamsKeys, key)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(canonParamsKeys)
|
||||
canonParamsStr := ""
|
||||
for _, key := range canonParamsKeys {
|
||||
canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
|
||||
}
|
||||
|
||||
expireStr := strconv.FormatInt(expiration, 10)
|
||||
signStr := expireStr + "\n" + canonParamsStr + canonResource
|
||||
|
||||
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
|
||||
io.WriteString(h, signStr)
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
return signedStr
|
||||
}
|
||||
|
||||
// newHeaderSorter is an additional function for function SignHeader.
|
||||
func newHeaderSorter(m map[string]string) *headerSorter {
|
||||
hs := &headerSorter{
|
||||
Keys: make([]string, 0, len(m)),
|
||||
Vals: make([]string, 0, len(m)),
|
||||
}
|
||||
|
||||
for k, v := range m {
|
||||
hs.Keys = append(hs.Keys, k)
|
||||
hs.Vals = append(hs.Vals, v)
|
||||
}
|
||||
return hs
|
||||
}
|
||||
|
||||
// Sort is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Sort() {
|
||||
sort.Sort(hs)
|
||||
}
|
||||
|
||||
// Len is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Len() int {
|
||||
return len(hs.Vals)
|
||||
}
|
||||
|
||||
// Less is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Less(i, j int) bool {
|
||||
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
|
||||
}
|
||||
|
||||
// Swap is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Swap(i, j int) {
|
||||
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
|
||||
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
|
||||
}
|
||||
1321
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
generated
vendored
1321
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
generated
vendored
File diff suppressed because it is too large
Load Diff
2956
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
generated
vendored
2956
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
generated
vendored
File diff suppressed because it is too large
Load Diff
301
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
generated
vendored
301
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
generated
vendored
@@ -1,301 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Define the level of the output log
|
||||
const (
|
||||
LogOff = iota
|
||||
Error
|
||||
Warn
|
||||
Info
|
||||
Debug
|
||||
)
|
||||
|
||||
// LogTag Tag for each level of log
|
||||
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
|
||||
|
||||
// HTTPTimeout defines HTTP timeout.
|
||||
type HTTPTimeout struct {
|
||||
ConnectTimeout time.Duration
|
||||
ReadWriteTimeout time.Duration
|
||||
HeaderTimeout time.Duration
|
||||
LongTimeout time.Duration
|
||||
IdleConnTimeout time.Duration
|
||||
}
|
||||
|
||||
// HTTPMaxConns defines max idle connections and max idle connections per host
|
||||
type HTTPMaxConns struct {
|
||||
MaxIdleConns int
|
||||
MaxIdleConnsPerHost int
|
||||
MaxConnsPerHost int
|
||||
}
|
||||
|
||||
// Credentials is interface for get AccessKeyID,AccessKeySecret,SecurityToken
|
||||
type Credentials interface {
|
||||
GetAccessKeyID() string
|
||||
GetAccessKeySecret() string
|
||||
GetSecurityToken() string
|
||||
}
|
||||
|
||||
// CredentialsProvider is interface for get Credential Info
|
||||
type CredentialsProvider interface {
|
||||
GetCredentials() Credentials
|
||||
}
|
||||
|
||||
type CredentialsProviderE interface {
|
||||
CredentialsProvider
|
||||
GetCredentialsE() (Credentials, error)
|
||||
}
|
||||
|
||||
type defaultCredentials struct {
|
||||
config *Config
|
||||
}
|
||||
|
||||
func (defCre *defaultCredentials) GetAccessKeyID() string {
|
||||
return defCre.config.AccessKeyID
|
||||
}
|
||||
|
||||
func (defCre *defaultCredentials) GetAccessKeySecret() string {
|
||||
return defCre.config.AccessKeySecret
|
||||
}
|
||||
|
||||
func (defCre *defaultCredentials) GetSecurityToken() string {
|
||||
return defCre.config.SecurityToken
|
||||
}
|
||||
|
||||
type defaultCredentialsProvider struct {
|
||||
config *Config
|
||||
}
|
||||
|
||||
func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
|
||||
return &defaultCredentials{config: defBuild.config}
|
||||
}
|
||||
|
||||
type envCredentials struct {
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
SecurityToken string
|
||||
}
|
||||
|
||||
type EnvironmentVariableCredentialsProvider struct {
|
||||
cred Credentials
|
||||
}
|
||||
|
||||
func (credentials *envCredentials) GetAccessKeyID() string {
|
||||
return credentials.AccessKeyId
|
||||
}
|
||||
|
||||
func (credentials *envCredentials) GetAccessKeySecret() string {
|
||||
return credentials.AccessKeySecret
|
||||
}
|
||||
|
||||
func (credentials *envCredentials) GetSecurityToken() string {
|
||||
return credentials.SecurityToken
|
||||
}
|
||||
|
||||
func (defBuild *EnvironmentVariableCredentialsProvider) GetCredentials() Credentials {
|
||||
var accessID, accessKey, token string
|
||||
if defBuild.cred == nil {
|
||||
accessID = os.Getenv("OSS_ACCESS_KEY_ID")
|
||||
accessKey = os.Getenv("OSS_ACCESS_KEY_SECRET")
|
||||
token = os.Getenv("OSS_SESSION_TOKEN")
|
||||
} else {
|
||||
accessID = defBuild.cred.GetAccessKeyID()
|
||||
accessKey = defBuild.cred.GetAccessKeySecret()
|
||||
token = defBuild.cred.GetSecurityToken()
|
||||
}
|
||||
|
||||
return &envCredentials{
|
||||
AccessKeyId: accessID,
|
||||
AccessKeySecret: accessKey,
|
||||
SecurityToken: token,
|
||||
}
|
||||
}
|
||||
|
||||
func NewEnvironmentVariableCredentialsProvider() (EnvironmentVariableCredentialsProvider, error) {
|
||||
var provider EnvironmentVariableCredentialsProvider
|
||||
accessID := os.Getenv("OSS_ACCESS_KEY_ID")
|
||||
if accessID == "" {
|
||||
return provider, fmt.Errorf("access key id is empty!")
|
||||
}
|
||||
accessKey := os.Getenv("OSS_ACCESS_KEY_SECRET")
|
||||
if accessKey == "" {
|
||||
return provider, fmt.Errorf("access key secret is empty!")
|
||||
}
|
||||
token := os.Getenv("OSS_SESSION_TOKEN")
|
||||
envCredential := &envCredentials{
|
||||
AccessKeyId: accessID,
|
||||
AccessKeySecret: accessKey,
|
||||
SecurityToken: token,
|
||||
}
|
||||
return EnvironmentVariableCredentialsProvider{
|
||||
cred: envCredential,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Config defines oss configuration
|
||||
type Config struct {
|
||||
Endpoint string // OSS endpoint
|
||||
AccessKeyID string // AccessId
|
||||
AccessKeySecret string // AccessKey
|
||||
RetryTimes uint // Retry count by default it's 5.
|
||||
UserAgent string // SDK name/version/system information
|
||||
IsDebug bool // Enable debug mode. Default is false.
|
||||
Timeout uint // Timeout in seconds. By default it's 60.
|
||||
SecurityToken string // STS Token
|
||||
IsCname bool // If cname is in the endpoint.
|
||||
IsPathStyle bool // If Path Style is in the endpoint.
|
||||
HTTPTimeout HTTPTimeout // HTTP timeout
|
||||
HTTPMaxConns HTTPMaxConns // Http max connections
|
||||
IsUseProxy bool // Flag of using proxy.
|
||||
ProxyHost string // Flag of using proxy host.
|
||||
IsAuthProxy bool // Flag of needing authentication.
|
||||
ProxyUser string // Proxy user
|
||||
ProxyPassword string // Proxy password
|
||||
IsEnableMD5 bool // Flag of enabling MD5 for upload.
|
||||
MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
|
||||
IsEnableCRC bool // Flag of enabling CRC for upload.
|
||||
LogLevel int // Log level
|
||||
Logger *log.Logger // For write log
|
||||
UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited
|
||||
UploadLimiter *OssLimiter // Bandwidth limit reader for upload
|
||||
DownloadLimitSpeed int // Download limit speed:KB/s, 0 is unlimited
|
||||
DownloadLimiter *OssLimiter // Bandwidth limit reader for download
|
||||
CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
|
||||
LocalAddr net.Addr // local client host info
|
||||
UserSetUa bool // UserAgent is set by user or not
|
||||
AuthVersion AuthVersionType // v1 or v2, v4 signature,default is v1
|
||||
AdditionalHeaders []string // special http headers needed to be sign
|
||||
RedirectEnabled bool // only effective from go1.7 onward, enable http redirect or not
|
||||
InsecureSkipVerify bool // for https, Whether to skip verifying the server certificate file
|
||||
Region string // such as cn-hangzhou
|
||||
CloudBoxId string //
|
||||
Product string // oss or oss-cloudbox, default is oss
|
||||
VerifyObjectStrict bool // a flag of verifying object name strictly. Default is enable.
|
||||
}
|
||||
|
||||
// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0
|
||||
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
|
||||
if uploadSpeed < 0 {
|
||||
return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
|
||||
} else if uploadSpeed == 0 {
|
||||
config.UploadLimitSpeed = 0
|
||||
config.UploadLimiter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
|
||||
if err == nil {
|
||||
config.UploadLimitSpeed = uploadSpeed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// LimitDownLoadSpeed downloadSpeed:KB/s, 0 is unlimited,default is 0
|
||||
func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
|
||||
if downloadSpeed < 0 {
|
||||
return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
|
||||
} else if downloadSpeed == 0 {
|
||||
config.DownloadLimitSpeed = 0
|
||||
config.DownloadLimiter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
|
||||
if err == nil {
|
||||
config.DownloadLimitSpeed = downloadSpeed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteLog output log function
|
||||
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
|
||||
if config.LogLevel < LogLevel || config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var logBuffer bytes.Buffer
|
||||
logBuffer.WriteString(LogTag[LogLevel-1])
|
||||
logBuffer.WriteString(fmt.Sprintf(format, a...))
|
||||
config.Logger.Printf("%s", logBuffer.String())
|
||||
}
|
||||
|
||||
// for get Credentials
|
||||
func (config *Config) GetCredentials() Credentials {
|
||||
return config.CredentialsProvider.GetCredentials()
|
||||
}
|
||||
|
||||
// for get Sign Product
|
||||
func (config *Config) GetSignProduct() string {
|
||||
if config.CloudBoxId != "" {
|
||||
return "oss-cloudbox"
|
||||
}
|
||||
return "oss"
|
||||
}
|
||||
|
||||
// for get Sign Region
|
||||
func (config *Config) GetSignRegion() string {
|
||||
if config.CloudBoxId != "" {
|
||||
return config.CloudBoxId
|
||||
}
|
||||
return config.Region
|
||||
}
|
||||
|
||||
// getDefaultOssConfig gets the default configuration.
|
||||
func getDefaultOssConfig() *Config {
|
||||
config := Config{}
|
||||
|
||||
config.Endpoint = ""
|
||||
config.AccessKeyID = ""
|
||||
config.AccessKeySecret = ""
|
||||
config.RetryTimes = 5
|
||||
config.IsDebug = false
|
||||
config.UserAgent = userAgent()
|
||||
config.Timeout = 60 // Seconds
|
||||
config.SecurityToken = ""
|
||||
config.IsCname = false
|
||||
config.IsPathStyle = false
|
||||
|
||||
config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s
|
||||
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
|
||||
config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
|
||||
config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
|
||||
config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
|
||||
config.HTTPMaxConns.MaxIdleConns = 100
|
||||
config.HTTPMaxConns.MaxIdleConnsPerHost = 100
|
||||
|
||||
config.IsUseProxy = false
|
||||
config.ProxyHost = ""
|
||||
config.IsAuthProxy = false
|
||||
config.ProxyUser = ""
|
||||
config.ProxyPassword = ""
|
||||
|
||||
config.MD5Threshold = 16 * 1024 * 1024 // 16MB
|
||||
config.IsEnableMD5 = false
|
||||
config.IsEnableCRC = true
|
||||
|
||||
config.LogLevel = LogOff
|
||||
config.Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
provider := &defaultCredentialsProvider{config: &config}
|
||||
config.CredentialsProvider = provider
|
||||
|
||||
config.AuthVersion = AuthV1
|
||||
config.RedirectEnabled = true
|
||||
config.InsecureSkipVerify = false
|
||||
|
||||
config.Product = "oss"
|
||||
|
||||
config.VerifyObjectStrict = true
|
||||
|
||||
return &config
|
||||
}
|
||||
1021
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
generated
vendored
1021
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
generated
vendored
File diff suppressed because it is too large
Load Diff
273
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
generated
vendored
273
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
generated
vendored
@@ -1,273 +0,0 @@
|
||||
package oss
|
||||
|
||||
import "os"
|
||||
|
||||
// ACLType bucket/object ACL
|
||||
type ACLType string
|
||||
|
||||
const (
|
||||
// ACLPrivate definition : private read and write
|
||||
ACLPrivate ACLType = "private"
|
||||
|
||||
// ACLPublicRead definition : public read and private write
|
||||
ACLPublicRead ACLType = "public-read"
|
||||
|
||||
// ACLPublicReadWrite definition : public read and public write
|
||||
ACLPublicReadWrite ACLType = "public-read-write"
|
||||
|
||||
// ACLDefault Object. It's only applicable for object.
|
||||
ACLDefault ACLType = "default"
|
||||
)
|
||||
|
||||
// bucket versioning status
|
||||
type VersioningStatus string
|
||||
|
||||
const (
|
||||
// Versioning Status definition: Enabled
|
||||
VersionEnabled VersioningStatus = "Enabled"
|
||||
|
||||
// Versioning Status definition: Suspended
|
||||
VersionSuspended VersioningStatus = "Suspended"
|
||||
)
|
||||
|
||||
// MetadataDirectiveType specifying whether use the metadata of source object when copying object.
|
||||
type MetadataDirectiveType string
|
||||
|
||||
const (
|
||||
// MetaCopy the target object's metadata is copied from the source one
|
||||
MetaCopy MetadataDirectiveType = "COPY"
|
||||
|
||||
// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
|
||||
MetaReplace MetadataDirectiveType = "REPLACE"
|
||||
)
|
||||
|
||||
// TaggingDirectiveType specifying whether use the tagging of source object when copying object.
|
||||
type TaggingDirectiveType string
|
||||
|
||||
const (
|
||||
// TaggingCopy the target object's tagging is copied from the source one
|
||||
TaggingCopy TaggingDirectiveType = "COPY"
|
||||
|
||||
// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
|
||||
TaggingReplace TaggingDirectiveType = "REPLACE"
|
||||
)
|
||||
|
||||
// AlgorithmType specifying the server side encryption algorithm name
|
||||
type AlgorithmType string
|
||||
|
||||
const (
|
||||
KMSAlgorithm AlgorithmType = "KMS"
|
||||
AESAlgorithm AlgorithmType = "AES256"
|
||||
SM4Algorithm AlgorithmType = "SM4"
|
||||
)
|
||||
|
||||
// StorageClassType bucket storage type
|
||||
type StorageClassType string
|
||||
|
||||
const (
|
||||
// StorageStandard standard
|
||||
StorageStandard StorageClassType = "Standard"
|
||||
|
||||
// StorageIA infrequent access
|
||||
StorageIA StorageClassType = "IA"
|
||||
|
||||
// StorageArchive archive
|
||||
StorageArchive StorageClassType = "Archive"
|
||||
|
||||
// StorageColdArchive cold archive
|
||||
StorageColdArchive StorageClassType = "ColdArchive"
|
||||
|
||||
// StorageDeepColdArchive deep cold archive
|
||||
StorageDeepColdArchive StorageClassType = "DeepColdArchive"
|
||||
)
|
||||
|
||||
//RedundancyType bucket data Redundancy type
|
||||
type DataRedundancyType string
|
||||
|
||||
const (
|
||||
// RedundancyLRS Local redundancy, default value
|
||||
RedundancyLRS DataRedundancyType = "LRS"
|
||||
|
||||
// RedundancyZRS Same city redundancy
|
||||
RedundancyZRS DataRedundancyType = "ZRS"
|
||||
)
|
||||
|
||||
//ObjecthashFuncType
|
||||
type ObjecthashFuncType string
|
||||
|
||||
const (
|
||||
HashFuncSha1 ObjecthashFuncType = "SHA-1"
|
||||
HashFuncSha256 ObjecthashFuncType = "SHA-256"
|
||||
)
|
||||
|
||||
// PayerType the type of request payer
|
||||
type PayerType string
|
||||
|
||||
const (
|
||||
// Requester the requester who send the request
|
||||
Requester PayerType = "Requester"
|
||||
|
||||
// BucketOwner the requester who send the request
|
||||
BucketOwner PayerType = "BucketOwner"
|
||||
)
|
||||
|
||||
//RestoreMode the restore mode for coldArchive object
|
||||
type RestoreMode string
|
||||
|
||||
const (
|
||||
//RestoreExpedited object will be restored in 1 hour
|
||||
RestoreExpedited RestoreMode = "Expedited"
|
||||
|
||||
//RestoreStandard object will be restored in 2-5 hours
|
||||
RestoreStandard RestoreMode = "Standard"
|
||||
|
||||
//RestoreBulk object will be restored in 5-10 hours
|
||||
RestoreBulk RestoreMode = "Bulk"
|
||||
)
|
||||
|
||||
// HTTPMethod HTTP request method
|
||||
type HTTPMethod string
|
||||
|
||||
const (
|
||||
// HTTPGet HTTP GET
|
||||
HTTPGet HTTPMethod = "GET"
|
||||
|
||||
// HTTPPut HTTP PUT
|
||||
HTTPPut HTTPMethod = "PUT"
|
||||
|
||||
// HTTPHead HTTP HEAD
|
||||
HTTPHead HTTPMethod = "HEAD"
|
||||
|
||||
// HTTPPost HTTP POST
|
||||
HTTPPost HTTPMethod = "POST"
|
||||
|
||||
// HTTPDelete HTTP DELETE
|
||||
HTTPDelete HTTPMethod = "DELETE"
|
||||
)
|
||||
|
||||
// HTTP headers
|
||||
const (
|
||||
HTTPHeaderAcceptEncoding string = "Accept-Encoding"
|
||||
HTTPHeaderAuthorization = "Authorization"
|
||||
HTTPHeaderCacheControl = "Cache-Control"
|
||||
HTTPHeaderContentDisposition = "Content-Disposition"
|
||||
HTTPHeaderContentEncoding = "Content-Encoding"
|
||||
HTTPHeaderContentLength = "Content-Length"
|
||||
HTTPHeaderContentMD5 = "Content-MD5"
|
||||
HTTPHeaderContentType = "Content-Type"
|
||||
HTTPHeaderContentLanguage = "Content-Language"
|
||||
HTTPHeaderDate = "Date"
|
||||
HTTPHeaderEtag = "ETag"
|
||||
HTTPHeaderExpires = "Expires"
|
||||
HTTPHeaderHost = "Host"
|
||||
HTTPHeaderLastModified = "Last-Modified"
|
||||
HTTPHeaderRange = "Range"
|
||||
HTTPHeaderLocation = "Location"
|
||||
HTTPHeaderOrigin = "Origin"
|
||||
HTTPHeaderServer = "Server"
|
||||
HTTPHeaderUserAgent = "User-Agent"
|
||||
HTTPHeaderIfModifiedSince = "If-Modified-Since"
|
||||
HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
HTTPHeaderIfMatch = "If-Match"
|
||||
HTTPHeaderIfNoneMatch = "If-None-Match"
|
||||
HTTPHeaderACReqMethod = "Access-Control-Request-Method"
|
||||
HTTPHeaderACReqHeaders = "Access-Control-Request-Headers"
|
||||
|
||||
HTTPHeaderOssACL = "X-Oss-Acl"
|
||||
HTTPHeaderOssMetaPrefix = "X-Oss-Meta-"
|
||||
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl"
|
||||
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
|
||||
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
|
||||
HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id"
|
||||
HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption"
|
||||
HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
|
||||
HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key"
|
||||
HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
|
||||
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
|
||||
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
|
||||
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
|
||||
HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match"
|
||||
HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since"
|
||||
HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
|
||||
HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive"
|
||||
HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position"
|
||||
HTTPHeaderOssRequestID = "X-Oss-Request-Id"
|
||||
HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma"
|
||||
HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
|
||||
HTTPHeaderOssStorageClass = "X-Oss-Storage-Class"
|
||||
HTTPHeaderOssCallback = "X-Oss-Callback"
|
||||
HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var"
|
||||
HTTPHeaderOssRequester = "X-Oss-Request-Payer"
|
||||
HTTPHeaderOssTagging = "X-Oss-Tagging"
|
||||
HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive"
|
||||
HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit"
|
||||
HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite"
|
||||
HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior"
|
||||
HTTPHeaderOssTaskID = "X-Oss-Task-Id"
|
||||
HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx"
|
||||
HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx"
|
||||
HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap"
|
||||
HttpHeaderOssDate = "X-Oss-Date"
|
||||
HttpHeaderOssContentSha256 = "X-Oss-Content-Sha256"
|
||||
HttpHeaderOssNotification = "X-Oss-Notification"
|
||||
HTTPHeaderOssEc = "X-Oss-Ec"
|
||||
HTTPHeaderOssErr = "X-Oss-Err"
|
||||
)
|
||||
|
||||
// HTTP Param
|
||||
const (
|
||||
HTTPParamExpires = "Expires"
|
||||
HTTPParamAccessKeyID = "OSSAccessKeyId"
|
||||
HTTPParamSignature = "Signature"
|
||||
HTTPParamSecurityToken = "security-token"
|
||||
HTTPParamPlaylistName = "playlistName"
|
||||
|
||||
HTTPParamSignatureVersion = "x-oss-signature-version"
|
||||
HTTPParamExpiresV2 = "x-oss-expires"
|
||||
HTTPParamAccessKeyIDV2 = "x-oss-access-key-id"
|
||||
HTTPParamSignatureV2 = "x-oss-signature"
|
||||
HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
|
||||
HTTPParamCredential = "x-oss-credential"
|
||||
HTTPParamDate = "x-oss-date"
|
||||
HTTPParamOssSecurityToken = "x-oss-security-token"
|
||||
)
|
||||
|
||||
// Other constants
|
||||
const (
|
||||
MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
|
||||
MinPartSize = 100 * 1024 // Min part size, 100KB
|
||||
|
||||
FilePermMode = os.FileMode(0664) // Default file permission
|
||||
|
||||
TempFilePrefix = "oss-go-temp-" // Temp file prefix
|
||||
TempFileSuffix = ".temp" // Temp file suffix
|
||||
|
||||
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
|
||||
|
||||
NullVersion = "null"
|
||||
|
||||
DefaultContentSha256 = "UNSIGNED-PAYLOAD" // for v4 signature
|
||||
|
||||
Version = "v3.0.2" // Go SDK version
|
||||
)
|
||||
|
||||
// FrameType
|
||||
const (
|
||||
DataFrameType = 8388609
|
||||
ContinuousFrameType = 8388612
|
||||
EndFrameType = 8388613
|
||||
MetaEndFrameCSVType = 8388614
|
||||
MetaEndFrameJSONType = 8388615
|
||||
)
|
||||
|
||||
// AuthVersion the version of auth
|
||||
type AuthVersionType string
|
||||
|
||||
const (
|
||||
// AuthV1 v1
|
||||
AuthV1 AuthVersionType = "v1"
|
||||
// AuthV2 v2
|
||||
AuthV2 AuthVersionType = "v2"
|
||||
// AuthV4 v4
|
||||
AuthV4 AuthVersionType = "v4"
|
||||
)
|
||||
123
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
generated
vendored
123
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
generated
vendored
@@ -1,123 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
)
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
|
||||
type digest struct {
|
||||
crc uint64
|
||||
tab *crc64.Table
|
||||
}
|
||||
|
||||
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
|
||||
// using the polynomial represented by the Table.
|
||||
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
|
||||
|
||||
// Size returns the number of bytes sum will return.
|
||||
func (d *digest) Size() int { return crc64.Size }
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (d *digest) BlockSize() int { return 1 }
|
||||
|
||||
// Reset resets the hash to its initial state.
|
||||
func (d *digest) Reset() { d.crc = 0 }
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// It never returns an error.
|
||||
func (d *digest) Write(p []byte) (n int, err error) {
|
||||
d.crc = crc64.Update(d.crc, d.tab, p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Sum64 returns CRC64 value.
|
||||
func (d *digest) Sum64() uint64 { return d.crc }
|
||||
|
||||
// Sum returns hash value.
|
||||
func (d *digest) Sum(in []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// gf2Dim dimension of GF(2) vectors (length of CRC)
|
||||
const gf2Dim int = 64
|
||||
|
||||
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
|
||||
var sum uint64
|
||||
for i := 0; vec != 0; i++ {
|
||||
if vec&1 != 0 {
|
||||
sum ^= mat[i]
|
||||
}
|
||||
|
||||
vec >>= 1
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func gf2MatrixSquare(square []uint64, mat []uint64) {
|
||||
for n := 0; n < gf2Dim; n++ {
|
||||
square[n] = gf2MatrixTimes(mat, mat[n])
|
||||
}
|
||||
}
|
||||
|
||||
// CRC64Combine combines CRC64
|
||||
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
|
||||
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
|
||||
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
|
||||
|
||||
// Degenerate case
|
||||
if len2 == 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
// Put operator for one zero bit in odd
|
||||
odd[0] = crc64.ECMA // CRC64 polynomial
|
||||
var row uint64 = 1
|
||||
for n := 1; n < gf2Dim; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// Put operator for two zero bits in even
|
||||
gf2MatrixSquare(even[:], odd[:])
|
||||
|
||||
// Put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd[:], even[:])
|
||||
|
||||
// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
|
||||
for {
|
||||
// Apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even[:], odd[:])
|
||||
|
||||
if len2&1 != 0 {
|
||||
crc1 = gf2MatrixTimes(even[:], crc1)
|
||||
}
|
||||
|
||||
len2 >>= 1
|
||||
|
||||
// If no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd[:], even[:])
|
||||
if len2&1 != 0 {
|
||||
crc1 = gf2MatrixTimes(odd[:], crc1)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// If no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Return combined CRC
|
||||
crc1 ^= crc2
|
||||
return crc1
|
||||
}
|
||||
567
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
generated
vendored
567
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
generated
vendored
@@ -1,567 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DownloadFile downloads files with multipart download.
|
||||
//
|
||||
// objectKey the object key.
|
||||
// filePath the local file to download from objectKey in OSS.
|
||||
// partSize the part size in bytes.
|
||||
// options object's constraints, check out GetObject for the reference.
|
||||
//
|
||||
// error it's nil when the call succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < 1 {
|
||||
return errors.New("oss: part size smaller than 1")
|
||||
}
|
||||
|
||||
uRange, err := GetRangeConfig(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
var strVersionId string
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
strVersionId = versionId.(string)
|
||||
}
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
|
||||
if cpFilePath != "" {
|
||||
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
|
||||
}
|
||||
|
||||
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
absPath, _ := filepath.Abs(destFile)
|
||||
cpFileName := getCpFileName(src, absPath, versionId)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// downloadWorkerArg is download worker's parameters
|
||||
type downloadWorkerArg struct {
|
||||
bucket *Bucket
|
||||
key string
|
||||
filePath string
|
||||
options []Option
|
||||
hook downloadPartHook
|
||||
enableCRC bool
|
||||
}
|
||||
|
||||
// downloadPartHook is hook for test
|
||||
type downloadPartHook func(part downloadPart) error
|
||||
|
||||
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
|
||||
|
||||
func defaultDownloadPartHook(part downloadPart) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject.
|
||||
type defaultDownloadProgressListener struct {
|
||||
}
|
||||
|
||||
// ProgressChanged no-ops
|
||||
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
|
||||
}
|
||||
|
||||
// downloadWorker
|
||||
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
|
||||
for part := range jobs {
|
||||
if err := arg.hook(part); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
// Resolve options
|
||||
r := Range(part.Start, part.End)
|
||||
p := Progress(&defaultDownloadProgressListener{})
|
||||
|
||||
var respHeader http.Header
|
||||
opts := make([]Option, len(arg.options)+3)
|
||||
// Append orderly, can not be reversed!
|
||||
opts = append(opts, arg.options...)
|
||||
opts = append(opts, r, p, GetResponseHeader(&respHeader))
|
||||
|
||||
rd, err := arg.bucket.GetObject(arg.key, opts...)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
var crcCalc hash.Hash64
|
||||
if arg.enableCRC {
|
||||
crcCalc = crc64.New(CrcTable())
|
||||
contentLen := part.End - part.Start + 1
|
||||
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
|
||||
if err != nil {
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
startT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
_, err = io.Copy(fd, rd)
|
||||
endT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
if err != nil {
|
||||
arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
if arg.enableCRC {
|
||||
part.CRC64 = crcCalc.Sum64()
|
||||
}
|
||||
|
||||
fd.Close()
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// downloadScheduler
|
||||
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
// downloadPart defines download part
|
||||
type downloadPart struct {
|
||||
Index int // Part number, starting from 0
|
||||
Start int64 // Start index
|
||||
End int64 // End index
|
||||
Offset int64 // Offset
|
||||
CRC64 uint64 // CRC check value of part
|
||||
}
|
||||
|
||||
// getDownloadParts gets download parts
|
||||
func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
|
||||
parts := []downloadPart{}
|
||||
part := downloadPart{}
|
||||
i := 0
|
||||
start, end := AdjustRange(uRange, objectSize)
|
||||
for offset := start; offset < end; offset += partSize {
|
||||
part.Index = i
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, end, partSize)
|
||||
part.Offset = start
|
||||
part.CRC64 = 0
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
// getObjectBytes gets object bytes length
|
||||
func getObjectBytes(parts []downloadPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
ob += (part.End - part.Start + 1)
|
||||
}
|
||||
return ob
|
||||
}
|
||||
|
||||
// combineCRCInParts caculates the total CRC of continuous parts
|
||||
func combineCRCInParts(dps []downloadPart) uint64 {
|
||||
if dps == nil || len(dps) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
crc := dps[0].CRC64
|
||||
for i := 1; i < len(dps); i++ {
|
||||
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
|
||||
}
|
||||
|
||||
return crc
|
||||
}
|
||||
|
||||
// downloadFile downloads file concurrently without checkpoint.
|
||||
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
|
||||
tempFilePath := filePath + TempFileSuffix
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// If the file does not exist, create one. If exists, the download will overwrite it.
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fd.Close()
|
||||
|
||||
// Get the object detailed meta for object whole size
|
||||
// must delete header:range to get whole object size
|
||||
skipOptions := DeleteOption(options, HTTPHeaderRange)
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enableCRC := false
|
||||
expectedCRC := (uint64)(0)
|
||||
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
|
||||
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
|
||||
enableCRC = true
|
||||
expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
|
||||
}
|
||||
}
|
||||
|
||||
// Get the parts of the file
|
||||
parts := getDownloadParts(objectSize, partSize, uRange)
|
||||
jobs := make(chan downloadPart, len(parts))
|
||||
results := make(chan downloadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
var completedBytes int64
|
||||
totalBytes := getObjectBytes(parts)
|
||||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the download workers
|
||||
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go downloadWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Download parts concurrently
|
||||
go downloadScheduler(jobs, parts)
|
||||
|
||||
// Waiting for parts download finished
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
downBytes := (part.End - part.Start + 1)
|
||||
completedBytes += downBytes
|
||||
parts[part.Index].CRC64 = part.CRC64
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
if enableCRC {
|
||||
actualCRC := combineCRCInParts(parts)
|
||||
err = CheckDownloadCRC(actualCRC, expectedCRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return os.Rename(tempFilePath, filePath)
|
||||
}
|
||||
|
||||
// ----- Concurrent download with chcekpoint -----
|
||||
|
||||
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
|
||||
|
||||
type downloadCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // Checkpoint content MD5
|
||||
FilePath string // Local file
|
||||
Object string // Key
|
||||
ObjStat objectStat // Object status
|
||||
Parts []downloadPart // All download parts
|
||||
PartStat []bool // Parts' download status
|
||||
Start int64 // Start point of the file
|
||||
End int64 // End point of the file
|
||||
enableCRC bool // Whether has CRC check
|
||||
CRC uint64 // CRC check value
|
||||
}
|
||||
|
||||
type objectStat struct {
|
||||
Size int64 // Object size
|
||||
LastModified string // Last modified time
|
||||
Etag string // Etag
|
||||
}
|
||||
|
||||
// isValid flags of checkpoint data is valid. It returns true when the data is valid and the checkpoint is valid and the object is not updated.
|
||||
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
|
||||
// Compare the CP's Magic and the MD5
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size, last modified time and etag
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check the download range
|
||||
if uRange != nil {
|
||||
start, end := AdjustRange(uRange, objectSize)
|
||||
if start != cp.Start || end != cp.End {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load checkpoint from local file
|
||||
func (cp *downloadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// dump funciton dumps to file
|
||||
func (cp *downloadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialize
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts gets unfinished parts
|
||||
func (cp downloadCheckpoint) todoParts() []downloadPart {
|
||||
dps := []downloadPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
if !ps {
|
||||
dps = append(dps, cp.Parts[i])
|
||||
}
|
||||
}
|
||||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes gets completed size
|
||||
func (cp downloadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
if cp.PartStat[i] {
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initiates download tasks
|
||||
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
|
||||
// CP
|
||||
cp.Magic = downloadCpMagic
|
||||
cp.FilePath = filePath
|
||||
cp.Object = objectKey
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.ObjStat.Size = objectSize
|
||||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
|
||||
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
|
||||
cp.enableCRC = true
|
||||
cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
|
||||
}
|
||||
}
|
||||
|
||||
// Parts
|
||||
cp.Parts = getDownloadParts(objectSize, partSize, uRange)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
|
||||
err := os.Rename(downFilepath, cp.FilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// downloadFileWithCp downloads files with checkpoint.
|
||||
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
|
||||
tempFilePath := filePath + TempFileSuffix
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// Load checkpoint data.
|
||||
dcp := downloadCheckpoint{}
|
||||
err := dcp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Get the object detailed meta for object whole size
|
||||
// must delete header:range to get whole object size
|
||||
skipOptions := DeleteOption(options, HTTPHeaderRange)
|
||||
meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load error or data invalid. Re-initialize the download.
|
||||
valid, err := dcp.isValid(meta, uRange)
|
||||
if err != nil || !valid {
|
||||
if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Create the file if not exists. Otherwise the parts download will overwrite it.
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fd.Close()
|
||||
|
||||
// Unfinished parts
|
||||
parts := dcp.todoParts()
|
||||
jobs := make(chan downloadPart, len(parts))
|
||||
results := make(chan downloadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
completedBytes := dcp.getCompletedBytes()
|
||||
event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the download workers routine
|
||||
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go downloadWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Concurrently downloads parts
|
||||
go downloadScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts download finished
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
dcp.PartStat[part.Index] = true
|
||||
dcp.Parts[part.Index].CRC64 = part.CRC64
|
||||
dcp.dump(cpFilePath)
|
||||
downBytes := (part.End - part.Start + 1)
|
||||
completedBytes += downBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
if dcp.enableCRC {
|
||||
actualCRC := combineCRCInParts(dcp.Parts)
|
||||
err = CheckDownloadCRC(actualCRC, dcp.CRC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return dcp.complete(cpFilePath, tempFilePath)
|
||||
}
|
||||
136
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
generated
vendored
136
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
generated
vendored
@@ -1,136 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ServiceError contains fields of the error response from Oss Service REST API.
|
||||
type ServiceError struct {
|
||||
XMLName xml.Name `xml:"Error"`
|
||||
Code string `xml:"Code"` // The error code returned from OSS to the caller
|
||||
Message string `xml:"Message"` // The detail error message from OSS
|
||||
RequestID string `xml:"RequestId"` // The UUID used to uniquely identify the request
|
||||
HostID string `xml:"HostId"` // The OSS server cluster's Id
|
||||
Endpoint string `xml:"Endpoint"`
|
||||
Ec string `xml:"EC"`
|
||||
RawMessage string // The raw messages from OSS
|
||||
StatusCode int // HTTP status code
|
||||
|
||||
}
|
||||
|
||||
// Error implements interface error
|
||||
func (e ServiceError) Error() string {
|
||||
errorStr := fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
|
||||
if len(e.Endpoint) > 0 {
|
||||
errorStr = fmt.Sprintf("%s, Endpoint=%s", errorStr, e.Endpoint)
|
||||
}
|
||||
if len(e.Ec) > 0 {
|
||||
errorStr = fmt.Sprintf("%s, Ec=%s", errorStr, e.Ec)
|
||||
}
|
||||
return errorStr
|
||||
}
|
||||
|
||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
|
||||
// nor with an HTTP status code indicating success.
|
||||
type UnexpectedStatusCodeError struct {
|
||||
allowed []int // The expected HTTP stats code returned from OSS
|
||||
got int // The actual HTTP status code from OSS
|
||||
}
|
||||
|
||||
// Error implements interface error
|
||||
func (e UnexpectedStatusCodeError) Error() string {
|
||||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
|
||||
|
||||
got := s(e.got)
|
||||
expected := []string{}
|
||||
for _, v := range e.allowed {
|
||||
expected = append(expected, s(v))
|
||||
}
|
||||
return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
|
||||
got, strings.Join(expected, " or "))
|
||||
}
|
||||
|
||||
// Got is the actual status code returned by oss.
|
||||
func (e UnexpectedStatusCodeError) Got() int {
|
||||
return e.got
|
||||
}
|
||||
|
||||
// CheckRespCode returns UnexpectedStatusError if the given response code is not
|
||||
// one of the allowed status codes; otherwise nil.
|
||||
func CheckRespCode(respCode int, allowed []int) error {
|
||||
for _, v := range allowed {
|
||||
if respCode == v {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return UnexpectedStatusCodeError{allowed, respCode}
|
||||
}
|
||||
|
||||
// CheckCallbackResp return error if the given response code is not 200
|
||||
func CheckCallbackResp(resp *Response) error {
|
||||
var err error
|
||||
contentLengthStr := resp.Headers.Get("Content-Length")
|
||||
contentLength, _ := strconv.Atoi(contentLengthStr)
|
||||
var bodyBytes []byte
|
||||
if contentLength > 0 {
|
||||
bodyBytes, _ = ioutil.ReadAll(resp.Body)
|
||||
}
|
||||
if len(bodyBytes) > 0 {
|
||||
srvErr, errIn := serviceErrFromXML(bodyBytes, resp.StatusCode,
|
||||
resp.Headers.Get(HTTPHeaderOssRequestID))
|
||||
if errIn != nil {
|
||||
if len(resp.Headers.Get(HTTPHeaderOssEc)) > 0 {
|
||||
err = fmt.Errorf("unknown response body, status code = %d, RequestId = %s, ec = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID), resp.Headers.Get(HTTPHeaderOssEc))
|
||||
} else {
|
||||
err = fmt.Errorf("unknown response body, status code= %d, RequestId = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
|
||||
}
|
||||
} else {
|
||||
err = srvErr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func tryConvertServiceError(data []byte, resp *Response, def error) (err error) {
|
||||
err = def
|
||||
if len(data) > 0 {
|
||||
srvErr, errIn := serviceErrFromXML(data, resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
|
||||
if errIn == nil {
|
||||
err = srvErr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CRCCheckError is returned when crc check is inconsistent between client and server
|
||||
type CRCCheckError struct {
|
||||
clientCRC uint64 // Calculated CRC64 in client
|
||||
serverCRC uint64 // Calculated CRC64 in server
|
||||
operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
|
||||
requestID string // The request id of this operation
|
||||
}
|
||||
|
||||
// Error implements interface error
|
||||
func (e CRCCheckError) Error() string {
|
||||
return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
|
||||
e.operation, e.clientCRC, e.serverCRC, e.requestID)
|
||||
}
|
||||
|
||||
func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
|
||||
if clientCRC == serverCRC {
|
||||
return nil
|
||||
}
|
||||
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
|
||||
}
|
||||
|
||||
func CheckCRC(resp *Response, operation string) error {
|
||||
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
|
||||
return nil
|
||||
}
|
||||
return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
|
||||
}
|
||||
29
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
generated
vendored
29
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
generated
vendored
@@ -1,29 +0,0 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
// "golang.org/x/time/rate" is depended on golang context package go1.7 onward
|
||||
// this file is only for build,not supports limit upload speed
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
perTokenBandwidthSize int = 1024
|
||||
)
|
||||
|
||||
type OssLimiter struct {
|
||||
}
|
||||
|
||||
type LimitSpeedReader struct {
|
||||
io.ReadCloser
|
||||
reader io.Reader
|
||||
ossLimiter *OssLimiter
|
||||
}
|
||||
|
||||
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
|
||||
err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
|
||||
return nil, err
|
||||
}
|
||||
91
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
generated
vendored
91
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
generated
vendored
@@ -1,91 +0,0 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
const (
|
||||
perTokenBandwidthSize int = 1024
|
||||
)
|
||||
|
||||
// OssLimiter wrapper rate.Limiter
|
||||
type OssLimiter struct {
|
||||
limiter *rate.Limiter
|
||||
}
|
||||
|
||||
// GetOssLimiter create OssLimiter
|
||||
// uploadSpeed KB/s
|
||||
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
|
||||
limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
|
||||
|
||||
// first consume the initial full token,the limiter will behave more accurately
|
||||
limiter.AllowN(time.Now(), uploadSpeed)
|
||||
|
||||
return &OssLimiter{
|
||||
limiter: limiter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LimitSpeedReader for limit bandwidth upload
|
||||
type LimitSpeedReader struct {
|
||||
io.ReadCloser
|
||||
reader io.Reader
|
||||
ossLimiter *OssLimiter
|
||||
}
|
||||
|
||||
// Read
|
||||
func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
|
||||
n = 0
|
||||
err = nil
|
||||
start := 0
|
||||
burst := r.ossLimiter.limiter.Burst()
|
||||
var end int
|
||||
var tmpN int
|
||||
var tc int
|
||||
for start < len(p) {
|
||||
if start+burst*perTokenBandwidthSize < len(p) {
|
||||
end = start + burst*perTokenBandwidthSize
|
||||
} else {
|
||||
end = len(p)
|
||||
}
|
||||
|
||||
tmpN, err = r.reader.Read(p[start:end])
|
||||
if tmpN > 0 {
|
||||
n += tmpN
|
||||
start = n
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
|
||||
now := time.Now()
|
||||
re := r.ossLimiter.limiter.ReserveN(now, tc)
|
||||
if !re.OK() {
|
||||
err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
|
||||
start, end, burst, perTokenBandwidthSize)
|
||||
return
|
||||
}
|
||||
timeDelay := re.Delay()
|
||||
time.Sleep(timeDelay)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Close ...
|
||||
func (r *LimitSpeedReader) Close() error {
|
||||
rc, ok := r.reader.(io.ReadCloser)
|
||||
if ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
257
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
generated
vendored
257
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
generated
vendored
@@ -1,257 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
//
|
||||
// CreateLiveChannel create a live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// config configuration of the channel
|
||||
//
|
||||
// CreateLiveChannelResult the result of create live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
|
||||
var out CreateLiveChannelResult
|
||||
|
||||
bs, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// status enabled/disabled
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["status"] = status
|
||||
|
||||
resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// playlistName the name of the playlist, must end with ".m3u8"
|
||||
// startTime the start time of the playlist
|
||||
// endTime the endtime of the playlist
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
|
||||
params := map[string]interface{}{}
|
||||
params["vod"] = nil
|
||||
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
|
||||
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
|
||||
|
||||
key := fmt.Sprintf("%s/%s", channelName, playlistName)
|
||||
resp, err := bucket.do("POST", key, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// startTime the start time of the playlist
|
||||
// endTime the endtime of the playlist
|
||||
//
|
||||
// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["vod"] = nil
|
||||
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
|
||||
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelStat Get the state of the live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelStat the state of the live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
|
||||
var out LiveChannelStat
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["comp"] = "stat"
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelInfo Get the configuration info of the live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelConfiguration the configuration info of the live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
|
||||
var out LiveChannelConfiguration
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelHistory Get push records of live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelHistory push records
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
|
||||
var out LiveChannelHistory
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["comp"] = "history"
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// ListLiveChannel list the live-channels
|
||||
//
|
||||
// options Prefix: filter by the name start with the value of "Prefix"
|
||||
// MaxKeys: the maximum count returned
|
||||
// Marker: cursor from which starting list
|
||||
//
|
||||
// ListLiveChannelResult live-channel list
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
|
||||
var out ListLiveChannelResult
|
||||
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["live"] = nil
|
||||
|
||||
resp, err := bucket.doInner("GET", "", params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// DeleteLiveChannel Delete the live-channel. When a client trying to stream the live-channel, the operation will fail. it will only delete the live-channel itself and the object generated by the live-channel will not be deleted.
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) DeleteLiveChannel(channelName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
|
||||
if channelName == "" {
|
||||
return fmt.Errorf("invalid argument: channel name is empty")
|
||||
}
|
||||
|
||||
resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
//
|
||||
// SignRtmpURL Generate a RTMP push-stream signature URL for the trusted user to push the RTMP stream to the live-channel.
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// playlistName the name of the playlist, must end with ".m3u8"
|
||||
// expires expiration (in seconds)
|
||||
//
|
||||
// string singed rtmp push stream url
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
|
||||
if expires <= 0 {
|
||||
return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires)
|
||||
}
|
||||
expiration := time.Now().Unix() + expires
|
||||
|
||||
return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
|
||||
}
|
||||
594
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
generated
vendored
594
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
generated
vendored
@@ -1,594 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var extToMimeType = map[string]string{
|
||||
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
|
||||
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
|
||||
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
|
||||
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
|
||||
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
|
||||
".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
|
||||
".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
|
||||
".apk": "application/vnd.android.package-archive",
|
||||
".hqx": "application/mac-binhex40",
|
||||
".cpt": "application/mac-compactpro",
|
||||
".doc": "application/msword",
|
||||
".ogg": "application/ogg",
|
||||
".pdf": "application/pdf",
|
||||
".rtf": "text/rtf",
|
||||
".mif": "application/vnd.mif",
|
||||
".xls": "application/vnd.ms-excel",
|
||||
".ppt": "application/vnd.ms-powerpoint",
|
||||
".odc": "application/vnd.oasis.opendocument.chart",
|
||||
".odb": "application/vnd.oasis.opendocument.database",
|
||||
".odf": "application/vnd.oasis.opendocument.formula",
|
||||
".odg": "application/vnd.oasis.opendocument.graphics",
|
||||
".otg": "application/vnd.oasis.opendocument.graphics-template",
|
||||
".odi": "application/vnd.oasis.opendocument.image",
|
||||
".odp": "application/vnd.oasis.opendocument.presentation",
|
||||
".otp": "application/vnd.oasis.opendocument.presentation-template",
|
||||
".ods": "application/vnd.oasis.opendocument.spreadsheet",
|
||||
".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
|
||||
".odt": "application/vnd.oasis.opendocument.text",
|
||||
".odm": "application/vnd.oasis.opendocument.text-master",
|
||||
".ott": "application/vnd.oasis.opendocument.text-template",
|
||||
".oth": "application/vnd.oasis.opendocument.text-web",
|
||||
".sxw": "application/vnd.sun.xml.writer",
|
||||
".stw": "application/vnd.sun.xml.writer.template",
|
||||
".sxc": "application/vnd.sun.xml.calc",
|
||||
".stc": "application/vnd.sun.xml.calc.template",
|
||||
".sxd": "application/vnd.sun.xml.draw",
|
||||
".std": "application/vnd.sun.xml.draw.template",
|
||||
".sxi": "application/vnd.sun.xml.impress",
|
||||
".sti": "application/vnd.sun.xml.impress.template",
|
||||
".sxg": "application/vnd.sun.xml.writer.global",
|
||||
".sxm": "application/vnd.sun.xml.math",
|
||||
".sis": "application/vnd.symbian.install",
|
||||
".wbxml": "application/vnd.wap.wbxml",
|
||||
".wmlc": "application/vnd.wap.wmlc",
|
||||
".wmlsc": "application/vnd.wap.wmlscriptc",
|
||||
".bcpio": "application/x-bcpio",
|
||||
".torrent": "application/x-bittorrent",
|
||||
".bz2": "application/x-bzip2",
|
||||
".vcd": "application/x-cdlink",
|
||||
".pgn": "application/x-chess-pgn",
|
||||
".cpio": "application/x-cpio",
|
||||
".csh": "application/x-csh",
|
||||
".dvi": "application/x-dvi",
|
||||
".spl": "application/x-futuresplash",
|
||||
".gtar": "application/x-gtar",
|
||||
".hdf": "application/x-hdf",
|
||||
".jar": "application/x-java-archive",
|
||||
".jnlp": "application/x-java-jnlp-file",
|
||||
".js": "application/x-javascript",
|
||||
".ksp": "application/x-kspread",
|
||||
".chrt": "application/x-kchart",
|
||||
".kil": "application/x-killustrator",
|
||||
".latex": "application/x-latex",
|
||||
".rpm": "application/x-rpm",
|
||||
".sh": "application/x-sh",
|
||||
".shar": "application/x-shar",
|
||||
".swf": "application/x-shockwave-flash",
|
||||
".sit": "application/x-stuffit",
|
||||
".sv4cpio": "application/x-sv4cpio",
|
||||
".sv4crc": "application/x-sv4crc",
|
||||
".tar": "application/x-tar",
|
||||
".tcl": "application/x-tcl",
|
||||
".tex": "application/x-tex",
|
||||
".man": "application/x-troff-man",
|
||||
".me": "application/x-troff-me",
|
||||
".ms": "application/x-troff-ms",
|
||||
".ustar": "application/x-ustar",
|
||||
".src": "application/x-wais-source",
|
||||
".zip": "application/zip",
|
||||
".m3u": "audio/x-mpegurl",
|
||||
".ra": "audio/x-pn-realaudio",
|
||||
".wav": "audio/x-wav",
|
||||
".wma": "audio/x-ms-wma",
|
||||
".wax": "audio/x-ms-wax",
|
||||
".pdb": "chemical/x-pdb",
|
||||
".xyz": "chemical/x-xyz",
|
||||
".bmp": "image/bmp",
|
||||
".gif": "image/gif",
|
||||
".ief": "image/ief",
|
||||
".png": "image/png",
|
||||
".wbmp": "image/vnd.wap.wbmp",
|
||||
".ras": "image/x-cmu-raster",
|
||||
".pnm": "image/x-portable-anymap",
|
||||
".pbm": "image/x-portable-bitmap",
|
||||
".pgm": "image/x-portable-graymap",
|
||||
".ppm": "image/x-portable-pixmap",
|
||||
".rgb": "image/x-rgb",
|
||||
".xbm": "image/x-xbitmap",
|
||||
".xpm": "image/x-xpixmap",
|
||||
".xwd": "image/x-xwindowdump",
|
||||
".css": "text/css",
|
||||
".rtx": "text/richtext",
|
||||
".tsv": "text/tab-separated-values",
|
||||
".jad": "text/vnd.sun.j2me.app-descriptor",
|
||||
".wml": "text/vnd.wap.wml",
|
||||
".wmls": "text/vnd.wap.wmlscript",
|
||||
".etx": "text/x-setext",
|
||||
".mxu": "video/vnd.mpegurl",
|
||||
".flv": "video/x-flv",
|
||||
".wm": "video/x-ms-wm",
|
||||
".wmv": "video/x-ms-wmv",
|
||||
".wmx": "video/x-ms-wmx",
|
||||
".wvx": "video/x-ms-wvx",
|
||||
".avi": "video/x-msvideo",
|
||||
".movie": "video/x-sgi-movie",
|
||||
".ice": "x-conference/x-cooltalk",
|
||||
".3gp": "video/3gpp",
|
||||
".ai": "application/postscript",
|
||||
".aif": "audio/x-aiff",
|
||||
".aifc": "audio/x-aiff",
|
||||
".aiff": "audio/x-aiff",
|
||||
".asc": "text/plain",
|
||||
".atom": "application/atom+xml",
|
||||
".au": "audio/basic",
|
||||
".bin": "application/octet-stream",
|
||||
".cdf": "application/x-netcdf",
|
||||
".cgm": "image/cgm",
|
||||
".class": "application/octet-stream",
|
||||
".dcr": "application/x-director",
|
||||
".dif": "video/x-dv",
|
||||
".dir": "application/x-director",
|
||||
".djv": "image/vnd.djvu",
|
||||
".djvu": "image/vnd.djvu",
|
||||
".dll": "application/octet-stream",
|
||||
".dmg": "application/octet-stream",
|
||||
".dms": "application/octet-stream",
|
||||
".dtd": "application/xml-dtd",
|
||||
".dv": "video/x-dv",
|
||||
".dxr": "application/x-director",
|
||||
".eps": "application/postscript",
|
||||
".exe": "application/octet-stream",
|
||||
".ez": "application/andrew-inset",
|
||||
".gram": "application/srgs",
|
||||
".grxml": "application/srgs+xml",
|
||||
".gz": "application/x-gzip",
|
||||
".htm": "text/html",
|
||||
".html": "text/html",
|
||||
".ico": "image/x-icon",
|
||||
".ics": "text/calendar",
|
||||
".ifb": "text/calendar",
|
||||
".iges": "model/iges",
|
||||
".igs": "model/iges",
|
||||
".jp2": "image/jp2",
|
||||
".jpe": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".jpg": "image/jpeg",
|
||||
".kar": "audio/midi",
|
||||
".lha": "application/octet-stream",
|
||||
".lzh": "application/octet-stream",
|
||||
".m4a": "audio/mp4a-latm",
|
||||
".m4p": "audio/mp4a-latm",
|
||||
".m4u": "video/vnd.mpegurl",
|
||||
".m4v": "video/x-m4v",
|
||||
".mac": "image/x-macpaint",
|
||||
".mathml": "application/mathml+xml",
|
||||
".mesh": "model/mesh",
|
||||
".mid": "audio/midi",
|
||||
".midi": "audio/midi",
|
||||
".mov": "video/quicktime",
|
||||
".mp2": "audio/mpeg",
|
||||
".mp3": "audio/mpeg",
|
||||
".mp4": "video/mp4",
|
||||
".mpe": "video/mpeg",
|
||||
".mpeg": "video/mpeg",
|
||||
".mpg": "video/mpeg",
|
||||
".mpga": "audio/mpeg",
|
||||
".msh": "model/mesh",
|
||||
".nc": "application/x-netcdf",
|
||||
".oda": "application/oda",
|
||||
".ogv": "video/ogv",
|
||||
".pct": "image/pict",
|
||||
".pic": "image/pict",
|
||||
".pict": "image/pict",
|
||||
".pnt": "image/x-macpaint",
|
||||
".pntg": "image/x-macpaint",
|
||||
".ps": "application/postscript",
|
||||
".qt": "video/quicktime",
|
||||
".qti": "image/x-quicktime",
|
||||
".qtif": "image/x-quicktime",
|
||||
".ram": "audio/x-pn-realaudio",
|
||||
".rdf": "application/rdf+xml",
|
||||
".rm": "application/vnd.rn-realmedia",
|
||||
".roff": "application/x-troff",
|
||||
".sgm": "text/sgml",
|
||||
".sgml": "text/sgml",
|
||||
".silo": "model/mesh",
|
||||
".skd": "application/x-koan",
|
||||
".skm": "application/x-koan",
|
||||
".skp": "application/x-koan",
|
||||
".skt": "application/x-koan",
|
||||
".smi": "application/smil",
|
||||
".smil": "application/smil",
|
||||
".snd": "audio/basic",
|
||||
".so": "application/octet-stream",
|
||||
".svg": "image/svg+xml",
|
||||
".t": "application/x-troff",
|
||||
".texi": "application/x-texinfo",
|
||||
".texinfo": "application/x-texinfo",
|
||||
".tif": "image/tiff",
|
||||
".tiff": "image/tiff",
|
||||
".tr": "application/x-troff",
|
||||
".txt": "text/plain",
|
||||
".vrml": "model/vrml",
|
||||
".vxml": "application/voicexml+xml",
|
||||
".webm": "video/webm",
|
||||
".wrl": "model/vrml",
|
||||
".xht": "application/xhtml+xml",
|
||||
".xhtml": "application/xhtml+xml",
|
||||
".xml": "application/xml",
|
||||
".xsl": "application/xml",
|
||||
".xslt": "application/xslt+xml",
|
||||
".xul": "application/vnd.mozilla.xul+xml",
|
||||
".webp": "image/webp",
|
||||
".323": "text/h323",
|
||||
".aab": "application/x-authoware-bin",
|
||||
".aam": "application/x-authoware-map",
|
||||
".aas": "application/x-authoware-seg",
|
||||
".acx": "application/internet-property-stream",
|
||||
".als": "audio/X-Alpha5",
|
||||
".amc": "application/x-mpeg",
|
||||
".ani": "application/octet-stream",
|
||||
".asd": "application/astound",
|
||||
".asf": "video/x-ms-asf",
|
||||
".asn": "application/astound",
|
||||
".asp": "application/x-asap",
|
||||
".asr": "video/x-ms-asf",
|
||||
".asx": "video/x-ms-asf",
|
||||
".avb": "application/octet-stream",
|
||||
".awb": "audio/amr-wb",
|
||||
".axs": "application/olescript",
|
||||
".bas": "text/plain",
|
||||
".bin ": "application/octet-stream",
|
||||
".bld": "application/bld",
|
||||
".bld2": "application/bld2",
|
||||
".bpk": "application/octet-stream",
|
||||
".c": "text/plain",
|
||||
".cal": "image/x-cals",
|
||||
".cat": "application/vnd.ms-pkiseccat",
|
||||
".ccn": "application/x-cnc",
|
||||
".cco": "application/x-cocoa",
|
||||
".cer": "application/x-x509-ca-cert",
|
||||
".cgi": "magnus-internal/cgi",
|
||||
".chat": "application/x-chat",
|
||||
".clp": "application/x-msclip",
|
||||
".cmx": "image/x-cmx",
|
||||
".co": "application/x-cult3d-object",
|
||||
".cod": "image/cis-cod",
|
||||
".conf": "text/plain",
|
||||
".cpp": "text/plain",
|
||||
".crd": "application/x-mscardfile",
|
||||
".crl": "application/pkix-crl",
|
||||
".crt": "application/x-x509-ca-cert",
|
||||
".csm": "chemical/x-csml",
|
||||
".csml": "chemical/x-csml",
|
||||
".cur": "application/octet-stream",
|
||||
".dcm": "x-lml/x-evm",
|
||||
".dcx": "image/x-dcx",
|
||||
".der": "application/x-x509-ca-cert",
|
||||
".dhtml": "text/html",
|
||||
".dot": "application/msword",
|
||||
".dwf": "drawing/x-dwf",
|
||||
".dwg": "application/x-autocad",
|
||||
".dxf": "application/x-autocad",
|
||||
".ebk": "application/x-expandedbook",
|
||||
".emb": "chemical/x-embl-dl-nucleotide",
|
||||
".embl": "chemical/x-embl-dl-nucleotide",
|
||||
".epub": "application/epub+zip",
|
||||
".eri": "image/x-eri",
|
||||
".es": "audio/echospeech",
|
||||
".esl": "audio/echospeech",
|
||||
".etc": "application/x-earthtime",
|
||||
".evm": "x-lml/x-evm",
|
||||
".evy": "application/envoy",
|
||||
".fh4": "image/x-freehand",
|
||||
".fh5": "image/x-freehand",
|
||||
".fhc": "image/x-freehand",
|
||||
".fif": "application/fractals",
|
||||
".flr": "x-world/x-vrml",
|
||||
".fm": "application/x-maker",
|
||||
".fpx": "image/x-fpx",
|
||||
".fvi": "video/isivideo",
|
||||
".gau": "chemical/x-gaussian-input",
|
||||
".gca": "application/x-gca-compressed",
|
||||
".gdb": "x-lml/x-gdb",
|
||||
".gps": "application/x-gps",
|
||||
".h": "text/plain",
|
||||
".hdm": "text/x-hdml",
|
||||
".hdml": "text/x-hdml",
|
||||
".hlp": "application/winhlp",
|
||||
".hta": "application/hta",
|
||||
".htc": "text/x-component",
|
||||
".hts": "text/html",
|
||||
".htt": "text/webviewhtml",
|
||||
".ifm": "image/gif",
|
||||
".ifs": "image/ifs",
|
||||
".iii": "application/x-iphone",
|
||||
".imy": "audio/melody",
|
||||
".ins": "application/x-internet-signup",
|
||||
".ips": "application/x-ipscript",
|
||||
".ipx": "application/x-ipix",
|
||||
".isp": "application/x-internet-signup",
|
||||
".it": "audio/x-mod",
|
||||
".itz": "audio/x-mod",
|
||||
".ivr": "i-world/i-vrml",
|
||||
".j2k": "image/j2k",
|
||||
".jam": "application/x-jam",
|
||||
".java": "text/plain",
|
||||
".jfif": "image/pipeg",
|
||||
".jpz": "image/jpeg",
|
||||
".jwc": "application/jwc",
|
||||
".kjx": "application/x-kjx",
|
||||
".lak": "x-lml/x-lak",
|
||||
".lcc": "application/fastman",
|
||||
".lcl": "application/x-digitalloca",
|
||||
".lcr": "application/x-digitalloca",
|
||||
".lgh": "application/lgh",
|
||||
".lml": "x-lml/x-lml",
|
||||
".lmlpack": "x-lml/x-lmlpack",
|
||||
".log": "text/plain",
|
||||
".lsf": "video/x-la-asf",
|
||||
".lsx": "video/x-la-asf",
|
||||
".m13": "application/x-msmediaview",
|
||||
".m14": "application/x-msmediaview",
|
||||
".m15": "audio/x-mod",
|
||||
".m3url": "audio/x-mpegurl",
|
||||
".m4b": "audio/mp4a-latm",
|
||||
".ma1": "audio/ma1",
|
||||
".ma2": "audio/ma2",
|
||||
".ma3": "audio/ma3",
|
||||
".ma5": "audio/ma5",
|
||||
".map": "magnus-internal/imagemap",
|
||||
".mbd": "application/mbedlet",
|
||||
".mct": "application/x-mascot",
|
||||
".mdb": "application/x-msaccess",
|
||||
".mdz": "audio/x-mod",
|
||||
".mel": "text/x-vmel",
|
||||
".mht": "message/rfc822",
|
||||
".mhtml": "message/rfc822",
|
||||
".mi": "application/x-mif",
|
||||
".mil": "image/x-cals",
|
||||
".mio": "audio/x-mio",
|
||||
".mmf": "application/x-skt-lbs",
|
||||
".mng": "video/x-mng",
|
||||
".mny": "application/x-msmoney",
|
||||
".moc": "application/x-mocha",
|
||||
".mocha": "application/x-mocha",
|
||||
".mod": "audio/x-mod",
|
||||
".mof": "application/x-yumekara",
|
||||
".mol": "chemical/x-mdl-molfile",
|
||||
".mop": "chemical/x-mopac-input",
|
||||
".mpa": "video/mpeg",
|
||||
".mpc": "application/vnd.mpohun.certificate",
|
||||
".mpg4": "video/mp4",
|
||||
".mpn": "application/vnd.mophun.application",
|
||||
".mpp": "application/vnd.ms-project",
|
||||
".mps": "application/x-mapserver",
|
||||
".mpv2": "video/mpeg",
|
||||
".mrl": "text/x-mrml",
|
||||
".mrm": "application/x-mrm",
|
||||
".msg": "application/vnd.ms-outlook",
|
||||
".mts": "application/metastream",
|
||||
".mtx": "application/metastream",
|
||||
".mtz": "application/metastream",
|
||||
".mvb": "application/x-msmediaview",
|
||||
".mzv": "application/metastream",
|
||||
".nar": "application/zip",
|
||||
".nbmp": "image/nbmp",
|
||||
".ndb": "x-lml/x-ndb",
|
||||
".ndwn": "application/ndwn",
|
||||
".nif": "application/x-nif",
|
||||
".nmz": "application/x-scream",
|
||||
".nokia-op-logo": "image/vnd.nok-oplogo-color",
|
||||
".npx": "application/x-netfpx",
|
||||
".nsnd": "audio/nsnd",
|
||||
".nva": "application/x-neva1",
|
||||
".nws": "message/rfc822",
|
||||
".oom": "application/x-AtlasMate-Plugin",
|
||||
".p10": "application/pkcs10",
|
||||
".p12": "application/x-pkcs12",
|
||||
".p7b": "application/x-pkcs7-certificates",
|
||||
".p7c": "application/x-pkcs7-mime",
|
||||
".p7m": "application/x-pkcs7-mime",
|
||||
".p7r": "application/x-pkcs7-certreqresp",
|
||||
".p7s": "application/x-pkcs7-signature",
|
||||
".pac": "audio/x-pac",
|
||||
".pae": "audio/x-epac",
|
||||
".pan": "application/x-pan",
|
||||
".pcx": "image/x-pcx",
|
||||
".pda": "image/x-pda",
|
||||
".pfr": "application/font-tdpfr",
|
||||
".pfx": "application/x-pkcs12",
|
||||
".pko": "application/ynd.ms-pkipko",
|
||||
".pm": "application/x-perl",
|
||||
".pma": "application/x-perfmon",
|
||||
".pmc": "application/x-perfmon",
|
||||
".pmd": "application/x-pmd",
|
||||
".pml": "application/x-perfmon",
|
||||
".pmr": "application/x-perfmon",
|
||||
".pmw": "application/x-perfmon",
|
||||
".pnz": "image/png",
|
||||
".pot,": "application/vnd.ms-powerpoint",
|
||||
".pps": "application/vnd.ms-powerpoint",
|
||||
".pqf": "application/x-cprplayer",
|
||||
".pqi": "application/cprplayer",
|
||||
".prc": "application/x-prc",
|
||||
".prf": "application/pics-rules",
|
||||
".prop": "text/plain",
|
||||
".proxy": "application/x-ns-proxy-autoconfig",
|
||||
".ptlk": "application/listenup",
|
||||
".pub": "application/x-mspublisher",
|
||||
".pvx": "video/x-pv-pvx",
|
||||
".qcp": "audio/vnd.qcelp",
|
||||
".r3t": "text/vnd.rn-realtext3d",
|
||||
".rar": "application/octet-stream",
|
||||
".rc": "text/plain",
|
||||
".rf": "image/vnd.rn-realflash",
|
||||
".rlf": "application/x-richlink",
|
||||
".rmf": "audio/x-rmf",
|
||||
".rmi": "audio/mid",
|
||||
".rmm": "audio/x-pn-realaudio",
|
||||
".rmvb": "audio/x-pn-realaudio",
|
||||
".rnx": "application/vnd.rn-realplayer",
|
||||
".rp": "image/vnd.rn-realpix",
|
||||
".rt": "text/vnd.rn-realtext",
|
||||
".rte": "x-lml/x-gps",
|
||||
".rtg": "application/metastream",
|
||||
".rv": "video/vnd.rn-realvideo",
|
||||
".rwc": "application/x-rogerwilco",
|
||||
".s3m": "audio/x-mod",
|
||||
".s3z": "audio/x-mod",
|
||||
".sca": "application/x-supercard",
|
||||
".scd": "application/x-msschedule",
|
||||
".sct": "text/scriptlet",
|
||||
".sdf": "application/e-score",
|
||||
".sea": "application/x-stuffit",
|
||||
".setpay": "application/set-payment-initiation",
|
||||
".setreg": "application/set-registration-initiation",
|
||||
".shtml": "text/html",
|
||||
".shtm": "text/html",
|
||||
".shw": "application/presentations",
|
||||
".si6": "image/si6",
|
||||
".si7": "image/vnd.stiwap.sis",
|
||||
".si9": "image/vnd.lgtwap.sis",
|
||||
".slc": "application/x-salsa",
|
||||
".smd": "audio/x-smd",
|
||||
".smp": "application/studiom",
|
||||
".smz": "audio/x-smd",
|
||||
".spc": "application/x-pkcs7-certificates",
|
||||
".spr": "application/x-sprite",
|
||||
".sprite": "application/x-sprite",
|
||||
".sdp": "application/sdp",
|
||||
".spt": "application/x-spt",
|
||||
".sst": "application/vnd.ms-pkicertstore",
|
||||
".stk": "application/hyperstudio",
|
||||
".stl": "application/vnd.ms-pkistl",
|
||||
".stm": "text/html",
|
||||
".svf": "image/vnd",
|
||||
".svh": "image/svh",
|
||||
".svr": "x-world/x-svr",
|
||||
".swfl": "application/x-shockwave-flash",
|
||||
".tad": "application/octet-stream",
|
||||
".talk": "text/x-speech",
|
||||
".taz": "application/x-tar",
|
||||
".tbp": "application/x-timbuktu",
|
||||
".tbt": "application/x-timbuktu",
|
||||
".tgz": "application/x-compressed",
|
||||
".thm": "application/vnd.eri.thm",
|
||||
".tki": "application/x-tkined",
|
||||
".tkined": "application/x-tkined",
|
||||
".toc": "application/toc",
|
||||
".toy": "image/toy",
|
||||
".trk": "x-lml/x-gps",
|
||||
".trm": "application/x-msterminal",
|
||||
".tsi": "audio/tsplayer",
|
||||
".tsp": "application/dsptype",
|
||||
".ttf": "application/octet-stream",
|
||||
".ttz": "application/t-time",
|
||||
".uls": "text/iuls",
|
||||
".ult": "audio/x-mod",
|
||||
".uu": "application/x-uuencode",
|
||||
".uue": "application/x-uuencode",
|
||||
".vcf": "text/x-vcard",
|
||||
".vdo": "video/vdo",
|
||||
".vib": "audio/vib",
|
||||
".viv": "video/vivo",
|
||||
".vivo": "video/vivo",
|
||||
".vmd": "application/vocaltec-media-desc",
|
||||
".vmf": "application/vocaltec-media-file",
|
||||
".vmi": "application/x-dreamcast-vms-info",
|
||||
".vms": "application/x-dreamcast-vms",
|
||||
".vox": "audio/voxware",
|
||||
".vqe": "audio/x-twinvq-plugin",
|
||||
".vqf": "audio/x-twinvq",
|
||||
".vql": "audio/x-twinvq",
|
||||
".vre": "x-world/x-vream",
|
||||
".vrt": "x-world/x-vrt",
|
||||
".vrw": "x-world/x-vream",
|
||||
".vts": "workbook/formulaone",
|
||||
".wcm": "application/vnd.ms-works",
|
||||
".wdb": "application/vnd.ms-works",
|
||||
".web": "application/vnd.xara",
|
||||
".wi": "image/wavelet",
|
||||
".wis": "application/x-InstallShield",
|
||||
".wks": "application/vnd.ms-works",
|
||||
".wmd": "application/x-ms-wmd",
|
||||
".wmf": "application/x-msmetafile",
|
||||
".wmlscript": "text/vnd.wap.wmlscript",
|
||||
".wmz": "application/x-ms-wmz",
|
||||
".wpng": "image/x-up-wpng",
|
||||
".wps": "application/vnd.ms-works",
|
||||
".wpt": "x-lml/x-gps",
|
||||
".wri": "application/x-mswrite",
|
||||
".wrz": "x-world/x-vrml",
|
||||
".ws": "text/vnd.wap.wmlscript",
|
||||
".wsc": "application/vnd.wap.wmlscriptc",
|
||||
".wv": "video/wavelet",
|
||||
".wxl": "application/x-wxl",
|
||||
".x-gzip": "application/x-gzip",
|
||||
".xaf": "x-world/x-vrml",
|
||||
".xar": "application/vnd.xara",
|
||||
".xdm": "application/x-xdma",
|
||||
".xdma": "application/x-xdma",
|
||||
".xdw": "application/vnd.fujixerox.docuworks",
|
||||
".xhtm": "application/xhtml+xml",
|
||||
".xla": "application/vnd.ms-excel",
|
||||
".xlc": "application/vnd.ms-excel",
|
||||
".xll": "application/x-excel",
|
||||
".xlm": "application/vnd.ms-excel",
|
||||
".xlt": "application/vnd.ms-excel",
|
||||
".xlw": "application/vnd.ms-excel",
|
||||
".xm": "audio/x-mod",
|
||||
".xmz": "audio/x-mod",
|
||||
".xof": "x-world/x-vrml",
|
||||
".xpi": "application/x-xpinstall",
|
||||
".xsit": "text/xml",
|
||||
".yz1": "application/x-yz1",
|
||||
".z": "application/x-compress",
|
||||
".zac": "application/x-zaurus-zac",
|
||||
".json": "application/json",
|
||||
}
|
||||
|
||||
// TypeByExtension returns the MIME type associated with the file extension ext.
|
||||
// gets the file's MIME type for HTTP header Content-Type
|
||||
func TypeByExtension(filePath string) string {
|
||||
typ := mime.TypeByExtension(path.Ext(filePath))
|
||||
if typ == "" {
|
||||
typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
|
||||
} else {
|
||||
if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") {
|
||||
typ = removeCharsetInMimeType(typ)
|
||||
}
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// Remove charset from mime type
|
||||
func removeCharsetInMimeType(typ string) (str string) {
|
||||
temArr := strings.Split(typ, ";")
|
||||
var builder strings.Builder
|
||||
for i, s := range temArr {
|
||||
tmpStr := strings.Trim(s, " ")
|
||||
if strings.Contains(tmpStr, "charset=") {
|
||||
continue
|
||||
}
|
||||
if i == 0 {
|
||||
builder.WriteString(s)
|
||||
} else {
|
||||
builder.WriteString("; " + s)
|
||||
}
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
69
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
generated
vendored
69
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
generated
vendored
@@ -1,69 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Response defines HTTP response from OSS
|
||||
type Response struct {
|
||||
StatusCode int
|
||||
Headers http.Header
|
||||
Body io.ReadCloser
|
||||
ClientCRC uint64
|
||||
ServerCRC uint64
|
||||
}
|
||||
|
||||
func (r *Response) Read(p []byte) (n int, err error) {
|
||||
return r.Body.Read(p)
|
||||
}
|
||||
|
||||
// Close close http reponse body
|
||||
func (r *Response) Close() error {
|
||||
return r.Body.Close()
|
||||
}
|
||||
|
||||
// PutObjectRequest is the request of DoPutObject
|
||||
type PutObjectRequest struct {
|
||||
ObjectKey string
|
||||
Reader io.Reader
|
||||
}
|
||||
|
||||
// GetObjectRequest is the request of DoGetObject
|
||||
type GetObjectRequest struct {
|
||||
ObjectKey string
|
||||
}
|
||||
|
||||
// GetObjectResult is the result of DoGetObject
|
||||
type GetObjectResult struct {
|
||||
Response *Response
|
||||
ClientCRC hash.Hash64
|
||||
ServerCRC uint64
|
||||
}
|
||||
|
||||
// AppendObjectRequest is the requtest of DoAppendObject
|
||||
type AppendObjectRequest struct {
|
||||
ObjectKey string
|
||||
Reader io.Reader
|
||||
Position int64
|
||||
}
|
||||
|
||||
// AppendObjectResult is the result of DoAppendObject
|
||||
type AppendObjectResult struct {
|
||||
NextPosition int64
|
||||
CRC uint64
|
||||
}
|
||||
|
||||
// UploadPartRequest is the request of DoUploadPart
|
||||
type UploadPartRequest struct {
|
||||
InitResult *InitiateMultipartUploadResult
|
||||
Reader io.Reader
|
||||
PartSize int64
|
||||
PartNumber int
|
||||
}
|
||||
|
||||
// UploadPartResult is the result of DoUploadPart
|
||||
type UploadPartResult struct {
|
||||
Part UploadPart
|
||||
}
|
||||
474
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
generated
vendored
474
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
generated
vendored
@@ -1,474 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// CopyFile is multipart copy object
|
||||
//
|
||||
// srcBucketName source bucket name
|
||||
// srcObjectKey source object name
|
||||
// destObjectKey target object name in the form of bucketname.objectkey
|
||||
// partSize the part size in byte.
|
||||
// options object's contraints. Check out function InitiateMultipartUpload.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
|
||||
destBucketName := bucket.BucketName
|
||||
if partSize < MinPartSize || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (1024KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
var strVersionId string
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
strVersionId = versionId.(string)
|
||||
}
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId)
|
||||
if cpFilePath != "" {
|
||||
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
|
||||
partSize, options, routines)
|
||||
}
|
||||
|
||||
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
cpFileName := getCpFileName(src, dest, versionId)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// ----- Concurrently copy without checkpoint ---------
|
||||
|
||||
// copyWorkerArg defines the copy worker arguments
|
||||
type copyWorkerArg struct {
|
||||
bucket *Bucket
|
||||
imur InitiateMultipartUploadResult
|
||||
srcBucketName string
|
||||
srcObjectKey string
|
||||
options []Option
|
||||
hook copyPartHook
|
||||
}
|
||||
|
||||
// copyPartHook is the hook for testing purpose
|
||||
type copyPartHook func(part copyPart) error
|
||||
|
||||
var copyPartHooker copyPartHook = defaultCopyPartHook
|
||||
|
||||
func defaultCopyPartHook(part copyPart) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyWorker copies worker
|
||||
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
|
||||
for chunk := range jobs {
|
||||
if err := arg.hook(chunk); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
chunkSize := chunk.End - chunk.Start + 1
|
||||
part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
|
||||
chunk.Start, chunkSize, chunk.Number, arg.options...)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// copyScheduler
|
||||
func copyScheduler(jobs chan copyPart, parts []copyPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
// copyPart structure
|
||||
type copyPart struct {
|
||||
Number int // Part number (from 1 to 10,000)
|
||||
Start int64 // The start index in the source file.
|
||||
End int64 // The end index in the source file
|
||||
}
|
||||
|
||||
// getCopyParts calculates copy parts
|
||||
func getCopyParts(objectSize, partSize int64) []copyPart {
|
||||
parts := []copyPart{}
|
||||
part := copyPart{}
|
||||
i := 0
|
||||
for offset := int64(0); offset < objectSize; offset += partSize {
|
||||
part.Number = i + 1
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, objectSize, partSize)
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
// getSrcObjectBytes gets the source file size
|
||||
func getSrcObjectBytes(parts []copyPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
ob += (part.End - part.Start + 1)
|
||||
}
|
||||
return ob
|
||||
}
|
||||
|
||||
// copyFile is a concurrently copy without checkpoint
|
||||
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// choice valid options
|
||||
headerOptions := ChoiceHeadObjectOption(options)
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
abortOptions := ChoiceAbortPartOption(options)
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get copy parts
|
||||
parts := getCopyParts(objectSize, partSize)
|
||||
// Initialize the multipart upload
|
||||
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jobs := make(chan copyPart, len(parts))
|
||||
results := make(chan UploadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
var completedBytes int64
|
||||
totalBytes := getSrcObjectBytes(parts)
|
||||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start to copy workers
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts finished.
|
||||
completed := 0
|
||||
ups := make([]UploadPart, len(parts))
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ups[part.PartNumber-1] = part
|
||||
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
|
||||
completedBytes += copyBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
descBucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multipart upload
|
||||
_, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- Concurrently copy with checkpoint -----
|
||||
|
||||
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
|
||||
|
||||
type copyCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // CP content MD5
|
||||
SrcBucketName string // Source bucket
|
||||
SrcObjectKey string // Source object
|
||||
DestBucketName string // Target bucket
|
||||
DestObjectKey string // Target object
|
||||
CopyID string // Copy ID
|
||||
ObjStat objectStat // Object stat
|
||||
Parts []copyPart // Copy parts
|
||||
CopyParts []UploadPart // The uploaded parts
|
||||
PartStat []bool // The part status
|
||||
}
|
||||
|
||||
// isValid checks if the data is valid which means CP is valid and object is not updated.
|
||||
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
|
||||
// Compare CP's magic number and the MD5.
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size and last modified time and etag.
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the checkpoint file
|
||||
func (cp *copyCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// update updates the parts status
|
||||
func (cp *copyCheckpoint) update(part UploadPart) {
|
||||
cp.CopyParts[part.PartNumber-1] = part
|
||||
cp.PartStat[part.PartNumber-1] = true
|
||||
}
|
||||
|
||||
// dump dumps the CP to the file
|
||||
func (cp *copyCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
func (cp copyCheckpoint) todoParts() []copyPart {
|
||||
dps := []copyPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
if !ps {
|
||||
dps = append(dps, cp.Parts[i])
|
||||
}
|
||||
}
|
||||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns finished bytes count
|
||||
func (cp copyCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
if cp.PartStat[i] {
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initializes the multipart upload
|
||||
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
|
||||
partSize int64, options []Option) error {
|
||||
// CP
|
||||
cp.Magic = copyCpMagic
|
||||
cp.SrcBucketName = srcBucket.BucketName
|
||||
cp.SrcObjectKey = srcObjectKey
|
||||
cp.DestBucketName = destBucket.BucketName
|
||||
cp.DestObjectKey = destObjectKey
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.ObjStat.Size = objectSize
|
||||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
// Parts
|
||||
cp.Parts = getCopyParts(objectSize, partSize)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
cp.CopyParts = make([]UploadPart, len(cp.Parts))
|
||||
|
||||
// Init copy
|
||||
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.CopyID = imur.UploadID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
|
||||
imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
|
||||
Key: cp.DestObjectKey, UploadID: cp.CopyID}
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
return err
|
||||
}
|
||||
|
||||
// copyFileWithCp is concurrently copy with checkpoint
|
||||
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// Load CP data
|
||||
ccp := copyCheckpoint{}
|
||||
err = ccp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// choice valid options
|
||||
headerOptions := ChoiceHeadObjectOption(options)
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load error or the CP data is invalid---reinitialize
|
||||
valid, err := ccp.isValid(meta)
|
||||
if err != nil || !valid {
|
||||
if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Unfinished parts
|
||||
parts := ccp.todoParts()
|
||||
imur := InitiateMultipartUploadResult{
|
||||
Bucket: destBucketName,
|
||||
Key: destObjectKey,
|
||||
UploadID: ccp.CopyID}
|
||||
|
||||
jobs := make(chan copyPart, len(parts))
|
||||
results := make(chan UploadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
completedBytes := ccp.getCompletedBytes()
|
||||
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutines
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts completed.
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ccp.update(part)
|
||||
ccp.dump(cpFilePath)
|
||||
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
|
||||
completedBytes += copyBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
|
||||
}
|
||||
320
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
generated
vendored
320
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
generated
vendored
@@ -1,320 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// InitiateMultipartUpload initializes multipart upload
|
||||
//
|
||||
// objectKey object name
|
||||
// options the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
|
||||
//
|
||||
// ServerSideEncryption, Meta, check out the following link:
|
||||
// https://www.alibabacloud.com/help/en/object-storage-service/latest/initiatemultipartupload
|
||||
//
|
||||
// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
|
||||
var imur InitiateMultipartUploadResult
|
||||
opts := AddContentType(options, objectKey)
|
||||
params, _ := GetRawParams(options)
|
||||
paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"}
|
||||
ConvertEmptyValueToNil(params, paramKeys)
|
||||
params["uploads"] = nil
|
||||
|
||||
resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
|
||||
if err != nil {
|
||||
return imur, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &imur)
|
||||
return imur, err
|
||||
}
|
||||
|
||||
// UploadPart uploads parts
|
||||
//
|
||||
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
|
||||
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
|
||||
// And thus with the same part number and upload Id, another part upload will overwrite the data.
|
||||
// Except the last one, minimal part size is 100KB. There's no limit on the last part size.
|
||||
//
|
||||
// imur the returned value of InitiateMultipartUpload.
|
||||
// reader io.Reader the reader for the part's data.
|
||||
// size the part size.
|
||||
// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
|
||||
//
|
||||
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
|
||||
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
request := &UploadPartRequest{
|
||||
InitResult: &imur,
|
||||
Reader: reader,
|
||||
PartSize: partSize,
|
||||
PartNumber: partNumber,
|
||||
}
|
||||
|
||||
result, err := bucket.DoUploadPart(request, options)
|
||||
|
||||
return result.Part, err
|
||||
}
|
||||
|
||||
// UploadPartFromFile uploads part from the file.
|
||||
//
|
||||
// imur the return value of a successful InitiateMultipartUpload.
|
||||
// filePath the local file path to upload.
|
||||
// startPosition the start position in the local file.
|
||||
// partSize the part size.
|
||||
// partNumber the part number (from 1 to 10,000)
|
||||
//
|
||||
// UploadPart the return value consists of PartNumber and ETag.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
|
||||
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
var part = UploadPart{}
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return part, err
|
||||
}
|
||||
defer fd.Close()
|
||||
fd.Seek(startPosition, os.SEEK_SET)
|
||||
|
||||
request := &UploadPartRequest{
|
||||
InitResult: &imur,
|
||||
Reader: fd,
|
||||
PartSize: partSize,
|
||||
PartNumber: partNumber,
|
||||
}
|
||||
|
||||
result, err := bucket.DoUploadPart(request, options)
|
||||
|
||||
return result.Part, err
|
||||
}
|
||||
|
||||
// DoUploadPart does the actual part upload.
|
||||
//
|
||||
// request part upload request
|
||||
//
|
||||
// UploadPartResult the result of uploading part.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
|
||||
listener := GetProgressListener(options)
|
||||
options = append(options, ContentLength(request.PartSize))
|
||||
params := map[string]interface{}{}
|
||||
params["partNumber"] = strconv.Itoa(request.PartNumber)
|
||||
params["uploadId"] = request.InitResult.UploadID
|
||||
resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
|
||||
&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
|
||||
if err != nil {
|
||||
return &UploadPartResult{}, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
part := UploadPart{
|
||||
ETag: resp.Headers.Get(HTTPHeaderEtag),
|
||||
PartNumber: request.PartNumber,
|
||||
}
|
||||
|
||||
if bucket.GetConfig().IsEnableCRC {
|
||||
err = CheckCRC(resp, "DoUploadPart")
|
||||
if err != nil {
|
||||
return &UploadPartResult{part}, err
|
||||
}
|
||||
}
|
||||
|
||||
return &UploadPartResult{part}, nil
|
||||
}
|
||||
|
||||
// UploadPartCopy uploads part copy
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload
|
||||
// copySrc source Object name
|
||||
// startPosition the part's start index in the source file
|
||||
// partSize the part size
|
||||
// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
|
||||
// options the constraints of source object for the copy. The copy happens only when these contraints are met. Otherwise it returns error.
|
||||
//
|
||||
// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
|
||||
// https://www.alibabacloud.com/help/en/object-storage-service/latest/uploadpartcopy
|
||||
//
|
||||
// UploadPart the return value consists of PartNumber and ETag.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
|
||||
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
var out UploadPartCopyResult
|
||||
var part UploadPart
|
||||
var opts []Option
|
||||
|
||||
//first find version id
|
||||
versionIdKey := "versionId"
|
||||
versionId, _ := FindOption(options, versionIdKey, nil)
|
||||
if versionId == nil {
|
||||
opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
|
||||
CopySourceRange(startPosition, partSize)}
|
||||
} else {
|
||||
opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)),
|
||||
CopySourceRange(startPosition, partSize)}
|
||||
options = DeleteOption(options, versionIdKey)
|
||||
}
|
||||
|
||||
opts = append(opts, options...)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["partNumber"] = strconv.Itoa(partNumber)
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
|
||||
if err != nil {
|
||||
return part, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return part, err
|
||||
}
|
||||
part.ETag = out.ETag
|
||||
part.PartNumber = partNumber
|
||||
|
||||
return part, nil
|
||||
}
|
||||
|
||||
// CompleteMultipartUpload completes the multipart upload.
//
// imur   the return value of InitiateMultipartUpload.
// parts  the array of return values of UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResult  the return value when the call succeeds. Only valid when the error is nil.
// error  it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
	parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
	var out CompleteMultipartUploadResult

	// Parts must appear in ascending part-number order in the request XML.
	sort.Sort(UploadParts(parts))
	cxml := completeMultipartUploadXML{}
	cxml.Part = parts
	bs, err := xml.Marshal(cxml)
	if err != nil {
		return out, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	params := map[string]interface{}{}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return out, err
	}
	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
	if len(body) > 0 {
		if err != nil {
			// Non-OK status with a body: surface the service error details.
			err = tryConvertServiceError(body, resp, err)
		} else {
			// When an X-Oss-Callback header is set, OSS responds with the
			// callback result instead of the CompleteMultipartUpload XML, so
			// hand the raw body to the caller via the responseBody option.
			callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
			if callback == nil {
				err = xml.Unmarshal(body, &out)
			} else {
				rb, _ := FindOption(options, responseBody, nil)
				if rb != nil {
					if rbody, ok := rb.(*[]byte); ok {
						*rbody = body
					}
				}
			}
		}
	}
	return out, err
}
|
||||
|
||||
// AbortMultipartUpload aborts the multipart upload.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
|
||||
params := map[string]interface{}{}
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// ListUploadedParts lists the uploaded parts.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
//
|
||||
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
|
||||
var out ListUploadedPartsResult
|
||||
options = append(options, EncodingType("url"))
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = decodeListUploadedPartsResult(&out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// ListMultipartUploads lists all ongoing multipart upload tasks
|
||||
//
|
||||
// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
|
||||
//
|
||||
// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
|
||||
//
|
||||
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
|
||||
var out ListMultipartUploadResult
|
||||
|
||||
options = append(options, EncodingType("url"))
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
params["uploads"] = nil
|
||||
|
||||
resp, err := bucket.doInner("GET", "", params, options, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = decodeListMultipartUploadResult(&out)
|
||||
return out, err
|
||||
}
|
||||
735
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
generated
vendored
735
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
generated
vendored
@@ -1,735 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// optionType classifies where an option's value is applied on the request.
type optionType string

const (
	optionParam   optionType = "HTTPParameter" // URL parameter
	optionHTTP    optionType = "HTTPHeader"    // HTTP header
	optionContext optionType = "HTTPContext"   // context
	optionArg     optionType = "FuncArgument"  // Function argument
)

// Keys under which function-argument options are stored in the option map.
const (
	deleteObjectsQuiet = "delete-objects-quiet"
	routineNum         = "x-routine-num"
	checkpointConfig   = "x-cp-config"
	initCRC64          = "init-crc64"
	progressListener   = "x-progress-listener"
	storageClass       = "storage-class"
	responseHeader     = "x-response-header"
	redundancyType     = "redundancy-type"
	objectHashFunc     = "object-hash-func"
	responseBody       = "x-response-body"
	contextArg         = "x-context-arg"
)

type (
	// optionValue pairs an option's value with where it should be applied.
	optionValue struct {
		Value interface{}
		Type  optionType
	}

	// Option HTTP option
	Option func(map[string]optionValue) error
)
|
||||
|
||||
// ACL is an option to set X-Oss-Acl header.
func ACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssACL, string(acl))
}

// ContentType is an option to set Content-Type header.
func ContentType(value string) Option {
	return setHeader(HTTPHeaderContentType, value)
}

// ContentLength is an option to set Content-Length header.
func ContentLength(length int64) Option {
	return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
}

// CacheControl is an option to set Cache-Control header.
func CacheControl(value string) Option {
	return setHeader(HTTPHeaderCacheControl, value)
}

// ContentDisposition is an option to set Content-Disposition header.
func ContentDisposition(value string) Option {
	return setHeader(HTTPHeaderContentDisposition, value)
}

// ContentEncoding is an option to set Content-Encoding header.
func ContentEncoding(value string) Option {
	return setHeader(HTTPHeaderContentEncoding, value)
}

// ContentLanguage is an option to set Content-Language header.
func ContentLanguage(value string) Option {
	return setHeader(HTTPHeaderContentLanguage, value)
}

// ContentMD5 is an option to set Content-MD5 header.
func ContentMD5(value string) Option {
	return setHeader(HTTPHeaderContentMD5, value)
}

// Expires is an option to set Expires header.
func Expires(t time.Time) Option {
	return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
}

// Meta is an option to set an X-Oss-Meta-* user metadata header.
func Meta(key, value string) Option {
	return setHeader(HTTPHeaderOssMetaPrefix+key, value)
}

// Range is an option to set Range header, inclusive byte range [start, end].
func Range(start, end int64) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}

// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048.
func NormalizedRange(nr string) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
}

// AcceptEncoding is an option to set Accept-Encoding header.
func AcceptEncoding(value string) Option {
	return setHeader(HTTPHeaderAcceptEncoding, value)
}

// IfModifiedSince is an option to set If-Modified-Since header.
func IfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
}

// IfUnmodifiedSince is an option to set If-Unmodified-Since header.
func IfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// IfMatch is an option to set If-Match header.
func IfMatch(value string) Option {
	return setHeader(HTTPHeaderIfMatch, value)
}

// IfNoneMatch is an option to set If-None-Match header.
func IfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderIfNoneMatch, value)
}
|
||||
|
||||
// CopySource is an option to set X-Oss-Copy-Source header.
func CopySource(sourceBucket, sourceObject string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}

// CopySourceVersion is an option to set X-Oss-Copy-Source header, including a versionId.
func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId)
}

// CopySourceRange is an option to set X-Oss-Copy-Source-Range header.
func CopySourceRange(startPosition, partSize int64) Option {
	// The HTTP range end is inclusive, hence start+size-1.
	val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
		strconv.FormatInt((startPosition+partSize-1), 10)
	return setHeader(HTTPHeaderOssCopySourceRange, val)
}

// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header.
func CopySourceIfMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
}

// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header.
func CopySourceIfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
}

// CopySourceIfModifiedSince is an option to set X-Oss-Copy-Source-If-Modified-Since header.
func CopySourceIfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
}

// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header.
func CopySourceIfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// MetadataDirective is an option to set X-Oss-Metadata-Directive header.
func MetadataDirective(directive MetadataDirectiveType) Option {
	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
}
|
||||
|
||||
// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header.
func ServerSideEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryption, value)
}

// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header.
func ServerSideEncryptionKeyID(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
}

// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header.
func ServerSideDataEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideDataEncryption, value)
}

// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header.
func SSECAlgorithm(value string) Option {
	return setHeader(HTTPHeaderSSECAlgorithm, value)
}

// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header.
func SSECKey(value string) Option {
	return setHeader(HTTPHeaderSSECKey, value)
}

// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header.
func SSECKeyMd5(value string) Option {
	return setHeader(HTTPHeaderSSECKeyMd5, value)
}

// ObjectACL is an option to set X-Oss-Object-Acl header.
func ObjectACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssObjectACL, string(acl))
}
|
||||
|
||||
// symlinkTarget is an option to set X-Oss-Symlink-Target header.
func symlinkTarget(targetObjectKey string) Option {
	return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
}

// Origin is an option to set Origin header.
func Origin(value string) Option {
	return setHeader(HTTPHeaderOrigin, value)
}

// ObjectStorageClass is an option to set the storage class of an object.
func ObjectStorageClass(storageClass StorageClassType) Option {
	return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
}

// Callback is an option to set callback values.
func Callback(callback string) Option {
	return setHeader(HTTPHeaderOssCallback, callback)
}

// CallbackVar is an option to set callback user defined values.
func CallbackVar(callbackVar string) Option {
	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
}

// RequestPayer is an option to set who pays for the request (header form).
func RequestPayer(payerType PayerType) Option {
	return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
}

// RequestPayerParam is an option to set who pays for the request (URL-parameter form).
func RequestPayerParam(payerType PayerType) Option {
	return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType)))
}
|
||||
|
||||
// SetTagging is an option to set object tagging
|
||||
func SetTagging(tagging Tagging) Option {
|
||||
if len(tagging.Tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
taggingValue := ""
|
||||
for index, tag := range tagging.Tags {
|
||||
if index != 0 {
|
||||
taggingValue += "&"
|
||||
}
|
||||
taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
|
||||
}
|
||||
return setHeader(HTTPHeaderOssTagging, taggingValue)
|
||||
}
|
||||
|
||||
// TaggingDirective is an option to set X-Oss-Tagging-Directive header.
// (The previous comment incorrectly named X-Oss-Metadata-Directive.)
func TaggingDirective(directive TaggingDirectiveType) Option {
	return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
}

// ACReqMethod is an option to set Access-Control-Request-Method header.
func ACReqMethod(value string) Option {
	return setHeader(HTTPHeaderACReqMethod, value)
}

// ACReqHeaders is an option to set Access-Control-Request-Headers header.
func ACReqHeaders(value string) Option {
	return setHeader(HTTPHeaderACReqHeaders, value)
}

// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit header.
func TrafficLimitHeader(value int64) Option {
	return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
}

// UserAgentHeader is an option to set the User-Agent header.
func UserAgentHeader(ua string) Option {
	return setHeader(HTTPHeaderUserAgent, ua)
}
|
||||
|
||||
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
|
||||
func ForbidOverWrite(forbidWrite bool) Option {
|
||||
if forbidWrite {
|
||||
return setHeader(HTTPHeaderOssForbidOverWrite, "true")
|
||||
} else {
|
||||
return setHeader(HTTPHeaderOssForbidOverWrite, "false")
|
||||
}
|
||||
}
|
||||
|
||||
// RangeBehavior is an option to set X-Oss-Range-Behavior header, such as "standard".
func RangeBehavior(value string) Option {
	return setHeader(HTTPHeaderOssRangeBehavior, value)
}

// PartHashCtxHeader is an option to set the OSS hash-context header.
func PartHashCtxHeader(value string) Option {
	return setHeader(HTTPHeaderOssHashCtx, value)
}

// PartMd5CtxHeader is an option to set the OSS MD5-context header.
func PartMd5CtxHeader(value string) Option {
	return setHeader(HTTPHeaderOssMd5Ctx, value)
}

// PartHashCtxParam is an option to set the x-oss-hash-ctx URL parameter.
func PartHashCtxParam(value string) Option {
	return addParam("x-oss-hash-ctx", value)
}

// PartMd5CtxParam is an option to set the x-oss-md5-ctx URL parameter.
func PartMd5CtxParam(value string) Option {
	return addParam("x-oss-md5-ctx", value)
}
|
||||
|
||||
// Delimiter is an option to set delimiter parameter.
func Delimiter(value string) Option {
	return addParam("delimiter", value)
}

// Marker is an option to set marker parameter.
func Marker(value string) Option {
	return addParam("marker", value)
}

// MaxKeys is an option to set max-keys parameter.
func MaxKeys(value int) Option {
	return addParam("max-keys", strconv.Itoa(value))
}

// Prefix is an option to set prefix parameter.
func Prefix(value string) Option {
	return addParam("prefix", value)
}

// EncodingType is an option to set encoding-type parameter.
func EncodingType(value string) Option {
	return addParam("encoding-type", value)
}

// MaxUploads is an option to set max-uploads parameter.
func MaxUploads(value int) Option {
	return addParam("max-uploads", strconv.Itoa(value))
}

// KeyMarker is an option to set key-marker parameter.
func KeyMarker(value string) Option {
	return addParam("key-marker", value)
}

// VersionIdMarker is an option to set version-id-marker parameter.
func VersionIdMarker(value string) Option {
	return addParam("version-id-marker", value)
}

// VersionId is an option to set versionId parameter.
func VersionId(value string) Option {
	return addParam("versionId", value)
}

// TagKey is an option to set tag-key parameter.
func TagKey(value string) Option {
	return addParam("tag-key", value)
}

// TagValue is an option to set tag-value parameter.
func TagValue(value string) Option {
	return addParam("tag-value", value)
}

// UploadIDMarker is an option to set upload-id-marker parameter.
func UploadIDMarker(value string) Option {
	return addParam("upload-id-marker", value)
}
|
||||
|
||||
// MaxParts is an option to set max-parts parameter.
func MaxParts(value int) Option {
	return addParam("max-parts", strconv.Itoa(value))
}

// PartNumberMarker is an option to set part-number-marker parameter.
func PartNumberMarker(value int) Option {
	return addParam("part-number-marker", strconv.Itoa(value))
}

// Sequential is an option to set sequential parameter for InitiateMultipartUpload.
func Sequential() Option {
	return addParam("sequential", "")
}

// WithHashContext is an option to set withHashContext parameter for InitiateMultipartUpload.
func WithHashContext() Option {
	return addParam("withHashContext", "")
}

// EnableMd5 is an option to set x-oss-enable-md5 parameter for InitiateMultipartUpload.
func EnableMd5() Option {
	return addParam("x-oss-enable-md5", "")
}

// EnableSha1 is an option to set x-oss-enable-sha1 parameter for InitiateMultipartUpload.
func EnableSha1() Option {
	return addParam("x-oss-enable-sha1", "")
}

// EnableSha256 is an option to set x-oss-enable-sha256 parameter for InitiateMultipartUpload.
func EnableSha256() Option {
	return addParam("x-oss-enable-sha256", "")
}

// ListType is an option to set list-type parameter for ListObjectsV2.
func ListType(value int) Option {
	return addParam("list-type", strconv.Itoa(value))
}

// StartAfter is an option to set start-after parameter for ListObjectsV2.
func StartAfter(value string) Option {
	return addParam("start-after", value)
}
|
||||
|
||||
// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2
|
||||
func ContinuationToken(value string) Option {
|
||||
if value == "" {
|
||||
return addParam("continuation-token", nil)
|
||||
}
|
||||
return addParam("continuation-token", value)
|
||||
}
|
||||
|
||||
// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2
|
||||
func FetchOwner(value bool) Option {
|
||||
if value {
|
||||
return addParam("fetch-owner", "true")
|
||||
}
|
||||
return addParam("fetch-owner", "false")
|
||||
}
|
||||
|
||||
// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
	return addArg(deleteObjectsQuiet, isQuiet)
}

// StorageClass sets the bucket storage class.
func StorageClass(value StorageClassType) Option {
	return addArg(storageClass, value)
}

// RedundancyType sets the bucket data redundancy type.
func RedundancyType(value DataRedundancyType) Option {
	return addArg(redundancyType, value)
}

// ObjectHashFunc sets the hash function used to compute the object hash.
// (The previous comment was a copy-paste of RedundancyType's.)
func ObjectHashFunc(value ObjecthashFuncType) Option {
	return addArg(objectHashFunc, value)
}

// WithContext returns an option that sets the context for requests.
func WithContext(ctx context.Context) Option {
	return addArg(contextArg, ctx)
}
|
||||
|
||||
// cpConfig is the checkpoint configuration for resumable transfers.
type cpConfig struct {
	IsEnable bool   // whether checkpointing is enabled
	FilePath string // explicit checkpoint file path
	DirPath  string // directory in which the checkpoint file is created
}

// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
func Checkpoint(isEnable bool, filePath string) Option {
	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
}

// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
func CheckpointDir(isEnable bool, dirPath string) Option {
	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
}
|
||||
|
||||
// Routines sets the DownloadFile/UploadFile routine (goroutine) count.
func Routines(n int) Option {
	return addArg(routineNum, n)
}

// InitCRC sets the initial CRC value for AppendObject.
func InitCRC(initCRC uint64) Option {
	return addArg(initCRC64, initCRC)
}

// Progress sets the progress listener.
func Progress(listener ProgressListener) Option {
	return addArg(progressListener, listener)
}

// GetResponseHeader captures the response HTTP header into respHeader.
func GetResponseHeader(respHeader *http.Header) Option {
	return addArg(responseHeader, respHeader)
}

// CallbackResult captures the raw callback response body into body.
func CallbackResult(body *[]byte) Option {
	return addArg(responseBody, body)
}
|
||||
|
||||
// ResponseContentType is an option to set response-content-type param.
func ResponseContentType(value string) Option {
	return addParam("response-content-type", value)
}

// ResponseContentLanguage is an option to set response-content-language param.
func ResponseContentLanguage(value string) Option {
	return addParam("response-content-language", value)
}

// ResponseExpires is an option to set response-expires param.
func ResponseExpires(value string) Option {
	return addParam("response-expires", value)
}

// ResponseCacheControl is an option to set response-cache-control param.
func ResponseCacheControl(value string) Option {
	return addParam("response-cache-control", value)
}

// ResponseContentDisposition is an option to set response-content-disposition param.
func ResponseContentDisposition(value string) Option {
	return addParam("response-content-disposition", value)
}

// ResponseContentEncoding is an option to set response-content-encoding param.
func ResponseContentEncoding(value string) Option {
	return addParam("response-content-encoding", value)
}

// Process is an option to set x-oss-process param.
func Process(value string) Option {
	return addParam("x-oss-process", value)
}

// TrafficLimitParam is an option to set x-oss-traffic-limit param.
func TrafficLimitParam(value int64) Option {
	return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
}

// SetHeader allows users to set personalized HTTP headers.
func SetHeader(key string, value interface{}) Option {
	return setHeader(key, value)
}

// AddParam allows users to set personalized HTTP params.
func AddParam(key string, value interface{}) Option {
	return addParam(key, value)
}
|
||||
|
||||
// setHeader records value as an HTTP-header option; nil values are ignored.
func setHeader(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionHTTP}
		return nil
	}
}

// addParam records value as a URL-parameter option; nil values are ignored.
func addParam(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionParam}
		return nil
	}
}

// addArg records value as a function-argument option; nil values are ignored.
func addArg(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionArg}
		return nil
	}
}
|
||||
|
||||
func handleOptions(headers map[string]string, options []Option) error {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range params {
|
||||
if v.Type == optionHTTP {
|
||||
headers[k] = v.Value.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetRawParams(options []Option) (map[string]interface{}, error) {
|
||||
// Option
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
paramsm := map[string]interface{}{}
|
||||
// Serialize
|
||||
for k, v := range params {
|
||||
if v.Type == optionParam {
|
||||
vs := params[k]
|
||||
paramsm[k] = vs.Value.(string)
|
||||
}
|
||||
}
|
||||
|
||||
return paramsm, nil
|
||||
}
|
||||
|
||||
func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := params[param]; ok {
|
||||
return val.Value, nil
|
||||
}
|
||||
return defaultVal, nil
|
||||
}
|
||||
|
||||
func IsOptionSet(options []Option, option string) (bool, interface{}, error) {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := params[option]; ok {
|
||||
return true, val.Value, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
func DeleteOption(options []Option, strKey string) []Option {
|
||||
var outOption []Option
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
option(params)
|
||||
_, exist := params[strKey]
|
||||
if !exist {
|
||||
outOption = append(outOption, option)
|
||||
} else {
|
||||
delete(params, strKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
return outOption
|
||||
}
|
||||
|
||||
func GetRequestId(header http.Header) string {
|
||||
return header.Get("x-oss-request-id")
|
||||
}
|
||||
|
||||
func GetVersionId(header http.Header) string {
|
||||
return header.Get("x-oss-version-id")
|
||||
}
|
||||
|
||||
func GetCopySrcVersionId(header http.Header) string {
|
||||
return header.Get("x-oss-copy-source-version-id")
|
||||
}
|
||||
|
||||
func GetDeleteMark(header http.Header) bool {
|
||||
value := header.Get("x-oss-delete-marker")
|
||||
if strings.ToUpper(value) == "TRUE" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetQosDelayTime(header http.Header) string {
|
||||
return header.Get("x-oss-qos-delay-time")
|
||||
}
|
||||
|
||||
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
|
||||
func AllowSameActionOverLap(enabled bool) Option {
|
||||
if enabled {
|
||||
return setHeader(HTTPHeaderAllowSameActionOverLap, "true")
|
||||
} else {
|
||||
return setHeader(HTTPHeaderAllowSameActionOverLap, "false")
|
||||
}
|
||||
}
|
||||
|
||||
func GetCallbackBody(options []Option, resp *Response, callbackSet bool) error {
|
||||
var err error
|
||||
|
||||
// get response body
|
||||
if callbackSet {
|
||||
err = setBody(options, resp)
|
||||
} else {
|
||||
callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
|
||||
if callback != nil {
|
||||
err = setBody(options, resp)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func setBody(options []Option, resp *Response) error {
|
||||
respBody, _ := FindOption(options, responseBody, nil)
|
||||
if respBody != nil && resp != nil {
|
||||
pRespBody := respBody.(*[]byte)
|
||||
pBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pBody != nil {
|
||||
*pRespBody = pBody
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
116
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
generated
vendored
116
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
generated
vendored
@@ -1,116 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ProgressEventType defines the kind of transfer progress event being
// reported to a ProgressListener.
type ProgressEventType int

const (
	// TransferStartedEvent transfer started; TotalBytes is set.
	TransferStartedEvent ProgressEventType = 1 + iota
	// TransferDataEvent data transferred; ConsumedBytes and TotalBytes are set.
	TransferDataEvent
	// TransferCompletedEvent transfer completed.
	TransferCompletedEvent
	// TransferFailedEvent transfer encountered an error.
	TransferFailedEvent
)

// ProgressEvent defines a single progress notification.
type ProgressEvent struct {
	ConsumedBytes int64             // bytes transferred so far
	TotalBytes int64                // total bytes expected (0 when unknown to the caller)
	RwBytes int64                   // bytes moved by the read/write that triggered this event
	EventType ProgressEventType
}

// ProgressListener receives progress change notifications.
type ProgressListener interface {
	ProgressChanged(event *ProgressEvent)
}
|
||||
|
||||
// -------------------- Private --------------------
|
||||
|
||||
func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
|
||||
return &ProgressEvent{
|
||||
ConsumedBytes: consumed,
|
||||
TotalBytes: total,
|
||||
RwBytes: rwBytes,
|
||||
EventType: eventType}
|
||||
}
|
||||
|
||||
// publishProgress
|
||||
func publishProgress(listener ProgressListener, event *ProgressEvent) {
|
||||
if listener != nil && event != nil {
|
||||
listener.ProgressChanged(event)
|
||||
}
|
||||
}
|
||||
|
||||
// readerTracker records how many bytes a tee reader has delivered, so a
// caller holding the tracker can observe progress from outside.
type readerTracker struct {
	completedBytes int64 // bytes read so far through the associated teeReader
}

// teeReader is the io.ReadCloser returned by TeeReader. Each Read is
// mirrored to writer (when non-nil), reported to listener (when
// non-nil), and recorded in tracker (when non-nil).
type teeReader struct {
	reader io.Reader                // underlying data source
	writer io.Writer                // mirror target (used for CRC accumulation by callers)
	listener ProgressListener       // optional progress sink
	consumedBytes int64             // running total of bytes read
	totalBytes int64                // expected total, 0 when unknown
	tracker *readerTracker          // optional external progress tracker
}
|
||||
|
||||
// TeeReader returns a Reader that writes to w what it reads from r.
|
||||
// All reads from r performed through it are matched with
|
||||
// corresponding writes to w. There is no internal buffering -
|
||||
// the write must complete before the read completes.
|
||||
// Any error encountered while writing is reported as a read error.
|
||||
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
|
||||
return &teeReader{
|
||||
reader: reader,
|
||||
writer: writer,
|
||||
listener: listener,
|
||||
consumedBytes: 0,
|
||||
totalBytes: totalBytes,
|
||||
tracker: tracker,
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads from the underlying reader, then (for a successful read)
// mirrors the bytes to the writer, publishes a TransferDataEvent, and
// updates the tracker. A non-EOF read error publishes a
// TransferFailedEvent before being returned.
func (t *teeReader) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)

	// Read encountered an error (EOF is not treated as a failure).
	if err != nil && err != io.EOF {
		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
		publishProgress(t.listener, event)
	}

	if n > 0 {
		t.consumedBytes += int64(n)
		// Mirror to the writer (callers use this for CRC accumulation).
		// NOTE: the inner n/err deliberately shadow the outer ones, so a
		// write error returns the WRITE count — same shape as stdlib
		// io.TeeReader.
		if t.writer != nil {
			if n, err := t.writer.Write(p[:n]); err != nil {
				return n, err
			}
		}
		// Publish progress.
		if t.listener != nil {
			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
			publishProgress(t.listener, event)
		}
		// Update the external tracker.
		if t.tracker != nil {
			t.tracker.completedBytes = t.consumedBytes
		}
	}

	return
}
|
||||
|
||||
func (t *teeReader) Close() error {
|
||||
if rc, ok := t.reader.(io.ReadCloser); ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
12
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
generated
vendored
12
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
generated
vendored
@@ -1,12 +0,0 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import "net/http"
|
||||
|
||||
// http.ErrUseLastResponse is only defined from go1.7 onward, so on
// older toolchains redirects cannot be disabled this way.

// disableHTTPRedirect is intentionally a no-op on Go < 1.7 (see the
// go1.7 build of this function for the real implementation).
func disableHTTPRedirect(client *http.Client) {

}
|
||||
13
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
generated
vendored
13
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
generated
vendored
@@ -1,13 +0,0 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import "net/http"
|
||||
|
||||
// http.ErrUseLastResponse only is defined go1.7 onward
|
||||
func disableHTTPRedirect(client *http.Client) {
|
||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
||||
197
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
generated
vendored
197
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
generated
vendored
@@ -1,197 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CreateSelectCsvObjectMeta creates the CSV select meta for an object.
//
// key      the object key.
// csvMeta  the CSV file meta request; its fields are base64-encoded
//          before being marshalled to XML.
// options  options for the meta-creation request.
//
// Returns the MetaEndFrameCSV parsed from the response, and an error
// which is nil on success.
func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
	var endFrame MetaEndFrameCSV
	params := map[string]interface{}{}
	params["x-oss-process"] = "csv/meta"

	csvMeta.encodeBase64()
	bs, err := xml.Marshal(csvMeta)
	if err != nil {
		return endFrame, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return endFrame, err
	}
	defer resp.Body.Close()

	// Draining resp (a SelectObjectResponse) drives the frame parser,
	// which populates resp.Frame.MetaEndFrameCSV as a side effect.
	_, err = ioutil.ReadAll(resp)

	return resp.Frame.MetaEndFrameCSV, err
}
|
||||
|
||||
// CreateSelectJsonObjectMeta creates the JSON select meta for an object.
//
// key       the object key.
// jsonMeta  the JSON file meta request, marshalled to XML.
// options   options for the meta-creation request.
//
// Returns the MetaEndFrameJSON parsed from the response, and an error
// which is nil on success.
//
// NOTE(review): unlike CreateSelectCsvObjectMeta, this does not call an
// encodeBase64 step on the request — confirm whether that asymmetry is
// intentional for JSON meta requests.
func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
	var endFrame MetaEndFrameJSON
	params := map[string]interface{}{}
	params["x-oss-process"] = "json/meta"

	bs, err := xml.Marshal(jsonMeta)
	if err != nil {
		return endFrame, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return endFrame, err
	}
	defer resp.Body.Close()

	// Draining resp drives the frame parser, which populates
	// resp.Frame.MetaEndFrameJSON as a side effect.
	_, err = ioutil.ReadAll(resp)

	return resp.Frame.MetaEndFrameJSON, err
}
|
||||
|
||||
// SelectObject is the select-object API for CSV and JSON files.
//
// key        the object key.
// selectReq  the select request; CSV is assumed when its JSON input
//            serialization is empty, JSON otherwise. Fields are
//            base64-encoded before XML marshalling.
// options    options for the select request.
//
// Returns an io.ReadCloser for the selected data — the caller must
// Close it — and an error which is nil on success.
func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
	params := map[string]interface{}{}
	if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
		params["x-oss-process"] = "csv/select" // default select csv file
	} else {
		params["x-oss-process"] = "json/select"
	}
	selectReq.encodeBase64()
	bs, err := xml.Marshal(selectReq)
	if err != nil {
		return nil, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return nil, err
	}
	// Propagate the caller's CRC preference into the frame parser.
	if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
		resp.Frame.EnablePayloadCrc = true
	}
	// When the server signals raw output, the body bypasses frame parsing.
	resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"

	return resp, err
}
|
||||
|
||||
// DoPostSelectObject performs the SelectObject/CreateMeta POST for CSV
// and JSON files.
//
// key      the object key.
// params   the x-oss-process resource: csv/meta, json/meta, csv/select
//          or json/select.
// buf      the XML-encoded request body.
// options  options for the request.
//
// Returns a SelectObjectResponse wrapping the HTTP response, and an
// error which is nil on success.
func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) {
	resp, err := bucket.do("POST", key, params, options, buf, nil)
	if err != nil {
		return nil, err
	}

	result := &SelectObjectResponse{
	Body: resp.Body,
	StatusCode: resp.StatusCode,
	Frame: SelectObjectResult{},
	}
	result.Headers = resp.Headers
	// result.Frame = SelectObjectResult{}
	result.ReadTimeOut = bucket.GetConfig().Timeout

	// Progress listener, if one was supplied via options.
	listener := GetProgressListener(options)

	// CRC32 accumulator used by the frame parser.
	// NOTE(review): the TeeReader below is created with a nil writer, so
	// WriterForCheckCrc32 is fed by the frame parser rather than by the
	// tee — confirm this is the intended wiring.
	crcCalc := crc32.NewIEEE()
	result.WriterForCheckCrc32 = crcCalc
	result.Body = TeeReader(resp.Body, nil, 0, listener, nil)

	err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK})

	return result, err
}
|
||||
|
||||
// SelectObjectIntoFile runs SelectObject and saves the result to a
// local file.
//
// key        the object key.
// fileName   the destination path; data is written to fileName +
//            TempFileSuffix first and renamed into place on success.
// selectReq  the select request (CSV when its JSON input serialization
//            is empty, JSON otherwise).
// options    options for the select request.
//
// Returns nil on success, otherwise an error.
func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error {
	tempFilePath := fileName + TempFileSuffix

	params := map[string]interface{}{}
	if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
		params["x-oss-process"] = "csv/select" // default select csv file
	} else {
		params["x-oss-process"] = "json/select"
	}
	selectReq.encodeBase64()
	bs, err := xml.Marshal(selectReq)
	if err != nil {
		return err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return err
	}
	defer resp.Close()

	// If the local file does not exist, create a new one; if it exists,
	// truncate and overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
	if err != nil {
		return err
	}

	// Stream the selected data into the temp file.
	_, err = io.Copy(fd, resp)
	fd.Close()
	if err != nil {
		return err
	}

	// Atomically move the finished temp file into place.
	return os.Rename(tempFilePath, fileName)
}
|
||||
365
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
generated
vendored
365
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
generated
vendored
@@ -1,365 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Adapter types for SelectObject's response.
//
// The response consists of frames, each laid out as:
//
//	Type | Payload Length | Header Checksum | Payload | Payload Checksum
//	<-4-> <---4 bytes----> <----4 bytes----> <--n/a--> <----4 bytes---->
//
// Three kinds of frames occur:
//
//	Data frame (type 8388609):
//	    Payload: Offset (8 bytes) | Data
//
//	Continuous frame (type 8388612):
//	    Payload: Offset (8 bytes)
//
//	End frame (type 8388613):
//	    Payload: Offset (8 bytes) | total scanned bytes (8 bytes) |
//	             http status code (4 bytes) | error message (variable)

// SelectObjectResponse defines the HTTP response from OSS SelectObject.
// Reading from it parses the frame stream and yields only payload data.
type SelectObjectResponse struct {
	StatusCode int
	Headers http.Header
	Body io.ReadCloser                // raw frame stream from the server
	Frame SelectObjectResult          // parser state plus decoded end/meta frames
	ReadTimeOut uint                  // per-read timeout, in seconds
	ClientCRC32 uint32                // CRC computed locally over payloads
	ServerCRC32 uint32                // CRC carried in the frame checksums
	WriterForCheckCrc32 hash.Hash32   // accumulator behind ClientCRC32
	Finish bool                       // set once the end/meta frame is consumed
}

// Read parses frames from the body and copies payload data into p.
func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) {
	n, err = sr.readFrames(p)
	return
}

// Close closes the HTTP response body.
func (sr *SelectObjectResponse) Close() error {
	return sr.Body.Close()
}

// PostSelectResult wraps the response of a SelectObject request.
type PostSelectResult struct {
	Response *SelectObjectResponse
}
|
||||
|
||||
// readFrames is the frame-stream state machine behind Read. It fills p
// with payload bytes from data frames, verifies per-frame payload
// checksums, and terminates (setting sr.Finish) on an end or meta-end
// frame. When the server signalled raw output, the body is passed
// through untouched.
func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) {
	var nn int
	var err error
	var checkValid bool
	if sr.Frame.OutputRawData == true {
		nn, err = sr.Body.Read(p)
		return nn, err
	}

	if sr.Finish {
		return 0, io.EOF
	}

	for {
		// A frame header is parsed only once per frame; OpenLine marks
		// that the current frame's header has already been consumed.
		if sr.Frame.OpenLine != true {
			err = sr.analysisHeader()
			if err != nil {
				return nn, err
			}
		}

		if sr.Frame.FrameType == DataFrameType {
			n, err := sr.analysisData(p[nn:])
			if err != nil {
				return nn, err
			}
			nn += n

			// Once the whole payload (PayloadLength minus the 8-byte
			// offset) has been consumed, verify its checksum and reset
			// the frame state for the next frame.
			if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 {
				checkValid, err = sr.checkPayloadSum()
				if err != nil || !checkValid {
					return nn, fmt.Errorf("%s", err.Error())
				}
				sr.emptyFrame()
			}

			if nn == len(p) {
				return nn, nil
			}
		} else if sr.Frame.FrameType == ContinuousFrameType {
			checkValid, err = sr.checkPayloadSum()
			if err != nil || !checkValid {
				return nn, fmt.Errorf("%s", err.Error())
			}
			sr.Frame.OpenLine = false
		} else if sr.Frame.FrameType == EndFrameType {
			err = sr.analysisEndFrame()
			if err != nil {
				return nn, err
			}
			checkValid, err = sr.checkPayloadSum()
			if checkValid {
				sr.Finish = true
			}
			return nn, err
		} else if sr.Frame.FrameType == MetaEndFrameCSVType {
			err = sr.analysisMetaEndFrameCSV()
			if err != nil {
				return nn, err
			}
			checkValid, err = sr.checkPayloadSum()
			if checkValid {
				sr.Finish = true
			}
			return nn, err
		} else if sr.Frame.FrameType == MetaEndFrameJSONType {
			err = sr.analysisMetaEndFrameJSON()
			if err != nil {
				return nn, err
			}
			checkValid, err = sr.checkPayloadSum()
			if checkValid {
				sr.Finish = true
			}
			return nn, err
		}
	}
	return nn, nil
}
|
||||
|
||||
// chanReadIO is the result a readLen helper goroutine delivers over its
// channel: how many bytes were read and the terminating error, if any.
type chanReadIO struct {
	readLen int
	err error
}
|
||||
|
||||
func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) {
|
||||
r := sr.Body
|
||||
ch := make(chan chanReadIO, 1)
|
||||
defer close(ch)
|
||||
go func(p []byte) {
|
||||
var needReadLength int
|
||||
readChan := chanReadIO{}
|
||||
needReadLength = len(p)
|
||||
for {
|
||||
n, err := r.Read(p[readChan.readLen:needReadLength])
|
||||
readChan.readLen += n
|
||||
if err != nil {
|
||||
readChan.err = err
|
||||
ch <- readChan
|
||||
return
|
||||
}
|
||||
|
||||
if readChan.readLen == needReadLength {
|
||||
break
|
||||
}
|
||||
}
|
||||
ch <- readChan
|
||||
}(p)
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second * timeOut):
|
||||
return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p))
|
||||
case result := <-ch:
|
||||
return result.readLen, result.err
|
||||
}
|
||||
}
|
||||
|
||||
// analysisHeader reads and decodes the 20-byte frame header from the
// response body: version + 3-byte frame type, payload length, header
// checksum, and the 8-byte offset. It validates the frame type, marks
// the frame as open, and folds the offset bytes into the payload CRC.
func (sr *SelectObjectResponse) analysisHeader() error {
	headFrameByte := make([]byte, 20)
	_, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
	}

	// Byte 0 is the version; the remaining three bytes of the first
	// word are the frame type, so zero the version before decoding.
	frameTypeByte := headFrameByte[0:4]
	sr.Frame.Version = frameTypeByte[0]
	frameTypeByte[0] = 0
	bytesToInt(frameTypeByte, &sr.Frame.FrameType)

	if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType &&
		sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType {
		return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType)
	}

	payloadLengthByte := headFrameByte[4:8]
	bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength)
	headCheckSumByte := headFrameByte[8:12]
	bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum)
	byteOffset := headFrameByte[12:20]
	bytesToInt(byteOffset, &sr.Frame.Offset)
	sr.Frame.OpenLine = true

	// The offset is part of the payload for CRC purposes.
	err = sr.writerCheckCrc32(byteOffset)
	return err
}
|
||||
|
||||
// analysisData reads payload bytes of the current data frame into p,
// bounded by both len(p) and the frame's remaining payload
// (PayloadLength minus the 8-byte offset, minus what was already
// consumed). It advances ConsumedBytesLength and folds the bytes into
// the payload CRC. Returns the byte count read.
func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) {
	var needReadLength int32
	lenP := int32(len(p))
	restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength
	if lenP <= restByteLength {
		needReadLength = lenP
	} else {
		needReadLength = restByteLength
	}
	n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut))
	if err != nil {
		return n, fmt.Errorf("read frame data error,%s", err.Error())
	}
	sr.Frame.ConsumedBytesLength += int32(n)
	err = sr.writerCheckCrc32(p[:n])
	return n, err
}
|
||||
|
||||
// analysisEndFrame reads and decodes an end frame's payload (total
// scanned bytes, HTTP status code, error message) into sr.Frame.EndFrame
// and folds the payload into the CRC. The leading 8-byte offset was
// already consumed with the header, hence PayloadLength-8 here.
func (sr *SelectObjectResponse) analysisEndFrame() error {
	var eF EndFrame
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read end frame error:%s", err.Error())
	}
	bytesToInt(payLoadBytes[0:8], &eF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode)
	// Error message occupies the rest: total payload minus offset(8) +
	// TotalScanned(8) + status(4) = PayloadLength - 20.
	errMsgLength := sr.Frame.PayloadLength - 20
	eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12])
	sr.Frame.EndFrame.TotalScanned = eF.TotalScanned
	sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode
	sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg
	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}
|
||||
|
||||
// analysisMetaEndFrameCSV reads and decodes a CSV meta-end frame's
// payload (total scanned, status, splits, rows, columns, error message)
// into sr.Frame.MetaEndFrameCSV and folds the payload into the CRC.
func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error {
	var mCF MetaEndFrameCSV
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read meta end csv frame error:%s", err.Error())
	}

	bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &mCF.Status)
	bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount)
	bytesToInt(payLoadBytes[16:24], &mCF.RowsCount)
	bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount)
	// Remaining bytes after offset(8) + fixed fields(28) = PayloadLength - 36.
	errMsgLength := sr.Frame.PayloadLength - 36
	mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28])
	sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg
	sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
	sr.Frame.MetaEndFrameCSV.Status = mCF.Status
	sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
	sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
	sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}
|
||||
|
||||
// analysisMetaEndFrameJSON reads and decodes a JSON meta-end frame's
// payload (total scanned, status, splits, rows, error message) into
// sr.Frame.MetaEndFrameJSON and folds the payload into the CRC.
func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
	var mJF MetaEndFrameJSON
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read meta end json frame error:%s", err.Error())
	}

	bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &mJF.Status)
	bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
	bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
	// Remaining bytes after offset(8) + fixed fields(24) = PayloadLength - 32.
	errMsgLength := sr.Frame.PayloadLength - 32
	mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
	sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
	sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
	sr.Frame.MetaEndFrameJSON.Status = mJF.Status
	sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
	sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount

	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}
|
||||
|
||||
// checkPayloadSum reads the 4-byte payload checksum that ends a frame
// and, when CRC checking is enabled, compares it against the locally
// accumulated CRC. Returns (true, nil) on a match or when checking is
// disabled; (false, error) on mismatch or read failure.
// NOTE(review): the final branch formats err.Error(); readLen appears
// to always pair a short read with a non-nil err, but confirm err
// cannot be nil when n != 4.
func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
	payLoadChecksumByte := make([]byte, 4)
	n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
	if n == 4 {
		bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
		sr.ServerCRC32 = sr.Frame.PayloadChecksum
		sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
		// A server CRC of 0 is treated as "not provided" and skipped.
		if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
			return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d",
				sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
		}
		return true, err
	}
	return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
}
|
||||
|
||||
func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
|
||||
err = nil
|
||||
if sr.Frame.EnablePayloadCrc == true {
|
||||
_, err = sr.WriterForCheckCrc32.Write(p)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// emptyFrame resets the per-frame parser state after a data frame has
// been fully consumed: a fresh CRC accumulator and zeroed frame fields.
// Note that Frame.EnablePayloadCrc and Frame.OutputRawData are
// deliberately left untouched — they are per-response settings, which
// is why the Frame struct is reset field-by-field rather than replaced
// wholesale.
func (sr *SelectObjectResponse) emptyFrame() {
	crcCalc := crc32.NewIEEE()
	sr.WriterForCheckCrc32 = crcCalc
	sr.Finish = false

	sr.Frame.ConsumedBytesLength = 0
	sr.Frame.OpenLine = false
	sr.Frame.Version = byte(0)
	sr.Frame.FrameType = 0
	sr.Frame.PayloadLength = 0
	sr.Frame.HeaderCheckSum = 0
	sr.Frame.Offset = 0
	sr.Frame.Data = ""

	sr.Frame.EndFrame.TotalScanned = 0
	sr.Frame.EndFrame.HTTPStatusCode = 0
	sr.Frame.EndFrame.ErrorMsg = ""

	sr.Frame.MetaEndFrameCSV.TotalScanned = 0
	sr.Frame.MetaEndFrameCSV.Status = 0
	sr.Frame.MetaEndFrameCSV.SplitsCount = 0
	sr.Frame.MetaEndFrameCSV.RowsCount = 0
	sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
	sr.Frame.MetaEndFrameCSV.ErrorMsg = ""

	sr.Frame.MetaEndFrameJSON.TotalScanned = 0
	sr.Frame.MetaEndFrameJSON.Status = 0
	sr.Frame.MetaEndFrameJSON.SplitsCount = 0
	sr.Frame.MetaEndFrameJSON.RowsCount = 0
	sr.Frame.MetaEndFrameJSON.ErrorMsg = ""

	sr.Frame.PayloadChecksum = 0
}
|
||||
|
||||
// bytesToInt decodes the big-endian bytes of b into the integer that
// ret points at. Decoding errors (e.g. a short buffer) are deliberately
// ignored, preserving the original best-effort behavior.
func bytesToInt(b []byte, ret interface{}) {
	binary.Read(bytes.NewBuffer(b), binary.BigEndian, ret)
}
|
||||
42
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
generated
vendored
42
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
generated
vendored
@@ -1,42 +0,0 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// newTransport builds the http.Transport for OSS connections (Go < 1.7
// variant; lacks the idle/total connection limits added in go1.7).
// The dialer applies the configured connect timeout and a 30s
// keep-alive, binds to config.LocalAddr when set, and wraps each
// connection with read/write timeouts via newTimeoutConn.
func newTransport(conn *Conn, config *Config) *http.Transport {
	httpTimeOut := conn.config.HTTPTimeout
	httpMaxConns := conn.config.HTTPMaxConns
	// New Transport
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			d := net.Dialer{
				Timeout: httpTimeOut.ConnectTimeout,
				KeepAlive: 30 * time.Second,
			}
			if config.LocalAddr != nil {
				d.LocalAddr = config.LocalAddr
			}
			conn, err := d.Dial(netw, addr)
			if err != nil {
				return nil, err
			}
			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	// Optionally skip TLS certificate verification (insecure; intended
	// for test/self-signed endpoints).
	if config.InsecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	return transport
}
|
||||
45
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
generated
vendored
45
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
generated
vendored
@@ -1,45 +0,0 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// newTransport builds the http.Transport for OSS connections (go1.7+
// variant, with idle/total connection limits). The dialer applies the
// configured connect timeout and a 30s keep-alive, binds to
// config.LocalAddr when set, and wraps each connection with read/write
// timeouts via newTimeoutConn.
func newTransport(conn *Conn, config *Config) *http.Transport {
	httpTimeOut := conn.config.HTTPTimeout
	httpMaxConns := conn.config.HTTPMaxConns
	// New Transport
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			d := net.Dialer{
				Timeout: httpTimeOut.ConnectTimeout,
				KeepAlive: 30 * time.Second,
			}
			if config.LocalAddr != nil {
				d.LocalAddr = config.LocalAddr
			}
			conn, err := d.Dial(netw, addr)
			if err != nil {
				return nil, err
			}
			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		MaxIdleConns: httpMaxConns.MaxIdleConns,
		MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
		MaxConnsPerHost: httpMaxConns.MaxConnsPerHost,
		IdleConnTimeout: httpTimeOut.IdleConnTimeout,
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	// Optionally skip TLS certificate verification (insecure; intended
	// for test/self-signed endpoints).
	if config.InsecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	return transport
}
|
||||
1695
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
generated
vendored
1695
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
generated
vendored
File diff suppressed because it is too large
Load Diff
578
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
generated
vendored
578
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
generated
vendored
@@ -1,578 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UploadFile is multipart file upload.
|
||||
//
|
||||
// objectKey the object name.
|
||||
// filePath the local file path to upload.
|
||||
// partSize the part size in byte.
|
||||
// options the options for uploading object.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < MinPartSize || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (100KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
|
||||
if cpFilePath != "" {
|
||||
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
|
||||
}
|
||||
|
||||
func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
absPath, _ := filepath.Abs(srcFile)
|
||||
cpFileName := getCpFileName(absPath, dest, "")
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// ----- concurrent upload without checkpoint -----
|
||||
|
||||
// getCpConfig gets checkpoint configuration
|
||||
func getCpConfig(options []Option) *cpConfig {
|
||||
cpcOpt, err := FindOption(options, checkpointConfig, nil)
|
||||
if err != nil || cpcOpt == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cpcOpt.(*cpConfig)
|
||||
}
|
||||
|
||||
// getCpFileName return the name of the checkpoint file
|
||||
func getCpFileName(src, dest, versionId string) string {
|
||||
md5Ctx := md5.New()
|
||||
md5Ctx.Write([]byte(src))
|
||||
srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
|
||||
md5Ctx.Reset()
|
||||
md5Ctx.Write([]byte(dest))
|
||||
destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
|
||||
if versionId == "" {
|
||||
return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
|
||||
}
|
||||
|
||||
md5Ctx.Reset()
|
||||
md5Ctx.Write([]byte(versionId))
|
||||
versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
|
||||
}
|
||||
|
||||
// getRoutines gets the routine count. by default it's 1.
|
||||
func getRoutines(options []Option) int {
|
||||
rtnOpt, err := FindOption(options, routineNum, nil)
|
||||
if err != nil || rtnOpt == nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
rs := rtnOpt.(int)
|
||||
if rs < 1 {
|
||||
rs = 1
|
||||
} else if rs > 100 {
|
||||
rs = 100
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// getPayer return the payer of the request
|
||||
func getPayer(options []Option) string {
|
||||
payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if err != nil || payerOpt == nil {
|
||||
return ""
|
||||
}
|
||||
return payerOpt.(string)
|
||||
}
|
||||
|
||||
// GetProgressListener gets the progress callback
|
||||
func GetProgressListener(options []Option) ProgressListener {
|
||||
isSet, listener, _ := IsOptionSet(options, progressListener)
|
||||
if !isSet {
|
||||
return nil
|
||||
}
|
||||
return listener.(ProgressListener)
|
||||
}
|
||||
|
||||
// uploadPartHook is for testing usage
|
||||
type uploadPartHook func(id int, chunk FileChunk) error
|
||||
|
||||
var uploadPartHooker uploadPartHook = defaultUploadPart
|
||||
|
||||
func defaultUploadPart(id int, chunk FileChunk) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// workerArg defines worker argument structure
|
||||
type workerArg struct {
|
||||
bucket *Bucket
|
||||
filePath string
|
||||
imur InitiateMultipartUploadResult
|
||||
options []Option
|
||||
hook uploadPartHook
|
||||
}
|
||||
|
||||
// worker is the worker coroutine function
|
||||
type defaultUploadProgressListener struct {
|
||||
}
|
||||
|
||||
// ProgressChanged no-ops
|
||||
func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
|
||||
}
|
||||
|
||||
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
|
||||
for chunk := range jobs {
|
||||
if err := arg.hook(id, chunk); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
var respHeader http.Header
|
||||
p := Progress(&defaultUploadProgressListener{})
|
||||
opts := make([]Option, len(arg.options)+2)
|
||||
opts = append(opts, arg.options...)
|
||||
|
||||
// use defaultUploadProgressListener
|
||||
opts = append(opts, p, GetResponseHeader(&respHeader))
|
||||
|
||||
startT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
|
||||
endT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
if err != nil {
|
||||
arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// scheduler function
|
||||
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
|
||||
for _, chunk := range chunks {
|
||||
jobs <- chunk
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
func getTotalBytes(chunks []FileChunk) int64 {
|
||||
var tb int64
|
||||
for _, chunk := range chunks {
|
||||
tb += chunk.Size
|
||||
}
|
||||
return tb
|
||||
}
|
||||
|
||||
// uploadFile is a concurrent upload, without checkpoint
|
||||
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
chunks, err := SplitFileByPartSize(filePath, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
abortOptions := ChoiceAbortPartOption(options)
|
||||
|
||||
// Initialize the multipart upload
|
||||
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jobs := make(chan FileChunk, len(chunks))
|
||||
results := make(chan UploadPart, len(chunks))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
var completedBytes int64
|
||||
totalBytes := getTotalBytes(chunks)
|
||||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutine
|
||||
arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go worker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Schedule the jobs
|
||||
go scheduler(jobs, chunks)
|
||||
|
||||
// Waiting for the upload finished
|
||||
completed := 0
|
||||
parts := make([]UploadPart, len(chunks))
|
||||
for completed < len(chunks) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
parts[part.PartNumber-1] = part
|
||||
completedBytes += chunks[part.PartNumber-1].Size
|
||||
|
||||
// why RwBytes in ProgressEvent is 0 ?
|
||||
// because read or write event has been notified in teeReader.Read()
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(chunks) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multpart upload
|
||||
_, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- concurrent upload with checkpoint -----
|
||||
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
|
||||
|
||||
type uploadCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // Checkpoint file content's MD5
|
||||
FilePath string // Local file path
|
||||
FileStat cpStat // File state
|
||||
ObjectKey string // Key
|
||||
UploadID string // Upload ID
|
||||
Parts []cpPart // All parts of the local file
|
||||
CallbackVal string
|
||||
CallbackBody *[]byte
|
||||
}
|
||||
|
||||
type cpStat struct {
|
||||
Size int64 // File size
|
||||
LastModified time.Time // File's last modified time
|
||||
MD5 string // Local file's MD5
|
||||
}
|
||||
|
||||
type cpPart struct {
|
||||
Chunk FileChunk // File chunk
|
||||
Part UploadPart // Uploaded part
|
||||
IsCompleted bool // Upload complete flag
|
||||
}
|
||||
|
||||
// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid.
|
||||
func (cp uploadCheckpoint) isValid(filePath string,options []Option) (bool, error) {
|
||||
|
||||
callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "")
|
||||
if callbackVal != "" && cp.CallbackVal != callbackVal {
|
||||
return false, nil
|
||||
}
|
||||
callbackBody, _ := FindOption(options, responseBody, nil)
|
||||
if callbackBody != nil{
|
||||
body, _ := json.Marshal(callbackBody)
|
||||
if bytes.Equal(*cp.CallbackBody, body) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// Compare the CP's magic number and MD5.
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Make sure if the local file is updated.
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
st, err := fd.Stat()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
md, err := calcFileMD5(filePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the file size, file's last modified time and file's MD5
|
||||
if cp.FileStat.Size != st.Size() ||
|
||||
!cp.FileStat.LastModified.Equal(st.ModTime()) ||
|
||||
cp.FileStat.MD5 != md {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the file
|
||||
func (cp *uploadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// dump dumps to the local file
|
||||
func (cp *uploadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// updatePart updates the part status
|
||||
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
|
||||
cp.Parts[part.PartNumber-1].Part = part
|
||||
cp.Parts[part.PartNumber-1].IsCompleted = true
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
func (cp *uploadCheckpoint) todoParts() []FileChunk {
|
||||
fcs := []FileChunk{}
|
||||
for _, part := range cp.Parts {
|
||||
if !part.IsCompleted {
|
||||
fcs = append(fcs, part.Chunk)
|
||||
}
|
||||
}
|
||||
return fcs
|
||||
}
|
||||
|
||||
// allParts returns all parts
|
||||
func (cp *uploadCheckpoint) allParts() []UploadPart {
|
||||
ps := []UploadPart{}
|
||||
for _, part := range cp.Parts {
|
||||
ps = append(ps, part.Part)
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns completed bytes count
|
||||
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for _, part := range cp.Parts {
|
||||
if part.IsCompleted {
|
||||
completedBytes += part.Chunk.Size
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// calcFileMD5 calculates the MD5 for the specified local file
|
||||
func calcFileMD5(filePath string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// prepare initializes the multipart upload
|
||||
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
|
||||
// CP
|
||||
cp.Magic = uploadCpMagic
|
||||
cp.FilePath = filePath
|
||||
cp.ObjectKey = objectKey
|
||||
|
||||
// Local file
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
st, err := fd.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.FileStat.Size = st.Size()
|
||||
cp.FileStat.LastModified = st.ModTime()
|
||||
callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "")
|
||||
cp.CallbackVal = callbackVal.(string)
|
||||
callbackBody, _ := FindOption(options, responseBody, nil)
|
||||
if callbackBody != nil {
|
||||
body, _ := json.Marshal(callbackBody)
|
||||
cp.CallbackBody = &body
|
||||
}
|
||||
md, err := calcFileMD5(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.FileStat.MD5 = md
|
||||
|
||||
// Chunks
|
||||
parts, err := SplitFileByPartSize(filePath, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.Parts = make([]cpPart, len(parts))
|
||||
for i, part := range parts {
|
||||
cp.Parts[i].Chunk = part
|
||||
cp.Parts[i].IsCompleted = false
|
||||
}
|
||||
|
||||
// Init load
|
||||
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.UploadID = imur.UploadID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// complete completes the multipart upload and deletes the local CP files
|
||||
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
|
||||
imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
|
||||
Key: cp.ObjectKey, UploadID: cp.UploadID}
|
||||
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
|
||||
if err != nil {
|
||||
if e, ok := err.(ServiceError);ok && (e.StatusCode == 203 || e.StatusCode == 404) {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
return err
|
||||
}
|
||||
|
||||
// uploadFileWithCp handles concurrent upload with checkpoint
|
||||
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
|
||||
// Load CP data
|
||||
ucp := uploadCheckpoint{}
|
||||
err := ucp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Load error or the CP data is invalid.
|
||||
valid, err := ucp.isValid(filePath,options)
|
||||
if err != nil || !valid {
|
||||
if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
chunks := ucp.todoParts()
|
||||
imur := InitiateMultipartUploadResult{
|
||||
Bucket: bucket.BucketName,
|
||||
Key: objectKey,
|
||||
UploadID: ucp.UploadID}
|
||||
|
||||
jobs := make(chan FileChunk, len(chunks))
|
||||
results := make(chan UploadPart, len(chunks))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
completedBytes := ucp.getCompletedBytes()
|
||||
|
||||
// why RwBytes in ProgressEvent is 0 ?
|
||||
// because read or write event has been notified in teeReader.Read()
|
||||
event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the workers
|
||||
arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go worker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Schedule jobs
|
||||
go scheduler(jobs, chunks)
|
||||
|
||||
// Waiting for the job finished
|
||||
completed := 0
|
||||
for completed < len(chunks) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ucp.updatePart(part)
|
||||
ucp.dump(cpFilePath)
|
||||
completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(chunks) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multipart upload
|
||||
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
|
||||
return err
|
||||
}
|
||||
674
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
generated
vendored
674
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
generated
vendored
@@ -1,674 +0,0 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var sys_name string
|
||||
var sys_release string
|
||||
var sys_machine string
|
||||
|
||||
var (
|
||||
escQuot = []byte(""") // shorter than """
|
||||
escApos = []byte("'") // shorter than "'"
|
||||
escAmp = []byte("&")
|
||||
escLT = []byte("<")
|
||||
escGT = []byte(">")
|
||||
escTab = []byte("	")
|
||||
escNL = []byte("
")
|
||||
escCR = []byte("
")
|
||||
escFFFD = []byte("\uFFFD") // Unicode replacement character
|
||||
)
|
||||
|
||||
func init() {
|
||||
sys_name = runtime.GOOS
|
||||
sys_release = "-"
|
||||
sys_machine = runtime.GOARCH
|
||||
}
|
||||
|
||||
// userAgent gets user agent
|
||||
// It has the SDK version information, OS information and GO version
|
||||
func userAgent() string {
|
||||
sys := getSysInfo()
|
||||
return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
|
||||
sys.release, sys.machine, runtime.Version())
|
||||
}
|
||||
|
||||
type sysInfo struct {
|
||||
name string // OS name such as windows/Linux
|
||||
release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
|
||||
machine string // CPU type amd64/x86_64
|
||||
}
|
||||
|
||||
// getSysInfo gets system info
|
||||
// gets the OS information and CPU type
|
||||
func getSysInfo() sysInfo {
|
||||
return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
|
||||
}
|
||||
|
||||
// GetRangeConfig gets the download range from the options.
|
||||
func GetRangeConfig(options []Option) (*UnpackedRange, error) {
|
||||
rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
|
||||
if err != nil || rangeOpt == nil {
|
||||
return nil, err
|
||||
}
|
||||
return ParseRange(rangeOpt.(string))
|
||||
}
|
||||
|
||||
// UnpackedRange
|
||||
type UnpackedRange struct {
|
||||
HasStart bool // Flag indicates if the start point is specified
|
||||
HasEnd bool // Flag indicates if the end point is specified
|
||||
Start int64 // Start point
|
||||
End int64 // End point
|
||||
}
|
||||
|
||||
// InvalidRangeError returns invalid range error
|
||||
func InvalidRangeError(r string) error {
|
||||
return fmt.Errorf("InvalidRange %s", r)
|
||||
}
|
||||
|
||||
func GetRangeString(unpackRange UnpackedRange) string {
|
||||
var strRange string
|
||||
if unpackRange.HasStart && unpackRange.HasEnd {
|
||||
strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
|
||||
} else if unpackRange.HasStart {
|
||||
strRange = fmt.Sprintf("%d-", unpackRange.Start)
|
||||
} else if unpackRange.HasEnd {
|
||||
strRange = fmt.Sprintf("-%d", unpackRange.End)
|
||||
}
|
||||
return strRange
|
||||
}
|
||||
|
||||
// ParseRange parse various styles of range such as bytes=M-N
|
||||
func ParseRange(normalizedRange string) (*UnpackedRange, error) {
|
||||
var err error
|
||||
hasStart := false
|
||||
hasEnd := false
|
||||
var start int64
|
||||
var end int64
|
||||
|
||||
// Bytes==M-N or ranges=M-N
|
||||
nrSlice := strings.Split(normalizedRange, "=")
|
||||
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
|
||||
// Bytes=M-N,X-Y
|
||||
rSlice := strings.Split(nrSlice[1], ",")
|
||||
rStr := rSlice[0]
|
||||
|
||||
if strings.HasSuffix(rStr, "-") { // M-
|
||||
startStr := rStr[:len(rStr)-1]
|
||||
start, err = strconv.ParseInt(startStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
} else if strings.HasPrefix(rStr, "-") { // -N
|
||||
len := rStr[1:]
|
||||
end, err = strconv.ParseInt(len, 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
if end == 0 { // -0
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
} else { // M-N
|
||||
valSlice := strings.Split(rStr, "-")
|
||||
if len(valSlice) != 2 {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
start, err = strconv.ParseInt(valSlice[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
end, err = strconv.ParseInt(valSlice[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
}
|
||||
|
||||
return &UnpackedRange{hasStart, hasEnd, start, end}, nil
|
||||
}
|
||||
|
||||
// AdjustRange returns adjusted range, adjust the range according to the length of the file
|
||||
func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
|
||||
if ur == nil {
|
||||
return 0, size
|
||||
}
|
||||
|
||||
if ur.HasStart && ur.HasEnd {
|
||||
start = ur.Start
|
||||
end = ur.End + 1
|
||||
if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
|
||||
start = 0
|
||||
end = size
|
||||
}
|
||||
} else if ur.HasStart {
|
||||
start = ur.Start
|
||||
end = size
|
||||
if ur.Start < 0 || ur.Start >= size {
|
||||
start = 0
|
||||
}
|
||||
} else if ur.HasEnd {
|
||||
start = size - ur.End
|
||||
end = size
|
||||
if ur.End < 0 || ur.End > size {
|
||||
start = 0
|
||||
end = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
|
||||
// gets the current time in Unix time, in seconds.
|
||||
func GetNowSec() int64 {
|
||||
return time.Now().Unix()
|
||||
}
|
||||
|
||||
// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed
|
||||
// since January 1, 1970 UTC. The result is undefined if the Unix time
|
||||
// in nanoseconds cannot be represented by an int64. Note that this
|
||||
// means the result of calling UnixNano on the zero Time is undefined.
|
||||
// gets the current time in Unix time, in nanoseconds.
|
||||
func GetNowNanoSec() int64 {
|
||||
return time.Now().UnixNano()
|
||||
}
|
||||
|
||||
// GetNowGMT gets the current time in GMT format.
|
||||
func GetNowGMT() string {
|
||||
return time.Now().UTC().Format(http.TimeFormat)
|
||||
}
|
||||
|
||||
// FileChunk is the file chunk definition
|
||||
type FileChunk struct {
|
||||
Number int // Chunk number
|
||||
Offset int64 // Chunk offset
|
||||
Size int64 // Chunk size.
|
||||
}
|
||||
|
||||
// SplitFileByPartNum splits big file into parts by the num of parts.
|
||||
// Split the file with specified parts count, returns the split result when error is nil.
|
||||
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
|
||||
if chunkNum <= 0 || chunkNum > 10000 {
|
||||
return nil, errors.New("chunkNum invalid")
|
||||
}
|
||||
|
||||
file, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if int64(chunkNum) > stat.Size() {
|
||||
return nil, errors.New("oss: chunkNum invalid")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
var chunk = FileChunk{}
|
||||
var chunkN = (int64)(chunkNum)
|
||||
for i := int64(0); i < chunkN; i++ {
|
||||
chunk.Number = int(i + 1)
|
||||
chunk.Offset = i * (stat.Size() / chunkN)
|
||||
if i == chunkN-1 {
|
||||
chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
|
||||
} else {
|
||||
chunk.Size = stat.Size() / chunkN
|
||||
}
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// SplitFileByPartSize splits big file into parts by the size of parts.
|
||||
// Splits the file by the part size. Returns the FileChunk when error is nil.
|
||||
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
|
||||
if chunkSize <= 0 {
|
||||
return nil, errors.New("chunkSize invalid")
|
||||
}
|
||||
|
||||
file, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var chunkN = stat.Size() / chunkSize
|
||||
if chunkN >= 10000 {
|
||||
return nil, errors.New("Too many parts, please increase part size")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
var chunk = FileChunk{}
|
||||
for i := int64(0); i < chunkN; i++ {
|
||||
chunk.Number = int(i + 1)
|
||||
chunk.Offset = i * chunkSize
|
||||
chunk.Size = chunkSize
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
if stat.Size()%chunkSize > 0 {
|
||||
chunk.Number = len(chunks) + 1
|
||||
chunk.Offset = int64(len(chunks)) * chunkSize
|
||||
chunk.Size = stat.Size() % chunkSize
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// GetPartEnd calculates the end position
|
||||
func GetPartEnd(begin int64, total int64, per int64) int64 {
|
||||
if begin+per > total {
|
||||
return total - 1
|
||||
}
|
||||
return begin + per - 1
|
||||
}
|
||||
|
||||
// CrcTable returns the table constructed from the specified polynomial
|
||||
var CrcTable = func() *crc64.Table {
|
||||
return crc64.MakeTable(crc64.ECMA)
|
||||
}
|
||||
|
||||
// CrcTable returns the table constructed from the specified polynomial
|
||||
var crc32Table = func() *crc32.Table {
|
||||
return crc32.MakeTable(crc32.IEEE)
|
||||
}
|
||||
|
||||
// choiceTransferPartOption choices valid option supported by Uploadpart or DownloadPart
|
||||
func ChoiceTransferPartOption(options []Option) []Option {
|
||||
var outOption []Option
|
||||
|
||||
listener, _ := FindOption(options, progressListener, nil)
|
||||
if listener != nil {
|
||||
outOption = append(outOption, Progress(listener.(ProgressListener)))
|
||||
}
|
||||
|
||||
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if payer != nil {
|
||||
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
|
||||
}
|
||||
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
outOption = append(outOption, VersionId(versionId.(string)))
|
||||
}
|
||||
|
||||
trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
|
||||
if trafficLimit != nil {
|
||||
speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
|
||||
outOption = append(outOption, TrafficLimitHeader(speed))
|
||||
}
|
||||
|
||||
respHeader, _ := FindOption(options, responseHeader, nil)
|
||||
if respHeader != nil {
|
||||
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
|
||||
}
|
||||
|
||||
return outOption
|
||||
}
|
||||
|
||||
// ChoiceCompletePartOption choices valid option supported by CompleteMulitiPart
|
||||
func ChoiceCompletePartOption(options []Option) []Option {
|
||||
var outOption []Option
|
||||
|
||||
listener, _ := FindOption(options, progressListener, nil)
|
||||
if listener != nil {
|
||||
outOption = append(outOption, Progress(listener.(ProgressListener)))
|
||||
}
|
||||
|
||||
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if payer != nil {
|
||||
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
|
||||
}
|
||||
|
||||
acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
|
||||
if acl != nil {
|
||||
outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
|
||||
}
|
||||
|
||||
callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
|
||||
if callback != nil {
|
||||
outOption = append(outOption, Callback(callback.(string)))
|
||||
}
|
||||
|
||||
callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
|
||||
if callbackVar != nil {
|
||||
outOption = append(outOption, CallbackVar(callbackVar.(string)))
|
||||
}
|
||||
|
||||
respHeader, _ := FindOption(options, responseHeader, nil)
|
||||
if respHeader != nil {
|
||||
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
|
||||
}
|
||||
|
||||
forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
|
||||
if forbidOverWrite != nil {
|
||||
if forbidOverWrite.(string) == "true" {
|
||||
outOption = append(outOption, ForbidOverWrite(true))
|
||||
} else {
|
||||
outOption = append(outOption, ForbidOverWrite(false))
|
||||
}
|
||||
}
|
||||
|
||||
notification, _ := FindOption(options, HttpHeaderOssNotification, nil)
|
||||
if notification != nil {
|
||||
outOption = append(outOption, SetHeader(HttpHeaderOssNotification, notification))
|
||||
}
|
||||
|
||||
return outOption
|
||||
}
|
||||
|
||||
// ChoiceAbortPartOption choices valid option supported by AbortMultipartUpload
|
||||
func ChoiceAbortPartOption(options []Option) []Option {
|
||||
var outOption []Option
|
||||
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if payer != nil {
|
||||
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
|
||||
}
|
||||
|
||||
respHeader, _ := FindOption(options, responseHeader, nil)
|
||||
if respHeader != nil {
|
||||
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
|
||||
}
|
||||
|
||||
return outOption
|
||||
}
|
||||
|
||||
// ChoiceHeadObjectOption choices valid option supported by HeadObject
|
||||
func ChoiceHeadObjectOption(options []Option) []Option {
|
||||
var outOption []Option
|
||||
|
||||
// not select HTTPHeaderRange to get whole object length
|
||||
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if payer != nil {
|
||||
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
|
||||
}
|
||||
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
outOption = append(outOption, VersionId(versionId.(string)))
|
||||
}
|
||||
|
||||
respHeader, _ := FindOption(options, responseHeader, nil)
|
||||
if respHeader != nil {
|
||||
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
|
||||
}
|
||||
|
||||
return outOption
|
||||
}
|
||||
|
||||
func CheckBucketName(bucketName string) error {
|
||||
nameLen := len(bucketName)
|
||||
if nameLen < 3 || nameLen > 63 {
|
||||
return fmt.Errorf("bucket name %s len is between [3-63],now is %d", bucketName, nameLen)
|
||||
}
|
||||
|
||||
for _, v := range bucketName {
|
||||
if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') {
|
||||
return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
|
||||
}
|
||||
}
|
||||
if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
|
||||
return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckObjectName(objectName string) error {
|
||||
if len(objectName) == 0 {
|
||||
return fmt.Errorf("object name is empty")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckObjectNameEx(objectName string, strict bool) error {
|
||||
if err := CheckObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strict && strings.HasPrefix(objectName, "?") {
|
||||
return fmt.Errorf("object name is invalid, can't start with '?'")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
func GetReaderLen(reader io.Reader) (int64, error) {
|
||||
var contentLength int64
|
||||
var err error
|
||||
switch v := reader.(type) {
|
||||
case *bytes.Buffer:
|
||||
contentLength = int64(v.Len())
|
||||
case *bytes.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *strings.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *os.File:
|
||||
fInfo, fError := v.Stat()
|
||||
if fError != nil {
|
||||
err = fmt.Errorf("can't get reader content length,%s", fError.Error())
|
||||
} else {
|
||||
contentLength = fInfo.Size()
|
||||
}
|
||||
case *io.LimitedReader:
|
||||
contentLength = int64(v.N)
|
||||
case *LimitedReadCloser:
|
||||
contentLength = int64(v.N)
|
||||
default:
|
||||
err = fmt.Errorf("can't get reader content length,unkown reader type")
|
||||
}
|
||||
return contentLength, err
|
||||
}
|
||||
*/
|
||||
|
||||
func GetReaderLen(reader io.Reader) (int64, error) {
|
||||
var contentLength int64
|
||||
var err error
|
||||
switch v := reader.(type) {
|
||||
case *io.LimitedReader:
|
||||
contentLength = int64(v.N)
|
||||
case *LimitedReadCloser:
|
||||
contentLength = int64(v.N)
|
||||
default:
|
||||
// Len
|
||||
type lenner interface {
|
||||
Len() int
|
||||
}
|
||||
if lr, ok := reader.(lenner); ok {
|
||||
return int64(lr.Len()), nil
|
||||
}
|
||||
// seeker len
|
||||
if s, ok := reader.(io.Seeker); ok {
|
||||
curOffset, err := s.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
endOffset, err := s.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = s.Seek(curOffset, io.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n := endOffset - curOffset
|
||||
if n >= 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
//
|
||||
err = fmt.Errorf("can't get reader content length,unkown reader type")
|
||||
}
|
||||
return contentLength, err
|
||||
}
|
||||
|
||||
func LimitReadCloser(r io.Reader, n int64) io.Reader {
|
||||
var lc LimitedReadCloser
|
||||
lc.R = r
|
||||
lc.N = n
|
||||
return &lc
|
||||
}
|
||||
|
||||
// LimitedRC support Close()
|
||||
type LimitedReadCloser struct {
|
||||
io.LimitedReader
|
||||
}
|
||||
|
||||
func (lc *LimitedReadCloser) Close() error {
|
||||
if closer, ok := lc.R.(io.ReadCloser); ok {
|
||||
return closer.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DiscardReadCloser struct {
|
||||
RC io.ReadCloser
|
||||
Discard int
|
||||
}
|
||||
|
||||
func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
|
||||
n, err := drc.RC.Read(b)
|
||||
if drc.Discard == 0 || n <= 0 {
|
||||
return n, err
|
||||
}
|
||||
|
||||
if n <= drc.Discard {
|
||||
drc.Discard -= n
|
||||
return 0, err
|
||||
}
|
||||
|
||||
realLen := n - drc.Discard
|
||||
copy(b[0:realLen], b[drc.Discard:n])
|
||||
drc.Discard = 0
|
||||
return realLen, err
|
||||
}
|
||||
|
||||
func (drc *DiscardReadCloser) Close() error {
|
||||
closer, ok := drc.RC.(io.ReadCloser)
|
||||
if ok {
|
||||
return closer.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
|
||||
for _, key := range keys {
|
||||
value, ok := params[key]
|
||||
if ok && value == "" {
|
||||
// convert "" to nil
|
||||
params[key] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func EscapeLFString(str string) string {
|
||||
var log bytes.Buffer
|
||||
for i := 0; i < len(str); i++ {
|
||||
if str[i] != '\n' {
|
||||
log.WriteByte(str[i])
|
||||
} else {
|
||||
log.WriteString("\\n")
|
||||
}
|
||||
}
|
||||
return log.String()
|
||||
}
|
||||
|
||||
// EscapeString writes to p the properly escaped XML equivalent
|
||||
// of the plain text data s.
|
||||
func EscapeXml(s string) string {
|
||||
var p strings.Builder
|
||||
var esc []byte
|
||||
hextable := "0123456789ABCDEF"
|
||||
escPattern := []byte("�")
|
||||
last := 0
|
||||
for i := 0; i < len(s); {
|
||||
r, width := utf8.DecodeRuneInString(s[i:])
|
||||
i += width
|
||||
switch r {
|
||||
case '"':
|
||||
esc = escQuot
|
||||
case '\'':
|
||||
esc = escApos
|
||||
case '&':
|
||||
esc = escAmp
|
||||
case '<':
|
||||
esc = escLT
|
||||
case '>':
|
||||
esc = escGT
|
||||
case '\t':
|
||||
esc = escTab
|
||||
case '\n':
|
||||
esc = escNL
|
||||
case '\r':
|
||||
esc = escCR
|
||||
default:
|
||||
if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
|
||||
if r >= 0x00 && r < 0x20 {
|
||||
escPattern[3] = hextable[r>>4]
|
||||
escPattern[4] = hextable[r&0x0f]
|
||||
esc = escPattern
|
||||
} else {
|
||||
esc = escFFFD
|
||||
}
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.WriteString(s[last : i-width])
|
||||
p.Write(esc)
|
||||
last = i
|
||||
}
|
||||
p.WriteString(s[last:])
|
||||
return p.String()
|
||||
}
|
||||
|
||||
// Decide whether the given rune is in the XML Character Range, per
|
||||
// the Char production of https://www.xml.com/axml/testaxml.htm,
|
||||
// Section 2.2 Characters.
|
||||
func isInCharacterRange(r rune) (inrange bool) {
|
||||
return r == 0x09 ||
|
||||
r == 0x0A ||
|
||||
r == 0x0D ||
|
||||
r >= 0x20 && r <= 0xD7FF ||
|
||||
r >= 0xE000 && r <= 0xFFFD ||
|
||||
r >= 0x10000 && r <= 0x10FFFF
|
||||
}
|
||||
|
||||
func isVerifyObjectStrict(config *Config) bool {
|
||||
if config != nil {
|
||||
if config.AuthVersion == AuthV2 || config.AuthVersion == AuthV4 {
|
||||
return false
|
||||
}
|
||||
return config.VerifyObjectStrict
|
||||
}
|
||||
return true
|
||||
}
|
||||
201
vendor/github.com/aliyun/credentials-go/LICENSE
generated
vendored
201
vendor/github.com/aliyun/credentials-go/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
41
vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go
generated
vendored
41
vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go
generated
vendored
@@ -1,41 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import "github.com/alibabacloud-go/tea/tea"
|
||||
|
||||
// AccessKeyCredential is a kind of credential
|
||||
type AccessKeyCredential struct {
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
}
|
||||
|
||||
func newAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential {
|
||||
return &AccessKeyCredential{
|
||||
AccessKeyId: accessKeyId,
|
||||
AccessKeySecret: accessKeySecret,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns AccessKeyCreential's AccessKeyId
|
||||
func (a *AccessKeyCredential) GetAccessKeyId() (*string, error) {
|
||||
return tea.String(a.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns AccessKeyCreential's AccessKeySecret
|
||||
func (a *AccessKeyCredential) GetAccessKeySecret() (*string, error) {
|
||||
return tea.String(a.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken is useless for AccessKeyCreential
|
||||
func (a *AccessKeyCredential) GetSecurityToken() (*string, error) {
|
||||
return tea.String(""), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless for AccessKeyCreential
|
||||
func (a *AccessKeyCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns AccessKeyCreential's type
|
||||
func (a *AccessKeyCredential) GetType() *string {
|
||||
return tea.String("access_key")
|
||||
}
|
||||
40
vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go
generated
vendored
40
vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go
generated
vendored
@@ -1,40 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import "github.com/alibabacloud-go/tea/tea"
|
||||
|
||||
// BearerTokenCredential is a kind of credential
|
||||
type BearerTokenCredential struct {
|
||||
BearerToken string
|
||||
}
|
||||
|
||||
// newBearerTokenCredential return a BearerTokenCredential object
|
||||
func newBearerTokenCredential(token string) *BearerTokenCredential {
|
||||
return &BearerTokenCredential{
|
||||
BearerToken: token,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId is useless for BearerTokenCredential
|
||||
func (b *BearerTokenCredential) GetAccessKeyId() (*string, error) {
|
||||
return tea.String(""), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret is useless for BearerTokenCredential
|
||||
func (b *BearerTokenCredential) GetAccessKeySecret() (*string, error) {
|
||||
return tea.String(("")), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken is useless for BearerTokenCredential
|
||||
func (b *BearerTokenCredential) GetSecurityToken() (*string, error) {
|
||||
return tea.String(""), nil
|
||||
}
|
||||
|
||||
// GetBearerToken reutrns BearerTokenCredential's BearerToken
|
||||
func (b *BearerTokenCredential) GetBearerToken() *string {
|
||||
return tea.String(b.BearerToken)
|
||||
}
|
||||
|
||||
// GetType reutrns BearerTokenCredential's type
|
||||
func (b *BearerTokenCredential) GetType() *string {
|
||||
return tea.String("bearer")
|
||||
}
|
||||
349
vendor/github.com/aliyun/credentials-go/credentials/credential.go
generated
vendored
349
vendor/github.com/aliyun/credentials-go/credentials/credential.go
generated
vendored
@@ -1,349 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/debug/debug"
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/response"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
var debuglog = debug.Init("credential")
|
||||
|
||||
var hookParse = func(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Credential is an interface for getting actual credential
|
||||
type Credential interface {
|
||||
GetAccessKeyId() (*string, error)
|
||||
GetAccessKeySecret() (*string, error)
|
||||
GetSecurityToken() (*string, error)
|
||||
GetBearerToken() *string
|
||||
GetType() *string
|
||||
}
|
||||
|
||||
// Config is important when call NewCredential
|
||||
type Config struct {
|
||||
Type *string `json:"type"`
|
||||
AccessKeyId *string `json:"access_key_id"`
|
||||
AccessKeySecret *string `json:"access_key_secret"`
|
||||
RoleArn *string `json:"role_arn"`
|
||||
RoleSessionName *string `json:"role_session_name"`
|
||||
PublicKeyId *string `json:"public_key_id"`
|
||||
RoleName *string `json:"role_name"`
|
||||
SessionExpiration *int `json:"session_expiration"`
|
||||
PrivateKeyFile *string `json:"private_key_file"`
|
||||
BearerToken *string `json:"bearer_token"`
|
||||
SecurityToken *string `json:"security_token"`
|
||||
RoleSessionExpiration *int `json:"role_session_expiratioon"`
|
||||
Policy *string `json:"policy"`
|
||||
Host *string `json:"host"`
|
||||
Timeout *int `json:"timeout"`
|
||||
ConnectTimeout *int `json:"connect_timeout"`
|
||||
Proxy *string `json:"proxy"`
|
||||
}
|
||||
|
||||
func (s Config) String() string {
|
||||
return tea.Prettify(s)
|
||||
}
|
||||
|
||||
func (s Config) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (s *Config) SetAccessKeyId(v string) *Config {
|
||||
s.AccessKeyId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetAccessKeySecret(v string) *Config {
|
||||
s.AccessKeySecret = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetSecurityToken(v string) *Config {
|
||||
s.SecurityToken = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetRoleArn(v string) *Config {
|
||||
s.RoleArn = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetRoleSessionName(v string) *Config {
|
||||
s.RoleSessionName = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetPublicKeyId(v string) *Config {
|
||||
s.PublicKeyId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetRoleName(v string) *Config {
|
||||
s.RoleName = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetSessionExpiration(v int) *Config {
|
||||
s.SessionExpiration = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetPrivateKeyFile(v string) *Config {
|
||||
s.PrivateKeyFile = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetBearerToken(v string) *Config {
|
||||
s.BearerToken = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetRoleSessionExpiration(v int) *Config {
|
||||
s.RoleSessionExpiration = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetPolicy(v string) *Config {
|
||||
s.Policy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetHost(v string) *Config {
|
||||
s.Host = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetTimeout(v int) *Config {
|
||||
s.Timeout = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetConnectTimeout(v int) *Config {
|
||||
s.ConnectTimeout = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetProxy(v string) *Config {
|
||||
s.Proxy = &v
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Config) SetType(v string) *Config {
|
||||
s.Type = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// NewCredential return a credential according to the type in config.
|
||||
// if config is nil, the function will use default provider chain to get credential.
|
||||
// please see README.md for detail.
|
||||
func NewCredential(config *Config) (credential Credential, err error) {
|
||||
if config == nil {
|
||||
config, err = defaultChain.resolve()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return NewCredential(config)
|
||||
}
|
||||
switch tea.StringValue(config.Type) {
|
||||
case "access_key":
|
||||
err = checkAccessKey(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
credential = newAccessKeyCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret))
|
||||
case "sts":
|
||||
err = checkSTS(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
credential = newStsTokenCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.SecurityToken))
|
||||
case "ecs_ram_role":
|
||||
checkEcsRAMRole(config)
|
||||
runtime := &utils.Runtime{
|
||||
Host: tea.StringValue(config.Host),
|
||||
Proxy: tea.StringValue(config.Proxy),
|
||||
ReadTimeout: tea.IntValue(config.Timeout),
|
||||
ConnectTimeout: tea.IntValue(config.ConnectTimeout),
|
||||
}
|
||||
credential = newEcsRAMRoleCredential(tea.StringValue(config.RoleName), runtime)
|
||||
case "ram_role_arn":
|
||||
err = checkRAMRoleArn(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
runtime := &utils.Runtime{
|
||||
Host: tea.StringValue(config.Host),
|
||||
Proxy: tea.StringValue(config.Proxy),
|
||||
ReadTimeout: tea.IntValue(config.Timeout),
|
||||
ConnectTimeout: tea.IntValue(config.ConnectTimeout),
|
||||
}
|
||||
credential = newRAMRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime)
|
||||
case "rsa_key_pair":
|
||||
err = checkRSAKeyPair(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
file, err1 := os.Open(tea.StringValue(config.PrivateKeyFile))
|
||||
if err1 != nil {
|
||||
err = fmt.Errorf("InvalidPath: Can not open PrivateKeyFile, err is %s", err1.Error())
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
var privateKey string
|
||||
scan := bufio.NewScanner(file)
|
||||
for scan.Scan() {
|
||||
if strings.HasPrefix(scan.Text(), "----") {
|
||||
continue
|
||||
}
|
||||
privateKey += scan.Text() + "\n"
|
||||
}
|
||||
runtime := &utils.Runtime{
|
||||
Host: tea.StringValue(config.Host),
|
||||
Proxy: tea.StringValue(config.Proxy),
|
||||
ReadTimeout: tea.IntValue(config.Timeout),
|
||||
ConnectTimeout: tea.IntValue(config.ConnectTimeout),
|
||||
}
|
||||
credential = newRsaKeyPairCredential(privateKey, tea.StringValue(config.PublicKeyId), tea.IntValue(config.SessionExpiration), runtime)
|
||||
case "bearer":
|
||||
if tea.StringValue(config.BearerToken) == "" {
|
||||
err = errors.New("BearerToken cannot be empty")
|
||||
return
|
||||
}
|
||||
credential = newBearerTokenCredential(tea.StringValue(config.BearerToken))
|
||||
default:
|
||||
err = errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair")
|
||||
return
|
||||
}
|
||||
return credential, nil
|
||||
}
|
||||
|
||||
func checkRSAKeyPair(config *Config) (err error) {
|
||||
if tea.StringValue(config.PrivateKeyFile) == "" {
|
||||
err = errors.New("PrivateKeyFile cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.PublicKeyId) == "" {
|
||||
err = errors.New("PublicKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkRAMRoleArn(config *Config) (err error) {
|
||||
if tea.StringValue(config.AccessKeySecret) == "" {
|
||||
err = errors.New("AccessKeySecret cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.RoleArn) == "" {
|
||||
err = errors.New("RoleArn cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.RoleSessionName) == "" {
|
||||
err = errors.New("RoleSessionName cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.AccessKeyId) == "" {
|
||||
err = errors.New("AccessKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkEcsRAMRole(config *Config) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func checkSTS(config *Config) (err error) {
|
||||
if tea.StringValue(config.AccessKeyId) == "" {
|
||||
err = errors.New("AccessKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.AccessKeySecret) == "" {
|
||||
err = errors.New("AccessKeySecret cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.SecurityToken) == "" {
|
||||
err = errors.New("SecurityToken cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkAccessKey(config *Config) (err error) {
|
||||
if tea.StringValue(config.AccessKeyId) == "" {
|
||||
err = errors.New("AccessKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.AccessKeySecret) == "" {
|
||||
err = errors.New("AccessKeySecret cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content []byte, err error) {
|
||||
httpRequest, err := http.NewRequest(request.Method, request.URL, strings.NewReader(""))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
httpRequest.Proto = "HTTP/1.1"
|
||||
httpRequest.Host = request.Domain
|
||||
debuglog("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto)
|
||||
debuglog("> Host: %s", httpRequest.Host)
|
||||
for key, value := range request.Headers {
|
||||
if value != "" {
|
||||
debuglog("> %s: %s", key, value)
|
||||
httpRequest.Header[key] = []string{value}
|
||||
}
|
||||
}
|
||||
debuglog(">")
|
||||
httpClient := &http.Client{}
|
||||
httpClient.Timeout = time.Duration(runtime.ReadTimeout) * time.Second
|
||||
proxy := &url.URL{}
|
||||
if runtime.Proxy != "" {
|
||||
proxy, err = url.Parse(runtime.Proxy)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
trans := &http.Transport{}
|
||||
if proxy != nil && runtime.Proxy != "" {
|
||||
trans.Proxy = http.ProxyURL(proxy)
|
||||
}
|
||||
trans.DialContext = utils.Timeout(time.Duration(runtime.ConnectTimeout) * time.Second)
|
||||
httpClient.Transport = trans
|
||||
httpResponse, err := hookDo(httpClient.Do)(httpRequest)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
debuglog("< %s %s", httpResponse.Proto, httpResponse.Status)
|
||||
for key, value := range httpResponse.Header {
|
||||
debuglog("< %s: %v", key, strings.Join(value, ""))
|
||||
}
|
||||
debuglog("<")
|
||||
|
||||
resp := &response.CommonResponse{}
|
||||
err = hookParse(resp.ParseFromHTTPResponse(httpResponse))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
debuglog("%s", resp.GetHTTPContentString())
|
||||
if resp.GetHTTPStatus() != http.StatusOK {
|
||||
err = fmt.Errorf("httpStatus: %d, message = %s", resp.GetHTTPStatus(), resp.GetHTTPContentString())
|
||||
return
|
||||
}
|
||||
return resp.GetHTTPContentBytes(), nil
|
||||
}
|
||||
25
vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go
generated
vendored
25
vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go
generated
vendored
@@ -1,25 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
const defaultInAdvanceScale = 0.95
|
||||
|
||||
var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
|
||||
return fn
|
||||
}
|
||||
|
||||
type credentialUpdater struct {
|
||||
credentialExpiration int
|
||||
lastUpdateTimestamp int64
|
||||
inAdvanceScale float64
|
||||
}
|
||||
|
||||
func (updater *credentialUpdater) needUpdateCredential() (result bool) {
|
||||
if updater.inAdvanceScale == 0 {
|
||||
updater.inAdvanceScale = defaultInAdvanceScale
|
||||
}
|
||||
return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale)
|
||||
}
|
||||
136
vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go
generated
vendored
136
vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go
generated
vendored
@@ -1,136 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
|
||||
|
||||
// EcsRAMRoleCredential is a kind of credential
|
||||
type EcsRAMRoleCredential struct {
|
||||
*credentialUpdater
|
||||
RoleName string
|
||||
sessionCredential *sessionCredential
|
||||
runtime *utils.Runtime
|
||||
}
|
||||
|
||||
type ecsRAMRoleResponse struct {
|
||||
Code string `json:"Code" xml:"Code"`
|
||||
AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"`
|
||||
AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
|
||||
SecurityToken string `json:"SecurityToken" xml:"SecurityToken"`
|
||||
Expiration string `json:"Expiration" xml:"Expiration"`
|
||||
}
|
||||
|
||||
func newEcsRAMRoleCredential(roleName string, runtime *utils.Runtime) *EcsRAMRoleCredential {
|
||||
return &EcsRAMRoleCredential{
|
||||
RoleName: roleName,
|
||||
credentialUpdater: new(credentialUpdater),
|
||||
runtime: runtime,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns EcsRAMRoleCredential's AccessKeyId
|
||||
// if AccessKeyId is not exist or out of date, the function will update it.
|
||||
func (e *EcsRAMRoleCredential) GetAccessKeyId() (*string, error) {
|
||||
if e.sessionCredential == nil || e.needUpdateCredential() {
|
||||
err := e.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(e.sessionCredential.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns EcsRAMRoleCredential's AccessKeySecret
|
||||
// if AccessKeySecret is not exist or out of date, the function will update it.
|
||||
func (e *EcsRAMRoleCredential) GetAccessKeySecret() (*string, error) {
|
||||
if e.sessionCredential == nil || e.needUpdateCredential() {
|
||||
err := e.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(e.sessionCredential.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken reutrns EcsRAMRoleCredential's SecurityToken
|
||||
// if SecurityToken is not exist or out of date, the function will update it.
|
||||
func (e *EcsRAMRoleCredential) GetSecurityToken() (*string, error) {
|
||||
if e.sessionCredential == nil || e.needUpdateCredential() {
|
||||
err := e.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(e.sessionCredential.SecurityToken), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless for EcsRAMRoleCredential
|
||||
func (e *EcsRAMRoleCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns EcsRAMRoleCredential's type
|
||||
func (e *EcsRAMRoleCredential) GetType() *string {
|
||||
return tea.String("ecs_ram_role")
|
||||
}
|
||||
|
||||
func getRoleName() (string, error) {
|
||||
runtime := utils.NewRuntime(1, 1, "", "")
|
||||
request := request.NewCommonRequest()
|
||||
request.URL = securityCredURL
|
||||
request.Method = "GET"
|
||||
content, err := doAction(request, runtime)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(content), nil
|
||||
}
|
||||
|
||||
func (e *EcsRAMRoleCredential) updateCredential() (err error) {
|
||||
if e.runtime == nil {
|
||||
e.runtime = new(utils.Runtime)
|
||||
}
|
||||
request := request.NewCommonRequest()
|
||||
if e.RoleName == "" {
|
||||
e.RoleName, err = getRoleName()
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
|
||||
}
|
||||
}
|
||||
request.URL = securityCredURL + e.RoleName
|
||||
request.Method = "GET"
|
||||
content, err := doAction(request, e.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
|
||||
}
|
||||
var resp *ecsRAMRoleResponse
|
||||
err = json.Unmarshal(content, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh Ecs sts token err: Json Unmarshal fail: %s", err.Error())
|
||||
}
|
||||
if resp.Code != "Success" {
|
||||
return fmt.Errorf("refresh Ecs sts token err: Code is not Success")
|
||||
}
|
||||
if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" {
|
||||
return fmt.Errorf("refresh Ecs sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration)
|
||||
}
|
||||
|
||||
expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration)
|
||||
e.lastUpdateTimestamp = time.Now().Unix()
|
||||
e.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
|
||||
e.sessionCredential = &sessionCredential{
|
||||
AccessKeyId: resp.AccessKeyId,
|
||||
AccessKeySecret: resp.AccessKeySecret,
|
||||
SecurityToken: resp.SecurityToken,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
43
vendor/github.com/aliyun/credentials-go/credentials/env_provider.go
generated
vendored
43
vendor/github.com/aliyun/credentials-go/credentials/env_provider.go
generated
vendored
@@ -1,43 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
)
|
||||
|
||||
type envProvider struct{}
|
||||
|
||||
var providerEnv = new(envProvider)
|
||||
|
||||
const (
|
||||
// EnvVarAccessKeyId is a name of ALIBABA_CLOUD_ACCESS_KEY_Id
|
||||
EnvVarAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_Id"
|
||||
// EnvVarAccessKeySecret is a name of ALIBABA_CLOUD_ACCESS_KEY_SECRET
|
||||
EnvVarAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
|
||||
)
|
||||
|
||||
func newEnvProvider() Provider {
|
||||
return &envProvider{}
|
||||
}
|
||||
|
||||
func (p *envProvider) resolve() (*Config, error) {
|
||||
accessKeyId, ok1 := os.LookupEnv(EnvVarAccessKeyId)
|
||||
accessKeySecret, ok2 := os.LookupEnv(EnvVarAccessKeySecret)
|
||||
if !ok1 || !ok2 {
|
||||
return nil, nil
|
||||
}
|
||||
if accessKeyId == "" {
|
||||
return nil, errors.New(EnvVarAccessKeyId + " cannot be empty")
|
||||
}
|
||||
if accessKeySecret == "" {
|
||||
return nil, errors.New(EnvVarAccessKeySecret + " cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("access_key"),
|
||||
AccessKeyId: tea.String(accessKeyId),
|
||||
AccessKeySecret: tea.String(accessKeySecret),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
28
vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go
generated
vendored
28
vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go
generated
vendored
@@ -1,28 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
)
|
||||
|
||||
type instanceCredentialsProvider struct{}
|
||||
|
||||
var providerInstance = new(instanceCredentialsProvider)
|
||||
|
||||
func newInstanceCredentialsProvider() Provider {
|
||||
return &instanceCredentialsProvider{}
|
||||
}
|
||||
|
||||
func (p *instanceCredentialsProvider) resolve() (*Config, error) {
|
||||
roleName, ok := os.LookupEnv(ENVEcsMetadata)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
config := &Config{
|
||||
Type: tea.String("ecs_ram_role"),
|
||||
RoleName: tea.String(roleName),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
350
vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go
generated
vendored
350
vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go
generated
vendored
@@ -1,350 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
ini "gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
type profileProvider struct {
|
||||
Profile string
|
||||
}
|
||||
|
||||
var providerProfile = newProfileProvider()
|
||||
|
||||
var hookOS = func(goos string) string {
|
||||
return goos
|
||||
}
|
||||
|
||||
var hookState = func(info os.FileInfo, err error) (os.FileInfo, error) {
|
||||
return info, err
|
||||
}
|
||||
|
||||
// NewProfileProvider receive zero or more parameters,
|
||||
// when length of name is 0, the value of field Profile will be "default",
|
||||
// and when there are multiple inputs, the function will take the
|
||||
// first one and discard the other values.
|
||||
func newProfileProvider(name ...string) Provider {
|
||||
p := new(profileProvider)
|
||||
if len(name) == 0 {
|
||||
p.Profile = "default"
|
||||
} else {
|
||||
p.Profile = name[0]
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// resolve implements the Provider interface
|
||||
// when credential type is rsa_key_pair, the content of private_key file
|
||||
// must be able to be parsed directly into the required string
|
||||
// that NewRsaKeyPairCredential function needed
|
||||
func (p *profileProvider) resolve() (*Config, error) {
|
||||
path, ok := os.LookupEnv(ENVCredentialFile)
|
||||
if !ok {
|
||||
path, err := checkDefaultPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if path == "" {
|
||||
return nil, nil
|
||||
}
|
||||
} else if path == "" {
|
||||
return nil, errors.New(ENVCredentialFile + " cannot be empty")
|
||||
}
|
||||
|
||||
value, section, err := getType(path, p.Profile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch value.String() {
|
||||
case "access_key":
|
||||
config, err := getAccessKey(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "sts":
|
||||
config, err := getSTS(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "bearer":
|
||||
config, err := getBearerToken(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "ecs_ram_role":
|
||||
config, err := getEcsRAMRole(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "ram_role_arn":
|
||||
config, err := getRAMRoleArn(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "rsa_key_pair":
|
||||
config, err := getRSAKeyPair(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
default:
|
||||
return nil, errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair")
|
||||
}
|
||||
}
|
||||
|
||||
func getRSAKeyPair(section *ini.Section) (*Config, error) {
|
||||
publicKeyId, err := section.GetKey("public_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required public_key_id option in profile for rsa_key_pair")
|
||||
}
|
||||
if publicKeyId.String() == "" {
|
||||
return nil, errors.New("public_key_id cannot be empty")
|
||||
}
|
||||
privateKeyFile, err := section.GetKey("private_key_file")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required private_key_file option in profile for rsa_key_pair")
|
||||
}
|
||||
if privateKeyFile.String() == "" {
|
||||
return nil, errors.New("private_key_file cannot be empty")
|
||||
}
|
||||
sessionExpiration, _ := section.GetKey("session_expiration")
|
||||
expiration := 0
|
||||
if sessionExpiration != nil {
|
||||
expiration, err = sessionExpiration.Int()
|
||||
if err != nil {
|
||||
return nil, errors.New("session_expiration must be an int")
|
||||
}
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("rsa_key_pair"),
|
||||
PublicKeyId: tea.String(publicKeyId.String()),
|
||||
PrivateKeyFile: tea.String(privateKeyFile.String()),
|
||||
SessionExpiration: tea.Int(expiration),
|
||||
}
|
||||
err = setRuntimeToConfig(config, section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getRAMRoleArn(section *ini.Section) (*Config, error) {
|
||||
accessKeyId, err := section.GetKey("access_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_id option in profile for ram_role_arn")
|
||||
}
|
||||
if accessKeyId.String() == "" {
|
||||
return nil, errors.New("access_key_id cannot be empty")
|
||||
}
|
||||
accessKeySecret, err := section.GetKey("access_key_secret")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_secret option in profile for ram_role_arn")
|
||||
}
|
||||
if accessKeySecret.String() == "" {
|
||||
return nil, errors.New("access_key_secret cannot be empty")
|
||||
}
|
||||
roleArn, err := section.GetKey("role_arn")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required role_arn option in profile for ram_role_arn")
|
||||
}
|
||||
if roleArn.String() == "" {
|
||||
return nil, errors.New("role_arn cannot be empty")
|
||||
}
|
||||
roleSessionName, err := section.GetKey("role_session_name")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required role_session_name option in profile for ram_role_arn")
|
||||
}
|
||||
if roleSessionName.String() == "" {
|
||||
return nil, errors.New("role_session_name cannot be empty")
|
||||
}
|
||||
roleSessionExpiration, _ := section.GetKey("role_session_expiration")
|
||||
expiration := 0
|
||||
if roleSessionExpiration != nil {
|
||||
expiration, err = roleSessionExpiration.Int()
|
||||
if err != nil {
|
||||
return nil, errors.New("role_session_expiration must be an int")
|
||||
}
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("ram_role_arn"),
|
||||
AccessKeyId: tea.String(accessKeyId.String()),
|
||||
AccessKeySecret: tea.String(accessKeySecret.String()),
|
||||
RoleArn: tea.String(roleArn.String()),
|
||||
RoleSessionName: tea.String(roleSessionName.String()),
|
||||
RoleSessionExpiration: tea.Int(expiration),
|
||||
}
|
||||
err = setRuntimeToConfig(config, section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getEcsRAMRole(section *ini.Section) (*Config, error) {
|
||||
roleName, _ := section.GetKey("role_name")
|
||||
config := &Config{
|
||||
Type: tea.String("ecs_ram_role"),
|
||||
}
|
||||
if roleName != nil {
|
||||
config.RoleName = tea.String(roleName.String())
|
||||
}
|
||||
err := setRuntimeToConfig(config, section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getBearerToken(section *ini.Section) (*Config, error) {
|
||||
bearerToken, err := section.GetKey("bearer_token")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required bearer_token option in profile for bearer")
|
||||
}
|
||||
if bearerToken.String() == "" {
|
||||
return nil, errors.New("bearer_token cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("bearer"),
|
||||
BearerToken: tea.String(bearerToken.String()),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getSTS(section *ini.Section) (*Config, error) {
|
||||
accesskeyid, err := section.GetKey("access_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_id option in profile for sts")
|
||||
}
|
||||
if accesskeyid.String() == "" {
|
||||
return nil, errors.New("access_key_id cannot be empty")
|
||||
}
|
||||
accessKeySecret, err := section.GetKey("access_key_secret")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_secret option in profile for sts")
|
||||
}
|
||||
if accessKeySecret.String() == "" {
|
||||
return nil, errors.New("access_key_secret cannot be empty")
|
||||
}
|
||||
securityToken, err := section.GetKey("security_token")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required security_token option in profile for sts")
|
||||
}
|
||||
if securityToken.String() == "" {
|
||||
return nil, errors.New("security_token cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("sts"),
|
||||
AccessKeyId: tea.String(accesskeyid.String()),
|
||||
AccessKeySecret: tea.String(accessKeySecret.String()),
|
||||
SecurityToken: tea.String(securityToken.String()),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getAccessKey(section *ini.Section) (*Config, error) {
|
||||
accesskeyid, err := section.GetKey("access_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_id option in profile for access_key")
|
||||
}
|
||||
if accesskeyid.String() == "" {
|
||||
return nil, errors.New("access_key_id cannot be empty")
|
||||
}
|
||||
accessKeySecret, err := section.GetKey("access_key_secret")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_secret option in profile for access_key")
|
||||
}
|
||||
if accessKeySecret.String() == "" {
|
||||
return nil, errors.New("access_key_secret cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("access_key"),
|
||||
AccessKeyId: tea.String(accesskeyid.String()),
|
||||
AccessKeySecret: tea.String(accessKeySecret.String()),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getType(path, profile string) (*ini.Key, *ini.Section, error) {
|
||||
ini, err := ini.Load(path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("ERROR: Can not open file " + err.Error())
|
||||
}
|
||||
|
||||
section, err := ini.GetSection(profile)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("ERROR: Can not load section " + err.Error())
|
||||
}
|
||||
|
||||
value, err := section.GetKey("type")
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("Missing required type option " + err.Error())
|
||||
}
|
||||
return value, section, nil
|
||||
}
|
||||
|
||||
func getHomePath() string {
|
||||
if hookOS(runtime.GOOS) == "windows" {
|
||||
path, ok := os.LookupEnv("USERPROFILE")
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return path
|
||||
}
|
||||
path, ok := os.LookupEnv("HOME")
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func checkDefaultPath() (path string, err error) {
|
||||
path = getHomePath()
|
||||
if path == "" {
|
||||
return "", errors.New("The default credential file path is invalid")
|
||||
}
|
||||
path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1)
|
||||
_, err = hookState(os.Stat(path))
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func setRuntimeToConfig(config *Config, section *ini.Section) error {
|
||||
rawTimeout, _ := section.GetKey("timeout")
|
||||
rawConnectTimeout, _ := section.GetKey("connect_timeout")
|
||||
rawProxy, _ := section.GetKey("proxy")
|
||||
rawHost, _ := section.GetKey("host")
|
||||
if rawProxy != nil {
|
||||
config.Proxy = tea.String(rawProxy.String())
|
||||
}
|
||||
if rawConnectTimeout != nil {
|
||||
connectTimeout, err := rawConnectTimeout.Int()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Please set connect_timeout with an int value")
|
||||
}
|
||||
config.ConnectTimeout = tea.Int(connectTimeout)
|
||||
}
|
||||
if rawTimeout != nil {
|
||||
timeout, err := rawTimeout.Int()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Please set timeout with an int value")
|
||||
}
|
||||
config.Timeout = tea.Int(timeout)
|
||||
}
|
||||
if rawHost != nil {
|
||||
config.Host = tea.String(rawHost.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
13
vendor/github.com/aliyun/credentials-go/credentials/provider.go
generated
vendored
13
vendor/github.com/aliyun/credentials-go/credentials/provider.go
generated
vendored
@@ -1,13 +0,0 @@
|
||||
package credentials
|
||||
|
||||
//Environmental virables that may be used by the provider
|
||||
const (
|
||||
ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE"
|
||||
ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA"
|
||||
PATHCredentialFile = "~/.alibabacloud/credentials"
|
||||
)
|
||||
|
||||
// Provider will be implemented When you want to customize the provider.
|
||||
type Provider interface {
|
||||
resolve() (*Config, error)
|
||||
}
|
||||
32
vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go
generated
vendored
32
vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go
generated
vendored
@@ -1,32 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
type providerChain struct {
|
||||
Providers []Provider
|
||||
}
|
||||
|
||||
var defaultproviders = []Provider{providerEnv, providerProfile, providerInstance}
|
||||
var defaultChain = newProviderChain(defaultproviders)
|
||||
|
||||
func newProviderChain(providers []Provider) Provider {
|
||||
return &providerChain{
|
||||
Providers: providers,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *providerChain) resolve() (*Config, error) {
|
||||
for _, provider := range p.Providers {
|
||||
config, err := provider.resolve()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if config == nil {
|
||||
continue
|
||||
}
|
||||
return config, err
|
||||
}
|
||||
return nil, errors.New("No credential found")
|
||||
|
||||
}
|
||||
59
vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go
generated
vendored
59
vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go
generated
vendored
@@ -1,59 +0,0 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
// CommonRequest is for requesting credential
|
||||
type CommonRequest struct {
|
||||
Scheme string
|
||||
Method string
|
||||
Domain string
|
||||
RegionId string
|
||||
URL string
|
||||
ReadTimeout time.Duration
|
||||
ConnectTimeout time.Duration
|
||||
isInsecure *bool
|
||||
|
||||
userAgent map[string]string
|
||||
QueryParams map[string]string
|
||||
Headers map[string]string
|
||||
|
||||
queries string
|
||||
}
|
||||
|
||||
// NewCommonRequest returns a CommonRequest
|
||||
func NewCommonRequest() *CommonRequest {
|
||||
return &CommonRequest{
|
||||
QueryParams: make(map[string]string),
|
||||
Headers: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// BuildURL returns a url
|
||||
func (request *CommonRequest) BuildURL() string {
|
||||
url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain)
|
||||
request.queries = "/?" + utils.GetURLFormedMap(request.QueryParams)
|
||||
return url + request.queries
|
||||
}
|
||||
|
||||
// BuildStringToSign returns BuildStringToSign
|
||||
func (request *CommonRequest) BuildStringToSign() (stringToSign string) {
|
||||
signParams := make(map[string]string)
|
||||
for key, value := range request.QueryParams {
|
||||
signParams[key] = value
|
||||
}
|
||||
|
||||
stringToSign = utils.GetURLFormedMap(signParams)
|
||||
stringToSign = strings.Replace(stringToSign, "+", "%20", -1)
|
||||
stringToSign = strings.Replace(stringToSign, "*", "%2A", -1)
|
||||
stringToSign = strings.Replace(stringToSign, "%7E", "~", -1)
|
||||
stringToSign = url.QueryEscape(stringToSign)
|
||||
stringToSign = request.Method + "&%2F&" + stringToSign
|
||||
return
|
||||
}
|
||||
53
vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go
generated
vendored
53
vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go
generated
vendored
@@ -1,53 +0,0 @@
|
||||
package response
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var hookReadAll = func(fn func(r io.Reader) (b []byte, err error)) func(r io.Reader) (b []byte, err error) {
|
||||
return fn
|
||||
}
|
||||
|
||||
// CommonResponse is for storing message of httpResponse
|
||||
type CommonResponse struct {
|
||||
httpStatus int
|
||||
httpHeaders map[string][]string
|
||||
httpContentString string
|
||||
httpContentBytes []byte
|
||||
}
|
||||
|
||||
// ParseFromHTTPResponse assigns for CommonResponse, returns err when body is too large.
|
||||
func (resp *CommonResponse) ParseFromHTTPResponse(httpResponse *http.Response) (err error) {
|
||||
defer httpResponse.Body.Close()
|
||||
body, err := hookReadAll(ioutil.ReadAll)(httpResponse.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.httpStatus = httpResponse.StatusCode
|
||||
resp.httpHeaders = httpResponse.Header
|
||||
resp.httpContentBytes = body
|
||||
resp.httpContentString = string(body)
|
||||
return
|
||||
}
|
||||
|
||||
// GetHTTPStatus returns httpStatus
|
||||
func (resp *CommonResponse) GetHTTPStatus() int {
|
||||
return resp.httpStatus
|
||||
}
|
||||
|
||||
// GetHTTPHeaders returns httpresponse's headers
|
||||
func (resp *CommonResponse) GetHTTPHeaders() map[string][]string {
|
||||
return resp.httpHeaders
|
||||
}
|
||||
|
||||
// GetHTTPContentString return body content as string
|
||||
func (resp *CommonResponse) GetHTTPContentString() string {
|
||||
return resp.httpContentString
|
||||
}
|
||||
|
||||
// GetHTTPContentBytes return body content as []byte
|
||||
func (resp *CommonResponse) GetHTTPContentBytes() []byte {
|
||||
return resp.httpContentBytes
|
||||
}
|
||||
145
vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go
generated
vendored
145
vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go
generated
vendored
@@ -1,145 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
// RsaKeyPairCredential is a kind of credentials
|
||||
type RsaKeyPairCredential struct {
|
||||
*credentialUpdater
|
||||
PrivateKey string
|
||||
PublicKeyId string
|
||||
SessionExpiration int
|
||||
sessionCredential *sessionCredential
|
||||
runtime *utils.Runtime
|
||||
}
|
||||
|
||||
type rsaKeyPairResponse struct {
|
||||
SessionAccessKey *sessionAccessKey `json:"SessionAccessKey" xml:"SessionAccessKey"`
|
||||
}
|
||||
|
||||
type sessionAccessKey struct {
|
||||
SessionAccessKeyId string `json:"SessionAccessKeyId" xml:"SessionAccessKeyId"`
|
||||
SessionAccessKeySecret string `json:"SessionAccessKeySecret" xml:"SessionAccessKeySecret"`
|
||||
Expiration string `json:"Expiration" xml:"Expiration"`
|
||||
}
|
||||
|
||||
func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int, runtime *utils.Runtime) *RsaKeyPairCredential {
|
||||
return &RsaKeyPairCredential{
|
||||
PrivateKey: privateKey,
|
||||
PublicKeyId: publicKeyId,
|
||||
SessionExpiration: sessionExpiration,
|
||||
credentialUpdater: new(credentialUpdater),
|
||||
runtime: runtime,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns RsaKeyPairCredential's AccessKeyId
|
||||
// if AccessKeyId is not exist or out of date, the function will update it.
|
||||
func (r *RsaKeyPairCredential) GetAccessKeyId() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns RsaKeyPairCredential's AccessKeySecret
|
||||
// if AccessKeySecret is not exist or out of date, the function will update it.
|
||||
func (r *RsaKeyPairCredential) GetAccessKeySecret() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken is useless RsaKeyPairCredential
|
||||
func (r *RsaKeyPairCredential) GetSecurityToken() (*string, error) {
|
||||
return tea.String(""), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless for RsaKeyPairCredential
|
||||
func (r *RsaKeyPairCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns RsaKeyPairCredential's type
|
||||
func (r *RsaKeyPairCredential) GetType() *string {
|
||||
return tea.String("rsa_key_pair")
|
||||
}
|
||||
|
||||
func (r *RsaKeyPairCredential) updateCredential() (err error) {
|
||||
if r.runtime == nil {
|
||||
r.runtime = new(utils.Runtime)
|
||||
}
|
||||
request := request.NewCommonRequest()
|
||||
request.Domain = "sts.aliyuncs.com"
|
||||
if r.runtime.Host != "" {
|
||||
request.Domain = r.runtime.Host
|
||||
}
|
||||
request.Scheme = "HTTPS"
|
||||
request.Method = "GET"
|
||||
request.QueryParams["AccessKeyId"] = r.PublicKeyId
|
||||
request.QueryParams["Action"] = "GenerateSessionAccessKey"
|
||||
request.QueryParams["Format"] = "JSON"
|
||||
if r.SessionExpiration > 0 {
|
||||
if r.SessionExpiration >= 900 && r.SessionExpiration <= 3600 {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(r.SessionExpiration)
|
||||
} else {
|
||||
err = errors.New("[InvalidParam]:Key Pair session duration should be in the range of 15min - 1Hr")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds)
|
||||
}
|
||||
request.QueryParams["SignatureMethod"] = "SHA256withRSA"
|
||||
request.QueryParams["SignatureType"] = "PRIVATEKEY"
|
||||
request.QueryParams["SignatureVersion"] = "1.0"
|
||||
request.QueryParams["Version"] = "2015-04-01"
|
||||
request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601()
|
||||
request.QueryParams["SignatureNonce"] = utils.GetUUID()
|
||||
signature := utils.Sha256WithRsa(request.BuildStringToSign(), r.PrivateKey)
|
||||
request.QueryParams["Signature"] = signature
|
||||
request.Headers["Host"] = request.Domain
|
||||
request.Headers["Accept-Encoding"] = "identity"
|
||||
request.URL = request.BuildURL()
|
||||
content, err := doAction(request, r.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh KeyPair err: %s", err.Error())
|
||||
}
|
||||
var resp *rsaKeyPairResponse
|
||||
err = json.Unmarshal(content, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh KeyPair err: Json Unmarshal fail: %s", err.Error())
|
||||
}
|
||||
if resp == nil || resp.SessionAccessKey == nil {
|
||||
return fmt.Errorf("refresh KeyPair err: SessionAccessKey is empty")
|
||||
}
|
||||
sessionAccessKey := resp.SessionAccessKey
|
||||
if sessionAccessKey.SessionAccessKeyId == "" || sessionAccessKey.SessionAccessKeySecret == "" || sessionAccessKey.Expiration == "" {
|
||||
return fmt.Errorf("refresh KeyPair err: SessionAccessKeyId: %v, SessionAccessKeySecret: %v, Expiration: %v", sessionAccessKey.SessionAccessKeyId, sessionAccessKey.SessionAccessKeySecret, sessionAccessKey.Expiration)
|
||||
}
|
||||
|
||||
expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionAccessKey.Expiration)
|
||||
r.lastUpdateTimestamp = time.Now().Unix()
|
||||
r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
|
||||
r.sessionCredential = &sessionCredential{
|
||||
AccessKeyId: sessionAccessKey.SessionAccessKeyId,
|
||||
AccessKeySecret: sessionAccessKey.SessionAccessKeySecret,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
7
vendor/github.com/aliyun/credentials-go/credentials/session_credential.go
generated
vendored
7
vendor/github.com/aliyun/credentials-go/credentials/session_credential.go
generated
vendored
@@ -1,7 +0,0 @@
|
||||
package credentials
|
||||
|
||||
type sessionCredential struct {
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
SecurityToken string
|
||||
}
|
||||
43
vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go
generated
vendored
43
vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go
generated
vendored
@@ -1,43 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import "github.com/alibabacloud-go/tea/tea"
|
||||
|
||||
// StsTokenCredential is a kind of credentials
|
||||
type StsTokenCredential struct {
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
SecurityToken string
|
||||
}
|
||||
|
||||
func newStsTokenCredential(accessKeyId, accessKeySecret, securityToken string) *StsTokenCredential {
|
||||
return &StsTokenCredential{
|
||||
AccessKeyId: accessKeyId,
|
||||
AccessKeySecret: accessKeySecret,
|
||||
SecurityToken: securityToken,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns StsTokenCredential's AccessKeyId
|
||||
func (s *StsTokenCredential) GetAccessKeyId() (*string, error) {
|
||||
return tea.String(s.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns StsTokenCredential's AccessKeySecret
|
||||
func (s *StsTokenCredential) GetAccessKeySecret() (*string, error) {
|
||||
return tea.String(s.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken reutrns StsTokenCredential's SecurityToken
|
||||
func (s *StsTokenCredential) GetSecurityToken() (*string, error) {
|
||||
return tea.String(s.SecurityToken), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless StsTokenCredential
|
||||
func (s *StsTokenCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns StsTokenCredential's type
|
||||
func (s *StsTokenCredential) GetType() *string {
|
||||
return tea.String("sts")
|
||||
}
|
||||
163
vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go
generated
vendored
163
vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go
generated
vendored
@@ -1,163 +0,0 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
const defaultDurationSeconds = 3600
|
||||
|
||||
// RAMRoleArnCredential is a kind of credentials
|
||||
type RAMRoleArnCredential struct {
|
||||
*credentialUpdater
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
RoleArn string
|
||||
RoleSessionName string
|
||||
RoleSessionExpiration int
|
||||
Policy string
|
||||
sessionCredential *sessionCredential
|
||||
runtime *utils.Runtime
|
||||
}
|
||||
|
||||
type ramRoleArnResponse struct {
|
||||
Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"`
|
||||
}
|
||||
|
||||
type credentialsInResponse struct {
|
||||
AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"`
|
||||
AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
|
||||
SecurityToken string `json:"SecurityToken" xml:"SecurityToken"`
|
||||
Expiration string `json:"Expiration" xml:"Expiration"`
|
||||
}
|
||||
|
||||
func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, runtime *utils.Runtime) *RAMRoleArnCredential {
|
||||
return &RAMRoleArnCredential{
|
||||
AccessKeyId: accessKeyId,
|
||||
AccessKeySecret: accessKeySecret,
|
||||
RoleArn: roleArn,
|
||||
RoleSessionName: roleSessionName,
|
||||
RoleSessionExpiration: roleSessionExpiration,
|
||||
Policy: policy,
|
||||
credentialUpdater: new(credentialUpdater),
|
||||
runtime: runtime,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns RamRoleArnCredential's AccessKeyId
|
||||
// if AccessKeyId is not exist or out of date, the function will update it.
|
||||
func (r *RAMRoleArnCredential) GetAccessKeyId() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns RamRoleArnCredential's AccessKeySecret
|
||||
// if AccessKeySecret is not exist or out of date, the function will update it.
|
||||
func (r *RAMRoleArnCredential) GetAccessKeySecret() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken reutrns RamRoleArnCredential's SecurityToken
|
||||
// if SecurityToken is not exist or out of date, the function will update it.
|
||||
func (r *RAMRoleArnCredential) GetSecurityToken() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.SecurityToken), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless RamRoleArnCredential
|
||||
func (r *RAMRoleArnCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns RamRoleArnCredential's type
|
||||
func (r *RAMRoleArnCredential) GetType() *string {
|
||||
return tea.String("ram_role_arn")
|
||||
}
|
||||
|
||||
func (r *RAMRoleArnCredential) updateCredential() (err error) {
|
||||
if r.runtime == nil {
|
||||
r.runtime = new(utils.Runtime)
|
||||
}
|
||||
request := request.NewCommonRequest()
|
||||
request.Domain = "sts.aliyuncs.com"
|
||||
request.Scheme = "HTTPS"
|
||||
request.Method = "GET"
|
||||
request.QueryParams["AccessKeyId"] = r.AccessKeyId
|
||||
request.QueryParams["Action"] = "AssumeRole"
|
||||
request.QueryParams["Format"] = "JSON"
|
||||
if r.RoleSessionExpiration > 0 {
|
||||
if r.RoleSessionExpiration >= 900 && r.RoleSessionExpiration <= 3600 {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration)
|
||||
} else {
|
||||
err = errors.New("[InvalidParam]:Assume Role session duration should be in the range of 15min - 1Hr")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds)
|
||||
}
|
||||
request.QueryParams["RoleArn"] = r.RoleArn
|
||||
if r.Policy != "" {
|
||||
request.QueryParams["Policy"] = r.Policy
|
||||
}
|
||||
request.QueryParams["RoleSessionName"] = r.RoleSessionName
|
||||
request.QueryParams["SignatureMethod"] = "HMAC-SHA1"
|
||||
request.QueryParams["SignatureVersion"] = "1.0"
|
||||
request.QueryParams["Version"] = "2015-04-01"
|
||||
request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601()
|
||||
request.QueryParams["SignatureNonce"] = utils.GetUUID()
|
||||
signature := utils.ShaHmac1(request.BuildStringToSign(), r.AccessKeySecret+"&")
|
||||
request.QueryParams["Signature"] = signature
|
||||
request.Headers["Host"] = request.Domain
|
||||
request.Headers["Accept-Encoding"] = "identity"
|
||||
request.URL = request.BuildURL()
|
||||
content, err := doAction(request, r.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error())
|
||||
}
|
||||
var resp *ramRoleArnResponse
|
||||
err = json.Unmarshal(content, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error())
|
||||
}
|
||||
if resp == nil || resp.Credentials == nil {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty")
|
||||
}
|
||||
respCredentials := resp.Credentials
|
||||
if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration)
|
||||
}
|
||||
|
||||
expirationTime, err := time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration)
|
||||
r.lastUpdateTimestamp = time.Now().Unix()
|
||||
r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
|
||||
r.sessionCredential = &sessionCredential{
|
||||
AccessKeyId: respCredentials.AccessKeyId,
|
||||
AccessKeySecret: respCredentials.AccessKeySecret,
|
||||
SecurityToken: respCredentials.SecurityToken,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
35
vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go
generated
vendored
35
vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go
generated
vendored
@@ -1,35 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Runtime is for setting timeout, proxy and host
|
||||
type Runtime struct {
|
||||
ReadTimeout int
|
||||
ConnectTimeout int
|
||||
Proxy string
|
||||
Host string
|
||||
}
|
||||
|
||||
// NewRuntime returns a Runtime
|
||||
func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime {
|
||||
return &Runtime{
|
||||
ReadTimeout: readTimeout,
|
||||
ConnectTimeout: connectTimeout,
|
||||
Proxy: proxy,
|
||||
Host: host,
|
||||
}
|
||||
}
|
||||
|
||||
// Timeout is for connect Timeout
|
||||
func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) {
|
||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{
|
||||
Timeout: connectTimeout,
|
||||
DualStack: true,
|
||||
}).DialContext(ctx, network, address)
|
||||
}
|
||||
}
|
||||
146
vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go
generated
vendored
146
vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go
generated
vendored
@@ -1,146 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"io"
|
||||
rand2 "math/rand"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
type uuid [16]byte
|
||||
|
||||
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) {
|
||||
return fn
|
||||
}
|
||||
|
||||
var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
|
||||
return fn
|
||||
}
|
||||
|
||||
// GetUUID returns a uuid
|
||||
func GetUUID() (uuidHex string) {
|
||||
uuid := newUUID()
|
||||
uuidHex = hex.EncodeToString(uuid[:])
|
||||
return
|
||||
}
|
||||
|
||||
// RandStringBytes returns a rand string
|
||||
func RandStringBytes(n int) string {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = letterBytes[rand2.Intn(len(letterBytes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// ShaHmac1 return a string which has been hashed
|
||||
func ShaHmac1(source, secret string) string {
|
||||
key := []byte(secret)
|
||||
hmac := hmac.New(sha1.New, key)
|
||||
hmac.Write([]byte(source))
|
||||
signedBytes := hmac.Sum(nil)
|
||||
signedString := base64.StdEncoding.EncodeToString(signedBytes)
|
||||
return signedString
|
||||
}
|
||||
|
||||
// Sha256WithRsa return a string which has been hashed with Rsa
|
||||
func Sha256WithRsa(source, secret string) string {
|
||||
decodeString, err := base64.StdEncoding.DecodeString(secret)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
private, err := x509.ParsePKCS8PrivateKey(decodeString)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
h := crypto.Hash.New(crypto.SHA256)
|
||||
h.Write([]byte(source))
|
||||
hashed := h.Sum(nil)
|
||||
signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey),
|
||||
crypto.SHA256, hashed)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return base64.StdEncoding.EncodeToString(signature)
|
||||
}
|
||||
|
||||
// GetMD5Base64 returns a string which has been base64
|
||||
func GetMD5Base64(bytes []byte) (base64Value string) {
|
||||
md5Ctx := md5.New()
|
||||
md5Ctx.Write(bytes)
|
||||
md5Value := md5Ctx.Sum(nil)
|
||||
base64Value = base64.StdEncoding.EncodeToString(md5Value)
|
||||
return
|
||||
}
|
||||
|
||||
// GetTimeInFormatISO8601 returns a time string
|
||||
func GetTimeInFormatISO8601() (timeStr string) {
|
||||
gmt := time.FixedZone("GMT", 0)
|
||||
|
||||
return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
|
||||
}
|
||||
|
||||
// GetURLFormedMap returns a url encoded string
|
||||
func GetURLFormedMap(source map[string]string) (urlEncoded string) {
|
||||
urlEncoder := url.Values{}
|
||||
for key, value := range source {
|
||||
urlEncoder.Add(key, value)
|
||||
}
|
||||
urlEncoded = urlEncoder.Encode()
|
||||
return
|
||||
}
|
||||
|
||||
func newUUID() uuid {
|
||||
ns := uuid{}
|
||||
safeRandom(ns[:])
|
||||
u := newFromHash(md5.New(), ns, RandStringBytes(16))
|
||||
u[6] = (u[6] & 0x0f) | (byte(2) << 4)
|
||||
u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func newFromHash(h hash.Hash, ns uuid, name string) uuid {
|
||||
u := uuid{}
|
||||
h.Write(ns[:])
|
||||
h.Write([]byte(name))
|
||||
copy(u[:], h.Sum(nil))
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func safeRandom(dest []byte) {
|
||||
if _, err := hookRead(rand.Read)(dest); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (u uuid) String() string {
|
||||
buf := make([]byte, 36)
|
||||
|
||||
hex.Encode(buf[0:8], u[0:4])
|
||||
buf[8] = '-'
|
||||
hex.Encode(buf[9:13], u[4:6])
|
||||
buf[13] = '-'
|
||||
hex.Encode(buf[14:18], u[6:8])
|
||||
buf[18] = '-'
|
||||
hex.Encode(buf[19:23], u[8:10])
|
||||
buf[23] = '-'
|
||||
hex.Encode(buf[24:], u[10:])
|
||||
|
||||
return string(buf)
|
||||
}
|
||||
52
vendor/github.com/bytedance/sonic/.gitignore
generated
vendored
52
vendor/github.com/bytedance/sonic/.gitignore
generated
vendored
@@ -1,52 +0,0 @@
|
||||
*.o
|
||||
*.swp
|
||||
*.swm
|
||||
*.swn
|
||||
*.a
|
||||
*.so
|
||||
_obj
|
||||
_test
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
_testmain.go
|
||||
*.exe
|
||||
*.exe~
|
||||
*.test
|
||||
*.prof
|
||||
*.rar
|
||||
*.zip
|
||||
*.gz
|
||||
*.psd
|
||||
*.bmd
|
||||
*.cfg
|
||||
*.pptx
|
||||
*.log
|
||||
*nohup.out
|
||||
*settings.pyc
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
.DS_Store
|
||||
/.idea/
|
||||
/.vscode/
|
||||
/output/
|
||||
/vendor/
|
||||
/Gopkg.lock
|
||||
/Gopkg.toml
|
||||
coverage.html
|
||||
coverage.out
|
||||
coverage.xml
|
||||
junit.xml
|
||||
*.profile
|
||||
*.svg
|
||||
*.out
|
||||
ast/test.out
|
||||
ast/bench.sh
|
||||
|
||||
!testdata/*.json.gz
|
||||
fuzz/testdata
|
||||
*__debug_bin
|
||||
6
vendor/github.com/bytedance/sonic/.gitmodules
generated
vendored
6
vendor/github.com/bytedance/sonic/.gitmodules
generated
vendored
@@ -1,6 +0,0 @@
|
||||
[submodule "cloudwego"]
|
||||
path = tools/asm2asm
|
||||
url = https://github.com/cloudwego/asm2asm.git
|
||||
[submodule "tools/simde"]
|
||||
path = tools/simde
|
||||
url = https://github.com/simd-everywhere/simde.git
|
||||
24
vendor/github.com/bytedance/sonic/.licenserc.yaml
generated
vendored
24
vendor/github.com/bytedance/sonic/.licenserc.yaml
generated
vendored
@@ -1,24 +0,0 @@
|
||||
header:
|
||||
license:
|
||||
spdx-id: Apache-2.0
|
||||
copyright-owner: ByteDance Inc.
|
||||
|
||||
paths:
|
||||
- '**/*.go'
|
||||
- '**/*.s'
|
||||
|
||||
paths-ignore:
|
||||
- 'ast/asm.s' # empty file
|
||||
- 'decoder/asm.s' # empty file
|
||||
- 'encoder/asm.s' # empty file
|
||||
- 'internal/caching/asm.s' # empty file
|
||||
- 'internal/jit/asm.s' # empty file
|
||||
- 'internal/native/avx/native_amd64.s' # auto-generated by asm2asm
|
||||
- 'internal/native/avx/native_subr_amd64.go' # auto-generated by asm2asm
|
||||
- 'internal/native/avx2/native_amd64.s' # auto-generated by asm2asm
|
||||
- 'internal/native/avx2/native_subr_amd64.go' # auto-generated by asm2asm
|
||||
- 'internal/resolver/asm.s' # empty file
|
||||
- 'internal/rt/asm.s' # empty file
|
||||
- 'internal/loader/asm.s' # empty file
|
||||
|
||||
comment: on-failure
|
||||
128
vendor/github.com/bytedance/sonic/CODE_OF_CONDUCT.md
generated
vendored
128
vendor/github.com/bytedance/sonic/CODE_OF_CONDUCT.md
generated
vendored
@@ -1,128 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
wudi.daniel@bytedance.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user