init repo
This commit is contained in:
14
vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
generated
vendored
Normal file
14
vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
Copyright (c) 2015 aliyun.com
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
|
||||
Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
339
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
generated
vendored
Normal file
339
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
generated
vendored
Normal file
@@ -0,0 +1,339 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
|
||||
type headerSorter struct {
|
||||
Keys []string
|
||||
Vals []string
|
||||
}
|
||||
|
||||
// getAdditionalHeaderKeys get exist key in http header
|
||||
func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
|
||||
var keysList []string
|
||||
keysMap := make(map[string]string)
|
||||
srcKeys := make(map[string]string)
|
||||
|
||||
for k := range req.Header {
|
||||
srcKeys[strings.ToLower(k)] = ""
|
||||
}
|
||||
|
||||
for _, v := range conn.config.AdditionalHeaders {
|
||||
if _, ok := srcKeys[strings.ToLower(v)]; ok {
|
||||
keysMap[strings.ToLower(v)] = ""
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keysMap {
|
||||
keysList = append(keysList, k)
|
||||
}
|
||||
sort.Strings(keysList)
|
||||
return keysList, keysMap
|
||||
}
|
||||
|
||||
// getAdditionalHeaderKeysV4 get exist key in http header
|
||||
func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
|
||||
var keysList []string
|
||||
keysMap := make(map[string]string)
|
||||
srcKeys := make(map[string]string)
|
||||
|
||||
for k := range req.Header {
|
||||
srcKeys[strings.ToLower(k)] = ""
|
||||
}
|
||||
|
||||
for _, v := range conn.config.AdditionalHeaders {
|
||||
if _, ok := srcKeys[strings.ToLower(v)]; ok {
|
||||
if !strings.EqualFold(v, HTTPHeaderContentMD5) && !strings.EqualFold(v, HTTPHeaderContentType) {
|
||||
keysMap[strings.ToLower(v)] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keysMap {
|
||||
keysList = append(keysList, k)
|
||||
}
|
||||
sort.Strings(keysList)
|
||||
return keysList, keysMap
|
||||
}
|
||||
|
||||
// signHeader signs the header and sets it as the authorization header.
|
||||
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string, credentials Credentials) {
|
||||
akIf := credentials
|
||||
authorizationStr := ""
|
||||
if conn.config.AuthVersion == AuthV4 {
|
||||
strDay := ""
|
||||
strDate := req.Header.Get(HttpHeaderOssDate)
|
||||
if strDate == "" {
|
||||
strDate = req.Header.Get(HTTPHeaderDate)
|
||||
t, _ := time.Parse(http.TimeFormat, strDate)
|
||||
strDay = t.Format("20060102")
|
||||
} else {
|
||||
t, _ := time.Parse(timeFormatV4, strDate)
|
||||
strDay = t.Format("20060102")
|
||||
}
|
||||
signHeaderProduct := conn.config.GetSignProduct()
|
||||
signHeaderRegion := conn.config.GetSignRegion()
|
||||
|
||||
additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
|
||||
if len(additionalList) > 0 {
|
||||
authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
|
||||
additionnalHeadersStr := strings.Join(additionalList, ";")
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionnalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret(), nil))
|
||||
} else {
|
||||
authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret(), nil))
|
||||
}
|
||||
} else if conn.config.AuthVersion == AuthV2 {
|
||||
additionalList, _ := conn.getAdditionalHeaderKeys(req)
|
||||
if len(additionalList) > 0 {
|
||||
authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
|
||||
additionnalHeadersStr := strings.Join(additionalList, ";")
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
|
||||
} else {
|
||||
authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
|
||||
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
|
||||
}
|
||||
} else {
|
||||
// Get the final authorization string
|
||||
authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
|
||||
}
|
||||
|
||||
// Give the parameter "Authorization" value
|
||||
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
|
||||
}
|
||||
|
||||
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
|
||||
// Find out the "x-oss-"'s address in header of the request
|
||||
ossHeadersMap := make(map[string]string)
|
||||
additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
|
||||
for k, v := range req.Header {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
|
||||
ossHeadersMap[strings.ToLower(k)] = v[0]
|
||||
} else if conn.config.AuthVersion == AuthV2 {
|
||||
if _, ok := additionalMap[strings.ToLower(k)]; ok {
|
||||
ossHeadersMap[strings.ToLower(k)] = v[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
hs := newHeaderSorter(ossHeadersMap)
|
||||
|
||||
// Sort the ossHeadersMap by the ascending order
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
canonicalizedOSSHeaders := ""
|
||||
for i := range hs.Keys {
|
||||
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
|
||||
}
|
||||
|
||||
// Give other parameters values
|
||||
// when sign URL, date is expires
|
||||
date := req.Header.Get(HTTPHeaderDate)
|
||||
contentType := req.Header.Get(HTTPHeaderContentType)
|
||||
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
|
||||
|
||||
// default is v1 signature
|
||||
signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
|
||||
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
|
||||
|
||||
// v2 signature
|
||||
if conn.config.AuthVersion == AuthV2 {
|
||||
signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
|
||||
h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
|
||||
}
|
||||
|
||||
if conn.config.LogLevel >= Debug {
|
||||
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
|
||||
}
|
||||
|
||||
io.WriteString(h, signStr)
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
|
||||
return signedStr
|
||||
}
|
||||
|
||||
func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string, signingTime *time.Time) string {
|
||||
// Find out the "x-oss-"'s address in header of the request
|
||||
ossHeadersMap := make(map[string]string)
|
||||
additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
|
||||
for k, v := range req.Header {
|
||||
lowKey := strings.ToLower(k)
|
||||
if strings.EqualFold(lowKey, HTTPHeaderContentMD5) ||
|
||||
strings.EqualFold(lowKey, HTTPHeaderContentType) ||
|
||||
strings.HasPrefix(lowKey, "x-oss-") {
|
||||
ossHeadersMap[lowKey] = strings.Trim(v[0], " ")
|
||||
} else {
|
||||
if _, ok := additionalMap[lowKey]; ok {
|
||||
ossHeadersMap[lowKey] = strings.Trim(v[0], " ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get day,eg 20210914
|
||||
//signingTime
|
||||
signDate := ""
|
||||
strDay := ""
|
||||
if signingTime != nil {
|
||||
signDate = signingTime.Format(timeFormatV4)
|
||||
strDay = signingTime.Format(shortTimeFormatV4)
|
||||
} else {
|
||||
var t time.Time
|
||||
// Required parameters
|
||||
if date := req.Header.Get(HTTPHeaderDate); date != "" {
|
||||
signDate = date
|
||||
t, _ = time.Parse(http.TimeFormat, date)
|
||||
}
|
||||
|
||||
if ossDate := req.Header.Get(HttpHeaderOssDate); ossDate != "" {
|
||||
signDate = ossDate
|
||||
t, _ = time.Parse(timeFormatV4, ossDate)
|
||||
}
|
||||
|
||||
strDay = t.Format("20060102")
|
||||
}
|
||||
|
||||
hs := newHeaderSorter(ossHeadersMap)
|
||||
|
||||
// Sort the ossHeadersMap by the ascending order
|
||||
hs.Sort()
|
||||
|
||||
// Get the canonicalizedOSSHeaders
|
||||
canonicalizedOSSHeaders := ""
|
||||
for i := range hs.Keys {
|
||||
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
|
||||
}
|
||||
|
||||
signStr := ""
|
||||
|
||||
// v4 signature
|
||||
hashedPayload := DefaultContentSha256
|
||||
if val := req.Header.Get(HttpHeaderOssContentSha256); val != "" {
|
||||
hashedPayload = val
|
||||
}
|
||||
|
||||
// subResource
|
||||
resource := canonicalizedResource
|
||||
subResource := ""
|
||||
subPos := strings.LastIndex(canonicalizedResource, "?")
|
||||
if subPos != -1 {
|
||||
subResource = canonicalizedResource[subPos+1:]
|
||||
resource = canonicalizedResource[0:subPos]
|
||||
}
|
||||
|
||||
// get canonical request
|
||||
canonicalReuqest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
|
||||
rh := sha256.New()
|
||||
io.WriteString(rh, canonicalReuqest)
|
||||
hashedRequest := hex.EncodeToString(rh.Sum(nil))
|
||||
|
||||
if conn.config.LogLevel >= Debug {
|
||||
conn.config.WriteLog(Debug, "[Req:%p]CanonicalRequest:%s\n", req, EscapeLFString(canonicalReuqest))
|
||||
}
|
||||
|
||||
// Product & Region
|
||||
signedStrV4Product := conn.config.GetSignProduct()
|
||||
signedStrV4Region := conn.config.GetSignRegion()
|
||||
|
||||
signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
|
||||
if conn.config.LogLevel >= Debug {
|
||||
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
|
||||
}
|
||||
|
||||
h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
|
||||
io.WriteString(h1, strDay)
|
||||
h1Key := h1.Sum(nil)
|
||||
|
||||
h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
|
||||
io.WriteString(h2, signedStrV4Region)
|
||||
h2Key := h2.Sum(nil)
|
||||
|
||||
h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
|
||||
io.WriteString(h3, signedStrV4Product)
|
||||
h3Key := h3.Sum(nil)
|
||||
|
||||
h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
|
||||
io.WriteString(h4, "aliyun_v4_request")
|
||||
h4Key := h4.Sum(nil)
|
||||
|
||||
h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
|
||||
io.WriteString(h, signStr)
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
|
||||
if params[HTTPParamAccessKeyID] == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
|
||||
canonParamsKeys := []string{}
|
||||
for key := range params {
|
||||
if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
|
||||
canonParamsKeys = append(canonParamsKeys, key)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(canonParamsKeys)
|
||||
canonParamsStr := ""
|
||||
for _, key := range canonParamsKeys {
|
||||
canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
|
||||
}
|
||||
|
||||
expireStr := strconv.FormatInt(expiration, 10)
|
||||
signStr := expireStr + "\n" + canonParamsStr + canonResource
|
||||
|
||||
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
|
||||
io.WriteString(h, signStr)
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
return signedStr
|
||||
}
|
||||
|
||||
// newHeaderSorter is an additional function for function SignHeader.
|
||||
func newHeaderSorter(m map[string]string) *headerSorter {
|
||||
hs := &headerSorter{
|
||||
Keys: make([]string, 0, len(m)),
|
||||
Vals: make([]string, 0, len(m)),
|
||||
}
|
||||
|
||||
for k, v := range m {
|
||||
hs.Keys = append(hs.Keys, k)
|
||||
hs.Vals = append(hs.Vals, v)
|
||||
}
|
||||
return hs
|
||||
}
|
||||
|
||||
// Sort is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Sort() {
|
||||
sort.Sort(hs)
|
||||
}
|
||||
|
||||
// Len is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Len() int {
|
||||
return len(hs.Vals)
|
||||
}
|
||||
|
||||
// Less is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Less(i, j int) bool {
|
||||
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
|
||||
}
|
||||
|
||||
// Swap is an additional function for function SignHeader.
|
||||
func (hs *headerSorter) Swap(i, j int) {
|
||||
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
|
||||
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
|
||||
}
|
||||
1321
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
generated
vendored
Normal file
1321
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2956
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
generated
vendored
Normal file
2956
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
301
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
generated
vendored
Normal file
301
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
generated
vendored
Normal file
@@ -0,0 +1,301 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Define the level of the output log
|
||||
const (
|
||||
LogOff = iota
|
||||
Error
|
||||
Warn
|
||||
Info
|
||||
Debug
|
||||
)
|
||||
|
||||
// LogTag Tag for each level of log
|
||||
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
|
||||
|
||||
// HTTPTimeout defines HTTP timeout.
|
||||
type HTTPTimeout struct {
|
||||
ConnectTimeout time.Duration
|
||||
ReadWriteTimeout time.Duration
|
||||
HeaderTimeout time.Duration
|
||||
LongTimeout time.Duration
|
||||
IdleConnTimeout time.Duration
|
||||
}
|
||||
|
||||
// HTTPMaxConns defines max idle connections and max idle connections per host
|
||||
type HTTPMaxConns struct {
|
||||
MaxIdleConns int
|
||||
MaxIdleConnsPerHost int
|
||||
MaxConnsPerHost int
|
||||
}
|
||||
|
||||
// Credentials is interface for get AccessKeyID,AccessKeySecret,SecurityToken
|
||||
type Credentials interface {
|
||||
GetAccessKeyID() string
|
||||
GetAccessKeySecret() string
|
||||
GetSecurityToken() string
|
||||
}
|
||||
|
||||
// CredentialsProvider is interface for get Credential Info
|
||||
type CredentialsProvider interface {
|
||||
GetCredentials() Credentials
|
||||
}
|
||||
|
||||
type CredentialsProviderE interface {
|
||||
CredentialsProvider
|
||||
GetCredentialsE() (Credentials, error)
|
||||
}
|
||||
|
||||
type defaultCredentials struct {
|
||||
config *Config
|
||||
}
|
||||
|
||||
func (defCre *defaultCredentials) GetAccessKeyID() string {
|
||||
return defCre.config.AccessKeyID
|
||||
}
|
||||
|
||||
func (defCre *defaultCredentials) GetAccessKeySecret() string {
|
||||
return defCre.config.AccessKeySecret
|
||||
}
|
||||
|
||||
func (defCre *defaultCredentials) GetSecurityToken() string {
|
||||
return defCre.config.SecurityToken
|
||||
}
|
||||
|
||||
type defaultCredentialsProvider struct {
|
||||
config *Config
|
||||
}
|
||||
|
||||
func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
|
||||
return &defaultCredentials{config: defBuild.config}
|
||||
}
|
||||
|
||||
type envCredentials struct {
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
SecurityToken string
|
||||
}
|
||||
|
||||
type EnvironmentVariableCredentialsProvider struct {
|
||||
cred Credentials
|
||||
}
|
||||
|
||||
func (credentials *envCredentials) GetAccessKeyID() string {
|
||||
return credentials.AccessKeyId
|
||||
}
|
||||
|
||||
func (credentials *envCredentials) GetAccessKeySecret() string {
|
||||
return credentials.AccessKeySecret
|
||||
}
|
||||
|
||||
func (credentials *envCredentials) GetSecurityToken() string {
|
||||
return credentials.SecurityToken
|
||||
}
|
||||
|
||||
func (defBuild *EnvironmentVariableCredentialsProvider) GetCredentials() Credentials {
|
||||
var accessID, accessKey, token string
|
||||
if defBuild.cred == nil {
|
||||
accessID = os.Getenv("OSS_ACCESS_KEY_ID")
|
||||
accessKey = os.Getenv("OSS_ACCESS_KEY_SECRET")
|
||||
token = os.Getenv("OSS_SESSION_TOKEN")
|
||||
} else {
|
||||
accessID = defBuild.cred.GetAccessKeyID()
|
||||
accessKey = defBuild.cred.GetAccessKeySecret()
|
||||
token = defBuild.cred.GetSecurityToken()
|
||||
}
|
||||
|
||||
return &envCredentials{
|
||||
AccessKeyId: accessID,
|
||||
AccessKeySecret: accessKey,
|
||||
SecurityToken: token,
|
||||
}
|
||||
}
|
||||
|
||||
func NewEnvironmentVariableCredentialsProvider() (EnvironmentVariableCredentialsProvider, error) {
|
||||
var provider EnvironmentVariableCredentialsProvider
|
||||
accessID := os.Getenv("OSS_ACCESS_KEY_ID")
|
||||
if accessID == "" {
|
||||
return provider, fmt.Errorf("access key id is empty!")
|
||||
}
|
||||
accessKey := os.Getenv("OSS_ACCESS_KEY_SECRET")
|
||||
if accessKey == "" {
|
||||
return provider, fmt.Errorf("access key secret is empty!")
|
||||
}
|
||||
token := os.Getenv("OSS_SESSION_TOKEN")
|
||||
envCredential := &envCredentials{
|
||||
AccessKeyId: accessID,
|
||||
AccessKeySecret: accessKey,
|
||||
SecurityToken: token,
|
||||
}
|
||||
return EnvironmentVariableCredentialsProvider{
|
||||
cred: envCredential,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Config defines oss configuration
|
||||
type Config struct {
|
||||
Endpoint string // OSS endpoint
|
||||
AccessKeyID string // AccessId
|
||||
AccessKeySecret string // AccessKey
|
||||
RetryTimes uint // Retry count by default it's 5.
|
||||
UserAgent string // SDK name/version/system information
|
||||
IsDebug bool // Enable debug mode. Default is false.
|
||||
Timeout uint // Timeout in seconds. By default it's 60.
|
||||
SecurityToken string // STS Token
|
||||
IsCname bool // If cname is in the endpoint.
|
||||
IsPathStyle bool // If Path Style is in the endpoint.
|
||||
HTTPTimeout HTTPTimeout // HTTP timeout
|
||||
HTTPMaxConns HTTPMaxConns // Http max connections
|
||||
IsUseProxy bool // Flag of using proxy.
|
||||
ProxyHost string // Flag of using proxy host.
|
||||
IsAuthProxy bool // Flag of needing authentication.
|
||||
ProxyUser string // Proxy user
|
||||
ProxyPassword string // Proxy password
|
||||
IsEnableMD5 bool // Flag of enabling MD5 for upload.
|
||||
MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
|
||||
IsEnableCRC bool // Flag of enabling CRC for upload.
|
||||
LogLevel int // Log level
|
||||
Logger *log.Logger // For write log
|
||||
UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited
|
||||
UploadLimiter *OssLimiter // Bandwidth limit reader for upload
|
||||
DownloadLimitSpeed int // Download limit speed:KB/s, 0 is unlimited
|
||||
DownloadLimiter *OssLimiter // Bandwidth limit reader for download
|
||||
CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
|
||||
LocalAddr net.Addr // local client host info
|
||||
UserSetUa bool // UserAgent is set by user or not
|
||||
AuthVersion AuthVersionType // v1 or v2, v4 signature,default is v1
|
||||
AdditionalHeaders []string // special http headers needed to be sign
|
||||
RedirectEnabled bool // only effective from go1.7 onward, enable http redirect or not
|
||||
InsecureSkipVerify bool // for https, Whether to skip verifying the server certificate file
|
||||
Region string // such as cn-hangzhou
|
||||
CloudBoxId string //
|
||||
Product string // oss or oss-cloudbox, default is oss
|
||||
VerifyObjectStrict bool // a flag of verifying object name strictly. Default is enable.
|
||||
}
|
||||
|
||||
// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0
|
||||
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
|
||||
if uploadSpeed < 0 {
|
||||
return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
|
||||
} else if uploadSpeed == 0 {
|
||||
config.UploadLimitSpeed = 0
|
||||
config.UploadLimiter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
|
||||
if err == nil {
|
||||
config.UploadLimitSpeed = uploadSpeed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// LimitDownLoadSpeed downloadSpeed:KB/s, 0 is unlimited,default is 0
|
||||
func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
|
||||
if downloadSpeed < 0 {
|
||||
return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
|
||||
} else if downloadSpeed == 0 {
|
||||
config.DownloadLimitSpeed = 0
|
||||
config.DownloadLimiter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
|
||||
if err == nil {
|
||||
config.DownloadLimitSpeed = downloadSpeed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteLog output log function
|
||||
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
|
||||
if config.LogLevel < LogLevel || config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var logBuffer bytes.Buffer
|
||||
logBuffer.WriteString(LogTag[LogLevel-1])
|
||||
logBuffer.WriteString(fmt.Sprintf(format, a...))
|
||||
config.Logger.Printf("%s", logBuffer.String())
|
||||
}
|
||||
|
||||
// for get Credentials
|
||||
func (config *Config) GetCredentials() Credentials {
|
||||
return config.CredentialsProvider.GetCredentials()
|
||||
}
|
||||
|
||||
// for get Sign Product
|
||||
func (config *Config) GetSignProduct() string {
|
||||
if config.CloudBoxId != "" {
|
||||
return "oss-cloudbox"
|
||||
}
|
||||
return "oss"
|
||||
}
|
||||
|
||||
// for get Sign Region
|
||||
func (config *Config) GetSignRegion() string {
|
||||
if config.CloudBoxId != "" {
|
||||
return config.CloudBoxId
|
||||
}
|
||||
return config.Region
|
||||
}
|
||||
|
||||
// getDefaultOssConfig gets the default configuration.
|
||||
func getDefaultOssConfig() *Config {
|
||||
config := Config{}
|
||||
|
||||
config.Endpoint = ""
|
||||
config.AccessKeyID = ""
|
||||
config.AccessKeySecret = ""
|
||||
config.RetryTimes = 5
|
||||
config.IsDebug = false
|
||||
config.UserAgent = userAgent()
|
||||
config.Timeout = 60 // Seconds
|
||||
config.SecurityToken = ""
|
||||
config.IsCname = false
|
||||
config.IsPathStyle = false
|
||||
|
||||
config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s
|
||||
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
|
||||
config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
|
||||
config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
|
||||
config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
|
||||
config.HTTPMaxConns.MaxIdleConns = 100
|
||||
config.HTTPMaxConns.MaxIdleConnsPerHost = 100
|
||||
|
||||
config.IsUseProxy = false
|
||||
config.ProxyHost = ""
|
||||
config.IsAuthProxy = false
|
||||
config.ProxyUser = ""
|
||||
config.ProxyPassword = ""
|
||||
|
||||
config.MD5Threshold = 16 * 1024 * 1024 // 16MB
|
||||
config.IsEnableMD5 = false
|
||||
config.IsEnableCRC = true
|
||||
|
||||
config.LogLevel = LogOff
|
||||
config.Logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
provider := &defaultCredentialsProvider{config: &config}
|
||||
config.CredentialsProvider = provider
|
||||
|
||||
config.AuthVersion = AuthV1
|
||||
config.RedirectEnabled = true
|
||||
config.InsecureSkipVerify = false
|
||||
|
||||
config.Product = "oss"
|
||||
|
||||
config.VerifyObjectStrict = true
|
||||
|
||||
return &config
|
||||
}
|
||||
1021
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
generated
vendored
Normal file
1021
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
273
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
generated
vendored
Normal file
273
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
generated
vendored
Normal file
@@ -0,0 +1,273 @@
|
||||
package oss
|
||||
|
||||
import "os"
|
||||
|
||||
// ACLType bucket/object ACL
|
||||
type ACLType string
|
||||
|
||||
const (
|
||||
// ACLPrivate definition : private read and write
|
||||
ACLPrivate ACLType = "private"
|
||||
|
||||
// ACLPublicRead definition : public read and private write
|
||||
ACLPublicRead ACLType = "public-read"
|
||||
|
||||
// ACLPublicReadWrite definition : public read and public write
|
||||
ACLPublicReadWrite ACLType = "public-read-write"
|
||||
|
||||
// ACLDefault Object. It's only applicable for object.
|
||||
ACLDefault ACLType = "default"
|
||||
)
|
||||
|
||||
// bucket versioning status
|
||||
type VersioningStatus string
|
||||
|
||||
const (
|
||||
// Versioning Status definition: Enabled
|
||||
VersionEnabled VersioningStatus = "Enabled"
|
||||
|
||||
// Versioning Status definition: Suspended
|
||||
VersionSuspended VersioningStatus = "Suspended"
|
||||
)
|
||||
|
||||
// MetadataDirectiveType specifying whether use the metadata of source object when copying object.
|
||||
type MetadataDirectiveType string
|
||||
|
||||
const (
|
||||
// MetaCopy the target object's metadata is copied from the source one
|
||||
MetaCopy MetadataDirectiveType = "COPY"
|
||||
|
||||
// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
|
||||
MetaReplace MetadataDirectiveType = "REPLACE"
|
||||
)
|
||||
|
||||
// TaggingDirectiveType specifying whether use the tagging of source object when copying object.
|
||||
type TaggingDirectiveType string
|
||||
|
||||
const (
|
||||
// TaggingCopy the target object's tagging is copied from the source one
|
||||
TaggingCopy TaggingDirectiveType = "COPY"
|
||||
|
||||
// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
|
||||
TaggingReplace TaggingDirectiveType = "REPLACE"
|
||||
)
|
||||
|
||||
// AlgorithmType specifying the server side encryption algorithm name
|
||||
type AlgorithmType string
|
||||
|
||||
const (
|
||||
KMSAlgorithm AlgorithmType = "KMS"
|
||||
AESAlgorithm AlgorithmType = "AES256"
|
||||
SM4Algorithm AlgorithmType = "SM4"
|
||||
)
|
||||
|
||||
// StorageClassType bucket storage type
|
||||
type StorageClassType string
|
||||
|
||||
const (
|
||||
// StorageStandard standard
|
||||
StorageStandard StorageClassType = "Standard"
|
||||
|
||||
// StorageIA infrequent access
|
||||
StorageIA StorageClassType = "IA"
|
||||
|
||||
// StorageArchive archive
|
||||
StorageArchive StorageClassType = "Archive"
|
||||
|
||||
// StorageColdArchive cold archive
|
||||
StorageColdArchive StorageClassType = "ColdArchive"
|
||||
|
||||
// StorageDeepColdArchive deep cold archive
|
||||
StorageDeepColdArchive StorageClassType = "DeepColdArchive"
|
||||
)
|
||||
|
||||
//RedundancyType bucket data Redundancy type
|
||||
type DataRedundancyType string
|
||||
|
||||
const (
|
||||
// RedundancyLRS Local redundancy, default value
|
||||
RedundancyLRS DataRedundancyType = "LRS"
|
||||
|
||||
// RedundancyZRS Same city redundancy
|
||||
RedundancyZRS DataRedundancyType = "ZRS"
|
||||
)
|
||||
|
||||
//ObjecthashFuncType
|
||||
type ObjecthashFuncType string
|
||||
|
||||
const (
|
||||
HashFuncSha1 ObjecthashFuncType = "SHA-1"
|
||||
HashFuncSha256 ObjecthashFuncType = "SHA-256"
|
||||
)
|
||||
|
||||
// PayerType the type of request payer
|
||||
type PayerType string
|
||||
|
||||
const (
|
||||
// Requester the requester who send the request
|
||||
Requester PayerType = "Requester"
|
||||
|
||||
// BucketOwner the requester who send the request
|
||||
BucketOwner PayerType = "BucketOwner"
|
||||
)
|
||||
|
||||
//RestoreMode the restore mode for coldArchive object
|
||||
type RestoreMode string
|
||||
|
||||
const (
|
||||
//RestoreExpedited object will be restored in 1 hour
|
||||
RestoreExpedited RestoreMode = "Expedited"
|
||||
|
||||
//RestoreStandard object will be restored in 2-5 hours
|
||||
RestoreStandard RestoreMode = "Standard"
|
||||
|
||||
//RestoreBulk object will be restored in 5-10 hours
|
||||
RestoreBulk RestoreMode = "Bulk"
|
||||
)
|
||||
|
||||
// HTTPMethod HTTP request method
|
||||
type HTTPMethod string
|
||||
|
||||
const (
|
||||
// HTTPGet HTTP GET
|
||||
HTTPGet HTTPMethod = "GET"
|
||||
|
||||
// HTTPPut HTTP PUT
|
||||
HTTPPut HTTPMethod = "PUT"
|
||||
|
||||
// HTTPHead HTTP HEAD
|
||||
HTTPHead HTTPMethod = "HEAD"
|
||||
|
||||
// HTTPPost HTTP POST
|
||||
HTTPPost HTTPMethod = "POST"
|
||||
|
||||
// HTTPDelete HTTP DELETE
|
||||
HTTPDelete HTTPMethod = "DELETE"
|
||||
)
|
||||
|
||||
// HTTP header names used by the SDK. Standard headers come first, followed
// by the OSS-specific X-Oss-* extension headers.
const (
	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
	HTTPHeaderAuthorization             = "Authorization"
	HTTPHeaderCacheControl              = "Cache-Control"
	HTTPHeaderContentDisposition        = "Content-Disposition"
	HTTPHeaderContentEncoding           = "Content-Encoding"
	HTTPHeaderContentLength             = "Content-Length"
	HTTPHeaderContentMD5                = "Content-MD5"
	HTTPHeaderContentType               = "Content-Type"
	HTTPHeaderContentLanguage           = "Content-Language"
	HTTPHeaderDate                      = "Date"
	HTTPHeaderEtag                      = "ETag"
	HTTPHeaderExpires                   = "Expires"
	HTTPHeaderHost                      = "Host"
	HTTPHeaderLastModified              = "Last-Modified"
	HTTPHeaderRange                     = "Range"
	HTTPHeaderLocation                  = "Location"
	HTTPHeaderOrigin                    = "Origin"
	HTTPHeaderServer                    = "Server"
	HTTPHeaderUserAgent                 = "User-Agent"
	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
	HTTPHeaderIfMatch                   = "If-Match"
	HTTPHeaderIfNoneMatch               = "If-None-Match"
	HTTPHeaderACReqMethod               = "Access-Control-Request-Method"
	HTTPHeaderACReqHeaders              = "Access-Control-Request-Headers"

	// OSS-specific extension headers.
	HTTPHeaderOssACL                         = "X-Oss-Acl"
	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
	HTTPHeaderOssServerSideEncryptionKeyID   = "X-Oss-Server-Side-Encryption-Key-Id"
	HTTPHeaderOssServerSideDataEncryption    = "X-Oss-Server-Side-Data-Encryption"
	HTTPHeaderSSECAlgorithm                  = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
	HTTPHeaderSSECKey                        = "X-Oss-Server-Side-Encryption-Customer-Key"
	HTTPHeaderSSECKeyMd5                     = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
	HTTPHeaderOssStorageClass                = "X-Oss-Storage-Class"
	HTTPHeaderOssCallback                    = "X-Oss-Callback"
	HTTPHeaderOssCallbackVar                 = "X-Oss-Callback-Var"
	HTTPHeaderOssRequester                   = "X-Oss-Request-Payer"
	HTTPHeaderOssTagging                     = "X-Oss-Tagging"
	HTTPHeaderOssTaggingDirective            = "X-Oss-Tagging-Directive"
	HTTPHeaderOssTrafficLimit                = "X-Oss-Traffic-Limit"
	HTTPHeaderOssForbidOverWrite             = "X-Oss-Forbid-Overwrite"
	HTTPHeaderOssRangeBehavior               = "X-Oss-Range-Behavior"
	HTTPHeaderOssTaskID                      = "X-Oss-Task-Id"
	HTTPHeaderOssHashCtx                     = "X-Oss-Hash-Ctx"
	HTTPHeaderOssMd5Ctx                      = "X-Oss-Md5-Ctx"
	HTTPHeaderAllowSameActionOverLap         = "X-Oss-Allow-Same-Action-Overlap"
	HttpHeaderOssDate                        = "X-Oss-Date"
	HttpHeaderOssContentSha256               = "X-Oss-Content-Sha256"
	HttpHeaderOssNotification                = "X-Oss-Notification"
	HTTPHeaderOssEc                          = "X-Oss-Ec"
	HTTPHeaderOssErr                         = "X-Oss-Err"
)
|
||||
|
||||
// HTTP query parameter names used in presigned URLs. The lowercase x-oss-*
// names belong to the v2/v4 signature schemes.
const (
	HTTPParamExpires       = "Expires"
	HTTPParamAccessKeyID   = "OSSAccessKeyId"
	HTTPParamSignature     = "Signature"
	HTTPParamSecurityToken = "security-token"
	HTTPParamPlaylistName  = "playlistName"

	HTTPParamSignatureVersion    = "x-oss-signature-version"
	HTTPParamExpiresV2           = "x-oss-expires"
	HTTPParamAccessKeyIDV2       = "x-oss-access-key-id"
	HTTPParamSignatureV2         = "x-oss-signature"
	HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
	HTTPParamCredential          = "x-oss-credential"
	HTTPParamDate                = "x-oss-date"
	HTTPParamOssSecurityToken    = "x-oss-security-token"
)
|
||||
|
||||
// Other constants
const (
	MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
	MinPartSize = 100 * 1024             // Min part size, 100KB

	FilePermMode = os.FileMode(0664) // Default permission for files created by the SDK

	TempFilePrefix = "oss-go-temp-" // Prefix of temporary files
	TempFileSuffix = ".temp"        // Suffix of temporary files

	CheckpointFileSuffix = ".cp" // Suffix of resumable-transfer checkpoint files

	NullVersion = "null" // The literal "null" version id

	DefaultContentSha256 = "UNSIGNED-PAYLOAD" // Content-SHA256 placeholder for v4 signatures

	Version = "v3.0.2" // Go SDK version
)
|
||||
|
||||
// FrameType values — presumably the frame identifiers of the OSS
// select-object wire protocol; confirm against the protocol documentation.
const (
	DataFrameType        = 8388609
	ContinuousFrameType  = 8388612
	EndFrameType         = 8388613
	MetaEndFrameCSVType  = 8388614
	MetaEndFrameJSONType = 8388615
)
|
||||
|
||||
// AuthVersionType selects the request-signing algorithm version.
type AuthVersionType string

const (
	// AuthV1 signature version 1.
	AuthV1 AuthVersionType = "v1"
	// AuthV2 signature version 2.
	AuthV2 AuthVersionType = "v2"
	// AuthV4 signature version 4.
	AuthV4 AuthVersionType = "v4"
)
|
||||
123
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
generated
vendored
Normal file
123
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
)
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
|
||||
type digest struct {
|
||||
crc uint64
|
||||
tab *crc64.Table
|
||||
}
|
||||
|
||||
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
|
||||
// using the polynomial represented by the Table.
|
||||
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
|
||||
|
||||
// Size returns the number of bytes sum will return.
|
||||
func (d *digest) Size() int { return crc64.Size }
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (d *digest) BlockSize() int { return 1 }
|
||||
|
||||
// Reset resets the hash to its initial state.
|
||||
func (d *digest) Reset() { d.crc = 0 }
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// It never returns an error.
|
||||
func (d *digest) Write(p []byte) (n int, err error) {
|
||||
d.crc = crc64.Update(d.crc, d.tab, p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Sum64 returns CRC64 value.
|
||||
func (d *digest) Sum64() uint64 { return d.crc }
|
||||
|
||||
// Sum returns hash value.
|
||||
func (d *digest) Sum(in []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// gf2Dim dimension of GF(2) vectors (length of CRC)
|
||||
const gf2Dim int = 64
|
||||
|
||||
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
|
||||
var sum uint64
|
||||
for i := 0; vec != 0; i++ {
|
||||
if vec&1 != 0 {
|
||||
sum ^= mat[i]
|
||||
}
|
||||
|
||||
vec >>= 1
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func gf2MatrixSquare(square []uint64, mat []uint64) {
|
||||
for n := 0; n < gf2Dim; n++ {
|
||||
square[n] = gf2MatrixTimes(mat, mat[n])
|
||||
}
|
||||
}
|
||||
|
||||
// CRC64Combine combines CRC64
|
||||
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
|
||||
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
|
||||
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
|
||||
|
||||
// Degenerate case
|
||||
if len2 == 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
// Put operator for one zero bit in odd
|
||||
odd[0] = crc64.ECMA // CRC64 polynomial
|
||||
var row uint64 = 1
|
||||
for n := 1; n < gf2Dim; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// Put operator for two zero bits in even
|
||||
gf2MatrixSquare(even[:], odd[:])
|
||||
|
||||
// Put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd[:], even[:])
|
||||
|
||||
// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
|
||||
for {
|
||||
// Apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even[:], odd[:])
|
||||
|
||||
if len2&1 != 0 {
|
||||
crc1 = gf2MatrixTimes(even[:], crc1)
|
||||
}
|
||||
|
||||
len2 >>= 1
|
||||
|
||||
// If no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd[:], even[:])
|
||||
if len2&1 != 0 {
|
||||
crc1 = gf2MatrixTimes(odd[:], crc1)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// If no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Return combined CRC
|
||||
crc1 ^= crc2
|
||||
return crc1
|
||||
}
|
||||
567
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
generated
vendored
Normal file
567
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
generated
vendored
Normal file
@@ -0,0 +1,567 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DownloadFile downloads files with multipart download.
|
||||
//
|
||||
// objectKey the object key.
|
||||
// filePath the local file to download from objectKey in OSS.
|
||||
// partSize the part size in bytes.
|
||||
// options object's constraints, check out GetObject for the reference.
|
||||
//
|
||||
// error it's nil when the call succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < 1 {
|
||||
return errors.New("oss: part size smaller than 1")
|
||||
}
|
||||
|
||||
uRange, err := GetRangeConfig(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
var strVersionId string
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
strVersionId = versionId.(string)
|
||||
}
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
|
||||
if cpFilePath != "" {
|
||||
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
|
||||
}
|
||||
|
||||
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
absPath, _ := filepath.Abs(destFile)
|
||||
cpFileName := getCpFileName(src, absPath, versionId)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// downloadWorkerArg bundles the parameters shared by all download workers.
type downloadWorkerArg struct {
	bucket    *Bucket          // bucket the object is read from
	key       string           // object key to download
	filePath  string           // local (temp) file the parts are written into
	options   []Option         // caller-supplied request options
	hook      downloadPartHook // hook invoked before each part download (test seam)
	enableCRC bool             // whether to CRC64 each downloaded part
}
|
||||
|
||||
// downloadPartHook is invoked before each part download; tests use it to
// inject failures.
type downloadPartHook func(part downloadPart) error

// downloadPartHooker is the active hook; tests may swap it out.
var downloadPartHooker downloadPartHook = defaultDownloadPartHook

// defaultDownloadPartHook is the production hook: a no-op that always
// returns nil.
func defaultDownloadPartHook(part downloadPart) error {
	return nil
}
|
||||
|
||||
// defaultDownloadProgressListener shields the caller's ProgressListener from
// per-part GetObject progress events; overall progress is published by the
// download loop instead.
type defaultDownloadProgressListener struct {
}

// ProgressChanged intentionally ignores per-part progress events.
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
|
||||
|
||||
// downloadWorker
|
||||
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
|
||||
for part := range jobs {
|
||||
if err := arg.hook(part); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
// Resolve options
|
||||
r := Range(part.Start, part.End)
|
||||
p := Progress(&defaultDownloadProgressListener{})
|
||||
|
||||
var respHeader http.Header
|
||||
opts := make([]Option, len(arg.options)+3)
|
||||
// Append orderly, can not be reversed!
|
||||
opts = append(opts, arg.options...)
|
||||
opts = append(opts, r, p, GetResponseHeader(&respHeader))
|
||||
|
||||
rd, err := arg.bucket.GetObject(arg.key, opts...)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
var crcCalc hash.Hash64
|
||||
if arg.enableCRC {
|
||||
crcCalc = crc64.New(CrcTable())
|
||||
contentLen := part.End - part.Start + 1
|
||||
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
|
||||
if err != nil {
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
startT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
_, err = io.Copy(fd, rd)
|
||||
endT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
if err != nil {
|
||||
arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
if arg.enableCRC {
|
||||
part.CRC64 = crcCalc.Sum64()
|
||||
}
|
||||
|
||||
fd.Close()
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// downloadScheduler
|
||||
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
// downloadPart describes one byte range of a multipart download.
type downloadPart struct {
	Index  int    // Part number, starting from 0
	Start  int64  // Start index (inclusive absolute object offset)
	End    int64  // End index (inclusive)
	Offset int64  // Offset of the whole requested range within the object
	CRC64  uint64 // CRC check value of the part after download
}
|
||||
|
||||
// getDownloadParts gets download parts
|
||||
func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
|
||||
parts := []downloadPart{}
|
||||
part := downloadPart{}
|
||||
i := 0
|
||||
start, end := AdjustRange(uRange, objectSize)
|
||||
for offset := start; offset < end; offset += partSize {
|
||||
part.Index = i
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, end, partSize)
|
||||
part.Offset = start
|
||||
part.CRC64 = 0
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
// getObjectBytes gets object bytes length
|
||||
func getObjectBytes(parts []downloadPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
ob += (part.End - part.Start + 1)
|
||||
}
|
||||
return ob
|
||||
}
|
||||
|
||||
// combineCRCInParts caculates the total CRC of continuous parts
|
||||
func combineCRCInParts(dps []downloadPart) uint64 {
|
||||
if dps == nil || len(dps) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
crc := dps[0].CRC64
|
||||
for i := 1; i < len(dps); i++ {
|
||||
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
|
||||
}
|
||||
|
||||
return crc
|
||||
}
|
||||
|
||||
// downloadFile downloads the object to filePath by fetching partSize-byte
// ranges concurrently on `routines` workers, without checkpointing. Data is
// written to filePath+TempFileSuffix and renamed into place on success.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := GetProgressListener(options)

	// If the file does not exist, create one. If exists, the download will overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Get the object detailed meta for object whole size
	// must delete header:range to get whole object size
	skipOptions := DeleteOption(options, HTTPHeaderRange)
	meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
	if err != nil {
		return err
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}

	// CRC verification is only possible for full-object (unranged) downloads.
	enableCRC := false
	expectedCRC := (uint64)(0)
	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
			enableCRC = true
			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
		}
	}

	// Get the parts of the file
	parts := getDownloadParts(objectSize, partSize, uRange)
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start the download workers
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Download parts concurrently
	go downloadScheduler(jobs, parts)

	// Waiting for parts download finished
	// NOTE(review): on the error return below, workers still blocked sending
	// on the unbuffered failed channel are leaked — confirm acceptable.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			downBytes := (part.End - part.Start + 1)
			completedBytes += downBytes
			parts[part.Index].CRC64 = part.CRC64
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			return err
		}

		// Redundant with the loop condition; kept as a safety belt.
		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	// Verify the combined per-part CRC against the server-reported value.
	if enableCRC {
		actualCRC := combineCRCInParts(parts)
		err = CheckDownloadCRC(actualCRC, expectedCRC)
		if err != nil {
			return err
		}
	}

	return os.Rename(tempFilePath, filePath)
}
|
||||
|
||||
// ----- Concurrent download with checkpoint -----

// downloadCpMagic identifies a valid download checkpoint file.
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"

// downloadCheckpoint is the JSON-serialized state of a resumable download.
type downloadCheckpoint struct {
	Magic     string         // Magic
	MD5       string         // Checkpoint content MD5 (computed with this field blanked)
	FilePath  string         // Local file
	Object    string         // Key
	ObjStat   objectStat     // Object status at prepare time
	Parts     []downloadPart // All download parts
	PartStat  []bool         // Parts' download status, indexed by part
	Start     int64          // Start point of the file
	End       int64          // End point of the file
	enableCRC bool           // Whether has CRC check (unexported: not persisted to JSON)
	CRC       uint64         // Expected whole-object CRC check value
}
|
||||
|
||||
// objectStat is the object meta snapshot used to detect server-side changes
// between checkpoint sessions.
type objectStat struct {
	Size         int64  // Object size
	LastModified string // Last modified time
	Etag         string // Etag
}
|
||||
|
||||
// isValid reports whether the loaded checkpoint matches both its own
// embedded MD5 and the object's current meta (size/mtime/etag), and — when a
// range was requested — the same download range. A false return with a nil
// error means the download must be re-initialized from scratch.
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
	// Compare the CP's Magic and the MD5 (digest computed over the struct
	// with the MD5 field blanked, mirroring dump).
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])

	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
		return false, nil
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return false, err
	}

	// Compare the object size, last modified time and etag
	if cp.ObjStat.Size != objectSize ||
		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
		return false, nil
	}

	// Check the download range
	if uRange != nil {
		start, end := AdjustRange(uRange, objectSize)
		if start != cp.Start || end != cp.End {
			return false, nil
		}
	}

	return true, nil
}
|
||||
|
||||
// load checkpoint from local file
|
||||
func (cp *downloadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// dump funciton dumps to file
|
||||
func (cp *downloadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialize
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts gets unfinished parts
|
||||
func (cp downloadCheckpoint) todoParts() []downloadPart {
|
||||
dps := []downloadPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
if !ps {
|
||||
dps = append(dps, cp.Parts[i])
|
||||
}
|
||||
}
|
||||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes gets completed size
|
||||
func (cp downloadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
if cp.PartStat[i] {
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initiates download tasks
|
||||
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
|
||||
// CP
|
||||
cp.Magic = downloadCpMagic
|
||||
cp.FilePath = filePath
|
||||
cp.Object = objectKey
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.ObjStat.Size = objectSize
|
||||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
|
||||
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
|
||||
cp.enableCRC = true
|
||||
cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
|
||||
}
|
||||
}
|
||||
|
||||
// Parts
|
||||
cp.Parts = getDownloadParts(objectSize, partSize, uRange)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
|
||||
err := os.Rename(downFilepath, cp.FilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// downloadFileWithCp downloads the object like downloadFile but records
// per-part progress in the checkpoint file at cpFilePath so an interrupted
// download can resume where it left off.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := GetProgressListener(options)

	// Load checkpoint data; a corrupt/unreadable file is deleted and the
	// download re-initialized by prepare below (isValid fails on zero data).
	dcp := downloadCheckpoint{}
	err := dcp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// Get the object detailed meta for object whole size
	// must delete header:range to get whole object size
	skipOptions := DeleteOption(options, HTTPHeaderRange)
	meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
	if err != nil {
		return err
	}

	// Load error or data invalid. Re-initialize the download.
	valid, err := dcp.isValid(meta, uRange)
	if err != nil || !valid {
		if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Create the file if not exists. Otherwise the parts download will overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Unfinished parts
	parts := dcp.todoParts()
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := dcp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
	publishProgress(listener, event)

	// Start the download workers routine
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Concurrently downloads parts
	go downloadScheduler(jobs, parts)

	// Wait for the parts download finished
	// NOTE(review): as in downloadFile, workers blocked on the unbuffered
	// failed channel after an early return are leaked — confirm acceptable.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			dcp.PartStat[part.Index] = true
			dcp.Parts[part.Index].CRC64 = part.CRC64
			// Persist progress after every finished part; error deliberately
			// ignored (best effort — the part data is already on disk).
			dcp.dump(cpFilePath)
			downBytes := (part.End - part.Start + 1)
			completedBytes += downBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		// Redundant with the loop condition; kept as a safety belt.
		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
	publishProgress(listener, event)

	// Full-object downloads are CRC-verified against the value recorded at
	// prepare time.
	if dcp.enableCRC {
		actualCRC := combineCRCInParts(dcp.Parts)
		err = CheckDownloadCRC(actualCRC, dcp.CRC)
		if err != nil {
			return err
		}
	}

	return dcp.complete(cpFilePath, tempFilePath)
}
|
||||
136
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
generated
vendored
Normal file
136
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ServiceError mirrors the XML error payload returned by the OSS Service
// REST API, plus the raw body and HTTP status captured by the client.
type ServiceError struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
	Message    string   `xml:"Message"`   // The detail error message from OSS
	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
	Endpoint   string   `xml:"Endpoint"`
	Ec         string   `xml:"EC"`
	RawMessage string // The raw messages from OSS
	StatusCode int    // HTTP status code
}

// Error implements the error interface, rendering status code, error code,
// message, request id and — when present — endpoint and EC.
func (e ServiceError) Error() string {
	msg := fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
	if e.Endpoint != "" {
		msg += fmt.Sprintf(", Endpoint=%s", e.Endpoint)
	}
	if e.Ec != "" {
		msg += fmt.Sprintf(", Ec=%s", e.Ec)
	}
	return msg
}
|
||||
|
||||
// UnexpectedStatusCodeError is returned when a storage service responds with
// neither an error nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // The expected HTTP status codes returned from OSS
	got     int   // The actual HTTP status code from OSS
}

// Error implements the error interface, listing the actual versus the
// expected status codes.
func (e UnexpectedStatusCodeError) Error() string {
	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }

	expected := make([]string, 0, len(e.allowed))
	for _, code := range e.allowed {
		expected = append(expected, s(code))
	}
	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
		s(e.got), strings.Join(expected, " or "))
}

// Got returns the actual status code returned by OSS.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}

// CheckRespCode returns nil when respCode is one of the allowed status
// codes; otherwise it returns an UnexpectedStatusCodeError carrying both.
func CheckRespCode(respCode int, allowed []int) error {
	for _, code := range allowed {
		if code == respCode {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed, respCode}
}
|
||||
|
||||
// CheckCallbackResp inspects a callback response and returns a non-nil error
// when the body carries an error payload. The body is parsed as an OSS
// ServiceError when possible; otherwise a generic error including the
// request id (and EC header, if present) is produced.
// NOTE(review): the body is only read when Content-Length > 0, so an error
// delivered with chunked encoding would be treated as success — confirm.
func CheckCallbackResp(resp *Response) error {
	var err error
	contentLengthStr := resp.Headers.Get("Content-Length")
	contentLength, _ := strconv.Atoi(contentLengthStr)
	var bodyBytes []byte
	if contentLength > 0 {
		bodyBytes, _ = ioutil.ReadAll(resp.Body)
	}
	if len(bodyBytes) > 0 {
		srvErr, errIn := serviceErrFromXML(bodyBytes, resp.StatusCode,
			resp.Headers.Get(HTTPHeaderOssRequestID))
		if errIn != nil {
			// Unparseable body: fall back to a generic error message.
			if len(resp.Headers.Get(HTTPHeaderOssEc)) > 0 {
				err = fmt.Errorf("unknown response body, status code = %d, RequestId = %s, ec = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID), resp.Headers.Get(HTTPHeaderOssEc))
			} else {
				err = fmt.Errorf("unknown response body, status code= %d, RequestId = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
			}
		} else {
			err = srvErr
		}
	}
	return err
}
|
||||
|
||||
func tryConvertServiceError(data []byte, resp *Response, def error) (err error) {
|
||||
err = def
|
||||
if len(data) > 0 {
|
||||
srvErr, errIn := serviceErrFromXML(data, resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
|
||||
if errIn == nil {
|
||||
err = srvErr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CRCCheckError is returned when the CRC64 computed on the client side does
// not match the value computed by the server.
type CRCCheckError struct {
	clientCRC uint64 // CRC64 calculated on the client
	serverCRC uint64 // CRC64 calculated on the server
	operation string // upload operation, e.g. PutObject/AppendObject/UploadPart
	requestID string // request id of the failing operation
}

// Error implements the error interface.
func (c CRCCheckError) Error() string {
	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
		c.operation, c.clientCRC, c.serverCRC, c.requestID)
}

// CheckDownloadCRC compares the client-side and server-side CRC64 of a
// download and returns CRCCheckError when they differ; otherwise nil.
func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
	if clientCRC != serverCRC {
		return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
	}
	return nil
}
|
||||
|
||||
func CheckCRC(resp *Response, operation string) error {
|
||||
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
|
||||
return nil
|
||||
}
|
||||
return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
|
||||
}
|
||||
29
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
generated
vendored
Normal file
29
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
// "golang.org/x/time/rate" is depended on golang context package go1.7 onward
|
||||
// this file is only for build,not supports limit upload speed
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	// perTokenBandwidthSize is the number of bytes represented by one limiter token.
	perTokenBandwidthSize int = 1024
)

// OssLimiter is an empty placeholder: real rate limiting requires go1.7+.
type OssLimiter struct {
}

// LimitSpeedReader is a build-only stub; on go < 1.7 it performs no throttling.
type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader   // underlying data source
	ossLimiter *OssLimiter // unused in this stub build
}

// GetOssLimiter always fails on go < 1.7, because golang.org/x/time/rate
// depends on the context package introduced in go1.7 (see file header).
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
	err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
	return nil, err
}
|
||||
91
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
generated
vendored
Normal file
91
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
const (
	// perTokenBandwidthSize is the number of bytes represented by one limiter token.
	perTokenBandwidthSize int = 1024
)

// OssLimiter wraps rate.Limiter for bandwidth throttling.
type OssLimiter struct {
	limiter *rate.Limiter
}

// GetOssLimiter creates an OssLimiter.
// uploadSpeed is in KB/s (one token == perTokenBandwidthSize == 1024 bytes).
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
	limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)

	// First consume the initial full burst of tokens so the limiter
	// behaves more accurately from the very first Read.
	limiter.AllowN(time.Now(), uploadSpeed)

	return &OssLimiter{
		limiter: limiter,
	}, nil
}

// LimitSpeedReader throttles upload bandwidth while reading.
type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader   // underlying data source
	ossLimiter *OssLimiter // token bucket that paces Read
}
|
||||
|
||||
// Read reads up to len(p) bytes from the underlying reader while throttling
// throughput through the wrapped rate limiter. Data is consumed in chunks of
// at most burst*perTokenBandwidthSize bytes; after each chunk the matching
// number of tokens is reserved and the goroutine sleeps for the delay the
// limiter demands.
func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
	n = 0
	err = nil
	start := 0
	burst := r.ossLimiter.limiter.Burst()
	var end int
	var tmpN int
	var tc int
	for start < len(p) {
		// Clamp the chunk so it never requires more than `burst` tokens.
		if start+burst*perTokenBandwidthSize < len(p) {
			end = start + burst*perTokenBandwidthSize
		} else {
			end = len(p)
		}

		tmpN, err = r.reader.Read(p[start:end])
		if tmpN > 0 {
			n += tmpN
			start = n // advance past the bytes just read (start was n-tmpN)
		}

		if err != nil {
			return
		}

		// Tokens consumed: one per perTokenBandwidthSize bytes, rounded up.
		tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
		now := time.Now()
		re := r.ossLimiter.limiter.ReserveN(now, tc)
		if !re.OK() {
			// tc exceeded the limiter's burst; cannot ever be satisfied.
			err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
				start, end, burst, perTokenBandwidthSize)
			return
		}
		timeDelay := re.Delay()
		time.Sleep(timeDelay)
	}
	return
}
|
||||
|
||||
// Close ...
|
||||
func (r *LimitSpeedReader) Close() error {
|
||||
rc, ok := r.reader.(io.ReadCloser)
|
||||
if ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
257
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
generated
vendored
Normal file
257
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
generated
vendored
Normal file
@@ -0,0 +1,257 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
//
|
||||
// CreateLiveChannel create a live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// config configuration of the channel
|
||||
//
|
||||
// CreateLiveChannelResult the result of create live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
|
||||
var out CreateLiveChannelResult
|
||||
|
||||
bs, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// status enabled/disabled
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["status"] = status
|
||||
|
||||
resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// playlistName the name of the playlist, must end with ".m3u8"
|
||||
// startTime the start time of the playlist
|
||||
// endTime the endtime of the playlist
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
|
||||
params := map[string]interface{}{}
|
||||
params["vod"] = nil
|
||||
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
|
||||
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
|
||||
|
||||
key := fmt.Sprintf("%s/%s", channelName, playlistName)
|
||||
resp, err := bucket.do("POST", key, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// startTime the start time of the playlist
|
||||
// endTime the endtime of the playlist
|
||||
//
|
||||
// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["vod"] = nil
|
||||
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
|
||||
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelStat Get the state of the live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelStat the state of the live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
|
||||
var out LiveChannelStat
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["comp"] = "stat"
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelInfo Get the configuration info of the live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelConfiguration the configuration info of the live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
|
||||
var out LiveChannelConfiguration
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelHistory Get push records of live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelHistory push records
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
|
||||
var out LiveChannelHistory
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["comp"] = "history"
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// ListLiveChannel list the live-channels
|
||||
//
|
||||
// options Prefix: filter by the name start with the value of "Prefix"
|
||||
// MaxKeys: the maximum count returned
|
||||
// Marker: cursor from which starting list
|
||||
//
|
||||
// ListLiveChannelResult live-channel list
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
|
||||
var out ListLiveChannelResult
|
||||
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["live"] = nil
|
||||
|
||||
resp, err := bucket.doInner("GET", "", params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// DeleteLiveChannel Delete the live-channel. When a client trying to stream the live-channel, the operation will fail. it will only delete the live-channel itself and the object generated by the live-channel will not be deleted.
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) DeleteLiveChannel(channelName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
|
||||
if channelName == "" {
|
||||
return fmt.Errorf("invalid argument: channel name is empty")
|
||||
}
|
||||
|
||||
resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
//
|
||||
// SignRtmpURL Generate a RTMP push-stream signature URL for the trusted user to push the RTMP stream to the live-channel.
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// playlistName the name of the playlist, must end with ".m3u8"
|
||||
// expires expiration (in seconds)
|
||||
//
|
||||
// string singed rtmp push stream url
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
|
||||
if expires <= 0 {
|
||||
return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires)
|
||||
}
|
||||
expiration := time.Now().Unix() + expires
|
||||
|
||||
return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
|
||||
}
|
||||
594
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
generated
vendored
Normal file
594
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
generated
vendored
Normal file
@@ -0,0 +1,594 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var extToMimeType = map[string]string{
|
||||
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
|
||||
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
|
||||
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
|
||||
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
|
||||
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
|
||||
".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
|
||||
".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
|
||||
".apk": "application/vnd.android.package-archive",
|
||||
".hqx": "application/mac-binhex40",
|
||||
".cpt": "application/mac-compactpro",
|
||||
".doc": "application/msword",
|
||||
".ogg": "application/ogg",
|
||||
".pdf": "application/pdf",
|
||||
".rtf": "text/rtf",
|
||||
".mif": "application/vnd.mif",
|
||||
".xls": "application/vnd.ms-excel",
|
||||
".ppt": "application/vnd.ms-powerpoint",
|
||||
".odc": "application/vnd.oasis.opendocument.chart",
|
||||
".odb": "application/vnd.oasis.opendocument.database",
|
||||
".odf": "application/vnd.oasis.opendocument.formula",
|
||||
".odg": "application/vnd.oasis.opendocument.graphics",
|
||||
".otg": "application/vnd.oasis.opendocument.graphics-template",
|
||||
".odi": "application/vnd.oasis.opendocument.image",
|
||||
".odp": "application/vnd.oasis.opendocument.presentation",
|
||||
".otp": "application/vnd.oasis.opendocument.presentation-template",
|
||||
".ods": "application/vnd.oasis.opendocument.spreadsheet",
|
||||
".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
|
||||
".odt": "application/vnd.oasis.opendocument.text",
|
||||
".odm": "application/vnd.oasis.opendocument.text-master",
|
||||
".ott": "application/vnd.oasis.opendocument.text-template",
|
||||
".oth": "application/vnd.oasis.opendocument.text-web",
|
||||
".sxw": "application/vnd.sun.xml.writer",
|
||||
".stw": "application/vnd.sun.xml.writer.template",
|
||||
".sxc": "application/vnd.sun.xml.calc",
|
||||
".stc": "application/vnd.sun.xml.calc.template",
|
||||
".sxd": "application/vnd.sun.xml.draw",
|
||||
".std": "application/vnd.sun.xml.draw.template",
|
||||
".sxi": "application/vnd.sun.xml.impress",
|
||||
".sti": "application/vnd.sun.xml.impress.template",
|
||||
".sxg": "application/vnd.sun.xml.writer.global",
|
||||
".sxm": "application/vnd.sun.xml.math",
|
||||
".sis": "application/vnd.symbian.install",
|
||||
".wbxml": "application/vnd.wap.wbxml",
|
||||
".wmlc": "application/vnd.wap.wmlc",
|
||||
".wmlsc": "application/vnd.wap.wmlscriptc",
|
||||
".bcpio": "application/x-bcpio",
|
||||
".torrent": "application/x-bittorrent",
|
||||
".bz2": "application/x-bzip2",
|
||||
".vcd": "application/x-cdlink",
|
||||
".pgn": "application/x-chess-pgn",
|
||||
".cpio": "application/x-cpio",
|
||||
".csh": "application/x-csh",
|
||||
".dvi": "application/x-dvi",
|
||||
".spl": "application/x-futuresplash",
|
||||
".gtar": "application/x-gtar",
|
||||
".hdf": "application/x-hdf",
|
||||
".jar": "application/x-java-archive",
|
||||
".jnlp": "application/x-java-jnlp-file",
|
||||
".js": "application/x-javascript",
|
||||
".ksp": "application/x-kspread",
|
||||
".chrt": "application/x-kchart",
|
||||
".kil": "application/x-killustrator",
|
||||
".latex": "application/x-latex",
|
||||
".rpm": "application/x-rpm",
|
||||
".sh": "application/x-sh",
|
||||
".shar": "application/x-shar",
|
||||
".swf": "application/x-shockwave-flash",
|
||||
".sit": "application/x-stuffit",
|
||||
".sv4cpio": "application/x-sv4cpio",
|
||||
".sv4crc": "application/x-sv4crc",
|
||||
".tar": "application/x-tar",
|
||||
".tcl": "application/x-tcl",
|
||||
".tex": "application/x-tex",
|
||||
".man": "application/x-troff-man",
|
||||
".me": "application/x-troff-me",
|
||||
".ms": "application/x-troff-ms",
|
||||
".ustar": "application/x-ustar",
|
||||
".src": "application/x-wais-source",
|
||||
".zip": "application/zip",
|
||||
".m3u": "audio/x-mpegurl",
|
||||
".ra": "audio/x-pn-realaudio",
|
||||
".wav": "audio/x-wav",
|
||||
".wma": "audio/x-ms-wma",
|
||||
".wax": "audio/x-ms-wax",
|
||||
".pdb": "chemical/x-pdb",
|
||||
".xyz": "chemical/x-xyz",
|
||||
".bmp": "image/bmp",
|
||||
".gif": "image/gif",
|
||||
".ief": "image/ief",
|
||||
".png": "image/png",
|
||||
".wbmp": "image/vnd.wap.wbmp",
|
||||
".ras": "image/x-cmu-raster",
|
||||
".pnm": "image/x-portable-anymap",
|
||||
".pbm": "image/x-portable-bitmap",
|
||||
".pgm": "image/x-portable-graymap",
|
||||
".ppm": "image/x-portable-pixmap",
|
||||
".rgb": "image/x-rgb",
|
||||
".xbm": "image/x-xbitmap",
|
||||
".xpm": "image/x-xpixmap",
|
||||
".xwd": "image/x-xwindowdump",
|
||||
".css": "text/css",
|
||||
".rtx": "text/richtext",
|
||||
".tsv": "text/tab-separated-values",
|
||||
".jad": "text/vnd.sun.j2me.app-descriptor",
|
||||
".wml": "text/vnd.wap.wml",
|
||||
".wmls": "text/vnd.wap.wmlscript",
|
||||
".etx": "text/x-setext",
|
||||
".mxu": "video/vnd.mpegurl",
|
||||
".flv": "video/x-flv",
|
||||
".wm": "video/x-ms-wm",
|
||||
".wmv": "video/x-ms-wmv",
|
||||
".wmx": "video/x-ms-wmx",
|
||||
".wvx": "video/x-ms-wvx",
|
||||
".avi": "video/x-msvideo",
|
||||
".movie": "video/x-sgi-movie",
|
||||
".ice": "x-conference/x-cooltalk",
|
||||
".3gp": "video/3gpp",
|
||||
".ai": "application/postscript",
|
||||
".aif": "audio/x-aiff",
|
||||
".aifc": "audio/x-aiff",
|
||||
".aiff": "audio/x-aiff",
|
||||
".asc": "text/plain",
|
||||
".atom": "application/atom+xml",
|
||||
".au": "audio/basic",
|
||||
".bin": "application/octet-stream",
|
||||
".cdf": "application/x-netcdf",
|
||||
".cgm": "image/cgm",
|
||||
".class": "application/octet-stream",
|
||||
".dcr": "application/x-director",
|
||||
".dif": "video/x-dv",
|
||||
".dir": "application/x-director",
|
||||
".djv": "image/vnd.djvu",
|
||||
".djvu": "image/vnd.djvu",
|
||||
".dll": "application/octet-stream",
|
||||
".dmg": "application/octet-stream",
|
||||
".dms": "application/octet-stream",
|
||||
".dtd": "application/xml-dtd",
|
||||
".dv": "video/x-dv",
|
||||
".dxr": "application/x-director",
|
||||
".eps": "application/postscript",
|
||||
".exe": "application/octet-stream",
|
||||
".ez": "application/andrew-inset",
|
||||
".gram": "application/srgs",
|
||||
".grxml": "application/srgs+xml",
|
||||
".gz": "application/x-gzip",
|
||||
".htm": "text/html",
|
||||
".html": "text/html",
|
||||
".ico": "image/x-icon",
|
||||
".ics": "text/calendar",
|
||||
".ifb": "text/calendar",
|
||||
".iges": "model/iges",
|
||||
".igs": "model/iges",
|
||||
".jp2": "image/jp2",
|
||||
".jpe": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".jpg": "image/jpeg",
|
||||
".kar": "audio/midi",
|
||||
".lha": "application/octet-stream",
|
||||
".lzh": "application/octet-stream",
|
||||
".m4a": "audio/mp4a-latm",
|
||||
".m4p": "audio/mp4a-latm",
|
||||
".m4u": "video/vnd.mpegurl",
|
||||
".m4v": "video/x-m4v",
|
||||
".mac": "image/x-macpaint",
|
||||
".mathml": "application/mathml+xml",
|
||||
".mesh": "model/mesh",
|
||||
".mid": "audio/midi",
|
||||
".midi": "audio/midi",
|
||||
".mov": "video/quicktime",
|
||||
".mp2": "audio/mpeg",
|
||||
".mp3": "audio/mpeg",
|
||||
".mp4": "video/mp4",
|
||||
".mpe": "video/mpeg",
|
||||
".mpeg": "video/mpeg",
|
||||
".mpg": "video/mpeg",
|
||||
".mpga": "audio/mpeg",
|
||||
".msh": "model/mesh",
|
||||
".nc": "application/x-netcdf",
|
||||
".oda": "application/oda",
|
||||
".ogv": "video/ogv",
|
||||
".pct": "image/pict",
|
||||
".pic": "image/pict",
|
||||
".pict": "image/pict",
|
||||
".pnt": "image/x-macpaint",
|
||||
".pntg": "image/x-macpaint",
|
||||
".ps": "application/postscript",
|
||||
".qt": "video/quicktime",
|
||||
".qti": "image/x-quicktime",
|
||||
".qtif": "image/x-quicktime",
|
||||
".ram": "audio/x-pn-realaudio",
|
||||
".rdf": "application/rdf+xml",
|
||||
".rm": "application/vnd.rn-realmedia",
|
||||
".roff": "application/x-troff",
|
||||
".sgm": "text/sgml",
|
||||
".sgml": "text/sgml",
|
||||
".silo": "model/mesh",
|
||||
".skd": "application/x-koan",
|
||||
".skm": "application/x-koan",
|
||||
".skp": "application/x-koan",
|
||||
".skt": "application/x-koan",
|
||||
".smi": "application/smil",
|
||||
".smil": "application/smil",
|
||||
".snd": "audio/basic",
|
||||
".so": "application/octet-stream",
|
||||
".svg": "image/svg+xml",
|
||||
".t": "application/x-troff",
|
||||
".texi": "application/x-texinfo",
|
||||
".texinfo": "application/x-texinfo",
|
||||
".tif": "image/tiff",
|
||||
".tiff": "image/tiff",
|
||||
".tr": "application/x-troff",
|
||||
".txt": "text/plain",
|
||||
".vrml": "model/vrml",
|
||||
".vxml": "application/voicexml+xml",
|
||||
".webm": "video/webm",
|
||||
".wrl": "model/vrml",
|
||||
".xht": "application/xhtml+xml",
|
||||
".xhtml": "application/xhtml+xml",
|
||||
".xml": "application/xml",
|
||||
".xsl": "application/xml",
|
||||
".xslt": "application/xslt+xml",
|
||||
".xul": "application/vnd.mozilla.xul+xml",
|
||||
".webp": "image/webp",
|
||||
".323": "text/h323",
|
||||
".aab": "application/x-authoware-bin",
|
||||
".aam": "application/x-authoware-map",
|
||||
".aas": "application/x-authoware-seg",
|
||||
".acx": "application/internet-property-stream",
|
||||
".als": "audio/X-Alpha5",
|
||||
".amc": "application/x-mpeg",
|
||||
".ani": "application/octet-stream",
|
||||
".asd": "application/astound",
|
||||
".asf": "video/x-ms-asf",
|
||||
".asn": "application/astound",
|
||||
".asp": "application/x-asap",
|
||||
".asr": "video/x-ms-asf",
|
||||
".asx": "video/x-ms-asf",
|
||||
".avb": "application/octet-stream",
|
||||
".awb": "audio/amr-wb",
|
||||
".axs": "application/olescript",
|
||||
".bas": "text/plain",
|
||||
".bin ": "application/octet-stream",
|
||||
".bld": "application/bld",
|
||||
".bld2": "application/bld2",
|
||||
".bpk": "application/octet-stream",
|
||||
".c": "text/plain",
|
||||
".cal": "image/x-cals",
|
||||
".cat": "application/vnd.ms-pkiseccat",
|
||||
".ccn": "application/x-cnc",
|
||||
".cco": "application/x-cocoa",
|
||||
".cer": "application/x-x509-ca-cert",
|
||||
".cgi": "magnus-internal/cgi",
|
||||
".chat": "application/x-chat",
|
||||
".clp": "application/x-msclip",
|
||||
".cmx": "image/x-cmx",
|
||||
".co": "application/x-cult3d-object",
|
||||
".cod": "image/cis-cod",
|
||||
".conf": "text/plain",
|
||||
".cpp": "text/plain",
|
||||
".crd": "application/x-mscardfile",
|
||||
".crl": "application/pkix-crl",
|
||||
".crt": "application/x-x509-ca-cert",
|
||||
".csm": "chemical/x-csml",
|
||||
".csml": "chemical/x-csml",
|
||||
".cur": "application/octet-stream",
|
||||
".dcm": "x-lml/x-evm",
|
||||
".dcx": "image/x-dcx",
|
||||
".der": "application/x-x509-ca-cert",
|
||||
".dhtml": "text/html",
|
||||
".dot": "application/msword",
|
||||
".dwf": "drawing/x-dwf",
|
||||
".dwg": "application/x-autocad",
|
||||
".dxf": "application/x-autocad",
|
||||
".ebk": "application/x-expandedbook",
|
||||
".emb": "chemical/x-embl-dl-nucleotide",
|
||||
".embl": "chemical/x-embl-dl-nucleotide",
|
||||
".epub": "application/epub+zip",
|
||||
".eri": "image/x-eri",
|
||||
".es": "audio/echospeech",
|
||||
".esl": "audio/echospeech",
|
||||
".etc": "application/x-earthtime",
|
||||
".evm": "x-lml/x-evm",
|
||||
".evy": "application/envoy",
|
||||
".fh4": "image/x-freehand",
|
||||
".fh5": "image/x-freehand",
|
||||
".fhc": "image/x-freehand",
|
||||
".fif": "application/fractals",
|
||||
".flr": "x-world/x-vrml",
|
||||
".fm": "application/x-maker",
|
||||
".fpx": "image/x-fpx",
|
||||
".fvi": "video/isivideo",
|
||||
".gau": "chemical/x-gaussian-input",
|
||||
".gca": "application/x-gca-compressed",
|
||||
".gdb": "x-lml/x-gdb",
|
||||
".gps": "application/x-gps",
|
||||
".h": "text/plain",
|
||||
".hdm": "text/x-hdml",
|
||||
".hdml": "text/x-hdml",
|
||||
".hlp": "application/winhlp",
|
||||
".hta": "application/hta",
|
||||
".htc": "text/x-component",
|
||||
".hts": "text/html",
|
||||
".htt": "text/webviewhtml",
|
||||
".ifm": "image/gif",
|
||||
".ifs": "image/ifs",
|
||||
".iii": "application/x-iphone",
|
||||
".imy": "audio/melody",
|
||||
".ins": "application/x-internet-signup",
|
||||
".ips": "application/x-ipscript",
|
||||
".ipx": "application/x-ipix",
|
||||
".isp": "application/x-internet-signup",
|
||||
".it": "audio/x-mod",
|
||||
".itz": "audio/x-mod",
|
||||
".ivr": "i-world/i-vrml",
|
||||
".j2k": "image/j2k",
|
||||
".jam": "application/x-jam",
|
||||
".java": "text/plain",
|
||||
".jfif": "image/pipeg",
|
||||
".jpz": "image/jpeg",
|
||||
".jwc": "application/jwc",
|
||||
".kjx": "application/x-kjx",
|
||||
".lak": "x-lml/x-lak",
|
||||
".lcc": "application/fastman",
|
||||
".lcl": "application/x-digitalloca",
|
||||
".lcr": "application/x-digitalloca",
|
||||
".lgh": "application/lgh",
|
||||
".lml": "x-lml/x-lml",
|
||||
".lmlpack": "x-lml/x-lmlpack",
|
||||
".log": "text/plain",
|
||||
".lsf": "video/x-la-asf",
|
||||
".lsx": "video/x-la-asf",
|
||||
".m13": "application/x-msmediaview",
|
||||
".m14": "application/x-msmediaview",
|
||||
".m15": "audio/x-mod",
|
||||
".m3url": "audio/x-mpegurl",
|
||||
".m4b": "audio/mp4a-latm",
|
||||
".ma1": "audio/ma1",
|
||||
".ma2": "audio/ma2",
|
||||
".ma3": "audio/ma3",
|
||||
".ma5": "audio/ma5",
|
||||
".map": "magnus-internal/imagemap",
|
||||
".mbd": "application/mbedlet",
|
||||
".mct": "application/x-mascot",
|
||||
".mdb": "application/x-msaccess",
|
||||
".mdz": "audio/x-mod",
|
||||
".mel": "text/x-vmel",
|
||||
".mht": "message/rfc822",
|
||||
".mhtml": "message/rfc822",
|
||||
".mi": "application/x-mif",
|
||||
".mil": "image/x-cals",
|
||||
".mio": "audio/x-mio",
|
||||
".mmf": "application/x-skt-lbs",
|
||||
".mng": "video/x-mng",
|
||||
".mny": "application/x-msmoney",
|
||||
".moc": "application/x-mocha",
|
||||
".mocha": "application/x-mocha",
|
||||
".mod": "audio/x-mod",
|
||||
".mof": "application/x-yumekara",
|
||||
".mol": "chemical/x-mdl-molfile",
|
||||
".mop": "chemical/x-mopac-input",
|
||||
".mpa": "video/mpeg",
|
||||
".mpc": "application/vnd.mpohun.certificate",
|
||||
".mpg4": "video/mp4",
|
||||
".mpn": "application/vnd.mophun.application",
|
||||
".mpp": "application/vnd.ms-project",
|
||||
".mps": "application/x-mapserver",
|
||||
".mpv2": "video/mpeg",
|
||||
".mrl": "text/x-mrml",
|
||||
".mrm": "application/x-mrm",
|
||||
".msg": "application/vnd.ms-outlook",
|
||||
".mts": "application/metastream",
|
||||
".mtx": "application/metastream",
|
||||
".mtz": "application/metastream",
|
||||
".mvb": "application/x-msmediaview",
|
||||
".mzv": "application/metastream",
|
||||
".nar": "application/zip",
|
||||
".nbmp": "image/nbmp",
|
||||
".ndb": "x-lml/x-ndb",
|
||||
".ndwn": "application/ndwn",
|
||||
".nif": "application/x-nif",
|
||||
".nmz": "application/x-scream",
|
||||
".nokia-op-logo": "image/vnd.nok-oplogo-color",
|
||||
".npx": "application/x-netfpx",
|
||||
".nsnd": "audio/nsnd",
|
||||
".nva": "application/x-neva1",
|
||||
".nws": "message/rfc822",
|
||||
".oom": "application/x-AtlasMate-Plugin",
|
||||
".p10": "application/pkcs10",
|
||||
".p12": "application/x-pkcs12",
|
||||
".p7b": "application/x-pkcs7-certificates",
|
||||
".p7c": "application/x-pkcs7-mime",
|
||||
".p7m": "application/x-pkcs7-mime",
|
||||
".p7r": "application/x-pkcs7-certreqresp",
|
||||
".p7s": "application/x-pkcs7-signature",
|
||||
".pac": "audio/x-pac",
|
||||
".pae": "audio/x-epac",
|
||||
".pan": "application/x-pan",
|
||||
".pcx": "image/x-pcx",
|
||||
".pda": "image/x-pda",
|
||||
".pfr": "application/font-tdpfr",
|
||||
".pfx": "application/x-pkcs12",
|
||||
".pko": "application/ynd.ms-pkipko",
|
||||
".pm": "application/x-perl",
|
||||
".pma": "application/x-perfmon",
|
||||
".pmc": "application/x-perfmon",
|
||||
".pmd": "application/x-pmd",
|
||||
".pml": "application/x-perfmon",
|
||||
".pmr": "application/x-perfmon",
|
||||
".pmw": "application/x-perfmon",
|
||||
".pnz": "image/png",
|
||||
".pot,": "application/vnd.ms-powerpoint",
|
||||
".pps": "application/vnd.ms-powerpoint",
|
||||
".pqf": "application/x-cprplayer",
|
||||
".pqi": "application/cprplayer",
|
||||
".prc": "application/x-prc",
|
||||
".prf": "application/pics-rules",
|
||||
".prop": "text/plain",
|
||||
".proxy": "application/x-ns-proxy-autoconfig",
|
||||
".ptlk": "application/listenup",
|
||||
".pub": "application/x-mspublisher",
|
||||
".pvx": "video/x-pv-pvx",
|
||||
".qcp": "audio/vnd.qcelp",
|
||||
".r3t": "text/vnd.rn-realtext3d",
|
||||
".rar": "application/octet-stream",
|
||||
".rc": "text/plain",
|
||||
".rf": "image/vnd.rn-realflash",
|
||||
".rlf": "application/x-richlink",
|
||||
".rmf": "audio/x-rmf",
|
||||
".rmi": "audio/mid",
|
||||
".rmm": "audio/x-pn-realaudio",
|
||||
".rmvb": "audio/x-pn-realaudio",
|
||||
".rnx": "application/vnd.rn-realplayer",
|
||||
".rp": "image/vnd.rn-realpix",
|
||||
".rt": "text/vnd.rn-realtext",
|
||||
".rte": "x-lml/x-gps",
|
||||
".rtg": "application/metastream",
|
||||
".rv": "video/vnd.rn-realvideo",
|
||||
".rwc": "application/x-rogerwilco",
|
||||
".s3m": "audio/x-mod",
|
||||
".s3z": "audio/x-mod",
|
||||
".sca": "application/x-supercard",
|
||||
".scd": "application/x-msschedule",
|
||||
".sct": "text/scriptlet",
|
||||
".sdf": "application/e-score",
|
||||
".sea": "application/x-stuffit",
|
||||
".setpay": "application/set-payment-initiation",
|
||||
".setreg": "application/set-registration-initiation",
|
||||
".shtml": "text/html",
|
||||
".shtm": "text/html",
|
||||
".shw": "application/presentations",
|
||||
".si6": "image/si6",
|
||||
".si7": "image/vnd.stiwap.sis",
|
||||
".si9": "image/vnd.lgtwap.sis",
|
||||
".slc": "application/x-salsa",
|
||||
".smd": "audio/x-smd",
|
||||
".smp": "application/studiom",
|
||||
".smz": "audio/x-smd",
|
||||
".spc": "application/x-pkcs7-certificates",
|
||||
".spr": "application/x-sprite",
|
||||
".sprite": "application/x-sprite",
|
||||
".sdp": "application/sdp",
|
||||
".spt": "application/x-spt",
|
||||
".sst": "application/vnd.ms-pkicertstore",
|
||||
".stk": "application/hyperstudio",
|
||||
".stl": "application/vnd.ms-pkistl",
|
||||
".stm": "text/html",
|
||||
".svf": "image/vnd",
|
||||
".svh": "image/svh",
|
||||
".svr": "x-world/x-svr",
|
||||
".swfl": "application/x-shockwave-flash",
|
||||
".tad": "application/octet-stream",
|
||||
".talk": "text/x-speech",
|
||||
".taz": "application/x-tar",
|
||||
".tbp": "application/x-timbuktu",
|
||||
".tbt": "application/x-timbuktu",
|
||||
".tgz": "application/x-compressed",
|
||||
".thm": "application/vnd.eri.thm",
|
||||
".tki": "application/x-tkined",
|
||||
".tkined": "application/x-tkined",
|
||||
".toc": "application/toc",
|
||||
".toy": "image/toy",
|
||||
".trk": "x-lml/x-gps",
|
||||
".trm": "application/x-msterminal",
|
||||
".tsi": "audio/tsplayer",
|
||||
".tsp": "application/dsptype",
|
||||
".ttf": "application/octet-stream",
|
||||
".ttz": "application/t-time",
|
||||
".uls": "text/iuls",
|
||||
".ult": "audio/x-mod",
|
||||
".uu": "application/x-uuencode",
|
||||
".uue": "application/x-uuencode",
|
||||
".vcf": "text/x-vcard",
|
||||
".vdo": "video/vdo",
|
||||
".vib": "audio/vib",
|
||||
".viv": "video/vivo",
|
||||
".vivo": "video/vivo",
|
||||
".vmd": "application/vocaltec-media-desc",
|
||||
".vmf": "application/vocaltec-media-file",
|
||||
".vmi": "application/x-dreamcast-vms-info",
|
||||
".vms": "application/x-dreamcast-vms",
|
||||
".vox": "audio/voxware",
|
||||
".vqe": "audio/x-twinvq-plugin",
|
||||
".vqf": "audio/x-twinvq",
|
||||
".vql": "audio/x-twinvq",
|
||||
".vre": "x-world/x-vream",
|
||||
".vrt": "x-world/x-vrt",
|
||||
".vrw": "x-world/x-vream",
|
||||
".vts": "workbook/formulaone",
|
||||
".wcm": "application/vnd.ms-works",
|
||||
".wdb": "application/vnd.ms-works",
|
||||
".web": "application/vnd.xara",
|
||||
".wi": "image/wavelet",
|
||||
".wis": "application/x-InstallShield",
|
||||
".wks": "application/vnd.ms-works",
|
||||
".wmd": "application/x-ms-wmd",
|
||||
".wmf": "application/x-msmetafile",
|
||||
".wmlscript": "text/vnd.wap.wmlscript",
|
||||
".wmz": "application/x-ms-wmz",
|
||||
".wpng": "image/x-up-wpng",
|
||||
".wps": "application/vnd.ms-works",
|
||||
".wpt": "x-lml/x-gps",
|
||||
".wri": "application/x-mswrite",
|
||||
".wrz": "x-world/x-vrml",
|
||||
".ws": "text/vnd.wap.wmlscript",
|
||||
".wsc": "application/vnd.wap.wmlscriptc",
|
||||
".wv": "video/wavelet",
|
||||
".wxl": "application/x-wxl",
|
||||
".x-gzip": "application/x-gzip",
|
||||
".xaf": "x-world/x-vrml",
|
||||
".xar": "application/vnd.xara",
|
||||
".xdm": "application/x-xdma",
|
||||
".xdma": "application/x-xdma",
|
||||
".xdw": "application/vnd.fujixerox.docuworks",
|
||||
".xhtm": "application/xhtml+xml",
|
||||
".xla": "application/vnd.ms-excel",
|
||||
".xlc": "application/vnd.ms-excel",
|
||||
".xll": "application/x-excel",
|
||||
".xlm": "application/vnd.ms-excel",
|
||||
".xlt": "application/vnd.ms-excel",
|
||||
".xlw": "application/vnd.ms-excel",
|
||||
".xm": "audio/x-mod",
|
||||
".xmz": "audio/x-mod",
|
||||
".xof": "x-world/x-vrml",
|
||||
".xpi": "application/x-xpinstall",
|
||||
".xsit": "text/xml",
|
||||
".yz1": "application/x-yz1",
|
||||
".z": "application/x-compress",
|
||||
".zac": "application/x-zaurus-zac",
|
||||
".json": "application/json",
|
||||
}
|
||||
|
||||
// TypeByExtension returns the MIME type associated with the file extension ext.
|
||||
// gets the file's MIME type for HTTP header Content-Type
|
||||
func TypeByExtension(filePath string) string {
|
||||
typ := mime.TypeByExtension(path.Ext(filePath))
|
||||
if typ == "" {
|
||||
typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
|
||||
} else {
|
||||
if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") {
|
||||
typ = removeCharsetInMimeType(typ)
|
||||
}
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// removeCharsetInMimeType drops every "charset=..." parameter from the
// semicolon-separated MIME type typ. Remaining segments are re-joined the
// same way the original code did: each non-leading kept segment is emitted
// with a "; " prefix followed by the segment's original (untrimmed) text.
func removeCharsetInMimeType(typ string) (str string) {
	var b strings.Builder
	for idx, segment := range strings.Split(typ, ";") {
		// Trim only for the charset check; the written text keeps its spacing.
		if strings.Contains(strings.Trim(segment, " "), "charset=") {
			continue
		}
		if idx > 0 {
			b.WriteString("; ")
		}
		b.WriteString(segment)
	}
	return b.String()
}
|
||||
69
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
generated
vendored
Normal file
69
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Response defines HTTP response from OSS
type Response struct {
	StatusCode int           // HTTP status code returned by OSS
	Headers    http.Header   // response headers
	Body       io.ReadCloser // response body; the caller is responsible for closing it
	ClientCRC  uint64        // CRC64 computed on the client side — presumably over transferred bytes; confirm against CheckCRC
	ServerCRC  uint64        // CRC64 value reported by the server
}
|
||||
|
||||
// Read delegates to the underlying response body, so a *Response can be used
// directly as an io.Reader.
func (r *Response) Read(p []byte) (n int, err error) {
	return r.Body.Read(p)
}
|
||||
|
||||
// Close closes the HTTP response body.
func (r *Response) Close() error {
	return r.Body.Close()
}
|
||||
|
||||
// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
	ObjectKey string    // target object key in the bucket
	Reader    io.Reader // source of the object data to upload
}
|
||||
|
||||
// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
	ObjectKey string // key of the object to download
}
|
||||
|
||||
// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
	Response  *Response   // raw HTTP response wrapper
	ClientCRC hash.Hash64 // rolling client-side CRC64 — presumably updated as the body is read; verify against DoGetObject
	ServerCRC uint64      // CRC64 value reported by the server
}
|
||||
|
||||
// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
	ObjectKey string    // key of the object to append to
	Reader    io.Reader // source of the data to append
	Position  int64     // append offset — presumably the object's current length; confirm against DoAppendObject
}
|
||||
|
||||
// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
	NextPosition int64  // position to use for the next append
	CRC          uint64 // CRC64 reported by the server — TODO confirm scope (whole object vs appended data)
}
|
||||
|
||||
// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
	InitResult *InitiateMultipartUploadResult // multipart session (upload ID and key) from InitiateMultipartUpload
	Reader     io.Reader                      // source of this part's data
	PartSize   int64                          // number of bytes to read from Reader for this part
	PartNumber int                            // part number (1 to 10,000)
}
|
||||
|
||||
// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
	Part UploadPart // the uploaded part's number and ETag
}
|
||||
474
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
generated
vendored
Normal file
474
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
generated
vendored
Normal file
@@ -0,0 +1,474 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// CopyFile is multipart copy object
|
||||
//
|
||||
// srcBucketName source bucket name
|
||||
// srcObjectKey source object name
|
||||
// destObjectKey target object name in the form of bucketname.objectkey
|
||||
// partSize the part size in byte.
|
||||
// options object's contraints. Check out function InitiateMultipartUpload.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
|
||||
destBucketName := bucket.BucketName
|
||||
if partSize < MinPartSize || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (1024KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
var strVersionId string
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
strVersionId = versionId.(string)
|
||||
}
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId)
|
||||
if cpFilePath != "" {
|
||||
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
|
||||
partSize, options, routines)
|
||||
}
|
||||
|
||||
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
cpFileName := getCpFileName(src, dest, versionId)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// ----- Concurrently copy without checkpoint ---------

// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
	bucket        *Bucket                       // destination bucket handle
	imur          InitiateMultipartUploadResult // multipart upload session (key and upload ID)
	srcBucketName string                        // source bucket name
	srcObjectKey  string                        // source object key
	options       []Option                      // options forwarded to UploadPartCopy
	hook          copyPartHook                  // hook invoked before each part copy (test seam)
}
|
||||
|
||||
// copyPartHook is the hook for testing purpose
type copyPartHook func(part copyPart) error

// copyPartHooker is called before each part copy; tests swap it to inject failures.
var copyPartHooker copyPartHook = defaultCopyPartHook

// defaultCopyPartHook is the no-op production hook.
func defaultCopyPartHook(part copyPart) error {
	return nil
}
|
||||
|
||||
// copyWorker copies worker
// It consumes parts from jobs, copies each via UploadPartCopy, and reports
// the outcome: successful parts go to results, the first error goes to
// failed (then the worker stops). A closed die channel makes the worker exit
// without publishing further results.
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		// Give the (test) hook a chance to abort this part.
		if err := arg.hook(chunk); err != nil {
			failed <- err
			break
		}
		chunkSize := chunk.End - chunk.Start + 1
		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
			chunk.Start, chunkSize, chunk.Number, arg.options...)
		if err != nil {
			failed <- err
			break
		}
		// Bail out silently if the coordinator already gave up.
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}
|
||||
|
||||
// copyScheduler
|
||||
func copyScheduler(jobs chan copyPart, parts []copyPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
// copyPart structure describes one range of the source object to copy.
type copyPart struct {
	Number int   // Part number (from 1 to 10,000)
	Start  int64 // The start index in the source file.
	End    int64 // The end index in the source file (inclusive)
}
|
||||
|
||||
// getCopyParts calculates copy parts
|
||||
func getCopyParts(objectSize, partSize int64) []copyPart {
|
||||
parts := []copyPart{}
|
||||
part := copyPart{}
|
||||
i := 0
|
||||
for offset := int64(0); offset < objectSize; offset += partSize {
|
||||
part.Number = i + 1
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, objectSize, partSize)
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
// getSrcObjectBytes gets the source file size
|
||||
func getSrcObjectBytes(parts []copyPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
ob += (part.End - part.Start + 1)
|
||||
}
|
||||
return ob
|
||||
}
|
||||
|
||||
// copyFile is a concurrently copy without checkpoint
|
||||
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// choice valid options
|
||||
headerOptions := ChoiceHeadObjectOption(options)
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
abortOptions := ChoiceAbortPartOption(options)
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get copy parts
|
||||
parts := getCopyParts(objectSize, partSize)
|
||||
// Initialize the multipart upload
|
||||
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jobs := make(chan copyPart, len(parts))
|
||||
results := make(chan UploadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
var completedBytes int64
|
||||
totalBytes := getSrcObjectBytes(parts)
|
||||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start to copy workers
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts finished.
|
||||
completed := 0
|
||||
ups := make([]UploadPart, len(parts))
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ups[part.PartNumber-1] = part
|
||||
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
|
||||
completedBytes += copyBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
descBucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multipart upload
|
||||
_, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- Concurrently copy with checkpoint -----

// copyCpMagic is the magic number stamped into copy checkpoint files.
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"

// copyCheckpoint is the JSON-serialized state of a resumable copy task
// (written by dump, restored by load, validated by isValid).
type copyCheckpoint struct {
	Magic          string       // Magic
	MD5            string       // CP content MD5
	SrcBucketName  string       // Source bucket
	SrcObjectKey   string       // Source object
	DestBucketName string       // Target bucket
	DestObjectKey  string       // Target object
	CopyID         string       // Copy ID
	ObjStat        objectStat   // Object stat
	Parts          []copyPart   // Copy parts
	CopyParts      []UploadPart // The uploaded parts
	PartStat       []bool       // The part status
}
|
||||
|
||||
// isValid checks if the data is valid which means CP is valid and object is not updated.
|
||||
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
|
||||
// Compare CP's magic number and the MD5.
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size and last modified time and etag.
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the checkpoint file
|
||||
func (cp *copyCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// update records a successfully copied part and marks it as finished.
func (cp *copyCheckpoint) update(part UploadPart) {
	cp.CopyParts[part.PartNumber-1] = part
	cp.PartStat[part.PartNumber-1] = true
}
|
||||
|
||||
// dump dumps the CP to the file
|
||||
func (cp *copyCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
func (cp copyCheckpoint) todoParts() []copyPart {
|
||||
dps := []copyPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
if !ps {
|
||||
dps = append(dps, cp.Parts[i])
|
||||
}
|
||||
}
|
||||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns finished bytes count
|
||||
func (cp copyCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
if cp.PartStat[i] {
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initializes the multipart upload
|
||||
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
|
||||
partSize int64, options []Option) error {
|
||||
// CP
|
||||
cp.Magic = copyCpMagic
|
||||
cp.SrcBucketName = srcBucket.BucketName
|
||||
cp.SrcObjectKey = srcObjectKey
|
||||
cp.DestBucketName = destBucket.BucketName
|
||||
cp.DestObjectKey = destObjectKey
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.ObjStat.Size = objectSize
|
||||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
// Parts
|
||||
cp.Parts = getCopyParts(objectSize, partSize)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
cp.CopyParts = make([]UploadPart, len(cp.Parts))
|
||||
|
||||
// Init copy
|
||||
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.CopyID = imur.UploadID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// complete finishes the multipart upload recorded in the checkpoint and, on
// success, removes the checkpoint file (the removal error is ignored — a
// stale file only costs one extra validation on the next run).
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
	imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
		Key: cp.DestObjectKey, UploadID: cp.CopyID}
	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
	if err != nil {
		return err
	}
	os.Remove(cpFilePath)
	return err
}
|
||||
|
||||
// copyFileWithCp is concurrently copy with checkpoint
|
||||
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// Load CP data
|
||||
ccp := copyCheckpoint{}
|
||||
err = ccp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// choice valid options
|
||||
headerOptions := ChoiceHeadObjectOption(options)
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load error or the CP data is invalid---reinitialize
|
||||
valid, err := ccp.isValid(meta)
|
||||
if err != nil || !valid {
|
||||
if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Unfinished parts
|
||||
parts := ccp.todoParts()
|
||||
imur := InitiateMultipartUploadResult{
|
||||
Bucket: destBucketName,
|
||||
Key: destObjectKey,
|
||||
UploadID: ccp.CopyID}
|
||||
|
||||
jobs := make(chan copyPart, len(parts))
|
||||
results := make(chan UploadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
completedBytes := ccp.getCompletedBytes()
|
||||
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutines
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts completed.
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ccp.update(part)
|
||||
ccp.dump(cpFilePath)
|
||||
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
|
||||
completedBytes += copyBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
|
||||
}
|
||||
320
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
generated
vendored
Normal file
320
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
generated
vendored
Normal file
@@ -0,0 +1,320 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// InitiateMultipartUpload initializes multipart upload
//
// objectKey    object name
// options      the object constraints for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
//              ServerSideEncryption, Meta, check out the following link:
//              https://www.alibabacloud.com/help/en/object-storage-service/latest/initiatemultipartupload
//
// InitiateMultipartUploadResult    the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy.
// error    it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
	var imur InitiateMultipartUploadResult
	// Default the Content-Type from the object key's extension.
	opts := AddContentType(options, objectKey)
	params, _ := GetRawParams(options)
	// Normalize these switch-style query parameters — presumably so empty
	// values are sent as bare flags; confirm against ConvertEmptyValueToNil.
	paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"}
	ConvertEmptyValueToNil(params, paramKeys)
	params["uploads"] = nil

	resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
	if err != nil {
		return imur, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &imur)
	return imur, err
}
|
||||
|
||||
// UploadPart uploads parts
//
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
// And thus with the same part number and upload Id, another part upload will overwrite the data.
// Except the last one, minimal part size is 100KB. There's no limit on the last part size.
//
// imur          the returned value of InitiateMultipartUpload.
// reader        io.Reader the reader for the part's data.
// partSize      the part size.
// partNumber    the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
//
// UploadPart    the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
// error         it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	// Thin wrapper: package the arguments and delegate to DoUploadPart.
	request := &UploadPartRequest{
		InitResult: &imur,
		Reader:     reader,
		PartSize:   partSize,
		PartNumber: partNumber,
	}

	result, err := bucket.DoUploadPart(request, options)

	return result.Part, err
}
|
||||
|
||||
// UploadPartFromFile uploads part from the file.
|
||||
//
|
||||
// imur the return value of a successful InitiateMultipartUpload.
|
||||
// filePath the local file path to upload.
|
||||
// startPosition the start position in the local file.
|
||||
// partSize the part size.
|
||||
// partNumber the part number (from 1 to 10,000)
|
||||
//
|
||||
// UploadPart the return value consists of PartNumber and ETag.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
|
||||
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
var part = UploadPart{}
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return part, err
|
||||
}
|
||||
defer fd.Close()
|
||||
fd.Seek(startPosition, os.SEEK_SET)
|
||||
|
||||
request := &UploadPartRequest{
|
||||
InitResult: &imur,
|
||||
Reader: fd,
|
||||
PartSize: partSize,
|
||||
PartNumber: partNumber,
|
||||
}
|
||||
|
||||
result, err := bucket.DoUploadPart(request, options)
|
||||
|
||||
return result.Part, err
|
||||
}
|
||||
|
||||
// DoUploadPart does the actual part upload.
//
// request    part upload request
//
// UploadPartResult    the result of uploading part.
// error    it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
	listener := GetProgressListener(options)
	options = append(options, ContentLength(request.PartSize))
	params := map[string]interface{}{}
	params["partNumber"] = strconv.Itoa(request.PartNumber)
	params["uploadId"] = request.InitResult.UploadID
	// LimitedReader caps the request body at exactly PartSize bytes.
	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
	if err != nil {
		return &UploadPartResult{}, err
	}
	defer resp.Body.Close()

	// The uploaded part is identified by its number plus the server's ETag.
	part := UploadPart{
		ETag:       resp.Headers.Get(HTTPHeaderEtag),
		PartNumber: request.PartNumber,
	}

	// Verify the transfer when client-side CRC checking is enabled.
	if bucket.GetConfig().IsEnableCRC {
		err = CheckCRC(resp, "DoUploadPart")
		if err != nil {
			return &UploadPartResult{part}, err
		}
	}

	return &UploadPartResult{part}, nil
}
|
||||
|
||||
// UploadPartCopy uploads part copy
//
// imur             the return value of InitiateMultipartUpload
// srcBucketName    source bucket name
// srcObjectKey     source object key
// startPosition    the part's start index in the source file
// partSize         the part size
// partNumber       the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
// options          the constraints of source object for the copy. The copy happens only when these constraints are met. Otherwise it returns error.
//                  CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
//                  https://www.alibabacloud.com/help/en/object-storage-service/latest/uploadpartcopy
//
// UploadPart    the return value consists of PartNumber and ETag.
// error         it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var out UploadPartCopyResult
	var part UploadPart
	var opts []Option

	// First find the version id; it decides how the copy-source header is built.
	versionIdKey := "versionId"
	versionId, _ := FindOption(options, versionIdKey, nil)
	if versionId == nil {
		opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
			CopySourceRange(startPosition, partSize)}
	} else {
		opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)),
			CopySourceRange(startPosition, partSize)}
		// Remove the consumed pseudo-option so it is not forwarded.
		options = DeleteOption(options, versionIdKey)
	}

	opts = append(opts, options...)

	params := map[string]interface{}{}
	params["partNumber"] = strconv.Itoa(partNumber)
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
	if err != nil {
		return part, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return part, err
	}
	part.ETag = out.ETag
	part.PartNumber = partNumber

	return part, nil
}
|
||||
|
||||
// CompleteMultipartUpload completes the multipart upload.
//
// imur     the return value of InitiateMultipartUpload.
// parts    the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResponse    the return value when the call succeeds. Only valid when the error is nil.
// error    it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
	parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
	var out CompleteMultipartUploadResult

	// Sort parts by part number before marshaling the request XML.
	sort.Sort(UploadParts(parts))
	cxml := completeMultipartUploadXML{}
	cxml.Part = parts
	bs, err := xml.Marshal(cxml)
	if err != nil {
		return out, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	params := map[string]interface{}{}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return out, err
	}
	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
	if len(body) > 0 {
		if err != nil {
			// Non-200 with a body: try to surface the service error details.
			err = tryConvertServiceError(body, resp, err)
		} else {
			callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
			if callback == nil {
				// Normal completion: the body is the CompleteMultipartUpload XML.
				err = xml.Unmarshal(body, &out)
			} else {
				// With a callback the body is the callback response; hand the
				// raw bytes back through the responseBody option if requested.
				rb, _ := FindOption(options, responseBody, nil)
				if rb != nil {
					if rbody, ok := rb.(*[]byte); ok {
						*rbody = body
					}
				}
			}
		}
	}
	return out, err
}
|
||||
|
||||
// AbortMultipartUpload aborts the multipart upload.
//
// imur    the return value of InitiateMultipartUpload.
//
// error   it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
	params := map[string]interface{}{}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// A successful abort answers 204 No Content.
	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
|
||||
|
||||
// ListUploadedParts lists the uploaded parts.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
//
|
||||
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
|
||||
var out ListUploadedPartsResult
|
||||
options = append(options, EncodingType("url"))
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = decodeListUploadedPartsResult(&out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// ListMultipartUploads lists all ongoing multipart upload tasks
//
// options    listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
//            MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
//
// ListMultipartUploadResponse    the return value if it succeeds, only valid when error is nil.
// error    it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
	var out ListMultipartUploadResult

	// Ask the server to URL-encode keys in the response.
	options = append(options, EncodingType("url"))
	params, err := GetRawParams(options)
	if err != nil {
		return out, err
	}
	params["uploads"] = nil

	resp, err := bucket.doInner("GET", "", params, options, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return out, err
	}
	// Decode URL-encoded fields back to their original form.
	err = decodeListMultipartUploadResult(&out)
	return out, err
}
|
||||
735
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
generated
vendored
Normal file
735
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
generated
vendored
Normal file
@@ -0,0 +1,735 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type optionType string
|
||||
|
||||
const (
|
||||
optionParam optionType = "HTTPParameter" // URL parameter
|
||||
optionHTTP optionType = "HTTPHeader" // HTTP header
|
||||
optionContext optionType = "HTTPContext" // context
|
||||
optionArg optionType = "FuncArgument" // Function argument
|
||||
|
||||
)
|
||||
|
||||
const (
|
||||
deleteObjectsQuiet = "delete-objects-quiet"
|
||||
routineNum = "x-routine-num"
|
||||
checkpointConfig = "x-cp-config"
|
||||
initCRC64 = "init-crc64"
|
||||
progressListener = "x-progress-listener"
|
||||
storageClass = "storage-class"
|
||||
responseHeader = "x-response-header"
|
||||
redundancyType = "redundancy-type"
|
||||
objectHashFunc = "object-hash-func"
|
||||
responseBody = "x-response-body"
|
||||
contextArg = "x-context-arg"
|
||||
)
|
||||
|
||||
type (
|
||||
optionValue struct {
|
||||
Value interface{}
|
||||
Type optionType
|
||||
}
|
||||
|
||||
// Option HTTP option
|
||||
Option func(map[string]optionValue) error
|
||||
)
|
||||
|
||||
// ACL is an option to set X-Oss-Acl header
|
||||
func ACL(acl ACLType) Option {
|
||||
return setHeader(HTTPHeaderOssACL, string(acl))
|
||||
}
|
||||
|
||||
// ContentType is an option to set Content-Type header
|
||||
func ContentType(value string) Option {
|
||||
return setHeader(HTTPHeaderContentType, value)
|
||||
}
|
||||
|
||||
// ContentLength is an option to set Content-Length header
|
||||
func ContentLength(length int64) Option {
|
||||
return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
|
||||
}
|
||||
|
||||
// CacheControl is an option to set Cache-Control header
|
||||
func CacheControl(value string) Option {
|
||||
return setHeader(HTTPHeaderCacheControl, value)
|
||||
}
|
||||
|
||||
// ContentDisposition is an option to set Content-Disposition header
|
||||
func ContentDisposition(value string) Option {
|
||||
return setHeader(HTTPHeaderContentDisposition, value)
|
||||
}
|
||||
|
||||
// ContentEncoding is an option to set Content-Encoding header
|
||||
func ContentEncoding(value string) Option {
|
||||
return setHeader(HTTPHeaderContentEncoding, value)
|
||||
}
|
||||
|
||||
// ContentLanguage is an option to set Content-Language header
|
||||
func ContentLanguage(value string) Option {
|
||||
return setHeader(HTTPHeaderContentLanguage, value)
|
||||
}
|
||||
|
||||
// ContentMD5 is an option to set Content-MD5 header
|
||||
func ContentMD5(value string) Option {
|
||||
return setHeader(HTTPHeaderContentMD5, value)
|
||||
}
|
||||
|
||||
// Expires is an option to set Expires header
|
||||
func Expires(t time.Time) Option {
|
||||
return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
// Meta is an option to set Meta header
|
||||
func Meta(key, value string) Option {
|
||||
return setHeader(HTTPHeaderOssMetaPrefix+key, value)
|
||||
}
|
||||
|
||||
// Range is an option to set Range header, [start, end]
|
||||
func Range(start, end int64) Option {
|
||||
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
|
||||
}
|
||||
|
||||
// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
|
||||
func NormalizedRange(nr string) Option {
|
||||
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
|
||||
}
|
||||
|
||||
// AcceptEncoding is an option to set Accept-Encoding header
|
||||
func AcceptEncoding(value string) Option {
|
||||
return setHeader(HTTPHeaderAcceptEncoding, value)
|
||||
}
|
||||
|
||||
// IfModifiedSince is an option to set If-Modified-Since header
|
||||
func IfModifiedSince(t time.Time) Option {
|
||||
return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
// IfUnmodifiedSince is an option to set If-Unmodified-Since header
|
||||
func IfUnmodifiedSince(t time.Time) Option {
|
||||
return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
// IfMatch is an option to set If-Match header
|
||||
func IfMatch(value string) Option {
|
||||
return setHeader(HTTPHeaderIfMatch, value)
|
||||
}
|
||||
|
||||
// IfNoneMatch is an option to set IfNoneMatch header
|
||||
func IfNoneMatch(value string) Option {
|
||||
return setHeader(HTTPHeaderIfNoneMatch, value)
|
||||
}
|
||||
|
||||
// CopySource is an option to set X-Oss-Copy-Source header
|
||||
func CopySource(sourceBucket, sourceObject string) Option {
|
||||
return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
|
||||
}
|
||||
|
||||
// CopySourceVersion is an option to set X-Oss-Copy-Source header,include versionId
|
||||
func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option {
|
||||
return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId)
|
||||
}
|
||||
|
||||
// CopySourceRange is an option to set X-Oss-Copy-Source header
|
||||
func CopySourceRange(startPosition, partSize int64) Option {
|
||||
val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
|
||||
strconv.FormatInt((startPosition+partSize-1), 10)
|
||||
return setHeader(HTTPHeaderOssCopySourceRange, val)
|
||||
}
|
||||
|
||||
// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
|
||||
func CopySourceIfMatch(value string) Option {
|
||||
return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
|
||||
}
|
||||
|
||||
// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
|
||||
func CopySourceIfNoneMatch(value string) Option {
|
||||
return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
|
||||
}
|
||||
|
||||
// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header
|
||||
func CopySourceIfModifiedSince(t time.Time) Option {
|
||||
return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
|
||||
func CopySourceIfUnmodifiedSince(t time.Time) Option {
|
||||
return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
// MetadataDirective is an option to set X-Oss-Metadata-Directive header
|
||||
func MetadataDirective(directive MetadataDirectiveType) Option {
|
||||
return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
|
||||
}
|
||||
|
||||
// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
|
||||
func ServerSideEncryption(value string) Option {
|
||||
return setHeader(HTTPHeaderOssServerSideEncryption, value)
|
||||
}
|
||||
|
||||
// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
|
||||
func ServerSideEncryptionKeyID(value string) Option {
|
||||
return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
|
||||
}
|
||||
|
||||
// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header
|
||||
func ServerSideDataEncryption(value string) Option {
|
||||
return setHeader(HTTPHeaderOssServerSideDataEncryption, value)
|
||||
}
|
||||
|
||||
// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header
|
||||
func SSECAlgorithm(value string) Option {
|
||||
return setHeader(HTTPHeaderSSECAlgorithm, value)
|
||||
}
|
||||
|
||||
// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header
|
||||
func SSECKey(value string) Option {
|
||||
return setHeader(HTTPHeaderSSECKey, value)
|
||||
}
|
||||
|
||||
// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header
|
||||
func SSECKeyMd5(value string) Option {
|
||||
return setHeader(HTTPHeaderSSECKeyMd5, value)
|
||||
}
|
||||
|
||||
// ObjectACL is an option to set X-Oss-Object-Acl header
|
||||
func ObjectACL(acl ACLType) Option {
|
||||
return setHeader(HTTPHeaderOssObjectACL, string(acl))
|
||||
}
|
||||
|
||||
// symlinkTarget is an option to set X-Oss-Symlink-Target
|
||||
func symlinkTarget(targetObjectKey string) Option {
|
||||
return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
|
||||
}
|
||||
|
||||
// Origin is an option to set Origin header
|
||||
func Origin(value string) Option {
|
||||
return setHeader(HTTPHeaderOrigin, value)
|
||||
}
|
||||
|
||||
// ObjectStorageClass is an option to set the storage class of object
|
||||
func ObjectStorageClass(storageClass StorageClassType) Option {
|
||||
return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
|
||||
}
|
||||
|
||||
// Callback is an option to set callback values
|
||||
func Callback(callback string) Option {
|
||||
return setHeader(HTTPHeaderOssCallback, callback)
|
||||
}
|
||||
|
||||
// CallbackVar is an option to set callback user defined values
|
||||
func CallbackVar(callbackVar string) Option {
|
||||
return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
|
||||
}
|
||||
|
||||
// RequestPayer is an option to set payer who pay for the request
|
||||
func RequestPayer(payerType PayerType) Option {
|
||||
return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
|
||||
}
|
||||
|
||||
// RequestPayerParam is an option to set payer who pay for the request
|
||||
func RequestPayerParam(payerType PayerType) Option {
|
||||
return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType)))
|
||||
}
|
||||
|
||||
// SetTagging is an option to set object tagging
|
||||
func SetTagging(tagging Tagging) Option {
|
||||
if len(tagging.Tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
taggingValue := ""
|
||||
for index, tag := range tagging.Tags {
|
||||
if index != 0 {
|
||||
taggingValue += "&"
|
||||
}
|
||||
taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
|
||||
}
|
||||
return setHeader(HTTPHeaderOssTagging, taggingValue)
|
||||
}
|
||||
|
||||
// TaggingDirective is an option to set X-Oss-Metadata-Directive header
|
||||
func TaggingDirective(directive TaggingDirectiveType) Option {
|
||||
return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
|
||||
}
|
||||
|
||||
// ACReqMethod is an option to set Access-Control-Request-Method header
|
||||
func ACReqMethod(value string) Option {
|
||||
return setHeader(HTTPHeaderACReqMethod, value)
|
||||
}
|
||||
|
||||
// ACReqHeaders is an option to set Access-Control-Request-Headers header
|
||||
func ACReqHeaders(value string) Option {
|
||||
return setHeader(HTTPHeaderACReqHeaders, value)
|
||||
}
|
||||
|
||||
// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit
|
||||
func TrafficLimitHeader(value int64) Option {
|
||||
return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
|
||||
}
|
||||
|
||||
// UserAgentHeader is an option to set HTTPHeaderUserAgent
|
||||
func UserAgentHeader(ua string) Option {
|
||||
return setHeader(HTTPHeaderUserAgent, ua)
|
||||
}
|
||||
|
||||
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
|
||||
func ForbidOverWrite(forbidWrite bool) Option {
|
||||
if forbidWrite {
|
||||
return setHeader(HTTPHeaderOssForbidOverWrite, "true")
|
||||
} else {
|
||||
return setHeader(HTTPHeaderOssForbidOverWrite, "false")
|
||||
}
|
||||
}
|
||||
|
||||
// RangeBehavior is an option to set Range value, such as "standard"
|
||||
func RangeBehavior(value string) Option {
|
||||
return setHeader(HTTPHeaderOssRangeBehavior, value)
|
||||
}
|
||||
|
||||
func PartHashCtxHeader(value string) Option {
|
||||
return setHeader(HTTPHeaderOssHashCtx, value)
|
||||
}
|
||||
|
||||
func PartMd5CtxHeader(value string) Option {
|
||||
return setHeader(HTTPHeaderOssMd5Ctx, value)
|
||||
}
|
||||
|
||||
func PartHashCtxParam(value string) Option {
|
||||
return addParam("x-oss-hash-ctx", value)
|
||||
}
|
||||
|
||||
func PartMd5CtxParam(value string) Option {
|
||||
return addParam("x-oss-md5-ctx", value)
|
||||
}
|
||||
|
||||
// Delimiter is an option to set delimiler parameter
|
||||
func Delimiter(value string) Option {
|
||||
return addParam("delimiter", value)
|
||||
}
|
||||
|
||||
// Marker is an option to set marker parameter
|
||||
func Marker(value string) Option {
|
||||
return addParam("marker", value)
|
||||
}
|
||||
|
||||
// MaxKeys is an option to set maxkeys parameter
|
||||
func MaxKeys(value int) Option {
|
||||
return addParam("max-keys", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// Prefix is an option to set prefix parameter
|
||||
func Prefix(value string) Option {
|
||||
return addParam("prefix", value)
|
||||
}
|
||||
|
||||
// EncodingType is an option to set encoding-type parameter
|
||||
func EncodingType(value string) Option {
|
||||
return addParam("encoding-type", value)
|
||||
}
|
||||
|
||||
// MaxUploads is an option to set max-uploads parameter
|
||||
func MaxUploads(value int) Option {
|
||||
return addParam("max-uploads", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// KeyMarker is an option to set key-marker parameter
|
||||
func KeyMarker(value string) Option {
|
||||
return addParam("key-marker", value)
|
||||
}
|
||||
|
||||
// VersionIdMarker is an option to set version-id-marker parameter
|
||||
func VersionIdMarker(value string) Option {
|
||||
return addParam("version-id-marker", value)
|
||||
}
|
||||
|
||||
// VersionId is an option to set versionId parameter
|
||||
func VersionId(value string) Option {
|
||||
return addParam("versionId", value)
|
||||
}
|
||||
|
||||
// TagKey is an option to set tag key parameter
|
||||
func TagKey(value string) Option {
|
||||
return addParam("tag-key", value)
|
||||
}
|
||||
|
||||
// TagValue is an option to set tag value parameter
|
||||
func TagValue(value string) Option {
|
||||
return addParam("tag-value", value)
|
||||
}
|
||||
|
||||
// UploadIDMarker is an option to set upload-id-marker parameter
|
||||
func UploadIDMarker(value string) Option {
|
||||
return addParam("upload-id-marker", value)
|
||||
}
|
||||
|
||||
// MaxParts is an option to set max-parts parameter
|
||||
func MaxParts(value int) Option {
|
||||
return addParam("max-parts", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// PartNumberMarker is an option to set part-number-marker parameter
|
||||
func PartNumberMarker(value int) Option {
|
||||
return addParam("part-number-marker", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// Sequential is an option to set sequential parameter for InitiateMultipartUpload
|
||||
func Sequential() Option {
|
||||
return addParam("sequential", "")
|
||||
}
|
||||
|
||||
// WithHashContext is an option to set withHashContext parameter for InitiateMultipartUpload
|
||||
func WithHashContext() Option {
|
||||
return addParam("withHashContext", "")
|
||||
}
|
||||
|
||||
// EnableMd5 is an option to set x-oss-enable-md5 parameter for InitiateMultipartUpload
|
||||
func EnableMd5() Option {
|
||||
return addParam("x-oss-enable-md5", "")
|
||||
}
|
||||
|
||||
// EnableSha1 is an option to set x-oss-enable-sha1 parameter for InitiateMultipartUpload
|
||||
func EnableSha1() Option {
|
||||
return addParam("x-oss-enable-sha1", "")
|
||||
}
|
||||
|
||||
// EnableSha256 is an option to set x-oss-enable-sha256 parameter for InitiateMultipartUpload
|
||||
func EnableSha256() Option {
|
||||
return addParam("x-oss-enable-sha256", "")
|
||||
}
|
||||
|
||||
// ListType is an option to set List-type parameter for ListObjectsV2
|
||||
func ListType(value int) Option {
|
||||
return addParam("list-type", strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// StartAfter is an option to set start-after parameter for ListObjectsV2
|
||||
func StartAfter(value string) Option {
|
||||
return addParam("start-after", value)
|
||||
}
|
||||
|
||||
// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2
|
||||
func ContinuationToken(value string) Option {
|
||||
if value == "" {
|
||||
return addParam("continuation-token", nil)
|
||||
}
|
||||
return addParam("continuation-token", value)
|
||||
}
|
||||
|
||||
// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2
|
||||
func FetchOwner(value bool) Option {
|
||||
if value {
|
||||
return addParam("fetch-owner", "true")
|
||||
}
|
||||
return addParam("fetch-owner", "false")
|
||||
}
|
||||
|
||||
// DeleteObjectsQuiet false:DeleteObjects in verbose mode; true:DeleteObjects in quite mode. Default is false.
|
||||
func DeleteObjectsQuiet(isQuiet bool) Option {
|
||||
return addArg(deleteObjectsQuiet, isQuiet)
|
||||
}
|
||||
|
||||
// StorageClass bucket storage class
|
||||
func StorageClass(value StorageClassType) Option {
|
||||
return addArg(storageClass, value)
|
||||
}
|
||||
|
||||
// RedundancyType bucket data redundancy type
|
||||
func RedundancyType(value DataRedundancyType) Option {
|
||||
return addArg(redundancyType, value)
|
||||
}
|
||||
|
||||
// RedundancyType bucket data redundancy type
|
||||
func ObjectHashFunc(value ObjecthashFuncType) Option {
|
||||
return addArg(objectHashFunc, value)
|
||||
}
|
||||
|
||||
// WithContext returns an option that sets the context for requests.
|
||||
func WithContext(ctx context.Context) Option {
|
||||
return addArg(contextArg, ctx)
|
||||
}
|
||||
|
||||
// Checkpoint configuration
|
||||
type cpConfig struct {
|
||||
IsEnable bool
|
||||
FilePath string
|
||||
DirPath string
|
||||
}
|
||||
|
||||
// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
|
||||
func Checkpoint(isEnable bool, filePath string) Option {
|
||||
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
|
||||
}
|
||||
|
||||
// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
|
||||
func CheckpointDir(isEnable bool, dirPath string) Option {
|
||||
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
|
||||
}
|
||||
|
||||
// Routines DownloadFile/UploadFile routine count
|
||||
func Routines(n int) Option {
|
||||
return addArg(routineNum, n)
|
||||
}
|
||||
|
||||
// InitCRC Init AppendObject CRC
|
||||
func InitCRC(initCRC uint64) Option {
|
||||
return addArg(initCRC64, initCRC)
|
||||
}
|
||||
|
||||
// Progress set progress listener
|
||||
func Progress(listener ProgressListener) Option {
|
||||
return addArg(progressListener, listener)
|
||||
}
|
||||
|
||||
// GetResponseHeader for get response http header
|
||||
func GetResponseHeader(respHeader *http.Header) Option {
|
||||
return addArg(responseHeader, respHeader)
|
||||
}
|
||||
|
||||
// CallbackResult for get response of call back
|
||||
func CallbackResult(body *[]byte) Option {
|
||||
return addArg(responseBody, body)
|
||||
}
|
||||
|
||||
// ResponseContentType is an option to set response-content-type param
|
||||
func ResponseContentType(value string) Option {
|
||||
return addParam("response-content-type", value)
|
||||
}
|
||||
|
||||
// ResponseContentLanguage is an option to set response-content-language param
|
||||
func ResponseContentLanguage(value string) Option {
|
||||
return addParam("response-content-language", value)
|
||||
}
|
||||
|
||||
// ResponseExpires is an option to set response-expires param
|
||||
func ResponseExpires(value string) Option {
|
||||
return addParam("response-expires", value)
|
||||
}
|
||||
|
||||
// ResponseCacheControl is an option to set response-cache-control param
|
||||
func ResponseCacheControl(value string) Option {
|
||||
return addParam("response-cache-control", value)
|
||||
}
|
||||
|
||||
// ResponseContentDisposition is an option to set response-content-disposition param
|
||||
func ResponseContentDisposition(value string) Option {
|
||||
return addParam("response-content-disposition", value)
|
||||
}
|
||||
|
||||
// ResponseContentEncoding is an option to set response-content-encoding param
|
||||
func ResponseContentEncoding(value string) Option {
|
||||
return addParam("response-content-encoding", value)
|
||||
}
|
||||
|
||||
// Process is an option to set x-oss-process param
|
||||
func Process(value string) Option {
|
||||
return addParam("x-oss-process", value)
|
||||
}
|
||||
|
||||
// TrafficLimitParam is a option to set x-oss-traffic-limit
|
||||
func TrafficLimitParam(value int64) Option {
|
||||
return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
|
||||
}
|
||||
|
||||
// SetHeader Allow users to set personalized http headers
|
||||
func SetHeader(key string, value interface{}) Option {
|
||||
return setHeader(key, value)
|
||||
}
|
||||
|
||||
// AddParam Allow users to set personalized http params
|
||||
func AddParam(key string, value interface{}) Option {
|
||||
return addParam(key, value)
|
||||
}
|
||||
|
||||
func setHeader(key string, value interface{}) Option {
|
||||
return func(params map[string]optionValue) error {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
params[key] = optionValue{value, optionHTTP}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func addParam(key string, value interface{}) Option {
|
||||
return func(params map[string]optionValue) error {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
params[key] = optionValue{value, optionParam}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func addArg(key string, value interface{}) Option {
|
||||
return func(params map[string]optionValue) error {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
params[key] = optionValue{value, optionArg}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func handleOptions(headers map[string]string, options []Option) error {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range params {
|
||||
if v.Type == optionHTTP {
|
||||
headers[k] = v.Value.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetRawParams(options []Option) (map[string]interface{}, error) {
|
||||
// Option
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
paramsm := map[string]interface{}{}
|
||||
// Serialize
|
||||
for k, v := range params {
|
||||
if v.Type == optionParam {
|
||||
vs := params[k]
|
||||
paramsm[k] = vs.Value.(string)
|
||||
}
|
||||
}
|
||||
|
||||
return paramsm, nil
|
||||
}
|
||||
|
||||
func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := params[param]; ok {
|
||||
return val.Value, nil
|
||||
}
|
||||
return defaultVal, nil
|
||||
}
|
||||
|
||||
func IsOptionSet(options []Option, option string) (bool, interface{}, error) {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := params[option]; ok {
|
||||
return true, val.Value, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
func DeleteOption(options []Option, strKey string) []Option {
|
||||
var outOption []Option
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
option(params)
|
||||
_, exist := params[strKey]
|
||||
if !exist {
|
||||
outOption = append(outOption, option)
|
||||
} else {
|
||||
delete(params, strKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
return outOption
|
||||
}
|
||||
|
||||
func GetRequestId(header http.Header) string {
|
||||
return header.Get("x-oss-request-id")
|
||||
}
|
||||
|
||||
func GetVersionId(header http.Header) string {
|
||||
return header.Get("x-oss-version-id")
|
||||
}
|
||||
|
||||
func GetCopySrcVersionId(header http.Header) string {
|
||||
return header.Get("x-oss-copy-source-version-id")
|
||||
}
|
||||
|
||||
func GetDeleteMark(header http.Header) bool {
|
||||
value := header.Get("x-oss-delete-marker")
|
||||
if strings.ToUpper(value) == "TRUE" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetQosDelayTime(header http.Header) string {
|
||||
return header.Get("x-oss-qos-delay-time")
|
||||
}
|
||||
|
||||
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
|
||||
func AllowSameActionOverLap(enabled bool) Option {
|
||||
if enabled {
|
||||
return setHeader(HTTPHeaderAllowSameActionOverLap, "true")
|
||||
} else {
|
||||
return setHeader(HTTPHeaderAllowSameActionOverLap, "false")
|
||||
}
|
||||
}
|
||||
|
||||
func GetCallbackBody(options []Option, resp *Response, callbackSet bool) error {
|
||||
var err error
|
||||
|
||||
// get response body
|
||||
if callbackSet {
|
||||
err = setBody(options, resp)
|
||||
} else {
|
||||
callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
|
||||
if callback != nil {
|
||||
err = setBody(options, resp)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func setBody(options []Option, resp *Response) error {
|
||||
respBody, _ := FindOption(options, responseBody, nil)
|
||||
if respBody != nil && resp != nil {
|
||||
pRespBody := respBody.(*[]byte)
|
||||
pBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pBody != nil {
|
||||
*pRespBody = pBody
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
116
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
generated
vendored
Normal file
116
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ProgressEventType defines transfer progress event type
|
||||
type ProgressEventType int
|
||||
|
||||
const (
|
||||
// TransferStartedEvent transfer started, set TotalBytes
|
||||
TransferStartedEvent ProgressEventType = 1 + iota
|
||||
// TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
|
||||
TransferDataEvent
|
||||
// TransferCompletedEvent transfer completed
|
||||
TransferCompletedEvent
|
||||
// TransferFailedEvent transfer encounters an error
|
||||
TransferFailedEvent
|
||||
)
|
||||
|
||||
// ProgressEvent defines progress event
|
||||
type ProgressEvent struct {
|
||||
ConsumedBytes int64
|
||||
TotalBytes int64
|
||||
RwBytes int64
|
||||
EventType ProgressEventType
|
||||
}
|
||||
|
||||
// ProgressListener listens progress change
|
||||
type ProgressListener interface {
|
||||
ProgressChanged(event *ProgressEvent)
|
||||
}
|
||||
|
||||
// -------------------- Private --------------------
|
||||
|
||||
func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
|
||||
return &ProgressEvent{
|
||||
ConsumedBytes: consumed,
|
||||
TotalBytes: total,
|
||||
RwBytes: rwBytes,
|
||||
EventType: eventType}
|
||||
}
|
||||
|
||||
// publishProgress
|
||||
func publishProgress(listener ProgressListener, event *ProgressEvent) {
|
||||
if listener != nil && event != nil {
|
||||
listener.ProgressChanged(event)
|
||||
}
|
||||
}
|
||||
|
||||
type readerTracker struct {
|
||||
completedBytes int64
|
||||
}
|
||||
|
||||
type teeReader struct {
|
||||
reader io.Reader
|
||||
writer io.Writer
|
||||
listener ProgressListener
|
||||
consumedBytes int64
|
||||
totalBytes int64
|
||||
tracker *readerTracker
|
||||
}
|
||||
|
||||
// TeeReader returns a Reader that writes to w what it reads from r.
|
||||
// All reads from r performed through it are matched with
|
||||
// corresponding writes to w. There is no internal buffering -
|
||||
// the write must complete before the read completes.
|
||||
// Any error encountered while writing is reported as a read error.
|
||||
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
|
||||
return &teeReader{
|
||||
reader: reader,
|
||||
writer: writer,
|
||||
listener: listener,
|
||||
consumedBytes: 0,
|
||||
totalBytes: totalBytes,
|
||||
tracker: tracker,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *teeReader) Read(p []byte) (n int, err error) {
|
||||
n, err = t.reader.Read(p)
|
||||
|
||||
// Read encountered error
|
||||
if err != nil && err != io.EOF {
|
||||
event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
|
||||
publishProgress(t.listener, event)
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
t.consumedBytes += int64(n)
|
||||
// CRC
|
||||
if t.writer != nil {
|
||||
if n, err := t.writer.Write(p[:n]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
// Progress
|
||||
if t.listener != nil {
|
||||
event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
|
||||
publishProgress(t.listener, event)
|
||||
}
|
||||
// Track
|
||||
if t.tracker != nil {
|
||||
t.tracker.completedBytes = t.consumedBytes
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (t *teeReader) Close() error {
|
||||
if rc, ok := t.reader.(io.ReadCloser); ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
12
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
generated
vendored
Normal file
12
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import "net/http"
|
||||
|
||||
// http.ErrUseLastResponse only is defined go1.7 onward
|
||||
|
||||
func disableHTTPRedirect(client *http.Client) {
|
||||
|
||||
}
|
||||
13
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
generated
vendored
Normal file
13
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import "net/http"
|
||||
|
||||
// http.ErrUseLastResponse only is defined go1.7 onward
|
||||
func disableHTTPRedirect(client *http.Client) {
|
||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
||||
197
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
generated
vendored
Normal file
197
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
generated
vendored
Normal file
@@ -0,0 +1,197 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CreateSelectCsvObjectMeta is Creating csv object meta
|
||||
//
|
||||
// key the object key.
|
||||
// csvMeta the csv file meta
|
||||
// options the options for create csv Meta of the object.
|
||||
//
|
||||
// MetaEndFrameCSV the csv file meta info
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
|
||||
var endFrame MetaEndFrameCSV
|
||||
params := map[string]interface{}{}
|
||||
params["x-oss-process"] = "csv/meta"
|
||||
|
||||
csvMeta.encodeBase64()
|
||||
bs, err := xml.Marshal(csvMeta)
|
||||
if err != nil {
|
||||
return endFrame, err
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
|
||||
if err != nil {
|
||||
return endFrame, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
_, err = ioutil.ReadAll(resp)
|
||||
|
||||
return resp.Frame.MetaEndFrameCSV, err
|
||||
}
|
||||
|
||||
// CreateSelectJsonObjectMeta is Creating json object meta
|
||||
//
|
||||
// key the object key.
|
||||
// csvMeta the json file meta
|
||||
// options the options for create json Meta of the object.
|
||||
//
|
||||
// MetaEndFrameJSON the json file meta info
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
|
||||
var endFrame MetaEndFrameJSON
|
||||
params := map[string]interface{}{}
|
||||
params["x-oss-process"] = "json/meta"
|
||||
|
||||
bs, err := xml.Marshal(jsonMeta)
|
||||
if err != nil {
|
||||
return endFrame, err
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
|
||||
if err != nil {
|
||||
return endFrame, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
_, err = ioutil.ReadAll(resp)
|
||||
|
||||
return resp.Frame.MetaEndFrameJSON, err
|
||||
}
|
||||
|
||||
// SelectObject is the select object api, approve csv and json file.
|
||||
//
|
||||
// key the object key.
|
||||
// selectReq the request data for select object
|
||||
// options the options for select file of the object.
|
||||
//
|
||||
// o.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
|
||||
params := map[string]interface{}{}
|
||||
if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
|
||||
params["x-oss-process"] = "csv/select" // default select csv file
|
||||
} else {
|
||||
params["x-oss-process"] = "json/select"
|
||||
}
|
||||
selectReq.encodeBase64()
|
||||
bs, err := xml.Marshal(selectReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
|
||||
resp.Frame.EnablePayloadCrc = true
|
||||
}
|
||||
resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// DoPostSelectObject is the SelectObject/CreateMeta api, approve csv and json file.
|
||||
//
|
||||
// key the object key.
|
||||
// params the resource of oss approve csv/meta, json/meta, csv/select, json/select.
|
||||
// buf the request data trans to buffer.
|
||||
// options the options for select file of the object.
|
||||
//
|
||||
// SelectObjectResponse the response of select object.
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) {
|
||||
resp, err := bucket.do("POST", key, params, options, buf, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &SelectObjectResponse{
|
||||
Body: resp.Body,
|
||||
StatusCode: resp.StatusCode,
|
||||
Frame: SelectObjectResult{},
|
||||
}
|
||||
result.Headers = resp.Headers
|
||||
// result.Frame = SelectObjectResult{}
|
||||
result.ReadTimeOut = bucket.GetConfig().Timeout
|
||||
|
||||
// Progress
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// CRC32
|
||||
crcCalc := crc32.NewIEEE()
|
||||
result.WriterForCheckCrc32 = crcCalc
|
||||
result.Body = TeeReader(resp.Body, nil, 0, listener, nil)
|
||||
|
||||
err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK})
|
||||
|
||||
return result, err
|
||||
}
|
||||
|
||||
// SelectObjectIntoFile is the selectObject to file api
|
||||
//
|
||||
// key the object key.
|
||||
// fileName saving file's name to localstation.
|
||||
// selectReq the request data for select object
|
||||
// options the options for select file of the object.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error {
|
||||
tempFilePath := fileName + TempFileSuffix
|
||||
|
||||
params := map[string]interface{}{}
|
||||
if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
|
||||
params["x-oss-process"] = "csv/select" // default select csv file
|
||||
} else {
|
||||
params["x-oss-process"] = "json/select"
|
||||
}
|
||||
selectReq.encodeBase64()
|
||||
bs, err := xml.Marshal(selectReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
|
||||
// If the local file does not exist, create a new one. If it exists, overwrite it.
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the data to the local file path.
|
||||
_, err = io.Copy(fd, resp)
|
||||
fd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Rename(tempFilePath, fileName)
|
||||
}
|
||||
365
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
generated
vendored
Normal file
365
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
generated
vendored
Normal file
@@ -0,0 +1,365 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The adapter class for Select object's response.
|
||||
// The response consists of frames. Each frame has the following format:
|
||||
|
||||
// Type | Payload Length | Header Checksum | Payload | Payload Checksum
|
||||
|
||||
// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes--------->
|
||||
// And we have three kind of frames.
|
||||
// Data Frame:
|
||||
// Type:8388609
|
||||
// Payload: Offset | Data
|
||||
// <-8 bytes>
|
||||
|
||||
// Continuous Frame
|
||||
// Type:8388612
|
||||
// Payload: Offset (8-bytes)
|
||||
|
||||
// End Frame
|
||||
// Type:8388613
|
||||
// Payload: Offset | total scanned bytes | http status code | error message
|
||||
// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe--->
|
||||
|
||||
// SelectObjectResponse defines HTTP response from OSS SelectObject
|
||||
type SelectObjectResponse struct {
|
||||
StatusCode int
|
||||
Headers http.Header
|
||||
Body io.ReadCloser
|
||||
Frame SelectObjectResult
|
||||
ReadTimeOut uint
|
||||
ClientCRC32 uint32
|
||||
ServerCRC32 uint32
|
||||
WriterForCheckCrc32 hash.Hash32
|
||||
Finish bool
|
||||
}
|
||||
|
||||
func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) {
|
||||
n, err = sr.readFrames(p)
|
||||
return
|
||||
}
|
||||
|
||||
// Close http reponse body
|
||||
func (sr *SelectObjectResponse) Close() error {
|
||||
return sr.Body.Close()
|
||||
}
|
||||
|
||||
// PostSelectResult is the request of SelectObject
|
||||
type PostSelectResult struct {
|
||||
Response *SelectObjectResponse
|
||||
}
|
||||
|
||||
// readFrames is read Frame
|
||||
func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) {
|
||||
var nn int
|
||||
var err error
|
||||
var checkValid bool
|
||||
if sr.Frame.OutputRawData == true {
|
||||
nn, err = sr.Body.Read(p)
|
||||
return nn, err
|
||||
}
|
||||
|
||||
if sr.Finish {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
for {
|
||||
// if this Frame is Readed, then not reading Header
|
||||
if sr.Frame.OpenLine != true {
|
||||
err = sr.analysisHeader()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
}
|
||||
|
||||
if sr.Frame.FrameType == DataFrameType {
|
||||
n, err := sr.analysisData(p[nn:])
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
nn += n
|
||||
|
||||
// if this Frame is readed all data, then empty the Frame to read it with next frame
|
||||
if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 {
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if err != nil || !checkValid {
|
||||
return nn, fmt.Errorf("%s", err.Error())
|
||||
}
|
||||
sr.emptyFrame()
|
||||
}
|
||||
|
||||
if nn == len(p) {
|
||||
return nn, nil
|
||||
}
|
||||
} else if sr.Frame.FrameType == ContinuousFrameType {
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if err != nil || !checkValid {
|
||||
return nn, fmt.Errorf("%s", err.Error())
|
||||
}
|
||||
sr.Frame.OpenLine = false
|
||||
} else if sr.Frame.FrameType == EndFrameType {
|
||||
err = sr.analysisEndFrame()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if checkValid {
|
||||
sr.Finish = true
|
||||
}
|
||||
return nn, err
|
||||
} else if sr.Frame.FrameType == MetaEndFrameCSVType {
|
||||
err = sr.analysisMetaEndFrameCSV()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if checkValid {
|
||||
sr.Finish = true
|
||||
}
|
||||
return nn, err
|
||||
} else if sr.Frame.FrameType == MetaEndFrameJSONType {
|
||||
err = sr.analysisMetaEndFrameJSON()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if checkValid {
|
||||
sr.Finish = true
|
||||
}
|
||||
return nn, err
|
||||
}
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
type chanReadIO struct {
|
||||
readLen int
|
||||
err error
|
||||
}
|
||||
|
||||
func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) {
|
||||
r := sr.Body
|
||||
ch := make(chan chanReadIO, 1)
|
||||
defer close(ch)
|
||||
go func(p []byte) {
|
||||
var needReadLength int
|
||||
readChan := chanReadIO{}
|
||||
needReadLength = len(p)
|
||||
for {
|
||||
n, err := r.Read(p[readChan.readLen:needReadLength])
|
||||
readChan.readLen += n
|
||||
if err != nil {
|
||||
readChan.err = err
|
||||
ch <- readChan
|
||||
return
|
||||
}
|
||||
|
||||
if readChan.readLen == needReadLength {
|
||||
break
|
||||
}
|
||||
}
|
||||
ch <- readChan
|
||||
}(p)
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second * timeOut):
|
||||
return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p))
|
||||
case result := <-ch:
|
||||
return result.readLen, result.err
|
||||
}
|
||||
}
|
||||
|
||||
// analysisHeader is reading selectObject response body's header
|
||||
func (sr *SelectObjectResponse) analysisHeader() error {
|
||||
headFrameByte := make([]byte, 20)
|
||||
_, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut))
|
||||
if err != nil {
|
||||
return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
|
||||
}
|
||||
|
||||
frameTypeByte := headFrameByte[0:4]
|
||||
sr.Frame.Version = frameTypeByte[0]
|
||||
frameTypeByte[0] = 0
|
||||
bytesToInt(frameTypeByte, &sr.Frame.FrameType)
|
||||
|
||||
if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType &&
|
||||
sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType {
|
||||
return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType)
|
||||
}
|
||||
|
||||
payloadLengthByte := headFrameByte[4:8]
|
||||
bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength)
|
||||
headCheckSumByte := headFrameByte[8:12]
|
||||
bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum)
|
||||
byteOffset := headFrameByte[12:20]
|
||||
bytesToInt(byteOffset, &sr.Frame.Offset)
|
||||
sr.Frame.OpenLine = true
|
||||
|
||||
err = sr.writerCheckCrc32(byteOffset)
|
||||
return err
|
||||
}
|
||||
|
||||
// analysisData is reading the DataFrameType data of selectObject response body
|
||||
func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) {
|
||||
var needReadLength int32
|
||||
lenP := int32(len(p))
|
||||
restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength
|
||||
if lenP <= restByteLength {
|
||||
needReadLength = lenP
|
||||
} else {
|
||||
needReadLength = restByteLength
|
||||
}
|
||||
n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut))
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("read frame data error,%s", err.Error())
|
||||
}
|
||||
sr.Frame.ConsumedBytesLength += int32(n)
|
||||
err = sr.writerCheckCrc32(p[:n])
|
||||
return n, err
|
||||
}
|
||||
|
||||
// analysisEndFrame is reading the EndFrameType data of selectObject response body
|
||||
func (sr *SelectObjectResponse) analysisEndFrame() error {
|
||||
var eF EndFrame
|
||||
payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
|
||||
_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
|
||||
if err != nil {
|
||||
return fmt.Errorf("read end frame error:%s", err.Error())
|
||||
}
|
||||
bytesToInt(payLoadBytes[0:8], &eF.TotalScanned)
|
||||
bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode)
|
||||
errMsgLength := sr.Frame.PayloadLength - 20
|
||||
eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12])
|
||||
sr.Frame.EndFrame.TotalScanned = eF.TotalScanned
|
||||
sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode
|
||||
sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg
|
||||
err = sr.writerCheckCrc32(payLoadBytes)
|
||||
return err
|
||||
}
|
||||
|
||||
// analysisMetaEndFrameCSV is reading the MetaEndFrameCSVType data of selectObject response body
|
||||
func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error {
|
||||
var mCF MetaEndFrameCSV
|
||||
payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
|
||||
_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
|
||||
if err != nil {
|
||||
return fmt.Errorf("read meta end csv frame error:%s", err.Error())
|
||||
}
|
||||
|
||||
bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned)
|
||||
bytesToInt(payLoadBytes[8:12], &mCF.Status)
|
||||
bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount)
|
||||
bytesToInt(payLoadBytes[16:24], &mCF.RowsCount)
|
||||
bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount)
|
||||
errMsgLength := sr.Frame.PayloadLength - 36
|
||||
mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28])
|
||||
sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg
|
||||
sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
|
||||
sr.Frame.MetaEndFrameCSV.Status = mCF.Status
|
||||
sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
|
||||
sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
|
||||
sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
|
||||
err = sr.writerCheckCrc32(payLoadBytes)
|
||||
return err
|
||||
}
|
||||
|
||||
// analysisMetaEndFrameJSON is reading the MetaEndFrameJSONType data of selectObject response body
|
||||
func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
|
||||
var mJF MetaEndFrameJSON
|
||||
payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
|
||||
_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
|
||||
if err != nil {
|
||||
return fmt.Errorf("read meta end json frame error:%s", err.Error())
|
||||
}
|
||||
|
||||
bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
|
||||
bytesToInt(payLoadBytes[8:12], &mJF.Status)
|
||||
bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
|
||||
bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
|
||||
errMsgLength := sr.Frame.PayloadLength - 32
|
||||
mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
|
||||
sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
|
||||
sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
|
||||
sr.Frame.MetaEndFrameJSON.Status = mJF.Status
|
||||
sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
|
||||
sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount
|
||||
|
||||
err = sr.writerCheckCrc32(payLoadBytes)
|
||||
return err
|
||||
}
|
||||
|
||||
func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
|
||||
payLoadChecksumByte := make([]byte, 4)
|
||||
n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
|
||||
if n == 4 {
|
||||
bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
|
||||
sr.ServerCRC32 = sr.Frame.PayloadChecksum
|
||||
sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
|
||||
if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
|
||||
return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d",
|
||||
sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
|
||||
}
|
||||
|
||||
func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
|
||||
err = nil
|
||||
if sr.Frame.EnablePayloadCrc == true {
|
||||
_, err = sr.WriterForCheckCrc32.Write(p)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// emptyFrame is emptying SelectObjectResponse Frame information
|
||||
func (sr *SelectObjectResponse) emptyFrame() {
|
||||
crcCalc := crc32.NewIEEE()
|
||||
sr.WriterForCheckCrc32 = crcCalc
|
||||
sr.Finish = false
|
||||
|
||||
sr.Frame.ConsumedBytesLength = 0
|
||||
sr.Frame.OpenLine = false
|
||||
sr.Frame.Version = byte(0)
|
||||
sr.Frame.FrameType = 0
|
||||
sr.Frame.PayloadLength = 0
|
||||
sr.Frame.HeaderCheckSum = 0
|
||||
sr.Frame.Offset = 0
|
||||
sr.Frame.Data = ""
|
||||
|
||||
sr.Frame.EndFrame.TotalScanned = 0
|
||||
sr.Frame.EndFrame.HTTPStatusCode = 0
|
||||
sr.Frame.EndFrame.ErrorMsg = ""
|
||||
|
||||
sr.Frame.MetaEndFrameCSV.TotalScanned = 0
|
||||
sr.Frame.MetaEndFrameCSV.Status = 0
|
||||
sr.Frame.MetaEndFrameCSV.SplitsCount = 0
|
||||
sr.Frame.MetaEndFrameCSV.RowsCount = 0
|
||||
sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
|
||||
sr.Frame.MetaEndFrameCSV.ErrorMsg = ""
|
||||
|
||||
sr.Frame.MetaEndFrameJSON.TotalScanned = 0
|
||||
sr.Frame.MetaEndFrameJSON.Status = 0
|
||||
sr.Frame.MetaEndFrameJSON.SplitsCount = 0
|
||||
sr.Frame.MetaEndFrameJSON.RowsCount = 0
|
||||
sr.Frame.MetaEndFrameJSON.ErrorMsg = ""
|
||||
|
||||
sr.Frame.PayloadChecksum = 0
|
||||
}
|
||||
|
||||
// bytesToInt byte's array trans to int
|
||||
func bytesToInt(b []byte, ret interface{}) {
|
||||
binBuf := bytes.NewBuffer(b)
|
||||
binary.Read(binBuf, binary.BigEndian, ret)
|
||||
}
|
||||
42
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
generated
vendored
Normal file
42
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func newTransport(conn *Conn, config *Config) *http.Transport {
|
||||
httpTimeOut := conn.config.HTTPTimeout
|
||||
httpMaxConns := conn.config.HTTPMaxConns
|
||||
// New Transport
|
||||
transport := &http.Transport{
|
||||
Dial: func(netw, addr string) (net.Conn, error) {
|
||||
d := net.Dialer{
|
||||
Timeout: httpTimeOut.ConnectTimeout,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}
|
||||
if config.LocalAddr != nil {
|
||||
d.LocalAddr = config.LocalAddr
|
||||
}
|
||||
conn, err := d.Dial(netw, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
|
||||
},
|
||||
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
|
||||
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
|
||||
}
|
||||
|
||||
if config.InsecureSkipVerify {
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
return transport
|
||||
}
|
||||
45
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
generated
vendored
Normal file
45
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func newTransport(conn *Conn, config *Config) *http.Transport {
|
||||
httpTimeOut := conn.config.HTTPTimeout
|
||||
httpMaxConns := conn.config.HTTPMaxConns
|
||||
// New Transport
|
||||
transport := &http.Transport{
|
||||
Dial: func(netw, addr string) (net.Conn, error) {
|
||||
d := net.Dialer{
|
||||
Timeout: httpTimeOut.ConnectTimeout,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}
|
||||
if config.LocalAddr != nil {
|
||||
d.LocalAddr = config.LocalAddr
|
||||
}
|
||||
conn, err := d.Dial(netw, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
|
||||
},
|
||||
MaxIdleConns: httpMaxConns.MaxIdleConns,
|
||||
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
|
||||
MaxConnsPerHost: httpMaxConns.MaxConnsPerHost,
|
||||
IdleConnTimeout: httpTimeOut.IdleConnTimeout,
|
||||
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
|
||||
}
|
||||
|
||||
if config.InsecureSkipVerify {
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
return transport
|
||||
}
|
||||
1695
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
generated
vendored
Normal file
1695
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
578
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
generated
vendored
Normal file
578
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
generated
vendored
Normal file
@@ -0,0 +1,578 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UploadFile is multipart file upload.
|
||||
//
|
||||
// objectKey the object name.
|
||||
// filePath the local file path to upload.
|
||||
// partSize the part size in byte.
|
||||
// options the options for uploading object.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < MinPartSize || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (100KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
|
||||
if cpFilePath != "" {
|
||||
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
|
||||
}
|
||||
|
||||
func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
absPath, _ := filepath.Abs(srcFile)
|
||||
cpFileName := getCpFileName(absPath, dest, "")
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// ----- concurrent upload without checkpoint -----
|
||||
|
||||
// getCpConfig gets checkpoint configuration
|
||||
func getCpConfig(options []Option) *cpConfig {
|
||||
cpcOpt, err := FindOption(options, checkpointConfig, nil)
|
||||
if err != nil || cpcOpt == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cpcOpt.(*cpConfig)
|
||||
}
|
||||
|
||||
// getCpFileName return the name of the checkpoint file
|
||||
func getCpFileName(src, dest, versionId string) string {
|
||||
md5Ctx := md5.New()
|
||||
md5Ctx.Write([]byte(src))
|
||||
srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
|
||||
md5Ctx.Reset()
|
||||
md5Ctx.Write([]byte(dest))
|
||||
destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
|
||||
if versionId == "" {
|
||||
return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
|
||||
}
|
||||
|
||||
md5Ctx.Reset()
|
||||
md5Ctx.Write([]byte(versionId))
|
||||
versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
|
||||
return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
|
||||
}
|
||||
|
||||
// getRoutines gets the routine count. by default it's 1.
|
||||
func getRoutines(options []Option) int {
|
||||
rtnOpt, err := FindOption(options, routineNum, nil)
|
||||
if err != nil || rtnOpt == nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
rs := rtnOpt.(int)
|
||||
if rs < 1 {
|
||||
rs = 1
|
||||
} else if rs > 100 {
|
||||
rs = 100
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// getPayer return the payer of the request
|
||||
func getPayer(options []Option) string {
|
||||
payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if err != nil || payerOpt == nil {
|
||||
return ""
|
||||
}
|
||||
return payerOpt.(string)
|
||||
}
|
||||
|
||||
// GetProgressListener gets the progress callback
|
||||
func GetProgressListener(options []Option) ProgressListener {
|
||||
isSet, listener, _ := IsOptionSet(options, progressListener)
|
||||
if !isSet {
|
||||
return nil
|
||||
}
|
||||
return listener.(ProgressListener)
|
||||
}
|
||||
|
||||
// uploadPartHook is for testing usage
|
||||
type uploadPartHook func(id int, chunk FileChunk) error
|
||||
|
||||
var uploadPartHooker uploadPartHook = defaultUploadPart
|
||||
|
||||
func defaultUploadPart(id int, chunk FileChunk) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// workerArg defines worker argument structure
|
||||
type workerArg struct {
|
||||
bucket *Bucket
|
||||
filePath string
|
||||
imur InitiateMultipartUploadResult
|
||||
options []Option
|
||||
hook uploadPartHook
|
||||
}
|
||||
|
||||
// worker is the worker coroutine function
|
||||
type defaultUploadProgressListener struct {
|
||||
}
|
||||
|
||||
// ProgressChanged no-ops
|
||||
func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
|
||||
}
|
||||
|
||||
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
|
||||
for chunk := range jobs {
|
||||
if err := arg.hook(id, chunk); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
var respHeader http.Header
|
||||
p := Progress(&defaultUploadProgressListener{})
|
||||
opts := make([]Option, len(arg.options)+2)
|
||||
opts = append(opts, arg.options...)
|
||||
|
||||
// use defaultUploadProgressListener
|
||||
opts = append(opts, p, GetResponseHeader(&respHeader))
|
||||
|
||||
startT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
|
||||
endT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
if err != nil {
|
||||
arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// scheduler function
|
||||
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
|
||||
for _, chunk := range chunks {
|
||||
jobs <- chunk
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
func getTotalBytes(chunks []FileChunk) int64 {
|
||||
var tb int64
|
||||
for _, chunk := range chunks {
|
||||
tb += chunk.Size
|
||||
}
|
||||
return tb
|
||||
}
|
||||
|
||||
// uploadFile is a concurrent upload, without checkpoint
|
||||
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
chunks, err := SplitFileByPartSize(filePath, partSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
abortOptions := ChoiceAbortPartOption(options)
|
||||
|
||||
// Initialize the multipart upload
|
||||
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jobs := make(chan FileChunk, len(chunks))
|
||||
results := make(chan UploadPart, len(chunks))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
var completedBytes int64
|
||||
totalBytes := getTotalBytes(chunks)
|
||||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutine
|
||||
arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go worker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Schedule the jobs
|
||||
go scheduler(jobs, chunks)
|
||||
|
||||
// Waiting for the upload finished
|
||||
completed := 0
|
||||
parts := make([]UploadPart, len(chunks))
|
||||
for completed < len(chunks) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
parts[part.PartNumber-1] = part
|
||||
completedBytes += chunks[part.PartNumber-1].Size
|
||||
|
||||
// why RwBytes in ProgressEvent is 0 ?
|
||||
// because read or write event has been notified in teeReader.Read()
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(chunks) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multpart upload
|
||||
_, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- concurrent upload with checkpoint -----
|
||||
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
|
||||
|
||||
type uploadCheckpoint struct {
|
||||
Magic string // Magic
|
||||
MD5 string // Checkpoint file content's MD5
|
||||
FilePath string // Local file path
|
||||
FileStat cpStat // File state
|
||||
ObjectKey string // Key
|
||||
UploadID string // Upload ID
|
||||
Parts []cpPart // All parts of the local file
|
||||
CallbackVal string
|
||||
CallbackBody *[]byte
|
||||
}
|
||||
|
||||
type cpStat struct {
|
||||
Size int64 // File size
|
||||
LastModified time.Time // File's last modified time
|
||||
MD5 string // Local file's MD5
|
||||
}
|
||||
|
||||
type cpPart struct {
|
||||
Chunk FileChunk // File chunk
|
||||
Part UploadPart // Uploaded part
|
||||
IsCompleted bool // Upload complete flag
|
||||
}
|
||||
|
||||
// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid.
|
||||
func (cp uploadCheckpoint) isValid(filePath string,options []Option) (bool, error) {
|
||||
|
||||
callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "")
|
||||
if callbackVal != "" && cp.CallbackVal != callbackVal {
|
||||
return false, nil
|
||||
}
|
||||
callbackBody, _ := FindOption(options, responseBody, nil)
|
||||
if callbackBody != nil{
|
||||
body, _ := json.Marshal(callbackBody)
|
||||
if bytes.Equal(*cp.CallbackBody, body) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// Compare the CP's magic number and MD5.
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Make sure if the local file is updated.
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
st, err := fd.Stat()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
md, err := calcFileMD5(filePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the file size, file's last modified time and file's MD5
|
||||
if cp.FileStat.Size != st.Size() ||
|
||||
!cp.FileStat.LastModified.Equal(st.ModTime()) ||
|
||||
cp.FileStat.MD5 != md {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the file
|
||||
func (cp *uploadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// dump dumps to the local file
|
||||
func (cp *uploadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// updatePart updates the part status
|
||||
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
|
||||
cp.Parts[part.PartNumber-1].Part = part
|
||||
cp.Parts[part.PartNumber-1].IsCompleted = true
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
func (cp *uploadCheckpoint) todoParts() []FileChunk {
|
||||
fcs := []FileChunk{}
|
||||
for _, part := range cp.Parts {
|
||||
if !part.IsCompleted {
|
||||
fcs = append(fcs, part.Chunk)
|
||||
}
|
||||
}
|
||||
return fcs
|
||||
}
|
||||
|
||||
// allParts returns all parts
|
||||
func (cp *uploadCheckpoint) allParts() []UploadPart {
|
||||
ps := []UploadPart{}
|
||||
for _, part := range cp.Parts {
|
||||
ps = append(ps, part.Part)
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns completed bytes count
|
||||
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for _, part := range cp.Parts {
|
||||
if part.IsCompleted {
|
||||
completedBytes += part.Chunk.Size
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// calcFileMD5 calculates the MD5 for the specified local file.
// NOTE: intentionally a stub — it always returns ("", nil). As a result
// cpStat.MD5 is stored empty and isValid's MD5 comparison ("" == "") never
// invalidates a checkpoint on content change; only size and mtime are
// effectively checked.
func calcFileMD5(filePath string) (string, error) {
	return "", nil
}
|
||||
|
||||
// prepare initializes a fresh resumable upload: it records the local file's
// stats and the request's callback options in the checkpoint, splits the
// file into chunks of partSize, and initiates a new multipart upload on the
// bucket, storing the returned upload ID in the CP.
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
	// CP identity.
	cp.Magic = uploadCpMagic
	cp.FilePath = filePath
	cp.ObjectKey = objectKey

	// Capture the local file's size and mtime so isValid can detect changes.
	fd, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer fd.Close()

	st, err := fd.Stat()
	if err != nil {
		return err
	}
	cp.FileStat.Size = st.Size()
	cp.FileStat.LastModified = st.ModTime()
	// Record the callback options so isValid refuses to resume an upload
	// whose callback settings have changed.
	callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "")
	cp.CallbackVal = callbackVal.(string)
	callbackBody, _ := FindOption(options, responseBody, nil)
	if callbackBody != nil {
		body, _ := json.Marshal(callbackBody)
		cp.CallbackBody = &body
	}
	// calcFileMD5 is a stub, so MD5 is recorded empty (see its doc comment).
	md, err := calcFileMD5(filePath)
	if err != nil {
		return err
	}
	cp.FileStat.MD5 = md

	// Split the file into fixed-size chunks; all start as not-completed.
	parts, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}

	cp.Parts = make([]cpPart, len(parts))
	for i, part := range parts {
		cp.Parts[i].Chunk = part
		cp.Parts[i].IsCompleted = false
	}

	// Initiate the multipart upload; the returned ID ties this CP to the
	// server-side upload session.
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}
	cp.UploadID = imur.UploadID

	return nil
}
|
||||
|
||||
// complete finishes the multipart upload and deletes the local CP file.
// On failure the CP file is also removed for status 203 (callback failed
// after the upload itself succeeded) or 404, since in those cases the
// upload session can no longer be resumed.
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
		Key: cp.ObjectKey, UploadID: cp.UploadID}

	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
	if err != nil {
		if e, ok := err.(ServiceError); ok && (e.StatusCode == 203 || e.StatusCode == 404) {
			os.Remove(cpFilePath)
		}
		return err
	}
	os.Remove(cpFilePath)
	return err
}
|
||||
|
||||
// uploadFileWithCp performs a concurrent multipart upload that can resume
// from a checkpoint (CP) file. It loads the CP, validates it against the
// local file and request options, re-prepares from scratch if invalid,
// uploads the remaining parts with `routines` workers — persisting the CP
// after each finished part — and finally completes the multipart upload.
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
	listener := GetProgressListener(options)

	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)

	// Load CP data; an unreadable/corrupt CP file is simply discarded.
	ucp := uploadCheckpoint{}
	err := ucp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// Load error or the CP data is invalid: start a fresh upload session.
	valid, err := ucp.isValid(filePath, options)
	if err != nil || !valid {
		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Only the not-yet-completed chunks are scheduled.
	chunks := ucp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   bucket.BucketName,
		Key:      objectKey,
		UploadID: ucp.UploadID}

	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := ucp.getCompletedBytes()

	// why RwBytes in ProgressEvent is 0 ?
	// because read or write event has been notified in teeReader.Read()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
	publishProgress(listener, event)

	// Start the workers.
	arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}

	// Schedule jobs.
	go scheduler(jobs, chunks)

	// Wait for the remaining parts; dump the CP after each one so an
	// interruption loses at most the parts that were in flight.
	completed := 0
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			ucp.updatePart(part)
			ucp.dump(cpFilePath)
			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
			publishProgress(listener, event)
		case err := <-failed:
			// Stop the workers but deliberately keep the CP file and the
			// remote multipart upload so a later call can resume.
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(chunks) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
	publishProgress(listener, event)

	// Complete the multipart upload and remove the CP file.
	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
	return err
}
|
||||
674
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
generated
vendored
Normal file
674
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
generated
vendored
Normal file
@@ -0,0 +1,674 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Host information captured once at init time and reused by
// userAgent()/getSysInfo().
var sys_name string
var sys_release string
var sys_machine string

// Pre-encoded XML escape sequences used by EscapeXml. Numeric character
// references are used where they are shorter than the named entities.
// FIX: these literals had been mangled (the entity text was decoded away,
// leaving raw quote/newline bytes); restored to the escape sequences that
// EscapeXml must emit.
var (
	escQuot = []byte("&#34;") // shorter than "&quot;"
	escApos = []byte("&#39;") // shorter than "&apos;"
	escAmp  = []byte("&amp;")
	escLT   = []byte("&lt;")
	escGT   = []byte("&gt;")
	escTab  = []byte("&#x9;")
	escNL   = []byte("&#xA;")
	escCR   = []byte("&#xD;")
	escFFFD = []byte("\uFFFD") // Unicode replacement character
)

// init records the OS name and CPU architecture; the kernel release is not
// queried and is reported as "-".
func init() {
	sys_name = runtime.GOOS
	sys_release = "-"
	sys_machine = runtime.GOARCH
}
|
||||
|
||||
// userAgent builds the User-Agent header value from the SDK version, the
// OS/arch info captured in init(), and the Go runtime version.
func userAgent() string {
	sys := getSysInfo()
	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
		sys.release, sys.machine, runtime.Version())
}

// sysInfo describes the host system for the User-Agent string.
type sysInfo struct {
	name    string // OS name such as windows/Linux
	release string // OS version; always "-" here (init never queries the kernel release)
	machine string // CPU type amd64/x86_64
}

// getSysInfo returns the host values captured in init().
func getSysInfo() sysInfo {
	return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
}
|
||||
|
||||
// GetRangeConfig extracts and parses the Range header from the options.
// Returns (nil, nil) when no Range option is present.
func GetRangeConfig(options []Option) (*UnpackedRange, error) {
	rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
	if err != nil || rangeOpt == nil {
		return nil, err
	}
	return ParseRange(rangeOpt.(string))
}

// UnpackedRange is a parsed HTTP byte range; either bound may be absent.
type UnpackedRange struct {
	HasStart bool  // Flag indicates if the start point is specified
	HasEnd   bool  // Flag indicates if the end point is specified
	Start    int64 // Start point
	End      int64 // End point
}
|
||||
|
||||
// InvalidRangeError builds the error returned for a malformed Range value r.
func InvalidRangeError(r string) error {
	return errors.New("InvalidRange " + r)
}
|
||||
|
||||
func GetRangeString(unpackRange UnpackedRange) string {
|
||||
var strRange string
|
||||
if unpackRange.HasStart && unpackRange.HasEnd {
|
||||
strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
|
||||
} else if unpackRange.HasStart {
|
||||
strRange = fmt.Sprintf("%d-", unpackRange.Start)
|
||||
} else if unpackRange.HasEnd {
|
||||
strRange = fmt.Sprintf("-%d", unpackRange.End)
|
||||
}
|
||||
return strRange
|
||||
}
|
||||
|
||||
// ParseRange parse various styles of range such as bytes=M-N
|
||||
func ParseRange(normalizedRange string) (*UnpackedRange, error) {
|
||||
var err error
|
||||
hasStart := false
|
||||
hasEnd := false
|
||||
var start int64
|
||||
var end int64
|
||||
|
||||
// Bytes==M-N or ranges=M-N
|
||||
nrSlice := strings.Split(normalizedRange, "=")
|
||||
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
|
||||
// Bytes=M-N,X-Y
|
||||
rSlice := strings.Split(nrSlice[1], ",")
|
||||
rStr := rSlice[0]
|
||||
|
||||
if strings.HasSuffix(rStr, "-") { // M-
|
||||
startStr := rStr[:len(rStr)-1]
|
||||
start, err = strconv.ParseInt(startStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
} else if strings.HasPrefix(rStr, "-") { // -N
|
||||
len := rStr[1:]
|
||||
end, err = strconv.ParseInt(len, 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
if end == 0 { // -0
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
} else { // M-N
|
||||
valSlice := strings.Split(rStr, "-")
|
||||
if len(valSlice) != 2 {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
start, err = strconv.ParseInt(valSlice[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
end, err = strconv.ParseInt(valSlice[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
}
|
||||
|
||||
return &UnpackedRange{hasStart, hasEnd, start, end}, nil
|
||||
}
|
||||
|
||||
// AdjustRange converts an UnpackedRange into a half-open [start, end)
// window clamped to the object size. Any range that is out of bounds or
// inconsistent silently falls back to the whole object [0, size).
func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
	if ur == nil {
		return 0, size
	}

	if ur.HasStart && ur.HasEnd {
		// "M-N": inclusive N becomes exclusive end N+1.
		start = ur.Start
		end = ur.End + 1
		if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
			start = 0
			end = size
		}
	} else if ur.HasStart {
		// "M-": from M to the end of the object.
		start = ur.Start
		end = size
		if ur.Start < 0 || ur.Start >= size {
			start = 0
		}
	} else if ur.HasEnd {
		// "-N": the last N bytes of the object.
		start = size - ur.End
		end = size
		if ur.End < 0 || ur.End > size {
			start = 0
			end = size
		}
	}
	return
}
|
||||
|
||||
// GetNowSec returns the current Unix time in seconds.
func GetNowSec() int64 {
	return time.Now().Unix()
}

// GetNowNanoSec returns the current Unix time in nanoseconds. The result
// is undefined if the value cannot be represented by an int64 (see
// time.Time.UnixNano).
func GetNowNanoSec() int64 {
	return time.Now().UnixNano()
}

// GetNowGMT returns the current UTC time formatted per RFC 7231
// (http.TimeFormat), as used in HTTP Date headers.
func GetNowGMT() string {
	return time.Now().UTC().Format(http.TimeFormat)
}

// FileChunk describes one piece of a local file for multipart transfer.
type FileChunk struct {
	Number int   // Chunk number (1-based; doubles as the part number)
	Offset int64 // Chunk offset within the file
	Size   int64 // Chunk size in bytes
}
|
||||
|
||||
// SplitFileByPartNum splits big file into parts by the num of parts.
|
||||
// Split the file with specified parts count, returns the split result when error is nil.
|
||||
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
|
||||
if chunkNum <= 0 || chunkNum > 10000 {
|
||||
return nil, errors.New("chunkNum invalid")
|
||||
}
|
||||
|
||||
file, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if int64(chunkNum) > stat.Size() {
|
||||
return nil, errors.New("oss: chunkNum invalid")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
var chunk = FileChunk{}
|
||||
var chunkN = (int64)(chunkNum)
|
||||
for i := int64(0); i < chunkN; i++ {
|
||||
chunk.Number = int(i + 1)
|
||||
chunk.Offset = i * (stat.Size() / chunkN)
|
||||
if i == chunkN-1 {
|
||||
chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
|
||||
} else {
|
||||
chunk.Size = stat.Size() / chunkN
|
||||
}
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// SplitFileByPartSize splits big file into parts by the size of parts.
|
||||
// Splits the file by the part size. Returns the FileChunk when error is nil.
|
||||
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
|
||||
if chunkSize <= 0 {
|
||||
return nil, errors.New("chunkSize invalid")
|
||||
}
|
||||
|
||||
file, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var chunkN = stat.Size() / chunkSize
|
||||
if chunkN >= 10000 {
|
||||
return nil, errors.New("Too many parts, please increase part size")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
var chunk = FileChunk{}
|
||||
for i := int64(0); i < chunkN; i++ {
|
||||
chunk.Number = int(i + 1)
|
||||
chunk.Offset = i * chunkSize
|
||||
chunk.Size = chunkSize
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
if stat.Size()%chunkSize > 0 {
|
||||
chunk.Number = len(chunks) + 1
|
||||
chunk.Offset = int64(len(chunks)) * chunkSize
|
||||
chunk.Size = stat.Size() % chunkSize
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// GetPartEnd returns the inclusive end offset of a part starting at begin
// with nominal size per, clamped to the total length.
func GetPartEnd(begin int64, total int64, per int64) int64 {
	end := begin + per - 1
	if end >= total {
		return total - 1
	}
	return end
}
|
||||
|
||||
// CrcTable returns the CRC64 table for the ECMA polynomial, used for OSS
// data-integrity (crc64ecma) checks.
var CrcTable = func() *crc64.Table {
	return crc64.MakeTable(crc64.ECMA)
}

// crc32Table returns the CRC32 table for the IEEE polynomial.
var crc32Table = func() *crc32.Table {
	return crc32.MakeTable(crc32.IEEE)
}
|
||||
|
||||
// ChoiceTransferPartOption filters options down to those supported by
// UploadPart/DownloadPart: progress listener, request payer, version ID,
// traffic limit and response-header capture. All other options are dropped.
func ChoiceTransferPartOption(options []Option) []Option {
	var outOption []Option

	listener, _ := FindOption(options, progressListener, nil)
	if listener != nil {
		outOption = append(outOption, Progress(listener.(ProgressListener)))
	}

	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	versionId, _ := FindOption(options, "versionId", nil)
	if versionId != nil {
		outOption = append(outOption, VersionId(versionId.(string)))
	}

	trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
	if trafficLimit != nil {
		// Stored as a string header value; parse back to the int64 the
		// option constructor expects.
		speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
		outOption = append(outOption, TrafficLimitHeader(speed))
	}

	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	return outOption
}
|
||||
|
||||
// ChoiceCompletePartOption filters options down to those supported by
// CompleteMultipartUpload: progress listener, request payer, object ACL,
// callback (and callback vars), response-header capture, forbid-overwrite
// and event notification. All other options are dropped.
func ChoiceCompletePartOption(options []Option) []Option {
	var outOption []Option

	listener, _ := FindOption(options, progressListener, nil)
	if listener != nil {
		outOption = append(outOption, Progress(listener.(ProgressListener)))
	}

	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
	if acl != nil {
		outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
	}

	callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
	if callback != nil {
		outOption = append(outOption, Callback(callback.(string)))
	}

	callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
	if callbackVar != nil {
		outOption = append(outOption, CallbackVar(callbackVar.(string)))
	}

	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	// The header value is the string "true"/"false"; convert to bool.
	forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
	if forbidOverWrite != nil {
		if forbidOverWrite.(string) == "true" {
			outOption = append(outOption, ForbidOverWrite(true))
		} else {
			outOption = append(outOption, ForbidOverWrite(false))
		}
	}

	notification, _ := FindOption(options, HttpHeaderOssNotification, nil)
	if notification != nil {
		outOption = append(outOption, SetHeader(HttpHeaderOssNotification, notification))
	}

	return outOption
}
|
||||
|
||||
// ChoiceAbortPartOption filters options down to those supported by
// AbortMultipartUpload: request payer and response-header capture.
func ChoiceAbortPartOption(options []Option) []Option {
	var outOption []Option
	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	return outOption
}
|
||||
|
||||
// ChoiceHeadObjectOption filters options down to those supported by
// HeadObject: request payer, version ID and response-header capture.
// The Range option is deliberately excluded so HEAD reports the whole
// object's length.
func ChoiceHeadObjectOption(options []Option) []Option {
	var outOption []Option

	// not select HTTPHeaderRange to get whole object length
	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	versionId, _ := FindOption(options, "versionId", nil)
	if versionId != nil {
		outOption = append(outOption, VersionId(versionId.(string)))
	}

	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	return outOption
}
|
||||
|
||||
// CheckBucketName validates an OSS bucket name: 3-63 characters, only
// lowercase letters, digits and '-', and must not start or end with '-'.
func CheckBucketName(bucketName string) error {
	n := len(bucketName)
	if n < 3 || n > 63 {
		return fmt.Errorf("bucket name %s len is between [3-63],now is %d", bucketName, n)
	}

	for _, c := range bucketName {
		valid := ('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '-'
		if !valid {
			return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
		}
	}
	if strings.HasPrefix(bucketName, "-") || strings.HasSuffix(bucketName, "-") {
		return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
	}
	return nil
}
|
||||
|
||||
// CheckObjectName rejects empty object keys.
func CheckObjectName(objectName string) error {
	if objectName == "" {
		return fmt.Errorf("object name is empty")
	}
	return nil
}
|
||||
|
||||
func CheckObjectNameEx(objectName string, strict bool) error {
|
||||
if err := CheckObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strict && strings.HasPrefix(objectName, "?") {
|
||||
return fmt.Errorf("object name is invalid, can't start with '?'")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
func GetReaderLen(reader io.Reader) (int64, error) {
|
||||
var contentLength int64
|
||||
var err error
|
||||
switch v := reader.(type) {
|
||||
case *bytes.Buffer:
|
||||
contentLength = int64(v.Len())
|
||||
case *bytes.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *strings.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *os.File:
|
||||
fInfo, fError := v.Stat()
|
||||
if fError != nil {
|
||||
err = fmt.Errorf("can't get reader content length,%s", fError.Error())
|
||||
} else {
|
||||
contentLength = fInfo.Size()
|
||||
}
|
||||
case *io.LimitedReader:
|
||||
contentLength = int64(v.N)
|
||||
case *LimitedReadCloser:
|
||||
contentLength = int64(v.N)
|
||||
default:
|
||||
err = fmt.Errorf("can't get reader content length,unkown reader type")
|
||||
}
|
||||
return contentLength, err
|
||||
}
|
||||
*/
|
||||
|
||||
func GetReaderLen(reader io.Reader) (int64, error) {
|
||||
var contentLength int64
|
||||
var err error
|
||||
switch v := reader.(type) {
|
||||
case *io.LimitedReader:
|
||||
contentLength = int64(v.N)
|
||||
case *LimitedReadCloser:
|
||||
contentLength = int64(v.N)
|
||||
default:
|
||||
// Len
|
||||
type lenner interface {
|
||||
Len() int
|
||||
}
|
||||
if lr, ok := reader.(lenner); ok {
|
||||
return int64(lr.Len()), nil
|
||||
}
|
||||
// seeker len
|
||||
if s, ok := reader.(io.Seeker); ok {
|
||||
curOffset, err := s.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
endOffset, err := s.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = s.Seek(curOffset, io.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n := endOffset - curOffset
|
||||
if n >= 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
//
|
||||
err = fmt.Errorf("can't get reader content length,unkown reader type")
|
||||
}
|
||||
return contentLength, err
|
||||
}
|
||||
|
||||
func LimitReadCloser(r io.Reader, n int64) io.Reader {
|
||||
var lc LimitedReadCloser
|
||||
lc.R = r
|
||||
lc.N = n
|
||||
return &lc
|
||||
}
|
||||
|
||||
// LimitedReadCloser is an io.LimitedReader that additionally supports
// Close, forwarding it to the wrapped reader when that reader is an
// io.ReadCloser.
type LimitedReadCloser struct {
	io.LimitedReader
}

// Close closes the underlying reader if it implements io.ReadCloser;
// otherwise it is a no-op.
func (lc *LimitedReadCloser) Close() error {
	if closer, ok := lc.R.(io.ReadCloser); ok {
		return closer.Close()
	}
	return nil
}
|
||||
|
||||
// DiscardReadCloser wraps a ReadCloser and silently drops the first
// Discard bytes read through it (used to trim the front of a ranged
// response down to the caller's requested offset).
type DiscardReadCloser struct {
	RC      io.ReadCloser // Underlying reader
	Discard int           // Number of leading bytes still to drop
}

// Read reads from the underlying reader, discarding up to drc.Discard
// leading bytes before handing data to the caller. While bytes are being
// discarded it can return (0, nil), which io.Reader permits but callers
// should not treat as EOF.
func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
	n, err := drc.RC.Read(b)
	if drc.Discard == 0 || n <= 0 {
		return n, err
	}

	if n <= drc.Discard {
		// Everything read so far is still inside the discard window.
		drc.Discard -= n
		return 0, err
	}

	// Shift the kept tail to the front of the caller's buffer.
	realLen := n - drc.Discard
	copy(b[0:realLen], b[drc.Discard:n])
	drc.Discard = 0
	return realLen, err
}

// Close closes the underlying reader. (The type assertion always succeeds
// since RC is already declared as io.ReadCloser, except when RC is nil.)
func (drc *DiscardReadCloser) Close() error {
	closer, ok := drc.RC.(io.ReadCloser)
	if ok {
		return closer.Close()
	}
	return nil
}
|
||||
|
||||
// ConvertEmptyValueToNil replaces empty-string values with nil for the
// given keys, so downstream serialization treats them as unset.
func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
	for _, k := range keys {
		if v, ok := params[k]; ok && v == "" {
			// convert "" to nil
			params[k] = nil
		}
	}
}
|
||||
|
||||
// EscapeLFString replaces every newline byte in str with the two
// characters `\n`, making the value safe for single-line log output.
func EscapeLFString(str string) string {
	var b strings.Builder
	for i := 0; i < len(str); i++ {
		if c := str[i]; c == '\n' {
			b.WriteString("\\n")
		} else {
			b.WriteByte(c)
		}
	}
	return b.String()
}
|
||||
|
||||
// EscapeString writes to p the properly escaped XML equivalent
|
||||
// of the plain text data s.
|
||||
func EscapeXml(s string) string {
|
||||
var p strings.Builder
|
||||
var esc []byte
|
||||
hextable := "0123456789ABCDEF"
|
||||
escPattern := []byte("�")
|
||||
last := 0
|
||||
for i := 0; i < len(s); {
|
||||
r, width := utf8.DecodeRuneInString(s[i:])
|
||||
i += width
|
||||
switch r {
|
||||
case '"':
|
||||
esc = escQuot
|
||||
case '\'':
|
||||
esc = escApos
|
||||
case '&':
|
||||
esc = escAmp
|
||||
case '<':
|
||||
esc = escLT
|
||||
case '>':
|
||||
esc = escGT
|
||||
case '\t':
|
||||
esc = escTab
|
||||
case '\n':
|
||||
esc = escNL
|
||||
case '\r':
|
||||
esc = escCR
|
||||
default:
|
||||
if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
|
||||
if r >= 0x00 && r < 0x20 {
|
||||
escPattern[3] = hextable[r>>4]
|
||||
escPattern[4] = hextable[r&0x0f]
|
||||
esc = escPattern
|
||||
} else {
|
||||
esc = escFFFD
|
||||
}
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.WriteString(s[last : i-width])
|
||||
p.Write(esc)
|
||||
last = i
|
||||
}
|
||||
p.WriteString(s[last:])
|
||||
return p.String()
|
||||
}
|
||||
|
||||
// isInCharacterRange reports whether r is a legal XML 1.0 character
// (the Char production, Section 2.2 of the XML specification).
func isInCharacterRange(r rune) bool {
	switch {
	case r == 0x09, r == 0x0A, r == 0x0D:
		return true
	case r >= 0x20 && r <= 0xD7FF:
		return true
	case r >= 0xE000 && r <= 0xFFFD:
		return true
	case r >= 0x10000 && r <= 0x10FFFF:
		return true
	}
	return false
}
|
||||
|
||||
func isVerifyObjectStrict(config *Config) bool {
|
||||
if config != nil {
|
||||
if config.AuthVersion == AuthV2 || config.AuthVersion == AuthV4 {
|
||||
return false
|
||||
}
|
||||
return config.VerifyObjectStrict
|
||||
}
|
||||
return true
|
||||
}
|
||||
201
vendor/github.com/aliyun/credentials-go/LICENSE
generated
vendored
Normal file
201
vendor/github.com/aliyun/credentials-go/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
41
vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go
generated
vendored
Normal file
41
vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package credentials
|
||||
|
||||
import "github.com/alibabacloud-go/tea/tea"
|
||||
|
||||
// AccessKeyCredential is a credential backed by a long-lived RAM
// access-key pair (AccessKeyId / AccessKeySecret).
type AccessKeyCredential struct {
	AccessKeyId     string // the access key ID
	AccessKeySecret string // the access key secret
}

// newAccessKeyCredential returns an AccessKeyCredential holding the
// given key pair.
func newAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential {
	return &AccessKeyCredential{
		AccessKeyId:     accessKeyId,
		AccessKeySecret: accessKeySecret,
	}
}

// GetAccessKeyId returns AccessKeyCredential's AccessKeyId.
func (a *AccessKeyCredential) GetAccessKeyId() (*string, error) {
	return tea.String(a.AccessKeyId), nil
}

// GetAccessKeySecret returns AccessKeyCredential's AccessKeySecret.
func (a *AccessKeyCredential) GetAccessKeySecret() (*string, error) {
	return tea.String(a.AccessKeySecret), nil
}

// GetSecurityToken is a no-op for AccessKeyCredential; long-lived keys
// carry no STS token, so it always returns an empty string.
func (a *AccessKeyCredential) GetSecurityToken() (*string, error) {
	return tea.String(""), nil
}

// GetBearerToken is a no-op for AccessKeyCredential; it always returns
// an empty string.
func (a *AccessKeyCredential) GetBearerToken() *string {
	return tea.String("")
}

// GetType returns the credential type identifier, "access_key".
func (a *AccessKeyCredential) GetType() *string {
	return tea.String("access_key")
}
|
||||
40
vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go
generated
vendored
Normal file
40
vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
package credentials
|
||||
|
||||
import "github.com/alibabacloud-go/tea/tea"
|
||||
|
||||
// BearerTokenCredential is a credential backed by a pre-issued bearer
// token; it carries no access-key material.
type BearerTokenCredential struct {
	BearerToken string // the bearer token value
}

// newBearerTokenCredential returns a BearerTokenCredential holding the
// given token.
func newBearerTokenCredential(token string) *BearerTokenCredential {
	return &BearerTokenCredential{
		BearerToken: token,
	}
}

// GetAccessKeyId is a no-op for BearerTokenCredential; it always
// returns an empty string.
func (b *BearerTokenCredential) GetAccessKeyId() (*string, error) {
	return tea.String(""), nil
}

// GetAccessKeySecret is a no-op for BearerTokenCredential; it always
// returns an empty string.
func (b *BearerTokenCredential) GetAccessKeySecret() (*string, error) {
	return tea.String(("")), nil
}

// GetSecurityToken is a no-op for BearerTokenCredential; it always
// returns an empty string.
func (b *BearerTokenCredential) GetSecurityToken() (*string, error) {
	return tea.String(""), nil
}

// GetBearerToken returns BearerTokenCredential's BearerToken.
func (b *BearerTokenCredential) GetBearerToken() *string {
	return tea.String(b.BearerToken)
}

// GetType returns the credential type identifier, "bearer".
func (b *BearerTokenCredential) GetType() *string {
	return tea.String("bearer")
}
|
||||
349
vendor/github.com/aliyun/credentials-go/credentials/credential.go
generated
vendored
Normal file
349
vendor/github.com/aliyun/credentials-go/credentials/credential.go
generated
vendored
Normal file
@@ -0,0 +1,349 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/debug/debug"
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/response"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
// debuglog emits trace output for the "credential" debug scope.
var debuglog = debug.Init("credential")

// hookParse wraps response-parsing errors; tests replace it to inject
// parse failures.
var hookParse = func(err error) error {
	return err
}

// Credential is an interface for getting the actual credential values.
// Implementations backed by temporary credentials refresh themselves
// transparently inside the getters.
type Credential interface {
	GetAccessKeyId() (*string, error)
	GetAccessKeySecret() (*string, error)
	GetSecurityToken() (*string, error)
	GetBearerToken() *string
	GetType() *string
}
|
||||
|
||||
// Config is important when calling NewCredential: its Type field
// selects the credential implementation and the remaining fields
// supply that implementation's parameters.
type Config struct {
	Type              *string `json:"type"`              // credential type: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair, bearer
	AccessKeyId       *string `json:"access_key_id"`
	AccessKeySecret   *string `json:"access_key_secret"`
	RoleArn           *string `json:"role_arn"`
	RoleSessionName   *string `json:"role_session_name"`
	PublicKeyId       *string `json:"public_key_id"`
	RoleName          *string `json:"role_name"`
	SessionExpiration *int    `json:"session_expiration"`
	PrivateKeyFile    *string `json:"private_key_file"`
	BearerToken       *string `json:"bearer_token"`
	SecurityToken     *string `json:"security_token"`
	// NOTE(review): the tag below misspells "expiration"; it is kept
	// byte-for-byte because changing it would change the JSON format.
	RoleSessionExpiration *int    `json:"role_session_expiratioon"`
	Policy                *string `json:"policy"`
	Host                  *string `json:"host"`
	Timeout               *int    `json:"timeout"` // read timeout, seconds
	ConnectTimeout        *int    `json:"connect_timeout"` // connect timeout, seconds
	Proxy                 *string `json:"proxy"`
}

// String returns a pretty-printed representation of the config.
func (s Config) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by delegating to String.
func (s Config) GoString() string {
	return s.String()
}
|
||||
|
||||
// SetAccessKeyId sets AccessKeyId and returns the config for chaining.
func (s *Config) SetAccessKeyId(v string) *Config {
	s.AccessKeyId = &v
	return s
}

// SetAccessKeySecret sets AccessKeySecret and returns the config for chaining.
func (s *Config) SetAccessKeySecret(v string) *Config {
	s.AccessKeySecret = &v
	return s
}

// SetSecurityToken sets SecurityToken and returns the config for chaining.
func (s *Config) SetSecurityToken(v string) *Config {
	s.SecurityToken = &v
	return s
}

// SetRoleArn sets RoleArn and returns the config for chaining.
func (s *Config) SetRoleArn(v string) *Config {
	s.RoleArn = &v
	return s
}

// SetRoleSessionName sets RoleSessionName and returns the config for chaining.
func (s *Config) SetRoleSessionName(v string) *Config {
	s.RoleSessionName = &v
	return s
}

// SetPublicKeyId sets PublicKeyId and returns the config for chaining.
func (s *Config) SetPublicKeyId(v string) *Config {
	s.PublicKeyId = &v
	return s
}

// SetRoleName sets RoleName and returns the config for chaining.
func (s *Config) SetRoleName(v string) *Config {
	s.RoleName = &v
	return s
}

// SetSessionExpiration sets SessionExpiration and returns the config for chaining.
func (s *Config) SetSessionExpiration(v int) *Config {
	s.SessionExpiration = &v
	return s
}

// SetPrivateKeyFile sets PrivateKeyFile and returns the config for chaining.
func (s *Config) SetPrivateKeyFile(v string) *Config {
	s.PrivateKeyFile = &v
	return s
}

// SetBearerToken sets BearerToken and returns the config for chaining.
func (s *Config) SetBearerToken(v string) *Config {
	s.BearerToken = &v
	return s
}

// SetRoleSessionExpiration sets RoleSessionExpiration and returns the config for chaining.
func (s *Config) SetRoleSessionExpiration(v int) *Config {
	s.RoleSessionExpiration = &v
	return s
}

// SetPolicy sets Policy and returns the config for chaining.
func (s *Config) SetPolicy(v string) *Config {
	s.Policy = &v
	return s
}

// SetHost sets Host and returns the config for chaining.
func (s *Config) SetHost(v string) *Config {
	s.Host = &v
	return s
}

// SetTimeout sets Timeout (read timeout, seconds) and returns the config for chaining.
func (s *Config) SetTimeout(v int) *Config {
	s.Timeout = &v
	return s
}

// SetConnectTimeout sets ConnectTimeout (seconds) and returns the config for chaining.
func (s *Config) SetConnectTimeout(v int) *Config {
	s.ConnectTimeout = &v
	return s
}

// SetProxy sets Proxy and returns the config for chaining.
func (s *Config) SetProxy(v string) *Config {
	s.Proxy = &v
	return s
}

// SetType sets Type and returns the config for chaining.
func (s *Config) SetType(v string) *Config {
	s.Type = &v
	return s
}
|
||||
|
||||
// NewCredential return a credential according to the type in config.
// if config is nil, the function will use default provider chain to get credential.
// please see README.md for detail.
func NewCredential(config *Config) (credential Credential, err error) {
	if config == nil {
		// Resolve a config from the default provider chain, then recurse
		// once with the resolved (non-nil) config.
		config, err = defaultChain.resolve()
		if err != nil {
			return
		}
		return NewCredential(config)
	}
	switch tea.StringValue(config.Type) {
	case "access_key":
		err = checkAccessKey(config)
		if err != nil {
			return
		}
		credential = newAccessKeyCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret))
	case "sts":
		err = checkSTS(config)
		if err != nil {
			return
		}
		credential = newStsTokenCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.SecurityToken))
	case "ecs_ram_role":
		// checkEcsRAMRole currently has no required fields and always
		// succeeds, so its result is intentionally ignored here.
		checkEcsRAMRole(config)
		runtime := &utils.Runtime{
			Host:           tea.StringValue(config.Host),
			Proxy:          tea.StringValue(config.Proxy),
			ReadTimeout:    tea.IntValue(config.Timeout),
			ConnectTimeout: tea.IntValue(config.ConnectTimeout),
		}
		credential = newEcsRAMRoleCredential(tea.StringValue(config.RoleName), runtime)
	case "ram_role_arn":
		err = checkRAMRoleArn(config)
		if err != nil {
			return
		}
		runtime := &utils.Runtime{
			Host:           tea.StringValue(config.Host),
			Proxy:          tea.StringValue(config.Proxy),
			ReadTimeout:    tea.IntValue(config.Timeout),
			ConnectTimeout: tea.IntValue(config.ConnectTimeout),
		}
		credential = newRAMRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime)
	case "rsa_key_pair":
		err = checkRSAKeyPair(config)
		if err != nil {
			return
		}
		file, err1 := os.Open(tea.StringValue(config.PrivateKeyFile))
		if err1 != nil {
			err = fmt.Errorf("InvalidPath: Can not open PrivateKeyFile, err is %s", err1.Error())
			return
		}
		defer file.Close()
		// Read the PEM body, skipping the "----BEGIN/END" armor lines.
		// NOTE(review): scan.Err() is never checked, so a read error
		// yields a silently truncated key — confirm before relying on
		// this path.
		var privateKey string
		scan := bufio.NewScanner(file)
		for scan.Scan() {
			if strings.HasPrefix(scan.Text(), "----") {
				continue
			}
			privateKey += scan.Text() + "\n"
		}
		runtime := &utils.Runtime{
			Host:           tea.StringValue(config.Host),
			Proxy:          tea.StringValue(config.Proxy),
			ReadTimeout:    tea.IntValue(config.Timeout),
			ConnectTimeout: tea.IntValue(config.ConnectTimeout),
		}
		credential = newRsaKeyPairCredential(privateKey, tea.StringValue(config.PublicKeyId), tea.IntValue(config.SessionExpiration), runtime)
	case "bearer":
		if tea.StringValue(config.BearerToken) == "" {
			err = errors.New("BearerToken cannot be empty")
			return
		}
		credential = newBearerTokenCredential(tea.StringValue(config.BearerToken))
	default:
		// NOTE(review): this message omits "bearer" even though the case
		// above accepts it.
		err = errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair")
		return
	}
	return credential, nil
}
|
||||
|
||||
func checkRSAKeyPair(config *Config) (err error) {
|
||||
if tea.StringValue(config.PrivateKeyFile) == "" {
|
||||
err = errors.New("PrivateKeyFile cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.PublicKeyId) == "" {
|
||||
err = errors.New("PublicKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkRAMRoleArn(config *Config) (err error) {
|
||||
if tea.StringValue(config.AccessKeySecret) == "" {
|
||||
err = errors.New("AccessKeySecret cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.RoleArn) == "" {
|
||||
err = errors.New("RoleArn cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.RoleSessionName) == "" {
|
||||
err = errors.New("RoleSessionName cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.AccessKeyId) == "" {
|
||||
err = errors.New("AccessKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkEcsRAMRole(config *Config) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func checkSTS(config *Config) (err error) {
|
||||
if tea.StringValue(config.AccessKeyId) == "" {
|
||||
err = errors.New("AccessKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.AccessKeySecret) == "" {
|
||||
err = errors.New("AccessKeySecret cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.SecurityToken) == "" {
|
||||
err = errors.New("SecurityToken cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkAccessKey(config *Config) (err error) {
|
||||
if tea.StringValue(config.AccessKeyId) == "" {
|
||||
err = errors.New("AccessKeyId cannot be empty")
|
||||
return
|
||||
}
|
||||
if tea.StringValue(config.AccessKeySecret) == "" {
|
||||
err = errors.New("AccessKeySecret cannot be empty")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// doAction performs the HTTP request described by request, honoring the
// read/connect timeouts and proxy from runtime, and returns the raw
// response body. A non-200 status is converted into an error carrying
// the body text.
func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content []byte, err error) {
	httpRequest, err := http.NewRequest(request.Method, request.URL, strings.NewReader(""))
	if err != nil {
		return
	}
	httpRequest.Proto = "HTTP/1.1"
	httpRequest.Host = request.Domain
	debuglog("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto)
	debuglog("> Host: %s", httpRequest.Host)
	// Copy the non-empty request headers, logging each one.
	for key, value := range request.Headers {
		if value != "" {
			debuglog("> %s: %s", key, value)
			httpRequest.Header[key] = []string{value}
		}
	}
	debuglog(">")
	// Build a per-call client with the runtime's read timeout.
	httpClient := &http.Client{}
	httpClient.Timeout = time.Duration(runtime.ReadTimeout) * time.Second
	proxy := &url.URL{}
	if runtime.Proxy != "" {
		proxy, err = url.Parse(runtime.Proxy)
		if err != nil {
			return
		}
	}
	trans := &http.Transport{}
	if proxy != nil && runtime.Proxy != "" {
		trans.Proxy = http.ProxyURL(proxy)
	}
	// Enforce the connect timeout at the dial layer.
	trans.DialContext = utils.Timeout(time.Duration(runtime.ConnectTimeout) * time.Second)
	httpClient.Transport = trans
	// hookDo lets tests intercept the round trip.
	httpResponse, err := hookDo(httpClient.Do)(httpRequest)
	if err != nil {
		return
	}
	debuglog("< %s %s", httpResponse.Proto, httpResponse.Status)
	for key, value := range httpResponse.Header {
		debuglog("< %s: %v", key, strings.Join(value, ""))
	}
	debuglog("<")

	// Parse the body; hookParse lets tests inject parse failures.
	// NOTE(review): presumably ParseFromHTTPResponse reads and closes
	// the response body — confirm, otherwise connections leak.
	resp := &response.CommonResponse{}
	err = hookParse(resp.ParseFromHTTPResponse(httpResponse))
	if err != nil {
		return
	}
	debuglog("%s", resp.GetHTTPContentString())
	if resp.GetHTTPStatus() != http.StatusOK {
		err = fmt.Errorf("httpStatus: %d, message = %s", resp.GetHTTPStatus(), resp.GetHTTPContentString())
		return
	}
	return resp.GetHTTPContentBytes(), nil
}
|
||||
25
vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go
generated
vendored
Normal file
25
vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// defaultInAdvanceScale triggers a refresh once 95% of a credential's
// lifetime has elapsed, so callers never observe an expired token.
const defaultInAdvanceScale = 0.95

// hookDo wraps the HTTP round-trip function; tests replace it to stub
// out network calls.
var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
	return fn
}

// credentialUpdater tracks when a refreshable credential was last
// fetched and how long it is valid, to decide when to refresh it.
type credentialUpdater struct {
	credentialExpiration int     // lifetime of the current credential, in seconds
	lastUpdateTimestamp  int64   // Unix time of the last successful refresh
	inAdvanceScale       float64 // fraction of lifetime after which to refresh; 0 means defaultInAdvanceScale
}

// needUpdateCredential reports whether the credential has passed
// inAdvanceScale of its lifetime and should be refreshed. It lazily
// initializes inAdvanceScale to the default on first use.
func (updater *credentialUpdater) needUpdateCredential() (result bool) {
	if updater.inAdvanceScale == 0 {
		updater.inAdvanceScale = defaultInAdvanceScale
	}
	return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale)
}
|
||||
136
vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go
generated
vendored
Normal file
136
vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
// securityCredURL is the ECS instance-metadata endpoint that serves STS
// credentials for the RAM role attached to the instance.
var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"

// EcsRAMRoleCredential is a credential backed by the RAM role attached
// to the current ECS instance; its session credential is refreshed
// transparently from the metadata service inside the getters.
type EcsRAMRoleCredential struct {
	*credentialUpdater
	RoleName          string // RAM role name; discovered from metadata when empty
	sessionCredential *sessionCredential
	runtime           *utils.Runtime
}

// ecsRAMRoleResponse mirrors the document returned by the metadata
// service's security-credentials endpoint.
type ecsRAMRoleResponse struct {
	Code            string `json:"Code" xml:"Code"`
	AccessKeyId     string `json:"AccessKeyId" xml:"AccessKeyId"`
	AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
	SecurityToken   string `json:"SecurityToken" xml:"SecurityToken"`
	Expiration      string `json:"Expiration" xml:"Expiration"`
}

// newEcsRAMRoleCredential returns an EcsRAMRoleCredential for roleName
// using the given runtime options.
func newEcsRAMRoleCredential(roleName string, runtime *utils.Runtime) *EcsRAMRoleCredential {
	return &EcsRAMRoleCredential{
		RoleName:          roleName,
		credentialUpdater: new(credentialUpdater),
		runtime:           runtime,
	}
}

// GetAccessKeyId returns EcsRAMRoleCredential's AccessKeyId.
// If the session credential is missing or stale, it is refreshed first.
func (e *EcsRAMRoleCredential) GetAccessKeyId() (*string, error) {
	if e.sessionCredential == nil || e.needUpdateCredential() {
		err := e.updateCredential()
		if err != nil {
			return tea.String(""), err
		}
	}
	return tea.String(e.sessionCredential.AccessKeyId), nil
}

// GetAccessKeySecret returns EcsRAMRoleCredential's AccessKeySecret.
// If the session credential is missing or stale, it is refreshed first.
func (e *EcsRAMRoleCredential) GetAccessKeySecret() (*string, error) {
	if e.sessionCredential == nil || e.needUpdateCredential() {
		err := e.updateCredential()
		if err != nil {
			return tea.String(""), err
		}
	}
	return tea.String(e.sessionCredential.AccessKeySecret), nil
}

// GetSecurityToken returns EcsRAMRoleCredential's SecurityToken.
// If the session credential is missing or stale, it is refreshed first.
func (e *EcsRAMRoleCredential) GetSecurityToken() (*string, error) {
	if e.sessionCredential == nil || e.needUpdateCredential() {
		err := e.updateCredential()
		if err != nil {
			return tea.String(""), err
		}
	}
	return tea.String(e.sessionCredential.SecurityToken), nil
}

// GetBearerToken is a no-op for EcsRAMRoleCredential; it always
// returns an empty string.
func (e *EcsRAMRoleCredential) GetBearerToken() *string {
	return tea.String("")
}

// GetType returns the credential type identifier, "ecs_ram_role".
func (e *EcsRAMRoleCredential) GetType() *string {
	return tea.String("ecs_ram_role")
}
|
||||
|
||||
// getRoleName queries the ECS instance-metadata service for the name of
// the RAM role attached to this instance.
func getRoleName() (string, error) {
	// Short 1s connect/read timeouts: the metadata endpoint is link-local.
	runtime := utils.NewRuntime(1, 1, "", "")
	request := request.NewCommonRequest()
	request.URL = securityCredURL
	request.Method = "GET"
	content, err := doAction(request, runtime)
	if err != nil {
		return "", err
	}
	return string(content), nil
}
|
||||
|
||||
func (e *EcsRAMRoleCredential) updateCredential() (err error) {
|
||||
if e.runtime == nil {
|
||||
e.runtime = new(utils.Runtime)
|
||||
}
|
||||
request := request.NewCommonRequest()
|
||||
if e.RoleName == "" {
|
||||
e.RoleName, err = getRoleName()
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
|
||||
}
|
||||
}
|
||||
request.URL = securityCredURL + e.RoleName
|
||||
request.Method = "GET"
|
||||
content, err := doAction(request, e.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
|
||||
}
|
||||
var resp *ecsRAMRoleResponse
|
||||
err = json.Unmarshal(content, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh Ecs sts token err: Json Unmarshal fail: %s", err.Error())
|
||||
}
|
||||
if resp.Code != "Success" {
|
||||
return fmt.Errorf("refresh Ecs sts token err: Code is not Success")
|
||||
}
|
||||
if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" {
|
||||
return fmt.Errorf("refresh Ecs sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration)
|
||||
}
|
||||
|
||||
expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration)
|
||||
e.lastUpdateTimestamp = time.Now().Unix()
|
||||
e.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
|
||||
e.sessionCredential = &sessionCredential{
|
||||
AccessKeyId: resp.AccessKeyId,
|
||||
AccessKeySecret: resp.AccessKeySecret,
|
||||
SecurityToken: resp.SecurityToken,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
43
vendor/github.com/aliyun/credentials-go/credentials/env_provider.go
generated
vendored
Normal file
43
vendor/github.com/aliyun/credentials-go/credentials/env_provider.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
)
|
||||
|
||||
type envProvider struct{}
|
||||
|
||||
var providerEnv = new(envProvider)
|
||||
|
||||
const (
|
||||
// EnvVarAccessKeyId is a name of ALIBABA_CLOUD_ACCESS_KEY_Id
|
||||
EnvVarAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_Id"
|
||||
// EnvVarAccessKeySecret is a name of ALIBABA_CLOUD_ACCESS_KEY_SECRET
|
||||
EnvVarAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
|
||||
)
|
||||
|
||||
func newEnvProvider() Provider {
|
||||
return &envProvider{}
|
||||
}
|
||||
|
||||
func (p *envProvider) resolve() (*Config, error) {
|
||||
accessKeyId, ok1 := os.LookupEnv(EnvVarAccessKeyId)
|
||||
accessKeySecret, ok2 := os.LookupEnv(EnvVarAccessKeySecret)
|
||||
if !ok1 || !ok2 {
|
||||
return nil, nil
|
||||
}
|
||||
if accessKeyId == "" {
|
||||
return nil, errors.New(EnvVarAccessKeyId + " cannot be empty")
|
||||
}
|
||||
if accessKeySecret == "" {
|
||||
return nil, errors.New(EnvVarAccessKeySecret + " cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("access_key"),
|
||||
AccessKeyId: tea.String(accessKeyId),
|
||||
AccessKeySecret: tea.String(accessKeySecret),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
28
vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go
generated
vendored
Normal file
28
vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
)
|
||||
|
||||
type instanceCredentialsProvider struct{}
|
||||
|
||||
var providerInstance = new(instanceCredentialsProvider)
|
||||
|
||||
func newInstanceCredentialsProvider() Provider {
|
||||
return &instanceCredentialsProvider{}
|
||||
}
|
||||
|
||||
func (p *instanceCredentialsProvider) resolve() (*Config, error) {
|
||||
roleName, ok := os.LookupEnv(ENVEcsMetadata)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
config := &Config{
|
||||
Type: tea.String("ecs_ram_role"),
|
||||
RoleName: tea.String(roleName),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
350
vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go
generated
vendored
Normal file
350
vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go
generated
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
ini "gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
type profileProvider struct {
|
||||
Profile string
|
||||
}
|
||||
|
||||
var providerProfile = newProfileProvider()
|
||||
|
||||
var hookOS = func(goos string) string {
|
||||
return goos
|
||||
}
|
||||
|
||||
var hookState = func(info os.FileInfo, err error) (os.FileInfo, error) {
|
||||
return info, err
|
||||
}
|
||||
|
||||
// NewProfileProvider receive zero or more parameters,
|
||||
// when length of name is 0, the value of field Profile will be "default",
|
||||
// and when there are multiple inputs, the function will take the
|
||||
// first one and discard the other values.
|
||||
func newProfileProvider(name ...string) Provider {
|
||||
p := new(profileProvider)
|
||||
if len(name) == 0 {
|
||||
p.Profile = "default"
|
||||
} else {
|
||||
p.Profile = name[0]
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// resolve implements the Provider interface
|
||||
// when credential type is rsa_key_pair, the content of private_key file
|
||||
// must be able to be parsed directly into the required string
|
||||
// that NewRsaKeyPairCredential function needed
|
||||
func (p *profileProvider) resolve() (*Config, error) {
|
||||
path, ok := os.LookupEnv(ENVCredentialFile)
|
||||
if !ok {
|
||||
path, err := checkDefaultPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if path == "" {
|
||||
return nil, nil
|
||||
}
|
||||
} else if path == "" {
|
||||
return nil, errors.New(ENVCredentialFile + " cannot be empty")
|
||||
}
|
||||
|
||||
value, section, err := getType(path, p.Profile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch value.String() {
|
||||
case "access_key":
|
||||
config, err := getAccessKey(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "sts":
|
||||
config, err := getSTS(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "bearer":
|
||||
config, err := getBearerToken(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "ecs_ram_role":
|
||||
config, err := getEcsRAMRole(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "ram_role_arn":
|
||||
config, err := getRAMRoleArn(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
case "rsa_key_pair":
|
||||
config, err := getRSAKeyPair(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
default:
|
||||
return nil, errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair")
|
||||
}
|
||||
}
|
||||
|
||||
func getRSAKeyPair(section *ini.Section) (*Config, error) {
|
||||
publicKeyId, err := section.GetKey("public_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required public_key_id option in profile for rsa_key_pair")
|
||||
}
|
||||
if publicKeyId.String() == "" {
|
||||
return nil, errors.New("public_key_id cannot be empty")
|
||||
}
|
||||
privateKeyFile, err := section.GetKey("private_key_file")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required private_key_file option in profile for rsa_key_pair")
|
||||
}
|
||||
if privateKeyFile.String() == "" {
|
||||
return nil, errors.New("private_key_file cannot be empty")
|
||||
}
|
||||
sessionExpiration, _ := section.GetKey("session_expiration")
|
||||
expiration := 0
|
||||
if sessionExpiration != nil {
|
||||
expiration, err = sessionExpiration.Int()
|
||||
if err != nil {
|
||||
return nil, errors.New("session_expiration must be an int")
|
||||
}
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("rsa_key_pair"),
|
||||
PublicKeyId: tea.String(publicKeyId.String()),
|
||||
PrivateKeyFile: tea.String(privateKeyFile.String()),
|
||||
SessionExpiration: tea.Int(expiration),
|
||||
}
|
||||
err = setRuntimeToConfig(config, section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getRAMRoleArn(section *ini.Section) (*Config, error) {
|
||||
accessKeyId, err := section.GetKey("access_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_id option in profile for ram_role_arn")
|
||||
}
|
||||
if accessKeyId.String() == "" {
|
||||
return nil, errors.New("access_key_id cannot be empty")
|
||||
}
|
||||
accessKeySecret, err := section.GetKey("access_key_secret")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_secret option in profile for ram_role_arn")
|
||||
}
|
||||
if accessKeySecret.String() == "" {
|
||||
return nil, errors.New("access_key_secret cannot be empty")
|
||||
}
|
||||
roleArn, err := section.GetKey("role_arn")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required role_arn option in profile for ram_role_arn")
|
||||
}
|
||||
if roleArn.String() == "" {
|
||||
return nil, errors.New("role_arn cannot be empty")
|
||||
}
|
||||
roleSessionName, err := section.GetKey("role_session_name")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required role_session_name option in profile for ram_role_arn")
|
||||
}
|
||||
if roleSessionName.String() == "" {
|
||||
return nil, errors.New("role_session_name cannot be empty")
|
||||
}
|
||||
roleSessionExpiration, _ := section.GetKey("role_session_expiration")
|
||||
expiration := 0
|
||||
if roleSessionExpiration != nil {
|
||||
expiration, err = roleSessionExpiration.Int()
|
||||
if err != nil {
|
||||
return nil, errors.New("role_session_expiration must be an int")
|
||||
}
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("ram_role_arn"),
|
||||
AccessKeyId: tea.String(accessKeyId.String()),
|
||||
AccessKeySecret: tea.String(accessKeySecret.String()),
|
||||
RoleArn: tea.String(roleArn.String()),
|
||||
RoleSessionName: tea.String(roleSessionName.String()),
|
||||
RoleSessionExpiration: tea.Int(expiration),
|
||||
}
|
||||
err = setRuntimeToConfig(config, section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getEcsRAMRole(section *ini.Section) (*Config, error) {
|
||||
roleName, _ := section.GetKey("role_name")
|
||||
config := &Config{
|
||||
Type: tea.String("ecs_ram_role"),
|
||||
}
|
||||
if roleName != nil {
|
||||
config.RoleName = tea.String(roleName.String())
|
||||
}
|
||||
err := setRuntimeToConfig(config, section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getBearerToken(section *ini.Section) (*Config, error) {
|
||||
bearerToken, err := section.GetKey("bearer_token")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required bearer_token option in profile for bearer")
|
||||
}
|
||||
if bearerToken.String() == "" {
|
||||
return nil, errors.New("bearer_token cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("bearer"),
|
||||
BearerToken: tea.String(bearerToken.String()),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getSTS(section *ini.Section) (*Config, error) {
|
||||
accesskeyid, err := section.GetKey("access_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_id option in profile for sts")
|
||||
}
|
||||
if accesskeyid.String() == "" {
|
||||
return nil, errors.New("access_key_id cannot be empty")
|
||||
}
|
||||
accessKeySecret, err := section.GetKey("access_key_secret")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_secret option in profile for sts")
|
||||
}
|
||||
if accessKeySecret.String() == "" {
|
||||
return nil, errors.New("access_key_secret cannot be empty")
|
||||
}
|
||||
securityToken, err := section.GetKey("security_token")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required security_token option in profile for sts")
|
||||
}
|
||||
if securityToken.String() == "" {
|
||||
return nil, errors.New("security_token cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("sts"),
|
||||
AccessKeyId: tea.String(accesskeyid.String()),
|
||||
AccessKeySecret: tea.String(accessKeySecret.String()),
|
||||
SecurityToken: tea.String(securityToken.String()),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getAccessKey(section *ini.Section) (*Config, error) {
|
||||
accesskeyid, err := section.GetKey("access_key_id")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_id option in profile for access_key")
|
||||
}
|
||||
if accesskeyid.String() == "" {
|
||||
return nil, errors.New("access_key_id cannot be empty")
|
||||
}
|
||||
accessKeySecret, err := section.GetKey("access_key_secret")
|
||||
if err != nil {
|
||||
return nil, errors.New("Missing required access_key_secret option in profile for access_key")
|
||||
}
|
||||
if accessKeySecret.String() == "" {
|
||||
return nil, errors.New("access_key_secret cannot be empty")
|
||||
}
|
||||
config := &Config{
|
||||
Type: tea.String("access_key"),
|
||||
AccessKeyId: tea.String(accesskeyid.String()),
|
||||
AccessKeySecret: tea.String(accessKeySecret.String()),
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func getType(path, profile string) (*ini.Key, *ini.Section, error) {
|
||||
ini, err := ini.Load(path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("ERROR: Can not open file " + err.Error())
|
||||
}
|
||||
|
||||
section, err := ini.GetSection(profile)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("ERROR: Can not load section " + err.Error())
|
||||
}
|
||||
|
||||
value, err := section.GetKey("type")
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("Missing required type option " + err.Error())
|
||||
}
|
||||
return value, section, nil
|
||||
}
|
||||
|
||||
func getHomePath() string {
|
||||
if hookOS(runtime.GOOS) == "windows" {
|
||||
path, ok := os.LookupEnv("USERPROFILE")
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return path
|
||||
}
|
||||
path, ok := os.LookupEnv("HOME")
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func checkDefaultPath() (path string, err error) {
|
||||
path = getHomePath()
|
||||
if path == "" {
|
||||
return "", errors.New("The default credential file path is invalid")
|
||||
}
|
||||
path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1)
|
||||
_, err = hookState(os.Stat(path))
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func setRuntimeToConfig(config *Config, section *ini.Section) error {
|
||||
rawTimeout, _ := section.GetKey("timeout")
|
||||
rawConnectTimeout, _ := section.GetKey("connect_timeout")
|
||||
rawProxy, _ := section.GetKey("proxy")
|
||||
rawHost, _ := section.GetKey("host")
|
||||
if rawProxy != nil {
|
||||
config.Proxy = tea.String(rawProxy.String())
|
||||
}
|
||||
if rawConnectTimeout != nil {
|
||||
connectTimeout, err := rawConnectTimeout.Int()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Please set connect_timeout with an int value")
|
||||
}
|
||||
config.ConnectTimeout = tea.Int(connectTimeout)
|
||||
}
|
||||
if rawTimeout != nil {
|
||||
timeout, err := rawTimeout.Int()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Please set timeout with an int value")
|
||||
}
|
||||
config.Timeout = tea.Int(timeout)
|
||||
}
|
||||
if rawHost != nil {
|
||||
config.Host = tea.String(rawHost.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
13
vendor/github.com/aliyun/credentials-go/credentials/provider.go
generated
vendored
Normal file
13
vendor/github.com/aliyun/credentials-go/credentials/provider.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
package credentials
|
||||
|
||||
//Environmental virables that may be used by the provider
|
||||
const (
|
||||
ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE"
|
||||
ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA"
|
||||
PATHCredentialFile = "~/.alibabacloud/credentials"
|
||||
)
|
||||
|
||||
// Provider will be implemented When you want to customize the provider.
|
||||
type Provider interface {
|
||||
resolve() (*Config, error)
|
||||
}
|
||||
32
vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go
generated
vendored
Normal file
32
vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
type providerChain struct {
|
||||
Providers []Provider
|
||||
}
|
||||
|
||||
var defaultproviders = []Provider{providerEnv, providerProfile, providerInstance}
|
||||
var defaultChain = newProviderChain(defaultproviders)
|
||||
|
||||
func newProviderChain(providers []Provider) Provider {
|
||||
return &providerChain{
|
||||
Providers: providers,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *providerChain) resolve() (*Config, error) {
|
||||
for _, provider := range p.Providers {
|
||||
config, err := provider.resolve()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if config == nil {
|
||||
continue
|
||||
}
|
||||
return config, err
|
||||
}
|
||||
return nil, errors.New("No credential found")
|
||||
|
||||
}
|
||||
59
vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go
generated
vendored
Normal file
59
vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
// CommonRequest is for requesting credential
|
||||
type CommonRequest struct {
|
||||
Scheme string
|
||||
Method string
|
||||
Domain string
|
||||
RegionId string
|
||||
URL string
|
||||
ReadTimeout time.Duration
|
||||
ConnectTimeout time.Duration
|
||||
isInsecure *bool
|
||||
|
||||
userAgent map[string]string
|
||||
QueryParams map[string]string
|
||||
Headers map[string]string
|
||||
|
||||
queries string
|
||||
}
|
||||
|
||||
// NewCommonRequest returns a CommonRequest
|
||||
func NewCommonRequest() *CommonRequest {
|
||||
return &CommonRequest{
|
||||
QueryParams: make(map[string]string),
|
||||
Headers: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// BuildURL returns a url
|
||||
func (request *CommonRequest) BuildURL() string {
|
||||
url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain)
|
||||
request.queries = "/?" + utils.GetURLFormedMap(request.QueryParams)
|
||||
return url + request.queries
|
||||
}
|
||||
|
||||
// BuildStringToSign returns BuildStringToSign
|
||||
func (request *CommonRequest) BuildStringToSign() (stringToSign string) {
|
||||
signParams := make(map[string]string)
|
||||
for key, value := range request.QueryParams {
|
||||
signParams[key] = value
|
||||
}
|
||||
|
||||
stringToSign = utils.GetURLFormedMap(signParams)
|
||||
stringToSign = strings.Replace(stringToSign, "+", "%20", -1)
|
||||
stringToSign = strings.Replace(stringToSign, "*", "%2A", -1)
|
||||
stringToSign = strings.Replace(stringToSign, "%7E", "~", -1)
|
||||
stringToSign = url.QueryEscape(stringToSign)
|
||||
stringToSign = request.Method + "&%2F&" + stringToSign
|
||||
return
|
||||
}
|
||||
53
vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go
generated
vendored
Normal file
53
vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
package response
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// hookReadAll is a test seam around the body reader.
var hookReadAll = func(fn func(r io.Reader) (b []byte, err error)) func(r io.Reader) (b []byte, err error) {
	return fn
}

// CommonResponse stores the interesting parts of an http.Response.
type CommonResponse struct {
	httpStatus        int
	httpHeaders       map[string][]string
	httpContentString string
	httpContentBytes  []byte
}

// ParseFromHTTPResponse fills the receiver from httpResponse, closing its
// body; the read error (if any) is returned.
func (resp *CommonResponse) ParseFromHTTPResponse(httpResponse *http.Response) (err error) {
	defer httpResponse.Body.Close()
	body, err := hookReadAll(ioutil.ReadAll)(httpResponse.Body)
	if err != nil {
		return
	}
	resp.httpStatus = httpResponse.StatusCode
	resp.httpHeaders = httpResponse.Header
	resp.httpContentBytes = body
	resp.httpContentString = string(body)
	return
}

// GetHTTPStatus returns the response status code.
func (resp *CommonResponse) GetHTTPStatus() int {
	return resp.httpStatus
}

// GetHTTPHeaders returns the response headers.
func (resp *CommonResponse) GetHTTPHeaders() map[string][]string {
	return resp.httpHeaders
}

// GetHTTPContentString returns the body content as a string.
func (resp *CommonResponse) GetHTTPContentString() string {
	return resp.httpContentString
}

// GetHTTPContentBytes returns the body content as []byte.
func (resp *CommonResponse) GetHTTPContentBytes() []byte {
	return resp.httpContentBytes
}
|
||||
145
vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go
generated
vendored
Normal file
145
vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
// RsaKeyPairCredential is a kind of credentials
|
||||
type RsaKeyPairCredential struct {
|
||||
*credentialUpdater
|
||||
PrivateKey string
|
||||
PublicKeyId string
|
||||
SessionExpiration int
|
||||
sessionCredential *sessionCredential
|
||||
runtime *utils.Runtime
|
||||
}
|
||||
|
||||
type rsaKeyPairResponse struct {
|
||||
SessionAccessKey *sessionAccessKey `json:"SessionAccessKey" xml:"SessionAccessKey"`
|
||||
}
|
||||
|
||||
type sessionAccessKey struct {
|
||||
SessionAccessKeyId string `json:"SessionAccessKeyId" xml:"SessionAccessKeyId"`
|
||||
SessionAccessKeySecret string `json:"SessionAccessKeySecret" xml:"SessionAccessKeySecret"`
|
||||
Expiration string `json:"Expiration" xml:"Expiration"`
|
||||
}
|
||||
|
||||
func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int, runtime *utils.Runtime) *RsaKeyPairCredential {
|
||||
return &RsaKeyPairCredential{
|
||||
PrivateKey: privateKey,
|
||||
PublicKeyId: publicKeyId,
|
||||
SessionExpiration: sessionExpiration,
|
||||
credentialUpdater: new(credentialUpdater),
|
||||
runtime: runtime,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns RsaKeyPairCredential's AccessKeyId
|
||||
// if AccessKeyId is not exist or out of date, the function will update it.
|
||||
func (r *RsaKeyPairCredential) GetAccessKeyId() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns RsaKeyPairCredential's AccessKeySecret
|
||||
// if AccessKeySecret is not exist or out of date, the function will update it.
|
||||
func (r *RsaKeyPairCredential) GetAccessKeySecret() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken is useless RsaKeyPairCredential
|
||||
func (r *RsaKeyPairCredential) GetSecurityToken() (*string, error) {
|
||||
return tea.String(""), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless for RsaKeyPairCredential
|
||||
func (r *RsaKeyPairCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns RsaKeyPairCredential's type
|
||||
func (r *RsaKeyPairCredential) GetType() *string {
|
||||
return tea.String("rsa_key_pair")
|
||||
}
|
||||
|
||||
func (r *RsaKeyPairCredential) updateCredential() (err error) {
|
||||
if r.runtime == nil {
|
||||
r.runtime = new(utils.Runtime)
|
||||
}
|
||||
request := request.NewCommonRequest()
|
||||
request.Domain = "sts.aliyuncs.com"
|
||||
if r.runtime.Host != "" {
|
||||
request.Domain = r.runtime.Host
|
||||
}
|
||||
request.Scheme = "HTTPS"
|
||||
request.Method = "GET"
|
||||
request.QueryParams["AccessKeyId"] = r.PublicKeyId
|
||||
request.QueryParams["Action"] = "GenerateSessionAccessKey"
|
||||
request.QueryParams["Format"] = "JSON"
|
||||
if r.SessionExpiration > 0 {
|
||||
if r.SessionExpiration >= 900 && r.SessionExpiration <= 3600 {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(r.SessionExpiration)
|
||||
} else {
|
||||
err = errors.New("[InvalidParam]:Key Pair session duration should be in the range of 15min - 1Hr")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds)
|
||||
}
|
||||
request.QueryParams["SignatureMethod"] = "SHA256withRSA"
|
||||
request.QueryParams["SignatureType"] = "PRIVATEKEY"
|
||||
request.QueryParams["SignatureVersion"] = "1.0"
|
||||
request.QueryParams["Version"] = "2015-04-01"
|
||||
request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601()
|
||||
request.QueryParams["SignatureNonce"] = utils.GetUUID()
|
||||
signature := utils.Sha256WithRsa(request.BuildStringToSign(), r.PrivateKey)
|
||||
request.QueryParams["Signature"] = signature
|
||||
request.Headers["Host"] = request.Domain
|
||||
request.Headers["Accept-Encoding"] = "identity"
|
||||
request.URL = request.BuildURL()
|
||||
content, err := doAction(request, r.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh KeyPair err: %s", err.Error())
|
||||
}
|
||||
var resp *rsaKeyPairResponse
|
||||
err = json.Unmarshal(content, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh KeyPair err: Json Unmarshal fail: %s", err.Error())
|
||||
}
|
||||
if resp == nil || resp.SessionAccessKey == nil {
|
||||
return fmt.Errorf("refresh KeyPair err: SessionAccessKey is empty")
|
||||
}
|
||||
sessionAccessKey := resp.SessionAccessKey
|
||||
if sessionAccessKey.SessionAccessKeyId == "" || sessionAccessKey.SessionAccessKeySecret == "" || sessionAccessKey.Expiration == "" {
|
||||
return fmt.Errorf("refresh KeyPair err: SessionAccessKeyId: %v, SessionAccessKeySecret: %v, Expiration: %v", sessionAccessKey.SessionAccessKeyId, sessionAccessKey.SessionAccessKeySecret, sessionAccessKey.Expiration)
|
||||
}
|
||||
|
||||
expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionAccessKey.Expiration)
|
||||
r.lastUpdateTimestamp = time.Now().Unix()
|
||||
r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
|
||||
r.sessionCredential = &sessionCredential{
|
||||
AccessKeyId: sessionAccessKey.SessionAccessKeyId,
|
||||
AccessKeySecret: sessionAccessKey.SessionAccessKeySecret,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
7
vendor/github.com/aliyun/credentials-go/credentials/session_credential.go
generated
vendored
Normal file
7
vendor/github.com/aliyun/credentials-go/credentials/session_credential.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
package credentials
|
||||
|
||||
// sessionCredential holds a temporary (session-scoped) set of credentials
// shared by the refreshing credential types.
type sessionCredential struct {
	AccessKeyId     string
	AccessKeySecret string
	SecurityToken   string
}
|
||||
43
vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go
generated
vendored
Normal file
43
vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package credentials
|
||||
|
||||
import "github.com/alibabacloud-go/tea/tea"
|
||||
|
||||
// StsTokenCredential is a kind of credentials
|
||||
type StsTokenCredential struct {
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
SecurityToken string
|
||||
}
|
||||
|
||||
func newStsTokenCredential(accessKeyId, accessKeySecret, securityToken string) *StsTokenCredential {
|
||||
return &StsTokenCredential{
|
||||
AccessKeyId: accessKeyId,
|
||||
AccessKeySecret: accessKeySecret,
|
||||
SecurityToken: securityToken,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns StsTokenCredential's AccessKeyId
|
||||
func (s *StsTokenCredential) GetAccessKeyId() (*string, error) {
|
||||
return tea.String(s.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns StsTokenCredential's AccessKeySecret
|
||||
func (s *StsTokenCredential) GetAccessKeySecret() (*string, error) {
|
||||
return tea.String(s.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken reutrns StsTokenCredential's SecurityToken
|
||||
func (s *StsTokenCredential) GetSecurityToken() (*string, error) {
|
||||
return tea.String(s.SecurityToken), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is useless StsTokenCredential
|
||||
func (s *StsTokenCredential) GetBearerToken() *string {
|
||||
return tea.String("")
|
||||
}
|
||||
|
||||
// GetType reutrns StsTokenCredential's type
|
||||
func (s *StsTokenCredential) GetType() *string {
|
||||
return tea.String("sts")
|
||||
}
|
||||
163
vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go
generated
vendored
Normal file
163
vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go
generated
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alibabacloud-go/tea/tea"
|
||||
"github.com/aliyun/credentials-go/credentials/request"
|
||||
"github.com/aliyun/credentials-go/credentials/utils"
|
||||
)
|
||||
|
||||
const defaultDurationSeconds = 3600
|
||||
|
||||
// RAMRoleArnCredential is a kind of credentials
|
||||
type RAMRoleArnCredential struct {
|
||||
*credentialUpdater
|
||||
AccessKeyId string
|
||||
AccessKeySecret string
|
||||
RoleArn string
|
||||
RoleSessionName string
|
||||
RoleSessionExpiration int
|
||||
Policy string
|
||||
sessionCredential *sessionCredential
|
||||
runtime *utils.Runtime
|
||||
}
|
||||
|
||||
type ramRoleArnResponse struct {
|
||||
Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"`
|
||||
}
|
||||
|
||||
type credentialsInResponse struct {
|
||||
AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"`
|
||||
AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
|
||||
SecurityToken string `json:"SecurityToken" xml:"SecurityToken"`
|
||||
Expiration string `json:"Expiration" xml:"Expiration"`
|
||||
}
|
||||
|
||||
func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, runtime *utils.Runtime) *RAMRoleArnCredential {
|
||||
return &RAMRoleArnCredential{
|
||||
AccessKeyId: accessKeyId,
|
||||
AccessKeySecret: accessKeySecret,
|
||||
RoleArn: roleArn,
|
||||
RoleSessionName: roleSessionName,
|
||||
RoleSessionExpiration: roleSessionExpiration,
|
||||
Policy: policy,
|
||||
credentialUpdater: new(credentialUpdater),
|
||||
runtime: runtime,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAccessKeyId reutrns RamRoleArnCredential's AccessKeyId
|
||||
// if AccessKeyId is not exist or out of date, the function will update it.
|
||||
func (r *RAMRoleArnCredential) GetAccessKeyId() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeyId), nil
|
||||
}
|
||||
|
||||
// GetAccessSecret reutrns RamRoleArnCredential's AccessKeySecret
|
||||
// if AccessKeySecret is not exist or out of date, the function will update it.
|
||||
func (r *RAMRoleArnCredential) GetAccessKeySecret() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.AccessKeySecret), nil
|
||||
}
|
||||
|
||||
// GetSecurityToken reutrns RamRoleArnCredential's SecurityToken
|
||||
// if SecurityToken is not exist or out of date, the function will update it.
|
||||
func (r *RAMRoleArnCredential) GetSecurityToken() (*string, error) {
|
||||
if r.sessionCredential == nil || r.needUpdateCredential() {
|
||||
err := r.updateCredential()
|
||||
if err != nil {
|
||||
return tea.String(""), err
|
||||
}
|
||||
}
|
||||
return tea.String(r.sessionCredential.SecurityToken), nil
|
||||
}
|
||||
|
||||
// GetBearerToken is not applicable to RAMRoleArnCredential; it always
// returns a pointer to the empty string.
func (r *RAMRoleArnCredential) GetBearerToken() *string {
	return tea.String("")
}
|
||||
|
||||
// GetType returns RAMRoleArnCredential's credential type, "ram_role_arn".
func (r *RAMRoleArnCredential) GetType() *string {
	return tea.String("ram_role_arn")
}
|
||||
|
||||
func (r *RAMRoleArnCredential) updateCredential() (err error) {
|
||||
if r.runtime == nil {
|
||||
r.runtime = new(utils.Runtime)
|
||||
}
|
||||
request := request.NewCommonRequest()
|
||||
request.Domain = "sts.aliyuncs.com"
|
||||
request.Scheme = "HTTPS"
|
||||
request.Method = "GET"
|
||||
request.QueryParams["AccessKeyId"] = r.AccessKeyId
|
||||
request.QueryParams["Action"] = "AssumeRole"
|
||||
request.QueryParams["Format"] = "JSON"
|
||||
if r.RoleSessionExpiration > 0 {
|
||||
if r.RoleSessionExpiration >= 900 && r.RoleSessionExpiration <= 3600 {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration)
|
||||
} else {
|
||||
err = errors.New("[InvalidParam]:Assume Role session duration should be in the range of 15min - 1Hr")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds)
|
||||
}
|
||||
request.QueryParams["RoleArn"] = r.RoleArn
|
||||
if r.Policy != "" {
|
||||
request.QueryParams["Policy"] = r.Policy
|
||||
}
|
||||
request.QueryParams["RoleSessionName"] = r.RoleSessionName
|
||||
request.QueryParams["SignatureMethod"] = "HMAC-SHA1"
|
||||
request.QueryParams["SignatureVersion"] = "1.0"
|
||||
request.QueryParams["Version"] = "2015-04-01"
|
||||
request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601()
|
||||
request.QueryParams["SignatureNonce"] = utils.GetUUID()
|
||||
signature := utils.ShaHmac1(request.BuildStringToSign(), r.AccessKeySecret+"&")
|
||||
request.QueryParams["Signature"] = signature
|
||||
request.Headers["Host"] = request.Domain
|
||||
request.Headers["Accept-Encoding"] = "identity"
|
||||
request.URL = request.BuildURL()
|
||||
content, err := doAction(request, r.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error())
|
||||
}
|
||||
var resp *ramRoleArnResponse
|
||||
err = json.Unmarshal(content, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error())
|
||||
}
|
||||
if resp == nil || resp.Credentials == nil {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty")
|
||||
}
|
||||
respCredentials := resp.Credentials
|
||||
if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" {
|
||||
return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration)
|
||||
}
|
||||
|
||||
expirationTime, err := time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration)
|
||||
r.lastUpdateTimestamp = time.Now().Unix()
|
||||
r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
|
||||
r.sessionCredential = &sessionCredential{
|
||||
AccessKeyId: respCredentials.AccessKeyId,
|
||||
AccessKeySecret: respCredentials.AccessKeySecret,
|
||||
SecurityToken: respCredentials.SecurityToken,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
35
vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go
generated
vendored
Normal file
35
vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Runtime holds per-request transport options: read/connect timeouts
// (in seconds), an optional proxy URL and an optional host override.
type Runtime struct {
	ReadTimeout    int
	ConnectTimeout int
	Proxy          string
	Host           string
}

// NewRuntime returns a Runtime populated with the given timeout, proxy
// and host settings.
func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime {
	rt := new(Runtime)
	rt.ReadTimeout = readTimeout
	rt.ConnectTimeout = connectTimeout
	rt.Proxy = proxy
	rt.Host = host
	return rt
}
|
||||
|
||||
// Timeout returns a DialContext-compatible function that applies the given
// connect timeout to every dial. The dialer is built once and reused by the
// returned closure; net.Dialer is safe for concurrent use.
func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) {
	dialer := &net.Dialer{
		Timeout:   connectTimeout,
		DualStack: true,
	}
	return func(ctx context.Context, network, address string) (net.Conn, error) {
		return dialer.DialContext(ctx, network, address)
	}
}
|
||||
146
vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go
generated
vendored
Normal file
146
vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"io"
|
||||
rand2 "math/rand"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// uuid is a 16-byte unique identifier rendered in RFC 4122 text form by String.
type uuid [16]byte

// letterBytes is the alphabet RandStringBytes draws from.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// hookRead wraps the random-read function used by safeRandom; the default is
// the identity, leaving an indirection point (presumably for tests to inject
// read failures — not visible from this file).
var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) {
	return fn
}

// hookRSA wraps the RSA signing function used by Sha256WithRsa; the default
// is the identity, leaving an indirection point (presumably for tests).
var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
	return fn
}
|
||||
|
||||
// GetUUID returns a uuid
|
||||
func GetUUID() (uuidHex string) {
|
||||
uuid := newUUID()
|
||||
uuidHex = hex.EncodeToString(uuid[:])
|
||||
return
|
||||
}
|
||||
|
||||
// RandStringBytes returns a rand string
|
||||
func RandStringBytes(n int) string {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = letterBytes[rand2.Intn(len(letterBytes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// ShaHmac1 returns the base64-encoded HMAC-SHA1 of source keyed by secret.
func ShaHmac1(source, secret string) string {
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(source))
	digest := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(digest)
}
|
||||
|
||||
// Sha256WithRsa return a string which has been hashed with Rsa
|
||||
func Sha256WithRsa(source, secret string) string {
|
||||
decodeString, err := base64.StdEncoding.DecodeString(secret)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
private, err := x509.ParsePKCS8PrivateKey(decodeString)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
h := crypto.Hash.New(crypto.SHA256)
|
||||
h.Write([]byte(source))
|
||||
hashed := h.Sum(nil)
|
||||
signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey),
|
||||
crypto.SHA256, hashed)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return base64.StdEncoding.EncodeToString(signature)
|
||||
}
|
||||
|
||||
// GetMD5Base64 returns the base64 encoding of the MD5 digest of bytes.
func GetMD5Base64(bytes []byte) (base64Value string) {
	digest := md5.Sum(bytes)
	return base64.StdEncoding.EncodeToString(digest[:])
}
|
||||
|
||||
// GetTimeInFormatISO8601 returns the current UTC time formatted as
// "2006-01-02T15:04:05Z" (the trailing Z is a literal in this layout).
func GetTimeInFormatISO8601() (timeStr string) {
	const layout = "2006-01-02T15:04:05Z"
	// Zone offset 0 makes this identical to the original GMT fixed zone;
	// the layout never prints the zone name.
	return time.Now().UTC().Format(layout)
}
|
||||
|
||||
// GetURLFormedMap returns source as a URL-encoded query string with keys
// sorted (url.Values.Encode ordering).
func GetURLFormedMap(source map[string]string) (urlEncoded string) {
	values := make(url.Values, len(source))
	for k, v := range source {
		// Keys in a map are unique, so Set and Add are equivalent here.
		values.Set(k, v)
	}
	return values.Encode()
}
|
||||
|
||||
func newUUID() uuid {
|
||||
ns := uuid{}
|
||||
safeRandom(ns[:])
|
||||
u := newFromHash(md5.New(), ns, RandStringBytes(16))
|
||||
u[6] = (u[6] & 0x0f) | (byte(2) << 4)
|
||||
u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func newFromHash(h hash.Hash, ns uuid, name string) uuid {
|
||||
u := uuid{}
|
||||
h.Write(ns[:])
|
||||
h.Write([]byte(name))
|
||||
copy(u[:], h.Sum(nil))
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func safeRandom(dest []byte) {
|
||||
if _, err := hookRead(rand.Read)(dest); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (u uuid) String() string {
|
||||
buf := make([]byte, 36)
|
||||
|
||||
hex.Encode(buf[0:8], u[0:4])
|
||||
buf[8] = '-'
|
||||
hex.Encode(buf[9:13], u[4:6])
|
||||
buf[13] = '-'
|
||||
hex.Encode(buf[14:18], u[6:8])
|
||||
buf[18] = '-'
|
||||
hex.Encode(buf[19:23], u[8:10])
|
||||
buf[23] = '-'
|
||||
hex.Encode(buf[24:], u[10:])
|
||||
|
||||
return string(buf)
|
||||
}
|
||||
Reference in New Issue
Block a user