oss data copy optimize after upload finished

Signed-off-by: yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
This commit is contained in:
yaoyao.xyy 2016-10-26 09:37:58 +08:00
parent 8234784a1a
commit f394e82d2b
8 changed files with 144 additions and 43 deletions

6
Godeps/Godeps.json generated
View File

@ -167,15 +167,15 @@
}, },
{ {
"ImportPath": "github.com/denverdino/aliyungo/common", "ImportPath": "github.com/denverdino/aliyungo/common",
"Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" "Rev": "ce70ed03a598bb3ba258ff9c90a83a257959067c"
}, },
{ {
"ImportPath": "github.com/denverdino/aliyungo/oss", "ImportPath": "github.com/denverdino/aliyungo/oss",
"Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" "Rev": "ce70ed03a598bb3ba258ff9c90a83a257959067c"
}, },
{ {
"ImportPath": "github.com/denverdino/aliyungo/util", "ImportPath": "github.com/denverdino/aliyungo/util",
"Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" "Rev": "ce70ed03a598bb3ba258ff9c90a83a257959067c"
}, },
{ {
"ImportPath": "github.com/docker/goamz/aws", "ImportPath": "github.com/docker/goamz/aws",

View File

@ -11,8 +11,9 @@ const (
Hongkong = Region("cn-hongkong") Hongkong = Region("cn-hongkong")
Shenzhen = Region("cn-shenzhen") Shenzhen = Region("cn-shenzhen")
USWest1 = Region("us-west-1") USWest1 = Region("us-west-1")
USEast1 = Region("us-east-1")
APSouthEast1 = Region("ap-southeast-1") APSouthEast1 = Region("ap-southeast-1")
Shanghai = Region("cn-shanghai") Shanghai = Region("cn-shanghai")
) )
var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, APSouthEast1} var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, USEast1, APSouthEast1}

View File

@ -6,3 +6,10 @@ const (
PayByBandwidth = InternetChargeType("PayByBandwidth") PayByBandwidth = InternetChargeType("PayByBandwidth")
PayByTraffic = InternetChargeType("PayByTraffic") PayByTraffic = InternetChargeType("PayByTraffic")
) )
// InstanceChargeType names the billing model of an ECS instance
// ("PrePaid" or "PostPaid").
type InstanceChargeType string

const (
	PrePaid  = InstanceChargeType("PrePaid")
	PostPaid = InstanceChargeType("PostPaid")
)

View File

@ -32,6 +32,7 @@ const DefaultContentType = "application/octet-stream"
type Client struct { type Client struct {
AccessKeyId string AccessKeyId string
AccessKeySecret string AccessKeySecret string
SecurityToken string
Region Region Region Region
Internal bool Internal bool
Secure bool Secure bool
@ -87,6 +88,18 @@ var attempts = util.AttemptStrategy{
// NewOSSClient creates a new OSS. // NewOSSClient creates a new OSS.
// NewOSSClientForAssumeRole creates a new OSS Client that authenticates with
// STS AssumeRole credentials: in addition to the access key pair it carries a
// SecurityToken, which prepare() sends as the x-oss-security-token header.
func NewOSSClientForAssumeRole(region Region, internal bool, accessKeyId string, accessKeySecret string, securityToken string, secure bool) *Client {
	return &Client{
		AccessKeyId:     accessKeyId,
		AccessKeySecret: accessKeySecret,
		SecurityToken:   securityToken,
		Region:          region,
		Internal:        internal,
		debug:           false,
		Secure:          secure,
	}
}
func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client { func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client {
return &Client{ return &Client{
AccessKeyId: accessKeyId, AccessKeyId: accessKeyId,
@ -115,6 +128,10 @@ func (client *Client) Bucket(name string) *Bucket {
type BucketInfo struct { type BucketInfo struct {
Name string Name string
CreationDate string CreationDate string
ExtranetEndpoint string
IntranetEndpoint string
Location string
Grant string `xml:"AccessControlList>Grant"`
} }
type GetServiceResp struct { type GetServiceResp struct {
@ -122,6 +139,10 @@ type GetServiceResp struct {
Buckets []BucketInfo `xml:">Bucket"` Buckets []BucketInfo `xml:">Bucket"`
} }
// GetBucketInfoResp wraps the XML response of the GetBucketInfo
// (?bucketInfo) API call.
type GetBucketInfoResp struct {
	Bucket BucketInfo
}
// GetService gets a list of all buckets owned by an account. // GetService gets a list of all buckets owned by an account.
func (client *Client) GetService() (*GetServiceResp, error) { func (client *Client) GetService() (*GetServiceResp, error) {
bucket := client.Bucket("") bucket := client.Bucket("")
@ -168,6 +189,27 @@ func (client *Client) SetEndpoint(endpoint string) {
client.endpoint = endpoint client.endpoint = endpoint
} }
// Info queries basic information about the bucket via the ?bucketInfo
// sub-resource and returns the parsed BucketInfo.
//
// You can read doc at https://help.aliyun.com/document_detail/31968.html
func (b *Bucket) Info() (BucketInfo, error) {
	params := make(url.Values)
	params.Set("bucketInfo", "")
	r, err := b.GetWithParams("/", params)

	if err != nil {
		return BucketInfo{}, err
	}

	// Parse the XML response.
	var resp GetBucketInfoResp
	if err = xml.Unmarshal(r, &resp); err != nil {
		return BucketInfo{}, err
	}

	return resp.Bucket, nil
}
// PutBucket creates a new bucket. // PutBucket creates a new bucket.
// //
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket // You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket
@ -844,10 +886,10 @@ func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.
mac := hmac.New(sha1.New, []byte(secretKey)) mac := hmac.New(sha1.New, []byte(secretKey))
mac.Write([]byte(stringToSign)) mac.Write([]byte(stringToSign))
macsum := mac.Sum(nil) macsum := mac.Sum(nil)
signature := base64.StdEncoding.EncodeToString([]byte(macsum)) signature := base64.StdEncoding.EncodeToString(macsum)
signature = strings.TrimSpace(signature) signature = strings.TrimSpace(signature)
signedurl, err := url.Parse("https://" + b.Name + ".client.amazonaws.com/") signedurl, err := url.Parse(b.Region.GetEndpoint(b.Internal, b.Name, b.Secure))
if err != nil { if err != nil {
log.Println("ERROR sining url for OSS upload", err) log.Println("ERROR sining url for OSS upload", err)
return "" return ""
@ -982,6 +1024,10 @@ func partiallyEscapedPath(path string) string {
func (client *Client) prepare(req *request) error { func (client *Client) prepare(req *request) error {
// Copy so they can be mutated without affecting on retries. // Copy so they can be mutated without affecting on retries.
headers := copyHeader(req.headers) headers := copyHeader(req.headers)
if len(client.SecurityToken) != 0 {
headers.Set("x-oss-security-token", client.SecurityToken)
}
params := make(url.Values) params := make(url.Values)
for k, v := range req.params { for k, v := range req.params {
@ -1251,9 +1297,6 @@ func (b *Bucket) ACL() (result *AccessControlPolicy, err error) {
return &resp, nil return &resp, nil
} }
const minChunkSize = 5 << 20
const defaultChunkSize = 2 * minChunkSize
func (b *Bucket) GetContentLength(sourcePath string) (int64, error) { func (b *Bucket) GetContentLength(sourcePath string) (int64, error) {
resp, err := b.Head(sourcePath, nil) resp, err := b.Head(sourcePath, nil)
if err != nil { if err != nil {
@ -1265,8 +1308,19 @@ func (b *Bucket) GetContentLength(sourcePath string) (int64, error) {
return currentLength, err return currentLength, err
} }
// Copy large file in the same bucket
func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error { func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error {
return b.CopyLargeFileInParallel(sourcePath, destPath, contentType, perm, options, 1)
}
const defaultChunkSize = int64(128 * 1024 * 1024) //128MB
const maxCopytSize = int64(1024 * 1024 * 1024) //1G
// CopyLargeFileInParallel copies a large file within the same bucket using up to maxConcurrency concurrent part copies
func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, contentType string, perm ACL, options Options, maxConcurrency int) error {
if maxConcurrency < 1 {
maxConcurrency = 1
}
log.Printf("Copy large file from %s to %s\n", sourcePath, destPath) log.Printf("Copy large file from %s to %s\n", sourcePath, destPath)
@ -1276,25 +1330,23 @@ func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType s
return err return err
} }
if currentLength < maxCopytSize {
_, err := b.PutCopy(destPath, perm,
CopyOptions{},
b.Path(sourcePath))
return err
}
multi, err := b.InitMulti(destPath, contentType, perm, options) multi, err := b.InitMulti(destPath, contentType, perm, options)
if err != nil { if err != nil {
return err return err
} }
parts := []Part{} numParts := (currentLength + defaultChunkSize - 1) / defaultChunkSize
completedParts := make([]Part, numParts)
defer func() { errChan := make(chan error, numParts)
if len(parts) > 0 { limiter := make(chan struct{}, maxConcurrency)
if multi == nil {
// Parts should be empty if the multi is not initialized
panic("Unreachable")
} else {
if multi.Complete(parts) != nil {
multi.Abort()
}
}
}
}()
var start int64 = 0 var start int64 = 0
var to int64 = 0 var to int64 = 0
@ -1309,15 +1361,33 @@ func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType s
partNumber++ partNumber++
rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1) rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1)
limiter <- struct{}{}
_, part, err := multi.PutPartCopy(partNumber, go func(partNumber int, rangeStr string) {
_, part, err := multi.PutPartCopyWithContentLength(partNumber,
CopyOptions{CopySourceOptions: rangeStr}, CopyOptions{CopySourceOptions: rangeStr},
sourcePathForCopy) sourcePathForCopy, currentLength)
if err == nil {
if err != nil { completedParts[partNumber-1] = part
return err } else {
log.Printf("Unable in PutPartCopy of part %d for %s: %v\n", partNumber, sourcePathForCopy, err)
} }
parts = append(parts, part) errChan <- err
<-limiter
}(partNumber, rangeStr)
}
fullyCompleted := true
for range completedParts {
err := <-errChan
if err != nil {
fullyCompleted = false
}
}
if fullyCompleted {
err = multi.Complete(completedParts)
} else {
err = multi.Abort()
} }
return err return err

View File

@ -141,9 +141,13 @@ func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Option
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
} }
// PutPartCopy uploads part n by copying from source. Passing -1 as the
// content length makes PutPartCopyWithContentLength look up the source
// object's length itself before issuing the copy.
func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
	return m.PutPartCopyWithContentLength(n, options, source, -1)
}
// //
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy // You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy
func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { func (m *Multi) PutPartCopyWithContentLength(n int, options CopyOptions, source string, contentLength int64) (*CopyObjectResult, Part, error) {
// TODO source format a /BUCKET/PATH/TO/OBJECT // TODO source format a /BUCKET/PATH/TO/OBJECT
// TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path)
@ -155,6 +159,7 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj
params.Set("uploadId", m.UploadId) params.Set("uploadId", m.UploadId)
params.Set("partNumber", strconv.FormatInt(int64(n), 10)) params.Set("partNumber", strconv.FormatInt(int64(n), 10))
if contentLength < 0 {
sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/"))
//log.Println("source: ", source) //log.Println("source: ", source)
//log.Println("sourceBucket: ", sourceBucket.Name) //log.Println("sourceBucket: ", sourceBucket.Name)
@ -164,6 +169,8 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj
if err != nil { if err != nil {
return nil, Part{}, err return nil, Part{}, err
} }
contentLength = sourceMeta.ContentLength
}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{ req := &request{
@ -174,7 +181,7 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj
params: params, params: params,
} }
resp := &CopyObjectResult{} resp := &CopyObjectResult{}
err = m.Bucket.Client.query(req, resp) err := m.Bucket.Client.query(req, resp)
if shouldRetry(err) && attempt.HasNext() { if shouldRetry(err) && attempt.HasNext() {
continue continue
} }
@ -184,7 +191,7 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj
if resp.ETag == "" { if resp.ETag == "" {
return nil, Part{}, errors.New("part upload succeeded with no ETag") return nil, Part{}, errors.New("part upload succeeded with no ETag")
} }
return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil return resp, Part{n, resp.ETag, contentLength}, nil
} }
panic("unreachable") panic("unreachable")
} }

View File

@ -15,6 +15,7 @@ const (
Hongkong = Region("oss-cn-hongkong") Hongkong = Region("oss-cn-hongkong")
Shenzhen = Region("oss-cn-shenzhen") Shenzhen = Region("oss-cn-shenzhen")
USWest1 = Region("oss-us-west-1") USWest1 = Region("oss-us-west-1")
USEast1 = Region("oss-us-east-1")
APSouthEast1 = Region("oss-ap-southeast-1") APSouthEast1 = Region("oss-ap-southeast-1")
Shanghai = Region("oss-cn-shanghai") Shanghai = Region("oss-cn-shanghai")
@ -54,3 +55,16 @@ func (r Region) GetInternalEndpoint(bucket string, secure bool) string {
} }
return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r)) return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r))
} }
// GetVPCInternalEndpoint returns the VPC internal (vpc100) endpoint of the
// region for the given bucket.
// NOTE(review): USEast1 falls back to the regular internal endpoint here —
// presumably it has no vpc100 endpoint; confirm against the OSS endpoint docs.
func (r Region) GetVPCInternalEndpoint(bucket string, secure bool) string {
	protocol := getProtocol(secure)

	if bucket == "" {
		// No bucket: return the bare Hangzhou VPC service endpoint.
		return fmt.Sprintf("%s://vpc100-oss-cn-hangzhou.aliyuncs.com", protocol)
	}

	if r == USEast1 {
		return r.GetInternalEndpoint(bucket, secure)
	} else {
		return fmt.Sprintf("%s://%s.vpc100-%s.aliyuncs.com", protocol, bucket, string(r))
	}
}

View File

@ -32,6 +32,7 @@ var ossParamsToSign = map[string]bool{
"response-cache-control": true, "response-cache-control": true,
"response-content-disposition": true, "response-content-disposition": true,
"response-content-encoding": true, "response-content-encoding": true,
"bucketInfo": true,
} }
func (client *Client) signRequest(request *request) { func (client *Client) signRequest(request *request) {
@ -101,5 +102,6 @@ func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result str
for _, k := range canonicalizedHeaders { for _, k := range canonicalizedHeaders {
canonicalizedHeader += k + ":" + headers.Get(k) + "\n" canonicalizedHeader += k + ":" + headers.Get(k) + "\n"
} }
return newHeaders, canonicalizedHeader return newHeaders, canonicalizedHeader
} }

View File

@ -37,8 +37,8 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) {
elem = elem.Elem() elem = elem.Elem()
} }
elemType := elem.Type() elemType := elem.Type()
for i := 0; i < elem.NumField(); i++ { for i := 0; i < elem.NumField(); i++ {
fieldName := elemType.Field(i).Name fieldName := elemType.Field(i).Name
anonymous := elemType.Field(i).Anonymous anonymous := elemType.Field(i).Anonymous
field := elem.Field(i) field := elem.Field(i)