minio-go: Move request into Client, rename API as Client.

- This cleanup merges newRequest into Client and uses httpClient
  instance properly in a single Client object context.
- This cleanup also ensures that we use getBucketLocation inside the
  request constructor, to avoid the overhead of each request handling
  it separately.
- ListBuckets now returns a list of buckets, not an iterator.
- Remove mocked tests, we will write them through functional tests.
- ListObjectsPartsRecursive now cleans itself up in putParts through
  the read channel.
This commit is contained in:
Harshavardhana 2015-12-14 02:08:20 -08:00
parent 2142093176
commit cff99100a5
27 changed files with 1329 additions and 2065 deletions

View File

@ -4,11 +4,6 @@
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
## Current Maintainers
- Harshavardhana
- Krishna Srinivas
### Making new releases ### Making new releases
Edit `libraryVersion` constant in `api.go`. Edit `libraryVersion` constant in `api.go`.

View File

@ -49,10 +49,11 @@ func main() {
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
for bucket := range s3Client.ListBuckets() { buckets, err := s3Client.ListBuckets()
if bucket.Err != nil { if err != nil {
log.Fatalln(bucket.Err) log.Fatalln(err)
} }
for _, bucket := range buckets {
log.Println(bucket) log.Println(bucket)
} }
} }
@ -66,7 +67,7 @@ func main() {
* [RemoveBucket(bucketName) error](examples/s3/removebucket.go) * [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go) * [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
* [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go) * [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go)
* [ListBuckets() <-chan BucketStat](examples/s3/listbuckets.go) * [ListBuckets() []BucketStat](examples/s3/listbuckets.go)
* [ListObjects(bucketName, objectPrefix, recursive) <-chan ObjectStat](examples/s3/listobjects.go) * [ListObjects(bucketName, objectPrefix, recursive) <-chan ObjectStat](examples/s3/listobjects.go)
* [ListIncompleteUploads(bucketName, prefix, recursive) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go) * [ListIncompleteUploads(bucketName, prefix, recursive) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)

View File

@ -27,8 +27,6 @@ type BucketStat struct {
Name string Name string
// Date the bucket was created. // Date the bucket was created.
CreationDate time.Time CreationDate time.Time
// Error
Err error
} }
// ObjectStat container for object metadata. // ObjectStat container for object metadata.

View File

@ -20,7 +20,7 @@ import (
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "net/http"
"strconv" "strconv"
) )
@ -96,12 +96,71 @@ func (e ErrorResponse) Error() string {
return e.Message return e.Message
} }
// BodyToErrorResponse returns a new encoded ErrorResponse structure // Common reporting string
func BodyToErrorResponse(errBody io.Reader) error { const (
reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
)
// HTTPRespToErrorResponse returns a new encoded ErrorResponse structure
func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
if resp == nil {
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}
var errorResponse ErrorResponse var errorResponse ErrorResponse
err := xmlDecoder(errBody, &errorResponse) err := xmlDecoder(resp.Body, &errorResponse)
if err != nil { if err != nil {
return err switch resp.StatusCode {
case http.StatusNotFound:
if objectName == "" {
errorResponse = ErrorResponse{
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
} else {
errorResponse = ErrorResponse{
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusConflict:
errorResponse = ErrorResponse{
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default:
errorResponse = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
} }
return errorResponse return errorResponse
} }

View File

@ -1,3 +1,19 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio package minio
import ( import (
@ -19,24 +35,33 @@ import (
// public-read - owner gets full access, others get read access. // public-read - owner gets full access, others get read access.
// public-read-write - owner gets full access, others get full access too. // public-read-write - owner gets full access, others get full access too.
// authenticated-read - owner gets full access, authenticated users get read access. // authenticated-read - owner gets full access, authenticated users get read access.
func (a API) GetBucketACL(bucketName string) (BucketACL, error) { func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return "", err return "", err
} }
req, err := a.getBucketACLRequest(bucketName)
// Set acl query.
urlValues := make(url.Values)
urlValues.Set("acl", "")
// Instantiate a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil { if err != nil {
return "", err return "", err
} }
// Initiate the request. // Initiate the request.
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return "", err return "", err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return "", BodyToErrorResponse(resp.Body) return "", HTTPRespToErrorResponse(resp, bucketName, "")
} }
} }
@ -47,12 +72,14 @@ func (a API) GetBucketACL(bucketName string) (BucketACL, error) {
return "", err return "", err
} }
// If Google private bucket policy doesn't have any Grant list. // We need to avoid following de-serialization check for Google Cloud Storage.
if !isGoogleEndpoint(a.endpointURL) { // On Google Cloud Storage "private" canned ACL's policy do not have grant list.
// Treat it as a valid case, check for all other vendors.
if !isGoogleEndpoint(c.endpointURL) {
if policy.AccessControlList.Grant == nil { if policy.AccessControlList.Grant == nil {
errorResponse := ErrorResponse{ errorResponse := ErrorResponse{
Code: "InternalError", Code: "InternalError",
Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues.", Message: "Access control Grant list is empty. " + reportIssue,
BucketName: bucketName, BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"), RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"), HostID: resp.Header.Get("x-amz-id-2"),
@ -101,39 +128,9 @@ func (a API) GetBucketACL(bucketName string) (BucketACL, error) {
} }
} }
// getBucketACLRequest wrapper creates a new getBucketACL request.
func (a API) getBucketACLRequest(bucketName string) (*Request, error) {
// Set acl query.
urlValues := make(url.Values)
urlValues.Set("acl", "")
// get target URL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
// Instantiate a new request.
req, err := newRequest("GET", targetURL, requestMetadata{
bucketRegion: region,
credentials: a.credentials,
contentTransport: a.httpTransport,
})
if err != nil {
return nil, err
}
return req, nil
}
// GetObject gets object content from specified bucket. // GetObject gets object content from specified bucket.
// You may also look at GetPartialObject. // You may also look at GetPartialObject.
func (a API) GetObject(bucketName, objectName string) (io.ReadSeeker, error) { func (c Client) GetObject(bucketName, objectName string) (io.ReadSeeker, error) {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return nil, err return nil, err
} }
@ -141,7 +138,7 @@ func (a API) GetObject(bucketName, objectName string) (io.ReadSeeker, error) {
return nil, err return nil, err
} }
// get object. // get object.
return newObjectReadSeeker(a, bucketName, objectName), nil return newObjectReadSeeker(c, bucketName, objectName), nil
} }
// GetPartialObject gets partial object content as specified by the Range. // GetPartialObject gets partial object content as specified by the Range.
@ -149,7 +146,7 @@ func (a API) GetObject(bucketName, objectName string) (io.ReadSeeker, error) {
// Setting offset and length = 0 will download the full object. // Setting offset and length = 0 will download the full object.
// For more information about the HTTP Range header, // For more information about the HTTP Range header,
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
func (a API) GetPartialObject(bucketName, objectName string, offset, length int64) (io.ReadSeeker, error) { func (c Client) GetPartialObject(bucketName, objectName string, offset, length int64) (io.ReadSeeker, error) {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return nil, err return nil, err
} }
@ -157,7 +154,7 @@ func (a API) GetPartialObject(bucketName, objectName string, offset, length int6
return nil, err return nil, err
} }
// get partial object. // get partial object.
return newObjectReadSeeker(a, bucketName, objectName), nil return newObjectReadSeeker(c, bucketName, objectName), nil
} }
// objectReadSeeker container for io.ReadSeeker. // objectReadSeeker container for io.ReadSeeker.
@ -165,7 +162,7 @@ type objectReadSeeker struct {
// mutex. // mutex.
mutex *sync.Mutex mutex *sync.Mutex
api API client Client
reader io.ReadCloser reader io.ReadCloser
isRead bool isRead bool
stat ObjectStat stat ObjectStat
@ -175,12 +172,12 @@ type objectReadSeeker struct {
} }
// newObjectReadSeeker wraps getObject request returning a io.ReadSeeker. // newObjectReadSeeker wraps getObject request returning a io.ReadSeeker.
func newObjectReadSeeker(api API, bucket, object string) *objectReadSeeker { func newObjectReadSeeker(client Client, bucket, object string) *objectReadSeeker {
return &objectReadSeeker{ return &objectReadSeeker{
mutex: new(sync.Mutex), mutex: new(sync.Mutex),
reader: nil, reader: nil,
isRead: false, isRead: false,
api: api, client: client,
offset: 0, offset: 0,
bucketName: bucket, bucketName: bucket,
objectName: object, objectName: object,
@ -206,7 +203,7 @@ func (r *objectReadSeeker) Read(p []byte) (int, error) {
defer r.mutex.Unlock() defer r.mutex.Unlock()
if !r.isRead { if !r.isRead {
reader, _, err := r.api.getObject(r.bucketName, r.objectName, r.offset, 0) reader, _, err := r.client.getObject(r.bucketName, r.objectName, r.offset, 0)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -246,47 +243,11 @@ func (r *objectReadSeeker) Seek(offset int64, whence int) (int64, error) {
// Size returns the size of the object. // Size returns the size of the object.
func (r *objectReadSeeker) Size() (int64, error) { func (r *objectReadSeeker) Size() (int64, error) {
objectSt, err := r.api.StatObject(r.bucketName, r.objectName) objectSt, err := r.client.StatObject(r.bucketName, r.objectName)
r.stat = objectSt r.stat = objectSt
return r.stat.Size, err return r.stat.Size, err
} }
// getObjectRequest wrapper creates a new getObject request.
func (a API) getObjectRequest(bucketName, objectName string, offset, length int64) (*Request, error) {
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{})
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
// Instantiate a new request.
req, err := newRequest("GET", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
})
if err != nil {
return nil, err
}
// Set ranges if length and offset are valid.
if length > 0 && offset >= 0 {
req.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
} else if offset > 0 && length == 0 {
req.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length < 0 && offset == 0 {
req.Set("Range", fmt.Sprintf("bytes=%d", length))
}
return req, nil
}
// getObject - retrieve object from Object Storage. // getObject - retrieve object from Object Storage.
// //
// Additionally this function also takes range arguments to download the specified // Additionally this function also takes range arguments to download the specified
@ -294,32 +255,53 @@ func (a API) getObjectRequest(bucketName, objectName string, offset, length int6
// //
// For more information about the HTTP Range header. // For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (a API) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) { func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return nil, ObjectStat{}, err return nil, ObjectStat{}, err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return nil, ObjectStat{}, err return nil, ObjectStat{}, err
} }
req, err := a.getObjectRequest(bucketName, objectName, offset, length)
customHeader := make(http.Header)
// Set ranges if length and offset are valid.
if length > 0 && offset >= 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
} else if offset > 0 && length == 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length < 0 && offset == 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
}
// Instantiate a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
})
if err != nil { if err != nil {
return nil, ObjectStat{}, err return nil, ObjectStat{}, err
} }
resp, err := req.Do() // Execute the request.
resp, err := c.httpClient.Do(req)
if err != nil { if err != nil {
return nil, ObjectStat{}, err return nil, ObjectStat{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return nil, ObjectStat{}, BodyToErrorResponse(resp.Body) return nil, ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes // trim off the odd double quotes.
md5sum := strings.Trim(resp.Header.Get("ETag"), "\"")
// parse the date.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil { if err != nil {
msg := "Last-Modified time format not recognized. " + reportIssue
return nil, ObjectStat{}, ErrorResponse{ return nil, ObjectStat{}, ErrorResponse{
Code: "InternalError", Code: "InternalError",
Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.", Message: msg,
RequestID: resp.Header.Get("x-amz-request-id"), RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"), HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),

View File

@ -32,82 +32,29 @@ import (
// fmt.Println(message) // fmt.Println(message)
// } // }
// //
func (a API) ListBuckets() <-chan BucketStat { func (c Client) ListBuckets() ([]BucketStat, error) {
ch := make(chan BucketStat, 100) // Instantiate a new request.
go a.listBucketsInRoutine(ch) req, err := c.newRequest("GET", requestMetadata{})
return ch
}
// listBucketsInRoutine goroutine based iterator for listBuckets.
func (a API) listBucketsInRoutine(ch chan<- BucketStat) {
defer close(ch)
req, err := a.listBucketsRequest()
if err != nil { if err != nil {
ch <- BucketStat{ return nil, err
Err: err,
}
return
} }
resp, err := req.Do() // Initiate the request.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
ch <- BucketStat{ return nil, err
Err: err,
}
return
} }
if resp != nil { if resp != nil {
// for un-authenticated requests, amazon sends a redirect handle it.
if resp.StatusCode == http.StatusTemporaryRedirect {
ch <- BucketStat{
Err: ErrorResponse{
Code: "AccessDenied",
Message: "Anonymous access is forbidden for this operation.",
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
},
}
return
}
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
ch <- BucketStat{ return nil, HTTPRespToErrorResponse(resp, "", "")
Err: BodyToErrorResponse(resp.Body),
}
return
} }
} }
listAllMyBucketsResult := listAllMyBucketsResult{} listAllMyBucketsResult := listAllMyBucketsResult{}
err = xmlDecoder(resp.Body, &listAllMyBucketsResult) err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
if err != nil {
ch <- BucketStat{
Err: err,
}
return
}
for _, bucket := range listAllMyBucketsResult.Buckets.Bucket {
ch <- bucket
}
}
// listBucketRequest wrapper creates a new listBuckets request.
func (a API) listBucketsRequest() (*Request, error) {
// List buckets is directly on the endpoint URL.
targetURL := a.endpointURL
targetURL.Path = "/"
// Instantiate a new request.
req, err := newRequest("GET", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: "us-east-1",
contentTransport: a.httpTransport,
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
return req, nil return listAllMyBucketsResult.Buckets.Bucket, nil
} }
// ListObjects - (List Objects) - List some objects or all recursively. // ListObjects - (List Objects) - List some objects or all recursively.
@ -126,124 +73,75 @@ func (a API) listBucketsRequest() (*Request, error) {
// fmt.Println(message) // fmt.Println(message)
// } // }
// //
func (a API) ListObjects(bucketName string, objectPrefix string, recursive bool) <-chan ObjectStat { func (c Client) ListObjects(bucketName string, objectPrefix string, recursive bool) <-chan ObjectStat {
ch := make(chan ObjectStat, 1000) // Allocate new list objects channel.
go a.listObjectsInRoutine(bucketName, objectPrefix, recursive, ch) objectStatCh := make(chan ObjectStat, 1000)
return ch // Default listing is delimited at "/"
} delimiter := "/"
if recursive {
// listObjectsRecursive lists all objects recursively matching a prefix. // If recursive we do not delimit.
func (a API) listObjectsRecursive(bucketName, objectPrefix string, ch chan<- ObjectStat) { delimiter = ""
var objectMarker string
for {
result, err := a.listObjects(bucketName, objectPrefix, objectMarker, "", 1000)
if err != nil {
ch <- ObjectStat{
Err: err,
}
return
}
for _, object := range result.Contents {
ch <- object
objectMarker = object.Key
}
if !result.IsTruncated {
break
}
} }
}
// listObjectsNonRecursive lists objects delimited with "/" matching a prefix.
func (a API) listObjectsNonRecursive(bucketName, objectPrefix string, ch chan<- ObjectStat) {
// Non recursive delimit with "/".
var objectMarker string
for {
result, err := a.listObjects(bucketName, objectPrefix, objectMarker, "/", 1000)
if err != nil {
ch <- ObjectStat{
Err: err,
}
return
}
objectMarker = result.NextMarker
for _, object := range result.Contents {
ch <- object
}
for _, obj := range result.CommonPrefixes {
object := ObjectStat{}
object.Key = obj.Prefix
object.Size = 0
ch <- object
}
if !result.IsTruncated {
break
}
}
}
// listObjectsInRoutine goroutine based iterator for listObjects.
func (a API) listObjectsInRoutine(bucketName, objectPrefix string, recursive bool, ch chan<- ObjectStat) {
defer close(ch)
// Validate bucket name. // Validate bucket name.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
ch <- ObjectStat{ defer close(objectStatCh)
objectStatCh <- ObjectStat{
Err: err, Err: err,
} }
return return objectStatCh
} }
// Validate incoming object prefix. // Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil { if err := isValidObjectPrefix(objectPrefix); err != nil {
ch <- ObjectStat{ defer close(objectStatCh)
objectStatCh <- ObjectStat{
Err: err, Err: err,
} }
return return objectStatCh
}
// Recursive do not delimit.
if recursive {
a.listObjectsRecursive(bucketName, objectPrefix, ch)
return
}
a.listObjectsNonRecursive(bucketName, objectPrefix, ch)
return
}
// listObjectsRequest wrapper creates a new listObjects request.
func (a API) listObjectsRequest(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (*Request, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
// Set object prefix.
urlValues.Set("prefix", urlEncodePath(objectPrefix))
// Set object marker.
urlValues.Set("marker", urlEncodePath(objectMarker))
// Set delimiter.
urlValues.Set("delimiter", delimiter)
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Get target url.
targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues)
if err != nil {
return nil, err
} }
// get bucket region. // Initiate list objects goroutine here.
region, err := a.getRegion(bucketName) go func(objectStatCh chan<- ObjectStat) {
if err != nil { defer close(objectStatCh)
return nil, err // Save marker for next request.
} var marker string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjects(bucketName, objectPrefix, marker, delimiter, 1000)
if err != nil {
objectStatCh <- ObjectStat{
Err: err,
}
return
}
// Initialize a new request. // If contents are available loop through and send over channel.
req, err := newRequest("GET", targetURL, requestMetadata{ for _, object := range result.Contents {
credentials: a.credentials, objectStatCh <- object
userAgent: a.userAgent, // Save the marker.
bucketRegion: region, marker = object.Key
contentTransport: a.httpTransport, }
})
if err != nil { // Send all common prefixes if any.
return nil, err // NOTE: prefixes are only present if the request is delimited.
} for _, obj := range result.CommonPrefixes {
return req, nil object := ObjectStat{}
object.Key = obj.Prefix
object.Size = 0
objectStatCh <- object
}
// If next marker present, save it for next request.
if result.NextMarker != "" {
marker = result.NextMarker
}
// Listing ends result is not truncated, return right here.
if !result.IsTruncated {
return
}
}
}(objectStatCh)
return objectStatCh
} }
/// Bucket Read Operations. /// Bucket Read Operations.
@ -257,33 +155,52 @@ func (a API) listObjectsRequest(bucketName, objectPrefix, objectMarker, delimite
// ?delimiter - A delimiter is a character you use to group keys. // ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix. // ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body. // ?max-keys - Sets the maximum number of keys returned in the response body.
func (a API) listObjects(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) { func (c Client) listObjects(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return listBucketResult{}, err return listBucketResult{}, err
} }
// Validate object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil { if err := isValidObjectPrefix(objectPrefix); err != nil {
return listBucketResult{}, err return listBucketResult{}, err
} }
req, err := a.listObjectsRequest(bucketName, objectPrefix, objectMarker, delimiter, maxkeys) // Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
// Set object prefix.
urlValues.Set("prefix", urlEncodePath(objectPrefix))
// Set object marker.
urlValues.Set("marker", urlEncodePath(objectMarker))
// Set delimiter.
urlValues.Set("delimiter", delimiter)
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Initialize a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil { if err != nil {
return listBucketResult{}, err return listBucketResult{}, err
} }
resp, err := req.Do() // Execute list buckets.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return listBucketResult{}, err return listBucketResult{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return listBucketResult{}, BodyToErrorResponse(resp.Body) return listBucketResult{}, HTTPRespToErrorResponse(resp, bucketName, "")
} }
} }
// Decode listBuckets XML.
listBucketResult := listBucketResult{} listBucketResult := listBucketResult{}
err = xmlDecoder(resp.Body, &listBucketResult) err = xmlDecoder(resp.Body, &listBucketResult)
if err != nil { if err != nil {
return listBucketResult, err return listBucketResult, err
} }
// close body while returning, along with any error.
return listBucketResult, nil return listBucketResult, nil
} }
@ -303,113 +220,99 @@ func (a API) listObjects(bucketName, objectPrefix, objectMarker, delimiter strin
// fmt.Println(message) // fmt.Println(message)
// } // }
// //
func (a API) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartStat { func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartStat {
return a.listIncompleteUploads(bucketName, objectPrefix, recursive) // Turn on size aggregation of individual parts.
isAggregateSize := true
return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize)
} }
// listIncompleteUploads lists all incomplete uploads. // listIncompleteUploads lists all incomplete uploads.
func (a API) listIncompleteUploads(bucketName, objectName string, recursive bool) <-chan ObjectMultipartStat { func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool) <-chan ObjectMultipartStat {
ch := make(chan ObjectMultipartStat, 1000) // Allocate channel for multipart uploads.
go a.listIncompleteUploadsInRoutine(bucketName, objectName, recursive, ch) objectMultipartStatCh := make(chan ObjectMultipartStat, 1000)
return ch // Delimiter is set to "/" by default.
} delimiter := "/"
if recursive {
// listIncompleteUploadsRecursive list incomplete uploads matching a prefix recursively. // If recursive do not delimit.
func (a API) listIncompleteUploadsRecursive(bucketName, objectPrefix string, ch chan<- ObjectMultipartStat) { delimiter = ""
var objectMarker string
var uploadIDMarker string
for {
result, err := a.listMultipartUploads(bucketName, objectMarker, uploadIDMarker, objectPrefix, "", 1000)
if err != nil {
ch <- ObjectMultipartStat{
Err: err,
}
return
}
for _, objectSt := range result.Uploads {
// NOTE: getTotalMultipartSize can make listing incomplete uploads slower.
objectSt.Size, err = a.getTotalMultipartSize(bucketName, objectSt.Key, objectSt.UploadID)
if err != nil {
ch <- ObjectMultipartStat{
Err: err,
}
}
ch <- objectSt
objectMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
}
if !result.IsTruncated {
break
}
} }
return // Validate bucket name.
}
// listIncompleteUploadsNonRecursive list incomplete uploads delimited at "/" matching a prefix.
func (a API) listIncompleteUploadsNonRecursive(bucketName, objectPrefix string, ch chan<- ObjectMultipartStat) {
// Non recursive with "/" delimiter.
var objectMarker string
var uploadIDMarker string
for {
result, err := a.listMultipartUploads(bucketName, objectMarker, uploadIDMarker, objectPrefix, "/", 1000)
if err != nil {
ch <- ObjectMultipartStat{
Err: err,
}
return
}
objectMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
for _, objectSt := range result.Uploads {
objectSt.Size, err = a.getTotalMultipartSize(bucketName, objectSt.Key, objectSt.UploadID)
if err != nil {
ch <- ObjectMultipartStat{
Err: err,
}
}
ch <- objectSt
}
for _, obj := range result.CommonPrefixes {
object := ObjectMultipartStat{}
object.Key = obj.Prefix
object.Size = 0
ch <- object
}
if !result.IsTruncated {
break
}
}
}
// listIncompleteUploadsInRoutine goroutine based iterator for listing all incomplete uploads.
func (a API) listIncompleteUploadsInRoutine(bucketName, objectPrefix string, recursive bool, ch chan<- ObjectMultipartStat) {
defer close(ch)
// Validate incoming bucket name.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
ch <- ObjectMultipartStat{ defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartStat{
Err: err, Err: err,
} }
return return objectMultipartStatCh
} }
// Validate incoming object prefix. // Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil { if err := isValidObjectPrefix(objectPrefix); err != nil {
ch <- ObjectMultipartStat{ defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartStat{
Err: err, Err: err,
} }
return return objectMultipartStatCh
} }
// Recursive with no delimiter. go func(objectMultipartStatCh chan<- ObjectMultipartStat) {
if recursive { defer close(objectMultipartStatCh)
a.listIncompleteUploadsRecursive(bucketName, objectPrefix, ch) // object and upload ID marker for future requests.
return var objectMarker string
} var uploadIDMarker string
a.listIncompleteUploadsNonRecursive(bucketName, objectPrefix, ch) for {
return // list all multipart uploads.
result, err := c.listMultipartUploads(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
if err != nil {
objectMultipartStatCh <- ObjectMultipartStat{
Err: err,
}
return
}
// Save objectMarker and uploadIDMarker for next request.
objectMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
// Send all multipart uploads.
for _, obj := range result.Uploads {
// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
if aggregateSize {
// Get total multipart size.
obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
if err != nil {
objectMultipartStatCh <- ObjectMultipartStat{
Err: err,
}
}
}
objectMultipartStatCh <- obj
}
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectMultipartStat{}
object.Key = obj.Prefix
object.Size = 0
objectMultipartStatCh <- object
}
// Listing ends if result not truncated, return right here.
if !result.IsTruncated {
return
}
}
}(objectMultipartStatCh)
// return.
return objectMultipartStatCh
} }
// listMultipartUploadsRequest wrapper creates a new listMultipartUploads request. // listMultipartUploads - (List Multipart Uploads).
func (a API) listMultipartUploadsRequest(bucketName, keyMarker, uploadIDMarker, // - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
prefix, delimiter string, maxUploads int) (*Request, error) { //
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request paramters. :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin.
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (c Client) listMultipartUploads(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
// Get resources properly escaped and lined up before using them in http request. // Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values) urlValues := make(url.Values)
// Set uploads. // Set uploads.
@ -425,55 +328,26 @@ func (a API) listMultipartUploadsRequest(bucketName, keyMarker, uploadIDMarker,
// Set max-uploads. // Set max-uploads.
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
// Instantiate a new request. // Instantiate a new request.
return newRequest("GET", targetURL, requestMetadata{ req, err := c.newRequest("GET", requestMetadata{
credentials: a.credentials, bucketName: bucketName,
userAgent: a.userAgent, queryValues: urlValues,
bucketRegion: region,
contentTransport: a.httpTransport,
}) })
}
// listMultipartUploads - (List Multipart Uploads).
// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request paramters. :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin.
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (a API) listMultipartUploads(bucketName, keyMarker,
uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
req, err := a.listMultipartUploadsRequest(bucketName,
keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
if err != nil { if err != nil {
return listMultipartUploadsResult{}, err return listMultipartUploadsResult{}, err
} }
resp, err := req.Do() // Execute list multipart uploads request.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return listMultipartUploadsResult{}, err return listMultipartUploadsResult{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return listMultipartUploadsResult{}, BodyToErrorResponse(resp.Body) return listMultipartUploadsResult{}, HTTPRespToErrorResponse(resp, bucketName, "")
} }
} }
// Decode response body.
listMultipartUploadsResult := listMultipartUploadsResult{} listMultipartUploadsResult := listMultipartUploadsResult{}
err = xmlDecoder(resp.Body, &listMultipartUploadsResult) err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
if err != nil { if err != nil {
@ -483,92 +357,83 @@ func (a API) listMultipartUploads(bucketName, keyMarker,
} }
// listObjectPartsRecursive list all object parts recursively. // listObjectPartsRecursive list all object parts recursively.
func (a API) listObjectPartsRecursive(bucketName, objectName, uploadID string) <-chan objectPartMetadata { func (c Client) listObjectPartsRecursive(bucketName, objectName, uploadID string, doneCh <-chan bool) <-chan objectPartMetadata {
// Allocate new list parts channel.
objectPartCh := make(chan objectPartMetadata, 1000) objectPartCh := make(chan objectPartMetadata, 1000)
go a.listObjectPartsRecursiveInRoutine(bucketName, objectName, uploadID, objectPartCh) // Initiate list parts goroutine.
go func(objectPartCh chan<- objectPartMetadata, doneCh <-chan bool) {
defer close(objectPartCh)
// part number marker for the next request if IsTruncated is set.
var nextPartNumberMarker int
for {
// Get list of uploaded parts a maximum of 1000 per request.
listObjPartsResult, err := c.listObjectParts(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
if err != nil {
objectPartCh <- objectPartMetadata{
Err: err,
}
return
}
// Loop through all object parts and send over the channel.
// Additionally wait on done channel to return the routine.
for _, uploadedObjectPart := range listObjPartsResult.ObjectParts {
select {
// If done channel return here.
case <-doneCh:
return
// Send uploaded parts here.
case objectPartCh <- uploadedObjectPart:
}
}
// Keep part number marker, for the next iteration.
nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
// Listing ends result is not truncated, return right here.
if !listObjPartsResult.IsTruncated {
return
}
}
}(objectPartCh, doneCh)
// Return the channel here.
return objectPartCh return objectPartCh
} }
// listObjectPartsRecursiveInRoutine gorountine based iterator for listing all object parts. // findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
func (a API) listObjectPartsRecursiveInRoutine(bucketName, objectName, uploadID string, ch chan<- objectPartMetadata) { func (c Client) findUploadID(bucketName, objectName string) (string, error) {
defer close(ch) // Make list incomplete uploads recursive.
listObjPartsResult, err := a.listObjectParts(bucketName, objectName, uploadID, 0, 1000) isRecursive := true
if err != nil { // Turn off size aggregation of individual parts, in this request.
ch <- objectPartMetadata{ isAggregateSize := false
Err: err, for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize) {
if mpUpload.Err != nil {
return "", mpUpload.Err
} }
return // if object name found, return the upload id.
} if objectName == mpUpload.Key {
for _, uploadedObjectPart := range listObjPartsResult.ObjectParts { return mpUpload.UploadID, nil
ch <- uploadedObjectPart
}
// listObject parts.
for {
if !listObjPartsResult.IsTruncated {
break
}
nextPartNumberMarker := listObjPartsResult.NextPartNumberMarker
listObjPartsResult, err = a.listObjectParts(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
if err != nil {
ch <- objectPartMetadata{
Err: err,
}
return
}
for _, uploadedObjectPart := range listObjPartsResult.ObjectParts {
ch <- uploadedObjectPart
} }
} }
// No upload id was found, return success and empty upload id.
return "", nil
} }
// getTotalMultipartSize - calculate total uploaded size for the a given multipart object. // getTotalMultipartSize - calculate total uploaded size for the a given multipart object.
func (a API) getTotalMultipartSize(bucketName, objectName, uploadID string) (int64, error) { func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (int64, error) {
var size int64 var size int64
// Allocate a new done channel.
doneCh := make(chan bool, 1)
defer close(doneCh)
// Iterate over all parts and aggregate the size. // Iterate over all parts and aggregate the size.
for part := range a.listObjectPartsRecursive(bucketName, objectName, uploadID) { for part := range c.listObjectPartsRecursive(bucketName, objectName, uploadID, doneCh) {
if part.Err != nil { if part.Err != nil {
return 0, part.Err return 0, part.Err
} }
size += part.Size size += part.Size
} }
// Done channel is not used here since, it is not necessary.
return size, nil return size, nil
} }
// listObjectPartsRequest wrapper creates a new ListObjectParts request.
func (a API) listObjectPartsRequest(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (*Request, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number marker.
urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
// Set upload id.
urlValues.Set("uploadId", uploadID)
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
req, err := newRequest("GET", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
})
if err != nil {
return nil, err
}
return req, nil
}
// listObjectParts (List Parts) // listObjectParts (List Parts)
// - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload // - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload
// //
@ -576,21 +441,36 @@ func (a API) listObjectPartsRequest(bucketName, objectName, uploadID string, par
// request paramters :- // request paramters :-
// --------- // ---------
// ?part-number-marker - Specifies the part after which listing should begin. // ?part-number-marker - Specifies the part after which listing should begin.
func (a API) listObjectParts(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { func (c Client) listObjectParts(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
req, err := a.listObjectPartsRequest(bucketName, objectName, uploadID, partNumberMarker, maxParts) // Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number marker.
urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
// Set upload id.
urlValues.Set("uploadId", uploadID)
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
if err != nil { if err != nil {
return listObjectPartsResult{}, err return listObjectPartsResult{}, err
} }
resp, err := req.Do() // Exectue list object parts.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return listObjectPartsResult{}, err return listObjectPartsResult{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return listObjectPartsResult{}, BodyToErrorResponse(resp.Body) return listObjectPartsResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
// Decode list object parts XML.
listObjectPartsResult := listObjectPartsResult{} listObjectPartsResult := listObjectPartsResult{}
err = xmlDecoder(resp.Body, &listObjectPartsResult) err = xmlDecoder(resp.Body, &listObjectPartsResult)
if err != nil { if err != nil {

View File

@ -1,105 +1,87 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio package minio
import ( import (
"errors" "errors"
"fmt"
"net/url"
"time" "time"
) )
// PresignedGetObject returns a presigned URL to access an object without credentials. // PresignedGetObject returns a presigned URL to access an object without credentials.
// Expires maximum is 7days - ie. 604800 and minimum is 1. // Expires maximum is 7days - ie. 604800 and minimum is 1.
func (a API) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) { func (c Client) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
if err := isValidObjectName(objectName); err != nil {
return "", err
}
if err := isValidExpiry(expires); err != nil { if err := isValidExpiry(expires); err != nil {
return "", err return "", err
} }
expireSeconds := int64(expires / time.Second) expireSeconds := int64(expires / time.Second)
return a.presignedGetObject(bucketName, objectName, expireSeconds, 0, 0)
}
// presignedGetObject - generate presigned get object URL.
func (a API) presignedGetObject(bucketName, objectName string, expires, offset, length int64) (string, error) {
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{})
if err != nil {
return "", err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return "", err
}
// Instantiate a new request. // Instantiate a new request.
req, err := newRequest("GET", targetURL, requestMetadata{ // Since expires is set newRequest will presign the request.
credentials: a.credentials, req, err := c.newRequest("GET", requestMetadata{
expires: expires, presignURL: true,
userAgent: a.userAgent, bucketName: bucketName,
bucketRegion: region, objectName: objectName,
contentTransport: a.httpTransport, expires: expireSeconds,
}) })
if err != nil { if err != nil {
return "", err return "", err
} }
return req.URL.String(), nil
// Set ranges if length and offset are valid.
if length > 0 && offset >= 0 {
req.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
} else if offset > 0 && length == 0 {
req.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length > 0 && offset == 0 {
req.Set("Range", fmt.Sprintf("bytes=-%d", length))
}
if req.credentials.Signature.isV2() {
return req.PreSignV2()
}
return req.PreSignV4()
} }
// PresignedPutObject returns a presigned URL to upload an object without credentials. // PresignedPutObject returns a presigned URL to upload an object without credentials.
// Expires maximum is 7days - ie. 604800 and minimum is 1. // Expires maximum is 7days - ie. 604800 and minimum is 1.
func (a API) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) { func (c Client) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
if err := isValidObjectName(objectName); err != nil {
return "", err
}
if err := isValidExpiry(expires); err != nil { if err := isValidExpiry(expires); err != nil {
return "", err return "", err
} }
expireSeconds := int64(expires / time.Second) expireSeconds := int64(expires / time.Second)
return a.presignedPutObject(bucketName, objectName, expireSeconds)
}
// presignedPutObject - generate presigned PUT url.
func (a API) presignedPutObject(bucketName, objectName string, expires int64) (string, error) {
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{})
if err != nil {
return "", err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return "", err
}
// Instantiate a new request. // Instantiate a new request.
req, err := newRequest("PUT", targetURL, requestMetadata{ // Since expires is set newRequest will presign the request.
credentials: a.credentials, req, err := c.newRequest("PUT", requestMetadata{
expires: expires, presignURL: true,
userAgent: a.userAgent, bucketName: bucketName,
bucketRegion: region, objectName: objectName,
contentTransport: a.httpTransport, expires: expireSeconds,
}) })
if err != nil { if err != nil {
return "", err return "", err
} }
if req.credentials.Signature.isV2() { return req.URL.String(), nil
return req.PreSignV2()
}
return req.PreSignV4()
} }
// PresignedPostPolicy returns POST form data to upload an object at a location. // PresignedPostPolicy returns POST form data to upload an object at a location.
func (a API) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
// Validate input arguments.
if p.expiration.IsZero() { if p.expiration.IsZero() {
return nil, errors.New("Expiration time must be specified") return nil, errors.New("Expiration time must be specified")
} }
@ -109,69 +91,57 @@ func (a API) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
if _, ok := p.formData["bucket"]; !ok { if _, ok := p.formData["bucket"]; !ok {
return nil, errors.New("bucket name must be specified") return nil, errors.New("bucket name must be specified")
} }
return a.presignedPostPolicy(p)
}
// presignedPostPolicy - generate post form data. bucketName := p.formData["bucket"]
func (a API) presignedPostPolicy(p *PostPolicy) (map[string]string, error) { // Fetch the location.
// get targetURL. location, err := c.getBucketLocation(bucketName)
targetURL, err := getTargetURL(a.endpointURL, p.formData["bucket"], "", url.Values{})
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(p.formData["bucket"])
if err != nil {
return nil, err
}
// Instantiate a new request.
req, err := newRequest("POST", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Keep time. // Keep time.
t := time.Now().UTC() t := time.Now().UTC()
if req.credentials.Signature.isV2() { if c.signature.isV2() {
policyBase64 := p.base64() policyBase64 := p.base64()
p.formData["policy"] = policyBase64 p.formData["policy"] = policyBase64
// for all other regions set this value to be 'AWSAccessKeyId'. // For Google endpoint set this value to be 'GoogleAccessId'.
if isGoogleEndpoint(a.endpointURL) { if isGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = req.credentials.AccessKeyID p.formData["GoogleAccessId"] = c.accessKeyID
} else { } else {
p.formData["AWSAccessKeyId"] = req.credentials.AccessKeyID // For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
} }
p.formData["signature"] = req.PostPresignSignatureV2(policyBase64) // Sign the policy.
p.formData["signature"] = PostPresignSignatureV2(policyBase64, c.secretAccessKey)
return p.formData, nil return p.formData, nil
} }
credential := getCredential(req.credentials.AccessKeyID, req.bucketRegion, t)
// Add date policy.
p.addNewPolicy(policyCondition{ p.addNewPolicy(policyCondition{
matchType: "eq", matchType: "eq",
condition: "$x-amz-date", condition: "$x-amz-date",
value: t.Format(iso8601DateFormat), value: t.Format(iso8601DateFormat),
}) })
// Add algorithm policy.
p.addNewPolicy(policyCondition{ p.addNewPolicy(policyCondition{
matchType: "eq", matchType: "eq",
condition: "$x-amz-algorithm", condition: "$x-amz-algorithm",
value: authHeader, value: signV4Algorithm,
}) })
// Add a credential policy.
credential := getCredential(c.accessKeyID, location, t)
p.addNewPolicy(policyCondition{ p.addNewPolicy(policyCondition{
matchType: "eq", matchType: "eq",
condition: "$x-amz-credential", condition: "$x-amz-credential",
value: credential, value: credential,
}) })
// get base64 encoded policy.
policyBase64 := p.base64() policyBase64 := p.base64()
// Fill in the form data.
p.formData["policy"] = policyBase64 p.formData["policy"] = policyBase64
p.formData["x-amz-algorithm"] = authHeader p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat) p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
p.formData["x-amz-signature"] = req.PostPresignSignatureV4(policyBase64, t) p.formData["x-amz-signature"] = PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
return p.formData, nil return p.formData, nil
} }

View File

@ -1,7 +1,24 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio package minio
import ( import (
"bytes" "bytes"
"encoding/hex"
"encoding/xml" "encoding/xml"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -24,7 +41,13 @@ import (
// //
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html // For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations // For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (a API) MakeBucket(bucketName string, acl BucketACL, region string) error { func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error {
// Validate if request is made on anonymous requests.
if c.anonymous {
return ErrInvalidArgument("Make bucket cannot be issued with anonymous credentials.")
}
// Validate the input arguments.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
@ -32,13 +55,14 @@ func (a API) MakeBucket(bucketName string, acl BucketACL, region string) error {
return ErrInvalidArgument("Unrecognized ACL " + acl.String()) return ErrInvalidArgument("Unrecognized ACL " + acl.String())
} }
req, err := a.makeBucketRequest(bucketName, string(acl), region) // Instantiate the request.
req, err := c.makeBucketRequest(bucketName, acl, location)
if err != nil { if err != nil {
return err return err
} }
// Initiate the request. // Execute the request.
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
@ -46,62 +70,77 @@ func (a API) MakeBucket(bucketName string, acl BucketACL, region string) error {
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return BodyToErrorResponse(resp.Body) return HTTPRespToErrorResponse(resp, bucketName, "")
} }
} }
// Return.
return nil return nil
} }
// makeBucketRequest constructs request for makeBucket. // makeBucketRequest constructs request for makeBucket.
func (a API) makeBucketRequest(bucketName, acl, region string) (*Request, error) { func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) {
// get target URL. // Validate input arguments.
targetURL, err := getTargetURL(a.endpointURL, bucketName, "", url.Values{}) if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
if !acl.isValidBucketACL() {
return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String())
}
// Set get bucket location always as path style.
targetURL := c.endpointURL
targetURL.Path = "/" + bucketName
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Initialize request metadata. // by default bucket acl is set to private.
var reqMetadata requestMetadata req.Header.Set("x-amz-acl", "private")
reqMetadata = requestMetadata{ if acl != "" {
userAgent: a.userAgent, req.Header.Set("x-amz-acl", string(acl))
credentials: a.credentials,
bucketRegion: "us-east-1",
contentTransport: a.httpTransport,
} }
// if region is 'us-east-1' no need to apply location constraint on the bucket. // set UserAgent for the request.
if region == "us-east-1" { c.setUserAgent(req)
region = ""
// if location is 'us-east-1' no need to apply location constraint on the bucket.
if location == "us-east-1" {
location = ""
} }
// If region is set use to create bucket location config. // set sha256 sum for signature calculation only with signature version '4'.
if region != "" { if c.signature.isV4() || c.signature.isLatest() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
}
// If location is set use to create bucket location config.
if location != "" {
createBucketConfig := new(createBucketConfiguration) createBucketConfig := new(createBucketConfiguration)
createBucketConfig.Location = region createBucketConfig.Location = location
var createBucketConfigBytes []byte var createBucketConfigBytes []byte
createBucketConfigBytes, err = xml.Marshal(createBucketConfig) createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
reqMetadata.contentBody = ioutil.NopCloser(createBucketConfigBuffer) req.Body = ioutil.NopCloser(createBucketConfigBuffer)
reqMetadata.contentLength = int64(createBucketConfigBuffer.Len()) req.ContentLength = int64(createBucketConfigBuffer.Len())
reqMetadata.contentSha256Bytes = sum256(createBucketConfigBuffer.Bytes()) if c.signature.isV4() || c.signature.isLatest() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes())))
}
} }
// Initialize new request. // Sign the request.
req, err := newRequest("PUT", targetURL, reqMetadata) if c.signature.isV4() || c.signature.isLatest() {
if err != nil { req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
return nil, err } else if c.signature.isV2() {
} req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
// by default bucket acl is set to private.
req.Set("x-amz-acl", "private")
if acl != "" {
req.Set("x-amz-acl", acl)
} }
// Return signed request.
return req, nil return req, nil
} }
@ -113,7 +152,8 @@ func (a API) makeBucketRequest(bucketName, acl, region string) (*Request, error)
// public-read - owner gets full access, all others get read access. // public-read - owner gets full access, all others get read access.
// public-read-write - owner gets full access, all others get full access too. // public-read-write - owner gets full access, all others get full access too.
// authenticated-read - owner gets full access, authenticated users get read access. // authenticated-read - owner gets full access, authenticated users get read access.
func (a API) SetBucketACL(bucketName string, acl BucketACL) error { func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
@ -121,63 +161,42 @@ func (a API) SetBucketACL(bucketName string, acl BucketACL) error {
return ErrInvalidArgument("Unrecognized ACL " + acl.String()) return ErrInvalidArgument("Unrecognized ACL " + acl.String())
} }
// Initialize a new request. // Set acl query.
req, err := a.setBucketACLRequest(bucketName, string(acl)) urlValues := make(url.Values)
urlValues.Set("acl", "")
// Add misc headers.
customHeader := make(http.Header)
if acl != "" {
customHeader.Set("x-amz-acl", acl.String())
} else {
customHeader.Set("x-amz-acl", "private")
}
// Instantiate a new request.
req, err := c.newRequest("PUT", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
customHeader: customHeader,
})
if err != nil { if err != nil {
return err return err
} }
// Initiate the request. // Initiate the request.
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
} }
if resp != nil { if resp != nil {
// if error return.
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return BodyToErrorResponse(resp.Body) return HTTPRespToErrorResponse(resp, bucketName, "")
} }
} }
// return
return nil return nil
} }
// setBucketRequestACL constructs request for SetBucketACL.
func (a API) setBucketACLRequest(bucketName, acl string) (*Request, error) {
// Set acl query.
urlValues := make(url.Values)
urlValues.Set("acl", "")
// get target URL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, "", urlValues)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
// Instantiate a new request.
req, err := newRequest("PUT", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
})
if err != nil {
return nil, err
}
// Set relevant acl.
if acl != "" {
req.Set("x-amz-acl", acl)
} else {
req.Set("x-amz-acl", "private")
}
// Return.
return req, nil
}

View File

@ -67,7 +67,7 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// //
// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload, // NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload,
// so we fall back to single PUT operation. // so we fall back to single PUT operation.
func (a API) PutObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) { func (c Client) PutObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) {
// Input validation. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -76,7 +76,7 @@ func (a API) PutObject(bucketName, objectName string, data io.ReadSeeker, size i
return 0, err return 0, err
} }
// NOTE: S3 doesn't allow anonymous multipart requests. // NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(a.endpointURL) && isAnonymousCredentials(*a.credentials) { if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if size <= -1 { if size <= -1 {
return 0, ErrorResponse{ return 0, ErrorResponse{
Code: "NotImplemented", Code: "NotImplemented",
@ -86,11 +86,11 @@ func (a API) PutObject(bucketName, objectName string, data io.ReadSeeker, size i
} }
} }
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GB in size. // Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GB in size.
return a.putAnonymous(bucketName, objectName, data, size, contentType) return c.putAnonymous(bucketName, objectName, data, size, contentType)
} }
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GB to Google Cloud Storage servers. // Current implementation will only upload a maximum of 5GB to Google Cloud Storage servers.
if isGoogleEndpoint(a.endpointURL) { if isGoogleEndpoint(c.endpointURL) {
if size <= -1 { if size <= -1 {
return 0, ErrorResponse{ return 0, ErrorResponse{
Code: "NotImplemented", Code: "NotImplemented",
@ -100,19 +100,26 @@ func (a API) PutObject(bucketName, objectName string, data io.ReadSeeker, size i
} }
} }
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GB in size. // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GB in size.
return a.putNoChecksum(bucketName, objectName, data, size, contentType) return c.putNoChecksum(bucketName, objectName, data, size, contentType)
} }
// Large file upload is initiated for uploads for input data size // Large file upload is initiated for uploads for input data size
// if its greater than 5MB or data size is negative. // if its greater than 5MB or data size is negative.
if size >= minimumPartSize || size < 0 { if size >= minimumPartSize || size < 0 {
return a.putLargeObject(bucketName, objectName, data, size, contentType) return c.putLargeObject(bucketName, objectName, data, size, contentType)
} }
return a.putSmallObject(bucketName, objectName, data, size, contentType) return c.putSmallObject(bucketName, objectName, data, size, contentType)
} }
// putNoChecksum special function used Google Cloud Storage. This special function // putNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. // is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (a API) putNoChecksum(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) { func (c Client) putNoChecksum(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxPartSize { if size > maxPartSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName) return 0, ErrEntityTooLarge(size, bucketName, objectName)
} }
@ -124,7 +131,8 @@ func (a API) putNoChecksum(bucketName, objectName string, data io.ReadSeeker, si
Size: size, Size: size,
ContentType: contentType, ContentType: contentType,
} }
if _, err := a.putObject(bucketName, objectName, putObjMetadata); err != nil { // Execute put object.
if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil {
return 0, err return 0, err
} }
return size, nil return size, nil
@ -133,12 +141,27 @@ func (a API) putNoChecksum(bucketName, objectName string, data io.ReadSeeker, si
// putAnonymous is a special function for uploading content as anonymous request. // putAnonymous is a special function for uploading content as anonymous request.
// This special function is necessary since Amazon S3 doesn't allow anonymous // This special function is necessary since Amazon S3 doesn't allow anonymous
// multipart uploads. // multipart uploads.
func (a API) putAnonymous(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) { func (c Client) putAnonymous(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) {
return a.putNoChecksum(bucketName, objectName, data, size, contentType) // Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
return c.putNoChecksum(bucketName, objectName, data, size, contentType)
} }
// putSmallObject uploads files smaller than 5 mega bytes. // putSmallObject uploads files smaller than 5 mega bytes.
func (a API) putSmallObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) { func (c Client) putSmallObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Read input data fully into buffer.
dataBytes, err := ioutil.ReadAll(data) dataBytes, err := ioutil.ReadAll(data)
if err != nil { if err != nil {
return 0, err return 0, err
@ -146,6 +169,7 @@ func (a API) putSmallObject(bucketName, objectName string, data io.ReadSeeker, s
if int64(len(dataBytes)) != size { if int64(len(dataBytes)) != size {
return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName) return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName)
} }
// Construct a new PUT object metadata.
putObjMetadata := putObjectMetadata{ putObjMetadata := putObjectMetadata{
MD5Sum: sumMD5(dataBytes), MD5Sum: sumMD5(dataBytes),
Sha256Sum: sum256(dataBytes), Sha256Sum: sum256(dataBytes),
@ -154,38 +178,49 @@ func (a API) putSmallObject(bucketName, objectName string, data io.ReadSeeker, s
ContentType: contentType, ContentType: contentType,
} }
// Single part use case, use putObject directly. // Single part use case, use putObject directly.
if _, err := a.putObject(bucketName, objectName, putObjMetadata); err != nil { if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil {
return 0, err return 0, err
} }
return size, nil return size, nil
} }
// putLargeObject uploads files bigger than 5 mega bytes. // putLargeObject uploads files bigger than 5 mega bytes.
func (a API) putLargeObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) { func (c Client) putLargeObject(bucketName, objectName string, data io.ReadSeeker, size int64, contentType string) (int64, error) {
var uploadID string // Input validation.
isRecursive := true if err := isValidBucketName(bucketName); err != nil {
for mpUpload := range a.listIncompleteUploads(bucketName, objectName, isRecursive) { return 0, err
if mpUpload.Err != nil { }
return 0, mpUpload.Err if err := isValidObjectName(objectName); err != nil {
} return 0, err
if mpUpload.Key == objectName { }
uploadID = mpUpload.UploadID // Find upload id before upload.
break uploadID, err := c.findUploadID(bucketName, objectName)
} if err != nil {
return 0, err
} }
if uploadID == "" { if uploadID == "" {
initMultipartUploadResult, err := a.initiateMultipartUpload(bucketName, objectName, contentType) // Initiate multipart upload.
initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
if err != nil { if err != nil {
return 0, err return 0, err
} }
// Save the new upload id.
uploadID = initMultipartUploadResult.UploadID uploadID = initMultipartUploadResult.UploadID
} }
// Initiate multipart upload. // Initiate multipart upload.
return a.putParts(bucketName, objectName, uploadID, data, size) return c.putParts(bucketName, objectName, uploadID, data, size)
} }
// putParts - fully managed multipart uploader, resumes where its left off at `uploadID` // putParts - fully managed multipart uploader, resumes where its left off at `uploadID`
func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeker, size int64) (int64, error) { func (c Client) putParts(bucketName, objectName, uploadID string, data io.ReadSeeker, size int64) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Cleanup any previously left stale files, as the function exits. // Cleanup any previously left stale files, as the function exits.
defer cleanupStaleTempfiles("multiparts$") defer cleanupStaleTempfiles("multiparts$")
@ -198,13 +233,17 @@ func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeke
// Starting part number. Always part '1'. // Starting part number. Always part '1'.
partNumber := 1 partNumber := 1
completeMultipartUpload := completeMultipartUpload{} completeMultipartUpload := completeMultipartUpload{}
for objPart := range a.listObjectPartsRecursive(bucketName, objectName, uploadID) { doneCh := make(chan bool, 1)
defer close(doneCh)
for objPart := range c.listObjectPartsRecursive(bucketName, objectName, uploadID, doneCh) {
if objPart.Err != nil { if objPart.Err != nil {
return 0, objPart.Err return 0, objPart.Err
} }
// Verify if there is a hole i.e one of the parts is missing // Verify if there is a hole i.e one of the parts is missing
// Break and start uploading that part. // Break and start uploading that part.
if partNumber != objPart.PartNumber { if partNumber != objPart.PartNumber {
// Close listObjectParts channel.
doneCh <- true
break break
} }
var completedPart completePart var completedPart completePart
@ -250,8 +289,8 @@ func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeke
} }
var enableSha256Sum bool var enableSha256Sum bool
// if signature V4 - enable Sha256 calculation for individual parts. // Enable Sha256 calculation if signature version '4'.
if a.credentials.Signature.isV4() { if c.signature.isV4() {
enableSha256Sum = true enableSha256Sum = true
} }
@ -262,6 +301,7 @@ func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeke
// Account for all parts uploaded simultaneously. // Account for all parts uploaded simultaneously.
wg.Add(1) wg.Add(1)
part.Number = partNumber part.Number = partNumber
// Initiate the part upload goroutine.
go func(mpQueueCh <-chan struct{}, part partMetadata, wg *sync.WaitGroup, uploadedPartsCh chan<- uploadedPart) { go func(mpQueueCh <-chan struct{}, part partMetadata, wg *sync.WaitGroup, uploadedPartsCh chan<- uploadedPart) {
defer wg.Done() defer wg.Done()
defer func() { defer func() {
@ -274,7 +314,8 @@ func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeke
} }
return return
} }
complPart, err := a.uploadPart(bucketName, objectName, uploadID, part) // execute upload part.
complPart, err := c.uploadPart(bucketName, objectName, uploadID, part)
if err != nil { if err != nil {
uploadedPartsCh <- uploadedPart{ uploadedPartsCh <- uploadedPart{
Error: err, Error: err,
@ -300,7 +341,7 @@ func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeke
} }
// Save successfully uploaded size. // Save successfully uploaded size.
totalWritten += part.Size totalWritten += part.Size
// Save successfully uploaded part metadata. // Save successfully uploaded part metadata.
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, uploadedPrt.Part) completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, uploadedPrt.Part)
} }
partNumber++ partNumber++
@ -313,68 +354,58 @@ func (a API) putParts(bucketName, objectName, uploadID string, data io.ReadSeeke
return totalWritten, ErrUnexpectedEOF(totalWritten, size, bucketName, objectName) return totalWritten, ErrUnexpectedEOF(totalWritten, size, bucketName, objectName)
} }
} }
// sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts)) sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err := a.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) _, err := c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil { if err != nil {
return totalWritten, err return totalWritten, err
} }
return totalWritten, nil return totalWritten, nil
} }
// putObjectRequest wrapper creates a new PutObject request. // putObject - add an object to a bucket.
func (a API) putObjectRequest(bucketName, objectName string, putObjMetadata putObjectMetadata) (*Request, error) { // NOTE: You must have WRITE permissions on a bucket to add an object to it.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{}) func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) {
if err != nil { // Input validation.
return nil, err if err := isValidBucketName(bucketName); err != nil {
return ObjectStat{}, err
} }
if err := isValidObjectName(objectName); err != nil {
return ObjectStat{}, err
}
if strings.TrimSpace(putObjMetadata.ContentType) == "" { if strings.TrimSpace(putObjMetadata.ContentType) == "" {
putObjMetadata.ContentType = "application/octet-stream" putObjMetadata.ContentType = "application/octet-stream"
} }
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
// Set headers. // Set headers.
putObjMetadataHeader := make(http.Header) customHeader := make(http.Header)
putObjMetadataHeader.Set("Content-Type", putObjMetadata.ContentType) customHeader.Set("Content-Type", putObjMetadata.ContentType)
// Populate request metadata. // Populate request metadata.
reqMetadata := requestMetadata{ reqMetadata := requestMetadata{
credentials: a.credentials, bucketName: bucketName,
userAgent: a.userAgent, objectName: objectName,
bucketRegion: region, customHeader: customHeader,
contentBody: putObjMetadata.ReadCloser, contentBody: putObjMetadata.ReadCloser,
contentLength: putObjMetadata.Size, contentLength: putObjMetadata.Size,
contentHeader: putObjMetadataHeader,
contentTransport: a.httpTransport,
contentSha256Bytes: putObjMetadata.Sha256Sum, contentSha256Bytes: putObjMetadata.Sha256Sum,
contentMD5Bytes: putObjMetadata.MD5Sum, contentMD5Bytes: putObjMetadata.MD5Sum,
} }
req, err := newRequest("PUT", targetURL, reqMetadata) // Initiate new request.
if err != nil { req, err := c.newRequest("PUT", reqMetadata)
return nil, err
}
return req, nil
}
// putObject - add an object to a bucket.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (a API) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) {
req, err := a.putObjectRequest(bucketName, objectName, putObjMetadata)
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectStat{}, err
} }
resp, err := req.Do() // Execute the request.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectStat{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return ObjectStat{}, BodyToErrorResponse(resp.Body) return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
var metadata ObjectStat var metadata ObjectStat
@ -385,57 +416,52 @@ func (a API) putObject(bucketName, objectName string, putObjMetadata putObjectMe
return metadata, nil return metadata, nil
} }
// initiateMultipartRequest wrapper creates a new initiateMultiPart request. // initiateMultipartUpload initiates a multipart upload and returns an upload ID.
func (a API) initiateMultipartRequest(bucketName, objectName, contentType string) (*Request, error) { func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries. // Initialize url queries.
urlValues := make(url.Values) urlValues := make(url.Values)
urlValues.Set("uploads", "") urlValues.Set("uploads", "")
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
if contentType == "" { if contentType == "" {
contentType = "application/octet-stream" contentType = "application/octet-stream"
} }
// set ContentType header. // set ContentType header.
multipartHeader := make(http.Header) customHeader := make(http.Header)
multipartHeader.Set("Content-Type", contentType) customHeader.Set("Content-Type", contentType)
reqMetadata := requestMetadata{ reqMetadata := requestMetadata{
credentials: a.credentials, bucketName: bucketName,
userAgent: a.userAgent, objectName: objectName,
bucketRegion: region, queryValues: urlValues,
contentHeader: multipartHeader, customHeader: customHeader,
contentTransport: a.httpTransport,
} }
return newRequest("POST", targetURL, reqMetadata)
}
// initiateMultipartUpload initiates a multipart upload and returns an upload ID. // Instantiate the request.
func (a API) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { req, err := c.newRequest("POST", reqMetadata)
req, err := a.initiateMultipartRequest(bucketName, objectName, contentType)
if err != nil { if err != nil {
return initiateMultipartUploadResult{}, err return initiateMultipartUploadResult{}, err
} }
resp, err := req.Do() // Execute the request.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return initiateMultipartUploadResult{}, err return initiateMultipartUploadResult{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, BodyToErrorResponse(resp.Body) return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
// Decode xml initiate multipart.
initiateMultipartUploadResult := initiateMultipartUploadResult{} initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil { if err != nil {
@ -444,8 +470,16 @@ func (a API) initiateMultipartUpload(bucketName, objectName, contentType string)
return initiateMultipartUploadResult, nil return initiateMultipartUploadResult, nil
} }
// uploadPartRequest wrapper creates a new UploadPart request. // uploadPart uploads a part in a multipart upload.
func (a API) uploadPartRequest(bucketName, objectName, uploadID string, uploadingPart partMetadata) (*Request, error) { func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partMetadata) (completePart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return completePart{}, err
}
if err := isValidObjectName(objectName); err != nil {
return completePart{}, err
}
// Get resources properly escaped and lined up before using them in http request. // Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values) urlValues := make(url.Values)
// Set part number. // Set part number.
@ -453,115 +487,88 @@ func (a API) uploadPartRequest(bucketName, objectName, uploadID string, uploadin
// Set upload id. // Set upload id.
urlValues.Set("uploadId", uploadID) urlValues.Set("uploadId", uploadID)
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
reqMetadata := requestMetadata{ reqMetadata := requestMetadata{
credentials: a.credentials, bucketName: bucketName,
userAgent: a.userAgent, objectName: objectName,
bucketRegion: region, queryValues: urlValues,
contentBody: uploadingPart.ReadCloser, contentBody: uploadingPart.ReadCloser,
contentLength: uploadingPart.Size, contentLength: uploadingPart.Size,
contentTransport: a.httpTransport,
contentSha256Bytes: uploadingPart.Sha256Sum, contentSha256Bytes: uploadingPart.Sha256Sum,
contentMD5Bytes: uploadingPart.MD5Sum, contentMD5Bytes: uploadingPart.MD5Sum,
} }
req, err := newRequest("PUT", targetURL, reqMetadata)
if err != nil {
return nil, err
}
return req, nil
}
// uploadPart uploads a part in a multipart upload. // Instantiate a request.
func (a API) uploadPart(bucketName, objectName, uploadID string, uploadingPart partMetadata) (completePart, error) { req, err := c.newRequest("PUT", reqMetadata)
req, err := a.uploadPartRequest(bucketName, objectName, uploadID, uploadingPart)
if err != nil { if err != nil {
return completePart{}, err return completePart{}, err
} }
// Initiate the request. // Execute the request.
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return completePart{}, err return completePart{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return completePart{}, BodyToErrorResponse(resp.Body) return completePart{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
cPart := completePart{} // Once successfully uploaded, return completed part.
cPart.PartNumber = uploadingPart.Number cmplPart := completePart{}
cPart.ETag = resp.Header.Get("ETag") cmplPart.PartNumber = uploadingPart.Number
return cPart, nil cmplPart.ETag = resp.Header.Get("ETag")
return cmplPart, nil
} }
// completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request. // completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
func (a API) completeMultipartUploadRequest(bucketName, objectName, uploadID string, func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
complete completeMultipartUpload) (*Request, error) { // Input validation.
if err := isValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
// Initialize url queries. // Initialize url queries.
urlValues := make(url.Values) urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID) urlValues.Set("uploadId", uploadID)
// get targetURL. // Marshal complete multipart body.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues)
if err != nil {
return nil, err
}
completeMultipartUploadBytes, err := xml.Marshal(complete) completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
contentLength: int64(completeMultipartUploadBuffer.Len()),
contentTransport: a.httpTransport,
contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
}
req, err := newRequest("POST", targetURL, reqMetadata)
if err != nil {
return nil, err
}
return req, nil
}
// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
func (a API) completeMultipartUpload(bucketName, objectName, uploadID string,
c completeMultipartUpload) (completeMultipartUploadResult, error) {
req, err := a.completeMultipartUploadRequest(bucketName, objectName, uploadID, c)
if err != nil { if err != nil {
return completeMultipartUploadResult{}, err return completeMultipartUploadResult{}, err
} }
resp, err := req.Do()
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
contentLength: int64(completeMultipartUploadBuffer.Len()),
contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return completeMultipartUploadResult{}, err return completeMultipartUploadResult{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return completeMultipartUploadResult{}, BodyToErrorResponse(resp.Body) return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
// If successful response, decode the body.
completeMultipartUploadResult := completeMultipartUploadResult{} completeMultipartUploadResult := completeMultipartUploadResult{}
err = xmlDecoder(resp.Body, &completeMultipartUploadResult) err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
if err != nil { if err != nil {

View File

@ -1,3 +1,19 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio package minio
import ( import (
@ -9,100 +25,45 @@ import (
// //
// All objects (including all object versions and delete markers). // All objects (including all object versions and delete markers).
// in the bucket must be deleted before successfully attempting this request. // in the bucket must be deleted before successfully attempting this request.
func (a API) RemoveBucket(bucketName string) error { func (c Client) RemoveBucket(bucketName string) error {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
req, err := a.removeBucketRequest(bucketName) req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName,
})
if err != nil { if err != nil {
return err return err
} }
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusNoContent { if resp.StatusCode != http.StatusNoContent {
var errorResponse ErrorResponse return HTTPRespToErrorResponse(resp, bucketName, "")
switch resp.StatusCode {
case http.StatusNotFound:
errorResponse = ErrorResponse{
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusConflict:
errorResponse = ErrorResponse{
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default:
errorResponse = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
return errorResponse
} }
} }
return nil return nil
} }
// removeBucketRequest constructs a new request for RemoveBucket.
// The DELETE operates on the bare bucket URL with no query parameters.
func (a API) removeBucketRequest(bucketName string) (*Request, error) {
	// Resolve the fully qualified bucket URL.
	bucketURL, err := getTargetURL(a.endpointURL, bucketName, "", url.Values{})
	if err != nil {
		return nil, err
	}
	// Look up the region this bucket belongs to for request signing.
	bucketRegion, err := a.getRegion(bucketName)
	if err != nil {
		return nil, err
	}
	// Build and return the signed DELETE request.
	return newRequest("DELETE", bucketURL, requestMetadata{
		credentials:      a.credentials,
		userAgent:        a.userAgent,
		bucketRegion:     bucketRegion,
		contentTransport: a.httpTransport,
	})
}
// RemoveObject remove an object from a bucket. // RemoveObject remove an object from a bucket.
func (a API) RemoveObject(bucketName, objectName string) error { func (c Client) RemoveObject(bucketName, objectName string) error {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return err return err
} }
req, err := a.removeObjectRequest(bucketName, objectName) req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
if err != nil { if err != nil {
return err return err
} }
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
@ -113,105 +74,67 @@ func (a API) RemoveObject(bucketName, objectName string) error {
return nil return nil
} }
// removeObjectRequest constructs a request for RemoveObject, a DELETE
// against the object's URL with no query parameters.
func (a API) removeObjectRequest(bucketName, objectName string) (*Request, error) {
	// Resolve the fully qualified object URL.
	objectURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{})
	if err != nil {
		return nil, err
	}
	// Look up the bucket's region, required for signing.
	bucketRegion, err := a.getRegion(bucketName)
	if err != nil {
		return nil, err
	}
	// Build the signed DELETE request; any constructor error propagates
	// directly to the caller.
	return newRequest("DELETE", objectURL, requestMetadata{
		credentials:      a.credentials,
		userAgent:        a.userAgent,
		bucketRegion:     bucketRegion,
		contentTransport: a.httpTransport,
	})
}
// RemoveIncompleteUpload aborts a partially uploaded object. // RemoveIncompleteUpload aborts a partially uploaded object.
// Requires explicit authentication, no anonymous requests are allowed for multipart API. // Requires explicit authentication, no anonymous requests are allowed for multipart API.
func (a API) RemoveIncompleteUpload(bucketName, objectName string) <-chan error { func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
errorCh := make(chan error) // Validate input arguments.
go a.removeIncompleteUploadInRoutine(bucketName, objectName, errorCh)
return errorCh
}
// removeIncompleteUploadInRoutine iterates over all incomplete uploads
// and removes only input object name.
func (a API) removeIncompleteUploadInRoutine(bucketName, objectName string, errorCh chan<- error) {
defer close(errorCh)
// Validate incoming bucket name.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
errorCh <- err return err
return
} }
// Validate incoming object name.
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
errorCh <- err return err
return
} }
// List all incomplete uploads recursively. errorCh := make(chan error)
for mpUpload := range a.listIncompleteUploads(bucketName, objectName, true) { go func(errorCh chan<- error) {
if objectName == mpUpload.Key { defer close(errorCh)
err := a.abortMultipartUpload(bucketName, mpUpload.Key, mpUpload.UploadID) // Find multipart upload id of the object.
uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
errorCh <- err
return
}
if uploadID != "" {
// If uploadID is not an empty string, initiate the request.
err := c.abortMultipartUpload(bucketName, objectName, uploadID)
if err != nil { if err != nil {
errorCh <- err errorCh <- err
return return
} }
return return
} }
}(errorCh)
err, ok := <-errorCh
if ok && err != nil {
return err
} }
return nil
} }
// abortMultipartUploadRequest wrapper creates a new AbortMultipartUpload request. // abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted.
func (a API) abortMultipartUploadRequest(bucketName, objectName, uploadID string) (*Request, error) { func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Initialize url queries. // Initialize url queries.
urlValues := make(url.Values) urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID) urlValues.Set("uploadId", uploadID)
// get targetURL. // Instantiate a new DELETE request.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, urlValues) req, err := c.newRequest("DELETE", requestMetadata{
if err != nil { bucketName: bucketName,
return nil, err objectName: objectName,
} queryValues: urlValues,
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
req, err := newRequest("DELETE", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
}) })
if err != nil {
return nil, err
}
return req, nil
}
// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted.
func (a API) abortMultipartUpload(bucketName, objectName, uploadID string) error {
req, err := a.abortMultipartUploadRequest(bucketName, objectName, uploadID)
if err != nil { if err != nil {
return err return err
} }
resp, err := req.Do() // execute the request.
resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
@ -222,6 +145,7 @@ func (a API) abortMultipartUpload(bucketName, objectName, uploadID string) error
var errorResponse ErrorResponse var errorResponse ErrorResponse
switch resp.StatusCode { switch resp.StatusCode {
case http.StatusNotFound: case http.StatusNotFound:
// This is needed specifically for Abort and it cannot be converged.
errorResponse = ErrorResponse{ errorResponse = ErrorResponse{
Code: "NoSuchUpload", Code: "NoSuchUpload",
Message: "The specified multipart upload does not exist.", Message: "The specified multipart upload does not exist.",
@ -231,26 +155,8 @@ func (a API) abortMultipartUpload(bucketName, objectName, uploadID string) error
HostID: resp.Header.Get("x-amz-id-2"), HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
} }
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default: default:
errorResponse = ErrorResponse{ return HTTPRespToErrorResponse(resp, bucketName, objectName)
Code: resp.Status,
Message: "Unknown error, please report this at https://github.com/minio/minio-go-legacy/issues.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
} }
return errorResponse return errorResponse
} }

View File

@ -1,141 +1,76 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio package minio
import ( import (
"net/http" "net/http"
"net/url"
"strconv" "strconv"
"strings" "strings"
"time" "time"
) )
// BucketExists verify if bucket exists and you have permission to access it. // BucketExists verify if bucket exists and you have permission to access it.
func (a API) BucketExists(bucketName string) error { func (c Client) BucketExists(bucketName string) error {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
req, err := a.bucketExistsRequest(bucketName) req, err := c.newRequest("HEAD", requestMetadata{
bucketName: bucketName,
})
if err != nil { if err != nil {
return err return err
} }
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
// Head has no response body, handle it. return HTTPRespToErrorResponse(resp, bucketName, "")
var errorResponse ErrorResponse
switch resp.StatusCode {
case http.StatusNotFound:
errorResponse = ErrorResponse{
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default:
errorResponse = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
return errorResponse
} }
} }
return nil return nil
} }
// bucketExistsRequest constructs a new request for BucketExists.
func (a API) bucketExistsRequest(bucketName string) (*Request, error) {
targetURL, err := getTargetURL(a.endpointURL, bucketName, "", url.Values{})
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
return newRequest("HEAD", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
})
}
// StatObject verifies if object exists and you have permission to access. // StatObject verifies if object exists and you have permission to access.
func (a API) StatObject(bucketName, objectName string) (ObjectStat, error) { func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return ObjectStat{}, err return ObjectStat{}, err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return ObjectStat{}, err return ObjectStat{}, err
} }
req, err := a.statObjectRequest(bucketName, objectName) // Instantiate a new request.
req, err := c.newRequest("HEAD", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectStat{}, err
} }
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectStat{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
var errorResponse ErrorResponse return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
switch resp.StatusCode {
case http.StatusNotFound:
errorResponse = ErrorResponse{
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default:
errorResponse = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
return ObjectStat{}, errorResponse
} }
} }
md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
@ -143,7 +78,7 @@ func (a API) StatObject(bucketName, objectName string) (ObjectStat, error) {
if err != nil { if err != nil {
return ObjectStat{}, ErrorResponse{ return ObjectStat{}, ErrorResponse{
Code: "InternalError", Code: "InternalError",
Message: "Content-Length is invalid, please report this issue at https://github.com/minio/minio-go/issues.", Message: "Content-Length is invalid. " + reportIssue,
BucketName: bucketName, BucketName: bucketName,
Key: objectName, Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"), RequestID: resp.Header.Get("x-amz-request-id"),
@ -155,7 +90,7 @@ func (a API) StatObject(bucketName, objectName string) (ObjectStat, error) {
if err != nil { if err != nil {
return ObjectStat{}, ErrorResponse{ return ObjectStat{}, ErrorResponse{
Code: "InternalError", Code: "InternalError",
Message: "Last-Modified time format is invalid, please report this issue at https://github.com/minio/minio-go/issues.", Message: "Last-Modified time format is invalid. " + reportIssue,
BucketName: bucketName, BucketName: bucketName,
Key: objectName, Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"), RequestID: resp.Header.Get("x-amz-request-id"),
@ -176,32 +111,3 @@ func (a API) StatObject(bucketName, objectName string) (ObjectStat, error) {
objectStat.ContentType = contentType objectStat.ContentType = contentType
return objectStat, nil return objectStat, nil
} }
// statObjectRequest wrapper creates a new headObject request.
func (a API) statObjectRequest(bucketName, objectName string) (*Request, error) {
// get targetURL.
targetURL, err := getTargetURL(a.endpointURL, bucketName, objectName, url.Values{})
if err != nil {
return nil, err
}
// get bucket region.
region, err := a.getRegion(bucketName)
if err != nil {
return nil, err
}
// Instantiate a new request.
req, err := newRequest("HEAD", targetURL, requestMetadata{
credentials: a.credentials,
userAgent: a.userAgent,
bucketRegion: region,
contentTransport: a.httpTransport,
})
if err != nil {
return nil, err
}
// Return new request.
return req, nil
}

324
api.go
View File

@ -17,125 +17,130 @@
package minio package minio
import ( import (
"encoding/base64"
"encoding/hex"
"io" "io"
"net/http" "net/http"
"net/url" "net/url"
"runtime"
"time" "time"
) )
// API implements Amazon S3 compatible methods. // Client implements Amazon S3 compatible methods.
type API struct { type Client struct {
/// Standard options.
accessKeyID string // AccessKeyID required for authorized requests.
secretAccessKey string // SecretAccessKey required for authorized requests.
signature SignatureType // Choose a signature type if necessary.
anonymous bool // Set to 'true' if Client has no access and secret keys.
// User supplied. // User supplied.
userAgent string appInfo struct {
credentials *clientCredentials appName string
appVersion string
}
endpointURL *url.URL endpointURL *url.URL
// This http transport is usually needed for debugging OR to add your own
// custom TLS certificates on the client transport, for custom CA's and
// certs which are not part of standard certificate authority.
httpTransport http.RoundTripper
// Needs allocation. // Needs allocation.
bucketRgnC *bucketRegionCache httpClient *http.Client
bucketLocCache *bucketLocationCache
} }
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "0.2.5"
)
// User Agent should always following the below style.
// Please open an issue to discuss any new changes here.
//
// Minio (OS; ARCH) LIB/VER APP/VER
const (
libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
// NewV2 - instantiate minio client with Amazon S3 signature version '2' compatiblity. // NewV2 - instantiate minio client with Amazon S3 signature version '2' compatiblity.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, inSecure bool) (API, error) { func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
// construct endpoint. clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
endpointURL, err := getEndpointURL(endpoint, inSecure)
if err != nil { if err != nil {
return API{}, err return nil, err
} }
// Set to use signature version '2'.
// create a new client Config. clnt.signature = SignatureV2
credentials := &clientCredentials{} return clnt, nil
credentials.AccessKeyID = accessKeyID
credentials.SecretAccessKey = secretAccessKey
credentials.Signature = SignatureV2
// instantiate new API.
api := API{
// Save for lower level calls.
userAgent: libraryUserAgent,
credentials: credentials,
endpointURL: endpointURL,
// Allocate.
bucketRgnC: newBucketRegionCache(),
}
return api, nil
} }
// NewV4 - instantiate minio client with Amazon S3 signature version '4' compatibility. // NewV4 - instantiate minio client with Amazon S3 signature version '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, inSecure bool) (API, error) { func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
// construct endpoint. clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
endpointURL, err := getEndpointURL(endpoint, inSecure)
if err != nil { if err != nil {
return API{}, err return nil, err
} }
// Set to use signature version '4'.
// create a new client Config. clnt.signature = SignatureV4
credentials := &clientCredentials{} return clnt, nil
credentials.AccessKeyID = accessKeyID
credentials.SecretAccessKey = secretAccessKey
credentials.Signature = SignatureV4
// instantiate new API.
api := API{
// Save for lower level calls.
userAgent: libraryUserAgent,
credentials: credentials,
endpointURL: endpointURL,
// Allocate.
bucketRgnC: newBucketRegionCache(),
}
return api, nil
} }
// New - instantiate minio client API, adds automatic verification of signature. // New - instantiate minio client Client, adds automatic verification of signature.
func New(endpoint string, accessKeyID, secretAccessKey string, inSecure bool) (API, error) { func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
// construct endpoint. clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
endpointURL, err := getEndpointURL(endpoint, inSecure)
if err != nil { if err != nil {
return API{}, err return nil, err
} }
// create a new client Config.
credentials := &clientCredentials{}
credentials.AccessKeyID = accessKeyID
credentials.SecretAccessKey = secretAccessKey
// Google cloud storage should be set to signature V2, force it if not. // Google cloud storage should be set to signature V2, force it if not.
if isGoogleEndpoint(endpointURL) { if isGoogleEndpoint(clnt.endpointURL) {
credentials.Signature = SignatureV2 clnt.signature = SignatureV2
} }
// If Amazon S3 set to signature v2. // If Amazon S3 set to signature v2.
if isAmazonEndpoint(endpointURL) { if isAmazonEndpoint(clnt.endpointURL) {
credentials.Signature = SignatureV4 clnt.signature = SignatureV4
}
return clnt, nil
}
func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, insecure)
if err != nil {
return nil, err
} }
// instantiate new API. // instantiate new Client.
api := API{ clnt := new(Client)
// Save for lower level calls. clnt.accessKeyID = accessKeyID
userAgent: libraryUserAgent, clnt.secretAccessKey = secretAccessKey
credentials: credentials, if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
endpointURL: endpointURL, clnt.anonymous = true
// Allocate.
bucketRgnC: newBucketRegionCache(),
} }
return api, nil
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{}
clnt.bucketLocCache = newBucketLocationCache()
// Return.
return clnt, nil
} }
// SetAppInfo - add application details to user agent. // SetAppInfo - add application details to user agent.
func (a *API) SetAppInfo(appName string, appVersion string) { func (c *Client) SetAppInfo(appName string, appVersion string) {
// if app name and version is not set, we do not a new user agent. // if app name and version is not set, we do not a new user agent.
if appName != "" && appVersion != "" { if appName != "" && appVersion != "" {
appUserAgent := appName + "/" + appVersion c.appInfo = struct {
a.userAgent = libraryUserAgent + " " + appUserAgent appName string
appVersion string
}{}
c.appInfo.appName = appName
c.appInfo.appVersion = appVersion
} }
} }
// SetCustomTransport - set new custom transport. // SetCustomTransport - set new custom transport.
func (a *API) SetCustomTransport(customHTTPTransport http.RoundTripper) { func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport ``http.DefaultTransport``. // Set this to override default transport ``http.DefaultTransport``.
// //
// This transport is usually needed for debugging OR to add your own // This transport is usually needed for debugging OR to add your own
@ -149,11 +154,160 @@ func (a *API) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// } // }
// api.SetTransport(tr) // api.SetTransport(tr)
// //
a.httpTransport = customHTTPTransport if c.httpClient != nil {
c.httpClient.Transport = customHTTPTransport
}
} }
// CloudStorageAPI - Cloud Storage API interface. // requestMetadata - is container for all the values to make a request.
type CloudStorageAPI interface { type requestMetadata struct {
// If set newRequest presigns the URL.
presignURL bool
// User supplied.
bucketName string
objectName string
queryValues url.Values
customHeader http.Header
expires int64
// Generated by our internal code.
contentBody io.ReadCloser
contentLength int64
contentSha256Bytes []byte
contentMD5Bytes []byte
}
func (c Client) newRequest(method string, metadata requestMetadata) (*http.Request, error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = "POST"
}
// construct a new target URL.
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.queryValues)
if err != nil {
return nil, err
}
// get a new HTTP request for the method.
req, err := http.NewRequest(method, targetURL.String(), nil)
if err != nil {
return nil, err
}
// Gather location only if bucketName is present.
location := "us-east-1" // Default all other requests to "us-east-1".
if metadata.bucketName != "" {
location, err = c.getBucketLocation(metadata.bucketName)
if err != nil {
return nil, err
}
}
// If presigned request, return quickly.
if metadata.expires != 0 {
if c.anonymous {
return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
}
if c.signature.isV2() {
// Presign URL with signature v2.
req = PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else {
// Presign URL with signature v4.
req = PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
}
return req, nil
}
// Set content body if available.
if metadata.contentBody != nil {
req.Body = metadata.contentBody
}
// set UserAgent for the request.
c.setUserAgent(req)
// Set all headers.
for k, v := range metadata.customHeader {
req.Header.Set(k, v[0])
}
// set incoming content-length.
if metadata.contentLength > 0 {
req.ContentLength = metadata.contentLength
}
// Set sha256 sum only for non anonymous credentials.
if !c.anonymous {
// set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() || c.signature.isLatest() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
if metadata.contentSha256Bytes != nil {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes))
}
}
}
// set md5Sum for content protection.
if metadata.contentMD5Bytes != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
// Sign the request if not anonymous.
if !c.anonymous {
if c.signature.isV2() {
// Add signature version '2' authorization header.
req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
} else if c.signature.isV4() || c.signature.isLatest() {
// Add signature version '4' authorization header.
req = SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
}
}
// return request.
return req, nil
}
func (c Client) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent)
if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
}
}
func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) {
urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/"
// Make URL only if bucketName is available, otherwise use the endpoint URL.
if bucketName != "" {
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support this.
if isVirtualHostSupported(c.endpointURL) {
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/"
if objectName != "" {
urlStr = urlStr + urlEncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName
if objectName != "" {
urlStr = urlStr + "/" + urlEncodePath(objectName)
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
urlStr = urlStr + "?" + queryValues.Encode()
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
return u, nil
}
// CloudStorageClient - Cloud Storage Client interface.
type CloudStorageClient interface {
// Bucket Read/Write/Stat operations. // Bucket Read/Write/Stat operations.
MakeBucket(bucket string, cannedACL BucketACL, location string) error MakeBucket(bucket string, cannedACL BucketACL, location string) error
BucketExists(bucket string) error BucketExists(bucket string) error
@ -161,7 +315,7 @@ type CloudStorageAPI interface {
SetBucketACL(bucket string, cannedACL BucketACL) error SetBucketACL(bucket string, cannedACL BucketACL) error
GetBucketACL(bucket string) (BucketACL, error) GetBucketACL(bucket string) (BucketACL, error)
ListBuckets() <-chan BucketStat ListBuckets() ([]BucketStat, error)
ListObjects(bucket, prefix string, recursive bool) <-chan ObjectStat ListObjects(bucket, prefix string, recursive bool) <-chan ObjectStat
ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStat ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStat
@ -171,7 +325,7 @@ type CloudStorageAPI interface {
PutObject(bucket, object string, data io.ReadSeeker, size int64, contentType string) (int64, error) PutObject(bucket, object string, data io.ReadSeeker, size int64, contentType string) (int64, error)
StatObject(bucket, object string) (ObjectStat, error) StatObject(bucket, object string) (ObjectStat, error)
RemoveObject(bucket, object string) error RemoveObject(bucket, object string) error
RemoveIncompleteUpload(bucket, object string) <-chan error RemoveIncompleteUpload(bucket, object string) error
// Presigned operations. // Presigned operations.
PresignedGetObject(bucket, object string, expires time.Duration) (string, error) PresignedGetObject(bucket, object string, expires time.Duration) (string, error)

View File

@ -1,6 +1,8 @@
package minio_test package minio_test
import ( import (
"bytes"
"io/ioutil"
"math/rand" "math/rand"
"testing" "testing"
"time" "time"
@ -33,29 +35,32 @@ func randString(n int, src rand.Source) string {
} }
func TestFunctional(t *testing.T) { func TestFunctional(t *testing.T) {
a, err := minio.New("play.minio.io:9002", c, err := minio.New("play.minio.io:9002",
"Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// Set user agent.
c.SetAppInfo("Test", "0.1.0")
bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
err = a.MakeBucket(bucketName, "private", "us-east-1") err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
err = a.BucketExists(bucketName) err = c.BucketExists(bucketName)
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
err = a.SetBucketACL(bucketName, "public-read-write") err = c.SetBucketACL(bucketName, "public-read-write")
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
acl, err := a.GetBucketACL(bucketName) acl, err := c.GetBucketACL(bucketName)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
@ -63,18 +68,46 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error:", acl) t.Fatal("Error:", acl)
} }
for b := range a.ListBuckets() { _, err = c.ListBuckets()
if b.Err != nil {
t.Fatal("Error:", b.Err)
}
}
err = a.RemoveBucket(bucketName)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
err = a.RemoveBucket("bucket1") objectName := bucketName + "Minio"
readSeeker := bytes.NewReader([]byte("Hello World!"))
n, err := c.PutObject(bucketName, objectName, readSeeker, int64(readSeeker.Len()), "")
if err != nil {
t.Fatal("Error: ", err)
}
if n != int64(len([]byte("Hello World!"))) {
t.Fatal("Error: bad length ", n, readSeeker.Len())
}
newReadSeeker, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err := ioutil.ReadAll(newReadSeeker)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, []byte("Hello World!")) {
t.Fatal("Error: bytes invalid.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket("bucket1")
if err == nil { if err == nil {
t.Fatal("Error:") t.Fatal("Error:")
} }

View File

@ -1,173 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
import (
"bytes"
"io"
"net/http"
"strconv"
"time"
)
// bucketHandler is an http.Handler that verifies bucket
// responses and validates incoming requests.
type bucketHandler struct {
resource string
}
func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
switch {
case r.Method == "GET":
switch {
case path == "/":
response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><ListAllMyBucketsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Buckets><Bucket><Name>bucket</Name><CreationDate>2015-05-20T23:05:09.230Z</CreationDate></Bucket></Buckets><Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner></ListAllMyBucketsResult>")
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
case path == h.resource:
_, ok := r.URL.Query()["acl"]
if ok {
response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><AccessControlPolicy><Owner><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>CustomersName@amazon.com</DisplayName></Owner><AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>CustomersName@amazon.com</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>")
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
return
}
fallthrough
case path == h.resource:
response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Contents><ETag>\"259d04a13802ae09c7e41be50ccc6baa\"</ETag><Key>object</Key><LastModified>2015-05-21T18:24:21.097Z</LastModified><Size>22061</Size><Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner><StorageClass>STANDARD</StorageClass></Contents><Delimiter></Delimiter><EncodingType></EncodingType><IsTruncated>false</IsTruncated><Marker></Marker><MaxKeys>1000</MaxKeys><Name>testbucket</Name><NextMarker></NextMarker><Prefix></Prefix></ListBucketResult>")
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
}
case r.Method == "PUT":
switch {
case path == h.resource:
_, ok := r.URL.Query()["acl"]
if ok {
switch r.Header.Get("x-amz-acl") {
case "public-read-write":
fallthrough
case "public-read":
fallthrough
case "private":
fallthrough
case "authenticated-read":
w.WriteHeader(http.StatusOK)
return
default:
w.WriteHeader(http.StatusNotImplemented)
return
}
}
w.WriteHeader(http.StatusOK)
default:
w.WriteHeader(http.StatusBadRequest)
}
case r.Method == "HEAD":
switch {
case path == h.resource:
w.WriteHeader(http.StatusOK)
default:
w.WriteHeader(http.StatusForbidden)
}
case r.Method == "DELETE":
switch {
case path != h.resource:
w.WriteHeader(http.StatusNotFound)
default:
h.resource = ""
w.WriteHeader(http.StatusNoContent)
}
}
}
// objectHandler is an http.Handler that verifies object responses
// and validates incoming requests.
type objectHandler struct {
	// resource is the URL path of the single object this mock serves
	// (e.g. "/bucket/object"); HEAD/GET/DELETE compare it to r.URL.Path.
	resource string
	// data is the expected object payload: PUT bodies are checked
	// against it, and GET streams it back to the client.
	data []byte
}
// ServeHTTP implements a minimal mock of the S3 single-object API,
// dispatching on the request method:
//
//	PUT    - reads exactly Content-Length bytes and requires the body to
//	         equal h.data; replies with a canned ETag on success.
//	HEAD   - replies with the object's metadata when the path matches.
//	POST   - answers "?uploads" with a canned InitiateMultipartUpload XML.
//	GET    - answers "?uploadId" with a canned ListParts XML; otherwise
//	         streams h.data for the matching path.
//	DELETE - clears the handler's fields and replies 204 No Content.
//
// NOTE(review): ServeHTTP has a value receiver, so the DELETE branch's
// writes to h.resource and h.data mutate a per-call copy and do not
// persist across requests — confirm whether that reset is intended.
func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch {
	case r.Method == "PUT":
		// Require a parseable Content-Length so we know how much to read.
		length, err := strconv.Atoi(r.Header.Get("Content-Length"))
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		// Read exactly the advertised number of bytes from the body.
		var buffer bytes.Buffer
		_, err = io.CopyN(&buffer, r.Body, int64(length))
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// The uploaded payload must match the expected object data.
		if !bytes.Equal(h.data, buffer.Bytes()) {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
		w.WriteHeader(http.StatusOK)
	case r.Method == "HEAD":
		if r.URL.Path != h.resource {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.Header().Set("Content-Length", strconv.Itoa(len(h.data)))
		w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
		w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
		w.WriteHeader(http.StatusOK)
	case r.Method == "POST":
		// "?uploads" initiates a multipart upload; reply with a fixed
		// upload ID that the GET "?uploadId" branch below recognizes.
		_, ok := r.URL.Query()["uploads"]
		if ok {
			response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Bucket>example-bucket</Bucket><Key>object</Key><UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId></InitiateMultipartUploadResult>")
			w.Header().Set("Content-Length", strconv.Itoa(len(response)))
			w.Write(response)
			return
		}
	case r.Method == "GET":
		// "?uploadId" lists parts of an in-progress multipart upload.
		_, ok := r.URL.Query()["uploadId"]
		if ok {
			uploadID := r.URL.Query().Get("uploadId")
			// Only the canned upload ID issued by POST above is known.
			if uploadID != "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA" {
				w.WriteHeader(http.StatusNotFound)
				return
			}
			response := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Bucket>example-bucket</Bucket><Key>example-object</Key><UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId><Initiator><ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID><DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName></Initiator><Owner><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>someName</DisplayName></Owner><StorageClass>STANDARD</StorageClass><PartNumberMarker>1</PartNumberMarker><NextPartNumberMarker>3</NextPartNumberMarker><MaxParts>2</MaxParts><IsTruncated>true</IsTruncated><Part><PartNumber>2</PartNumber><LastModified>2010-11-10T20:48:34.000Z</LastModified><ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag><Size>10485760</Size></Part><Part><PartNumber>3</PartNumber><LastModified>2010-11-10T20:48:33.000Z</LastModified><ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag><Size>10485760</Size></Part></ListPartsResult>")
			w.Header().Set("Content-Length", strconv.Itoa(len(response)))
			w.Write(response)
			return
		}
		if r.URL.Path != h.resource {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		// Plain GET: send metadata headers and stream the object data.
		w.Header().Set("Content-Length", strconv.Itoa(len(h.data)))
		w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
		w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
		w.WriteHeader(http.StatusOK)
		io.Copy(w, bytes.NewReader(h.data))
	case r.Method == "DELETE":
		if r.URL.Path != h.resource {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		h.resource = ""
		h.data = nil
		w.WriteHeader(http.StatusNoContent)
	}
}

View File

@ -22,19 +22,19 @@ import (
) )
func TestSignature(t *testing.T) { func TestSignature(t *testing.T) {
credentials := clientCredentials{} clnt := Client{}
if !credentials.Signature.isLatest() { if !clnt.signature.isLatest() {
t.Fatal("Error") t.Fatal("Error")
} }
credentials.Signature = SignatureV2 clnt.signature = SignatureV2
if !credentials.Signature.isV2() { if !clnt.signature.isV2() {
t.Fatal("Error") t.Fatal("Error")
} }
if credentials.Signature.isV4() { if clnt.signature.isV4() {
t.Fatal("Error") t.Fatal("Error")
} }
credentials.Signature = SignatureV4 clnt.signature = SignatureV4
if !credentials.Signature.isV4() { if !clnt.signature.isV4() {
t.Fatal("Error") t.Fatal("Error")
} }
} }

View File

@ -1,294 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
import (
"bytes"
"io"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/minio/minio-go"
)
// TestBucketOperations exercises the bucket-level client API (make,
// exists, ACL get/set, list buckets/objects, remove) against a mocked
// S3 endpoint, checking both the success and failure responses.
func TestBucketOperations(t *testing.T) {
	handler := bucketHandler{
		resource: "/bucket",
	}
	server := httptest.NewServer(handler)
	defer server.Close()
	endpoint, err := url.Parse(server.URL)
	if err != nil {
		t.Fatal("Error:", err)
	}
	clnt, err := minio.New(endpoint.Host, "", "", true)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if err = clnt.MakeBucket("bucket", "private", ""); err != nil {
		t.Fatal("Error:", err)
	}
	if err = clnt.BucketExists("bucket"); err != nil {
		t.Fatal("Error:", err)
	}
	// A bucket the mock does not know about must be denied.
	err = clnt.BucketExists("bucket1")
	if err == nil {
		t.Fatal("Error:", err)
	}
	if err.Error() != "Access Denied." {
		t.Fatal("Error")
	}
	if err = clnt.SetBucketACL("bucket", "public-read-write"); err != nil {
		t.Fatal("Error:", err)
	}
	acl, err := clnt.GetBucketACL("bucket")
	if err != nil {
		t.Fatal("Error:", err)
	}
	if acl != minio.BucketACL("private") {
		t.Fatal("Error")
	}
	// The mock advertises exactly one bucket named "bucket".
	for b := range clnt.ListBuckets() {
		if b.Err != nil {
			t.Fatal("Error:", b.Err.Error())
		}
		if b.Name != "bucket" {
			t.Fatal("Error")
		}
	}
	// The mock advertises exactly one object named "object".
	for o := range clnt.ListObjects("bucket", "", true) {
		if o.Err != nil {
			t.Fatal("Error:", o.Err.Error())
		}
		if o.Key != "object" {
			t.Fatal("Error")
		}
	}
	if err = clnt.RemoveBucket("bucket"); err != nil {
		t.Fatal("Error:", err)
	}
	// Removing an unknown bucket must fail with the canned message.
	err = clnt.RemoveBucket("bucket1")
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "The specified bucket does not exist." {
		t.Fatal("Error:", err)
	}
}
// TestBucketOperationsFail verifies that bucket operations reject
// syntactically invalid bucket names before any request is made.
func TestBucketOperationsFail(t *testing.T) {
	bucket := bucketHandler{
		resource: "/bucket",
	}
	server := httptest.NewServer(bucket)
	defer server.Close()
	u, err := url.Parse(server.URL)
	if err != nil {
		t.Fatal("Error:", err)
	}
	a, err := minio.New(u.Host, "", "", true)
	if err != nil {
		t.Fatal("Error:", err)
	}
	// Each call below uses an invalid bucket name and must return an error.
	err = a.MakeBucket("bucket$$$", "private", "")
	if err == nil {
		t.Fatal("Error:", err)
	}
	err = a.BucketExists("bucket.")
	if err == nil {
		t.Fatal("Error:", err)
	}
	err = a.SetBucketACL("bucket-.", "public-read-write")
	if err == nil {
		t.Fatal("Error")
	}
	_, err = a.GetBucketACL("bucket??")
	if err == nil {
		t.Fatal("Error:", err)
	}
	for o := range a.ListObjects("bucket??", "", true) {
		// Fixed: the original called o.Err.Error() inside the
		// o.Err == nil branch, which panics with a nil pointer
		// dereference instead of reporting a test failure.
		if o.Err == nil {
			t.Fatal("Error: expected non-nil error for invalid bucket name")
		}
	}
	err = a.RemoveBucket("bucket??")
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "Bucket name contains invalid characters." {
		t.Fatal("Error:", err)
	}
}
// TestObjectOperations exercises the object-level client API (put,
// stat, get, remove) against a mocked S3 endpoint.
func TestObjectOperations(t *testing.T) {
	handler := objectHandler{
		resource: "/bucket/object",
		data:     []byte("Hello, World"),
	}
	server := httptest.NewServer(handler)
	defer server.Close()
	endpoint, err := url.Parse(server.URL)
	if err != nil {
		t.Fatal("Error:", err)
	}
	clnt, err := minio.New(endpoint.Host, "", "", true)
	if err != nil {
		t.Fatal("Error:", err)
	}
	// Upload the payload and confirm the full length was written.
	payload := []byte("Hello, World")
	n, err := clnt.PutObject("bucket", "object", bytes.NewReader(payload), int64(len(payload)), "")
	if err != nil {
		t.Fatal("Error:", err)
	}
	if n != int64(len(payload)) {
		t.Fatal("Error")
	}
	// Stat must echo the key and the mock's canned ETag.
	stat, err := clnt.StatObject("bucket", "object")
	if err != nil {
		t.Fatal("Error:", err)
	}
	if stat.Key != "object" {
		t.Fatal("Error")
	}
	if stat.ETag != "9af2f8218b150c351ad802c6f3d66abe" {
		t.Fatal("Error")
	}
	// Download the object and compare against what was uploaded.
	reader, err := clnt.GetObject("bucket", "object")
	if err != nil {
		t.Fatal("Error:", err)
	}
	var got bytes.Buffer
	if _, err = io.Copy(&got, reader); err != nil {
		t.Fatal("Error:", err)
	}
	if !bytes.Equal(got.Bytes(), payload) {
		t.Fatal("Error")
	}
	if err = clnt.RemoveObject("bucket", "object"); err != nil {
		t.Fatal("Error:", err)
	}
}
// TestPresignedURL verifies presigned GET URL generation: anonymous
// credentials must be rejected, valid credentials must yield a non-empty
// URL, and out-of-range expiries must fail.
func TestPresignedURL(t *testing.T) {
	handler := objectHandler{
		resource: "/bucket/object",
		data:     []byte("Hello, World"),
	}
	server := httptest.NewServer(handler)
	defer server.Close()
	endpoint, err := url.Parse(server.URL)
	if err != nil {
		t.Fatal("Error:", err)
	}
	clnt, err := minio.New(endpoint.Host, "", "", true)
	if err != nil {
		t.Fatal("Error:", err)
	}
	// should error out for invalid access keys.
	if _, err = clnt.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second); err == nil {
		t.Fatal("Error:", err)
	}
	// With real credentials a non-empty URL must be produced.
	clnt, err = minio.New(endpoint.Host, "accessKey", "secretKey", true)
	if err != nil {
		t.Fatal("Error:", err)
	}
	presignedURL, err := clnt.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if presignedURL == "" {
		t.Fatal("Error")
	}
	// Zero and over-the-limit (604801s) expiries are both rejected.
	if _, err = clnt.PresignedGetObject("bucket", "object", time.Duration(0)*time.Second); err == nil {
		t.Fatal("Error")
	}
	if _, err = clnt.PresignedGetObject("bucket", "object", time.Duration(604801)*time.Second); err == nil {
		t.Fatal("Error")
	}
}
// TestErrorResponse verifies that an S3 error XML body decodes into an
// error whose ErrorResponse fields and XML/JSON renderings are populated.
func TestErrorResponse(t *testing.T) {
	errorXML := []byte("<?xml version=\"1.0\" encoding=\"UTF-8\"?><Error><Code>AccessDenied</Code><Message>Access Denied</Message><BucketName>mybucket</BucketName><Key>myphoto.jpg</Key><RequestId>F19772218238A85A</RequestId><HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId></Error>")
	// Decoding must yield a non-nil error carrying the message text.
	err := minio.BodyToErrorResponse(bytes.NewReader(errorXML))
	if err == nil {
		t.Fatal("Error")
	}
	if err.Error() != "Access Denied" {
		t.Fatal("Error:", err)
	}
	// Each structured field must round-trip from the XML document.
	resp := minio.ToErrorResponse(err)
	if resp.Code != "AccessDenied" {
		t.Fatal("Error:", resp)
	}
	if resp.RequestID != "F19772218238A85A" {
		t.Fatal("Error:", resp.RequestID)
	}
	if resp.Message != "Access Denied" {
		t.Fatal("Error:", resp.Message)
	}
	if resp.BucketName != "mybucket" {
		t.Fatal("Error:", resp.BucketName)
	}
	if resp.Key != "myphoto.jpg" {
		t.Fatal("Error:", resp.Key)
	}
	if resp.HostID != "GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD" {
		t.Fatal("Error:", resp.HostID)
	}
	// Serialized forms must not be empty.
	if resp.ToXML() == "" {
		t.Fatal("Error")
	}
	if resp.ToJSON() == "" {
		t.Fatal("Error")
	}
}

View File

@ -17,123 +17,79 @@
package minio package minio
import ( import (
"encoding/hex"
"net/http" "net/http"
"net/url" "net/url"
"path/filepath" "path/filepath"
"sync" "sync"
) )
// bucketRegionCache provides simple mechansim to hold bucket regions in memory. // bucketLocationCache provides simple mechansim to hold bucket locations in memory.
type bucketRegionCache struct { type bucketLocationCache struct {
// Mutex is used for handling the concurrent // Mutex is used for handling the concurrent
// read/write requests for cache // read/write requests for cache
sync.RWMutex sync.RWMutex
// items holds the cached regions. // items holds the cached bucket locations.
items map[string]string items map[string]string
} }
// newBucketRegionCache provides a new bucket region cache to be used // newBucketLocationCache provides a new bucket location cache to be used
// internally with the client object. // internally with the client object.
func newBucketRegionCache() *bucketRegionCache { func newBucketLocationCache() *bucketLocationCache {
return &bucketRegionCache{ return &bucketLocationCache{
items: make(map[string]string), items: make(map[string]string),
} }
} }
// Get returns a value of a given key if it exists // Get returns a value of a given key if it exists
func (r *bucketRegionCache) Get(bucketName string) (region string, ok bool) { func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
r.RLock() r.RLock()
defer r.RUnlock() defer r.RUnlock()
region, ok = r.items[bucketName] location, ok = r.items[bucketName]
return return
} }
// Set will persist a value to the cache // Set will persist a value to the cache
func (r *bucketRegionCache) Set(bucketName string, region string) { func (r *bucketLocationCache) Set(bucketName string, location string) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.items[bucketName] = region r.items[bucketName] = location
} }
// Delete deletes a bucket name. // Delete deletes a bucket name.
func (r *bucketRegionCache) Delete(bucketName string) { func (r *bucketLocationCache) Delete(bucketName string) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
delete(r.items, bucketName) delete(r.items, bucketName)
} }
// getRegion - get region for the bucketName from region map cache. // getBucketLocation - get location for the bucketName from location map cache.
func (a API) getRegion(bucketName string) (string, error) { func (c Client) getBucketLocation(bucketName string) (string, error) {
// If signature version '2', no need to fetch bucket location. // For anonymous requests, default to "us-east-1" and let other calls
if a.credentials.Signature.isV2() { // move forward.
if c.anonymous {
return "us-east-1", nil return "us-east-1", nil
} }
// If signature version '4' and latest and endpoint is not Amazon. if location, ok := c.bucketLocCache.Get(bucketName); ok {
// Return 'us-east-1' return location, nil
if a.credentials.Signature.isV4() || a.credentials.Signature.isLatest() {
if !isAmazonEndpoint(a.endpointURL) {
return "us-east-1", nil
}
}
if region, ok := a.bucketRgnC.Get(bucketName); ok {
return region, nil
} }
// get bucket location.
location, err := a.getBucketLocation(bucketName)
if err != nil {
return "", err
}
region := "us-east-1"
// location is region in context of S3 API.
if location != "" {
region = location
}
a.bucketRgnC.Set(bucketName, region)
return region, nil
}
// getBucketLocationRequest wrapper creates a new getBucketLocation request.
func (a API) getBucketLocationRequest(bucketName string) (*Request, error) {
// Set location query.
urlValues := make(url.Values)
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL := a.endpointURL
targetURL.Path = filepath.Join(bucketName, "")
targetURL.RawQuery = urlValues.Encode()
// Instantiate a new request.
req, err := newRequest("GET", targetURL, requestMetadata{
bucketRegion: "us-east-1",
credentials: a.credentials,
contentTransport: a.httpTransport,
})
if err != nil {
return nil, err
}
return req, nil
}
// getBucketLocation uses location subresource to return a bucket's region.
func (a API) getBucketLocation(bucketName string) (string, error) {
// Initialize a new request. // Initialize a new request.
req, err := a.getBucketLocationRequest(bucketName) req, err := c.getBucketLocationRequest(bucketName)
if err != nil { if err != nil {
return "", err return "", err
} }
// Initiate the request. // Initiate the request.
resp, err := req.Do() resp, err := c.httpClient.Do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return "", err return "", err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return "", BodyToErrorResponse(resp.Body) return "", HTTPRespToErrorResponse(resp, bucketName, "")
} }
} }
@ -144,16 +100,54 @@ func (a API) getBucketLocation(bucketName string) (string, error) {
return "", err return "", err
} }
location := locationConstraint
// location is empty will be 'us-east-1'. // location is empty will be 'us-east-1'.
if locationConstraint == "" { if location == "" {
return "us-east-1", nil location = "us-east-1"
} }
// location can be 'EU' convert it to meaningful 'eu-west-1'. // location can be 'EU' convert it to meaningful 'eu-west-1'.
if locationConstraint == "EU" { if location == "EU" {
return "eu-west-1", nil location = "eu-west-1"
} }
// return location. // Save the location into cache.
return locationConstraint, nil c.bucketLocCache.Set(bucketName, location)
// Return.
return location, nil
}
// getBucketLocationRequest wrapper creates a new getBucketLocation request.
func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
// Set location query.
urlValues := make(url.Values)
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL := c.endpointURL
targetURL.Path = filepath.Join(bucketName, "")
targetURL.RawQuery = urlValues.Encode()
// get a new HTTP request for the method.
req, err := http.NewRequest("GET", targetURL.String(), nil)
if err != nil {
return nil, err
}
// set UserAgent for the request.
c.setUserAgent(req)
// set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() || c.signature.isLatest() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
}
// Sign the request.
if c.signature.isV4() || c.signature.isLatest() {
req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
} }

View File

@ -35,10 +35,11 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
for bucket := range s3Client.ListBuckets() { buckets, err := s3Client.ListBuckets()
if bucket.Err != nil { if err != nil {
log.Fatalln(bucket.Err) log.Fatalln(err)
} }
for _, bucket := range buckets {
log.Println(bucket) log.Println(bucket)
} }
} }

View File

@ -50,5 +50,5 @@ func main() {
fmt.Printf("-F %s=%s ", k, v) fmt.Printf("-F %s=%s ", k, v)
} }
fmt.Printf("-F file=@/etc/bashrc ") fmt.Printf("-F file=@/etc/bashrc ")
fmt.Printf(config.Endpoint + "/bucket-name\n") fmt.Printf("https://play.minio.io:9002/bucket-name\n")
} }

View File

@ -36,7 +36,7 @@ func main() {
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
reader, err := s3Client.GetObject("bucke-name", "objectName") reader, err := s3Client.GetObject("bucket-name", "objectName")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -35,10 +35,11 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
for bucket := range s3Client.ListBuckets() { buckets, err := s3Client.ListBuckets()
if bucket.Err != nil { if err != nil {
log.Fatalln(bucket.Err) log.Fatalln(err)
} }
for _, bucket := range buckets {
log.Println(bucket) log.Println(bucket)
} }
} }

View File

@ -49,6 +49,6 @@ func main() {
for k, v := range m { for k, v := range m {
fmt.Printf("-F %s=%s ", k, v) fmt.Printf("-F %s=%s ", k, v)
} }
fmt.Printf("-F file=@/etc/bashrc ") fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf(config.Endpoint + "/bucket-name\n") fmt.Printf("https://bucket-name.s3.amazonaws.com\n")
} }

View File

@ -1,26 +0,0 @@
package minio
import "runtime"
// clientCredentials - main configuration struct used for credentials.
type clientCredentials struct {
/// Standard options.
AccessKeyID string // AccessKeyID required for authorized requests.
SecretAccessKey string // SecretAccessKey required for authorized requests.
Signature SignatureType // choose a signature type if necessary.
}
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "0.2.5"
)
// User Agent should always following the below style.
// Please open an issue to discuss any new changes here.
//
// Minio (OS; ARCH) LIB/VER APP/VER
const (
libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)

View File

@ -21,7 +21,6 @@ import (
"crypto/hmac" "crypto/hmac"
"crypto/sha1" "crypto/sha1"
"encoding/base64" "encoding/base64"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"net/url" "net/url"
@ -31,6 +30,11 @@ import (
"time" "time"
) )
// signature and API related constants.
const (
signV2Algorithm = "AWS"
)
// Encode input URL path to URL encoded path. // Encode input URL path to URL encoded path.
func encodeURL2Path(u *url.URL) (path string) { func encodeURL2Path(u *url.URL) (path string) {
// Encode URL path. // Encode URL path.
@ -52,49 +56,51 @@ func encodeURL2Path(u *url.URL) (path string) {
// PreSignV2 - presign the request in following style. // PreSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}
func (r *Request) PreSignV2() (string, error) { func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// if config is anonymous then presigning cannot be achieved, throw an error. // presign is a noop for anonymous credentials.
if isAnonymousCredentials(*r.credentials) { if accessKeyID == "" || secretAccessKey == "" {
return "", errors.New("Presigning cannot be achieved with anonymous credentials") return nil
} }
d := time.Now().UTC() d := time.Now().UTC()
// Add date if not present // Add date if not present
if date := r.Get("Date"); date == "" { if date := req.Header.Get("Date"); date == "" {
r.Set("Date", d.Format(http.TimeFormat)) req.Header.Set("Date", d.Format(http.TimeFormat))
} }
// Get encoded URL path. // Get encoded URL path.
path := encodeURL2Path(r.req.URL) path := encodeURL2Path(req.URL)
// Find epoch expires when the request will expire. // Find epoch expires when the request will expire.
epochExpires := d.Unix() + r.expires epochExpires := d.Unix() + expires
// get string to sign. // get string to sign.
stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", r.req.Method, epochExpires, path) stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path)
hm := hmac.New(sha1.New, []byte(r.credentials.SecretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign)) hm.Write([]byte(stringToSign))
// calculate signature. // calculate signature.
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
query := r.req.URL.Query() query := req.URL.Query()
// Handle specially for Google Cloud Storage. // Handle specially for Google Cloud Storage.
if strings.Contains(r.req.URL.Host, ".storage.googleapis.com") { if strings.Contains(req.URL.Host, ".storage.googleapis.com") {
query.Set("GoogleAccessId", r.credentials.AccessKeyID) query.Set("GoogleAccessId", accessKeyID)
} else { } else {
query.Set("AWSAccessKeyId", r.credentials.AccessKeyID) query.Set("AWSAccessKeyId", accessKeyID)
} }
// Fill in Expires and Signature for presigned query. // Fill in Expires and Signature for presigned query.
query.Set("Expires", strconv.FormatInt(epochExpires, 10)) query.Set("Expires", strconv.FormatInt(epochExpires, 10))
query.Set("Signature", signature) query.Set("Signature", signature)
r.req.URL.RawQuery = query.Encode()
return r.req.URL.String(), nil // Encode query and save.
req.URL.RawQuery = query.Encode()
return &req
} }
// PostPresignSignatureV2 - presigned signature for PostPolicy request // PostPresignSignatureV2 - presigned signature for PostPolicy request
func (r *Request) PostPresignSignatureV2(policyBase64 string) string { func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(r.credentials.SecretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64)) hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
return signature return signature
@ -117,29 +123,31 @@ func (r *Request) PostPresignSignatureV2(policyBase64 string) string {
// CanonicalizedProtocolHeaders = <described below> // CanonicalizedProtocolHeaders = <described below>
// SignV2 sign the request before Do() (AWS Signature Version 2). // SignV2 sign the request before Do() (AWS Signature Version 2).
func (r *Request) SignV2() { func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Initial time. // Initial time.
d := time.Now().UTC() d := time.Now().UTC()
// Add date if not present. // Add date if not present.
if date := r.Get("Date"); date == "" { if date := req.Header.Get("Date"); date == "" {
r.Set("Date", d.Format(http.TimeFormat)) req.Header.Set("Date", d.Format(http.TimeFormat))
} }
// Calculate HMAC for secretAccessKey. // Calculate HMAC for secretAccessKey.
stringToSign := r.getStringToSignV2() stringToSign := getStringToSignV2(req)
hm := hmac.New(sha1.New, []byte(r.credentials.SecretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign)) hm.Write([]byte(stringToSign))
// Prepare auth header. // Prepare auth header.
authHeader := new(bytes.Buffer) authHeader := new(bytes.Buffer)
authHeader.WriteString(fmt.Sprintf("AWS %s:", r.credentials.AccessKeyID)) authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
encoder := base64.NewEncoder(base64.StdEncoding, authHeader) encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
encoder.Write(hm.Sum(nil)) encoder.Write(hm.Sum(nil))
encoder.Close() encoder.Close()
// Set Authorization header. // Set Authorization header.
r.req.Header.Set("Authorization", authHeader.String()) req.Header.Set("Authorization", authHeader.String())
return &req
} }
// From the Amazon docs: // From the Amazon docs:
@ -150,34 +158,34 @@ func (r *Request) SignV2() {
// Date + "\n" + // Date + "\n" +
// CanonicalizedProtocolHeaders + // CanonicalizedProtocolHeaders +
// CanonicalizedResource; // CanonicalizedResource;
func (r *Request) getStringToSignV2() string { func getStringToSignV2(req http.Request) string {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
// write standard headers. // write standard headers.
r.writeDefaultHeaders(buf) writeDefaultHeaders(buf, req)
// write canonicalized protocol headers if any. // write canonicalized protocol headers if any.
r.writeCanonicalizedHeaders(buf) writeCanonicalizedHeaders(buf, req)
// write canonicalized Query resources if any. // write canonicalized Query resources if any.
r.writeCanonicalizedResource(buf) writeCanonicalizedResource(buf, req)
return buf.String() return buf.String()
} }
// writeDefaultHeader - write all default necessary headers // writeDefaultHeader - write all default necessary headers
func (r *Request) writeDefaultHeaders(buf *bytes.Buffer) { func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
buf.WriteString(r.req.Method) buf.WriteString(req.Method)
buf.WriteByte('\n') buf.WriteByte('\n')
buf.WriteString(r.req.Header.Get("Content-MD5")) buf.WriteString(req.Header.Get("Content-MD5"))
buf.WriteByte('\n') buf.WriteByte('\n')
buf.WriteString(r.req.Header.Get("Content-Type")) buf.WriteString(req.Header.Get("Content-Type"))
buf.WriteByte('\n') buf.WriteByte('\n')
buf.WriteString(r.req.Header.Get("Date")) buf.WriteString(req.Header.Get("Date"))
buf.WriteByte('\n') buf.WriteByte('\n')
} }
// writeCanonicalizedHeaders - write canonicalized headers. // writeCanonicalizedHeaders - write canonicalized headers.
func (r *Request) writeCanonicalizedHeaders(buf *bytes.Buffer) { func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
var protoHeaders []string var protoHeaders []string
vals := make(map[string][]string) vals := make(map[string][]string)
for k, vv := range r.req.Header { for k, vv := range req.Header {
// all the AMZ and GOOG headers should be lowercase // all the AMZ and GOOG headers should be lowercase
lk := strings.ToLower(k) lk := strings.ToLower(k)
if strings.HasPrefix(lk, "x-amz") { if strings.HasPrefix(lk, "x-amz") {
@ -237,8 +245,9 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] + // CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> + // <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func (r *Request) writeCanonicalizedResource(buf *bytes.Buffer) error { func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
requestURL := r.req.URL requestURL := req.URL
// Get encoded URL path. // Get encoded URL path.
path := encodeURL2Path(requestURL) path := encodeURL2Path(requestURL)
buf.WriteString(path) buf.WriteString(path)

View File

@ -19,7 +19,6 @@ package minio
import ( import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"errors"
"net/http" "net/http"
"sort" "sort"
"strconv" "strconv"
@ -29,7 +28,7 @@ import (
// signature and API related constants. // signature and API related constants.
const ( const (
authHeader = "AWS4-HMAC-SHA256" signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z" iso8601DateFormat = "20060102T150405Z"
yyyymmdd = "20060102" yyyymmdd = "20060102"
) )
@ -70,10 +69,10 @@ var ignoredHeaders = map[string]bool{
} }
// getSigningKey hmac seed to calculate final signature // getSigningKey hmac seed to calculate final signature
func getSigningKey(secret, region string, t time.Time) []byte { func getSigningKey(secret, loc string, t time.Time) []byte {
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
regionbytes := sumHMAC(date, []byte(region)) location := sumHMAC(date, []byte(loc))
service := sumHMAC(regionbytes, []byte("s3")) service := sumHMAC(location, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request")) signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey return signingKey
} }
@ -84,10 +83,10 @@ func getSignature(signingKey []byte, stringToSign string) string {
} }
// getScope generate a string of a specific date, an AWS region, and a service // getScope generate a string of a specific date, an AWS region, and a service
func getScope(region string, t time.Time) string { func getScope(location string, t time.Time) string {
scope := strings.Join([]string{ scope := strings.Join([]string{
t.Format(yyyymmdd), t.Format(yyyymmdd),
region, location,
"s3", "s3",
"aws4_request", "aws4_request",
}, "/") }, "/")
@ -95,25 +94,26 @@ func getScope(region string, t time.Time) string {
} }
// getCredential generate a credential string // getCredential generate a credential string
func getCredential(accessKeyID, region string, t time.Time) string { func getCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(region, t) scope := getScope(location, t)
return accessKeyID + "/" + scope return accessKeyID + "/" + scope
} }
// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload // getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload
func (r *Request) getHashedPayload() string { func getHashedPayload(req http.Request) string {
if r.expires != 0 { hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
return "UNSIGNED-PAYLOAD" if hashedPayload == "" {
// Presign does not have a payload, use S3 recommended value.
hashedPayload = "UNSIGNED-PAYLOAD"
} }
hashedPayload := r.req.Header.Get("X-Amz-Content-Sha256")
return hashedPayload return hashedPayload
} }
// getCanonicalHeaders generate a list of request headers for signature. // getCanonicalHeaders generate a list of request headers for signature.
func (r *Request) getCanonicalHeaders() string { func getCanonicalHeaders(req http.Request) string {
var headers []string var headers []string
vals := make(map[string][]string) vals := make(map[string][]string)
for k, vv := range r.req.Header { for k, vv := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header continue // ignored header
} }
@ -129,7 +129,7 @@ func (r *Request) getCanonicalHeaders() string {
buf.WriteByte(':') buf.WriteByte(':')
switch { switch {
case k == "host": case k == "host":
buf.WriteString(r.req.URL.Host) buf.WriteString(req.URL.Host)
fallthrough fallthrough
default: default:
for idx, v := range vals[k] { for idx, v := range vals[k] {
@ -146,9 +146,9 @@ func (r *Request) getCanonicalHeaders() string {
// getSignedHeaders generate all signed request headers. // getSignedHeaders generate all signed request headers.
// i.e alphabetically sorted, semicolon-separated list of lowercase request header names // i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (r *Request) getSignedHeaders() string { func getSignedHeaders(req http.Request) string {
var headers []string var headers []string
for k := range r.req.Header { for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header continue // ignored header
} }
@ -169,95 +169,114 @@ func (r *Request) getSignedHeaders() string {
// <SignedHeaders>\n // <SignedHeaders>\n
// <HashedPayload> // <HashedPayload>
// //
func (r *Request) getCanonicalRequest() string { func getCanonicalRequest(req http.Request) string {
r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1) req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
r.req.Method, req.Method,
urlEncodePath(r.req.URL.Path), urlEncodePath(req.URL.Path),
r.req.URL.RawQuery, req.URL.RawQuery,
r.getCanonicalHeaders(), getCanonicalHeaders(req),
r.getSignedHeaders(), getSignedHeaders(req),
r.getHashedPayload(), getHashedPayload(req),
}, "\n") }, "\n")
return canonicalRequest return canonicalRequest
} }
// getStringToSign a string based on selected query values. // getStringToSign a string based on selected query values.
func (r *Request) getStringToSignV4(canonicalRequest string, t time.Time) string { func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n" stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
stringToSign = stringToSign + getScope(r.bucketRegion, t) + "\n" stringToSign = stringToSign + getScope(location, t) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
return stringToSign return stringToSign
} }
// PreSignV4 presign the request, in accordance with // PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func (r *Request) PreSignV4() (string, error) { func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
if isAnonymousCredentials(*r.credentials) { // presign is a noop for anonymous credentials.
return "", errors.New("presigning cannot be done with anonymous credentials") if accessKeyID == "" || secretAccessKey == "" {
return nil
} }
// Initial time. // Initial time.
t := time.Now().UTC() t := time.Now().UTC()
// get credential string. // get credential string.
credential := getCredential(r.credentials.AccessKeyID, r.bucketRegion, t) credential := getCredential(accessKeyID, location, t)
// get hmac signing key.
signingKey := getSigningKey(r.credentials.SecretAccessKey, r.bucketRegion, t)
// Get all signed headers. // Get all signed headers.
signedHeaders := r.getSignedHeaders() signedHeaders := getSignedHeaders(req)
query := r.req.URL.Query() // set URL query.
query.Set("X-Amz-Algorithm", authHeader) query := req.URL.Query()
query.Set("X-Amz-Algorithm", signV4Algorithm)
query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
query.Set("X-Amz-Expires", strconv.FormatInt(r.expires, 10)) query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
query.Set("X-Amz-SignedHeaders", signedHeaders) query.Set("X-Amz-SignedHeaders", signedHeaders)
query.Set("X-Amz-Credential", credential) query.Set("X-Amz-Credential", credential)
r.req.URL.RawQuery = query.Encode() req.URL.RawQuery = query.Encode()
// Get canonical request.
canonicalRequest := getCanonicalRequest(req)
// Get string to sign from canonical request. // Get string to sign from canonical request.
stringToSign := r.getStringToSignV4(r.getCanonicalRequest(), t) stringToSign := getStringToSignV4(t, location, canonicalRequest)
// get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t)
// calculate signature. // calculate signature.
signature := getSignature(signingKey, stringToSign) signature := getSignature(signingKey, stringToSign)
r.req.URL.RawQuery += "&X-Amz-Signature=" + signature // Add signature header to RawQuery.
req.URL.RawQuery += "&X-Amz-Signature=" + signature
return r.req.URL.String(), nil return &req
} }
// PostPresignSignatureV4 - presigned signature for PostPolicy requests. // PostPresignSignatureV4 - presigned signature for PostPolicy requests.
func (r *Request) PostPresignSignatureV4(policyBase64 string, t time.Time) string { func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
signingkey := getSigningKey(r.credentials.SecretAccessKey, r.bucketRegion, t) signingkey := getSigningKey(secretAccessKey, location, t)
signature := getSignature(signingkey, policyBase64) signature := getSignature(signingkey, policyBase64)
return signature return signature
} }
// SignV4 sign the request before Do(), in accordance with // SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func (r *Request) SignV4() { func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
// Initial time. // Initial time.
t := time.Now().UTC() t := time.Now().UTC()
// Set x-amz-date.
r.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Get all signed headers. // Set x-amz-date.
signedHeaders := r.getSignedHeaders() req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Get canonical request.
canonicalRequest := getCanonicalRequest(req)
// Get string to sign from canonical request. // Get string to sign from canonical request.
stringToSign := r.getStringToSignV4(r.getCanonicalRequest(), t) stringToSign := getStringToSignV4(t, location, canonicalRequest)
// get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t)
// get credential string. // get credential string.
credential := getCredential(r.credentials.AccessKeyID, r.bucketRegion, t) credential := getCredential(accessKeyID, location, t)
// get hmac signing key.
signingKey := getSigningKey(r.credentials.SecretAccessKey, r.bucketRegion, t) // Get all signed headers.
signedHeaders := getSignedHeaders(req)
// calculate signature. // calculate signature.
signature := getSignature(signingKey, stringToSign) signature := getSignature(signingKey, stringToSign)
// if regular request, construct the final authorization header. // if regular request, construct the final authorization header.
parts := []string{ parts := []string{
authHeader + " Credential=" + credential, signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + signedHeaders, "SignedHeaders=" + signedHeaders,
"Signature=" + signature, "Signature=" + signature,
} }
// Set authorization header.
auth := strings.Join(parts, ", ") auth := strings.Join(parts, ", ")
r.Set("Authorization", auth) req.Header.Set("Authorization", auth)
return &req
} }

View File

@ -1,169 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/base64"
"encoding/hex"
"io"
"net/http"
"net/url"
)
// Request - a http request wrapper carrying the signing context
// (credentials, bucket region, optional presign expiry) needed to
// sign and execute a single S3 API call.
type Request struct {
	req          *http.Request      // underlying HTTP request to be signed and sent.
	credentials  *clientCredentials // access/secret keys and signature version.
	transport    http.RoundTripper  // optional custom transport; http.DefaultTransport when nil.
	bucketRegion string             // region used to compute the request scope for signing.
	expires      int64              // presign expiry; non-zero marks a presigned request.
}
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
	// User supplied.
	expires          int64              // presign expiry in seconds; non-zero requests presigning.
	userAgent        string             // value for the User-Agent header.
	bucketRegion     string             // region scope used when signing.
	credentials      *clientCredentials // access/secret keys and signature version.
	contentTransport http.RoundTripper  // optional custom transport for this request.
	contentHeader    http.Header        // additional headers to apply to the request.

	// Generated by our internal code.
	contentBody        io.ReadCloser // request body, if any.
	contentLength      int64         // explicit Content-Length; applied only when > 0.
	contentSha256Bytes []byte        // raw sha256 of the body, hex-encoded into X-Amz-Content-Sha256.
	contentMD5Bytes    []byte        // raw md5 of the body, base64-encoded into Content-MD5.
}
// getTargetURL - construct an encoded URL for the request.
//
// If the endpoint supports virtual-host-style addressing (currently
// Amazon S3 and Google Cloud Storage would support this), the bucket
// name becomes part of the host; otherwise the bucket and object are
// appended path-style. queryValues, when non-empty, are encoded and
// appended as the query string.
func getTargetURL(endpoint *url.URL, bucketName, objectName string, queryValues url.Values) (*url.URL, error) {
	var urlStr string
	switch {
	case bucketName == "":
		// Service-level request (e.g. list buckets); virtual-host style
		// is meaningless without a bucket and would build a malformed host.
		urlStr = endpoint.Scheme + "://" + endpoint.Host + "/"
	case isVirtualHostSupported(endpoint):
		// Virtual-host style: bucket is part of the host.
		urlStr = endpoint.Scheme + "://" + bucketName + "." + endpoint.Host
		urlStr = urlStr + "/" + urlEncodePath(objectName)
	default:
		// Path style: bucket (and object, when present) follow the host.
		urlStr = endpoint.Scheme + "://" + endpoint.Host + "/" + bucketName
		if objectName != "" {
			urlStr = urlStr + "/" + urlEncodePath(objectName)
		}
	}
	// If there are any query values, add them to the end.
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + queryValues.Encode()
	}
	return url.Parse(urlStr)
}
// Do - sign the request according to the configured signature version
// (anonymous requests are sent unsigned) and execute it over the
// configured transport, returning the HTTP response.
func (r *Request) Do() (*http.Response, error) {
	// Only sign when real credentials are present.
	if !isAnonymousCredentials(*r.credentials) {
		// Signature version '2', when explicitly requested.
		if r.credentials.Signature.isV2() {
			r.SignV2()
		}
		// Signature version '4' is the default behavior.
		if r.credentials.Signature.isV4() || r.credentials.Signature.isLatest() {
			r.SignV4()
		}
	}
	// Prefer the caller-supplied transport, falling back to the default.
	roundTripper := http.DefaultTransport
	if r.transport != nil {
		roundTripper = r.transport
	}
	httpClient := &http.Client{Transport: roundTripper}
	return httpClient.Do(r.req)
}
// Set - set a header on the underlying request, replacing any
// existing values for the key.
func (r *Request) Set(key, value string) {
	r.req.Header.Set(key, value)
}
// Get - get the first header value associated with the given key
// from the underlying request.
func (r *Request) Get(key string) string {
	return r.req.Header.Get(key)
}
// newRequest - provides a new instance of *Request* for the given
// method and target URL, applying the supplied request metadata
// (body, headers, content length and checksums used for signing).
//
// For presigned requests (metadata.expires != 0) only the signing
// context is recorded and headers are left untouched; they are
// contributed at presign time instead.
func newRequest(method string, targetURL *url.URL, metadata requestMetadata) (*Request, error) {
	// Default to POST when no method is specified.
	if method == "" {
		method = "POST"
	}
	// get a new HTTP request, for the requested method.
	req, err := http.NewRequest(method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}
	// Set content body if available.
	if metadata.contentBody != nil {
		req.Body = metadata.contentBody
	}
	// Save the signing context for subsequent use.
	r := &Request{
		req:          req,
		credentials:  metadata.credentials,
		bucketRegion: metadata.bucketRegion,
		transport:    metadata.contentTransport,
	}
	// If presigned request, return early without populating headers.
	if metadata.expires != 0 {
		r.expires = metadata.expires
		return r, nil
	}
	// set UserAgent for the request.
	r.Set("User-Agent", metadata.userAgent)
	// Copy all supplied headers, preserving every value of
	// multi-valued keys rather than only the first.
	for k, vv := range metadata.contentHeader {
		for i, v := range vv {
			if i == 0 {
				r.Set(k, v)
			} else {
				r.req.Header.Add(k, v)
			}
		}
	}
	// set incoming content-length.
	if metadata.contentLength > 0 {
		r.req.ContentLength = metadata.contentLength
	}
	// Signature version '4' signs the payload hash; default to the
	// hash of an empty body when no explicit sha256 was supplied.
	if r.credentials.Signature.isV4() || r.credentials.Signature.isLatest() {
		r.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
		if metadata.contentSha256Bytes != nil {
			r.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes))
		}
	}
	// set md5Sum for content protection.
	if metadata.contentMD5Bytes != nil {
		r.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
	}
	// return request.
	return r, nil
}

View File

@ -119,14 +119,6 @@ func closeResponse(resp *http.Response) {
} }
} }
// isAnonymousCredentials - true when the credentials are missing an
// access key or a secret key, i.e. requests must be sent unsigned.
func isAnonymousCredentials(c clientCredentials) bool {
	// Anonymous unless both keys are present.
	return c.AccessKeyID == "" || c.SecretAccessKey == ""
}
// isVirtualHostSupported - verify if host supports virtual hosted style. // isVirtualHostSupported - verify if host supports virtual hosted style.
// Currently only Amazon S3 and Google Cloud Storage would support this. // Currently only Amazon S3 and Google Cloud Storage would support this.
func isVirtualHostSupported(endpointURL *url.URL) bool { func isVirtualHostSupported(endpointURL *url.URL) bool {