Misty Stanley-Jones 2016-09-28 14:45:00 -07:00
commit 9f57550bb9
139 changed files with 34056 additions and 0 deletions

267
docs/api/errcode/errors.go Normal file

@ -0,0 +1,267 @@
package errcode
import (
"encoding/json"
"fmt"
"strings"
)
// ErrorCoder is the base interface for ErrorCode and Error, allowing
// callers of either to retrieve the underlying ErrorCode.
type ErrorCoder interface {
ErrorCode() ErrorCode
}
// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int
var _ error = ErrorCode(0)
// ErrorCode just returns itself
func (ec ErrorCode) ErrorCode() ErrorCode {
return ec
}
// Error implements the error interface, returning the lowercased,
// space-separated form of the code's Value.
func (ec ErrorCode) Error() string {
// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
}
// Descriptor returns the descriptor for the error code.
func (ec ErrorCode) Descriptor() ErrorDescriptor {
d, ok := errorCodeToDescriptors[ec]
if !ok {
return ErrorCodeUnknown.Descriptor()
}
return d
}
// String returns the canonical identifier for this error code.
func (ec ErrorCode) String() string {
return ec.Descriptor().Value
}
// Message returns the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
return ec.Descriptor().Message
}
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
return []byte(ec.String()), nil
}
// UnmarshalText decodes the form generated by MarshalText.
func (ec *ErrorCode) UnmarshalText(text []byte) error {
desc, ok := idToDescriptors[string(text)]
if !ok {
desc = ErrorCodeUnknown.Descriptor()
}
*ec = desc.Code
return nil
}
// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec ErrorCode) WithMessage(message string) Error {
return Error{
Code: ec,
Message: message,
}
}
// WithDetail creates a new Error struct based on the passed-in info and
// sets the Detail property appropriately.
func (ec ErrorCode) WithDetail(detail interface{}) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithDetail(detail)
}
// WithArgs creates a new Error struct and sets the Args slice
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithArgs(args...)
}
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
Code ErrorCode `json:"code"`
Message string `json:"message"`
Detail interface{} `json:"detail,omitempty"`
// TODO(duglin): See if we need an "args" property so we can do the
// variable substitution right before showing the message to the user
}
var _ error = Error{}
// ErrorCode returns the ID/Value of this Error
func (e Error) ErrorCode() ErrorCode {
return e.Code
}
// Error returns a human-readable representation of the error.
func (e Error) Error() string {
return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}
// WithDetail will return a new Error, based on the current one, but with
// some Detail info added
func (e Error) WithDetail(detail interface{}) Error {
return Error{
Code: e.Code,
Message: e.Message,
Detail: detail,
}
}
// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error
func (e Error) WithArgs(args ...interface{}) Error {
return Error{
Code: e.Code,
Message: fmt.Sprintf(e.Code.Message(), args...),
Detail: e.Detail,
}
}
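// Illustrative sketch, not part of the original file: registering a
// hypothetical code and chaining WithArgs and WithDetail. Register is
// defined in register.go; the literal 400 stands in for
// http.StatusBadRequest to avoid an extra import.
var errorCodeExample = Register("errcode.example", ErrorDescriptor{
	Value:          "EXAMPLE_INVALID",
	Message:        "Sorry %q isn't valid",
	Description:    "Hypothetical descriptor used only for this sketch.",
	HTTPStatusCode: 400,
})

func exampleWithArgs() {
	err := errorCodeExample.WithArgs("BOOGIE").WithDetail("data")
	fmt.Println(err.Error()) // example invalid: Sorry "BOOGIE" isn't valid
}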
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
// Code is the error code that this descriptor describes.
Code ErrorCode
// Value provides a unique, string key, often capitalized with
// underscores, to identify the error code. This value is used as the
// keyed value when serializing API errors.
Value string
// Message is a short, human-readable description of the error condition
// included in API responses.
Message string
// Description provides a complete account of the error's purpose, suitable
// for use in documentation.
Description string
// HTTPStatusCode provides the HTTP status code that is associated with
// this error condition.
HTTPStatusCode int
}
// ParseErrorCode returns the ErrorCode for the given string value.
// `ErrorCodeUnknown` will be returned if the value is not known.
func ParseErrorCode(value string) ErrorCode {
ed, ok := idToDescriptors[value]
if ok {
return ed.Code
}
return ErrorCodeUnknown
}
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error
var _ error = Errors{}
func (errs Errors) Error() string {
switch len(errs) {
case 0:
return "<nil>"
case 1:
return errs[0].Error()
default:
msg := "errors:\n"
for _, err := range errs {
msg += err.Error() + "\n"
}
return msg
}
}
// Len returns the current number of errors.
func (errs Errors) Len() int {
return len(errs)
}
// MarshalJSON converts a slice of error, ErrorCode or Error values into a
// slice of Error structs, then serializes the result.
func (errs Errors) MarshalJSON() ([]byte, error) {
var tmpErrs struct {
Errors []Error `json:"errors,omitempty"`
}
for _, daErr := range errs {
var err Error
switch daErr := daErr.(type) {
case ErrorCode:
err = daErr.WithDetail(nil)
case Error:
err = daErr
default:
err = ErrorCodeUnknown.WithDetail(daErr)
}
// If the Error struct was set up without a Message (i.e. it is ""),
// grab it from the ErrorCode.
msg := err.Message
if msg == "" {
msg = err.Code.Message()
}
tmpErrs.Errors = append(tmpErrs.Errors, Error{
Code: err.Code,
Message: msg,
Detail: err.Detail,
})
}
return json.Marshal(tmpErrs)
}
// UnmarshalJSON deserializes []Error and then converts it into a slice of
// Error or ErrorCode values.
func (errs *Errors) UnmarshalJSON(data []byte) error {
var tmpErrs struct {
Errors []Error
}
if err := json.Unmarshal(data, &tmpErrs); err != nil {
return err
}
var newErrs Errors
for _, daErr := range tmpErrs.Errors {
// If Message is empty or exactly matches the Code's message string
// then just use the Code, no need for a full Error struct
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
// Errors without details are converted to bare ErrorCodes
newErrs = append(newErrs, daErr.Code)
} else {
// Errors with details are kept intact
newErrs = append(newErrs, Error{
Code: daErr.Code,
Message: daErr.Message,
Detail: daErr.Detail,
})
}
}
*errs = newErrs
return nil
}
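// Illustrative sketch, not part of the original file: the Errors envelope
// serializes to a {"errors":[...]} document, and codes without detail
// round-trip back to bare ErrorCode values (see errors_test.go below).
func exampleEnvelopeRoundTrip() {
	errs := Errors{ErrorCodeUnknown}
	p, _ := json.Marshal(errs)
	fmt.Println(string(p)) // {"errors":[{"code":"UNKNOWN","message":"unknown error"}]}

	var decoded Errors
	_ = json.Unmarshal(p, &decoded)
	fmt.Printf("%T\n", decoded[0]) // errcode.ErrorCode
}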

185
docs/api/errcode/errors_test.go Normal file

@ -0,0 +1,185 @@
package errcode
import (
"encoding/json"
"net/http"
"reflect"
"strings"
"testing"
)
var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{
Value: "TEST1",
Message: "test error 1",
Description: `Just a test message #1.`,
HTTPStatusCode: http.StatusInternalServerError,
})
var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{
Value: "TEST2",
Message: "test error 2",
Description: `Just a test message #2.`,
HTTPStatusCode: http.StatusNotFound,
})
var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{
Value: "TEST3",
Message: "Sorry %q isn't valid",
Description: `Just a test message #3.`,
HTTPStatusCode: http.StatusNotFound,
})
// TestErrorCodes ensures that error code format, mappings and
// marshaling/unmarshaling round trips are stable.
func TestErrorCodes(t *testing.T) {
if len(errorCodeToDescriptors) == 0 {
t.Fatal("errors aren't loaded!")
}
for ec, desc := range errorCodeToDescriptors {
if ec != desc.Code {
t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code)
}
if idToDescriptors[desc.Value].Code != ec {
t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec)
}
if ec.Message() != desc.Message {
t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message)
}
// Test (de)serializing the ErrorCode
p, err := json.Marshal(ec)
if err != nil {
t.Fatalf("couldn't marshal ec %v: %v", ec, err)
}
if len(p) == 0 {
t.Fatalf("expected content in marshaled bytes for error code %v", ec)
}
// First, unmarshal to interface and ensure we have a string.
var ecUnspecified interface{}
if err := json.Unmarshal(p, &ecUnspecified); err != nil {
t.Fatalf("error unmarshaling error code %v: %v", ec, err)
}
if _, ok := ecUnspecified.(string); !ok {
t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified)
}
// Now, unmarshal with the error code type and ensure they are equal
var ecUnmarshaled ErrorCode
if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
t.Fatalf("error unmarshaling error code %v: %v", ec, err)
}
if ecUnmarshaled != ec {
t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec)
}
expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1))
if ec.Error() != expectedErrorString {
t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
}
}
}
// TestErrorsManagement does a quick check of the Errors type to ensure that
// members are properly pushed and marshaled.
func TestErrorsManagement(t *testing.T) {
var errs Errors
errs = append(errs, ErrorCodeTest1)
errs = append(errs, ErrorCodeTest2.WithDetail(
map[string]interface{}{"digest": "sometestblobsumdoesntmatter"}))
errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE"))
errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))
p, err := json.Marshal(errs)
if err != nil {
t.Fatalf("error marashaling errors: %v", err)
}
expectedJSON := `{"errors":[` +
`{"code":"TEST1","message":"test error 1"},` +
`{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` +
`{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` +
`{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` +
`]}`
if string(p) != expectedJSON {
t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON)
}
// Now test the reverse
var unmarshaled Errors
if err := json.Unmarshal(p, &unmarshaled); err != nil {
t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
}
if !reflect.DeepEqual(unmarshaled, errs) {
t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
}
// Test the arg substitution stuff
e1 := unmarshaled[3].(Error)
exp1 := `Sorry "BOOGIE" isn't valid`
if e1.Message != exp1 {
t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1)
}
exp1 = "test3: " + exp1
if e1.Error() != exp1 {
t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1)
}
// Test again with a single value this time
errs = Errors{ErrorCodeUnknown}
expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
p, err = json.Marshal(errs)
if err != nil {
t.Fatalf("error marashaling errors: %v", err)
}
if string(p) != expectedJSON {
t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
}
// Now test the reverse
unmarshaled = nil
if err := json.Unmarshal(p, &unmarshaled); err != nil {
t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
}
if !reflect.DeepEqual(unmarshaled, errs) {
t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
}
// Verify that calling WithArgs() more than once does the right thing,
// meaning it creates a new Error and uses the ErrorCode's Message.
e1 = ErrorCodeTest3.WithArgs("test1")
e2 := e1.WithArgs("test2")
if e1.Message != `Sorry "test1" isn't valid` {
t.Fatalf("args: e1 should not have been modified by the second WithArgs call")
}
if e2.Message != `Sorry "test2" isn't valid` {
t.Fatalf("e2 had wrong message: %q", e2.Message)
}
// Verify that calling WithDetail() more than once does the right thing,
// meaning it creates a new Error and overwrites the old Detail field.
e1 = ErrorCodeTest3.WithDetail("stuff1")
e2 = e1.WithDetail("stuff2")
if e1.Detail != `stuff1` {
t.Fatalf("detail: e1 should not have been modified by the second WithDetail call")
}
if e2.Detail != `stuff2` {
t.Fatalf("e2 had wrong detail: %q", e2.Detail)
}
}

44
docs/api/errcode/handler.go Normal file

@ -0,0 +1,44 @@
package errcode
import (
"encoding/json"
"net/http"
)
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
// and sets the content-type header to 'application/json'. It will handle
// ErrorCoder and Errors, and if necessary will create an envelope.
func ServeJSON(w http.ResponseWriter, err error) error {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
var sc int
switch errs := err.(type) {
case Errors:
if len(errs) < 1 {
break
}
if err, ok := errs[0].(ErrorCoder); ok {
sc = err.ErrorCode().Descriptor().HTTPStatusCode
}
case ErrorCoder:
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
err = Errors{err} // create an envelope.
default:
// We have an unhandled error type, so just place it in an envelope
// and move along.
err = Errors{err}
}
if sc == 0 {
sc = http.StatusInternalServerError
}
w.WriteHeader(sc)
if err := json.NewEncoder(w).Encode(err); err != nil {
return err
}
return nil
}
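// Illustrative sketch, not part of the original file: an HTTP handler that
// funnels failures through ServeJSON. failingOperation is hypothetical; the
// response status (500) comes from ErrorCodeUnknown's descriptor.
func failingOperation(r *http.Request) error {
	return ErrorCodeUnknown.WithDetail(r.URL.Path)
}

func exampleHandler(w http.ResponseWriter, r *http.Request) {
	if err := failingOperation(r); err != nil {
		_ = ServeJSON(w, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}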

138
docs/api/errcode/register.go Normal file

@ -0,0 +1,138 @@
package errcode
import (
"fmt"
"net/http"
"sort"
"sync"
)
var (
errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
idToDescriptors = map[string]ErrorDescriptor{}
groupToDescriptors = map[string][]ErrorDescriptor{}
)
var (
// ErrorCodeUnknown is a generic error that can be used as a last
// resort if there is no situation-specific error message that can be used
ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
Value: "UNKNOWN",
Message: "unknown error",
Description: `Generic error returned when the error does not have an
API classification.`,
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeUnsupported is returned when an operation is not supported.
ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
Value: "UNSUPPORTED",
Message: "The operation is unsupported.",
Description: `The operation was unsupported due to a missing
implementation or invalid set of parameters.`,
HTTPStatusCode: http.StatusMethodNotAllowed,
})
// ErrorCodeUnauthorized is returned if a request requires
// authentication.
ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
Value: "UNAUTHORIZED",
Message: "authentication required",
Description: `The access controller was unable to authenticate
the client. Often this will be accompanied by a
Www-Authenticate HTTP response header indicating how to
authenticate.`,
HTTPStatusCode: http.StatusUnauthorized,
})
// ErrorCodeDenied is returned if a client does not have sufficient
// permission to perform an action.
ErrorCodeDenied = Register("errcode", ErrorDescriptor{
Value: "DENIED",
Message: "requested access to the resource is denied",
Description: `The access controller denied access for the
operation on a resource.`,
HTTPStatusCode: http.StatusForbidden,
})
// ErrorCodeUnavailable provides a common error to report unavailability
// of a service or endpoint.
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
Value: "UNAVAILABLE",
Message: "service unavailable",
Description: "Returned when a service is not available",
HTTPStatusCode: http.StatusServiceUnavailable,
})
// ErrorCodeTooManyRequests is returned if a client attempts too many
// times to contact a service endpoint.
ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
Value: "TOOMANYREQUESTS",
Message: "too many requests",
Description: `Returned when a client attempts to contact a
service too many times`,
HTTPStatusCode: http.StatusTooManyRequests,
})
)
var nextCode = 1000
var registerLock sync.Mutex
// Register will make the passed-in error known to the environment and
// return a new ErrorCode
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
registerLock.Lock()
defer registerLock.Unlock()
descriptor.Code = ErrorCode(nextCode)
if _, ok := idToDescriptors[descriptor.Value]; ok {
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
}
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
}
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
errorCodeToDescriptors[descriptor.Code] = descriptor
idToDescriptors[descriptor.Value] = descriptor
nextCode++
return descriptor.Code
}
type byValue []ErrorDescriptor
func (a byValue) Len() int { return len(a) }
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
// GetGroupNames returns the list of Error group names that are registered
func GetGroupNames() []string {
keys := []string{}
for k := range groupToDescriptors {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// GetErrorCodeGroup returns the named group of error descriptors
func GetErrorCodeGroup(name string) []ErrorDescriptor {
desc := groupToDescriptors[name]
sort.Sort(byValue(desc))
return desc
}
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
// registered, irrespective of what group they're in
func GetErrorAllDescriptors() []ErrorDescriptor {
result := []ErrorDescriptor{}
for _, group := range GetGroupNames() {
result = append(result, GetErrorCodeGroup(group)...)
}
sort.Sort(byValue(result))
return result
}
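// Illustrative sketch, not part of the original file: walking the registry of
// descriptors, e.g. to generate the error tables in the API documentation.
func exampleDumpDescriptors() {
	for _, group := range GetGroupNames() {
		for _, desc := range GetErrorCodeGroup(group) {
			fmt.Printf("%s: %s -> HTTP %d\n", group, desc.Value, desc.HTTPStatusCode)
		}
	}
}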

1596
docs/api/v2/descriptors.go Normal file

File diff suppressed because it is too large

9
docs/api/v2/doc.go Normal file

@ -0,0 +1,9 @@
// Package v2 describes routes, urls and the error codes used in the Docker
// Registry JSON HTTP API V2. In addition to declarations, descriptors are
// provided for routes and error codes that can be used for implementation and
// automatically generating documentation.
//
// Definitions here are considered to be locked down for the V2 registry api.
// Any changes must be considered carefully and should not proceed without a
// change proposal in docker core.
package v2

136
docs/api/v2/errors.go Normal file

@ -0,0 +1,136 @@
package v2
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
const errGroup = "registry.api.v2"
var (
// ErrorCodeDigestInvalid is returned when uploading a blob if the
// provided digest does not match the blob contents.
ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "DIGEST_INVALID",
Message: "provided digest did not match uploaded content",
Description: `When a blob is uploaded, the registry will check that
the content matches the digest provided by the client. The error may
include a detail structure with the key "digest", including the
invalid digest string. This error may also be returned when a manifest
includes an invalid layer digest.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
// size does not match the content length.
ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "SIZE_INVALID",
Message: "provided length did not match content length",
Description: `When a layer is uploaded, the provided size will be
checked against the uploaded content. If they do not match, this error
will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameInvalid is returned when the name in the manifest does not
// match the provided name.
ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NAME_INVALID",
Message: "invalid repository name",
Description: `Invalid repository name encountered either during
manifest validation or any API operation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeTagInvalid is returned when the tag in the manifest does not
// match the provided tag.
ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "TAG_INVALID",
Message: "manifest tag did not match URI",
Description: `During a manifest upload, if the tag in the manifest
does not match the uri tag, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameUnknown is returned when the repository name is not known.
ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NAME_UNKNOWN",
Message: "repository name not known to registry",
Description: `This is returned if the name used during an operation is
unknown to the registry.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNKNOWN",
Message: "manifest unknown",
Description: `This error is returned when the manifest, identified by
name and tag, is unknown to the repository.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestInvalid returned when an image manifest is invalid,
// typically during a PUT operation. This error encompasses all errors
// encountered during manifest validation that aren't signature errors.
ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_INVALID",
Message: "manifest invalid",
Description: `During upload, manifests undergo several checks ensuring
validity. If those checks fail, this error may be returned, unless a
more specific error is included. The detail will contain information
about the failed validation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestUnverified is returned when the manifest fails
// signature verification.
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNVERIFIED",
Message: "manifest failed signature verification",
Description: `During manifest upload, if the manifest fails signature
verification, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
// unknown to the registry.
ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a manifest blob is
unknown to the registry.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeBlobUnknown is returned when a blob is unknown to the
// registry. This can happen when the manifest references a nonexistent
// layer or the result is not found by a blob fetch.
ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a blob is unknown to the
registry in a specified repository. This can be returned with a
standard get or if a manifest references an unknown layer during
upload.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UPLOAD_UNKNOWN",
Message: "blob upload unknown to registry",
Description: `If a blob upload has been cancelled or was never
started, this error code may be returned.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UPLOAD_INVALID",
Message: "blob upload invalid",
Description: `The blob upload encountered an error and can no
longer proceed.`,
HTTPStatusCode: http.StatusNotFound,
})
)
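// Illustrative sketch, not part of the original file: how a registry handler
// might return one of these typed errors, attaching the offending digest as
// detail in the shape the DIGEST_INVALID description calls for.
func exampleDigestMismatch(provided string) error {
	return ErrorCodeDigestInvalid.WithDetail(map[string]string{"digest": provided})
}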

49
docs/api/v2/routes.go Normal file

@ -0,0 +1,49 @@
package v2
import "github.com/gorilla/mux"
// The following are definitions of the name under which all V2 routes are
// registered. These symbols can be used to look up a route based on the name.
const (
RouteNameBase = "base"
RouteNameManifest = "manifest"
RouteNameTags = "tags"
RouteNameBlob = "blob"
RouteNameBlobUpload = "blob-upload"
RouteNameBlobUploadChunk = "blob-upload-chunk"
RouteNameCatalog = "catalog"
)
var allEndpoints = []string{
RouteNameManifest,
RouteNameCatalog,
RouteNameTags,
RouteNameBlob,
RouteNameBlobUpload,
RouteNameBlobUploadChunk,
}
// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
func Router() *mux.Router {
return RouterWithPrefix("")
}
// RouterWithPrefix builds a gorilla router with a configured prefix
// on all routes.
func RouterWithPrefix(prefix string) *mux.Router {
rootRouter := mux.NewRouter()
router := rootRouter
if prefix != "" {
router = router.PathPrefix(prefix).Subrouter()
}
router.StrictSlash(true)
for _, descriptor := range routeDescriptors {
router.Path(descriptor.Path).Name(descriptor.Name)
}
return rootRouter
}
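// Illustrative sketch, not part of the original file: wiring a handler to a
// named route, as a server implementation might. Assumes an additional
// net/http import.
func exampleAttachManifestHandler(h http.Handler) *mux.Router {
	router := RouterWithPrefix("/registry/")
	router.GetRoute(RouteNameManifest).Handler(h)
	return router
}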

355
docs/api/v2/routes_test.go Normal file

@ -0,0 +1,355 @@
package v2
import (
"encoding/json"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"time"
"github.com/gorilla/mux"
)
type routeTestCase struct {
RequestURI string
ExpectedURI string
Vars map[string]string
RouteName string
StatusCode int
}
// TestRouter registers a test handler with all the routes and ensures that
// each route returns the expected path variables. No method verification is
// present. This is not meant to be exhaustive, but serves as a check to ensure
// that the expected variables are extracted.
//
// This may go away as the application structure comes together.
func TestRouter(t *testing.T) {
testCases := []routeTestCase{
{
RouteName: RouteNameBase,
RequestURI: "/v2/",
Vars: map[string]string{},
},
{
RouteName: RouteNameManifest,
RequestURI: "/v2/foo/manifests/bar",
Vars: map[string]string{
"name": "foo",
"reference": "bar",
},
},
{
RouteName: RouteNameManifest,
RequestURI: "/v2/foo/bar/manifests/tag",
Vars: map[string]string{
"name": "foo/bar",
"reference": "tag",
},
},
{
RouteName: RouteNameManifest,
RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890",
Vars: map[string]string{
"name": "foo/bar",
"reference": "sha256:abcdef01234567890",
},
},
{
RouteName: RouteNameTags,
RequestURI: "/v2/foo/bar/tags/list",
Vars: map[string]string{
"name": "foo/bar",
},
},
{
RouteName: RouteNameTags,
RequestURI: "/v2/docker.com/foo/tags/list",
Vars: map[string]string{
"name": "docker.com/foo",
},
},
{
RouteName: RouteNameTags,
RequestURI: "/v2/docker.com/foo/bar/tags/list",
Vars: map[string]string{
"name": "docker.com/foo/bar",
},
},
{
RouteName: RouteNameTags,
RequestURI: "/v2/docker.com/foo/bar/baz/tags/list",
Vars: map[string]string{
"name": "docker.com/foo/bar/baz",
},
},
{
RouteName: RouteNameBlob,
RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
Vars: map[string]string{
"name": "foo/bar",
"digest": "sha256:abcdef0919234",
},
},
{
RouteName: RouteNameBlobUpload,
RequestURI: "/v2/foo/bar/blobs/uploads/",
Vars: map[string]string{
"name": "foo/bar",
},
},
{
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/bar/blobs/uploads/uuid",
Vars: map[string]string{
"name": "foo/bar",
"uuid": "uuid",
},
},
{
// support uuid proper
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
Vars: map[string]string{
"name": "foo/bar",
"uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
},
},
{
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
Vars: map[string]string{
"name": "foo/bar",
"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
},
},
{
// supports urlsafe base64
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==",
Vars: map[string]string{
"name": "foo/bar",
"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==",
},
},
{
// does not match
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==",
StatusCode: http.StatusNotFound,
},
{
// Check ambiguity: ensure we can distinguish between tags for
// "foo/bar/image/image" and image for "foo/bar/image" with tag
// "tags"
RouteName: RouteNameManifest,
RequestURI: "/v2/foo/bar/manifests/manifests/tags",
Vars: map[string]string{
"name": "foo/bar/manifests",
"reference": "tags",
},
},
{
// This case presents an ambiguity between foo/bar with tag="tags"
// and list tags for "foo/bar/manifest"
RouteName: RouteNameTags,
RequestURI: "/v2/foo/bar/manifests/tags/list",
Vars: map[string]string{
"name": "foo/bar/manifests",
},
},
{
RouteName: RouteNameManifest,
RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag",
Vars: map[string]string{
"name": "locahost:8080/foo/bar/baz",
"reference": "tag",
},
},
}
checkTestRouter(t, testCases, "", true)
checkTestRouter(t, testCases, "/prefix/", true)
}
func TestRouterWithPathTraversals(t *testing.T) {
testCases := []routeTestCase{
{
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
StatusCode: http.StatusNotFound,
},
{
// Testing for path traversal attack handling
RouteName: RouteNameTags,
RequestURI: "/v2/foo/../bar/baz/tags/list",
ExpectedURI: "/v2/bar/baz/tags/list",
Vars: map[string]string{
"name": "bar/baz",
},
},
}
checkTestRouter(t, testCases, "", false)
}
func TestRouterWithBadCharacters(t *testing.T) {
if testing.Short() {
testCases := []routeTestCase{
{
RouteName: RouteNameBlobUploadChunk,
RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286",
StatusCode: http.StatusNotFound,
},
{
// Testing for path traversal attack handling
RouteName: RouteNameTags,
RequestURI: "/v2/foo/不bar/tags/list",
StatusCode: http.StatusNotFound,
},
}
checkTestRouter(t, testCases, "", true)
} else {
// In the long version we're going to fuzz the router with random UTF-8
// characters outside the ASCII range. These are not valid characters for
// the router and we expect 404s on every test.
rand.Seed(time.Now().UTC().UnixNano())
testCases := make([]routeTestCase, 1000)
for idx := range testCases {
testCases[idx] = routeTestCase{
RouteName: RouteNameTags,
RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)),
StatusCode: http.StatusNotFound,
}
}
checkTestRouter(t, testCases, "", true)
}
}
func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) {
router := RouterWithPrefix(prefix)
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
testCase := routeTestCase{
RequestURI: r.RequestURI,
Vars: mux.Vars(r),
RouteName: mux.CurrentRoute(r).GetName(),
}
enc := json.NewEncoder(w)
if err := enc.Encode(testCase); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
})
// Start the test server
server := httptest.NewServer(router)
for _, testcase := range testCases {
testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI
// Register the endpoint
route := router.GetRoute(testcase.RouteName)
if route == nil {
t.Fatalf("route for name %q not found", testcase.RouteName)
}
route.Handler(testHandler)
u := server.URL + testcase.RequestURI
resp, err := http.Get(u)
if err != nil {
t.Fatalf("error issuing get request: %v", err)
}
if testcase.StatusCode == 0 {
// Override default, zero-value
testcase.StatusCode = http.StatusOK
}
if testcase.ExpectedURI == "" {
// Override default, zero-value
testcase.ExpectedURI = testcase.RequestURI
}
if resp.StatusCode != testcase.StatusCode {
t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode)
}
if testcase.StatusCode != http.StatusOK {
resp.Body.Close()
// We don't care about json response.
continue
}
dec := json.NewDecoder(resp.Body)
var actualRouteInfo routeTestCase
if err := dec.Decode(&actualRouteInfo); err != nil {
t.Fatalf("error reading json response: %v", err)
}
// Needs to be set out of band
actualRouteInfo.StatusCode = resp.StatusCode
if actualRouteInfo.RequestURI != testcase.ExpectedURI {
t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI)
}
if actualRouteInfo.RouteName != testcase.RouteName {
t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName)
}
// When testing deep equality, actualRouteInfo has an empty ExpectedURI; we
// don't want that to make the comparison fail. We're otherwise done with the
// testcase, so empty testcase.ExpectedURI.
testcase.ExpectedURI = ""
if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) {
t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase)
}
resp.Body.Close()
}
}
// -------------- START LICENSED CODE --------------
// The following code is derivative of https://github.com/google/gofuzz
// gofuzz is licensed under the Apache License, Version 2.0, January 2004,
// a copy of which can be found in the LICENSE file at the root of this
// repository.
// These functions allow us to generate strings containing only multibyte
// characters that are invalid in our URLs. They are used above for fuzzing
// to ensure we always get 404s on these invalid strings.
type charRange struct {
first, last rune
}
// choose returns a random unicode character from the given range, using the
// package-level randomness source.
func (r *charRange) choose() rune {
count := int64(r.last - r.first)
return r.first + rune(rand.Int63n(count))
}
var unicodeRanges = []charRange{
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
func randomString(length int) string {
runes := make([]rune, length)
for i := range runes {
runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose()
}
return string(runes)
}
// -------------- END LICENSED CODE --------------

251
docs/api/v2/urls.go Normal file

@ -0,0 +1,251 @@
package v2
import (
"net/http"
"net/url"
"strings"
"github.com/docker/distribution/reference"
"github.com/gorilla/mux"
)
// URLBuilder creates registry API urls from a single base endpoint. It can be
// used to create urls for use in a registry client or server.
//
// All urls will be created from the given base, including the api version.
// For example, if a root of "/foo/" is provided, urls generated will fall
// under "/foo/v2/...". Most applications will only provide a scheme, host and
// port, such as "https://localhost:5000/".
type URLBuilder struct {
root *url.URL // url root (ie http://localhost/)
router *mux.Router
relative bool
}
// NewURLBuilder creates a URLBuilder with provided root url object.
func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
return &URLBuilder{
root: root,
router: Router(),
relative: relative,
}
}
// NewURLBuilderFromString works identically to NewURLBuilder except it takes
// a string argument for the root, returning an error if it is not a valid
// url.
func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
u, err := url.Parse(root)
if err != nil {
return nil, err
}
return NewURLBuilder(u, relative), nil
}
// NewURLBuilderFromRequest uses information from an *http.Request to
// construct the root url.
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
var scheme string
forwardedProto := r.Header.Get("X-Forwarded-Proto")
switch {
case len(forwardedProto) > 0:
scheme = forwardedProto
case r.TLS != nil:
scheme = "https"
case len(r.URL.Scheme) > 0:
scheme = r.URL.Scheme
default:
scheme = "http"
}
host := r.Host
forwardedHost := r.Header.Get("X-Forwarded-Host")
if len(forwardedHost) > 0 {
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
// comma-separated list of hosts, to which each proxy appends the
// requested host. We want to grab the first from this comma-separated
// list.
hosts := strings.SplitN(forwardedHost, ",", 2)
host = strings.TrimSpace(hosts[0])
}
basePath := routeDescriptorsMap[RouteNameBase].Path
requestPath := r.URL.Path
index := strings.Index(requestPath, basePath)
u := &url.URL{
Scheme: scheme,
Host: host,
}
if index > 0 {
// N.B. index+1 is important because we want to include the trailing /
u.Path = requestPath[0 : index+1]
}
return NewURLBuilder(u, relative)
}
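// Illustrative sketch, not part of the original file: a proxied request whose
// X-Forwarded-* headers determine the builder's root, mirroring the cases in
// urls_test.go. The resulting root is https://first.example.com.
func exampleForwardedRoot(r *http.Request) *URLBuilder {
	r.Header.Set("X-Forwarded-Proto", "https")
	r.Header.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com")
	return NewURLBuilderFromRequest(r, false)
}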
// BuildBaseURL constructs a base url for the API, typically just "/v2/".
func (ub *URLBuilder) BuildBaseURL() (string, error) {
route := ub.cloneRoute(RouteNameBase)
baseURL, err := route.URL()
if err != nil {
return "", err
}
return baseURL.String(), nil
}
// BuildCatalogURL constructs a url to get a catalog of repositories.
func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameCatalog)
catalogURL, err := route.URL()
if err != nil {
return "", err
}
return appendValuesURL(catalogURL, values...).String(), nil
}
// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameTags)
tagsURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return tagsURL.String(), nil
}
// BuildManifestURL constructs a url for the manifest identified by name and
// reference. The argument reference may be either a tag or digest.
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameManifest)
tagOrDigest := ""
switch v := ref.(type) {
case reference.Tagged:
tagOrDigest = v.Tag()
case reference.Digested:
tagOrDigest = v.Digest().String()
}
manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
if err != nil {
return "", err
}
return manifestURL.String(), nil
}
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
route := ub.cloneRoute(RouteNameBlob)
layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
if err != nil {
return "", err
}
return layerURL.String(), nil
}
// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUpload)
uploadURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUploadChunk)
uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// cloneRoute returns a clone of the named route from the router. Routes
// must be cloned to avoid modifying them during url generation.
func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
route := new(mux.Route)
root := new(url.URL)
*route = *ub.router.GetRoute(name) // clone the route
*root = *ub.root
return clonedRoute{Route: route, root: root, relative: ub.relative}
}
type clonedRoute struct {
*mux.Route
root *url.URL
relative bool
}
func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
routeURL, err := cr.Route.URL(pairs...)
if err != nil {
return nil, err
}
if cr.relative {
return routeURL, nil
}
if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
routeURL.Path = routeURL.Path[1:]
}
url := cr.root.ResolveReference(routeURL)
url.Scheme = cr.root.Scheme
return url, nil
}
// appendValuesURL appends the parameters to the url.
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
merged := u.Query()
for _, v := range values {
for k, vv := range v {
merged[k] = append(merged[k], vv...)
}
}
u.RawQuery = merged.Encode()
return u
}
// appendValues appends the parameters to the url. Panics if the string is not
// a url.
func appendValues(u string, values ...url.Values) string {
up, err := url.Parse(u)
if err != nil {
panic(err) // should never happen
}
return appendValuesURL(up, values...).String()
}
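// Illustrative sketch, not part of the original file: building a manifest url
// from a fixed root. Error handling is elided and an fmt import is assumed.
func exampleBuildManifestURL() {
	ub, _ := NewURLBuilderFromString("https://registry.example.com", false)
	named, _ := reference.ParseNamed("foo/bar")
	tagged, _ := reference.WithTag(named, "latest")
	u, _ := ub.BuildManifestURL(tagged)
	fmt.Println(u) // https://registry.example.com/v2/foo/bar/manifests/latest
}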

334
docs/api/v2/urls_test.go Normal file

@ -0,0 +1,334 @@
package v2
import (
"net/http"
"net/url"
"testing"
"github.com/docker/distribution/reference"
)
type urlBuilderTestCase struct {
description string
expectedPath string
build func() (string, error)
}
func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
fooBarRef, _ := reference.ParseNamed("foo/bar")
return []urlBuilderTestCase{
{
description: "test base url",
expectedPath: "/v2/",
build: urlBuilder.BuildBaseURL,
},
{
description: "test tags url",
expectedPath: "/v2/foo/bar/tags/list",
build: func() (string, error) {
return urlBuilder.BuildTagsURL(fooBarRef)
},
},
{
description: "test manifest url",
expectedPath: "/v2/foo/bar/manifests/tag",
build: func() (string, error) {
ref, _ := reference.WithTag(fooBarRef, "tag")
return urlBuilder.BuildManifestURL(ref)
},
},
{
description: "build blob url",
expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
build: func() (string, error) {
ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5")
return urlBuilder.BuildBlobURL(ref)
},
},
{
description: "build blob upload url",
expectedPath: "/v2/foo/bar/blobs/uploads/",
build: func() (string, error) {
return urlBuilder.BuildBlobUploadURL(fooBarRef)
},
},
{
description: "build blob upload url with digest and size",
expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
build: func() (string, error) {
return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{
"size": []string{"10000"},
"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
})
},
},
{
description: "build blob upload chunk url",
expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part",
build: func() (string, error) {
return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part")
},
},
{
description: "build blob upload chunk url with digest and size",
expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
build: func() (string, error) {
return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{
"size": []string{"10000"},
"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
})
},
},
}
}
// TestURLBuilder tests the various url building functions, ensuring they are
// returning the expected values.
func TestURLBuilder(t *testing.T) {
roots := []string{
"http://example.com",
"https://example.com",
"http://localhost:5000",
"https://localhost:5443",
}
doTest := func(relative bool) {
for _, root := range roots {
urlBuilder, err := NewURLBuilderFromString(root, relative)
if err != nil {
t.Fatalf("unexpected error creating urlbuilder: %v", err)
}
for _, testCase := range makeURLBuilderTestCases(urlBuilder) {
url, err := testCase.build()
if err != nil {
t.Fatalf("%s: error building url: %v", testCase.description, err)
}
expectedURL := testCase.expectedPath
if !relative {
expectedURL = root + expectedURL
}
if url != expectedURL {
t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
}
}
}
}
doTest(true)
doTest(false)
}
func TestURLBuilderWithPrefix(t *testing.T) {
roots := []string{
"http://example.com/prefix/",
"https://example.com/prefix/",
"http://localhost:5000/prefix/",
"https://localhost:5443/prefix/",
}
doTest := func(relative bool) {
for _, root := range roots {
urlBuilder, err := NewURLBuilderFromString(root, relative)
if err != nil {
t.Fatalf("unexpected error creating urlbuilder: %v", err)
}
for _, testCase := range makeURLBuilderTestCases(urlBuilder) {
url, err := testCase.build()
if err != nil {
t.Fatalf("%s: error building url: %v", testCase.description, err)
}
expectedURL := testCase.expectedPath
if !relative {
expectedURL = root[0:len(root)-1] + expectedURL
}
if url != expectedURL {
t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
}
}
}
}
doTest(true)
doTest(false)
}
type builderFromRequestTestCase struct {
request *http.Request
base string
}
func TestBuilderFromRequest(t *testing.T) {
u, err := url.Parse("http://example.com")
if err != nil {
t.Fatal(err)
}
forwardedProtoHeader := make(http.Header, 1)
forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
forwardedHostHeader1 := make(http.Header, 1)
forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com")
forwardedHostHeader2 := make(http.Header, 1)
forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com")
testRequests := []struct {
request *http.Request
base string
configHost url.URL
}{
{
request: &http.Request{URL: u, Host: u.Host},
base: "http://example.com",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
base: "http://example.com",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
base: "https://example.com",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1},
base: "http://first.example.com",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2},
base: "http://first.example.com",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2},
base: "https://third.example.com:5000",
configHost: url.URL{
Scheme: "https",
Host: "third.example.com:5000",
},
},
}
doTest := func(relative bool) {
for _, tr := range testRequests {
var builder *URLBuilder
if tr.configHost.Scheme != "" && tr.configHost.Host != "" {
builder = NewURLBuilder(&tr.configHost, relative)
} else {
builder = NewURLBuilderFromRequest(tr.request, relative)
}
for _, testCase := range makeURLBuilderTestCases(builder) {
buildURL, err := testCase.build()
if err != nil {
t.Fatalf("%s: error building url: %v", testCase.description, err)
}
var expectedURL string
proto, ok := tr.request.Header["X-Forwarded-Proto"]
if !ok {
expectedURL = testCase.expectedPath
if !relative {
expectedURL = tr.base + expectedURL
}
} else {
urlBase, err := url.Parse(tr.base)
if err != nil {
t.Fatal(err)
}
urlBase.Scheme = proto[0]
expectedURL = testCase.expectedPath
if !relative {
expectedURL = urlBase.String() + expectedURL
}
}
if buildURL != expectedURL {
t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL)
}
}
}
}
doTest(true)
doTest(false)
}
func TestBuilderFromRequestWithPrefix(t *testing.T) {
u, err := url.Parse("http://example.com/prefix/v2/")
if err != nil {
t.Fatal(err)
}
forwardedProtoHeader := make(http.Header, 1)
forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
testRequests := []struct {
request *http.Request
base string
configHost url.URL
}{
{
request: &http.Request{URL: u, Host: u.Host},
base: "http://example.com/prefix/",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
base: "http://example.com/prefix/",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
base: "https://example.com/prefix/",
},
{
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
base: "https://subdomain.example.com/prefix/",
configHost: url.URL{
Scheme: "https",
Host: "subdomain.example.com",
Path: "/prefix/",
},
},
}
var relative bool
for _, tr := range testRequests {
var builder *URLBuilder
if tr.configHost.Scheme != "" && tr.configHost.Host != "" {
builder = NewURLBuilder(&tr.configHost, false)
} else {
builder = NewURLBuilderFromRequest(tr.request, false)
}
for _, testCase := range makeURLBuilderTestCases(builder) {
buildURL, err := testCase.build()
if err != nil {
t.Fatalf("%s: error building url: %v", testCase.description, err)
}
var expectedURL string
proto, ok := tr.request.Header["X-Forwarded-Proto"]
if !ok {
expectedURL = testCase.expectedPath
if !relative {
expectedURL = tr.base[0:len(tr.base)-1] + expectedURL
}
} else {
urlBase, err := url.Parse(tr.base)
if err != nil {
t.Fatal(err)
}
urlBase.Scheme = proto[0]
expectedURL = testCase.expectedPath
if !relative {
expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL
}
}
if buildURL != expectedURL {
t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL)
}
}
}
}

168
docs/auth/auth.go Normal file

@ -0,0 +1,168 @@
// Package auth defines a standard interface for request access controllers.
//
// An access controller has a simple interface with a single `Authorized`
// method which checks that a given request is authorized to perform one or
// more actions on one or more resources. This method should return a non-nil
// error if the request is not authorized.
//
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
//
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
// if challenge, ok := err.(auth.Challenge) {
// // Let the challenge write the response.
// challenge.SetHeaders(w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// }
// }
//
package auth
import (
"errors"
"fmt"
"net/http"
"github.com/docker/distribution/context"
)
const (
// UserKey is used to get the user object from
// a user context
UserKey = "auth.user"
// UserNameKey is used to get the user name from
// a user context
UserNameKey = "auth.user.name"
)
var (
// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
ErrInvalidCredential = errors.New("invalid authorization credential")
// ErrAuthenticationFailure is returned when authentication fails.
ErrAuthenticationFailure = errors.New("authentication failure")
)
// UserInfo carries information about
// an authenticated/authorized client.
type UserInfo struct {
Name string
}
// Resource describes a resource by type and name.
type Resource struct {
Type string
Name string
}
// Access describes a specific action that is
// requested or allowed for a given resource.
type Access struct {
Resource
Action string
}
// Challenge is a special error type which is used for HTTP 401 Unauthorized
// responses and is able to write the response with WWW-Authenticate challenge
// header values based on the error.
type Challenge interface {
error
// SetHeaders prepares the response to conduct a challenge by adding an
// HTTP challenge header to the response message. Callers
// are expected to set the appropriate HTTP status code (e.g. 401)
// themselves.
SetHeaders(w http.ResponseWriter)
}
// AccessController controls access to registry resources based on a request
// and required access levels for a request. Implementations can support both
// complete denial and http authorization challenges.
type AccessController interface {
// Authorized returns a new authorized context when the request is granted
// the requested access, and a non-nil error otherwise. If one or more Access
// structs are
// provided, the requested access will be compared with what is available
// to the context. The given context will contain a "http.request" key with
// a `*http.Request` value. If the error is non-nil, access should always
// be denied. The error may be of type Challenge, in which case the caller
// may have the Challenge handle the request or choose what action to take
// based on the Challenge header or response status. The returned context
// object should have an "auth.user" value set to a UserInfo struct.
Authorized(ctx context.Context, access ...Access) (context.Context, error)
}
// CredentialAuthenticator is an object which is able to authenticate credentials
type CredentialAuthenticator interface {
AuthenticateUser(username, password string) error
}
// WithUser returns a context with the authorized user info.
func WithUser(ctx context.Context, user UserInfo) context.Context {
return userInfoContext{
Context: ctx,
user: user,
}
}
type userInfoContext struct {
context.Context
user UserInfo
}
func (uic userInfoContext) Value(key interface{}) interface{} {
switch key {
case UserKey:
return uic.user
case UserNameKey:
return uic.user.Name
}
return uic.Context.Value(key)
}
// InitFunc is the type of an AccessController factory function and is used
// to register the constructor for different AccessController backends.
type InitFunc func(options map[string]interface{}) (AccessController, error)
var accessControllers map[string]InitFunc
func init() {
accessControllers = make(map[string]InitFunc)
}
// Register is used to register an InitFunc for
// an AccessController backend with the given name.
func Register(name string, initFunc InitFunc) error {
if _, exists := accessControllers[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
accessControllers[name] = initFunc
return nil
}
// GetAccessController constructs an AccessController
// with the given options using the named backend.
func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
if initFunc, exists := accessControllers[name]; exists {
return initFunc(options)
}
return nil, fmt.Errorf("no access controller registered with name: %s", name)
}
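// Illustrative sketch, not part of the original file: the smallest possible
// AccessController, registered under a hypothetical "allow-all" name.
type allowAllController struct{}

func (allowAllController) Authorized(ctx context.Context, access ...Access) (context.Context, error) {
	return WithUser(ctx, UserInfo{Name: "anonymous"}), nil
}

func init() {
	_ = Register("allow-all", func(options map[string]interface{}) (AccessController, error) {
		return allowAllController{}, nil
	})
}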

97
docs/auth/htpasswd/access.go Normal file

@ -0,0 +1,97 @@
// Package htpasswd provides a simple authentication scheme that checks for the
// user credential hash in an htpasswd formatted file in a configuration-determined
// location.
//
// This authentication method MUST be used under TLS, as a simple token-replay attack is otherwise possible.
package htpasswd
import (
"fmt"
"net/http"
"os"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
)
type accessController struct {
realm string
htpasswd *htpasswd
}
var _ auth.AccessController = &accessController{}
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
realm, present := options["realm"]
if _, ok := realm.(string); !present || !ok {
return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`)
}
path, present := options["path"]
if _, ok := path.(string); !present || !ok {
return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`)
}
f, err := os.Open(path.(string))
if err != nil {
return nil, err
}
defer f.Close()
h, err := newHTPasswd(f)
if err != nil {
return nil, err
}
return &accessController{realm: realm.(string), htpasswd: h}, nil
}
func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
req, err := context.GetRequest(ctx)
if err != nil {
return nil, err
}
username, password, ok := req.BasicAuth()
if !ok {
return nil, &challenge{
realm: ac.realm,
err: auth.ErrInvalidCredential,
}
}
if err := ac.AuthenticateUser(username, password); err != nil {
context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err)
return nil, &challenge{
realm: ac.realm,
err: auth.ErrAuthenticationFailure,
}
}
return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil
}
func (ac *accessController) AuthenticateUser(username, password string) error {
return ac.htpasswd.authenticateUser(username, password)
}
// challenge implements the auth.Challenge interface.
type challenge struct {
realm string
err error
}
var _ auth.Challenge = challenge{}
// SetHeaders sets the basic challenge header on the response.
func (ch challenge) SetHeaders(w http.ResponseWriter) {
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm))
}
func (ch challenge) Error() string {
return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err)
}
func init() {
auth.Register("htpasswd", auth.InitFunc(newAccessController))
}
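// Illustrative sketch, not part of the original file: constructing the
// controller through the generic auth entry point. The realm and path values
// are hypothetical.
func exampleNewAccessController() (auth.AccessController, error) {
	return auth.GetAccessController("htpasswd", map[string]interface{}{
		"realm": "registry.example.com",
		"path":  "/etc/registry/htpasswd",
	})
}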

122
docs/auth/htpasswd/access_test.go Normal file

@ -0,0 +1,122 @@
package htpasswd
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
)
func TestBasicAccessController(t *testing.T) {
testRealm := "The-Shire"
testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"}
testPasswords := []string{"baggins", "baggins", "새주", "공주님"}
testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
DeokMan:공주님`
tempFile, err := ioutil.TempFile("", "htpasswd-test")
if err != nil {
t.Fatal("could not create temporary htpasswd file")
}
if _, err = tempFile.WriteString(testHtpasswdContent); err != nil {
t.Fatal("could not write temporary htpasswd file")
}
options := map[string]interface{}{
"realm": testRealm,
"path": tempFile.Name(),
}
ctx := context.Background()
accessController, err := newAccessController(options)
if err != nil {
t.Fatal("error creating access controller")
}
tempFile.Close()
var userNumber = 0
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithRequest(ctx, r)
authCtx, err := accessController.Authorized(ctx)
if err != nil {
switch err := err.(type) {
case auth.Challenge:
err.SetHeaders(w)
w.WriteHeader(http.StatusUnauthorized)
return
default:
t.Fatalf("unexpected error authorizing request: %v", err)
}
}
userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo)
if !ok {
t.Fatal("basic accessController did not set auth.user context")
}
if userInfo.Name != testUsers[userNumber] {
t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name)
}
w.WriteHeader(http.StatusNoContent)
}))
client := &http.Client{
CheckRedirect: nil,
}
req, _ := http.NewRequest("GET", server.URL, nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
defer resp.Body.Close()
// Request should not be authorized
if resp.StatusCode != http.StatusUnauthorized {
t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
}
nonbcrypt := map[string]struct{}{
"bilbo": {},
"DeokMan": {},
}
for i := 0; i < len(testUsers); i++ {
userNumber = i
req, err := http.NewRequest("GET", server.URL, nil)
if err != nil {
t.Fatalf("error allocating new request: %v", err)
}
req.SetBasicAuth(testUsers[i], testPasswords[i])
resp, err = client.Do(req)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
defer resp.Body.Close()
if _, ok := nonbcrypt[testUsers[i]]; ok {
// non-bcrypt entries are not allowed; the request should be rejected
if resp.StatusCode != http.StatusUnauthorized {
t.Fatalf("unexpected non-fail response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i])
}
} else {
// Request should be authorized
if resp.StatusCode != http.StatusNoContent {
t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i])
}
}
}
}

@ -0,0 +1,82 @@
package htpasswd
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/docker/distribution/registry/auth"
"golang.org/x/crypto/bcrypt"
)
// htpasswd holds a path to a system .htpasswd file and the machinery to parse
// it. Only bcrypt hash entries are supported.
type htpasswd struct {
entries map[string][]byte // maps username to password byte slice.
}
// newHTPasswd parses the reader and returns an htpasswd or an error.
func newHTPasswd(rd io.Reader) (*htpasswd, error) {
entries, err := parseHTPasswd(rd)
if err != nil {
return nil, err
}
return &htpasswd{entries: entries}, nil
}
// authenticateUser checks a given user:password credential against the
// receiving htpasswd's entries. If the check passes, nil is returned.
func (htpasswd *htpasswd) authenticateUser(username string, password string) error {
credentials, ok := htpasswd.entries[username]
if !ok {
// timing attack paranoia
bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
return auth.ErrAuthenticationFailure
}
err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
if err != nil {
return auth.ErrAuthenticationFailure
}
return nil
}
// parseHTPasswd parses the contents of htpasswd. This will read all the
// entries in the file, whether or not they are needed. An error is returned
// if a syntax error is encountered or if the reader fails.
func parseHTPasswd(rd io.Reader) (map[string][]byte, error) {
entries := map[string][]byte{}
scanner := bufio.NewScanner(rd)
var line int
for scanner.Scan() {
line++ // 1-based line numbering
t := strings.TrimSpace(scanner.Text())
if len(t) < 1 {
continue
}
// lines that *begin* with a '#' are considered comments
if t[0] == '#' {
continue
}
i := strings.Index(t, ":")
if i < 0 || i >= len(t) {
return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text())
}
entries[t[:i]] = []byte(t[i+1:])
}
if err := scanner.Err(); err != nil {
return nil, err
}
return entries, nil
}
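
Since authenticateUser relies on bcrypt.CompareHashAndPassword, only bcrypt
entries ever verify; SHA or plaintext lines parse but always fail. A sketch
of generating a compatible entry (user name and password are illustrative):

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// GenerateFromPassword produces a "$2a$..." hash that
	// CompareHashAndPassword in authenticateUser accepts.
	hash, err := bcrypt.GenerateFromPassword([]byte("baggins"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Printf("bilbo:%s\n", hash) // one htpasswd line: name ':' hash
}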

@ -0,0 +1,85 @@
package htpasswd
import (
"fmt"
"reflect"
"strings"
"testing"
)
func TestParseHTPasswd(t *testing.T) {
for _, tc := range []struct {
desc string
input string
err error
entries map[string][]byte
}{
{
desc: "basic example",
input: `
# This is a comment in a basic example.
bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
DeokMan:공주님
`,
entries: map[string][]byte{
"bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="),
"frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"),
"MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"),
"DeokMan": []byte("공주님"),
},
},
{
desc: "ensures comments are filtered",
input: `
# asdf:asdf
`,
},
{
desc: "ensure midline hash is not comment",
input: `
asdf:as#df
`,
entries: map[string][]byte{
"asdf": []byte("as#df"),
},
},
{
desc: "ensure midline hash is not comment",
input: `
# A valid comment
valid:entry
asdf
`,
err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`),
},
} {
entries, err := parseHTPasswd(strings.NewReader(tc.input))
if err != tc.err {
if tc.err == nil {
t.Fatalf("%s: unexpected error: %v", tc.desc, err)
} else {
if err.Error() != tc.err.Error() { // use string equality here.
t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err)
}
}
}
if tc.err != nil {
continue // don't test output
}
// allow empty and nil to be equal
if tc.entries == nil {
tc.entries = map[string][]byte{}
}
if !reflect.DeepEqual(entries, tc.entries) {
t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
}
}
}

97
docs/auth/silly/access.go Normal file
@ -0,0 +1,97 @@
// Package silly provides a simple authentication scheme that checks for the
// existence of an Authorization header and issues access if it is present and
// non-empty.
//
// This package is present as an example implementation of a minimal
// auth.AccessController and for testing. This is not suitable for any kind of
// production security.
package silly
import (
"fmt"
"net/http"
"strings"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
)
// accessController provides a simple implementation of auth.AccessController
// that checks for a non-empty Authorization header. It is useful for
// demonstration and testing.
type accessController struct {
realm string
service string
}
var _ auth.AccessController = &accessController{}
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
realm, present := options["realm"]
if _, ok := realm.(string); !present || !ok {
return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
}
service, present := options["service"]
if _, ok := service.(string); !present || !ok {
return nil, fmt.Errorf(`"service" must be set for silly access controller`)
}
return &accessController{realm: realm.(string), service: service.(string)}, nil
}
// Authorized simply checks for the existence of the authorization header,
// responding with a bearer challenge if it doesn't exist.
func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
req, err := context.GetRequest(ctx)
if err != nil {
return nil, err
}
if req.Header.Get("Authorization") == "" {
challenge := challenge{
realm: ac.realm,
service: ac.service,
}
if len(accessRecords) > 0 {
var scopes []string
for _, access := range accessRecords {
scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action))
}
challenge.scope = strings.Join(scopes, " ")
}
return nil, &challenge
}
return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil
}
type challenge struct {
realm string
service string
scope string
}
var _ auth.Challenge = challenge{}
// SetHeaders sets a simple bearer challenge on the response.
func (ch challenge) SetHeaders(w http.ResponseWriter) {
header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)
if ch.scope != "" {
header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
}
w.Header().Set("WWW-Authenticate", header)
}
func (ch challenge) Error() string {
return fmt.Sprintf("silly authentication challenge: %#v", ch)
}
// init registers the silly auth backend.
func init() {
auth.Register("silly", auth.InitFunc(newAccessController))
}
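
A small sketch of the header value SetHeaders emits, written as if it lived
inside package silly; the realm, service, and scope values are illustrative.

package silly

import (
	"fmt"
	"net/http/httptest"
)

func demoChallengeHeader() {
	rec := httptest.NewRecorder()
	ch := challenge{realm: "test-realm", service: "test-service", scope: "repository:foo/bar:pull"}
	ch.SetHeaders(rec)
	fmt.Println(rec.Header().Get("WWW-Authenticate"))
	// Bearer realm="test-realm",service="test-service",scope="repository:foo/bar:pull"
}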

@ -0,0 +1,71 @@
package silly
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
)
func TestSillyAccessController(t *testing.T) {
ac := &accessController{
realm: "test-realm",
service: "test-service",
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(nil, "http.request", r)
authCtx, err := ac.Authorized(ctx)
if err != nil {
switch err := err.(type) {
case auth.Challenge:
err.SetHeaders(w)
w.WriteHeader(http.StatusUnauthorized)
return
default:
t.Fatalf("unexpected error authorizing request: %v", err)
}
}
userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo)
if !ok {
t.Fatal("silly accessController did not set auth.user context")
}
if userInfo.Name != "silly" {
t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name)
}
w.WriteHeader(http.StatusNoContent)
}))
resp, err := http.Get(server.URL)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
defer resp.Body.Close()
// Request should not be authorized
if resp.StatusCode != http.StatusUnauthorized {
t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
}
req, err := http.NewRequest("GET", server.URL, nil)
if err != nil {
t.Fatalf("unexpected error creating new request: %v", err)
}
req.Header.Set("Authorization", "seriously, anything")
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
defer resp.Body.Close()
// Request should not be authorized
if resp.StatusCode != http.StatusNoContent {
t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent)
}
}

@ -0,0 +1,268 @@
package token
import (
"crypto"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
"github.com/docker/libtrust"
)
// accessSet maps a typed, named resource to
// a set of actions requested or authorized.
type accessSet map[auth.Resource]actionSet
// newAccessSet constructs an accessSet from
// a variable number of auth.Access items.
func newAccessSet(accessItems ...auth.Access) accessSet {
accessSet := make(accessSet, len(accessItems))
for _, access := range accessItems {
resource := auth.Resource{
Type: access.Type,
Name: access.Name,
}
set, exists := accessSet[resource]
if !exists {
set = newActionSet()
accessSet[resource] = set
}
set.add(access.Action)
}
return accessSet
}
// contains returns whether or not the given access is in this accessSet.
func (s accessSet) contains(access auth.Access) bool {
actionSet, ok := s[access.Resource]
if ok {
return actionSet.contains(access.Action)
}
return false
}
// scopeParam returns a collection of scopes which can
// be used for a WWW-Authenticate challenge parameter.
// See https://tools.ietf.org/html/rfc6750#section-3
func (s accessSet) scopeParam() string {
scopes := make([]string, 0, len(s))
for resource, actionSet := range s {
actions := strings.Join(actionSet.keys(), ",")
scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions))
}
return strings.Join(scopes, " ")
}
// Errors used and exported by this package.
var (
ErrInsufficientScope = errors.New("insufficient scope")
ErrTokenRequired = errors.New("authorization token required")
)
// authChallenge implements the auth.Challenge interface.
type authChallenge struct {
err error
realm string
service string
accessSet accessSet
}
var _ auth.Challenge = authChallenge{}
// Error returns the internal error string for this authChallenge.
func (ac authChallenge) Error() string {
return ac.err.Error()
}
// Status returns the HTTP Response Status Code for this authChallenge.
func (ac authChallenge) Status() int {
return http.StatusUnauthorized
}
// challengeParams constructs the value to be used in
// the WWW-Authenticate response challenge header.
// See https://tools.ietf.org/html/rfc6750#section-3
func (ac authChallenge) challengeParams() string {
str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)
if scope := ac.accessSet.scopeParam(); scope != "" {
str = fmt.Sprintf("%s,scope=%q", str, scope)
}
if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
str = fmt.Sprintf("%s,error=%q", str, "invalid_token")
} else if ac.err == ErrInsufficientScope {
str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope")
}
return str
}
// SetHeaders sets the WWW-Authenticate value for the response.
func (ac authChallenge) SetHeaders(w http.ResponseWriter) {
w.Header().Add("WWW-Authenticate", ac.challengeParams())
}
// accessController implements the auth.AccessController interface.
type accessController struct {
realm string
issuer string
service string
rootCerts *x509.CertPool
trustedKeys map[string]libtrust.PublicKey
}
// tokenAccessOptions is a convenience type for handling
// options to the constructor of an accessController.
type tokenAccessOptions struct {
realm string
issuer string
service string
rootCertBundle string
}
// checkOptions gathers the necessary options
// for an accessController from the given map.
func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
var opts tokenAccessOptions
keys := []string{"realm", "issuer", "service", "rootcertbundle"}
vals := make([]string, 0, len(keys))
for _, key := range keys {
val, ok := options[key].(string)
if !ok {
return opts, fmt.Errorf("token auth requires a valid option string: %q", key)
}
vals = append(vals, val)
}
opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]
return opts, nil
}
// newAccessController creates an accessController using the given options.
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
config, err := checkOptions(options)
if err != nil {
return nil, err
}
fp, err := os.Open(config.rootCertBundle)
if err != nil {
return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
}
defer fp.Close()
rawCertBundle, err := ioutil.ReadAll(fp)
if err != nil {
return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
}
var rootCerts []*x509.Certificate
pemBlock, rawCertBundle := pem.Decode(rawCertBundle)
for pemBlock != nil {
cert, err := x509.ParseCertificate(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
}
rootCerts = append(rootCerts, cert)
pemBlock, rawCertBundle = pem.Decode(rawCertBundle)
}
if len(rootCerts) == 0 {
return nil, errors.New("token auth requires at least one token signing root certificate")
}
rootPool := x509.NewCertPool()
trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts))
for _, rootCert := range rootCerts {
rootPool.AddCert(rootCert)
pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey))
if err != nil {
return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err)
}
trustedKeys[pubKey.KeyID()] = pubKey
}
return &accessController{
realm: config.realm,
issuer: config.issuer,
service: config.service,
rootCerts: rootPool,
trustedKeys: trustedKeys,
}, nil
}
// Authorized handles checking whether the given request is authorized
// for actions on resources described by the given access items.
func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) {
challenge := &authChallenge{
realm: ac.realm,
service: ac.service,
accessSet: newAccessSet(accessItems...),
}
req, err := context.GetRequest(ctx)
if err != nil {
return nil, err
}
parts := strings.Split(req.Header.Get("Authorization"), " ")
if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
challenge.err = ErrTokenRequired
return nil, challenge
}
rawToken := parts[1]
token, err := NewToken(rawToken)
if err != nil {
challenge.err = err
return nil, challenge
}
verifyOpts := VerifyOptions{
TrustedIssuers: []string{ac.issuer},
AcceptedAudiences: []string{ac.service},
Roots: ac.rootCerts,
TrustedKeys: ac.trustedKeys,
}
if err = token.Verify(verifyOpts); err != nil {
challenge.err = err
return nil, challenge
}
accessSet := token.accessSet()
for _, access := range accessItems {
if !accessSet.contains(access) {
challenge.err = ErrInsufficientScope
return nil, challenge
}
}
return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
}
// init handles registering the token auth backend.
func init() {
auth.Register("token", auth.InitFunc(newAccessController))
}
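
A configuration sketch showing the four option keys checkOptions requires,
assuming the token backend is imported from its registry path; hostnames and
file paths are illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/auth"
	// Blank import runs the init() above, registering the "token" backend.
	_ "github.com/docker/distribution/registry/auth/token"
)

func main() {
	ac, err := auth.GetAccessController("token", map[string]interface{}{
		"realm":          "https://auth.example.com/token/",
		"issuer":         "auth.example.com",
		"service":        "registry.example.com",
		"rootcertbundle": "/etc/registry/root.crt", // PEM bundle of signing roots
	})
	if err != nil {
		fmt.Println("init failed:", err)
		return
	}
	fmt.Printf("%T\n", ac)
}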

@ -0,0 +1,35 @@
package token
// stringSet is a useful type for looking up strings.
type stringSet map[string]struct{}
// newStringSet creates a new stringSet with the given strings.
func newStringSet(keys ...string) stringSet {
ss := make(stringSet, len(keys))
ss.add(keys...)
return ss
}
// add inserts the given keys into this stringSet.
func (ss stringSet) add(keys ...string) {
for _, key := range keys {
ss[key] = struct{}{}
}
}
// contains returns whether the given key is in this stringSet.
func (ss stringSet) contains(key string) bool {
_, ok := ss[key]
return ok
}
// keys returns a slice of all keys in this stringSet.
func (ss stringSet) keys() []string {
keys := make([]string, 0, len(ss))
for key := range ss {
keys = append(keys, key)
}
return keys
}

343
docs/auth/token/token.go Normal file
@ -0,0 +1,343 @@
package token
import (
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/libtrust"
"github.com/docker/distribution/registry/auth"
)
const (
// TokenSeparator is the value which separates the header, claims, and
// signature in the compact serialization of a JSON Web Token.
TokenSeparator = "."
)
// Errors used by token parsing and verification.
var (
ErrMalformedToken = errors.New("malformed token")
ErrInvalidToken = errors.New("invalid token")
)
// ResourceActions stores allowed actions on a named and typed resource.
type ResourceActions struct {
Type string `json:"type"`
Name string `json:"name"`
Actions []string `json:"actions"`
}
// ClaimSet describes the main section of a JSON Web Token.
type ClaimSet struct {
// Public claims
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience string `json:"aud"`
Expiration int64 `json:"exp"`
NotBefore int64 `json:"nbf"`
IssuedAt int64 `json:"iat"`
JWTID string `json:"jti"`
// Private claims
Access []*ResourceActions `json:"access"`
}
// Header describes the header section of a JSON Web Token.
type Header struct {
Type string `json:"typ"`
SigningAlg string `json:"alg"`
KeyID string `json:"kid,omitempty"`
X5c []string `json:"x5c,omitempty"`
RawJWK *json.RawMessage `json:"jwk,omitempty"`
}
// Token describes a JSON Web Token.
type Token struct {
Raw string
Header *Header
Claims *ClaimSet
Signature []byte
}
// VerifyOptions is used to specify
// options when verifying a JSON Web Token.
type VerifyOptions struct {
TrustedIssuers []string
AcceptedAudiences []string
Roots *x509.CertPool
TrustedKeys map[string]libtrust.PublicKey
}
// NewToken parses the given raw token string
// and constructs an unverified JSON Web Token.
func NewToken(rawToken string) (*Token, error) {
parts := strings.Split(rawToken, TokenSeparator)
if len(parts) != 3 {
return nil, ErrMalformedToken
}
var (
rawHeader, rawClaims = parts[0], parts[1]
headerJSON, claimsJSON []byte
err error
)
defer func() {
if err != nil {
log.Errorf("error while unmarshalling raw token: %s", err)
}
}()
if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil {
err = fmt.Errorf("unable to decode header: %s", err)
return nil, ErrMalformedToken
}
if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil {
err = fmt.Errorf("unable to decode claims: %s", err)
return nil, ErrMalformedToken
}
token := new(Token)
token.Header = new(Header)
token.Claims = new(ClaimSet)
token.Raw = strings.Join(parts[:2], TokenSeparator)
if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil {
err = fmt.Errorf("unable to decode signature: %s", err)
return nil, ErrMalformedToken
}
if err = json.Unmarshal(headerJSON, token.Header); err != nil {
return nil, ErrMalformedToken
}
if err = json.Unmarshal(claimsJSON, token.Claims); err != nil {
return nil, ErrMalformedToken
}
return token, nil
}
// Verify attempts to verify this token using the given options.
// Returns a nil error if the token is valid.
func (t *Token) Verify(verifyOpts VerifyOptions) error {
// Verify that the Issuer claim is a trusted authority.
if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) {
log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
return ErrInvalidToken
}
// Verify that the Audience claim is allowed.
if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) {
log.Errorf("token intended for another audience: %q", t.Claims.Audience)
return ErrInvalidToken
}
// Verify that the token is currently usable and not expired.
currentUnixTime := time.Now().Unix()
if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
return ErrInvalidToken
}
// Verify the token signature.
if len(t.Signature) == 0 {
log.Error("token has no signature")
return ErrInvalidToken
}
// Verify that the signing key is trusted.
signingKey, err := t.VerifySigningKey(verifyOpts)
if err != nil {
log.Error(err)
return ErrInvalidToken
}
// Finally, verify the signature of the token using the key which signed it.
if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
log.Errorf("unable to verify token signature: %s", err)
return ErrInvalidToken
}
return nil
}
// VerifySigningKey attempts to get the key which was used to sign this token.
// The token header should contain one of these three fields:
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
// May contain its own `x5c` field which needs to be verified.
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
// Each of these methods is tried in that order of preference until the
// signing key is found or an error is returned.
func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
// First attempt to get an x509 certificate chain from the header.
var (
x5c = t.Header.X5c
rawJWK = t.Header.RawJWK
keyID = t.Header.KeyID
)
switch {
case len(x5c) > 0:
signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots)
case rawJWK != nil:
signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts)
case len(keyID) > 0:
signingKey = verifyOpts.TrustedKeys[keyID]
if signingKey == nil {
err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID)
}
default:
err = errors.New("unable to get token signing key")
}
return
}
func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) {
if len(x5c) == 0 {
return nil, errors.New("empty x509 certificate chain")
}
// Ensure the first element is encoded correctly.
leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0])
if err != nil {
return nil, fmt.Errorf("unable to decode leaf certificate: %s", err)
}
// And that it is a valid x509 certificate.
leafCert, err := x509.ParseCertificate(leafCertDer)
if err != nil {
return nil, fmt.Errorf("unable to parse leaf certificate: %s", err)
}
// The rest of the certificate chain are intermediate certificates.
intermediates := x509.NewCertPool()
for i := 1; i < len(x5c); i++ {
intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i])
if err != nil {
return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err)
}
intermediateCert, err := x509.ParseCertificate(intermediateCertDer)
if err != nil {
return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err)
}
intermediates.AddCert(intermediateCert)
}
verifyOpts := x509.VerifyOptions{
Intermediates: intermediates,
Roots: roots,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
// TODO: this call returns certificate chains which we ignore for now, but
// we should check them for revocations if we have the ability later.
if _, err = leafCert.Verify(verifyOpts); err != nil {
return nil, fmt.Errorf("unable to verify certificate chain: %s", err)
}
// Get the public key from the leaf certificate.
leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey)
if !ok {
return nil, errors.New("unable to get leaf cert public key value")
}
leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey)
if err != nil {
return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err)
}
return
}
func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) {
pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK))
if err != nil {
return nil, fmt.Errorf("unable to decode raw JWK value: %s", err)
}
// Check to see if the key includes a certificate chain.
x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{})
if !ok {
// The JWK should be one of the trusted root keys.
if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted {
return nil, errors.New("untrusted JWK with no certificate chain")
}
// The JWK is one of the trusted keys.
return
}
// Ensure each item in the chain is of the correct type.
x5c := make([]string, len(x5cVal))
for i, val := range x5cVal {
certString, ok := val.(string)
if !ok || len(certString) == 0 {
return nil, errors.New("malformed certificate chain")
}
x5c[i] = certString
}
// Ensure that the x509 certificate chain can
// be verified up to one of our trusted roots.
leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots)
if err != nil {
return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err)
}
// Verify that the public key in the leaf cert *is* the signing key.
if pubKey.KeyID() != leafKey.KeyID() {
return nil, errors.New("leaf certificate public key ID does not match JWK key ID")
}
return
}
// accessSet returns a set of actions available for the resource
// actions listed in the `access` section of this token.
func (t *Token) accessSet() accessSet {
if t.Claims == nil {
return nil
}
accessSet := make(accessSet, len(t.Claims.Access))
for _, resourceActions := range t.Claims.Access {
resource := auth.Resource{
Type: resourceActions.Type,
Name: resourceActions.Name,
}
set, exists := accessSet[resource]
if !exists {
set = newActionSet()
accessSet[resource] = set
}
for _, action := range resourceActions.Actions {
set.add(action)
}
}
return accessSet
}
func (t *Token) compactRaw() string {
return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
}

@ -0,0 +1,387 @@
package token
import (
"crypto"
"crypto/rand"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
"github.com/docker/libtrust"
)
func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) {
keys := make([]libtrust.PrivateKey, 0, numKeys)
for i := 0; i < numKeys; i++ {
key, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
return nil, err
}
keys = append(keys, key)
}
return keys, nil
}
func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) {
if depth == 0 {
// Don't need to build a chain.
return rootKey, nil
}
var (
x5c = make([]string, depth)
parentKey = rootKey
key libtrust.PrivateKey
cert *x509.Certificate
err error
)
for depth > 0 {
if key, err = libtrust.GenerateECP256PrivateKey(); err != nil {
return nil, err
}
if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil {
return nil, err
}
depth--
x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw)
parentKey = key
}
key.AddExtendedField("x5c", x5c)
return key, nil
}
func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) {
certs := make([]*x509.Certificate, 0, len(rootKeys))
for _, key := range rootKeys {
cert, err := libtrust.GenerateCACert(key, key)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
return certs, nil
}
func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey {
trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys))
for _, key := range rootKeys {
trustedKeys[key.KeyID()] = key.PublicKey()
}
return trustedKeys
}
func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) {
signingKey, err := makeSigningKeyWithChain(rootKey, depth)
if err != nil {
return nil, fmt.Errorf("unable to make signing key with chain: %s", err)
}
var rawJWK json.RawMessage
rawJWK, err = signingKey.PublicKey().MarshalJSON()
if err != nil {
return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err)
}
joseHeader := &Header{
Type: "JWT",
SigningAlg: "ES256",
RawJWK: &rawJWK,
}
now := time.Now()
randomBytes := make([]byte, 15)
if _, err = rand.Read(randomBytes); err != nil {
return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err)
}
claimSet := &ClaimSet{
Issuer: issuer,
Subject: "foo",
Audience: audience,
Expiration: now.Add(5 * time.Minute).Unix(),
NotBefore: now.Unix(),
IssuedAt: now.Unix(),
JWTID: base64.URLEncoding.EncodeToString(randomBytes),
Access: access,
}
var joseHeaderBytes, claimSetBytes []byte
if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil {
return nil, fmt.Errorf("unable to marshal jose header: %s", err)
}
if claimSetBytes, err = json.Marshal(claimSet); err != nil {
return nil, fmt.Errorf("unable to marshal claim set: %s", err)
}
encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes)
encodedClaimSet := joseBase64UrlEncode(claimSetBytes)
encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet)
var signatureBytes []byte
if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil {
return nil, fmt.Errorf("unable to sign jwt payload: %s", err)
}
signature := joseBase64UrlEncode(signatureBytes)
tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature)
return NewToken(tokenString)
}
// This test makes 4 tokens with a varying number of intermediate
// certificates ranging from no intermediate chain to a length of 3
// intermediates.
func TestTokenVerify(t *testing.T) {
var (
numTokens = 4
issuer = "test-issuer"
audience = "test-audience"
access = []*ResourceActions{
{
Type: "repository",
Name: "foo/bar",
Actions: []string{"pull", "push"},
},
}
)
rootKeys, err := makeRootKeys(numTokens)
if err != nil {
t.Fatal(err)
}
rootCerts, err := makeRootCerts(rootKeys)
if err != nil {
t.Fatal(err)
}
rootPool := x509.NewCertPool()
for _, rootCert := range rootCerts {
rootPool.AddCert(rootCert)
}
trustedKeys := makeTrustedKeyMap(rootKeys)
tokens := make([]*Token, 0, numTokens)
for i := 0; i < numTokens; i++ {
token, err := makeTestToken(issuer, audience, access, rootKeys[i], i)
if err != nil {
t.Fatal(err)
}
tokens = append(tokens, token)
}
verifyOps := VerifyOptions{
TrustedIssuers: []string{issuer},
AcceptedAudiences: []string{audience},
Roots: rootPool,
TrustedKeys: trustedKeys,
}
for _, token := range tokens {
if err := token.Verify(verifyOps); err != nil {
t.Fatal(err)
}
}
}
func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) {
rootCerts, err := makeRootCerts(rootKeys)
if err != nil {
return "", err
}
tempFile, err := ioutil.TempFile("", "rootCertBundle")
if err != nil {
return "", err
}
defer tempFile.Close()
for _, cert := range rootCerts {
if err = pem.Encode(tempFile, &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}); err != nil {
os.Remove(tempFile.Name())
return "", err
}
}
return tempFile.Name(), nil
}
// TestAccessController tests complete integration of the token auth package.
// It starts by mocking the options for a token auth accessController which
// it creates. It then tries a few mock requests:
// - don't supply a token; should error with challenge
// - supply an invalid token; should error with challenge
// - supply a token with insufficient access; should error with challenge
// - supply a valid token; should not error
func TestAccessController(t *testing.T) {
// Make 2 keys; only the first is to be a trusted root key.
rootKeys, err := makeRootKeys(2)
if err != nil {
t.Fatal(err)
}
rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1])
if err != nil {
t.Fatal(err)
}
defer os.Remove(rootCertBundleFilename)
realm := "https://auth.example.com/token/"
issuer := "test-issuer.example.com"
service := "test-service.example.com"
options := map[string]interface{}{
"realm": realm,
"issuer": issuer,
"service": service,
"rootcertbundle": rootCertBundleFilename,
}
accessController, err := newAccessController(options)
if err != nil {
t.Fatal(err)
}
// 1. Make a mock http.Request with no token.
req, err := http.NewRequest("GET", "http://example.com/foo", nil)
if err != nil {
t.Fatal(err)
}
testAccess := auth.Access{
Resource: auth.Resource{
Type: "foo",
Name: "bar",
},
Action: "baz",
}
ctx := context.WithValue(nil, "http.request", req)
authCtx, err := accessController.Authorized(ctx, testAccess)
challenge, ok := err.(auth.Challenge)
if !ok {
t.Fatal("accessController did not return a challenge")
}
if challenge.Error() != ErrTokenRequired.Error() {
t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
}
if authCtx != nil {
t.Fatalf("expected nil auth context but got %s", authCtx)
}
// 2. Supply an invalid token.
token, err := makeTestToken(
issuer, service,
[]*ResourceActions{{
Type: testAccess.Type,
Name: testAccess.Name,
Actions: []string{testAccess.Action},
}},
rootKeys[1], 1, // Everything is valid except the key which signed it.
)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
authCtx, err = accessController.Authorized(ctx, testAccess)
challenge, ok = err.(auth.Challenge)
if !ok {
t.Fatal("accessController did not return a challenge")
}
if challenge.Error() != ErrInvalidToken.Error() {
t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
}
if authCtx != nil {
t.Fatalf("expected nil auth context but got %s", authCtx)
}
// 3. Supply a token with insufficient access.
token, err = makeTestToken(
issuer, service,
[]*ResourceActions{}, // No access specified.
rootKeys[0], 1,
)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
authCtx, err = accessController.Authorized(ctx, testAccess)
challenge, ok = err.(auth.Challenge)
if !ok {
t.Fatal("accessController did not return a challenge")
}
if challenge.Error() != ErrInsufficientScope.Error() {
t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope)
}
if authCtx != nil {
t.Fatalf("expected nil auth context but got %s", authCtx)
}
// 4. Supply the token we need, or deserve, or whatever.
token, err = makeTestToken(
issuer, service,
[]*ResourceActions{{
Type: testAccess.Type,
Name: testAccess.Name,
Actions: []string{testAccess.Action},
}},
rootKeys[0], 1,
)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
authCtx, err = accessController.Authorized(ctx, testAccess)
if err != nil {
t.Fatalf("accessController returned unexpected error: %s", err)
}
userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo)
if !ok {
t.Fatal("token accessController did not set auth.user context")
}
if userInfo.Name != "foo" {
t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name)
}
}

58
docs/auth/token/util.go Normal file
@ -0,0 +1,58 @@
package token
import (
"encoding/base64"
"errors"
"strings"
)
// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
switch len(s) % 4 {
case 0:
case 2:
s += "=="
case 3:
s += "="
default:
return nil, errors.New("illegal base64url string")
}
return base64.URLEncoding.DecodeString(s)
}
// actionSet is a special type of stringSet.
type actionSet struct {
stringSet
}
func newActionSet(actions ...string) actionSet {
return actionSet{newStringSet(actions...)}
}
// contains calls stringSet.contains() for
// either "*" or the given action string.
func (s actionSet) contains(action string) bool {
return s.stringSet.contains("*") || s.stringSet.contains(action)
}
// contains returns true if q is found in ss.
func contains(ss []string, q string) bool {
for _, s := range ss {
if s == q {
return true
}
}
return false
}
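
Two behaviors above are easy to miss, shown in a sketch written as if inside
package token: the jose helpers round-trip without '=' padding, and actionSet
treats "*" as a wildcard matching any action.

package token

import "fmt"

func demoUtil() {
	enc := joseBase64UrlEncode([]byte("hello jose"))
	dec, _ := joseBase64UrlDecode(enc)
	fmt.Println(enc, string(dec)) // aGVsbG8gam9zZQ hello jose

	wild := newActionSet("*")
	fmt.Println(wild.contains("push"), wild.contains("delete")) // true true
}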

@ -0,0 +1,58 @@
package auth
import (
"net/http"
"strings"
)
// APIVersion represents a version of an API including its
// type and version number.
type APIVersion struct {
// Type refers to the name of a specific API specification
// such as "registry"
Type string
// Version is the version of the API specification implemented.
// This may omit the revision number and only include
// the major and minor version, such as "2.0"
Version string
}
// String returns the string-formatted API version.
func (v APIVersion) String() string {
return v.Type + "/" + v.Version
}
// APIVersions gets the API versions out of an HTTP response using the provided
// version header as the key for the HTTP header.
func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
versions := []APIVersion{}
if versionHeader != "" {
for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
for _, version := range strings.Fields(supportedVersions) {
versions = append(versions, ParseAPIVersion(version))
}
}
}
return versions
}
// ParseAPIVersion parses an API version string into an APIVersion.
// Format (Expected, not enforced):
// API version string = <API type> '/' <API version>
// API type = [a-z][a-z0-9]*
// API version = [0-9]+(\.[0-9]+)?
// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
func ParseAPIVersion(versionStr string) APIVersion {
idx := strings.IndexRune(versionStr, '/')
if idx == -1 {
return APIVersion{
Type: "unknown",
Version: versionStr,
}
}
return APIVersion{
Type: strings.ToLower(versionStr[:idx]),
Version: versionStr[idx+1:],
}
}
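
For example, the registry advertises its API version in a response header
(commonly "registry/2.0"); a sketch of both parse paths, with the import path
assumed from the repository layout:

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	fmt.Println(auth.ParseAPIVersion("registry/2.0")) // registry/2.0
	v := auth.ParseAPIVersion("2.0")                  // no '/', so type is unknown
	fmt.Println(v.Type, v.Version)                    // unknown 2.0
}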

@ -0,0 +1,220 @@
package auth
import (
"fmt"
"net/http"
"net/url"
"strings"
)
// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
// Scheme is the auth-scheme according to RFC 2617
Scheme string
// Parameters are the auth-params according to RFC 2617
Parameters map[string]string
}
// ChallengeManager manages the challenges for endpoints.
// The challenges are pulled out of HTTP responses. Only
// responses which expect challenges should be added to
// the manager, since a non-unauthorized request will be
// viewed as not requiring challenges.
type ChallengeManager interface {
// GetChallenges returns the challenges for the given
// endpoint URL.
GetChallenges(endpoint url.URL) ([]Challenge, error)
// AddResponse adds the response to the challenge
// manager. The challenges will be parsed out of
// the WWW-Authenticate headers and added to the
// URL which produced the response. If the
// response was authorized, any challenges for the
// endpoint will be cleared.
AddResponse(resp *http.Response) error
}
// NewSimpleChallengeManager returns an instance of
// ChallengeManager which only maps endpoints to challenges
// based on the responses which have been added to the
// manager. The simple manager will make no attempt to
// perform requests on the endpoints or cache the responses
// to a backend.
func NewSimpleChallengeManager() ChallengeManager {
return simpleChallengeManager{}
}
type simpleChallengeManager map[string][]Challenge
func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
endpoint.Host = strings.ToLower(endpoint.Host)
challenges := m[endpoint.String()]
return challenges, nil
}
func (m simpleChallengeManager) AddResponse(resp *http.Response) error {
challenges := ResponseChallenges(resp)
if resp.Request == nil {
return fmt.Errorf("missing request reference")
}
urlCopy := url.URL{
Path: resp.Request.URL.Path,
Host: strings.ToLower(resp.Request.URL.Host),
Scheme: resp.Request.URL.Scheme,
}
m[urlCopy.String()] = challenges
return nil
}
// Octet types from RFC 2616.
type octetType byte
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
// ResponseChallenges returns a list of authorization challenges
// for the given http Response. Challenges are only checked if
// the response status code was a 401.
func ResponseChallenges(resp *http.Response) []Challenge {
if resp.StatusCode == http.StatusUnauthorized {
// Parse the WWW-Authenticate Header and store the challenges
// on this endpoint object.
return parseAuthHeader(resp.Header)
}
return nil
}
func parseAuthHeader(header http.Header) []Challenge {
challenges := []Challenge{}
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
}
}
return challenges
}
func parseValueAndParams(header string) (value string, params map[string]string) {
params = make(map[string]string)
value, s := expectToken(header)
if value == "" {
return
}
value = strings.ToLower(value)
s = "," + skipSpace(s)
for strings.HasPrefix(s, ",") {
var pkey string
pkey, s = expectToken(skipSpace(s[1:]))
if pkey == "" {
return
}
if !strings.HasPrefix(s, "=") {
return
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)
}
return
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isToken == 0 {
break
}
}
return s[:i], s[i:]
}
func expectTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return expectToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}
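
A sketch of feeding a 401 response into the simple challenge manager and
reading the parsed challenge back; the endpoint and header values are
illustrative and the import path is assumed from the repository layout.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	endpoint, _ := url.Parse("https://registry.example.com/v2/")
	resp := &http.Response{
		Request:    &http.Request{URL: endpoint},
		StatusCode: http.StatusUnauthorized,
		Header:     make(http.Header),
	}
	resp.Header.Add("WWW-Authenticate",
		`Bearer realm="https://auth.example.com/token",service="registry.example.com"`)

	m := auth.NewSimpleChallengeManager()
	if err := m.AddResponse(resp); err != nil {
		panic(err)
	}
	challenges, _ := m.GetChallenges(*endpoint)
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters["realm"]) // bearer https://auth.example.com/token
	}
}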

@ -0,0 +1,81 @@
package auth
import (
"fmt"
"net/http"
"net/url"
"strings"
"testing"
)
func TestAuthChallengeParse(t *testing.T) {
header := http.Header{}
header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`)
challenges := parseAuthHeader(header)
if len(challenges) != 1 {
t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges))
}
challenge := challenges[0]
if expected := "bearer"; challenge.Scheme != expected {
t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected)
}
if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected {
t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected)
}
if expected := "registry.example.com"; challenge.Parameters["service"] != expected {
t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected)
}
if expected := "fun"; challenge.Parameters["other"] != expected {
t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected)
}
if expected := "he\"llo"; challenge.Parameters["slashed"] != expected {
t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected)
}
}
func TestAuthChallengeNormalization(t *testing.T) {
testAuthChallengeNormalization(t, "reg.EXAMPLE.com")
testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com")
}
func testAuthChallengeNormalization(t *testing.T, host string) {
scm := NewSimpleChallengeManager()
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
if err != nil {
t.Fatal(err)
}
resp := &http.Response{
Request: &http.Request{
URL: url,
},
Header: make(http.Header),
StatusCode: http.StatusUnauthorized,
}
resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host))
err = scm.AddResponse(resp)
if err != nil {
t.Fatal(err)
}
lowered := *url
lowered.Host = strings.ToLower(lowered.Host)
c, err := scm.GetChallenges(lowered)
if err != nil {
t.Fatal(err)
}
if len(c) == 0 {
t.Fatal("Expected challenge for lower-cased-host URL")
}
}

480
docs/client/auth/session.go Normal file
@ -0,0 +1,480 @@
package auth
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/registry/client"
"github.com/docker/distribution/registry/client/transport"
)
var (
// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
// basic auth due to lack of credentials.
ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
// ErrNoToken is returned if a request is successful but the body does not
// contain an authorization token.
ErrNoToken = errors.New("authorization server did not include a token in the response")
)
const defaultClientID = "registry-client"
// AuthenticationHandler is an interface for authorizing a request using
// params from a "WWW-Authenticate" header for a single scheme.
type AuthenticationHandler interface {
// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
Scheme() string
// AuthorizeRequest adds the authorization header to a request (if needed)
// using the parameters from the "WWW-Authenticate" header. The parameter
// values depend on the scheme.
AuthorizeRequest(req *http.Request, params map[string]string) error
}
// CredentialStore is an interface for getting credentials for
// a given URL
type CredentialStore interface {
// Basic returns basic auth for the given URL
Basic(*url.URL) (string, string)
// RefreshToken returns a refresh token for the
// given URL and service
RefreshToken(*url.URL, string) string
// SetRefreshToken sets the refresh token if none
// is provided for the given url and service
SetRefreshToken(realm *url.URL, service, token string)
}
// NewAuthorizer creates an authorizer which can handle multiple authentication
// schemes. The handlers are tried in order; the higher priority authentication
// methods should be first. The challenge manager holds a list of challenges for
// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier {
return &endpointAuthorizer{
challenges: manager,
handlers: handlers,
}
}
type endpointAuthorizer struct {
challenges ChallengeManager
handlers []AuthenticationHandler
transport http.RoundTripper
}
func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
v2Root := strings.Index(req.URL.Path, "/v2/")
if v2Root == -1 {
return nil
}
ping := url.URL{
Host: req.URL.Host,
Scheme: req.URL.Scheme,
Path: req.URL.Path[:v2Root+4],
}
challenges, err := ea.challenges.GetChallenges(ping)
if err != nil {
return err
}
if len(challenges) > 0 {
for _, handler := range ea.handlers {
for _, challenge := range challenges {
if challenge.Scheme != handler.Scheme() {
continue
}
if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil {
return err
}
}
}
}
return nil
}
// This is the minimum duration a token can last (in seconds).
// A token must not live less than 60 seconds because older versions
// of the Docker client didn't read their expiration from the token
// response and assumed 60 seconds. So to remain compatible with
// those implementations, a token must live at least this long.
const minimumTokenLifetimeSeconds = 60
// Private interface for time used by this package to enable tests to provide their own implementation.
type clock interface {
Now() time.Time
}
type tokenHandler struct {
header http.Header
creds CredentialStore
transport http.RoundTripper
clock clock
offlineAccess bool
forceOAuth bool
clientID string
scopes []Scope
tokenLock sync.Mutex
tokenCache string
tokenExpiration time.Time
}
// Scope is a type which is serializable to a string
// using the allow scope grammar.
type Scope interface {
String() string
}
// RepositoryScope represents a token scope for access
// to a repository.
type RepositoryScope struct {
Repository string
Actions []string
}
// String returns the string representation of the repository
// using the scope grammar
func (rs RepositoryScope) String() string {
return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ","))
}
// TokenHandlerOptions is used to configure a new token handler
type TokenHandlerOptions struct {
Transport http.RoundTripper
Credentials CredentialStore
OfflineAccess bool
ForceOAuth bool
ClientID string
Scopes []Scope
}
// An implementation of clock for providing real time data.
type realClock struct{}
// Now implements clock
func (realClock) Now() time.Time { return time.Now() }
// NewTokenHandler creates a new AuthenticationHandler which supports
// fetching tokens from a remote token server.
func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
// Create options...
return NewTokenHandlerWithOptions(TokenHandlerOptions{
Transport: transport,
Credentials: creds,
Scopes: []Scope{
RepositoryScope{
Repository: scope,
Actions: actions,
},
},
})
}
// NewTokenHandlerWithOptions creates a new token handler using the provided
// options structure.
func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
handler := &tokenHandler{
transport: options.Transport,
creds: options.Credentials,
offlineAccess: options.OfflineAccess,
forceOAuth: options.ForceOAuth,
clientID: options.ClientID,
scopes: options.Scopes,
clock: realClock{},
}
return handler
}
func (th *tokenHandler) client() *http.Client {
return &http.Client{
Transport: th.transport,
Timeout: 15 * time.Second,
}
}
func (th *tokenHandler) Scheme() string {
return "bearer"
}
func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
var additionalScopes []string
if fromParam := req.URL.Query().Get("from"); fromParam != "" {
additionalScopes = append(additionalScopes, RepositoryScope{
Repository: fromParam,
Actions: []string{"pull"},
}.String())
}
token, err := th.getToken(params, additionalScopes...)
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
return nil
}
func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
th.tokenLock.Lock()
defer th.tokenLock.Unlock()
scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
for _, scope := range th.scopes {
scopes = append(scopes, scope.String())
}
var addedScopes bool
for _, scope := range additionalScopes {
scopes = append(scopes, scope)
addedScopes = true
}
now := th.clock.Now()
if now.After(th.tokenExpiration) || addedScopes {
token, expiration, err := th.fetchToken(params, scopes)
if err != nil {
return "", err
}
// do not update cache for added scope tokens
if !addedScopes {
th.tokenCache = token
th.tokenExpiration = expiration
}
return token, nil
}
return th.tokenCache, nil
}
type postTokenResponse struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
Scope string `json:"scope"`
}
func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
form := url.Values{}
form.Set("scope", strings.Join(scopes, " "))
form.Set("service", service)
clientID := th.clientID
if clientID == "" {
// Use default client, this is a required field
clientID = defaultClientID
}
form.Set("client_id", clientID)
if refreshToken != "" {
form.Set("grant_type", "refresh_token")
form.Set("refresh_token", refreshToken)
} else if th.creds != nil {
form.Set("grant_type", "password")
username, password := th.creds.Basic(realm)
form.Set("username", username)
form.Set("password", password)
// attempt to get a refresh token
form.Set("access_type", "offline")
} else {
// refuse to do oauth without a grant type
return "", time.Time{}, fmt.Errorf("no supported grant type")
}
resp, err := th.client().PostForm(realm.String(), form)
if err != nil {
return "", time.Time{}, err
}
defer resp.Body.Close()
if !client.SuccessStatus(resp.StatusCode) {
err := client.HandleErrorResponse(resp)
return "", time.Time{}, err
}
decoder := json.NewDecoder(resp.Body)
var tr postTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
}
if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
}
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
}
if tr.IssuedAt.IsZero() {
// issued_at is optional in the token response.
tr.IssuedAt = th.clock.Now().UTC()
}
return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
type getTokenResponse struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
RefreshToken string `json:"refresh_token"`
}
func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
req, err := http.NewRequest("GET", realm.String(), nil)
if err != nil {
return "", time.Time{}, err
}
reqParams := req.URL.Query()
if service != "" {
reqParams.Add("service", service)
}
for _, scope := range scopes {
reqParams.Add("scope", scope)
}
if th.offlineAccess {
reqParams.Add("offline_token", "true")
clientID := th.clientID
if clientID == "" {
clientID = defaultClientID
}
reqParams.Add("client_id", clientID)
}
if th.creds != nil {
username, password := th.creds.Basic(realm)
if username != "" && password != "" {
reqParams.Add("account", username)
req.SetBasicAuth(username, password)
}
}
req.URL.RawQuery = reqParams.Encode()
resp, err := th.client().Do(req)
if err != nil {
return "", time.Time{}, err
}
defer resp.Body.Close()
if !client.SuccessStatus(resp.StatusCode) {
err := client.HandleErrorResponse(resp)
return "", time.Time{}, err
}
decoder := json.NewDecoder(resp.Body)
var tr getTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
}
if tr.RefreshToken != "" && th.creds != nil {
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
}
// `access_token` is equivalent to `token` and if both are specified
// the choice is undefined. Canonicalize `access_token` by sticking
// things in `token`.
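// For example, either of these bodies is a valid token response here
// (illustrative values; both decode into getTokenResponse above):
//   {"token": "eyJhbGciOi...", "expires_in": 300}
//   {"access_token": "eyJhbGciOi...", "expires_in": 300}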
if tr.AccessToken != "" {
tr.Token = tr.AccessToken
}
if tr.Token == "" {
return "", time.Time{}, ErrNoToken
}
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
}
if tr.IssuedAt.IsZero() {
// issued_at is optional in the token response.
tr.IssuedAt = th.clock.Now().UTC()
}
return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
realm, ok := params["realm"]
if !ok {
return "", time.Time{}, errors.New("no realm specified for token auth challenge")
}
// TODO(dmcgowan): Handle empty scheme and relative realm
realmURL, err := url.Parse(realm)
if err != nil {
return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
}
service := params["service"]
var refreshToken string
if th.creds != nil {
refreshToken = th.creds.RefreshToken(realmURL, service)
}
if refreshToken != "" || th.forceOAuth {
return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
}
return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
}
type basicHandler struct {
creds CredentialStore
}
// NewBasicHandler creates a new authentication handler which adds
// basic authentication credentials to a request.
func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
return &basicHandler{
creds: creds,
}
}
func (*basicHandler) Scheme() string {
return "basic"
}
func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
if bh.creds != nil {
username, password := bh.creds.Basic(req.URL)
if username != "" && password != "" {
req.SetBasicAuth(username, password)
return nil
}
}
return ErrNoBasicAuthCredentials
}
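A minimal wiring sketch for the handlers above, mirroring what the tests that follow do; the endpoint URL, repository name, and creds (a CredentialStore) are assumptions for illustration, not part of this file:
// Probe the registry once to learn its WWW-Authenticate challenges, then
// build a client whose transport satisfies them, preferring token auth
// and falling back to basic auth.
challengeManager := NewSimpleChallengeManager()
resp, err := http.Get("https://registry.example.com/v2/") // assumed endpoint
if err != nil {
return err
}
defer resp.Body.Close()
if err := challengeManager.AddResponse(resp); err != nil {
return err
}
authorizer := NewAuthorizer(challengeManager,
NewTokenHandler(nil, creds, "some/repo", "pull", "push"),
NewBasicHandler(creds))
httpClient := &http.Client{Transport: transport.NewTransport(nil, authorizer)}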

View File

@ -0,0 +1,787 @@
package auth
import (
"encoding/base64"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/testutil"
)
// An implementation of clock for providing fake time data.
type fakeClock struct {
current time.Time
}
// Now implements clock
func (fc *fakeClock) Now() time.Time { return fc.current }
func testServer(rrm testutil.RequestResponseMap) (string, func()) {
h := testutil.NewHandler(rrm)
s := httptest.NewServer(h)
return s.URL, s.Close
}
type testAuthenticationWrapper struct {
headers http.Header
authCheck func(string) bool
next http.Handler
}
func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
auth := r.Header.Get("Authorization")
if auth == "" || !w.authCheck(auth) {
h := rw.Header()
for k, values := range w.headers {
h[k] = values
}
rw.WriteHeader(http.StatusUnauthorized)
return
}
w.next.ServeHTTP(rw, r)
}
func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) {
h := testutil.NewHandler(rrm)
wrapper := &testAuthenticationWrapper{
headers: http.Header(map[string][]string{
"X-API-Version": {"registry/2.0"},
"X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"},
"WWW-Authenticate": {authenticate},
}),
authCheck: authCheck,
next: h,
}
s := httptest.NewServer(wrapper)
return s.URL, s.Close
}
// ping pings the provided endpoint to determine its required authorization challenges.
// If a version header is provided, the versions will be returned.
func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) {
resp, err := http.Get(endpoint)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := manager.AddResponse(resp); err != nil {
return nil, err
}
return APIVersions(resp, versionHeader), err
}
type testCredentialStore struct {
username string
password string
refreshTokens map[string]string
}
func (tcs *testCredentialStore) Basic(*url.URL) (string, string) {
return tcs.username, tcs.password
}
func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string {
return tcs.refreshTokens[service]
}
func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) {
if tcs.refreshTokens != nil {
tcs.refreshTokens[service] = token
}
}
func TestEndpointAuthorizeToken(t *testing.T) {
service := "localhost.localdomain"
repo1 := "some/registry"
repo2 := "other/registry"
scope1 := fmt.Sprintf("repository:%s:pull,push", repo1)
scope2 := fmt.Sprintf("repository:%s:pull,push", repo2)
tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"token":"statictoken"}`),
},
},
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"token":"badtoken"}`),
},
},
})
te, tc := testServer(tokenMap)
defer tc()
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
})
authenticate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service)
validCheck := func(a string) bool {
return a == "Bearer statictoken"
}
e, c := testServerWithAuth(m, authenticate, validCheck)
defer c()
challengeManager1 := NewSimpleChallengeManager()
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
if err != nil {
t.Fatal(err)
}
if len(versions) != 1 {
t.Fatalf("Unexpected version count: %d, expected 1", len(versions))
}
if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check)
}
transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push")))
client := &http.Client{Transport: transport1}
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
e2, c2 := testServerWithAuth(m, authenticate, validCheck)
defer c2()
challengeManager2 := NewSimpleChallengeManager()
versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version")
if err != nil {
t.Fatal(err)
}
if len(versions) != 3 {
t.Fatalf("Unexpected version count: %d, expected 3", len(versions))
}
if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check)
}
if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check)
}
if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check)
}
transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push")))
client2 := &http.Client{Transport: transport2}
req, _ = http.NewRequest("GET", e2+"/v2/hello", nil)
resp, err = client2.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusUnauthorized {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized)
}
}
func TestEndpointAuthorizeRefreshToken(t *testing.T) {
service := "localhost.localdomain"
repo1 := "some/registry"
repo2 := "other/registry"
scope1 := fmt.Sprintf("repository:%s:pull,push", repo1)
scope2 := fmt.Sprintf("repository:%s:pull,push", repo2)
refreshToken1 := "0123456790abcdef"
refreshToken2 := "0123456790fedcba"
tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "POST",
Route: "/token",
Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)),
},
},
{
// In the future this test may fail and require using basic auth to get a different refresh token
Request: testutil.Request{
Method: "POST",
Route: "/token",
Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)),
},
},
{
Request: testutil.Request{
Method: "POST",
Route: "/token",
Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`),
},
},
})
te, tc := testServer(tokenMap)
defer tc()
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
})
authenticate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service)
validCheck := func(a string) bool {
return a == "Bearer statictoken"
}
e, c := testServerWithAuth(m, authenticate, validCheck)
defer c()
challengeManager1 := NewSimpleChallengeManager()
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
if err != nil {
t.Fatal(err)
}
if len(versions) != 1 {
t.Fatalf("Unexpected version count: %d, expected 1", len(versions))
}
if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check)
}
creds := &testCredentialStore{
refreshTokens: map[string]string{
service: refreshToken1,
},
}
transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push")))
client := &http.Client{Transport: transport1}
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
// Try with refresh token setting
e2, c2 := testServerWithAuth(m, authenticate, validCheck)
defer c2()
challengeManager2 := NewSimpleChallengeManager()
versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version")
if err != nil {
t.Fatal(err)
}
if len(versions) != 1 {
t.Fatalf("Unexpected version count: %d, expected 1", len(versions))
}
if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check)
}
transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push")))
client2 := &http.Client{Transport: transport2}
req, _ = http.NewRequest("GET", e2+"/v2/hello", nil)
resp, err = client2.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized)
}
if creds.refreshTokens[service] != refreshToken2 {
t.Fatalf("Refresh token not set after change")
}
// Try with bad token
e3, c3 := testServerWithAuth(m, authenticate, validCheck)
defer c3()
challengeManager3 := NewSimpleChallengeManager()
versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version")
if err != nil {
t.Fatal(err)
}
if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check {
t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check)
}
transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push")))
client3 := &http.Client{Transport: transport3}
req, _ = http.NewRequest("GET", e3+"/v2/hello", nil)
resp, err = client3.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusUnauthorized {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized)
}
}
func basicAuth(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))
}
func TestEndpointAuthorizeTokenBasic(t *testing.T) {
service := "localhost.localdomain"
repo := "some/fun/registry"
scope := fmt.Sprintf("repository:%s:pull,push", repo)
username := "tokenuser"
password := "superSecretPa$$word"
tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"access_token":"statictoken"}`),
},
},
})
authenticate1 := "Basic realm=localhost"
basicCheck := func(a string) bool {
return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
}
te, tc := testServerWithAuth(tokenMap, authenticate1, basicCheck)
defer tc()
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
})
authenticate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service)
bearerCheck := func(a string) bool {
return a == "Bearer statictoken"
}
e, c := testServerWithAuth(m, authenticate2, bearerCheck)
defer c()
creds := &testCredentialStore{
username: username,
password: password,
}
challengeManager := NewSimpleChallengeManager()
_, err := ping(challengeManager, e+"/v2/", "")
if err != nil {
t.Fatal(err)
}
transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds)))
client := &http.Client{Transport: transport1}
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
}
func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) {
service := "localhost.localdomain"
repo := "some/fun/registry"
scope := fmt.Sprintf("repository:%s:pull,push", repo)
username := "tokenuser"
password := "superSecretPa$$word"
tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"token":"statictoken", "expires_in": 3001}`),
},
},
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`),
},
},
})
authenticate1 := "Basic realm=localhost"
tokenExchanges := 0
basicCheck := func(a string) bool {
tokenExchanges = tokenExchanges + 1
return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
}
te, tc := testServerWithAuth(tokenMap, authenticate1, basicCheck)
defer tc()
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
})
authenticate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service)
bearerCheck := func(a string) bool {
return a == "Bearer statictoken"
}
e, c := testServerWithAuth(m, authenticate2, bearerCheck)
defer c()
creds := &testCredentialStore{
username: username,
password: password,
}
challengeManager := NewSimpleChallengeManager()
_, err := ping(challengeManager, e+"/v2/", "")
if err != nil {
t.Fatal(err)
}
clock := &fakeClock{current: time.Now()}
options := TokenHandlerOptions{
Transport: nil,
Credentials: creds,
Scopes: []Scope{
RepositoryScope{
Repository: repo,
Actions: []string{"pull", "push"},
},
},
}
tHandler := NewTokenHandlerWithOptions(options)
tHandler.(*tokenHandler).clock = clock
transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds)))
client := &http.Client{Transport: transport1}
// First call should result in a token exchange
// Subsequent calls should recycle the token from the first request, until the expiration has lapsed.
timeIncrement := 1000 * time.Second
for i := 0; i < 4; i++ {
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
if tokenExchanges != 1 {
t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i)
}
clock.current = clock.current.Add(timeIncrement)
}
// After we've exceeded the expiration, we should see a second token exchange.
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
if tokenExchanges != 2 {
t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges)
}
}
func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) {
service := "localhost.localdomain"
repo := "some/fun/registry"
scope := fmt.Sprintf("repository:%s:pull,push", repo)
username := "tokenuser"
password := "superSecretPa$$word"
// This test sets things up such that the token was issued one increment
// earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn.
// This will mean that the token expires after 3 increments instead of 4.
clock := &fakeClock{current: time.Now()}
timeIncrement := 1000 * time.Second
firstIssuedAt := clock.Now()
clock.current = clock.current.Add(timeIncrement)
secondIssuedAt := clock.current.Add(2 * timeIncrement)
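// With expires_in of 3001 seconds and 1000-second increments, a token is
// reused until the clock passes issued_at + 3001s; backdating the first
// issued_at by one increment therefore trims one loop iteration below.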
tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`),
},
},
{
Request: testutil.Request{
Method: "GET",
Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service),
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`),
},
},
})
authenticate1 := "Basic realm=localhost"
tokenExchanges := 0
basicCheck := func(a string) bool {
tokenExchanges = tokenExchanges + 1
return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
}
te, tc := testServerWithAuth(tokenMap, authenticate1, basicCheck)
defer tc()
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
})
authenticate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service)
bearerCheck := func(a string) bool {
return a == "Bearer statictoken"
}
e, c := testServerWithAuth(m, authenticate2, bearerCheck)
defer c()
creds := &testCredentialStore{
username: username,
password: password,
}
challengeManager := NewSimpleChallengeManager()
_, err := ping(challengeManager, e+"/v2/", "")
if err != nil {
t.Fatal(err)
}
options := TokenHandlerOptions{
Transport: nil,
Credentials: creds,
Scopes: []Scope{
RepositoryScope{
Repository: repo,
Actions: []string{"pull", "push"},
},
},
}
tHandler := NewTokenHandlerWithOptions(options)
tHandler.(*tokenHandler).clock = clock
transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds)))
client := &http.Client{Transport: transport1}
// First call should result in a token exchange
// Subsequent calls should recycle the token from the first request, until the expiration has lapsed.
// We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn
// so this loop should have one fewer iteration.
for i := 0; i < 3; i++ {
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
if tokenExchanges != 1 {
t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i)
}
clock.current = clock.current.Add(timeIncrement)
}
// After we've exceeded the expiration, we should see a second token exchange.
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
if tokenExchanges != 2 {
t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges)
}
}
func TestEndpointAuthorizeBasic(t *testing.T) {
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/hello",
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
},
},
})
username := "user1"
password := "funSecretPa$$word"
authenticate := "Basic realm=localhost"
validCheck := func(a string) bool {
return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
}
e, c := testServerWithAuth(m, authenticate, validCheck)
defer c()
creds := &testCredentialStore{
username: username,
password: password,
}
challengeManager := NewSimpleChallengeManager()
_, err := ping(challengeManager, e+"/v2/", "")
if err != nil {
t.Fatal(err)
}
transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds)))
client := &http.Client{Transport: transport1}
req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("Error sending get request: %s", err)
}
if resp.StatusCode != http.StatusAccepted {
t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
}
}

162
docs/client/blob_writer.go Normal file
View File

@ -0,0 +1,162 @@
package client
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
)
type httpBlobUpload struct {
statter distribution.BlobStatter
client *http.Client
uuid string
startedAt time.Time
location string // always the last value of the location header.
offset int64
closed bool
}
func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
panic("Not implemented")
}
func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUploadUnknown
}
return HandleErrorResponse(resp)
}
func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
if err != nil {
return 0, err
}
defer req.Body.Close()
resp, err := hbu.client.Do(req)
if err != nil {
return 0, err
}
if !SuccessStatus(resp.StatusCode) {
return 0, hbu.handleErrorResponse(resp)
}
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
if err != nil {
return 0, err
}
rng := resp.Header.Get("Range")
var start, end int64
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
return 0, err
} else if n != 2 || end < start {
return 0, fmt.Errorf("bad range format: %s", rng)
}
return (end - start + 1), nil
}
func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
if err != nil {
return 0, err
}
req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
req.Header.Set("Content-Type", "application/octet-stream")
resp, err := hbu.client.Do(req)
if err != nil {
return 0, err
}
if !SuccessStatus(resp.StatusCode) {
return 0, hbu.handleErrorResponse(resp)
}
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
if err != nil {
return 0, err
}
rng := resp.Header.Get("Range")
var start, end int
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
return 0, err
} else if n != 2 || end < start {
return 0, fmt.Errorf("bad range format: %s", rng)
}
return (end - start + 1), nil
}
func (hbu *httpBlobUpload) Size() int64 {
return hbu.offset
}
func (hbu *httpBlobUpload) ID() string {
return hbu.uuid
}
func (hbu *httpBlobUpload) StartedAt() time.Time {
return hbu.startedAt
}
func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
// TODO(dmcgowan): Check if already finished, if so just fetch
req, err := http.NewRequest("PUT", hbu.location, nil)
if err != nil {
return distribution.Descriptor{}, err
}
values := req.URL.Query()
values.Set("digest", desc.Digest.String())
req.URL.RawQuery = values.Encode()
resp, err := hbu.client.Do(req)
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
if !SuccessStatus(resp.StatusCode) {
return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
}
return hbu.statter.Stat(ctx, desc.Digest)
}
func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
req, err := http.NewRequest("DELETE", hbu.location, nil)
if err != nil {
return err
}
resp, err := hbu.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
return nil
}
return hbu.handleErrorResponse(resp)
}
func (hbu *httpBlobUpload) Close() error {
hbu.closed = true
return nil
}
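A hedged sketch of how this writer is typically driven; the blob store's Create method appears in docs/client/repository.go below, and blobStore, payload, and dgst are stand-ins:
// Start an upload, stream the payload, then commit by digest.
writer, err := blobStore.Create(ctx)
if err != nil {
return err
}
defer writer.Close()
if _, err := writer.ReadFrom(bytes.NewReader(payload)); err != nil {
return err
}
if _, err := writer.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
return err
}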

View File

@ -0,0 +1,211 @@
package client
import (
"bytes"
"fmt"
"net/http"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/testutil"
)
// Compile-time check that httpBlobUpload implements distribution.BlobWriter
var _ distribution.BlobWriter = &httpBlobUpload{}
func TestUploadReadFrom(t *testing.T) {
_, b := newRandomBlob(64)
repo := "test/upload/readfrom"
locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo)
m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
{
Request: testutil.Request{
Method: "GET",
Route: "/v2/",
},
Response: testutil.Response{
StatusCode: http.StatusOK,
Headers: http.Header(map[string][]string{
"Docker-Distribution-API-Version": {"registry/2.0"},
}),
},
},
// Test Valid case
{
Request: testutil.Request{
Method: "PATCH",
Route: locationPath,
Body: b,
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
Headers: http.Header(map[string][]string{
"Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"},
"Location": {locationPath},
"Range": {"0-63"},
}),
},
},
// Test invalid range
{
Request: testutil.Request{
Method: "PATCH",
Route: locationPath,
Body: b,
},
Response: testutil.Response{
StatusCode: http.StatusAccepted,
Headers: http.Header(map[string][]string{
"Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"},
"Location": {locationPath},
"Range": {""},
}),
},
},
// Test 404
{
Request: testutil.Request{
Method: "PATCH",
Route: locationPath,
Body: b,
},
Response: testutil.Response{
StatusCode: http.StatusNotFound,
},
},
// Test 400 valid json
{
Request: testutil.Request{
Method: "PATCH",
Route: locationPath,
Body: b,
},
Response: testutil.Response{
StatusCode: http.StatusBadRequest,
Body: []byte(`
{ "errors":
[
{
"code": "BLOB_UPLOAD_INVALID",
"message": "blob upload invalid",
"detail": "more detail"
}
]
} `),
},
},
// Test 400 invalid json
{
Request: testutil.Request{
Method: "PATCH",
Route: locationPath,
Body: b,
},
Response: testutil.Response{
StatusCode: http.StatusBadRequest,
Body: []byte("something bad happened"),
},
},
// Test 500
{
Request: testutil.Request{
Method: "PATCH",
Route: locationPath,
Body: b,
},
Response: testutil.Response{
StatusCode: http.StatusInternalServerError,
},
},
})
e, c := testServer(m)
defer c()
blobUpload := &httpBlobUpload{
client: &http.Client{},
}
// Valid case
blobUpload.location = e + locationPath
n, err := blobUpload.ReadFrom(bytes.NewReader(b))
if err != nil {
t.Fatalf("Error calling ReadFrom: %s", err)
}
if n != 64 {
t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n)
}
// Bad range
blobUpload.location = e + locationPath
_, err = blobUpload.ReadFrom(bytes.NewReader(b))
if err == nil {
t.Fatalf("Expected error when bad range received")
}
// 404
blobUpload.location = e + locationPath
_, err = blobUpload.ReadFrom(bytes.NewReader(b))
if err == nil {
t.Fatalf("Expected error when not found")
}
if err != distribution.ErrBlobUploadUnknown {
t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown)
}
// 400 valid json
blobUpload.location = e + locationPath
_, err = blobUpload.ReadFrom(bytes.NewReader(b))
if err == nil {
t.Fatalf("Expected error when not found")
}
if uploadErr, ok := err.(errcode.Errors); !ok {
t.Fatalf("Wrong error type %T: %s", err, err)
} else if len(uploadErr) != 1 {
t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr))
} else {
v2Err, ok := uploadErr[0].(errcode.Error)
if !ok {
t.Fatalf("Not an 'Error' type: %#v", uploadErr[0])
}
if v2Err.Code != v2.ErrorCodeBlobUploadInvalid {
t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid)
}
if expected := "blob upload invalid"; v2Err.Message != expected {
t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected)
}
if expected := "more detail"; v2Err.Detail.(string) != expected {
t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected)
}
}
// 400 invalid json
blobUpload.location = e + locationPath
_, err = blobUpload.ReadFrom(bytes.NewReader(b))
if err == nil {
t.Fatalf("Expected error when not found")
}
if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok {
t.Fatalf("Wrong error type %T: %s", err, err)
} else {
respStr := string(uploadErr.Response)
if expected := "something bad happened"; respStr != expected {
t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected)
}
}
// 500
blobUpload.location = e + locationPath
_, err = blobUpload.ReadFrom(bytes.NewReader(b))
if err == nil {
t.Fatalf("Expected error when not found")
}
if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok {
t.Fatalf("Wrong error type %T: %s", err, err)
} else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected {
t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected)
}
}

107
docs/client/errors.go Normal file
View File

@ -0,0 +1,107 @@
package client
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
type UnexpectedHTTPStatusError struct {
Status string
}
func (e *UnexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}
// UnexpectedHTTPResponseError is returned when an expected HTTP status code
// is returned, but the content was unexpected and failed to be parsed.
type UnexpectedHTTPResponseError struct {
ParseErr error
StatusCode int
Response []byte
}
func (e *UnexpectedHTTPResponseError) Error() string {
return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
var errors errcode.Errors
body, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// For backward compatibility, handle irregularly formatted
// messages that contain a "details" field.
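// For example, a legacy body like: {"details": "incorrect username or password"}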
var detailsErr struct {
Details string `json:"details"`
}
err = json.Unmarshal(body, &detailsErr)
if err == nil && detailsErr.Details != "" {
switch statusCode {
case http.StatusUnauthorized:
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
case http.StatusTooManyRequests:
return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
default:
return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
}
}
if err := json.Unmarshal(body, &errors); err != nil {
return &UnexpectedHTTPResponseError{
ParseErr: err,
StatusCode: statusCode,
Response: body,
}
}
if len(errors) == 0 {
// If there was no error specified in the body, return
// UnexpectedHTTPResponseError.
return &UnexpectedHTTPResponseError{
ParseErr: ErrNoErrorsInBody,
StatusCode: statusCode,
Response: body,
}
}
return errors
}
// HandleErrorResponse returns the error parsed from an HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError is returned for response codes outside of this
// range.
func HandleErrorResponse(resp *http.Response) error {
if resp.StatusCode == 401 {
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
}
return err
}
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
return parseHTTPErrorResponse(resp.StatusCode, resp.Body)
}
return &UnexpectedHTTPStatusError{Status: resp.Status}
}
// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func SuccessStatus(status int) bool {
return status >= 200 && status <= 399
}
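A representative call site for the two helpers above, following the pattern used throughout the client code in this commit (the URL and surrounding function are illustrative):
resp, err := http.Get(manifestURL) // manifestURL is a stand-in
if err != nil {
return err
}
defer resp.Body.Close()
if !SuccessStatus(resp.StatusCode) {
// 4xx bodies are parsed into errcode.Errors; anything else becomes
// an UnexpectedHTTPStatusError.
return HandleErrorResponse(resp)
}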

104
docs/client/errors_test.go Normal file
View File

@ -0,0 +1,104 @@
package client
import (
"bytes"
"io"
"net/http"
"strings"
"testing"
)
type nopCloser struct {
io.Reader
}
func (nopCloser) Close() error { return nil }
func TestHandleErrorResponse401ValidBody(t *testing.T) {
json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}"
response := &http.Response{
Status: "401 Unauthorized",
StatusCode: 401,
Body: nopCloser{bytes.NewBufferString(json)},
}
err := HandleErrorResponse(response)
expectedMsg := "unauthorized: action requires authentication"
if !strings.Contains(err.Error(), expectedMsg) {
t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
}
}
func TestHandleErrorResponse401WithInvalidBody(t *testing.T) {
json := "{invalid json}"
response := &http.Response{
Status: "401 Unauthorized",
StatusCode: 401,
Body: nopCloser{bytes.NewBufferString(json)},
}
err := HandleErrorResponse(response)
expectedMsg := "unauthorized: authentication required"
if !strings.Contains(err.Error(), expectedMsg) {
t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
}
}
func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) {
json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}"
response := &http.Response{
Status: "400 Bad Request",
StatusCode: 400,
Body: nopCloser{bytes.NewBufferString(json)},
}
err := HandleErrorResponse(response)
expectedMsg := "digest invalid: provided digest does not match"
if !strings.Contains(err.Error(), expectedMsg) {
t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
}
}
func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) {
json := `{"randomkey": "randomvalue"}`
response := &http.Response{
Status: "404 Not Found",
StatusCode: 404,
Body: nopCloser{bytes.NewBufferString(json)},
}
err := HandleErrorResponse(response)
expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"`
if !strings.Contains(err.Error(), expectedMsg) {
t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
}
}
func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) {
json := "{invalid json}"
response := &http.Response{
Status: "404 Not Found",
StatusCode: 404,
Body: nopCloser{bytes.NewBufferString(json)},
}
err := HandleErrorResponse(response)
expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\""
if !strings.Contains(err.Error(), expectedMsg) {
t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
}
}
func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) {
response := &http.Response{
Status: "501 Not Implemented",
StatusCode: 501,
Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")},
}
err := HandleErrorResponse(response)
expectedMsg := "received unexpected HTTP status: 501 Not Implemented"
if !strings.Contains(err.Error(), expectedMsg) {
t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
}
}

863
docs/client/repository.go Normal file
View File

@ -0,0 +1,863 @@
package client
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/registry/storage/cache"
"github.com/docker/distribution/registry/storage/cache/memory"
)
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
type Registry interface {
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}
// checkHTTPRedirect is a callback that can manipulate redirected HTTP
// requests. It is used to preserve Accept and Range headers.
func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
if len(via) > 0 {
for headerName, headerVals := range via[0].Header {
if headerName != "Accept" && headerName != "Range" {
continue
}
for _, val := range headerVals {
// Don't add to redirected request if redirected
// request already has a header with the same
// name and value.
hasValue := false
for _, existingVal := range req.Header[headerName] {
if existingVal == val {
hasValue = true
break
}
}
if !hasValue {
req.Header.Add(headerName, val)
}
}
}
}
return nil
}
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
Timeout: 1 * time.Minute,
CheckRedirect: checkHTTPRedirect,
}
return &registry{
client: client,
ub: ub,
context: ctx,
}, nil
}
type registry struct {
client *http.Client
ub *v2.URLBuilder
context context.Context
}
// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries.
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
var numFilled int
var returnErr error
values := buildCatalogValues(len(entries), last)
u, err := r.ub.BuildCatalogURL(values)
if err != nil {
return 0, err
}
resp, err := r.client.Get(u)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
var ctlg struct {
Repositories []string `json:"repositories"`
}
decoder := json.NewDecoder(resp.Body)
if err := decoder.Decode(&ctlg); err != nil {
return 0, err
}
for cnt := range ctlg.Repositories {
entries[cnt] = ctlg.Repositories[cnt]
}
numFilled = len(ctlg.Repositories)
link := resp.Header.Get("Link")
if link == "" {
returnErr = io.EOF
}
} else {
return 0, HandleErrorResponse(resp)
}
return numFilled, returnErr
}
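// A pagination sketch (hypothetical caller; 'reg' is a Registry from
// NewRegistry above):
//   entries := make([]string, 100)
//   last := ""
//   for {
//       n, err := reg.Repositories(ctx, entries, last)
//       // consume entries[:n]
//       if err == io.EOF {
//           break
//       }
//       if err != nil {
//           return err
//       }
//       last = entries[n-1]
//   }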
// NewRepository creates a new Repository for the given repository name and base URL.
func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
CheckRedirect: checkHTTPRedirect,
// TODO(dmcgowan): create cookie jar
}
return &repository{
client: client,
ub: ub,
name: name,
context: ctx,
}, nil
}
type repository struct {
client *http.Client
ub *v2.URLBuilder
context context.Context
name reference.Named
}
func (r *repository) Named() reference.Named {
return r.name
}
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
statter := &blobStatter{
name: r.name,
ub: r.ub,
client: r.client,
}
return &blobs{
name: r.name,
ub: r.ub,
client: r.client,
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
}
}
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
// todo(richardscothern): options should be sent over the wire
return &manifests{
name: r.name,
ub: r.ub,
client: r.client,
etags: make(map[string]string),
}, nil
}
func (r *repository) Tags(ctx context.Context) distribution.TagService {
return &tags{
client: r.client,
ub: r.ub,
context: r.context,
name: r.Named(),
}
}
// tags implements remote tagging operations.
type tags struct {
client *http.Client
ub *v2.URLBuilder
context context.Context
name reference.Named
}
// All returns all tags
func (t *tags) All(ctx context.Context) ([]string, error) {
var tags []string
u, err := t.ub.BuildTagsURL(t.name)
if err != nil {
return tags, err
}
for {
resp, err := t.client.Get(u)
if err != nil {
return tags, err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return tags, err
}
tagsResponse := struct {
Tags []string `json:"tags"`
}{}
if err := json.Unmarshal(b, &tagsResponse); err != nil {
return tags, err
}
tags = append(tags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
u = strings.Trim(strings.Split(link, ";")[0], "<>")
} else {
return tags, nil
}
} else {
return tags, HandleErrorResponse(resp)
}
}
}
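// The Link header consumed above is typically of the RFC 5988 form
//   </v2/<name>/tags/list?last=<tag>&n=<count>>; rel="next"
// so splitting at ';' and trimming the angle brackets yields the next page URL.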
func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
desc := distribution.Descriptor{}
headers := response.Header
ctHeader := headers.Get("Content-Type")
if ctHeader == "" {
return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
}
desc.MediaType = ctHeader
digestHeader := headers.Get("Docker-Content-Digest")
if digestHeader == "" {
bytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return distribution.Descriptor{}, err
}
_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
if err != nil {
return distribution.Descriptor{}, err
}
return desc, nil
}
dgst, err := digest.ParseDigest(digestHeader)
if err != nil {
return distribution.Descriptor{}, err
}
desc.Digest = dgst
lengthHeader := headers.Get("Content-Length")
if lengthHeader == "" {
return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
}
length, err := strconv.ParseInt(lengthHeader, 10, 64)
if err != nil {
return distribution.Descriptor{}, err
}
desc.Size = length
return desc, nil
}
// Get issues a HEAD request for a Manifest against its named endpoint in order
// to construct a descriptor for the tag. If the registry doesn't support HEADing
// a manifest, it falls back to GET.
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
ref, err := reference.WithTag(t.name, tag)
if err != nil {
return distribution.Descriptor{}, err
}
u, err := t.ub.BuildManifestURL(ref)
if err != nil {
return distribution.Descriptor{}, err
}
req, err := http.NewRequest("HEAD", u, nil)
if err != nil {
return distribution.Descriptor{}, err
}
for _, t := range distribution.ManifestMediaTypes() {
req.Header.Add("Accept", t)
}
var attempts int
resp, err := t.client.Do(req)
check:
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
switch {
case resp.StatusCode >= 200 && resp.StatusCode < 400:
return descriptorFromResponse(resp)
case resp.StatusCode == http.StatusMethodNotAllowed:
req, err = http.NewRequest("GET", u, nil)
if err != nil {
return distribution.Descriptor{}, err
}
for _, t := range distribution.ManifestMediaTypes() {
req.Header.Add("Accept", t)
}
resp, err = t.client.Do(req)
attempts++
if attempts > 1 {
return distribution.Descriptor{}, err
}
goto check
default:
return distribution.Descriptor{}, HandleErrorResponse(resp)
}
}
func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
panic("not implemented")
}
func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
panic("not implemented")
}
func (t *tags) Untag(ctx context.Context, tag string) error {
panic("not implemented")
}
type manifests struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
etags map[string]string
}
func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return false, err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return false, err
}
resp, err := ms.client.Head(u)
if err != nil {
return false, err
}
if SuccessStatus(resp.StatusCode) {
return true, nil
} else if resp.StatusCode == http.StatusNotFound {
return false, nil
}
return false, HandleErrorResponse(resp)
}
// AddEtagToTag allows a client to supply an eTag to Get which will be
// used for a conditional HTTP request. If the eTag matches, a nil manifest
// and an ErrManifestNotModified error will be returned. The etag is
// automatically quoted when added to the handler's etag map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
return etagOption{tag, etag}
}
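// Illustrative use (ms is an assumed distribution.ManifestService from this
// package; the tag and etag values are stand-ins):
//   m, err := ms.Get(ctx, "", distribution.WithTagOption{Tag: "latest"}, AddEtagToTag("latest", etag))
//   if err == distribution.ErrManifestNotModified {
//       // the cached copy identified by etag is still current
//   }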
type etagOption struct{ tag, etag string }
func (o etagOption) Apply(ms distribution.ManifestService) error {
if ms, ok := ms.(*manifests); ok {
ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
return nil
}
return fmt.Errorf("etag options is a client-only option")
}
// ReturnContentDigest allows a client to set the content digest on
// a successful request from the 'Docker-Content-Digest' header. The
// returned digest represents the digest which the registry uses
// to refer to the content and can be used to delete the content.
func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
return contentDigestOption{dgst}
}
type contentDigestOption struct{ digest *digest.Digest }
func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
return nil
}
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
var (
digestOrTag string
ref reference.Named
err error
contentDgst *digest.Digest
)
for _, option := range options {
if opt, ok := option.(distribution.WithTagOption); ok {
digestOrTag = opt.Tag
ref, err = reference.WithTag(ms.name, opt.Tag)
if err != nil {
return nil, err
}
} else if opt, ok := option.(contentDigestOption); ok {
contentDgst = opt.digest
} else {
err := option.Apply(ms)
if err != nil {
return nil, err
}
}
}
if digestOrTag == "" {
digestOrTag = dgst.String()
ref, err = reference.WithDigest(ms.name, dgst)
if err != nil {
return nil, err
}
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return nil, err
}
for _, t := range distribution.ManifestMediaTypes() {
req.Header.Add("Accept", t)
}
if _, ok := ms.etags[digestOrTag]; ok {
req.Header.Set("If-None-Match", ms.etags[digestOrTag])
}
resp, err := ms.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotModified {
return nil, distribution.ErrManifestNotModified
} else if SuccessStatus(resp.StatusCode) {
if contentDgst != nil {
dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest"))
if err == nil {
*contentDgst = dgst
}
}
mt := resp.Header.Get("Content-Type")
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
m, _, err := distribution.UnmarshalManifest(mt, body)
if err != nil {
return nil, err
}
return m, nil
}
return nil, HandleErrorResponse(resp)
}
// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
// tag name in order to build the correct upload URL.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
ref := ms.name
var tagged bool
for _, option := range options {
if opt, ok := option.(distribution.WithTagOption); ok {
var err error
ref, err = reference.WithTag(ref, opt.Tag)
if err != nil {
return "", err
}
tagged = true
} else {
err := option.Apply(ms)
if err != nil {
return "", err
}
}
}
mediaType, p, err := m.Payload()
if err != nil {
return "", err
}
if !tagged {
// generate a canonical digest and Put by digest
_, d, err := distribution.UnmarshalManifest(mediaType, p)
if err != nil {
return "", err
}
ref, err = reference.WithDigest(ref, d.Digest)
if err != nil {
return "", err
}
}
manifestURL, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return "", err
}
putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
if err != nil {
return "", err
}
putRequest.Header.Set("Content-Type", mediaType)
resp, err := ms.client.Do(putRequest)
if err != nil {
return "", err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
dgstHeader := resp.Header.Get("Docker-Content-Digest")
dgst, err := digest.ParseDigest(dgstHeader)
if err != nil {
return "", err
}
return dgst, nil
}
return "", HandleErrorResponse(resp)
}
func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", u, nil)
if err != nil {
return err
}
resp, err := ms.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
return nil
}
return HandleErrorResponse(resp)
}
// todo(richardscothern): Restore interface and implementation with merge of #1050
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
panic("not supported")
}*/
type blobs struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
statter distribution.BlobDescriptorService
distribution.BlobDeleter
}
func sanitizeLocation(location, base string) (string, error) {
baseURL, err := url.Parse(base)
if err != nil {
return "", err
}
locationURL, err := url.Parse(location)
if err != nil {
return "", err
}
return baseURL.ResolveReference(locationURL).String(), nil
}
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return bs.statter.Stat(ctx, dgst)
}
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
reader, err := bs.Open(ctx, dgst)
if err != nil {
return nil, err
}
defer reader.Close()
return ioutil.ReadAll(reader)
}
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return nil, err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return nil, err
}
return transport.NewHTTPReadSeeker(bs.client, blobURL,
func(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUnknown
}
return HandleErrorResponse(resp)
}), nil
}
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
panic("not implemented")
}
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
writer, err := bs.Create(ctx)
if err != nil {
return distribution.Descriptor{}, err
}
dgstr := digest.Canonical.New()
n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
if err != nil {
return distribution.Descriptor{}, err
}
if n < int64(len(p)) {
return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
}
desc := distribution.Descriptor{
MediaType: mediaType,
Size: int64(len(p)),
Digest: dgstr.Digest(),
}
return writer.Commit(ctx, desc)
}
// createOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type createOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
}
}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
opts, ok := v.(*createOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
opts.Mount.ShouldMount = true
opts.Mount.From = ref
return nil
})
}
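// A cross-repository mount attempt might look like this (hypothetical
// caller; 'canonicalRef' names a blob in another repository):
//   _, err := blobStore.Create(ctx, WithMountFrom(canonicalRef))
//   if ebm, ok := err.(distribution.ErrBlobMounted); ok {
//       // the registry mounted the blob; ebm.Descriptor describes it
//   }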
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
var opts createOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
var values []url.Values
if opts.Mount.ShouldMount {
values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
}
u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
if err != nil {
return nil, err
}
resp, err := bs.client.Post(u, "", nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusCreated:
desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
if err != nil {
return nil, err
}
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
case http.StatusAccepted:
// TODO(dmcgowan): Check for invalid UUID
uuid := resp.Header.Get("Docker-Upload-UUID")
location, err := sanitizeLocation(resp.Header.Get("Location"), u)
if err != nil {
return nil, err
}
return &httpBlobUpload{
statter: bs.statter,
client: bs.client,
uuid: uuid,
startedAt: time.Now(),
location: location,
}, nil
default:
return nil, HandleErrorResponse(resp)
}
}
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
return bs.statter.Clear(ctx, dgst)
}
type blobStatter struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
}
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
u, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return distribution.Descriptor{}, err
}
resp, err := bs.client.Head(u)
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
lengthHeader := resp.Header.Get("Content-Length")
if lengthHeader == "" {
return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
}
length, err := strconv.ParseInt(lengthHeader, 10, 64)
if err != nil {
return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
}
return distribution.Descriptor{
MediaType: resp.Header.Get("Content-Type"),
Size: length,
Digest: dgst,
}, nil
} else if resp.StatusCode == http.StatusNotFound {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return distribution.Descriptor{}, HandleErrorResponse(resp)
}
func buildCatalogValues(maxEntries int, last string) url.Values {
values := url.Values{}
if maxEntries > 0 {
values.Add("n", strconv.Itoa(maxEntries))
}
if last != "" {
values.Add("last", last)
}
return values
}
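// Illustration (not in the original source): pagination parameters for a
// catalog request are encoded in sorted key order by url.Values.
//
//	values := buildCatalogValues(100, "k8s/pause")
//	// values.Encode() == "last=k8s%2Fpause&n=100"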
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", blobURL, nil)
if err != nil {
return err
}
resp, err := bs.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
return nil
}
return HandleErrorResponse(resp)
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
return nil
}

File diff suppressed because it is too large


@ -0,0 +1,250 @@
package transport
import (
"errors"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strconv"
)
var (
contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
// ErrWrongCodeForByteRange is returned if the client sends a request
// with a Range header but the server returns a 2xx or 3xx code other
// than 206 Partial Content.
ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
)
// ReadSeekCloser combines io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
// request. When seeking and starting a read from a non-zero offset
// a "Range" header will be added which sets the offset.
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
return &httpReadSeeker{
client: client,
url: url,
errorHandler: errorHandler,
}
}
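// Hypothetical usage sketch (blobURL is a stand-in): determining the remote
// size by seeking to the end and back, which does not trigger a second HTTP
// request because the seek is undone before the next Read. This assumes the
// server reports a Content-Length.
//
//	rsc := NewHTTPReadSeeker(http.DefaultClient, blobURL, nil)
//	defer rsc.Close()
//	size, err := rsc.Seek(0, os.SEEK_END)
//	if err == nil {
//		rsc.Seek(0, os.SEEK_SET) // rewind; the open connection is reused
//	}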
type httpReadSeeker struct {
client *http.Client
url string
// errorHandler creates an error from an unsuccessful HTTP response.
// This allows the error to be created with the HTTP response body
// without leaking the body through a returned error.
errorHandler func(*http.Response) error
size int64
// rc is the remote read closer.
rc io.ReadCloser
// readerOffset tracks the offset as of the last read.
readerOffset int64
// seekOffset allows Seek to override the offset. Seek changes
// seekOffset instead of changing readOffset directly so that
// connection resets can be delayed and possibly avoided if the
// seek is undone (i.e. seeking to the end and then back to the
// beginning).
seekOffset int64
err error
}
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
if hrs.err != nil {
return 0, hrs.err
}
// If we sought to a different position, we need to reset the
// connection. This logic is here instead of Seek so that if
// a seek is undone before the next read, the connection doesn't
// need to be closed and reopened. A common example of this is
// seeking to the end to determine the length, and then seeking
// back to the original position.
if hrs.readerOffset != hrs.seekOffset {
hrs.reset()
}
hrs.readerOffset = hrs.seekOffset
rd, err := hrs.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
hrs.seekOffset += int64(n)
hrs.readerOffset += int64(n)
return n, err
}
func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
if hrs.err != nil {
return 0, hrs.err
}
lastReaderOffset := hrs.readerOffset
if whence == os.SEEK_SET && hrs.rc == nil {
// If no request has been made yet, and we are seeking to an
// absolute position, set the read offset as well to avoid an
// unnecessary request.
hrs.readerOffset = offset
}
_, err := hrs.reader()
if err != nil {
hrs.readerOffset = lastReaderOffset
return 0, err
}
newOffset := hrs.seekOffset
switch whence {
case os.SEEK_CUR:
newOffset += offset
case os.SEEK_END:
if hrs.size < 0 {
return 0, errors.New("content length not known")
}
newOffset = hrs.size + offset
case os.SEEK_SET:
newOffset = offset
}
if newOffset < 0 {
err = errors.New("cannot seek to negative position")
} else {
hrs.seekOffset = newOffset
}
return hrs.seekOffset, err
}
func (hrs *httpReadSeeker) Close() error {
if hrs.err != nil {
return hrs.err
}
// close and release reader chain
if hrs.rc != nil {
hrs.rc.Close()
}
hrs.rc = nil
hrs.err = errors.New("httpLayer: closed")
return nil
}
func (hrs *httpReadSeeker) reset() {
if hrs.err != nil {
return
}
if hrs.rc != nil {
hrs.rc.Close()
hrs.rc = nil
}
}
func (hrs *httpReadSeeker) reader() (io.Reader, error) {
if hrs.err != nil {
return nil, hrs.err
}
if hrs.rc != nil {
return hrs.rc, nil
}
req, err := http.NewRequest("GET", hrs.url, nil)
if err != nil {
return nil, err
}
if hrs.readerOffset > 0 {
// If we are at a different offset, issue a range request from there.
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
// TODO: get context in here
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
}
resp, err := hrs.client.Do(req)
if err != nil {
return nil, err
}
// Normally would use client.SuccessStatus, but that would be a cyclic
// import
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
if hrs.readerOffset > 0 {
if resp.StatusCode != http.StatusPartialContent {
return nil, ErrWrongCodeForByteRange
}
contentRange := resp.Header.Get("Content-Range")
if contentRange == "" {
return nil, errors.New("no Content-Range header found in HTTP 206 response")
}
submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
if len(submatches) < 4 {
return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
}
startByte, err := strconv.ParseUint(submatches[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
}
if startByte != uint64(hrs.readerOffset) {
return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
}
endByte, err := strconv.ParseUint(submatches[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
}
if submatches[3] == "*" {
hrs.size = -1
} else {
size, err := strconv.ParseUint(submatches[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
}
if endByte+1 != size {
return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
}
hrs.size = int64(size)
}
} else if resp.StatusCode == http.StatusOK {
hrs.size = resp.ContentLength
} else {
hrs.size = -1
}
hrs.rc = resp.Body
} else {
defer resp.Body.Close()
if hrs.errorHandler != nil {
return nil, hrs.errorHandler(resp)
}
return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
}
return hrs.rc, nil
}


@ -0,0 +1,147 @@
package transport
import (
"io"
"net/http"
"sync"
)
// RequestModifier represents an object which will do an in-place
// modification of an HTTP request.
type RequestModifier interface {
ModifyRequest(*http.Request) error
}
type headerModifier http.Header
// NewHeaderRequestModifier returns a new RequestModifier which will
// add the given headers to a request.
func NewHeaderRequestModifier(header http.Header) RequestModifier {
return headerModifier(header)
}
func (h headerModifier) ModifyRequest(req *http.Request) error {
for k, s := range http.Header(h) {
req.Header[k] = append(req.Header[k], s...)
}
return nil
}
// NewTransport creates a new transport which will apply modifiers to
// the request on a RoundTrip call.
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
return &transport{
Modifiers: modifiers,
Base: base,
}
}
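// Hypothetical usage sketch: an http.Client whose outgoing requests all carry
// an extra header, composed from the pieces above. The header value is
// illustrative only.
//
//	modifier := NewHeaderRequestModifier(http.Header{
//		"User-Agent": []string{"docker-distribution-client"},
//	})
//	client := &http.Client{Transport: NewTransport(http.DefaultTransport, modifier)}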
// transport is an http.RoundTripper that makes HTTP requests after
// copying and modifying the request
type transport struct {
Modifiers []RequestModifier
Base http.RoundTripper
mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}
// RoundTrip applies the configured modifiers to a copy of the request and
// passes the modified copy to the base RoundTripper, leaving the caller's
// request unmodified.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
req2 := cloneRequest(req)
for _, modifier := range t.Modifiers {
if err := modifier.ModifyRequest(req2); err != nil {
return nil, err
}
}
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
if err != nil {
t.setModReq(req, nil)
return nil, err
}
res.Body = &onEOFReader{
rc: res.Body,
fn: func() { t.setModReq(req, nil) },
}
return res, nil
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
t.mu.Lock()
modReq := t.modReq[req]
delete(t.modReq, req)
t.mu.Unlock()
cr.CancelRequest(modReq)
}
}
func (t *transport) base() http.RoundTripper {
if t.Base != nil {
return t.Base
}
return http.DefaultTransport
}
func (t *transport) setModReq(orig, mod *http.Request) {
t.mu.Lock()
defer t.mu.Unlock()
if t.modReq == nil {
t.modReq = make(map[*http.Request]*http.Request)
}
if mod == nil {
delete(t.modReq, orig)
} else {
t.modReq[orig] = mod
}
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
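// Illustration (not in the original source): because the Header map is the
// only deep-copied field, modifiers may add headers to the clone without
// mutating the caller's request, while Body and URL remain shared.
//
//	r2 := cloneRequest(r)
//	r2.Header.Set("Authorization", "Bearer ...")
//	// r.Header is unchanged; r2.Body is the same Body as r.Body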
type onEOFReader struct {
rc io.ReadCloser
fn func()
}
func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}
func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}

2
docs/doc.go Normal file

@ -0,0 +1,2 @@
// Package registry provides the main entrypoints for running a registry.
package registry

2474
docs/handlers/api_test.go Normal file

File diff suppressed because it is too large

996
docs/handlers/app.go Normal file

@ -0,0 +1,996 @@
package handlers
import (
cryptorand "crypto/rand"
"expvar"
"fmt"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"runtime"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/configuration"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/health"
"github.com/docker/distribution/health/checks"
"github.com/docker/distribution/notifications"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/auth"
registrymiddleware "github.com/docker/distribution/registry/middleware/registry"
repositorymiddleware "github.com/docker/distribution/registry/middleware/repository"
"github.com/docker/distribution/registry/proxy"
"github.com/docker/distribution/registry/storage"
memorycache "github.com/docker/distribution/registry/storage/cache/memory"
rediscache "github.com/docker/distribution/registry/storage/cache/redis"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
"github.com/docker/distribution/version"
"github.com/docker/libtrust"
"github.com/garyburd/redigo/redis"
"github.com/gorilla/mux"
"golang.org/x/net/context"
)
// randomSecretSize is the number of random bytes to generate if no secret
// was specified.
const randomSecretSize = 32
// defaultCheckInterval is the default time in between health checks
const defaultCheckInterval = 10 * time.Second
// App is a global registry application object. Shared resources can be placed
// on this object that will be accessible from all requests. Any writable
// fields should be protected.
type App struct {
context.Context
Config *configuration.Configuration
router *mux.Router // main application router, configured with dispatchers
driver storagedriver.StorageDriver // driver maintains the app global storage driver instance.
registry distribution.Namespace // registry is the primary registry backend for the app instance.
accessController auth.AccessController // main access controller for application
// httpHost is a parsed representation of the http.host parameter from
// the configuration. Only the Scheme and Host fields are used.
httpHost url.URL
// events contains notification related configuration.
events struct {
sink notifications.Sink
source notifications.SourceRecord
}
redis *redis.Pool
// trustKey is a deprecated key used to sign manifests converted to
// schema1 for backward compatibility. It should not be used for any
// other purposes.
trustKey libtrust.PrivateKey
// isCache is true if this registry is configured as a pull through cache
isCache bool
// readOnly is true if the registry is in a read-only maintenance mode
readOnly bool
}
// NewApp takes a configuration and returns a configured app, ready to serve
// requests. The app only implements ServeHTTP and can be wrapped in other
// handlers accordingly.
func NewApp(ctx context.Context, config *configuration.Configuration) *App {
app := &App{
Config: config,
Context: ctx,
router: v2.RouterWithPrefix(config.HTTP.Prefix),
isCache: config.Proxy.RemoteURL != "",
}
// Register the handler dispatchers.
app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
return http.HandlerFunc(apiBase)
})
app.register(v2.RouteNameManifest, imageManifestDispatcher)
app.register(v2.RouteNameCatalog, catalogDispatcher)
app.register(v2.RouteNameTags, tagsDispatcher)
app.register(v2.RouteNameBlob, blobDispatcher)
app.register(v2.RouteNameBlobUpload, blobUploadDispatcher)
app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher)
// override the storage driver's UA string for registry outbound HTTP requests
storageParams := config.Storage.Parameters()
if storageParams == nil {
storageParams = make(configuration.Parameters)
}
storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version())
var err error
app.driver, err = factory.Create(config.Storage.Type(), storageParams)
if err != nil {
// TODO(stevvooe): Move the creation of a service into a protected
// method, where this is created lazily. Its status can be queried via
// a health check.
panic(err)
}
purgeConfig := uploadPurgeDefaultConfig()
if mc, ok := config.Storage["maintenance"]; ok {
if v, ok := mc["uploadpurging"]; ok {
purgeConfig, ok = v.(map[interface{}]interface{})
if !ok {
panic("uploadpurging config key must contain additional keys")
}
}
if v, ok := mc["readonly"]; ok {
readOnly, ok := v.(map[interface{}]interface{})
if !ok {
panic("readonly config key must contain additional keys")
}
if readOnlyEnabled, ok := readOnly["enabled"]; ok {
app.readOnly, ok = readOnlyEnabled.(bool)
if !ok {
panic("readonly's enabled config key must have a boolean value")
}
}
}
}
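// For reference, a sketch of the configuration shape this block parses,
// shown as YAML (an illustration under stated assumptions, not normative
// documentation of the configuration package):
//
//	storage:
//	  maintenance:
//	    uploadpurging:
//	      enabled: true
//	      age: 168h
//	      interval: 24h
//	      dryrun: false
//	    readonly:
//	      enabled: false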
startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig)
app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"])
if err != nil {
panic(err)
}
app.configureSecret(config)
app.configureEvents(config)
app.configureRedis(config)
app.configureLogHook(config)
options := registrymiddleware.GetRegistryOptions()
if config.Compatibility.Schema1.TrustKey != "" {
app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey)
if err != nil {
panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err))
}
} else {
// Generate an ephemeral key to be used for signing converted manifests
// for clients that don't support schema2.
app.trustKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
panic(err)
}
}
options = append(options, storage.Schema1SigningKey(app.trustKey))
if config.HTTP.Host != "" {
u, err := url.Parse(config.HTTP.Host)
if err != nil {
panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err))
}
app.httpHost = *u
}
if app.isCache {
options = append(options, storage.DisableDigestResumption)
}
// configure deletion
if d, ok := config.Storage["delete"]; ok {
e, ok := d["enabled"]
if ok {
if deleteEnabled, ok := e.(bool); ok && deleteEnabled {
options = append(options, storage.EnableDelete)
}
}
}
// configure redirects
var redirectDisabled bool
if redirectConfig, ok := config.Storage["redirect"]; ok {
v := redirectConfig["disable"]
switch v := v.(type) {
case bool:
redirectDisabled = v
default:
panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig))
}
}
if redirectDisabled {
ctxu.GetLogger(app).Infof("backend redirection disabled")
} else {
options = append(options, storage.EnableRedirect)
}
// configure storage caches
if cc, ok := config.Storage["cache"]; ok {
v, ok := cc["blobdescriptor"]
if !ok {
// Backwards compatible: "layerinfo" == "blobdescriptor"
v = cc["layerinfo"]
}
switch v {
case "redis":
if app.redis == nil {
panic("redis configuration required to use for layerinfo cache")
}
cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis)
localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider))
app.registry, err = storage.NewRegistry(app, app.driver, localOptions...)
if err != nil {
panic("could not create registry: " + err.Error())
}
ctxu.GetLogger(app).Infof("using redis blob descriptor cache")
case "inmemory":
cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider()
localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider))
app.registry, err = storage.NewRegistry(app, app.driver, localOptions...)
if err != nil {
panic("could not create registry: " + err.Error())
}
ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache")
default:
if v != "" {
ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"])
}
}
}
if app.registry == nil {
// configure the registry if no cache section is available.
app.registry, err = storage.NewRegistry(app.Context, app.driver, options...)
if err != nil {
panic("could not create registry: " + err.Error())
}
}
app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"])
if err != nil {
panic(err)
}
authType := config.Auth.Type()
if authType != "" {
accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters())
if err != nil {
panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err))
}
app.accessController = accessController
ctxu.GetLogger(app).Debugf("configured %q access controller", authType)
}
// configure as a pull through cache
if config.Proxy.RemoteURL != "" {
app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy)
if err != nil {
panic(err.Error())
}
app.isCache = true
ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL)
}
return app
}
// RegisterHealthChecks is an awful hack to defer health check registration
// control to callers. This should only ever be called once per registry
// process, typically in a main function. The correct way would be register
// health checks outside of app, since multiple apps may exist in the same
// process. Because the configuration and app are tightly coupled,
// implementing this properly will require a refactor. This method may panic
// if called twice in the same process.
func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) {
if len(healthRegistries) > 1 {
panic("RegisterHealthChecks called with more than one registry")
}
healthRegistry := health.DefaultRegistry
if len(healthRegistries) == 1 {
healthRegistry = healthRegistries[0]
}
if app.Config.Health.StorageDriver.Enabled {
interval := app.Config.Health.StorageDriver.Interval
if interval == 0 {
interval = defaultCheckInterval
}
storageDriverCheck := func() error {
_, err := app.driver.List(app, "/") // "/" should always exist
return err // any error will be treated as failure
}
if app.Config.Health.StorageDriver.Threshold != 0 {
healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck)
} else {
healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck)
}
}
for _, fileChecker := range app.Config.Health.FileCheckers {
interval := fileChecker.Interval
if interval == 0 {
interval = defaultCheckInterval
}
ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second)
healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval))
}
for _, httpChecker := range app.Config.Health.HTTPCheckers {
interval := httpChecker.Interval
if interval == 0 {
interval = defaultCheckInterval
}
statusCode := httpChecker.StatusCode
if statusCode == 0 {
statusCode = 200
}
checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers)
if httpChecker.Threshold != 0 {
ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold)
healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold))
} else {
ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second)
healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval))
}
}
for _, tcpChecker := range app.Config.Health.TCPCheckers {
interval := tcpChecker.Interval
if interval == 0 {
interval = defaultCheckInterval
}
checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout)
if tcpChecker.Threshold != 0 {
ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold)
healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold))
} else {
ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second)
healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval))
}
}
}
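// Hypothetical usage sketch (config construction elided): health checks are
// registered once, typically from a main function, against the default
// health registry before serving.
//
//	app := NewApp(ctx, config)
//	app.RegisterHealthChecks()
//	http.ListenAndServe(config.HTTP.Addr, app)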
// register a handler with the application, by route name. The handler will be
// passed through the application filters and context will be constructed at
// request time.
func (app *App) register(routeName string, dispatch dispatchFunc) {
// TODO(stevvooe): This odd dispatcher/route registration is by-product of
// some limitations in the gorilla/mux router. We are using it to keep
// routing consistent between the client and server, but we may want to
// replace it with manual routing and structure-based dispatch for better
// control over the request execution.
app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch))
}
// configureEvents prepares the event sink for action.
func (app *App) configureEvents(configuration *configuration.Configuration) {
// Configure all of the endpoint sinks.
var sinks []notifications.Sink
for _, endpoint := range configuration.Notifications.Endpoints {
if endpoint.Disabled {
ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name)
continue
}
ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers)
endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{
Timeout: endpoint.Timeout,
Threshold: endpoint.Threshold,
Backoff: endpoint.Backoff,
Headers: endpoint.Headers,
})
sinks = append(sinks, endpoint)
}
// NOTE(stevvooe): Moving to a new queuing implementation is as easy as
// replacing broadcaster with a rabbitmq implementation. It's recommended
// that the registry instances also act as the workers to keep deployment
// simple.
app.events.sink = notifications.NewBroadcaster(sinks...)
// Populate registry event source
hostname, err := os.Hostname()
if err != nil {
hostname = configuration.HTTP.Addr
} else {
// try to pick the port off the config
_, port, err := net.SplitHostPort(configuration.HTTP.Addr)
if err == nil {
hostname = net.JoinHostPort(hostname, port)
}
}
app.events.source = notifications.SourceRecord{
Addr: hostname,
InstanceID: ctxu.GetStringValue(app, "instance.id"),
}
}
func (app *App) configureRedis(configuration *configuration.Configuration) {
if configuration.Redis.Addr == "" {
ctxu.GetLogger(app).Infof("redis not configured")
return
}
pool := &redis.Pool{
Dial: func() (redis.Conn, error) {
// TODO(stevvooe): Yet another use case for contextual timing.
ctx := context.WithValue(app, "redis.connect.startedat", time.Now())
done := func(err error) {
logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration",
ctxu.Since(ctx, "redis.connect.startedat"))
if err != nil {
logger.Errorf("redis: error connecting: %v", err)
} else {
logger.Infof("redis: connect %v", configuration.Redis.Addr)
}
}
conn, err := redis.DialTimeout("tcp",
configuration.Redis.Addr,
configuration.Redis.DialTimeout,
configuration.Redis.ReadTimeout,
configuration.Redis.WriteTimeout)
if err != nil {
ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v",
configuration.Redis.Addr, err)
done(err)
return nil, err
}
// authorize the connection
if configuration.Redis.Password != "" {
if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil {
defer conn.Close()
done(err)
return nil, err
}
}
// select the database to use
if configuration.Redis.DB != 0 {
if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil {
defer conn.Close()
done(err)
return nil, err
}
}
done(nil)
return conn, nil
},
MaxIdle: configuration.Redis.Pool.MaxIdle,
MaxActive: configuration.Redis.Pool.MaxActive,
IdleTimeout: configuration.Redis.Pool.IdleTimeout,
TestOnBorrow: func(c redis.Conn, t time.Time) error {
// TODO(stevvooe): We can probably do something more interesting
// here with the health package.
_, err := c.Do("PING")
return err
},
Wait: false, // if a connection is not available, proceed without cache.
}
app.redis = pool
// setup expvar
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} {
return map[string]interface{}{
"Config": configuration.Redis,
"Active": app.redis.ActiveCount(),
}
}))
}
// configureLogHook prepares logging hook parameters.
func (app *App) configureLogHook(configuration *configuration.Configuration) {
entry, ok := ctxu.GetLogger(app).(*log.Entry)
if !ok {
// somehow, we are not using logrus
return
}
logger := entry.Logger
for _, configHook := range configuration.Log.Hooks {
if !configHook.Disabled {
switch configHook.Type {
case "mail":
hook := &logHook{}
hook.LevelsParam = configHook.Levels
hook.Mail = &mailer{
Addr: configHook.MailOptions.SMTP.Addr,
Username: configHook.MailOptions.SMTP.Username,
Password: configHook.MailOptions.SMTP.Password,
Insecure: configHook.MailOptions.SMTP.Insecure,
From: configHook.MailOptions.From,
To: configHook.MailOptions.To,
}
logger.Hooks.Add(hook)
default:
}
}
}
}
// configureSecret creates a random secret if a secret wasn't included in the
// configuration.
func (app *App) configureSecret(configuration *configuration.Configuration) {
if configuration.HTTP.Secret == "" {
var secretBytes [randomSecretSize]byte
if _, err := cryptorand.Read(secretBytes[:]); err != nil {
panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err))
}
configuration.HTTP.Secret = string(secretBytes[:])
ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.")
}
}
func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close() // ensure that request body is always closed.
// Instantiate an http context here so we can track the error codes
// returned by the request router.
ctx := defaultContextManager.context(app, w, r)
defer func() {
status, ok := ctx.Value("http.response.status").(int)
if ok && status >= 200 && status <= 399 {
ctxu.GetResponseLogger(ctx).Infof("response completed")
}
}()
defer defaultContextManager.release(ctx)
// NOTE(stevvooe): Total hack to get instrumented responsewriter from context.
var err error
w, err = ctxu.GetResponseWriter(ctx)
if err != nil {
ctxu.GetLogger(ctx).Warnf("response writer not found in context")
}
// Set a header with the Docker Distribution API Version for all responses.
w.Header().Add("Docker-Distribution-API-Version", "registry/2.0")
app.router.ServeHTTP(w, r)
}
// dispatchFunc takes a context and request and returns a constructed handler
// for the route. The dispatcher will use this to dynamically create request
// specific handlers for each endpoint without creating a new router for each
// request.
type dispatchFunc func(ctx *Context, r *http.Request) http.Handler
// TODO(stevvooe): dispatchers should probably have some validation error
// chain with proper error reporting.
// dispatcher returns a handler that constructs a request specific context and
// handler, using the dispatch factory function.
func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for headerName, headerValues := range app.Config.HTTP.Headers {
for _, value := range headerValues {
w.Header().Add(headerName, value)
}
}
context := app.context(w, r)
if err := app.authorized(w, r, context); err != nil {
ctxu.GetLogger(context).Warnf("error authorizing context: %v", err)
return
}
// Add username to request logging
context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey))
if app.nameRequired(r) {
nameRef, err := reference.ParseNamed(getName(context))
if err != nil {
ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err)
context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{
Name: getName(context),
Reason: err,
})
if err := errcode.ServeJSON(w, context.Errors); err != nil {
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return
}
repository, err := app.registry.Repository(context, nameRef)
if err != nil {
ctxu.GetLogger(context).Errorf("error resolving repository: %v", err)
switch err := err.(type) {
case distribution.ErrRepositoryUnknown:
context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err))
case distribution.ErrRepositoryNameInvalid:
context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))
case errcode.Error:
context.Errors = append(context.Errors, err)
}
if err := errcode.ServeJSON(w, context.Errors); err != nil {
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return
}
// assign and decorate the authorized repository with an event bridge.
context.Repository = notifications.Listen(
repository,
app.eventBridge(context, r))
context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"])
if err != nil {
ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err)
context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
if err := errcode.ServeJSON(w, context.Errors); err != nil {
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return
}
}
dispatch(context, r).ServeHTTP(w, r)
// Automated error response handling here. Handlers may return their
// own errors if they need different behavior (such as range errors
// for layer upload).
if context.Errors.Len() > 0 {
if err := errcode.ServeJSON(w, context.Errors); err != nil {
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
app.logError(context, context.Errors)
}
})
}
func (app *App) logError(context context.Context, errors errcode.Errors) {
for _, e1 := range errors {
var c ctxu.Context
switch e1.(type) {
case errcode.Error:
e, _ := e1.(errcode.Error)
c = ctxu.WithValue(context, "err.code", e.Code)
c = ctxu.WithValue(c, "err.message", e.Code.Message())
c = ctxu.WithValue(c, "err.detail", e.Detail)
case errcode.ErrorCode:
e, _ := e1.(errcode.ErrorCode)
c = ctxu.WithValue(context, "err.code", e)
c = ctxu.WithValue(c, "err.message", e.Message())
default:
// just normal go 'error'
c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown)
c = ctxu.WithValue(c, "err.message", e1.Error())
}
c = ctxu.WithLogger(c, ctxu.GetLogger(c,
"err.code",
"err.message",
"err.detail"))
ctxu.GetResponseLogger(c).Errorf("response completed with error")
}
}
// context constructs the context object for the application. This should only be
// called once per request.
func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
ctx := defaultContextManager.context(app, w, r)
ctx = ctxu.WithVars(ctx, r)
ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx,
"vars.name",
"vars.reference",
"vars.digest",
"vars.uuid"))
context := &Context{
App: app,
Context: ctx,
}
if app.httpHost.Scheme != "" && app.httpHost.Host != "" {
// A "host" item in the configuration takes precedence over
// X-Forwarded-Proto and X-Forwarded-Host headers, and the
// hostname in the request.
context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false)
} else {
context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs)
}
return context
}
// authorized checks if the request can proceed with access to the requested
// repository. If it succeeds, the context may access the requested
// repository. An error will be returned if access is not available.
func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
ctxu.GetLogger(context).Debug("authorizing request")
repo := getName(context)
if app.accessController == nil {
return nil // access controller is not enabled.
}
var accessRecords []auth.Access
if repo != "" {
accessRecords = appendAccessRecords(accessRecords, r.Method, repo)
if fromRepo := r.FormValue("from"); fromRepo != "" {
// mounting a blob from one repository to another requires pull (GET)
// access to the source repository.
accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo)
}
} else {
// Only allow the name not to be set on the base route.
if app.nameRequired(r) {
// For this to be properly secured, repo must always be set for a
// resource that may make a modification. The only condition under
// which name is not set and we still allow access is when the
// base route is accessed. This section prevents us from making
// that mistake elsewhere in the code, allowing any operation to
// proceed.
if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil {
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return fmt.Errorf("forbidden: no repository name")
}
accessRecords = appendCatalogAccessRecord(accessRecords, r)
}
ctx, err := app.accessController.Authorized(context.Context, accessRecords...)
if err != nil {
switch err := err.(type) {
case auth.Challenge:
// Add the appropriate WWW-Authenticate header
err.SetHeaders(w)
if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil {
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
default:
// This condition is a potential security problem either in
// the configuration or whatever is backing the access
// controller. Just return a bad request with no information
// to avoid exposure. The request should not proceed.
ctxu.GetLogger(context).Errorf("error checking authorization: %v", err)
w.WriteHeader(http.StatusBadRequest)
}
return err
}
// TODO(stevvooe): This pattern needs to be cleaned up a bit. One context
// should be replaced by another, rather than replacing the context on a
// mutable object.
context.Context = ctx
return nil
}
// eventBridge returns a bridge for the current request, configured with the
// correct actor and source.
func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener {
actor := notifications.ActorRecord{
Name: getUserName(ctx, r),
}
request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r)
return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink)
}
// nameRequired returns true if the route requires a name.
func (app *App) nameRequired(r *http.Request) bool {
route := mux.CurrentRoute(r)
// Guard against a nil route before reading its name.
if route == nil {
return true
}
routeName := route.GetName()
return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog
}
// apiBase implements a simple yes-man for doing overall checks against the
// api. This can support auth roundtrips to support docker login.
func apiBase(w http.ResponseWriter, r *http.Request) {
const emptyJSON = "{}"
// Provide a simple /v2/ 200 OK response with empty json response.
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON)))
fmt.Fprint(w, emptyJSON)
}
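// Illustrative exchange (not in the original source); the API version header
// is added by ServeHTTP for every response:
//
//	GET /v2/ HTTP/1.1
//
//	HTTP/1.1 200 OK
//	Content-Type: application/json; charset=utf-8
//	Content-Length: 2
//	Docker-Distribution-API-Version: registry/2.0
//
//	{}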
// appendAccessRecords checks the method and adds the appropriate Access records to the records list.
func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access {
resource := auth.Resource{
Type: "repository",
Name: repo,
}
switch method {
case "GET", "HEAD":
records = append(records,
auth.Access{
Resource: resource,
Action: "pull",
})
case "POST", "PUT", "PATCH":
records = append(records,
auth.Access{
Resource: resource,
Action: "pull",
},
auth.Access{
Resource: resource,
Action: "push",
})
case "DELETE":
// DELETE access requires full admin rights, which is represented
// as "*". This may not be ideal.
records = append(records,
auth.Access{
Resource: resource,
Action: "*",
})
}
return records
}
// Add the access record for the catalog if it's our current route
func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access {
route := mux.CurrentRoute(r)
routeName := route.GetName()
if routeName == v2.RouteNameCatalog {
resource := auth.Resource{
Type: "registry",
Name: "catalog",
}
accessRecords = append(accessRecords,
auth.Access{
Resource: resource,
Action: "*",
})
}
return accessRecords
}
// applyRegistryMiddleware wraps a registry instance with the configured middlewares
func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) {
for _, mw := range middlewares {
rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry)
if err != nil {
return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err)
}
registry = rmw
}
return registry, nil
}
// applyRepoMiddleware wraps a repository with the configured middlewares
func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) {
for _, mw := range middlewares {
rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository)
if err != nil {
return nil, err
}
repository = rmw
}
return repository, nil
}
// applyStorageMiddleware wraps a storage driver with the configured middlewares
func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) {
for _, mw := range middlewares {
smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver)
if err != nil {
return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err)
}
driver = smw
}
return driver, nil
}
// uploadPurgeDefaultConfig provides a default configuration for upload
// purging to be used in the absence of configuration in the
// configuration file
func uploadPurgeDefaultConfig() map[interface{}]interface{} {
config := map[interface{}]interface{}{}
config["enabled"] = true
config["age"] = "168h"
config["interval"] = "24h"
config["dryrun"] = false
return config
}
func badPurgeUploadConfig(reason string) {
panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason))
}
// startUploadPurger schedules a goroutine which will periodically
// check upload directories for old files and delete them
func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) {
if config["enabled"] == false {
return
}
var purgeAgeDuration time.Duration
var err error
purgeAge, ok := config["age"]
if ok {
ageStr, ok := purgeAge.(string)
if !ok {
badPurgeUploadConfig("age is not a string")
}
purgeAgeDuration, err = time.ParseDuration(ageStr)
if err != nil {
badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error()))
}
} else {
badPurgeUploadConfig("age missing")
}
var intervalDuration time.Duration
interval, ok := config["interval"]
if ok {
intervalStr, ok := interval.(string)
if !ok {
badPurgeUploadConfig("interval is not a string")
}
intervalDuration, err = time.ParseDuration(intervalStr)
if err != nil {
badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error()))
}
} else {
badPurgeUploadConfig("interval missing")
}
var dryRunBool bool
dryRun, ok := config["dryrun"]
if ok {
dryRunBool, ok = dryRun.(bool)
if !ok {
badPurgeUploadConfig("cannot parse dryrun")
}
} else {
badPurgeUploadConfig("dryrun missing")
}
go func() {
rand.Seed(time.Now().Unix())
jitter := time.Duration(rand.Int()%60) * time.Minute
log.Infof("Starting upload purge in %s", jitter)
time.Sleep(jitter)
for {
storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool)
log.Infof("Starting upload purge in %s", intervalDuration)
time.Sleep(intervalDuration)
}
}()
}

274
docs/handlers/app_test.go Normal file

@ -0,0 +1,274 @@
package handlers
import (
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"testing"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/auth"
_ "github.com/docker/distribution/registry/auth/silly"
"github.com/docker/distribution/registry/storage"
memorycache "github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/testdriver"
)
// TestAppDispatcher builds an application with a test dispatcher and ensures
// that requests are properly dispatched and the handlers are constructed.
// This only tests the dispatch mechanism. The underlying dispatchers must be
// tested individually.
func TestAppDispatcher(t *testing.T) {
driver := testdriver.New()
ctx := context.Background()
registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
app := &App{
Config: &configuration.Configuration{},
Context: ctx,
router: v2.Router(),
driver: driver,
registry: registry,
}
server := httptest.NewServer(app)
router := v2.Router()
serverURL, err := url.Parse(server.URL)
if err != nil {
t.Fatalf("error parsing server url: %v", err)
}
varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc {
return func(ctx *Context, r *http.Request) http.Handler {
// Always checks the same name context
if ctx.Repository.Named().Name() != getName(ctx) {
t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar")
}
// Check that we have all that is expected
for expectedK, expectedV := range expectedVars {
if ctx.Value(expectedK) != expectedV {
t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV)
}
}
// Check that we only have variables that are expected
for k, v := range ctx.Value("vars").(map[string]string) {
_, ok := expectedVars[k]
if !ok { // name is checked on context
// We have an unexpected key, fail
t.Fatalf("unexpected key %q in vars with value %q", k, v)
}
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})
}
}
// unflatten a list of variables, suitable for gorilla/mux, to a map[string]string
unflatten := func(vars []string) map[string]string {
m := make(map[string]string)
for i := 0; i < len(vars)-1; i = i + 2 {
m[vars[i]] = vars[i+1]
}
return m
}
for _, testcase := range []struct {
endpoint string
vars []string
}{
{
endpoint: v2.RouteNameManifest,
vars: []string{
"name", "foo/bar",
"reference", "sometag",
},
},
{
endpoint: v2.RouteNameTags,
vars: []string{
"name", "foo/bar",
},
},
{
endpoint: v2.RouteNameBlobUpload,
vars: []string{
"name", "foo/bar",
},
},
{
endpoint: v2.RouteNameBlobUploadChunk,
vars: []string{
"name", "foo/bar",
"uuid", "theuuid",
},
},
} {
app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
u, err := route.URL(testcase.vars...)
if err != nil {
t.Fatal(err)
}
resp, err := http.Get(u.String())
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK)
}
}
}
// TestNewApp covers the creation of an application via NewApp with a
// configuration.
func TestNewApp(t *testing.T) {
ctx := context.Background()
config := configuration.Configuration{
Storage: configuration.Storage{
"testdriver": nil,
},
Auth: configuration.Auth{
// For now, we simply test that new auth results in a viable
// application.
"silly": {
"realm": "realm-test",
"service": "service-test",
},
},
}
// Mostly, with this test, given a sane configuration, we are simply
// ensuring that NewApp doesn't panic. We might want to tweak this
// behavior.
app := NewApp(ctx, &config)
server := httptest.NewServer(app)
builder, err := v2.NewURLBuilderFromString(server.URL, false)
if err != nil {
t.Fatalf("error creating urlbuilder: %v", err)
}
baseURL, err := builder.BuildBaseURL()
if err != nil {
t.Fatalf("error creating baseURL: %v", err)
}
// TODO(stevvooe): The rest of this test might belong in the API tests.
// Just hit the app and make sure we get a 401 Unauthorized error.
req, err := http.Get(baseURL)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
defer req.Body.Close()
if req.StatusCode != http.StatusUnauthorized {
t.Fatalf("unexpected status code during request: %v", err)
}
if req.Header.Get("Content-Type") != "application/json; charset=utf-8" {
t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8")
}
expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\""
if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a {
t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a)
}
var errs errcode.Errors
dec := json.NewDecoder(req.Body)
if err := dec.Decode(&errs); err != nil {
t.Fatalf("error decoding error response: %v", err)
}
err2, ok := errs[0].(errcode.ErrorCoder)
if !ok {
t.Fatalf("not an ErrorCoder: %#v", errs[0])
}
if err2.ErrorCode() != errcode.ErrorCodeUnauthorized {
t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized)
}
}
// Test the access record accumulator
func TestAppendAccessRecords(t *testing.T) {
repo := "testRepo"
expectedResource := auth.Resource{
Type: "repository",
Name: repo,
}
expectedPullRecord := auth.Access{
Resource: expectedResource,
Action: "pull",
}
expectedPushRecord := auth.Access{
Resource: expectedResource,
Action: "push",
}
expectedAllRecord := auth.Access{
Resource: expectedResource,
Action: "*",
}
records := []auth.Access{}
result := appendAccessRecords(records, "GET", repo)
expectedResult := []auth.Access{expectedPullRecord}
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
records = []auth.Access{}
result = appendAccessRecords(records, "HEAD", repo)
expectedResult = []auth.Access{expectedPullRecord}
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
records = []auth.Access{}
result = appendAccessRecords(records, "POST", repo)
expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
records = []auth.Access{}
result = appendAccessRecords(records, "PUT", repo)
expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
records = []auth.Access{}
result = appendAccessRecords(records, "PATCH", repo)
expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
records = []auth.Access{}
result = appendAccessRecords(records, "DELETE", repo)
expectedResult = []auth.Access{expectedAllRecord}
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
}


@ -0,0 +1,11 @@
// +build go1.4
package handlers
import (
"net/http"
)
func basicAuth(r *http.Request) (username, password string, ok bool) {
return r.BasicAuth()
}


@ -0,0 +1,41 @@
// +build !go1.4
package handlers
import (
"encoding/base64"
"net/http"
"strings"
)
// NOTE(stevvooe): This is the basic auth support from go1.4, reproduced here
// to ensure we can compile on go1.3 and earlier.
// BasicAuth returns the username and password provided in the request's
// Authorization header, if the request uses HTTP Basic Authentication.
// See RFC 2617, Section 2.
func basicAuth(r *http.Request) (username, password string, ok bool) {
auth := r.Header.Get("Authorization")
if auth == "" {
return
}
return parseBasicAuth(auth)
}
// parseBasicAuth parses an HTTP Basic Authentication string.
// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
func parseBasicAuth(auth string) (username, password string, ok bool) {
if !strings.HasPrefix(auth, "Basic ") {
return
}
c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
if err != nil {
return
}
cs := string(c)
s := strings.IndexByte(cs, ':')
if s < 0 {
return
}
return cs[:s], cs[s+1:], true
}

99
docs/handlers/blob.go Normal file

@ -0,0 +1,99 @@
package handlers
import (
"net/http"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/gorilla/handlers"
)
// blobDispatcher uses the request context to build a blobHandler.
func blobDispatcher(ctx *Context, r *http.Request) http.Handler {
dgst, err := getDigest(ctx)
if err != nil {
if err == errDigestNotAvailable {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
})
}
blobHandler := &blobHandler{
Context: ctx,
Digest: dgst,
}
mhandler := handlers.MethodHandler{
"GET": http.HandlerFunc(blobHandler.GetBlob),
"HEAD": http.HandlerFunc(blobHandler.GetBlob),
}
if !ctx.readOnly {
mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob)
}
return mhandler
}
// blobHandler serves http blob requests.
type blobHandler struct {
*Context
Digest digest.Digest
}
// GetBlob fetches the binary data from backend storage returns it in the
// response.
func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) {
context.GetLogger(bh).Debug("GetBlob")
blobs := bh.Repository.Blobs(bh)
desc, err := blobs.Stat(bh, bh.Digest)
if err != nil {
if err == distribution.ErrBlobUnknown {
bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest))
} else {
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil {
context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err)
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// DeleteBlob deletes a layer blob
func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) {
context.GetLogger(bh).Debug("DeleteBlob")
blobs := bh.Repository.Blobs(bh)
err := blobs.Delete(bh, bh.Digest)
if err != nil {
switch err {
case distribution.ErrUnsupported:
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported)
return
case distribution.ErrBlobUnknown:
bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown)
return
default:
bh.Errors = append(bh.Errors, err)
context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error())
return
}
}
w.Header().Set("Content-Length", "0")
w.WriteHeader(http.StatusAccepted)
}

368
docs/handlers/blobupload.go Normal file

@ -0,0 +1,368 @@
package handlers
import (
"fmt"
"net/http"
"net/url"
"github.com/docker/distribution"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/storage"
"github.com/gorilla/handlers"
)
// blobUploadDispatcher constructs and returns the blob upload handler for the
// given request context.
func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
buh := &blobUploadHandler{
Context: ctx,
UUID: getUploadUUID(ctx),
}
handler := handlers.MethodHandler{
"GET": http.HandlerFunc(buh.GetUploadStatus),
"HEAD": http.HandlerFunc(buh.GetUploadStatus),
}
if !ctx.readOnly {
handler["POST"] = http.HandlerFunc(buh.StartBlobUpload)
handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData)
handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete)
handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload)
}
if buh.UUID != "" {
state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
if err != nil {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
})
}
buh.State = state
if state.Name != ctx.Repository.Named().Name() {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name())
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail("mismatched repository name in upload state"))
})
}
if state.UUID != buh.UUID {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail("mismatched uuid in upload state"))
})
}
blobs := ctx.Repository.Blobs(buh)
upload, err := blobs.Resume(buh, buh.UUID)
if err != nil {
ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
if err == distribution.ErrBlobUploadUnknown {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
})
}
buh.Upload = upload
if size := upload.Size(); size != buh.State.Offset {
defer upload.Close()
ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
upload.Cancel(buh)
})
}
return closeResources(handler, buh.Upload)
}
return handler
}
// blobUploadHandler handles the http blob upload process.
type blobUploadHandler struct {
*Context
// UUID identifies the upload instance for the current request; blob
// writers are keyed by this UUID.
UUID string
Upload distribution.BlobWriter
State blobUploadState
}
// StartBlobUpload begins the blob upload process and allocates a server-side
// blob writer session, optionally mounting the blob from a separate repository.
func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) {
var options []distribution.BlobCreateOption
fromRepo := r.FormValue("from")
mountDigest := r.FormValue("mount")
if mountDigest != "" && fromRepo != "" {
opt, err := buh.createBlobMountOption(fromRepo, mountDigest)
if opt != nil && err == nil {
options = append(options, opt)
}
}
blobs := buh.Repository.Blobs(buh)
upload, err := blobs.Create(buh, options...)
if err != nil {
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
} else if err == distribution.ErrUnsupported {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
} else {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
buh.Upload = upload
if err := buh.blobUploadResponse(w, r, true); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Docker-Upload-UUID", buh.Upload.ID())
w.WriteHeader(http.StatusAccepted)
}
// GetUploadStatus returns the status of a given upload, identified by id.
func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
return
}
// TODO(dmcgowan): Set last argument to false in blobUploadResponse when
// resumable upload is supported. This will enable returning a non-zero
// range for clients to begin uploading at an offset.
if err := buh.blobUploadResponse(w, r, true); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Docker-Upload-UUID", buh.UUID)
w.WriteHeader(http.StatusNoContent)
}
// PatchBlobData writes data to an upload.
func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
return
}
ct := r.Header.Get("Content-Type")
if ct != "" && ct != "application/octet-stream" {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("unexpected Content-Type %q", ct)))
// TODO(dmcgowan): encode error
return
}
// TODO(dmcgowan): support Content-Range header to seek and write range
if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil {
// copyFullPayload reports the error if necessary
return
}
if err := buh.blobUploadResponse(w, r, false); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.WriteHeader(http.StatusAccepted)
}
// PutBlobUploadComplete takes the final request of a blob upload. The
// request may include all the blob data or no blob data. Any data
// provided is received and verified. If successful, the blob is linked
// into the blob store and 201 Created is returned with the canonical
// url of the blob.
func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
return
}
dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
if dgstStr == "" {
// no digest? return error, but allow retry.
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing"))
return
}
dgst, err := digest.ParseDigest(dgstStr)
if err != nil {
// digest unparseable? return error, but allow retry.
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed"))
return
}
if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil {
// copyFullPayload reports the error if necessary
return
}
desc, err := buh.Upload.Commit(buh, distribution.Descriptor{
Digest: dgst,
// TODO(stevvooe): This isn't wildly important yet, but we should
// really set the mediatype. For now, we can let the backend take care
// of this.
})
if err != nil {
switch err := err.(type) {
case distribution.ErrBlobInvalidDigest:
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
case errcode.Error:
buh.Errors = append(buh.Errors, err)
default:
switch err {
case distribution.ErrAccessDenied:
buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied)
case distribution.ErrUnsupported:
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported:
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
default:
ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err)
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
}
// Clean up the backend blob data if there was an error.
if err := buh.Upload.Cancel(buh); err != nil {
// If the cleanup fails, all we can do is observe and report.
ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err)
}
return
}
if err := buh.writeBlobCreatedHeaders(w, desc); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// CancelBlobUpload cancels an in-progress upload of a blob.
func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
return
}
w.Header().Set("Docker-Upload-UUID", buh.UUID)
if err := buh.Upload.Cancel(buh); err != nil {
ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err)
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
w.WriteHeader(http.StatusNoContent)
}
// blobUploadResponse provides a standard response for blob upload and
// chunk requests. It sets the correct headers, but the response status is
// left to the caller. The fresh argument is used to ensure that new blob
// uploads always start at a 0 offset. This allows disabling resumable push
// by always returning a 0 offset on check status.
func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {
// TODO(stevvooe): Need a better way to manage the upload state automatically.
buh.State.Name = buh.Repository.Named().Name()
buh.State.UUID = buh.Upload.ID()
buh.Upload.Close()
buh.State.Offset = buh.Upload.Size()
buh.State.StartedAt = buh.Upload.StartedAt()
token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State)
if err != nil {
ctxu.GetLogger(buh).Infof("error building upload state token: %s", err)
return err
}
uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL(
buh.Repository.Named(), buh.Upload.ID(),
url.Values{
"_state": []string{token},
})
if err != nil {
ctxu.GetLogger(buh).Infof("error building upload url: %s", err)
return err
}
endRange := buh.Upload.Size()
if endRange > 0 {
endRange = endRange - 1
}
w.Header().Set("Docker-Upload-UUID", buh.UUID)
w.Header().Set("Location", uploadURL)
w.Header().Set("Content-Length", "0")
w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))
return nil
}
// createBlobMountOption builds the option used to mount a blob from another
// repository by its digest. When the option is applied during Create, the
// blob is linked into the blob store and 201 Created is returned with the
// canonical url of the blob.
func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) {
dgst, err := digest.ParseDigest(mountDigest)
if err != nil {
return nil, err
}
ref, err := reference.ParseNamed(fromRepo)
if err != nil {
return nil, err
}
canonical, err := reference.WithDigest(ref, dgst)
if err != nil {
return nil, err
}
return storage.WithMountFrom(canonical), nil
}
// writeBlobCreatedHeaders writes the standard headers describing a newly
// created blob. A 201 Created is written as well as the canonical URL and
// blob digest.
func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error {
ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest)
if err != nil {
return err
}
blobURL, err := buh.urlBuilder.BuildBlobURL(ref)
if err != nil {
return err
}
w.Header().Set("Location", blobURL)
w.Header().Set("Content-Length", "0")
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
w.WriteHeader(http.StatusCreated)
return nil
}

95
docs/handlers/catalog.go Normal file
View File

@ -0,0 +1,95 @@
package handlers
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"github.com/docker/distribution/registry/api/errcode"
"github.com/gorilla/handlers"
)
const maximumReturnedEntries = 100
func catalogDispatcher(ctx *Context, r *http.Request) http.Handler {
catalogHandler := &catalogHandler{
Context: ctx,
}
return handlers.MethodHandler{
"GET": http.HandlerFunc(catalogHandler.GetCatalog),
}
}
type catalogHandler struct {
*Context
}
type catalogAPIResponse struct {
Repositories []string `json:"repositories"`
}
func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
var moreEntries = true
q := r.URL.Query()
lastEntry := q.Get("last")
maxEntries, err := strconv.Atoi(q.Get("n"))
if err != nil || maxEntries < 0 {
maxEntries = maximumReturnedEntries
}
repos := make([]string, maxEntries)
filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry)
if err == io.EOF {
moreEntries = false
} else if err != nil {
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// Add a link header if there are more entries to retrieve
if moreEntries {
lastEntry = repos[len(repos)-1]
urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry)
if err != nil {
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Link", urlStr)
}
enc := json.NewEncoder(w)
if err := enc.Encode(catalogAPIResponse{
Repositories: repos[0:filled],
}); err != nil {
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// Use the original URL from the request to create a new URL for
// the link header
func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) {
calledURL, err := url.Parse(origURL)
if err != nil {
return "", err
}
v := url.Values{}
v.Add("n", strconv.Itoa(maxEntries))
v.Add("last", lastEntry)
calledURL.RawQuery = v.Encode()
calledURL.Fragment = ""
urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String())
return urlStr, nil
}

152
docs/handlers/context.go Normal file
View File

@ -0,0 +1,152 @@
package handlers
import (
"fmt"
"net/http"
"sync"
"github.com/docker/distribution"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/auth"
"golang.org/x/net/context"
)
// Context should contain the request-specific context for use across
// handlers. Resources that don't need to be shared across handlers should not
// be on this object.
type Context struct {
// App points to the application structure that created this context.
*App
context.Context
// Repository is the repository for the current request. All requests
// should be scoped to a single repository. This field may be nil.
Repository distribution.Repository
// Errors is a collection of errors encountered during the request to be
// returned to the client API. If errors are added to the collection, the
// handler *must not* start the response via http.ResponseWriter.
Errors errcode.Errors
urlBuilder *v2.URLBuilder
// TODO(stevvooe): The goal is to completely factor this context and
// dispatching out of the web application. Ideally, we should lean on
// context.Context for injection of these resources.
}
// Value overrides context.Context.Value to ensure that calls are routed to
// correct context.
func (ctx *Context) Value(key interface{}) interface{} {
return ctx.Context.Value(key)
}
func getName(ctx context.Context) (name string) {
return ctxu.GetStringValue(ctx, "vars.name")
}
func getReference(ctx context.Context) (reference string) {
return ctxu.GetStringValue(ctx, "vars.reference")
}
var errDigestNotAvailable = fmt.Errorf("digest not available in context")
func getDigest(ctx context.Context) (dgst digest.Digest, err error) {
dgstStr := ctxu.GetStringValue(ctx, "vars.digest")
if dgstStr == "" {
ctxu.GetLogger(ctx).Errorf("digest not available")
return "", errDigestNotAvailable
}
d, err := digest.ParseDigest(dgstStr)
if err != nil {
ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err)
return "", err
}
return d, nil
}
func getUploadUUID(ctx context.Context) (uuid string) {
return ctxu.GetStringValue(ctx, "vars.uuid")
}
// getUserName attempts to resolve a username from the context and request. If
// a username cannot be resolved, the empty string is returned.
func getUserName(ctx context.Context, r *http.Request) string {
username := ctxu.GetStringValue(ctx, auth.UserNameKey)
// Fallback to request user with basic auth
if username == "" {
var ok bool
uname, _, ok := basicAuth(r)
if ok {
username = uname
}
}
return username
}
// contextManager allows us to associate net/context.Context instances with a
// request, based on the memory identity of http.Request. This prepares http-
// level context, which is not application specific. If this is called,
// (*contextManager).release must be called on the context when the request is
// completed.
//
// Providing this circumvents a lot of necessity for dispatchers with the
// benefit of instantiating the request context much earlier.
//
// TODO(stevvooe): Consider making this facility a part of the context package.
type contextManager struct {
contexts map[*http.Request]context.Context
mu sync.Mutex
}
// defaultContextManager is just a global instance to register request contexts.
var defaultContextManager = newContextManager()
func newContextManager() *contextManager {
return &contextManager{
contexts: make(map[*http.Request]context.Context),
}
}
// context either returns a new context or looks it up in the manager.
func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context {
cm.mu.Lock()
defer cm.mu.Unlock()
ctx, ok := cm.contexts[r]
if ok {
return ctx
}
if parent == nil {
parent = ctxu.Background()
}
ctx = ctxu.WithRequest(parent, r)
ctx, w = ctxu.WithResponseWriter(ctx, w)
ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
cm.contexts[r] = ctx
return ctx
}
// release frees any resources associated with the request.
func (cm *contextManager) release(ctx context.Context) {
cm.mu.Lock()
defer cm.mu.Unlock()
r, err := ctxu.GetRequest(ctx)
if err != nil {
ctxu.GetLogger(ctx).Errorf("no request found in context during release")
return
}
delete(cm.contexts, r)
}

View File

@ -0,0 +1,201 @@
package handlers
import (
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/health"
)
func TestFileHealthCheck(t *testing.T) {
interval := time.Second
tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck")
if err != nil {
t.Fatalf("could not create temporary file: %v", err)
}
defer tmpfile.Close()
config := &configuration.Configuration{
Storage: configuration.Storage{
"inmemory": configuration.Parameters{},
},
Health: configuration.Health{
FileCheckers: []configuration.FileChecker{
{
Interval: interval,
File: tmpfile.Name(),
},
},
},
}
ctx := context.Background()
app := NewApp(ctx, config)
healthRegistry := health.NewRegistry()
app.RegisterHealthChecks(healthRegistry)
// Wait for health check to happen
<-time.After(2 * interval)
status := healthRegistry.CheckStatus()
if len(status) != 1 {
t.Fatal("expected 1 item in health check results")
}
if status[tmpfile.Name()] != "file exists" {
t.Fatal(`did not get "file exists" result for health check`)
}
os.Remove(tmpfile.Name())
<-time.After(2 * interval)
if len(healthRegistry.CheckStatus()) != 0 {
t.Fatal("expected 0 items in health check results")
}
}
func TestTCPHealthCheck(t *testing.T) {
interval := time.Second
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("could not create listener: %v", err)
}
addrStr := ln.Addr().String()
// Start accepting
go func() {
for {
conn, err := ln.Accept()
if err != nil {
// listener was closed
return
}
defer conn.Close()
}
}()
config := &configuration.Configuration{
Storage: configuration.Storage{
"inmemory": configuration.Parameters{},
},
Health: configuration.Health{
TCPCheckers: []configuration.TCPChecker{
{
Interval: interval,
Addr: addrStr,
Timeout: 500 * time.Millisecond,
},
},
},
}
ctx := context.Background()
app := NewApp(ctx, config)
healthRegistry := health.NewRegistry()
app.RegisterHealthChecks(healthRegistry)
// Wait for health check to happen
<-time.After(2 * interval)
if len(healthRegistry.CheckStatus()) != 0 {
t.Fatal("expected 0 items in health check results")
}
ln.Close()
<-time.After(2 * interval)
// Health check should now fail
status := healthRegistry.CheckStatus()
if len(status) != 1 {
t.Fatal("expected 1 item in health check results")
}
if status[addrStr] != "connection to "+addrStr+" failed" {
t.Fatal(`did not get "connection failed" result for health check`)
}
}
func TestHTTPHealthCheck(t *testing.T) {
interval := time.Second
threshold := 3
stopFailing := make(chan struct{})
checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != "HEAD" {
t.Fatalf("expected HEAD request, got %s", r.Method)
}
select {
case <-stopFailing:
w.WriteHeader(http.StatusOK)
default:
w.WriteHeader(http.StatusInternalServerError)
}
}))
config := &configuration.Configuration{
Storage: configuration.Storage{
"inmemory": configuration.Parameters{},
},
Health: configuration.Health{
HTTPCheckers: []configuration.HTTPChecker{
{
Interval: interval,
URI: checkedServer.URL,
Threshold: threshold,
},
},
},
}
ctx := context.Background()
app := NewApp(ctx, config)
healthRegistry := health.NewRegistry()
app.RegisterHealthChecks(healthRegistry)
for i := 0; ; i++ {
<-time.After(interval)
status := healthRegistry.CheckStatus()
if i < threshold-1 {
// definitely shouldn't have hit the threshold yet
if len(status) != 0 {
t.Fatal("expected 1 item in health check results")
}
continue
}
if i < threshold+1 {
// right on the threshold - don't expect a failure yet
continue
}
if len(status) != 1 {
t.Fatal("expected 1 item in health check results")
}
if status[checkedServer.URL] != "downstream service returned unexpected status: 500" {
t.Fatal("did not get expected result for health check")
}
break
}
// Signal HTTP handler to start returning 200
close(stopFailing)
<-time.After(2 * interval)
if len(healthRegistry.CheckStatus()) != 0 {
t.Fatal("expected 0 items in health check results")
}
}

66
docs/handlers/helpers.go Normal file
View File

@ -0,0 +1,66 @@
package handlers
import (
"errors"
"io"
"net/http"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/registry/api/errcode"
)
// closeResources closes all the provided resources after running the target
// handler.
func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, closer := range closers {
defer closer.Close()
}
handler.ServeHTTP(w, r)
})
}
// copyFullPayload copies the payload of an HTTP request to destWriter. If it
// receives less content than expected, and the client disconnected during the
// upload, it avoids sending a 400 error to keep the logs cleaner.
func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
// Get a channel that tells us if the client disconnects
var clientClosed <-chan bool
if notifier, ok := responseWriter.(http.CloseNotifier); ok {
clientClosed = notifier.CloseNotify()
} else {
ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter)
}
// Read in the data, if any.
copied, err := io.Copy(destWriter, r.Body)
if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
// Didn't receive as much content as expected. Did the client
// disconnect during the request? If so, avoid returning a 400
// error to keep the logs cleaner.
select {
case <-clientClosed:
// Set the response code to "499 Client Closed Request"
// Even though the connection has already been closed,
// this causes the logger to pick up a 499 error
// instead of showing 0 for the HTTP status.
responseWriter.WriteHeader(499)
ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{
"error": err,
"copied": copied,
"contentLength": r.ContentLength,
}, "error", "copied", "contentLength").Error("client disconnected during " + action)
return errors.New("client disconnected")
default:
}
}
if err != nil {
ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err)
*errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err))
return err
}
return nil
}

72
docs/handlers/hmac.go Normal file
View File

@ -0,0 +1,72 @@
package handlers
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"time"
)
// blobUploadState captures the serializable state of the blob upload.
type blobUploadState struct {
// Name is the primary repository under which the blob will be linked.
Name string
// UUID identifies the upload.
UUID string
// Offset contains the current progress of the upload.
Offset int64
// StartedAt is the original start time of the upload.
StartedAt time.Time
}
type hmacKey string
// unpackUploadState unpacks and validates the blob upload state from the
// token, using the hmacKey secret.
func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) {
var state blobUploadState
tokenBytes, err := base64.URLEncoding.DecodeString(token)
if err != nil {
return state, err
}
mac := hmac.New(sha256.New, []byte(secret))
if len(tokenBytes) < mac.Size() {
return state, fmt.Errorf("invalid token: too short")
}
macBytes := tokenBytes[:mac.Size()]
messageBytes := tokenBytes[mac.Size():]
mac.Write(messageBytes)
if !hmac.Equal(mac.Sum(nil), macBytes) {
return state, fmt.Errorf("invalid token: bad mac")
}
if err := json.Unmarshal(messageBytes, &state); err != nil {
return state, err
}
return state, nil
}
// packUploadState packs the upload state, signed with an HMAC digest, using
// the hmacKey secret, encoding to url safe base64. The resulting token can be
// used to share data with minimized risk of external tampering.
func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) {
mac := hmac.New(sha256.New, []byte(secret))
p, err := json.Marshal(lus)
if err != nil {
return "", err
}
mac.Write(p)
return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}

117
docs/handlers/hmac_test.go Normal file
View File

@ -0,0 +1,117 @@
package handlers
import "testing"
var blobUploadStates = []blobUploadState{
{
Name: "hello",
UUID: "abcd-1234-qwer-0987",
Offset: 0,
},
{
Name: "hello-world",
UUID: "abcd-1234-qwer-0987",
Offset: 0,
},
{
Name: "h3ll0_w0rld",
UUID: "abcd-1234-qwer-0987",
Offset: 1337,
},
{
Name: "ABCDEFG",
UUID: "ABCD-1234-QWER-0987",
Offset: 1234567890,
},
{
Name: "this-is-A-sort-of-Long-name-for-Testing",
UUID: "dead-1234-beef-0987",
Offset: 8675309,
},
}
var secrets = []string{
"supersecret",
"12345",
"a",
"SuperSecret",
"Sup3r... S3cr3t!",
"This is a reasonably long secret key that is used for the purpose of testing.",
"\u2603+\u2744", // snowman+snowflake
}
// TestLayerUploadTokens constructs state tokens from blob upload states and
// validates that the tokens can be used to reconstruct the proper upload state.
func TestLayerUploadTokens(t *testing.T) {
secret := hmacKey("supersecret")
for _, testcase := range blobUploadStates {
token, err := secret.packUploadState(testcase)
if err != nil {
t.Fatal(err)
}
lus, err := secret.unpackUploadState(token)
if err != nil {
t.Fatal(err)
}
assertBlobUploadStateEquals(t, testcase, lus)
}
}
// TestHMACValidation ensures that any HMAC token providers are compatible if and
// only if they share the same secret.
func TestHMACValidation(t *testing.T) {
for _, secret := range secrets {
secret1 := hmacKey(secret)
secret2 := hmacKey(secret)
badSecret := hmacKey("DifferentSecret")
for _, testcase := range blobUploadStates {
token, err := secret1.packUploadState(testcase)
if err != nil {
t.Fatal(err)
}
lus, err := secret2.unpackUploadState(token)
if err != nil {
t.Fatal(err)
}
assertBlobUploadStateEquals(t, testcase, lus)
_, err = badSecret.unpackUploadState(token)
if err == nil {
t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token)
}
badToken, err := badSecret.packUploadState(lus)
if err != nil {
t.Fatal(err)
}
_, err = secret1.unpackUploadState(badToken)
if err == nil {
t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
}
_, err = secret2.unpackUploadState(badToken)
if err == nil {
t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
}
}
}
}
func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) {
if expected.Name != received.Name {
t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name)
}
if expected.UUID != received.UUID {
t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID)
}
if expected.Offset != received.Offset {
t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset)
}
}

53
docs/handlers/hooks.go Normal file
View File

@ -0,0 +1,53 @@
package handlers
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
"github.com/Sirupsen/logrus"
)
// logHook mails log entries (such as panics) raised by the web application.
type logHook struct {
LevelsParam []string
Mail *mailer
}
// Fire sends the log entry as an email using the hook's mailer.
func (hook *logHook) Fire(entry *logrus.Entry) error {
addr := strings.Split(hook.Mail.Addr, ":")
if len(addr) != 2 {
return errors.New("Invalid Mail Address")
}
host := addr[0]
subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
html := `
{{.Message}}
{{range $key, $value := .Data}}
{{$key}}: {{$value}}
{{end}}
`
b := bytes.NewBuffer(make([]byte, 0))
t := template.Must(template.New("mail body").Parse(html))
if err := t.Execute(b, entry); err != nil {
return err
}
body := b.String()
return hook.Mail.sendMail(subject, body)
}
// Levels contains the hook levels to be caught.
func (hook *logHook) Levels() []logrus.Level {
levels := []logrus.Level{}
for _, v := range hook.LevelsParam {
lv, _ := logrus.ParseLevel(v)
levels = append(levels, lv)
}
return levels
}

386
docs/handlers/images.go Normal file
View File

@ -0,0 +1,386 @@
package handlers
import (
"bytes"
"fmt"
"net/http"
"strings"
"github.com/docker/distribution"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/gorilla/handlers"
)
// These constants determine which architecture and OS to choose from a
// manifest list when downconverting it to a schema1 manifest.
const (
defaultArch = "amd64"
defaultOS = "linux"
)
// imageManifestDispatcher takes the request context and builds the
// appropriate handler for handling image manifest requests.
func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {
imageManifestHandler := &imageManifestHandler{
Context: ctx,
}
reference := getReference(ctx)
dgst, err := digest.ParseDigest(reference)
if err != nil {
// We just have a tag
imageManifestHandler.Tag = reference
} else {
imageManifestHandler.Digest = dgst
}
mhandler := handlers.MethodHandler{
"GET": http.HandlerFunc(imageManifestHandler.GetImageManifest),
"HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest),
}
if !ctx.readOnly {
mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest)
mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest)
}
return mhandler
}
// imageManifestHandler handles http operations on image manifests.
type imageManifestHandler struct {
*Context
// One of tag or digest gets set, depending on what is present in context.
Tag string
Digest digest.Digest
}
// GetImageManifest fetches the image manifest from the storage backend, if it exists.
func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
ctxu.GetLogger(imh).Debug("GetImageManifest")
manifests, err := imh.Repository.Manifests(imh)
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
var manifest distribution.Manifest
if imh.Tag != "" {
tags := imh.Repository.Tags(imh)
desc, err := tags.Get(imh, imh.Tag)
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
return
}
imh.Digest = desc.Digest
}
if etagMatch(r, imh.Digest.String()) {
w.WriteHeader(http.StatusNotModified)
return
}
var options []distribution.ManifestServiceOption
if imh.Tag != "" {
options = append(options, distribution.WithTag(imh.Tag))
}
manifest, err = manifests.Get(imh, imh.Digest, options...)
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
return
}
supportsSchema2 := false
supportsManifestList := false
// this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values
// https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202
for _, acceptHeader := range r.Header["Accept"] {
// r.Header[...] is a slice in case the request contains the same header more than once
// if the header isn't set, we'll get the zero value, which "range" will handle gracefully
// we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616)
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
for _, mediaType := range strings.Split(acceptHeader, ",") {
// remove "; q=..." if present
if i := strings.Index(mediaType, ";"); i >= 0 {
mediaType = mediaType[:i]
}
// it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f")
mediaType = strings.TrimSpace(mediaType)
if mediaType == schema2.MediaTypeManifest {
supportsSchema2 = true
}
if mediaType == manifestlist.MediaTypeManifestList {
supportsManifestList = true
}
}
}
schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest)
manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList)
// Only rewrite schema2 manifests when they are being fetched by tag.
// If they are being fetched by digest, we can't return something not
// matching the digest.
if imh.Tag != "" && isSchema2 && !supportsSchema2 {
// Rewrite manifest in schema1 format
ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String())
manifest, err = imh.convertSchema2Manifest(schema2Manifest)
if err != nil {
return
}
} else if imh.Tag != "" && isManifestList && !supportsManifestList {
// Rewrite manifest in schema1 format
ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String())
// Find the image manifest corresponding to the default
// platform
var manifestDigest digest.Digest
for _, manifestDescriptor := range manifestList.Manifests {
if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS {
manifestDigest = manifestDescriptor.Digest
break
}
}
if manifestDigest == "" {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)
return
}
manifest, err = manifests.Get(imh, manifestDigest)
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
return
}
// If necessary, convert the image manifest
if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 {
manifest, err = imh.convertSchema2Manifest(schema2Manifest)
if err != nil {
return
}
}
}
ct, p, err := manifest.Payload()
if err != nil {
return
}
w.Header().Set("Content-Type", ct)
w.Header().Set("Content-Length", fmt.Sprint(len(p)))
w.Header().Set("Docker-Content-Digest", imh.Digest.String())
w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
w.Write(p)
}
func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) {
targetDescriptor := schema2Manifest.Target()
blobs := imh.Repository.Blobs(imh)
configJSON, err := blobs.Get(imh, targetDescriptor.Digest)
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
return nil, err
}
ref := imh.Repository.Named()
if imh.Tag != "" {
ref, err = reference.WithTag(ref, imh.Tag)
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err))
return nil, err
}
}
builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON)
for _, d := range schema2Manifest.References() {
if err := builder.AppendReference(d); err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
return nil, err
}
}
manifest, err := builder.Build(imh)
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
return nil, err
}
imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical)
return manifest, nil
}
func etagMatch(r *http.Request, etag string) bool {
for _, headerVal := range r.Header["If-None-Match"] {
if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted
return true
}
}
return false
}
// PutImageManifest validates and stores an image in the registry.
func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
ctxu.GetLogger(imh).Debug("PutImageManifest")
manifests, err := imh.Repository.Manifests(imh)
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
var jsonBuf bytes.Buffer
if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil {
// copyFullPayload reports the error if necessary
return
}
mediaType := r.Header.Get("Content-Type")
manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
if err != nil {
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
return
}
if imh.Digest != "" {
if desc.Digest != imh.Digest {
ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest)
imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
return
}
} else if imh.Tag != "" {
imh.Digest = desc.Digest
} else {
imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
return
}
var options []distribution.ManifestServiceOption
if imh.Tag != "" {
options = append(options, distribution.WithTag(imh.Tag))
}
_, err = manifests.Put(imh, manifest, options...)
if err != nil {
// TODO(stevvooe): These error handling switches really need to be
// handled by an app global mapper.
if err == distribution.ErrUnsupported {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
return
}
if err == distribution.ErrAccessDenied {
imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)
return
}
switch err := err.(type) {
case distribution.ErrManifestVerification:
for _, verificationError := range err {
switch verificationError := verificationError.(type) {
case distribution.ErrManifestBlobUnknown:
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))
case distribution.ErrManifestNameInvalid:
imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))
case distribution.ErrManifestUnverified:
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)
default:
if verificationError == digest.ErrDigestInvalidFormat {
imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
} else {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)
}
}
}
case errcode.Error:
imh.Errors = append(imh.Errors, err)
default:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
// Tag this manifest
if imh.Tag != "" {
tags := imh.Repository.Tags(imh)
err = tags.Tag(imh, imh.Tag, desc)
if err != nil {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// Construct a canonical url for the uploaded manifest.
ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)
if err != nil {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
location, err := imh.urlBuilder.BuildManifestURL(ref)
if err != nil {
// NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to
// happen. We'll log the error here but proceed as if it worked. Worst
// case, we set an empty location header.
ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err)
}
w.Header().Set("Location", location)
w.Header().Set("Docker-Content-Digest", imh.Digest.String())
w.WriteHeader(http.StatusCreated)
}
// DeleteImageManifest removes the manifest with the given digest from the registry.
func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
ctxu.GetLogger(imh).Debug("DeleteImageManifest")
manifests, err := imh.Repository.Manifests(imh)
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
err = manifests.Delete(imh, imh.Digest)
if err != nil {
switch err {
case digest.ErrDigestUnsupported, digest.ErrDigestInvalidFormat:
imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
return
case distribution.ErrBlobUnknown:
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)
return
case distribution.ErrUnsupported:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
return
default:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)
return
}
}
tagService := imh.Repository.Tags(imh)
referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest})
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
for _, tag := range referencedTags {
if err := tagService.Untag(imh, tag); err != nil {
imh.Errors = append(imh.Errors, err)
return
}
}
w.WriteHeader(http.StatusAccepted)
}

45
docs/handlers/mail.go Normal file
View File

@ -0,0 +1,45 @@
package handlers
import (
"errors"
"net/smtp"
"strings"
)
// mailer provides fields of email configuration for sending.
type mailer struct {
Addr, Username, Password, From string
Insecure bool
To []string
}
// sendMail sends an email, provided the mail parameters are configured correctly.
func (mail *mailer) sendMail(subject, message string) error {
addr := strings.Split(mail.Addr, ":")
if len(addr) != 2 {
return errors.New("Invalid Mail Address")
}
host := addr[0]
msg := []byte("To:" + strings.Join(mail.To, ";") +
"\r\nFrom: " + mail.From +
"\r\nSubject: " + subject +
"\r\nContent-Type: text/plain\r\n\r\n" +
message)
auth := smtp.PlainAuth(
"",
mail.Username,
mail.Password,
host,
)
err := smtp.SendMail(
mail.Addr,
auth,
mail.From,
mail.To,
msg,
)
if err != nil {
return err
}
return nil
}

62
docs/handlers/tags.go Normal file
View File

@ -0,0 +1,62 @@
package handlers
import (
"encoding/json"
"net/http"
"github.com/docker/distribution"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/gorilla/handlers"
)
// tagsDispatcher constructs the tags handler api endpoint.
func tagsDispatcher(ctx *Context, r *http.Request) http.Handler {
tagsHandler := &tagsHandler{
Context: ctx,
}
return handlers.MethodHandler{
"GET": http.HandlerFunc(tagsHandler.GetTags),
}
}
// tagsHandler handles requests for lists of tags under a repository name.
type tagsHandler struct {
*Context
}
type tagsAPIResponse struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
// GetTags returns a json list of tags for a specific image name.
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
tagService := th.Repository.Tags(th)
tags, err := tagService.All(th)
if err != nil {
switch err := err.(type) {
case distribution.ErrRepositoryUnknown:
th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()}))
case errcode.Error:
th.Errors = append(th.Errors, err)
default:
th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
enc := json.NewEncoder(w)
if err := enc.Encode(tagsAPIResponse{
Name: th.Repository.Named().Name(),
Tags: tags,
}); err != nil {
th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}

74
docs/listener/listener.go Normal file
View File

@ -0,0 +1,74 @@
package listener
import (
"fmt"
"net"
"os"
"time"
)
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
// It is a plain copy-paste from net/http/server.go.
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
// NewListener announces on laddr and net. Accepted values of net are
// 'unix' and 'tcp'.
func NewListener(net, laddr string) (net.Listener, error) {
switch net {
case "unix":
return newUnixListener(laddr)
case "tcp", "": // an empty net means tcp
return newTCPListener(laddr)
default:
return nil, fmt.Errorf("unknown address type %s", net)
}
}
func newUnixListener(laddr string) (net.Listener, error) {
fi, err := os.Stat(laddr)
if err == nil {
// the file exists.
// try to remove it if it's a socket
if !isSocket(fi.Mode()) {
return nil, fmt.Errorf("file %s exists and is not a socket", laddr)
}
if err := os.Remove(laddr); err != nil {
return nil, err
}
} else if !os.IsNotExist(err) {
// we can't stat the file, which
// means we cannot remove it
return nil, err
}
return net.Listen("unix", laddr)
}
func isSocket(m os.FileMode) bool {
return m&os.ModeSocket != 0
}
func newTCPListener(laddr string) (net.Listener, error) {
ln, err := net.Listen("tcp", laddr)
if err != nil {
return nil, err
}
return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil
}

View File

@ -0,0 +1,54 @@
package middleware
import (
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage"
)
// InitFunc is the type of a RegistryMiddleware factory function and is
// used to register the constructor for different RegistryMiddleware backends.
type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
var middlewares map[string]InitFunc
var registryoptions []storage.RegistryOption
// Register is used to register an InitFunc for
// a RegistryMiddleware backend with the given name.
func Register(name string, initFunc InitFunc) error {
if middlewares == nil {
middlewares = make(map[string]InitFunc)
}
if _, exists := middlewares[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
middlewares[name] = initFunc
return nil
}
// Get constructs a RegistryMiddleware with the given options using the named backend.
func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) {
if middlewares != nil {
if initFunc, exists := middlewares[name]; exists {
return initFunc(ctx, registry, options)
}
}
return nil, fmt.Errorf("no registry middleware registered with name: %s", name)
}
// RegisterOptions adds more options to RegistryOption list. Options get applied before
// any other configuration-based options.
func RegisterOptions(options ...storage.RegistryOption) error {
registryoptions = append(registryoptions, options...)
return nil
}
// GetRegistryOptions returns list of RegistryOption.
func GetRegistryOptions() []storage.RegistryOption {
return registryoptions
}

View File

@ -0,0 +1,40 @@
package middleware
import (
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
)
// InitFunc is the type of a RepositoryMiddleware factory function and is
// used to register the constructor for different RepositoryMiddleware backends.
type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error)
var middlewares map[string]InitFunc
// Register is used to register an InitFunc for
// a RepositoryMiddleware backend with the given name.
func Register(name string, initFunc InitFunc) error {
if middlewares == nil {
middlewares = make(map[string]InitFunc)
}
if _, exists := middlewares[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
middlewares[name] = initFunc
return nil
}
// Get constructs a RepositoryMiddleware with the given options using the named backend.
func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) {
if middlewares != nil {
if initFunc, exists := middlewares[name]; exists {
return initFunc(ctx, repository, options)
}
}
return nil, fmt.Errorf("no repository middleware registered with name: %s", name)
}

58
docs/proxy/proxyauth.go Normal file
View File

@ -0,0 +1,58 @@
package proxy
import (
"net/http"
"net/url"
"github.com/docker/distribution/registry/client/auth"
)
const tokenURL = "https://auth.docker.io/token"
const challengeHeader = "Docker-Distribution-Api-Version"
type userpass struct {
username string
password string
}
type credentials struct {
creds map[string]userpass
}
func (c credentials) Basic(u *url.URL) (string, string) {
up := c.creds[u.String()]
return up.username, up.password
}
func (c credentials) RefreshToken(u *url.URL, service string) string {
return ""
}
func (c credentials) SetRefreshToken(u *url.URL, service, token string) {
}
// configureAuth stores credentials for challenge responses
func configureAuth(username, password string) (auth.CredentialStore, error) {
creds := map[string]userpass{
tokenURL: {
username: username,
password: password,
},
}
return credentials{creds: creds}, nil
}
func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error {
resp, err := http.Get(endpoint)
if err != nil {
return err
}
defer resp.Body.Close()
if err := manager.AddResponse(resp); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,222 @@
package proxy
import (
"io"
"net/http"
"strconv"
"sync"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/proxy/scheduler"
)
// todo(richardscothern): from cache control header or config file
const blobTTL = 7 * 24 * time.Hour
type proxyBlobStore struct {
localStore distribution.BlobStore
remoteStore distribution.BlobService
scheduler *scheduler.TTLExpirationScheduler
repositoryName reference.Named
authChallenger authChallenger
}
var _ distribution.BlobStore = &proxyBlobStore{}
// inflight tracks currently downloading blobs
var inflight = make(map[digest.Digest]struct{})
// mu protects inflight
var mu sync.Mutex
func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
w.Header().Set("Content-Type", mediaType)
w.Header().Set("Docker-Content-Digest", digest.String())
w.Header().Set("Etag", digest.String())
}
func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) {
desc, err := pbs.remoteStore.Stat(ctx, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
if w, ok := writer.(http.ResponseWriter); ok {
setResponseHeaders(w, desc.Size, desc.MediaType, dgst)
}
remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
_, err = io.CopyN(writer, remoteReader, desc.Size)
if err != nil {
return distribution.Descriptor{}, err
}
proxyMetrics.BlobPush(uint64(desc.Size))
return desc, nil
}
func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) {
localDesc, err := pbs.localStore.Stat(ctx, dgst)
if err != nil {
// Stat can report a zero sized file here if it's checked between creation
// and population. Return nil error, and continue
return false, nil
}
proxyMetrics.BlobPush(uint64(localDesc.Size))
return true, pbs.localStore.ServeBlob(ctx, w, r, dgst)
}
func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error {
defer func() {
mu.Lock()
delete(inflight, dgst)
mu.Unlock()
}()
var desc distribution.Descriptor
var err error
var bw distribution.BlobWriter
bw, err = pbs.localStore.Create(ctx)
if err != nil {
return err
}
desc, err = pbs.copyContent(ctx, dgst, bw)
if err != nil {
return err
}
_, err = bw.Commit(ctx, desc)
if err != nil {
return err
}
return nil
}
func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
served, err := pbs.serveLocal(ctx, w, r, dgst)
if err != nil {
context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
return err
}
if served {
return nil
}
if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil {
return err
}
mu.Lock()
_, ok := inflight[dgst]
if ok {
mu.Unlock()
_, err := pbs.copyContent(ctx, dgst, w)
return err
}
inflight[dgst] = struct{}{}
mu.Unlock()
go func(dgst digest.Digest) {
if err := pbs.storeLocal(ctx, dgst); err != nil {
context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
}
blobRef, err := reference.WithDigest(pbs.repositoryName, dgst)
if err != nil {
context.GetLogger(ctx).Errorf("Error creating reference: %s", err)
return
}
pbs.scheduler.AddBlob(blobRef, repositoryTTL)
}(dgst)
_, err = pbs.copyContent(ctx, dgst, w)
if err != nil {
return err
}
return nil
}
func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
desc, err := pbs.localStore.Stat(ctx, dgst)
if err == nil {
return desc, err
}
if err != distribution.ErrBlobUnknown {
return distribution.Descriptor{}, err
}
if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil {
return distribution.Descriptor{}, err
}
return pbs.remoteStore.Stat(ctx, dgst)
}
func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
blob, err := pbs.localStore.Get(ctx, dgst)
if err == nil {
return blob, nil
}
if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil {
return []byte{}, err
}
blob, err = pbs.remoteStore.Get(ctx, dgst)
if err != nil {
return []byte{}, err
}
_, err = pbs.localStore.Put(ctx, "", blob)
if err != nil {
return []byte{}, err
}
return blob, nil
}
// Unsupported functions
func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
return distribution.Descriptor{}, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
return nil, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
return nil, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) {
return distribution.Descriptor{}, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
return nil, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}

View File

@ -0,0 +1,409 @@
package proxy
import (
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/proxy/scheduler"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/filesystem"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
var sbsMu sync.Mutex
type statsBlobStore struct {
stats map[string]int
blobs distribution.BlobStore
}
func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
sbsMu.Lock()
sbs.stats["put"]++
sbsMu.Unlock()
return sbs.blobs.Put(ctx, mediaType, p)
}
func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
sbsMu.Lock()
sbs.stats["get"]++
sbsMu.Unlock()
return sbs.blobs.Get(ctx, dgst)
}
func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
sbsMu.Lock()
sbs.stats["create"]++
sbsMu.Unlock()
return sbs.blobs.Create(ctx, options...)
}
func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
sbsMu.Lock()
sbs.stats["resume"]++
sbsMu.Unlock()
return sbs.blobs.Resume(ctx, id)
}
func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
sbsMu.Lock()
sbs.stats["open"]++
sbsMu.Unlock()
return sbs.blobs.Open(ctx, dgst)
}
func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
sbsMu.Lock()
sbs.stats["serveblob"]++
sbsMu.Unlock()
return sbs.blobs.ServeBlob(ctx, w, r, dgst)
}
func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
sbsMu.Lock()
sbs.stats["stat"]++
sbsMu.Unlock()
return sbs.blobs.Stat(ctx, dgst)
}
func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
sbsMu.Lock()
sbs.stats["delete"]++
sbsMu.Unlock()
return sbs.blobs.Delete(ctx, dgst)
}
type testEnv struct {
numUnique int
inRemote []distribution.Descriptor
store proxyBlobStore
ctx context.Context
}
func (te *testEnv) LocalStats() *map[string]int {
sbsMu.Lock()
ls := te.store.localStore.(statsBlobStore).stats
sbsMu.Unlock()
return &ls
}
func (te *testEnv) RemoteStats() *map[string]int {
sbsMu.Lock()
rs := te.store.remoteStore.(statsBlobStore).stats
sbsMu.Unlock()
return &rs
}
// Populate remote store and record the digests
func makeTestEnv(t *testing.T, name string) *testEnv {
nameRef, err := reference.ParseNamed(name)
if err != nil {
t.Fatalf("unable to parse reference: %s", err)
}
ctx := context.Background()
truthDir, err := ioutil.TempDir("", "truth")
if err != nil {
t.Fatalf("unable to create tempdir: %s", err)
}
cacheDir, err := ioutil.TempDir("", "cache")
if err != nil {
t.Fatalf("unable to create tempdir: %s", err)
}
localDriver, err := filesystem.FromParameters(map[string]interface{}{
"rootdirectory": truthDir,
})
if err != nil {
t.Fatalf("unable to create filesystem driver: %s", err)
}
// todo: create a tempfile area here
localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
localRepo, err := localRegistry.Repository(ctx, nameRef)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
cacheDriver, err := filesystem.FromParameters(map[string]interface{}{
"rootdirectory": cacheDir,
})
if err != nil {
t.Fatalf("unable to create filesystem driver: %s", err)
}
truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
truthRepo, err := truthRegistry.Repository(ctx, nameRef)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
truthBlobs := statsBlobStore{
stats: make(map[string]int),
blobs: truthRepo.Blobs(ctx),
}
localBlobs := statsBlobStore{
stats: make(map[string]int),
blobs: localRepo.Blobs(ctx),
}
s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
proxyBlobStore := proxyBlobStore{
repositoryName: nameRef,
remoteStore: truthBlobs,
localStore: localBlobs,
scheduler: s,
authChallenger: &mockChallenger{},
}
te := &testEnv{
store: proxyBlobStore,
ctx: ctx,
}
return te
}
func makeBlob(size int) []byte {
blob := make([]byte, size)
for i := 0; i < size; i++ {
blob[i] = byte('A' + rand.Int()%48)
}
return blob
}
func init() {
rand.Seed(42)
}
func perm(m []distribution.Descriptor) []distribution.Descriptor {
for i := 0; i < len(m); i++ {
j := rand.Intn(i + 1)
tmp := m[i]
m[i] = m[j]
m[j] = tmp
}
return m
}
func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) {
var inRemote []distribution.Descriptor
for i := 0; i < numUnique; i++ {
bytes := makeBlob(size)
for j := 0; j < blobCount/numUnique; j++ {
desc, err := te.store.remoteStore.Put(te.ctx, "", bytes)
if err != nil {
t.Fatalf("Put in store")
}
inRemote = append(inRemote, desc)
}
}
te.inRemote = inRemote
te.numUnique = numUnique
}
func TestProxyStoreGet(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
localStats := te.LocalStats()
remoteStats := te.RemoteStats()
populate(t, te, 1, 10, 1)
_, err := te.store.Get(te.ctx, te.inRemote[0].Digest)
if err != nil {
t.Fatal(err)
}
if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 {
t.Errorf("Unexpected local counts")
}
if (*remoteStats)["get"] != 1 {
t.Errorf("Unexpected remote get count")
}
_, err = te.store.Get(te.ctx, te.inRemote[0].Digest)
if err != nil {
t.Fatal(err)
}
if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 {
t.Errorf("Unexpected local counts")
}
if (*remoteStats)["get"] != 1 {
t.Errorf("Unexpected remote get count")
}
}
func TestProxyStoreStat(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
remoteBlobCount := 1
populate(t, te, remoteBlobCount, 10, 1)
localStats := te.LocalStats()
remoteStats := te.RemoteStats()
// Stat - touches both stores
for _, d := range te.inRemote {
_, err := te.store.Stat(te.ctx, d.Digest)
if err != nil {
t.Fatalf("Error stating proxy store")
}
}
if (*localStats)["stat"] != remoteBlobCount {
t.Errorf("Unexpected local stat count")
}
if (*remoteStats)["stat"] != remoteBlobCount {
t.Errorf("Unexpected remote stat count")
}
if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) {
t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger)
}
}
func TestProxyStoreServeHighConcurrency(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
blobSize := 200
blobCount := 10
numUnique := 1
populate(t, te, blobCount, blobSize, numUnique)
numClients := 16
testProxyStoreServe(t, te, numClients)
}
func TestProxyStoreServeMany(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
blobSize := 200
blobCount := 10
numUnique := 4
populate(t, te, blobCount, blobSize, numUnique)
numClients := 4
testProxyStoreServe(t, te, numClients)
}
// todo(richardscothern): blobCount must be smaller than num clients
func TestProxyStoreServeBig(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
blobSize := 2 << 20
blobCount := 4
numUnique := 2
populate(t, te, blobCount, blobSize, numUnique)
numClients := 4
testProxyStoreServe(t, te, numClients)
}
// testProxyStoreServe will create clients to consume all blobs
// populated in the truth store
func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
localStats := te.LocalStats()
remoteStats := te.RemoteStats()
var wg sync.WaitGroup
for i := 0; i < numClients; i++ {
// Serveblob - pulls through blobs
wg.Add(1)
go func() {
defer wg.Done()
for _, remoteBlob := range te.inRemote {
w := httptest.NewRecorder()
r, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Error(err)
return
}
// Note: t.Fatal must not be called from a goroutine other than the one
// running the test, so failures here use t.Error and return early instead.
err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest)
if err != nil {
t.Error(err)
return
}
bodyBytes := w.Body.Bytes()
localDigest := digest.FromBytes(bodyBytes)
if localDigest != remoteBlob.Digest {
t.Errorf("Mismatching blob fetch from proxy")
return
}
}
}()
}
wg.Wait()
remoteBlobCount := len(te.inRemote)
if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique {
t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount)
}
// Wait for any async storage goroutines to finish
time.Sleep(3 * time.Second)
remoteStatCount := (*remoteStats)["stat"]
remoteOpenCount := (*remoteStats)["open"]
// Serveblob - blobs come from local
for _, dr := range te.inRemote {
w := httptest.NewRecorder()
r, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Fatal(err)
}
err = te.store.ServeBlob(te.ctx, w, r, dr.Digest)
if err != nil {
t.Fatal(err)
}
dl := digest.FromBytes(w.Body.Bytes())
if dl != dr.Digest {
t.Errorf("Mismatching blob fetch from proxy")
}
}
localStats = te.LocalStats()
remoteStats = te.RemoteStats()
// Ensure remote unchanged
if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount {
t.Fatalf("unexpected remote stats: %#v", remoteStats)
}
}

View File

@ -0,0 +1,95 @@
package proxy
import (
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/proxy/scheduler"
)
// todo(richardscothern): from cache control header or config
const repositoryTTL = 24 * 7 * time.Hour
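As the TODO notes, this TTL could eventually be derived from the upstream's Cache-Control header rather than a constant. A hedged sketch of what such a helper could look like (parseMaxAge is hypothetical, not part of this package, and assumes the standard strings and strconv imports):
// parseMaxAge derives a TTL from a header value such as "public, max-age=86400",
// falling back to the static repositoryTTL default.
func parseMaxAge(headerValue string) time.Duration {
for _, directive := range strings.Split(headerValue, ",") {
directive = strings.TrimSpace(directive)
if strings.HasPrefix(directive, "max-age=") {
if secs, err := strconv.Atoi(strings.TrimPrefix(directive, "max-age=")); err == nil {
return time.Duration(secs) * time.Second
}
}
}
return repositoryTTL
}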
type proxyManifestStore struct {
ctx context.Context
localManifests distribution.ManifestService
remoteManifests distribution.ManifestService
repositoryName reference.Named
scheduler *scheduler.TTLExpirationScheduler
authChallenger authChallenger
}
var _ distribution.ManifestService = &proxyManifestStore{}
func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
exists, err := pms.localManifests.Exists(ctx, dgst)
if err != nil {
return false, err
}
if exists {
return true, nil
}
if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil {
return false, err
}
return pms.remoteManifests.Exists(ctx, dgst)
}
func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
// At this point `dgst` was either specified explicitly, or returned by the
// tagstore with the most recent association.
var fromRemote bool
manifest, err := pms.localManifests.Get(ctx, dgst, options...)
if err != nil {
if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil {
return nil, err
}
manifest, err = pms.remoteManifests.Get(ctx, dgst, options...)
if err != nil {
return nil, err
}
fromRemote = true
}
_, payload, err := manifest.Payload()
if err != nil {
return nil, err
}
proxyMetrics.ManifestPush(uint64(len(payload)))
if fromRemote {
proxyMetrics.ManifestPull(uint64(len(payload)))
_, err = pms.localManifests.Put(ctx, manifest)
if err != nil {
return nil, err
}
// Schedule the manifest blob for removal
repoBlob, err := reference.WithDigest(pms.repositoryName, dgst)
if err != nil {
context.GetLogger(ctx).Errorf("Error creating reference: %s", err)
return nil, err
}
pms.scheduler.AddManifest(repoBlob, repositoryTTL)
// Ensure the manifest blob is cleaned up
//pms.scheduler.AddBlob(blobRef, repositoryTTL)
}
return manifest, err
}
func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
var d digest.Digest
return d, distribution.ErrUnsupported
}
func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}

View File

@ -0,0 +1,274 @@
package proxy
import (
"io"
"sync"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/proxy/scheduler"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
"github.com/docker/libtrust"
)
type statsManifest struct {
manifests distribution.ManifestService
stats map[string]int
}
type manifestStoreTestEnv struct {
manifestDigest digest.Digest // digest of the signed manifest in the local storage
manifests proxyManifestStore
}
func (te manifestStoreTestEnv) LocalStats() *map[string]int {
ls := te.manifests.localManifests.(statsManifest).stats
return &ls
}
func (te manifestStoreTestEnv) RemoteStats() *map[string]int {
rs := te.manifests.remoteManifests.(statsManifest).stats
return &rs
}
func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error {
sm.stats["delete"]++
return sm.manifests.Delete(ctx, dgst)
}
func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
sm.stats["exists"]++
return sm.manifests.Exists(ctx, dgst)
}
func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
sm.stats["get"]++
return sm.manifests.Get(ctx, dgst, options...)
}
func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
sm.stats["put"]++
return sm.manifests.Put(ctx, manifest, options...)
}
type mockChallenger struct {
sync.Mutex
count int
}
// Called for remote operations only
func (m *mockChallenger) tryEstablishChallenges(context.Context) error {
m.Lock()
defer m.Unlock()
m.count++
return nil
}
func (m *mockChallenger) credentialStore() auth.CredentialStore {
return nil
}
func (m *mockChallenger) challengeManager() auth.ChallengeManager {
return nil
}
func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv {
nameRef, err := reference.ParseNamed(name)
if err != nil {
t.Fatalf("unable to parse reference: %s", err)
}
k, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(),
storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
storage.Schema1SigningKey(k))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
truthRepo, err := truthRegistry.Repository(ctx, nameRef)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
tr, err := truthRepo.Manifests(ctx)
if err != nil {
t.Fatal(err.Error())
}
truthManifests := statsManifest{
manifests: tr,
stats: make(map[string]int),
}
manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag)
if err != nil {
t.Fatal(err)
}
localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
localRepo, err := localRegistry.Repository(ctx, nameRef)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
lr, err := localRepo.Manifests(ctx)
if err != nil {
t.Fatal(err.Error())
}
localManifests := statsManifest{
manifests: lr,
stats: make(map[string]int),
}
s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
return &manifestStoreTestEnv{
manifestDigest: manifestDigest,
manifests: proxyManifestStore{
ctx: ctx,
localManifests: localManifests,
remoteManifests: truthManifests,
scheduler: s,
repositoryName: nameRef,
authChallenger: &mockChallenger{},
},
}
}
func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
m := schema1.Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: name,
Tag: tag,
}
for i := 0; i < 2; i++ {
wr, err := repository.Blobs(ctx).Create(ctx)
if err != nil {
t.Fatalf("unexpected error creating test upload: %v", err)
}
rs, ts, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("unexpected error generating test layer file")
}
dgst := digest.Digest(ts)
if _, err := io.Copy(wr, rs); err != nil {
t.Fatalf("unexpected error copying to upload: %v", err)
}
if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
t.Fatalf("unexpected error finishing upload: %v", err)
}
}
pk, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
t.Fatalf("unexpected error generating private key: %v", err)
}
sm, err := schema1.Sign(&m, pk)
if err != nil {
t.Fatalf("error signing manifest: %v", err)
}
ms, err := repository.Manifests(ctx)
if err != nil {
t.Fatal(err)
}
dgst, err := ms.Put(ctx, sm)
if err != nil {
t.Fatalf("unexpected errors putting manifest: %v", err)
}
return dgst, nil
}
// TestProxyManifests contains basic acceptance tests
// for the pull-through behavior
func TestProxyManifests(t *testing.T) {
name := "foo/bar"
env := newManifestStoreTestEnv(t, name, "latest")
localStats := env.LocalStats()
remoteStats := env.RemoteStats()
ctx := context.Background()
// Stat - must check local and remote
exists, err := env.manifests.Exists(ctx, env.manifestDigest)
if err != nil {
t.Fatalf("Error checking existence")
}
if !exists {
t.Errorf("Unexpected non-existant manifest")
}
if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 {
t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats)
}
if env.manifests.authChallenger.(*mockChallenger).count != 1 {
t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger)
}
// Get - should succeed and pull manifest into local
_, err = env.manifests.Get(ctx, env.manifestDigest)
if err != nil {
t.Fatal(err)
}
if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 {
t.Errorf("Unexpected get count")
}
if (*localStats)["put"] != 1 {
t.Errorf("Expected local put")
}
if env.manifests.authChallenger.(*mockChallenger).count != 2 {
t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
}
// Stat - should only go to local
exists, err = env.manifests.Exists(ctx, env.manifestDigest)
if err != nil {
t.Fatal(err)
}
if !exists {
t.Errorf("Unexpected non-existant manifest")
}
if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 {
t.Errorf("Unexpected exists count")
}
if env.manifests.authChallenger.(*mockChallenger).count != 2 {
t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
}
// Get proxied - won't require another authchallenge
_, err = env.manifests.Get(ctx, env.manifestDigest)
if err != nil {
t.Fatal(err)
}
if env.manifests.authChallenger.(*mockChallenger).count != 2 {
t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
}
}

View File

@ -0,0 +1,74 @@
package proxy
import (
"expvar"
"sync/atomic"
)
// Metrics is used to hold metric counters
// related to the proxy
type Metrics struct {
Requests uint64
Hits uint64
Misses uint64
BytesPulled uint64
BytesPushed uint64
}
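Every served item increments Requests (and Hits) in the Push collectors below, while each pull from the upstream increments Misses, so the effective cache hit rate works out to 1 - Misses/Requests. An illustrative helper, not part of the package:
// hitRate returns the fraction of requests served without pulling
// from the upstream registry.
func hitRate(m Metrics) float64 {
if m.Requests == 0 {
return 0
}
return 1 - float64(m.Misses)/float64(m.Requests)
}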
type proxyMetricsCollector struct {
blobMetrics Metrics
manifestMetrics Metrics
}
// BlobPull tracks metrics about blobs pulled into the cache
func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) {
atomic.AddUint64(&pmc.blobMetrics.Misses, 1)
atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled)
}
// BlobPush tracks metrics about blobs pushed to clients
func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) {
atomic.AddUint64(&pmc.blobMetrics.Requests, 1)
atomic.AddUint64(&pmc.blobMetrics.Hits, 1)
atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed)
}
// ManifestPull tracks metrics related to Manifests pulled into the cache
func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) {
atomic.AddUint64(&pmc.manifestMetrics.Misses, 1)
atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled)
}
// ManifestPush tracks metrics about manifests pushed to clients
func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) {
atomic.AddUint64(&pmc.manifestMetrics.Requests, 1)
atomic.AddUint64(&pmc.manifestMetrics.Hits, 1)
atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed)
}
// proxyMetrics tracks metrics about the proxy cache. This is
// kept globally and made available via expvar.
var proxyMetrics = &proxyMetricsCollector{}
func init() {
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
pm := registry.(*expvar.Map).Get("proxy")
if pm == nil {
pm = &expvar.Map{}
pm.(*expvar.Map).Init()
registry.(*expvar.Map).Set("proxy", pm)
}
pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} {
return proxyMetrics.blobMetrics
}))
pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} {
return proxyMetrics.manifestMetrics
}))
}
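Because the map is registered under the "registry" expvar, these counters are readable wherever the standard expvar handler is mounted. A hedged usage sketch (the port and the net/http wiring are assumptions, not part of this file):
// Hypothetical: importing expvar registers /debug/vars on http.DefaultServeMux,
// so serving the default mux exposes the proxy counters, e.g.:
//   curl http://localhost:6060/debug/vars | jq '.registry.proxy'
go http.ListenAndServe("localhost:6060", nil)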

248
docs/proxy/proxyregistry.go Normal file
View File

@ -0,0 +1,248 @@
package proxy
import (
"fmt"
"net/http"
"net/url"
"sync"
"github.com/docker/distribution"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/registry/proxy/scheduler"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver"
)
// proxyingRegistry fetches content from a remote registry and caches it locally
type proxyingRegistry struct {
embedded distribution.Namespace // provides local registry functionality
scheduler *scheduler.TTLExpirationScheduler
remoteURL url.URL
authChallenger authChallenger
}
// NewRegistryPullThroughCache creates a registry acting as a pull through cache
func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) {
remoteURL, err := url.Parse(config.RemoteURL)
if err != nil {
return nil, err
}
v := storage.NewVacuum(ctx, driver)
s := scheduler.New(ctx, driver, "/scheduler-state.json")
s.OnBlobExpire(func(ref reference.Reference) error {
var r reference.Canonical
var ok bool
if r, ok = ref.(reference.Canonical); !ok {
return fmt.Errorf("unexpected reference type : %T", ref)
}
repo, err := registry.Repository(ctx, r)
if err != nil {
return err
}
blobs := repo.Blobs(ctx)
// Clear the repository reference and descriptor caches
err = blobs.Delete(ctx, r.Digest())
if err != nil {
return err
}
err = v.RemoveBlob(r.Digest().String())
if err != nil {
return err
}
return nil
})
s.OnManifestExpire(func(ref reference.Reference) error {
var r reference.Canonical
var ok bool
if r, ok = ref.(reference.Canonical); !ok {
return fmt.Errorf("unexpected reference type : %T", ref)
}
repo, err := registry.Repository(ctx, r)
if err != nil {
return err
}
manifests, err := repo.Manifests(ctx)
if err != nil {
return err
}
err = manifests.Delete(ctx, r.Digest())
if err != nil {
return err
}
return nil
})
err = s.Start()
if err != nil {
return nil, err
}
cs, err := configureAuth(config.Username, config.Password)
if err != nil {
return nil, err
}
return &proxyingRegistry{
embedded: registry,
scheduler: s,
remoteURL: *remoteURL,
authChallenger: &remoteAuthChallenger{
remoteURL: *remoteURL,
cm: auth.NewSimpleChallengeManager(),
cs: cs,
},
}, nil
}
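A hedged wiring sketch for the constructor above (ctx, localNamespace, and storageDriver are assumed to exist; the upstream URL is illustrative):
// Hypothetical: wrap a local registry namespace as a pull-through cache.
proxyConfig := configuration.Proxy{
RemoteURL: "https://registry-1.docker.io",
Username:  "", // anonymous pulls
Password:  "",
}
cache, err := NewRegistryPullThroughCache(ctx, localNamespace, storageDriver, proxyConfig)
if err != nil {
// covers bad URLs, scheduler start failures, and credential setup errors
}
// cache now satisfies distribution.Namespace and can replace the local registry.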
func (pr *proxyingRegistry) Scope() distribution.Scope {
return distribution.GlobalScope
}
func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
return pr.embedded.Repositories(ctx, repos, last)
}
func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) {
c := pr.authChallenger
tr := transport.NewTransport(http.DefaultTransport,
auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull")))
localRepo, err := pr.embedded.Repository(ctx, name)
if err != nil {
return nil, err
}
localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification())
if err != nil {
return nil, err
}
remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr)
if err != nil {
return nil, err
}
remoteManifests, err := remoteRepo.Manifests(ctx)
if err != nil {
return nil, err
}
return &proxiedRepository{
blobStore: &proxyBlobStore{
localStore: localRepo.Blobs(ctx),
remoteStore: remoteRepo.Blobs(ctx),
scheduler: pr.scheduler,
repositoryName: name,
authChallenger: pr.authChallenger,
},
manifests: &proxyManifestStore{
repositoryName: name,
localManifests: localManifests, // Options?
remoteManifests: remoteManifests,
ctx: ctx,
scheduler: pr.scheduler,
authChallenger: pr.authChallenger,
},
name: name,
tags: &proxyTagService{
localTags: localRepo.Tags(ctx),
remoteTags: remoteRepo.Tags(ctx),
authChallenger: pr.authChallenger,
},
}, nil
}
func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator {
return pr.embedded.Blobs()
}
func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter {
return pr.embedded.BlobStatter()
}
// authChallenger encapsulates a request to the upstream to establish credential challenges
type authChallenger interface {
tryEstablishChallenges(context.Context) error
challengeManager() auth.ChallengeManager
credentialStore() auth.CredentialStore
}
type remoteAuthChallenger struct {
remoteURL url.URL
sync.Mutex
cm auth.ChallengeManager
cs auth.CredentialStore
}
func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore {
return r.cs
}
func (r *remoteAuthChallenger) challengeManager() auth.ChallengeManager {
return r.cm
}
// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist
func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error {
r.Lock()
defer r.Unlock()
remoteURL := r.remoteURL
remoteURL.Path = "/v2/"
// Look up challenges under the same /v2/ URL that ping registers them with.
challenges, err := r.cm.GetChallenges(remoteURL)
if err != nil {
return err
}
if len(challenges) > 0 {
return nil
}
// establish challenge type with upstream
if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil {
return err
}
context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm)
return nil
}
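On first use this pings the upstream's /v2/ endpoint and records whatever challenge it returns; a typical upstream response looks like the following (illustrative values):
HTTP/1.1 401 Unauthorized
Www-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
The token handler constructed in Repository above then satisfies that Bearer challenge on subsequent requests.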
// proxiedRepository uses proxying blob and manifest services to serve content
// locally, or pulling it through from a remote and caching it locally if it doesn't
// already exist.
type proxiedRepository struct {
blobStore distribution.BlobStore
manifests distribution.ManifestService
name reference.Named
tags distribution.TagService
}
func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
return pr.manifests, nil
}
func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore {
return pr.blobStore
}
func (pr *proxiedRepository) Named() reference.Named {
return pr.name
}
func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService {
return pr.tags
}

View File

@ -0,0 +1,65 @@
package proxy
import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
)
// proxyTagService supports local and remote lookup of tags.
type proxyTagService struct {
localTags distribution.TagService
remoteTags distribution.TagService
authChallenger authChallenger
}
var _ distribution.TagService = proxyTagService{}
// Get attempts to get the most recent digest for the tag by checking the remote
// tag service first and then caching it locally. If the remote is unavailable,
// the local association is returned.
func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
err := pt.authChallenger.tryEstablishChallenges(ctx)
if err == nil {
desc, err := pt.remoteTags.Get(ctx, tag)
if err == nil {
err := pt.localTags.Tag(ctx, tag, desc)
if err != nil {
return distribution.Descriptor{}, err
}
return desc, nil
}
}
desc, err := pt.localTags.Get(ctx, tag)
if err != nil {
return distribution.Descriptor{}, err
}
return desc, nil
}
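The net effect is remote-preferred resolution: a reachable upstream always refreshes the local association, while an unreachable one degrades to the cached copy. A brief illustration (tags, ctx, and the populated backing stores are assumptions):
// With the upstream reachable, the remote digest wins and is cached locally.
desc, err := tags.Get(ctx, "latest")
// With the upstream down, the same call serves the last cached descriptor
// out of localTags instead of failing.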
func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
return distribution.ErrUnsupported
}
func (pt proxyTagService) Untag(ctx context.Context, tag string) error {
return pt.localTags.Untag(ctx, tag)
}
func (pt proxyTagService) All(ctx context.Context) ([]string, error) {
err := pt.authChallenger.tryEstablishChallenges(ctx)
if err == nil {
tags, err := pt.remoteTags.All(ctx)
if err == nil {
return tags, err
}
}
return pt.localTags.All(ctx)
}
func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
return []string{}, distribution.ErrUnsupported
}

View File

@ -0,0 +1,182 @@
package proxy
import (
"reflect"
"sort"
"sync"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
)
type mockTagStore struct {
mapping map[string]distribution.Descriptor
sync.Mutex
}
var _ distribution.TagService = &mockTagStore{}
func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
m.Lock()
defer m.Unlock()
if d, ok := m.mapping[tag]; ok {
return d, nil
}
return distribution.Descriptor{}, distribution.ErrTagUnknown{}
}
func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
m.Lock()
defer m.Unlock()
m.mapping[tag] = desc
return nil
}
func (m *mockTagStore) Untag(ctx context.Context, tag string) error {
m.Lock()
defer m.Unlock()
if _, ok := m.mapping[tag]; ok {
delete(m.mapping, tag)
return nil
}
return distribution.ErrTagUnknown{}
}
func (m *mockTagStore) All(ctx context.Context) ([]string, error) {
m.Lock()
defer m.Unlock()
var tags []string
for tag := range m.mapping {
tags = append(tags, tag)
}
return tags, nil
}
func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
panic("not implemented")
}
func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService {
if local == nil {
local = make(map[string]distribution.Descriptor)
}
if remote == nil {
remote = make(map[string]distribution.Descriptor)
}
return &proxyTagService{
localTags: &mockTagStore{mapping: local},
remoteTags: &mockTagStore{mapping: remote},
authChallenger: &mockChallenger{},
}
}
func TestGet(t *testing.T) {
remoteDesc := distribution.Descriptor{Size: 42}
remoteTag := "remote"
proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil)
ctx := context.Background()
// Get pre-loaded tag
d, err := proxyTags.Get(ctx, remoteTag)
if err != nil {
t.Fatal(err)
}
if proxyTags.authChallenger.(*mockChallenger).count != 1 {
t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger)
}
if !reflect.DeepEqual(d, remoteDesc) {
t.Fatal("unable to get put tag")
}
local, err := proxyTags.localTags.Get(ctx, remoteTag)
if err != nil {
t.Fatal("remote tag not pulled into store")
}
if !reflect.DeepEqual(local, remoteDesc) {
t.Fatalf("unexpected descriptor pulled through")
}
// Manually overwrite remote tag
newRemoteDesc := distribution.Descriptor{Size: 43}
err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc)
if err != nil {
t.Fatal(err)
}
d, err = proxyTags.Get(ctx, remoteTag)
if err != nil {
t.Fatal(err)
}
if proxyTags.authChallenger.(*mockChallenger).count != 2 {
t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger)
}
if !reflect.DeepEqual(d, newRemoteDesc) {
t.Fatal("unable to get put tag")
}
_, err = proxyTags.localTags.Get(ctx, remoteTag)
if err != nil {
t.Fatal("remote tag not pulled into store")
}
// untag, ensure it's removed locally, but present in remote
err = proxyTags.Untag(ctx, remoteTag)
if err != nil {
t.Fatal(err)
}
_, err = proxyTags.localTags.Get(ctx, remoteTag)
if err == nil {
t.Fatalf("Expected error getting Untag'd tag")
}
_, err = proxyTags.remoteTags.Get(ctx, remoteTag)
if err != nil {
t.Fatalf("remote tag should not be untagged with proxyTag.Untag")
}
_, err = proxyTags.Get(ctx, remoteTag)
if err != nil {
t.Fatal("untagged tag should be pulled through")
}
if proxyTags.authChallenger.(*mockChallenger).count != 3 {
t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger)
}
// Add another tag. Ensure both tags appear in 'All'
err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42})
if err != nil {
t.Fatal(err)
}
all, err := proxyTags.All(ctx)
if err != nil {
t.Fatal(err)
}
if len(all) != 2 {
t.Fatalf("Unexpected tag length returned from All(): %d", len(all))
}
sort.Strings(all)
if all[0] != "funtag" || all[1] != "remote" {
t.Fatalf("Unexpected tags returned from All(): %v", all)
}
if proxyTags.authChallenger.(*mockChallenger).count != 4 {
t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger)
}
}

View File

@ -0,0 +1,258 @@
package scheduler
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver"
)
// expiryFunc is called when a repository's TTL expires
type expiryFunc func(reference.Reference) error
const (
entryTypeBlob = iota
entryTypeManifest
indexSaveFrequency = 5 * time.Second
)
// schedulerEntry represents an entry in the scheduler
// fields are exported for serialization
type schedulerEntry struct {
Key string `json:"Key"`
Expiry time.Time `json:"ExpiryData"`
EntryType int `json:"EntryType"`
timer *time.Timer
}
// New returns a new instance of the scheduler
func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler {
return &TTLExpirationScheduler{
entries: make(map[string]*schedulerEntry),
driver: driver,
pathToStateFile: path,
ctx: ctx,
stopped: true,
doneChan: make(chan struct{}),
saveTimer: time.NewTicker(indexSaveFrequency),
}
}
// TTLExpirationScheduler is a scheduler used to perform actions
// when TTLs expire
type TTLExpirationScheduler struct {
sync.Mutex
entries map[string]*schedulerEntry
driver driver.StorageDriver
ctx context.Context
pathToStateFile string
stopped bool
onBlobExpire expiryFunc
onManifestExpire expiryFunc
indexDirty bool
saveTimer *time.Ticker
doneChan chan struct{}
}
// OnBlobExpire is called when a scheduled blob's TTL expires
func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) {
ttles.Lock()
defer ttles.Unlock()
ttles.onBlobExpire = f
}
// OnManifestExpire is called when a scheduled manifest's TTL expires
func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) {
ttles.Lock()
defer ttles.Unlock()
ttles.onManifestExpire = f
}
// AddBlob schedules a blob cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error {
ttles.Lock()
defer ttles.Unlock()
if ttles.stopped {
return fmt.Errorf("scheduler not started")
}
ttles.add(blobRef, ttl, entryTypeBlob)
return nil
}
// AddManifest schedules a manifest cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error {
ttles.Lock()
defer ttles.Unlock()
if ttles.stopped {
return fmt.Errorf("scheduler not started")
}
ttles.add(manifestRef, ttl, entryTypeManifest)
return nil
}
// Start starts the scheduler
func (ttles *TTLExpirationScheduler) Start() error {
ttles.Lock()
defer ttles.Unlock()
err := ttles.readState()
if err != nil {
return err
}
if !ttles.stopped {
return fmt.Errorf("Scheduler already started")
}
context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
ttles.stopped = false
// Start timer for each deserialized entry
for _, entry := range ttles.entries {
entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now()))
}
// Start a ticker to periodically save the entries index
go func() {
for {
select {
case <-ttles.saveTimer.C:
ttles.Lock()
// Check indexDirty while holding the lock to avoid racing with add().
if !ttles.indexDirty {
ttles.Unlock()
continue
}
err := ttles.writeState()
if err != nil {
context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
} else {
ttles.indexDirty = false
}
ttles.Unlock()
case <-ttles.doneChan:
return
}
}
}()
return nil
}
func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) {
entry := &schedulerEntry{
Key: r.String(),
Expiry: time.Now().Add(ttl),
EntryType: eType,
}
context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil {
oldEntry.timer.Stop()
}
ttles.entries[entry.Key] = entry
entry.timer = ttles.startTimer(entry, ttl)
ttles.indexDirty = true
}
func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer {
return time.AfterFunc(ttl, func() {
ttles.Lock()
defer ttles.Unlock()
var f expiryFunc
switch entry.EntryType {
case entryTypeBlob:
f = ttles.onBlobExpire
case entryTypeManifest:
f = ttles.onManifestExpire
default:
f = func(reference.Reference) error {
return fmt.Errorf("scheduler entry type")
}
}
ref, err := reference.Parse(entry.Key)
if err == nil {
if err := f(ref); err != nil {
context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
}
} else {
context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
}
delete(ttles.entries, entry.Key)
ttles.indexDirty = true
})
}
// Stop stops the scheduler.
func (ttles *TTLExpirationScheduler) Stop() {
ttles.Lock()
defer ttles.Unlock()
if err := ttles.writeState(); err != nil {
context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
}
for _, entry := range ttles.entries {
entry.timer.Stop()
}
close(ttles.doneChan)
ttles.saveTimer.Stop()
ttles.stopped = true
}
func (ttles *TTLExpirationScheduler) writeState() error {
jsonBytes, err := json.Marshal(ttles.entries)
if err != nil {
return err
}
err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes)
if err != nil {
return err
}
return nil
}
func (ttles *TTLExpirationScheduler) readState() error {
if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return nil
default:
return err
}
}
bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile)
if err != nil {
return err
}
err = json.Unmarshal(bytes, &ttles.entries)
if err != nil {
return err
}
return nil
}
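A hedged end-to-end sketch of the lifecycle from a caller's perspective (ctx, driver, and blobRef are assumed to exist; an inmemory driver as in the tests below would do):
// Hypothetical wiring: expire proxied blobs one week after they are cached.
s := New(ctx, driver, "/scheduler-state.json")
s.OnBlobExpire(func(ref reference.Reference) error {
// remove the expired blob from local storage here
return nil
})
if err := s.Start(); err != nil {
// the persisted state file could not be read
}
if err := s.AddBlob(blobRef, 7*24*time.Hour); err != nil {
// AddBlob fails if the scheduler has not been started
}
defer s.Stop()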

View File

@ -0,0 +1,188 @@
package scheduler
import (
"encoding/json"
"testing"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) {
ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
if err != nil {
t.Fatalf("could not parse reference: %v", err)
}
ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
if err != nil {
t.Fatalf("could not parse reference: %v", err)
}
ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
if err != nil {
t.Fatalf("could not parse reference: %v", err)
}
return ref1, ref2, ref3
}
func TestSchedule(t *testing.T) {
ref1, ref2, ref3 := testRefs(t)
timeUnit := time.Millisecond
remainingRepos := map[string]bool{
ref1.String(): true,
ref2.String(): true,
ref3.String(): true,
}
s := New(context.Background(), inmemory.New(), "/ttl")
deleteFunc := func(repoName reference.Reference) error {
if len(remainingRepos) == 0 {
t.Fatalf("Incorrect expiry count")
}
_, ok := remainingRepos[repoName.String()]
if !ok {
t.Fatalf("Trying to remove nonexistent repo: %s", repoName)
}
t.Log("removing", repoName)
delete(remainingRepos, repoName.String())
return nil
}
s.onBlobExpire = deleteFunc
err := s.Start()
if err != nil {
t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
}
s.add(ref1, 3*timeUnit, entryTypeBlob)
s.add(ref2, 1*timeUnit, entryTypeBlob)
s.add(ref3, 1*timeUnit, entryTypeBlob)
// Ensure all repos are deleted
<-time.After(50 * timeUnit)
if len(remainingRepos) != 0 {
t.Fatalf("Repositories remaining: %#v", remainingRepos)
}
}
func TestRestoreOld(t *testing.T) {
ref1, ref2, _ := testRefs(t)
remainingRepos := map[string]bool{
ref1.String(): true,
ref2.String(): true,
}
deleteFunc := func(r reference.Reference) error {
if r.String() == ref1.String() && len(remainingRepos) == 2 {
t.Errorf("ref1 should be removed first")
}
_, ok := remainingRepos[r.String()]
if !ok {
t.Fatalf("Trying to remove nonexistent repo: %s", r)
}
delete(remainingRepos, r.String())
return nil
}
timeUnit := time.Millisecond
serialized, err := json.Marshal(&map[string]schedulerEntry{
ref1.String(): {
Expiry: time.Now().Add(1 * timeUnit),
Key: ref1.String(),
EntryType: 0,
},
ref2.String(): {
Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first
Key: ref2.String(),
EntryType: 0,
},
})
if err != nil {
t.Fatalf("Error serializing test data: %s", err.Error())
}
ctx := context.Background()
pathToStatFile := "/ttl"
fs := inmemory.New()
err = fs.PutContent(ctx, pathToStatFile, serialized)
if err != nil {
t.Fatal("Unable to write serialized data to fs")
}
s := New(context.Background(), fs, "/ttl")
s.onBlobExpire = deleteFunc
err = s.Start()
if err != nil {
t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
}
<-time.After(50 * timeUnit)
if len(remainingRepos) != 0 {
t.Fatalf("Repositories remaining: %#v", remainingRepos)
}
}
func TestStopRestore(t *testing.T) {
ref1, ref2, _ := testRefs(t)
timeUnit := time.Millisecond
remainingRepos := map[string]bool{
ref1.String(): true,
ref2.String(): true,
}
deleteFunc := func(r reference.Reference) error {
delete(remainingRepos, r.String())
return nil
}
fs := inmemory.New()
pathToStateFile := "/ttl"
s := New(context.Background(), fs, pathToStateFile)
s.onBlobExpire = deleteFunc
err := s.Start()
if err != nil {
t.Fatal(err)
}
s.add(ref1, 300*timeUnit, entryTypeBlob)
s.add(ref2, 100*timeUnit, entryTypeBlob)
// Start and stop before all operations complete
// state will be written to fs
s.Stop()
time.Sleep(10 * time.Millisecond)
// v2 will restore state from fs
s2 := New(context.Background(), fs, pathToStateFile)
s2.onBlobExpire = deleteFunc
err = s2.Start()
if err != nil {
t.Fatalf("Error starting v2: %s", err.Error())
}
<-time.After(500 * timeUnit)
if len(remainingRepos) != 0 {
t.Fatalf("Repositories remaining: %#v", remainingRepos)
}
}
func TestDoubleStart(t *testing.T) {
s := New(context.Background(), inmemory.New(), "/ttl")
err := s.Start()
if err != nil {
t.Fatalf("Unable to start scheduler")
}
err = s.Start()
if err == nil {
t.Fatalf("Scheduler started twice without error")
}
}

345
docs/registry.go Normal file
View File

@ -0,0 +1,345 @@
package registry
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"rsc.io/letsencrypt"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/formatters/logstash"
"github.com/bugsnag/bugsnag-go"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/health"
"github.com/docker/distribution/registry/handlers"
"github.com/docker/distribution/registry/listener"
"github.com/docker/distribution/uuid"
"github.com/docker/distribution/version"
gorhandlers "github.com/gorilla/handlers"
"github.com/spf13/cobra"
"github.com/yvasiyarov/gorelic"
)
// ServeCmd is a cobra command for running the registry.
var ServeCmd = &cobra.Command{
Use: "serve <config>",
Short: "`serve` stores and distributes Docker images",
Long: "`serve` stores and distributes Docker images.",
Run: func(cmd *cobra.Command, args []string) {
// setup context
ctx := context.WithVersion(context.Background(), version.Version)
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
cmd.Usage()
os.Exit(1)
}
if config.HTTP.Debug.Addr != "" {
go func(addr string) {
log.Infof("debug server listening %v", addr)
if err := http.ListenAndServe(addr, nil); err != nil {
log.Fatalf("error listening on debug interface: %v", err)
}
}(config.HTTP.Debug.Addr)
}
registry, err := NewRegistry(ctx, config)
if err != nil {
log.Fatalln(err)
}
if err = registry.ListenAndServe(); err != nil {
log.Fatalln(err)
}
},
}
// A Registry represents a complete instance of the registry.
// TODO(aaronl): It might make sense for Registry to become an interface.
type Registry struct {
config *configuration.Configuration
app *handlers.App
server *http.Server
}
// NewRegistry creates a new registry from a context and configuration struct.
func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
var err error
ctx, err = configureLogging(ctx, config)
if err != nil {
return nil, fmt.Errorf("error configuring logger: %v", err)
}
// inject a logger into the uuid library. warns us if there is a problem
// with uuid generation under low entropy.
uuid.Loggerf = context.GetLogger(ctx).Warnf
app := handlers.NewApp(ctx, config)
// TODO(aaronl): The global scope of the health checks means NewRegistry
// can only be called once per process.
app.RegisterHealthChecks()
handler := configureReporting(app)
handler = alive("/", handler)
handler = health.Handler(handler)
handler = panicHandler(handler)
handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)
server := &http.Server{
Handler: handler,
}
return &Registry{
app: app,
config: config,
server: server,
}, nil
}
// ListenAndServe runs the registry's HTTP server.
func (registry *Registry) ListenAndServe() error {
config := registry.config
ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr)
if err != nil {
return err
}
if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
tlsConf := &tls.Config{
ClientAuth: tls.NoClientCert,
NextProtos: []string{"http/1.1"},
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
},
}
if config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
if config.HTTP.TLS.Certificate != "" {
return fmt.Errorf("cannot specify both certificate and Let's Encrypt")
}
var m letsencrypt.Manager
if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil {
return err
}
if !m.Registered() {
if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil {
return err
}
}
tlsConf.GetCertificate = m.GetCertificate
} else {
tlsConf.Certificates = make([]tls.Certificate, 1)
tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key)
if err != nil {
return err
}
}
if len(config.HTTP.TLS.ClientCAs) != 0 {
pool := x509.NewCertPool()
for _, ca := range config.HTTP.TLS.ClientCAs {
caPem, err := ioutil.ReadFile(ca)
if err != nil {
return err
}
if ok := pool.AppendCertsFromPEM(caPem); !ok {
return fmt.Errorf("Could not add CA to pool")
}
}
for _, subj := range pool.Subjects() {
context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj))
}
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = pool
}
ln = tls.NewListener(ln, tlsConf)
context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr())
} else {
context.GetLogger(registry.app).Infof("listening on %v", ln.Addr())
}
return registry.server.Serve(ln)
}
func configureReporting(app *handlers.App) http.Handler {
var handler http.Handler = app
if app.Config.Reporting.Bugsnag.APIKey != "" {
bugsnagConfig := bugsnag.Configuration{
APIKey: app.Config.Reporting.Bugsnag.APIKey,
// TODO(brianbland): provide the registry version here
// AppVersion: "2.0",
}
if app.Config.Reporting.Bugsnag.ReleaseStage != "" {
bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage
}
if app.Config.Reporting.Bugsnag.Endpoint != "" {
bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint
}
bugsnag.Configure(bugsnagConfig)
handler = bugsnag.Handler(handler)
}
if app.Config.Reporting.NewRelic.LicenseKey != "" {
agent := gorelic.NewAgent()
agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
if app.Config.Reporting.NewRelic.Name != "" {
agent.NewrelicName = app.Config.Reporting.NewRelic.Name
}
agent.CollectHTTPStat = true
agent.Verbose = app.Config.Reporting.NewRelic.Verbose
agent.Run()
handler = agent.WrapHTTPHandler(handler)
}
return handler
}
// configureLogging prepares the context with a logger using the
// configuration.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
if config.Log.Level == "" && config.Log.Formatter == "" {
// If no config for logging is set, fallback to deprecated "Loglevel".
log.SetLevel(logLevel(config.Loglevel))
ctx = context.WithLogger(ctx, context.GetLogger(ctx))
return ctx, nil
}
log.SetLevel(logLevel(config.Log.Level))
formatter := config.Log.Formatter
if formatter == "" {
formatter = "text" // default formatter
}
switch formatter {
case "json":
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "text":
log.SetFormatter(&log.TextFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "logstash":
log.SetFormatter(&logstash.LogstashFormatter{
TimestampFormat: time.RFC3339Nano,
})
default:
// just let the library use default on empty string.
if config.Log.Formatter != "" {
return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
}
}
if config.Log.Formatter != "" {
log.Debugf("using %q logging formatter", config.Log.Formatter)
}
if len(config.Log.Fields) > 0 {
// build up the static fields, if present.
var fields []interface{}
for k := range config.Log.Fields {
fields = append(fields, k)
}
ctx = context.WithValues(ctx, config.Log.Fields)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
}
return ctx, nil
}
func logLevel(level configuration.Loglevel) log.Level {
l, err := log.ParseLevel(string(level))
if err != nil {
l = log.InfoLevel
log.Warnf("error parsing level %q: %v, using %q ", level, err, l)
}
return l
}
// panicHandler wraps an HTTP handler and recovers from any panic raised while
// serving a request. logrus.Panic transmits the panic message to the
// pre-configured log hooks defined in config.yml.
func panicHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
log.Panic(fmt.Sprintf("%v", err))
}
}()
handler.ServeHTTP(w, r)
})
}
// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater effect.
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
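A small illustration of the liveness route (the httptest usage is an assumption for the sketch; it assumes the net/http/httptest import):
// Hypothetical: "/" answers 200 with Cache-Control: no-cache; other paths
// fall through to the wrapped handler.
h := alive("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
}))
rec := httptest.NewRecorder()
h.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
// rec.Code is http.StatusOK; a request for "/other" would yield StatusTeapot.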
func resolveConfiguration(args []string) (*configuration.Configuration, error) {
var configurationPath string
if len(args) > 0 {
configurationPath = args[0]
} else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
}
if configurationPath == "" {
return nil, fmt.Errorf("configuration path unspecified")
}
fp, err := os.Open(configurationPath)
if err != nil {
return nil, err
}
defer fp.Close()
config, err := configuration.Parse(fp)
if err != nil {
return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
}
return config, nil
}
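Configuration therefore comes from the first positional argument or, failing that, the environment. A brief sketch (the path is illustrative):
// Hypothetical: resolve configuration via the environment when no args are passed.
os.Setenv("REGISTRY_CONFIGURATION_PATH", "/etc/docker/registry/config.yml")
config, err := resolveConfiguration(nil)
if err != nil {
// "configuration path unspecified" or a parse error
}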

84
docs/root.go Normal file
View File

@ -0,0 +1,84 @@
package registry
import (
"fmt"
"os"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver/factory"
"github.com/docker/distribution/version"
"github.com/docker/libtrust"
"github.com/spf13/cobra"
)
var showVersion bool
func init() {
RootCmd.AddCommand(ServeCmd)
RootCmd.AddCommand(GCCmd)
GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}
// RootCmd is the main command for the 'registry' binary.
var RootCmd = &cobra.Command{
Use: "registry",
Short: "`registry`",
Long: "`registry`",
Run: func(cmd *cobra.Command, args []string) {
if showVersion {
version.PrintVersion()
return
}
cmd.Usage()
},
}
var dryRun bool
// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
var GCCmd = &cobra.Command{
Use: "garbage-collect <config>",
Short: "`garbage-collect` deletes layers not referenced by any manifests",
Long: "`garbage-collect` deletes layers not referenced by any manifests",
Run: func(cmd *cobra.Command, args []string) {
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
cmd.Usage()
os.Exit(1)
}
driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
if err != nil {
fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
os.Exit(1)
}
ctx := context.Background()
ctx, err = configureLogging(ctx, config)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
os.Exit(1)
}
k, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k))
if err != nil {
fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
os.Exit(1)
}
err = storage.MarkAndSweep(ctx, driver, registry, dryRun)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
os.Exit(1)
}
},
}

614
docs/storage/blob_test.go Normal file
View File

@ -0,0 +1,614 @@
package storage
import (
"bytes"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/testdriver"
"github.com/docker/distribution/testutil"
)
// TestWriteSeek tests that the current file size can be
// obtained via Size after writing
func TestWriteSeek(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
blobUpload, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
contents := []byte{1, 2, 3}
blobUpload.Write(contents)
blobUpload.Close()
offset := blobUpload.Size()
if offset != int64(len(contents)) {
t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents))
}
}
// TestSimpleBlobUpload covers the blob upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleBlobUpload(t *testing.T) {
randomDataReader, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random reader: %v", err)
}
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
h := sha256.New()
rd := io.TeeReader(randomDataReader, h)
blobUpload, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
// Cancel the upload then restart it
if err := blobUpload.Cancel(ctx); err != nil {
t.Fatalf("unexpected error during upload cancellation: %v", err)
}
// get the enclosing directory
uploadPath := path.Dir(blobUpload.(*blobWriter).path)
// ensure state was cleaned up
_, err = driver.List(ctx, uploadPath)
if err == nil {
t.Fatal("files in upload path after cleanup")
}
// Do a resume, get unknown upload
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
if err != distribution.ErrBlobUploadUnknown {
t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
}
// Restart!
blobUpload, err = bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
// Get the size of our random tarfile
randomDataSize, err := seekerSize(randomDataReader)
if err != nil {
t.Fatalf("error getting seeker size of random data: %v", err)
}
nn, err := io.Copy(blobUpload, rd)
if err != nil {
t.Fatalf("unexpected error uploading layer data: %v", err)
}
if nn != randomDataSize {
t.Fatalf("layer data write incomplete")
}
blobUpload.Close()
offset := blobUpload.Size()
if offset != nn {
t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
}
// Do a resume, for good fun
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
if err != nil {
t.Fatalf("unexpected error resuming upload: %v", err)
}
sha256Digest := digest.NewDigest("sha256", h)
desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
if err != nil {
t.Fatalf("unexpected error finishing layer upload: %v", err)
}
// ensure state was cleaned up
uploadPath = path.Dir(blobUpload.(*blobWriter).path)
_, err = driver.List(ctx, uploadPath)
if err == nil {
t.Fatal("files in upload path after commit")
}
// After finishing an upload, it should no longer exist.
if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
t.Fatalf("expected layer upload to be unknown, got %v", err)
}
// Test for existence.
statDesc, err := bs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
}
if !reflect.DeepEqual(statDesc, desc) {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
rc, err := bs.Open(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error opening blob for read: %v", err)
}
defer rc.Close()
h.Reset()
nn, err = io.Copy(h, rc)
if err != nil {
t.Fatalf("error reading layer: %v", err)
}
if nn != randomDataSize {
t.Fatalf("incorrect read length")
}
if digest.NewDigest("sha256", h) != sha256Digest {
t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
}
// Delete a blob
err = bs.Delete(ctx, desc.Digest)
if err != nil {
t.Fatalf("Unexpected error deleting blob")
}
d, err := bs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
}
_, err = bs.Open(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected success opening deleted blob for read")
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type getting deleted manifest: %#v", err)
}
// Re-upload the blob. Note that randomDataReader was fully consumed by the
// upload above, so ReadAll returns an empty slice here and this effectively
// re-uploads the empty blob.
randomBlob, err := ioutil.ReadAll(randomDataReader)
if err != nil {
	t.Fatalf("error reading all of blob: %v", err)
}
expectedDigest := digest.FromBytes(randomBlob)
simpleUpload(t, bs, randomBlob, expectedDigest)
d, err = bs.Stat(ctx, expectedDigest)
if err != nil {
	t.Errorf("unexpected error stat-ing blob: %v", err)
}
if d.Digest != expectedDigest {
	t.Errorf("mismatching digest with restored blob: %v != %v", d.Digest, expectedDigest)
}
_, err = bs.Open(ctx, expectedDigest)
if err != nil {
	t.Errorf("unexpected error opening blob: %v", err)
}
// Reuse state to test delete with a delete-disabled registry
registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err = registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs = repository.Blobs(ctx)
err = bs.Delete(ctx, desc.Digest)
if err == nil {
t.Errorf("Unexpected success deleting while disabled")
}
}
// TestSimpleBlobRead just creates a simple blob file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleBlobRead(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
if err != nil {
t.Fatalf("error creating random data: %v", err)
}
// Test for existence.
desc, err := bs.Stat(ctx, dgst)
if err != distribution.ErrBlobUnknown {
t.Fatalf("expected not found error when testing for existence: %v", err)
}
rc, err := bs.Open(ctx, dgst)
if err != distribution.ErrBlobUnknown {
t.Fatalf("expected not found error when opening non-existent blob: %v", err)
}
randomLayerSize, err := seekerSize(randomLayerReader)
if err != nil {
t.Fatalf("error getting seeker size for random layer: %v", err)
}
descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
t.Logf("desc: %v", descBefore)
desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
if err != nil {
t.Fatalf("error adding blob to blobservice: %v", err)
}
if desc.Size != randomLayerSize {
t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
}
rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
if err != nil {
t.Fatalf("error opening blob with %v: %v", dgst, err)
}
defer rc.Close()
// Now check the sha digest and ensure it's the same
h := sha256.New()
nn, err := io.Copy(h, rc)
if err != nil {
t.Fatalf("unexpected error copying to hash: %v", err)
}
if nn != randomLayerSize {
t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize)
}
sha256Digest := digest.NewDigest("sha256", h)
if sha256Digest != desc.Digest {
t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest)
}
// Now seek back the blob, read the whole thing and check against randomLayerData
offset, err := rc.Seek(0, os.SEEK_SET)
if err != nil {
t.Fatalf("error seeking blob: %v", err)
}
if offset != 0 {
t.Fatalf("seek failed: expected 0 offset, got %d", offset)
}
p, err := ioutil.ReadAll(rc)
if err != nil {
t.Fatalf("error reading all of blob: %v", err)
}
if len(p) != int(randomLayerSize) {
t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize)
}
// Reset the randomLayerReader and read back the buffer
_, err = randomLayerReader.Seek(0, os.SEEK_SET)
if err != nil {
t.Fatalf("error resetting layer reader: %v", err)
}
randomLayerData, err := ioutil.ReadAll(randomLayerReader)
if err != nil {
t.Fatalf("random layer read failed: %v", err)
}
if !bytes.Equal(p, randomLayerData) {
t.Fatalf("layer data not equal")
}
}
// TestBlobMount covers the blob mount process, exercising common
// error paths that might be seen during a mount.
func TestBlobMount(t *testing.T) {
randomDataReader, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random reader: %v", err)
}
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
sourceImageName, _ := reference.ParseNamed("foo/source")
driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
sourceRepository, err := registry.Repository(ctx, sourceImageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
sbs := sourceRepository.Blobs(ctx)
blobUpload, err := sbs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
// Get the size of our random tarfile
randomDataSize, err := seekerSize(randomDataReader)
if err != nil {
t.Fatalf("error getting seeker size of random data: %v", err)
}
nn, err := io.Copy(blobUpload, randomDataReader)
if err != nil {
t.Fatalf("unexpected error uploading layer data: %v", err)
}
desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
if err != nil {
t.Fatalf("unexpected error finishing layer upload: %v", err)
}
// Test for existence.
statDesc, err := sbs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs)
}
if !reflect.DeepEqual(statDesc, desc) {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
bs := repository.Blobs(ctx)
// Test destination for existence.
statDesc, err = bs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating unmounted blob: %v", desc)
}
canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest)
if err != nil {
t.Fatal(err)
}
bw, err := bs.Create(ctx, WithMountFrom(canonicalRef))
if bw != nil {
t.Fatal("unexpected blobwriter returned from Create call, should mount instead")
}
ebm, ok := err.(distribution.ErrBlobMounted)
if !ok {
t.Fatalf("unexpected error mounting layer: %v", err)
}
if !reflect.DeepEqual(ebm.Descriptor, desc) {
t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc)
}
// Test for existence.
statDesc, err = bs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
}
if !reflect.DeepEqual(statDesc, desc) {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
rc, err := bs.Open(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error opening blob for read: %v", err)
}
defer rc.Close()
h := sha256.New()
nn, err = io.Copy(h, rc)
if err != nil {
t.Fatalf("error reading layer: %v", err)
}
if nn != randomDataSize {
t.Fatalf("incorrect read length")
}
if digest.NewDigest("sha256", h) != dgst {
t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst)
}
// Delete the blob from the source repo
err = sbs.Delete(ctx, desc.Digest)
if err != nil {
t.Fatalf("Unexpected error deleting blob")
}
d, err := bs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error stating blob deleted from source repository: %v", err)
}
d, err = sbs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
}
// Delete the blob from the dest repo
err = bs.Delete(ctx, desc.Digest)
if err != nil {
t.Fatalf("Unexpected error deleting blob")
}
d, err = bs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
}
}
// TestLayerUploadZeroLength uploads a zero-length layer and checks that it
// commits with the well-known empty tar digest.
func TestLayerUploadZeroLength(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar)
}
func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
ctx := context.Background()
wr, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting upload: %v", err)
}
nn, err := io.Copy(wr, bytes.NewReader(blob))
if err != nil {
t.Fatalf("error copying into blob writer: %v", err)
}
if nn != int64(len(blob)) {
	t.Fatalf("unexpected number of bytes copied: %v != %v", nn, len(blob))
}
dgst, err := digest.FromReader(bytes.NewReader(blob))
if err != nil {
t.Fatalf("error getting digest: %v", err)
}
if dgst != expectedDigest {
// sanity check on zero digest
t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
}
desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
if err != nil {
t.Fatalf("unexpected error committing write: %v", err)
}
if desc.Digest != dgst {
t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst)
}
}
// seekerSize seeks to the end of seeker to determine its size, then restores
// the original position, returning the size. The state of the seeker should
// be treated as unknown if an error is returned.
func seekerSize(seeker io.ReadSeeker) (int64, error) {
current, err := seeker.Seek(0, os.SEEK_CUR)
if err != nil {
return 0, err
}
end, err := seeker.Seek(0, os.SEEK_END)
if err != nil {
return 0, err
}
resumed, err := seeker.Seek(current, os.SEEK_SET)
if err != nil {
return 0, err
}
if resumed != current {
return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
}
return end, nil
}
// addBlob simply consumes the reader and inserts the content into the blob
// service, returning a descriptor on success.
func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) {
wr, err := bs.Create(ctx)
if err != nil {
return distribution.Descriptor{}, err
}
defer wr.Cancel(ctx)
if nn, err := io.Copy(wr, rd); err != nil {
return distribution.Descriptor{}, err
} else if nn != desc.Size {
return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
}
return wr.Commit(ctx, desc)
}

View File

@ -0,0 +1,60 @@
package storage
import (
"expvar"
"sync/atomic"
"github.com/docker/distribution/registry/storage/cache"
)
type blobStatCollector struct {
metrics cache.Metrics
}
func (bsc *blobStatCollector) Hit() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Hits, 1)
}
func (bsc *blobStatCollector) Miss() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Misses, 1)
}
func (bsc *blobStatCollector) Metrics() cache.Metrics {
return bsc.metrics
}
// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
// cache requests. Note this is kept globally and made available via expvar.
// For more detailed metrics, it's recommended to instrument a particular cache
// implementation.
var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
func init() {
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
cache := registry.(*expvar.Map).Get("cache")
if cache == nil {
cache = &expvar.Map{}
cache.(*expvar.Map).Init()
registry.(*expvar.Map).Set("cache", cache)
}
storage := cache.(*expvar.Map).Get("storage")
if storage == nil {
storage = &expvar.Map{}
storage.(*expvar.Map).Init()
cache.(*expvar.Map).Set("storage", storage)
}
storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
// no need for synchronous access: the increments are atomic and
// during reading, we don't care if the data is up to date. The
// numbers will always *eventually* be reported correctly.
return blobStatterCacheMetrics
}))
}
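// Illustrative sketch (not part of the original commit): exercising the
// collector directly. Hit and Miss use atomic increments, so concurrent
// callers are safe; Metrics returns a snapshot copy. Assumes "fmt" is added
// to the imports above.
func exampleBlobStatCollectorUsage() {
	blobStatterCacheMetrics.Hit()
	blobStatterCacheMetrics.Miss()
	m := blobStatterCacheMetrics.Metrics()
	fmt.Printf("requests=%d hits=%d misses=%d\n", m.Requests, m.Hits, m.Misses)
}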

View File

@ -0,0 +1,78 @@
package storage
import (
"fmt"
"net/http"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver"
)
// TODO(stevvooe): This should be configurable in the future.
const blobCacheControlMaxAge = 365 * 24 * time.Hour
// blobServer simply serves blobs from a driver instance using a path function
// to identify paths and a descriptor service to fill in metadata.
type blobServer struct {
driver driver.StorageDriver
statter distribution.BlobStatter
pathFn func(dgst digest.Digest) (string, error)
redirect bool // allows disabling URLFor redirects
}
func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return err
}
path, err := bs.pathFn(desc.Digest)
if err != nil {
return err
}
if bs.redirect {
redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
switch err.(type) {
case nil:
// Redirect to storage URL.
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
return err
case driver.ErrUnsupportedMethod:
// Fallback to serving the content directly.
default:
// Some unexpected error.
return err
}
}
br, err := newFileReader(ctx, bs.driver, path, desc.Size)
if err != nil {
return err
}
defer br.Close()
w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
if w.Header().Get("Docker-Content-Digest") == "" {
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
}
if w.Header().Get("Content-Type") == "" {
// Set the content type if not already set.
w.Header().Set("Content-Type", desc.MediaType)
}
if w.Header().Get("Content-Length") == "" {
// Set the content length if not already set.
w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
}
http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
return nil
}
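// Hypothetical wiring sketch (not in the original commit): exposing ServeBlob
// behind a plain http.Handler. The query-parameter convention and handler
// name are assumptions for illustration; the real registry routes digests
// from the URL path.
func exampleBlobHandler(bs *blobServer) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		dgst, err := digest.ParseDigest(r.URL.Query().Get("digest"))
		if err != nil {
			http.Error(w, "invalid digest", http.StatusBadRequest)
			return
		}
		if err := bs.ServeBlob(context.Background(), w, r, dgst); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
}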

223
docs/storage/blobstore.go Normal file
View File

@ -0,0 +1,223 @@
package storage
import (
"path"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver"
)
// blobStore implements the read side of the blob store interface over a
// driver without enforcing per-repository membership. This object is
// intentionally a leaky abstraction, providing utility methods that support
// creating and traversing backend links.
type blobStore struct {
driver driver.StorageDriver
statter distribution.BlobStatter
}
var _ distribution.BlobProvider = &blobStore{}
// Get implements the BlobProvider.Get call.
func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
bp, err := bs.path(dgst)
if err != nil {
return nil, err
}
p, err := bs.driver.GetContent(ctx, bp)
if err != nil {
switch err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUnknown
}
return nil, err
}
return p, err
}
func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return nil, err
}
path, err := bs.path(desc.Digest)
if err != nil {
return nil, err
}
return newFileReader(ctx, bs.driver, path, desc.Size)
}
// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. This is implemented as
// a convenience for other Put implementations.
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
dgst := digest.FromBytes(p)
desc, err := bs.statter.Stat(ctx, dgst)
if err == nil {
// content already present
return desc, nil
} else if err != distribution.ErrBlobUnknown {
context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err)
// real error, return it
return distribution.Descriptor{}, err
}
bp, err := bs.path(dgst)
if err != nil {
return distribution.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype here, as well.
return distribution.Descriptor{
Size: int64(len(p)),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, bs.driver.PutContent(ctx, bp, p)
}
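// Usage sketch (illustrative, not part of the original commit): storing a
// small payload and reading it back by digest, per the doc comment above.
func examplePutAndGet(ctx context.Context, bs *blobStore) error {
	desc, err := bs.Put(ctx, "application/octet-stream", []byte(`{"example":true}`))
	if err != nil {
		return err
	}
	p, err := bs.Get(ctx, desc.Digest)
	if err != nil {
		return err
	}
	_ = p // p holds the original payload, addressed by its content digest
	return nil
}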
func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error {
specPath, err := pathFor(blobsPathSpec{})
if err != nil {
return err
}
err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error {
// skip directories
if fileInfo.IsDir() {
return nil
}
currentPath := fileInfo.Path()
// we only want to parse paths that end with /data
_, fileName := path.Split(currentPath)
if fileName != "data" {
return nil
}
digest, err := digestFromPath(currentPath)
if err != nil {
return err
}
return ingester(digest)
})
return err
}
// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
bp, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return "", err
}
return bp, nil
}
// link links the path to the provided digest by writing the digest into the
// target file. Caller must ensure that the blob actually exists.
func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
// The contents of the "link" file are the exact string contents of the
// digest, whose format is specified by the digest package.
return bs.driver.PutContent(ctx, path, []byte(dgst))
}
// readlink returns the linked digest at path.
func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
content, err := bs.driver.GetContent(ctx, path)
if err != nil {
return "", err
}
linked, err := digest.ParseDigest(string(content))
if err != nil {
return "", err
}
return linked, nil
}
// resolve reads the digest link at path and returns the blob store path.
func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
dgst, err := bs.readlink(ctx, path)
if err != nil {
return "", err
}
return bs.path(dgst)
}
type blobStatter struct {
driver driver.StorageDriver
}
var _ distribution.BlobDescriptorService = &blobStatter{}
// Stat implements BlobStatter.Stat by returning the descriptor for the blob
// in the main blob store. If this method returns successfully, there is
// a strong guarantee that the blob exists and is available.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
path, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return distribution.Descriptor{}, err
}
fi, err := bs.driver.Stat(ctx, path)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return distribution.Descriptor{}, distribution.ErrBlobUnknown
default:
return distribution.Descriptor{}, err
}
}
if fi.IsDir() {
// NOTE(stevvooe): This represents a corruption situation. Somehow, we
// calculated a blob path and then detected a directory. We log the
// error and then error on the side of not knowing about the blob.
context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
// TODO(stevvooe): Add method to resolve the mediatype. We can store and
// cache a "global" media type for the blob, even if a specific repo has a
// mediatype that overrides the main one.
return distribution.Descriptor{
Size: fi.Size(),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, nil
}
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
return distribution.ErrUnsupported
}

399
docs/storage/blobwriter.go Normal file
View File

@ -0,0 +1,399 @@
package storage
import (
"errors"
"fmt"
"io"
"path"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
var (
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
// blobWriter is used to control the various aspects of resumable
// blob upload.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
id string
startedAt time.Time
digester digest.Digester
written int64 // track the contiguous write
fileWriter storagedriver.FileWriter
driver storagedriver.StorageDriver
path string
resumableDigestEnabled bool
committed bool
}
var _ distribution.BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
return bw.id
}
func (bw *blobWriter) StartedAt() time.Time {
return bw.startedAt
}
// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
context.GetLogger(ctx).Debug("(*blobWriter).Commit")
if err := bw.fileWriter.Commit(); err != nil {
return distribution.Descriptor{}, err
}
bw.Close()
desc.Size = bw.Size()
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return distribution.Descriptor{}, err
}
if err := bw.moveBlob(ctx, canonical); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.removeResources(ctx); err != nil {
return distribution.Descriptor{}, err
}
err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
if err != nil {
return distribution.Descriptor{}, err
}
bw.committed = true
return canonical, nil
}
// Cancel the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
context.GetLogger(ctx).Debug("(*blobWriter).Cancel")
if err := bw.fileWriter.Cancel(); err != nil {
return err
}
if err := bw.Close(); err != nil {
context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err)
}
if err := bw.removeResources(ctx); err != nil {
return err
}
return nil
}
func (bw *blobWriter) Size() int64 {
return bw.fileWriter.Size()
}
func (bw *blobWriter) Write(p []byte) (int, error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p)
bw.written += int64(n)
return n, err
}
func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r)
bw.written += nn
return nn, err
}
func (bw *blobWriter) Close() error {
if bw.committed {
return errors.New("blobwriter close after commit")
}
if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return err
}
return bw.fileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
var (
verified, fullHash bool
canonical digest.Digest
)
if desc.Digest == "" {
// if no descriptors are provided, we have nothing to validate
// against. We don't really want to support this for the registry.
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Reason: fmt.Errorf("cannot validate against empty digest"),
}
}
var size int64
// Stat the on disk file
if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume
// that the desc length is zero.
desc.Size = 0
default:
// Any other error we want propagated up the stack.
return distribution.Descriptor{}, err
}
} else {
if fi.IsDir() {
return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
}
size = fi.Size()
}
if desc.Size > 0 {
if desc.Size != size {
return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
}
} else {
// if provided 0 or negative length, we can assume caller doesn't know or
// care about length.
desc.Size = size
}
// TODO(stevvooe): This section is very meandering. Need to be broken down
// to be a lot more clear.
if err := bw.resumeDigest(ctx); err == nil {
canonical = bw.digester.Digest()
if canonical.Algorithm() == desc.Digest.Algorithm() {
// Common case: client and server prefer the same canonical digest
// algorithm - currently SHA256.
verified = desc.Digest == canonical
} else {
// The client wants to use a different digest algorithm. They'll just
// have to be patient and wait for us to download and re-hash the
// uploaded content using that digest algorithm.
fullHash = true
}
} else if err == errResumableDigestNotAvailable {
// Not using resumable digests, so we need to hash the entire layer.
fullHash = true
} else {
return distribution.Descriptor{}, err
}
if fullHash {
// a fantastic optimization: if the written data and the size are
// the same, we don't need to read the data from the backend. This is
// because we've written the entire file in the lifecycle of the
// current instance.
if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
canonical = bw.digester.Digest()
verified = desc.Digest == canonical
}
// If the check based on size fails, we fall back to the slowest of
// paths. We may be able to make the size-based check a stronger
// guarantee, so this may be defensive.
if !verified {
digester := digest.Canonical.New()
digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
if err != nil {
return distribution.Descriptor{}, err
}
// Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
if err != nil {
return distribution.Descriptor{}, err
}
defer fr.Close()
tr := io.TeeReader(fr, digester.Hash())
if _, err := io.Copy(digestVerifier, tr); err != nil {
return distribution.Descriptor{}, err
}
canonical = digester.Digest()
verified = digestVerifier.Verified()
}
}
if !verified {
context.GetLoggerWithFields(ctx,
map[interface{}]interface{}{
"canonical": canonical,
"provided": desc.Digest,
}, "canonical", "provided").
Errorf("canonical digest does match provided digest")
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Digest: desc.Digest,
Reason: fmt.Errorf("content does not match digest"),
}
}
// update desc with canonical hash
desc.Digest = canonical
if desc.MediaType == "" {
desc.MediaType = "application/octet-stream"
}
return desc, nil
}
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
if err != nil {
return err
}
// Check for existence
if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // ensure that it doesn't exist.
default:
return err
}
} else {
// If the path exists, we can assume that the content has already
// been uploaded, since the blob storage is content-addressable.
// While it may be corrupted, detection of such corruption belongs
// elsewhere.
return nil
}
// If no data was received, we may not actually have a file on disk. Check
// the size here and write a zero-length file to blobPath if this is the
// case. For the most part, this should only ever happen with zero-length
// tars.
if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// HACK(stevvooe): This is slightly dangerous: if we verify above,
// get a hash, then the underlying file is deleted, we risk moving
// a zero-length blob into a nonzero-length blob location. To
// prevent this horrid thing, we employ the hack of only allowing
// this to happen for the digest of an empty tar.
if desc.Digest == digest.DigestSha256EmptyTar {
return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
}
// We let this fail during the move below.
logrus.
WithField("upload.id", bw.ID()).
WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
default:
return err // unrelated error
}
}
// TODO(stevvooe): We should also write the mediatype when executing this move.
return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
dataPath, err := pathFor(uploadDataPathSpec{
name: bw.blobStore.repository.Named().Name(),
id: bw.id,
})
if err != nil {
return err
}
// Resolve and delete the containing directory, which should include any
// upload related files.
dirPath := path.Dir(dataPath)
if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // already gone!
default:
// This should be uncommon enough such that returning an error
// should be okay. At this point, the upload should be mostly
// complete, but perhaps the backend became inaccessible.
context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
return err
}
}
return nil
}
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
try := 1
for try <= 5 {
_, err := bw.driver.Stat(bw.ctx, bw.path)
if err == nil {
break
}
switch err.(type) {
case storagedriver.PathNotFoundError:
context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
time.Sleep(1 * time.Second)
try++
default:
return nil, err
}
}
readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
if err != nil {
return nil, err
}
return readCloser, nil
}
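// Sketch of the exponential backoff suggested by the todo above (i=0.5, e=2,
// n=4); illustrative only, not part of the original commit.
func (bw *blobWriter) waitForPathSketch() error {
	delay := 500 * time.Millisecond
	for try := 0; try < 4; try++ {
		_, err := bw.driver.Stat(bw.ctx, bw.path)
		if err == nil {
			return nil
		}
		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
			return err // unrelated error, bail out immediately
		}
		context.GetLogger(bw.ctx).Debugf("nothing found on try %d, sleeping %v...", try+1, delay)
		time.Sleep(delay)
		delay *= 2
	}
	return nil // give up waiting; let the subsequent read surface the error
}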

View File

@ -0,0 +1,17 @@
// +build noresumabledigest
package storage
import (
"github.com/docker/distribution/context"
)
// resumeDigest is a noop when resumable digest support is disabled.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
return errResumableDigestNotAvailable
}
// storeHashState is a noop when resumable digest support is disabled.
func (bw *blobWriter) storeHashState(ctx context.Context) error {
return errResumableDigestNotAvailable
}

View File

@ -0,0 +1,145 @@
// +build !noresumabledigest
package storage
import (
"fmt"
"path"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/stevvooe/resumable"
// register resumable hashes with import
_ "github.com/stevvooe/resumable/sha256"
_ "github.com/stevvooe/resumable/sha512"
)
// resumeDigest attempts to restore the state of the internal hash function
// by loading the saved hash state whose offset equals the current size of the blob.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(resumable.Hash)
if !ok {
return errResumableDigestNotAvailable
}
offset := bw.fileWriter.Size()
if offset == int64(h.Len()) {
// State of digester is already at the requested offset.
return nil
}
// List hash states from storage backend.
var hashStateMatch hashStateEntry
hashStates, err := bw.getStoredHashStates(ctx)
if err != nil {
return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
}
// Find a stored hashState with offset equal to the requested offset.
for _, hashState := range hashStates {
if hashState.offset == offset {
hashStateMatch = hashState
break // Found an exact offset match.
}
}
if hashStateMatch.offset == 0 {
	// No state was stored at a nonzero offset; just reset the hasher and let
	// the gap check below decide whether resumption is possible.
h.Reset()
} else {
storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
if err != nil {
return err
}
if err = h.Restore(storedState); err != nil {
return err
}
}
// Mind the gap: if the restored state still trails the requested offset, we cannot resume.
if gapLen := offset - int64(h.Len()); gapLen > 0 {
return errResumableDigestNotAvailable
}
return nil
}
type hashStateEntry struct {
offset int64
path string
}
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
list: true,
})
if err != nil {
return nil, err
}
paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
if err != nil {
if _, ok := err.(storagedriver.PathNotFoundError); !ok {
return nil, err
}
// Treat PathNotFoundError as no entries.
paths = nil
}
hashStateEntries := make([]hashStateEntry, 0, len(paths))
for _, p := range paths {
pathSuffix := path.Base(p)
// The suffix should be the offset.
offset, err := strconv.ParseInt(pathSuffix, 0, 64)
if err != nil {
logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
}
hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
}
return hashStateEntries, nil
}
func (bw *blobWriter) storeHashState(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(resumable.Hash)
if !ok {
return errResumableDigestNotAvailable
}
uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
offset: int64(h.Len()),
})
if err != nil {
return err
}
hashState, err := h.State()
if err != nil {
return err
}
return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
}
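// Round-trip sketch (illustrative, not part of the original commit): Close
// persists the resumable hash state at the current offset via storeHashState;
// a writer resumed later with the same upload id calls resumeDigest to
// restore that state rather than re-hashing from zero.
//
//	if err := bw.storeHashState(ctx); err != nil && err != errResumableDigestNotAvailable {
//		return err
//	}
//	// ... the process restarts and the client resumes the upload ...
//	if err := bw.resumeDigest(ctx); err != nil && err != errResumableDigestNotAvailable {
//		return err
//	}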

35
docs/storage/cache/cache.go vendored Normal file
View File

@ -0,0 +1,35 @@
// Package cache provides facilities to speed up access to the storage
// backend.
package cache
import (
"fmt"
"github.com/docker/distribution"
)
// BlobDescriptorCacheProvider provides repository scoped
// BlobDescriptorService cache instances and a global descriptor cache.
type BlobDescriptorCacheProvider interface {
distribution.BlobDescriptorService
RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
}
// ValidateDescriptor provides a helper function to ensure that caches have
// common criteria for admitting descriptors.
func ValidateDescriptor(desc distribution.Descriptor) error {
if err := desc.Digest.Validate(); err != nil {
return err
}
if desc.Size < 0 {
return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
}
if desc.MediaType == "" {
return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
}
return nil
}
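// Illustrative check (not part of the original commit): a descriptor with a
// valid digest but no media type is rejected by ValidateDescriptor. The
// digest below is the canonical sha256 of the empty string.
func exampleValidateDescriptor() error {
	desc := distribution.Descriptor{
		Digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		Size:   0,
	}
	return ValidateDescriptor(desc) // returns "cache: empty mediatype on descriptor: ..."
}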

180
docs/storage/cache/cachecheck/suite.go vendored Normal file
View File

@ -0,0 +1,180 @@
package cachecheck
import (
"reflect"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/cache"
)
// CheckBlobDescriptorCache takes a cache implementation through a common set
// of operations. If adding new tests, please add them here so new
// implementations get the benefit. This should be used for unit tests.
func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) {
ctx := context.Background()
checkBlobDescriptorCacheEmptyRepository(t, ctx, provider)
checkBlobDescriptorCacheSetAndRead(t, ctx, provider)
checkBlobDescriptorCacheClear(t, ctx, provider)
}
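// A typical caller wires a provider into this suite from its own test file,
// mirroring the memory package's test later in this commit; the names below
// are hypothetical placeholders for an implementation under test.
//
//	func TestMyBlobInfoCache(t *testing.T) {
//		cachecheck.CheckBlobDescriptorCache(t, NewMyBlobDescriptorCacheProvider())
//	}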
func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown {
t.Fatalf("expected unknown blob error with empty store: %v", err)
}
cache, err := provider.RepositoryScoped("")
if err == nil {
t.Fatalf("expected an error when asking for invalid repo")
}
cache, err = provider.RepositoryScoped("foo/bar")
if err != nil {
t.Fatalf("unexpected error getting repository: %v", err)
}
if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
Digest: "sha384:abc",
Size: 10,
MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
t.Fatalf("expected error with invalid digest: %v", err)
}
if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{
Digest: "",
Size: 10,
MediaType: "application/octet-stream"}); err == nil {
t.Fatalf("expected error setting value on invalid descriptor")
}
if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat {
t.Fatalf("expected error checking for cache item with empty digest: %v", err)
}
if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown {
t.Fatalf("expected unknown blob error with empty repo: %v", err)
}
}
func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
expected := distribution.Descriptor{
Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111",
Size: 10,
MediaType: "application/octet-stream"}
cache, err := provider.RepositoryScoped("foo/bar")
if err != nil {
t.Fatalf("unexpected error getting scoped cache: %v", err)
}
if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
t.Fatalf("error setting descriptor: %v", err)
}
desc, err := cache.Stat(ctx, localDigest)
if err != nil {
t.Fatalf("unexpected error statting fake2:abc: %v", err)
}
if !reflect.DeepEqual(expected, desc) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
// also check that the descriptor reads back consistently for the same key
desc, err = cache.Stat(ctx, localDigest)
if err != nil {
t.Fatalf("descriptor not returned for canonical key: %v", err)
}
if !reflect.DeepEqual(expected, desc) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
// ensure that global gets extra descriptor mapping
desc, err = provider.Stat(ctx, localDigest)
if err != nil {
t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc)
}
if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
// get at it through canonical descriptor
desc, err = provider.Stat(ctx, expected.Digest)
if err != nil {
t.Fatalf("unexpected error checking glboal descriptor: %v", err)
}
if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
// now, we set the repo local mediatype to something else and ensure it
// doesn't get changed in the provider cache.
expected.MediaType = "application/json"
if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
t.Fatalf("unexpected error setting descriptor: %v", err)
}
desc, err = cache.Stat(ctx, localDigest)
if err != nil {
t.Fatalf("unexpected error getting descriptor: %v", err)
}
if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
}
desc, err = provider.Stat(ctx, localDigest)
if err != nil {
t.Fatalf("unexpected error getting global descriptor: %v", err)
}
expected.MediaType = "application/octet-stream" // expect original mediatype in global
if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
}
}
func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
expected := distribution.Descriptor{
Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111",
Size: 10,
MediaType: "application/octet-stream"}
cache, err := provider.RepositoryScoped("foo/bar")
if err != nil {
t.Fatalf("unexpected error getting scoped cache: %v", err)
}
if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
t.Fatalf("error setting descriptor: %v", err)
}
desc, err := cache.Stat(ctx, localDigest)
if err != nil {
t.Fatalf("unexpected error statting fake2:abc: %v", err)
}
if !reflect.DeepEqual(expected, desc) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
err = cache.Clear(ctx, localDigest)
if err != nil {
t.Error(err)
}
desc, err = cache.Stat(ctx, localDigest)
if err == nil {
	t.Fatalf("expected error statting cleared blob, got descriptor: %v", desc)
}
}
}

View File

@ -0,0 +1,101 @@
package cache
import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution"
)
// Metrics is used to hold metric counters
// related to the number of times a cache was
// hit or missed.
type Metrics struct {
Requests uint64
Hits uint64
Misses uint64
}
// MetricsTracker represents a metric tracker
// which simply counts the number of hits and misses.
type MetricsTracker interface {
Hit()
Miss()
Metrics() Metrics
}
type cachedBlobStatter struct {
cache distribution.BlobDescriptorService
backend distribution.BlobDescriptorService
tracker MetricsTracker
}
// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
return &cachedBlobStatter{
cache: cache,
backend: backend,
}
}
// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
// falls back to a backend. Hits and misses will be sent to the tracker.
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
return &cachedBlobStatter{
cache: cache,
backend: backend,
tracker: tracker,
}
}
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
desc, err := cbds.cache.Stat(ctx, dgst)
if err != nil {
if err != distribution.ErrBlobUnknown {
context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
}
goto fallback
}
if cbds.tracker != nil {
cbds.tracker.Hit()
}
return desc, nil
fallback:
if cbds.tracker != nil {
cbds.tracker.Miss()
}
desc, err = cbds.backend.Stat(ctx, dgst)
if err != nil {
return desc, err
}
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
}
return desc, err
}
func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
err := cbds.cache.Clear(ctx, dgst)
if err != nil {
return err
}
err = cbds.backend.Clear(ctx, dgst)
if err != nil {
return err
}
return nil
}
func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
}
return nil
}
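// Wiring sketch (illustrative, not part of the original commit; shown as a
// comment because the memory package imports this one, so the code cannot
// live here): placing the in-memory descriptor cache in front of a backend
// statter, with hits and misses reported to a MetricsTracker.
//
//	provider := memory.NewInMemoryBlobDescriptorCacheProvider()
//	statter := cache.NewCachedBlobStatterWithMetrics(provider, backend, tracker)
//
// backend is an existing distribution.BlobDescriptorService and tracker any
// MetricsTracker implementation, such as the blobStatCollector elsewhere in
// this commit.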

170
docs/storage/cache/memory/memory.go vendored Normal file
View File

@ -0,0 +1,170 @@
package memory
import (
"sync"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache"
)
type inMemoryBlobDescriptorCacheProvider struct {
global *mapBlobDescriptorCache
repositories map[string]*mapBlobDescriptorCache
mu sync.RWMutex
}
// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
// storing blob descriptor data.
func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
return &inMemoryBlobDescriptorCacheProvider{
global: newMapBlobDescriptorCache(),
repositories: make(map[string]*mapBlobDescriptorCache),
}
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
if _, err := reference.ParseNamed(repo); err != nil {
return nil, err
}
imbdcp.mu.RLock()
defer imbdcp.mu.RUnlock()
return &repositoryScopedInMemoryBlobDescriptorCache{
repo: repo,
parent: imbdcp,
repository: imbdcp.repositories[repo],
}, nil
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return imbdcp.global.Stat(ctx, dgst)
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
return imbdcp.global.Clear(ctx, dgst)
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
_, err := imbdcp.Stat(ctx, dgst)
if err == distribution.ErrBlobUnknown {
if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
// if the digests differ, set the other canonical mapping
if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
return err
}
}
// unknown, just set it
return imbdcp.global.SetDescriptor(ctx, dgst, desc)
}
// we already know it, do nothing
return err
}
// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
// repository cache. Instances are not thread-safe but the delegated
// operations are.
type repositoryScopedInMemoryBlobDescriptorCache struct {
repo string
parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
repository *mapBlobDescriptorCache
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if rsimbdcp.repository == nil {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return rsimbdcp.repository.Stat(ctx, dgst)
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
if rsimbdcp.repository == nil {
return distribution.ErrBlobUnknown
}
return rsimbdcp.repository.Clear(ctx, dgst)
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if rsimbdcp.repository == nil {
// allocate map since we are setting it now.
rsimbdcp.parent.mu.Lock()
var ok bool
// have to read back value since we may have allocated elsewhere.
rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
if !ok {
rsimbdcp.repository = newMapBlobDescriptorCache()
rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository
}
rsimbdcp.parent.mu.Unlock()
}
if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil {
return err
}
return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
}
// mapBlobDescriptorCache provides a simple map-based implementation of the
// descriptor cache.
type mapBlobDescriptorCache struct {
descriptors map[digest.Digest]distribution.Descriptor
mu sync.RWMutex
}
var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
return &mapBlobDescriptorCache{
descriptors: make(map[digest.Digest]distribution.Descriptor),
}
}
func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return distribution.Descriptor{}, err
}
mbdc.mu.RLock()
defer mbdc.mu.RUnlock()
desc, ok := mbdc.descriptors[dgst]
if !ok {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return desc, nil
}
func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
mbdc.mu.Lock()
defer mbdc.mu.Unlock()
delete(mbdc.descriptors, dgst)
return nil
}
func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
mbdc.mu.Lock()
defer mbdc.mu.Unlock()
mbdc.descriptors[dgst] = desc
return nil
}
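// Usage sketch (illustrative, not part of the original commit): scoping the
// provider to a repository and reading the descriptor back through the
// global cache.
func exampleInMemoryCacheUsage() error {
	ctx := context.Background()
	provider := NewInMemoryBlobDescriptorCacheProvider()
	scoped, err := provider.RepositoryScoped("foo/bar")
	if err != nil {
		return err
	}
	desc := distribution.Descriptor{
		Digest:    "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		Size:      0,
		MediaType: "application/octet-stream",
	}
	if err := scoped.SetDescriptor(ctx, desc.Digest, desc); err != nil {
		return err
	}
	// The repository-scoped set also populates the global cache.
	_, err = provider.Stat(ctx, desc.Digest)
	return err
}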

View File

@ -0,0 +1,13 @@
package memory
import (
"testing"
"github.com/docker/distribution/registry/storage/cache/cachecheck"
)
// TestInMemoryBlobInfoCache checks that the in-memory implementation is working
// correctly.
func TestInMemoryBlobInfoCache(t *testing.T) {
cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider())
}

268
docs/storage/cache/redis/redis.go vendored Normal file
View File

@ -0,0 +1,268 @@
package redis
import (
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache"
"github.com/garyburd/redigo/redis"
)
// redisBlobDescriptorService provides an implementation of
// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in
// two parts. The first provides fast access to repository membership through a
// redis set for each repo. The second is a redis hash keyed by the digest of
// the layer, providing path, length and mediatype information. There is also
// a per-repository redis hash of the blob descriptor, allowing override of
// data. This is currently used to override the mediatype on a per-repository
// basis.
//
// Note that there is no implied relationship between these two caches. The
// layer may exist in one, both or none and the code must be written this way.
type redisBlobDescriptorService struct {
pool *redis.Pool
// TODO(stevvooe): We use a pool because we don't have great control over
// the cache lifecycle to manage connections. A new connection is fetched
// for each operation. Once we have better lifecycle management of the
// request objects, we can change this to a connection.
}
// NewRedisBlobDescriptorCacheProvider returns a new redis-based
// BlobDescriptorCacheProvider using the provided redis connection pool.
func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider {
return &redisBlobDescriptorService{
pool: pool,
}
}
// RepositoryScoped returns the scoped cache.
func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
if _, err := reference.ParseNamed(repo); err != nil {
return nil, err
}
return &repositoryScopedRedisBlobDescriptorService{
repo: repo,
upstream: rbds,
}, nil
}
// Stat retrieves the descriptor data from the redis hash entry.
func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return distribution.Descriptor{}, err
}
conn := rbds.pool.Get()
defer conn.Close()
return rbds.stat(ctx, conn, dgst)
}
func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
if err := dgst.Validate(); err != nil {
return err
}
conn := rbds.pool.Get()
defer conn.Close()
// Not atomic in redis <= 2.3
deleted, err := redis.Int(conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype"))
if err != nil {
	return err
}
if deleted == 0 {
	// nothing was deleted, so the blob was not known to begin with
	return distribution.ErrBlobUnknown
}
return nil
}
// stat provides an internal stat call that takes a connection parameter. This
// allows some internal management of the connection scope.
func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) {
reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype"))
if err != nil {
return distribution.Descriptor{}, err
}
// NOTE(stevvooe): The "size" field used to be "length". We treat a
// missing "size" field here as an unknown blob, which causes a cache
// miss, effectively migrating the field.
if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
var desc distribution.Descriptor
if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil {
return distribution.Descriptor{}, err
}
return desc, nil
}
// SetDescriptor sets the descriptor data for the given digest using a redis
// hash. A hash is used here since we may store unrelated fields about a layer
// in the future.
func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
conn := rbds.pool.Get()
defer conn.Close()
return rbds.setDescriptor(ctx, conn, dgst, desc)
}
func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst),
"digest", desc.Digest,
"size", desc.Size); err != nil {
return err
}
// Only set mediatype if not already set.
if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst),
"mediatype", desc.MediaType); err != nil {
return err
}
return nil
}
func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
return "blobs::" + dgst.String()
}
type repositoryScopedRedisBlobDescriptorService struct {
repo string
upstream *redisBlobDescriptorService
}
var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{}
// Stat ensures that the digest is a member of the specified repository and
// forwards the descriptor request to the global blob store. If the media type
// differs for the repository, we override it.
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return distribution.Descriptor{}, err
}
conn := rsrbds.upstream.pool.Get()
defer conn.Close()
// Check membership to repository first
member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
if err != nil {
return distribution.Descriptor{}, err
}
if !member {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
upstream, err := rsrbds.upstream.stat(ctx, conn, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
// We allow a per repository mediatype, let's look it up here.
mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
if err != nil {
return distribution.Descriptor{}, err
}
if mediatype != "" {
upstream.MediaType = mediatype
}
return upstream, nil
}
// Clear removes the descriptor from the cache and forwards to the upstream descriptor store
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
if err := dgst.Validate(); err != nil {
return err
}
conn := rsrbds.upstream.pool.Get()
defer conn.Close()
// Check membership to repository first
member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
if err != nil {
return err
}
if !member {
return distribution.ErrBlobUnknown
}
return rsrbds.upstream.Clear(ctx, dgst)
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
if dgst != desc.Digest {
if dgst.Algorithm() == desc.Digest.Algorithm() {
return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest)
}
}
conn := rsrbds.upstream.pool.Get()
defer conn.Close()
return rsrbds.setDescriptor(ctx, conn, dgst, desc)
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil {
return err
}
if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil {
return err
}
// Override repository mediatype.
if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil {
return err
}
// Also set the values for the primary descriptor, if they differ by
// algorithm (ie sha256 vs sha512).
if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
return err
}
}
return nil
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
return "repository::" + rsrbds.repo + "::blobs::" + dgst.String()
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string {
return "repository::" + rsrbds.repo + "::blobs"
}
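A minimal usage sketch (illustrative, not part of this commit): build the provider from a redigo pool, scope it to a repository, and stat a digest. The address and repository name are placeholders, and ctx/dgst are supplied by the caller; the pool settings mirror the test file below.
pool := &redis.Pool{
    Dial: func() (redis.Conn, error) {
        return redis.Dial("tcp", "localhost:6379") // placeholder address
    },
    MaxIdle: 1,
}
provider := NewRedisBlobDescriptorCacheProvider(pool)
scoped, err := provider.RepositoryScoped("library/ubuntu") // must be a valid named reference
if err != nil {
    // handle the invalid repository name
}
desc, err := scoped.Stat(ctx, dgst) // a cache miss surfaces as distribution.ErrBlobUnknown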

51
docs/storage/cache/redis/redis_test.go vendored Normal file
View File

@ -0,0 +1,51 @@
package redis
import (
"flag"
"os"
"testing"
"time"
"github.com/docker/distribution/registry/storage/cache/cachecheck"
"github.com/garyburd/redigo/redis"
)
var redisAddr string
func init() {
flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis")
}
// TestRedisLayerInfoCache exercises a live redis instance using the cache
// implementation.
func TestRedisBlobDescriptorCacheProvider(t *testing.T) {
if redisAddr == "" {
// fall back to an environment variable
redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR")
}
if redisAddr == "" {
// skip if still not set
t.Skip("please set -registry.storage.cache.redis to test layer info cache against redis")
}
pool := &redis.Pool{
Dial: func() (redis.Conn, error) {
return redis.Dial("tcp", redisAddr)
},
MaxIdle: 1,
MaxActive: 2,
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
Wait: false, // if a connection is not available, proceed without cache.
}
// Clear the database
if _, err := pool.Get().Do("FLUSHDB"); err != nil {
t.Fatalf("unexpected error flushing redis db: %v", err)
}
cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool))
}

97
docs/storage/catalog.go Normal file
View File

@ -0,0 +1,97 @@
package storage
import (
"errors"
"io"
"path"
"strings"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
)
// ErrFinishedWalk is used when the called walk function no longer wants
// to accept any more values. This is used for pagination when the
// required number of repos have been found.
var ErrFinishedWalk = errors.New("finished walk")
// Repositories returns a list, or partial list, of repositories in the
// registry. Because reading the list is an expensive operation, it should
// only be used when building up an initial set of repositories.
func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) {
var foundRepos []string
if len(repos) == 0 {
return 0, errors.New("no space in slice")
}
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return 0, err
}
err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
filePath := fileInfo.Path()
// lop the base path off
repoPath := filePath[len(root)+1:]
_, file := path.Split(repoPath)
if file == "_layers" {
repoPath = strings.TrimSuffix(repoPath, "/_layers")
if repoPath > last {
foundRepos = append(foundRepos, repoPath)
}
return ErrSkipDir
} else if strings.HasPrefix(file, "_") {
return ErrSkipDir
}
// if we've filled our array, no need to walk any further
if len(foundRepos) == len(repos) {
return ErrFinishedWalk
}
return nil
})
n = copy(repos, foundRepos)
// Signal that we have no more entries by setting EOF, but propagate a real
// walk error instead of masking it as EOF.
switch err {
case nil:
errVal = io.EOF
case ErrFinishedWalk:
// the slice filled before the walk completed; more entries may remain
default:
errVal = err
}
return n, errVal
}
// Enumerate applies ingester to each repository
func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
repoNameBuffer := make([]string, 100)
var last string
for {
n, err := reg.Repositories(ctx, repoNameBuffer, last)
if err != nil && err != io.EOF {
return err
}
if n == 0 {
break
}
last = repoNameBuffer[n-1]
for i := 0; i < n; i++ {
repoName := repoNameBuffer[i]
err = ingester(repoName)
if err != nil {
return err
}
}
if err == io.EOF {
break
}
}
return nil
}
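An illustrative sketch of driving the catalog (not part of this commit): Enumerate handles the pagination against Repositories internally, so a caller only supplies an ingester. Here reg is assumed to be a registry constructed with NewRegistry, and the println is a placeholder for real per-repository work.
err := reg.Enumerate(ctx, func(repoName string) error {
    fmt.Println(repoName) // placeholder: do real work per repository
    return nil            // returning a non-nil error aborts the enumeration
})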

125
docs/storage/catalog_test.go Normal file
View File

@ -0,0 +1,125 @@
package storage
import (
"io"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
type setupEnv struct {
ctx context.Context
driver driver.StorageDriver
expected []string
registry distribution.Namespace
}
func setupFS(t *testing.T) *setupEnv {
d := inmemory.New()
c := []byte("")
ctx := context.Background()
registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
rootpath, _ := pathFor(repositoriesRootPathSpec{})
repos := []string{
"/foo/a/_layers/1",
"/foo/b/_layers/2",
"/bar/c/_layers/3",
"/bar/d/_layers/4",
"/foo/d/in/_layers/5",
"/an/invalid/repo",
"/bar/d/_layers/ignored/dir/6",
}
for _, repo := range repos {
if err := d.PutContent(ctx, rootpath+repo, c); err != nil {
t.Fatalf("Unable to put to inmemory fs")
}
}
expected := []string{
"bar/c",
"bar/d",
"foo/a",
"foo/b",
"foo/d/in",
}
return &setupEnv{
ctx: ctx,
driver: d,
expected: expected,
registry: registry,
}
}
func TestCatalog(t *testing.T) {
env := setupFS(t)
p := make([]string, 50)
numFilled, err := env.registry.Repositories(env.ctx, p, "")
if !testEq(p, env.expected, numFilled) {
t.Errorf("Catalog repositories do not match expected")
}
if err != io.EOF {
t.Errorf("Catalog has more values than expected")
}
}
func TestCatalogInParts(t *testing.T) {
env := setupFS(t)
chunkLen := 2
p := make([]string, chunkLen)
numFilled, err := env.registry.Repositories(env.ctx, p, "")
if err == io.EOF || numFilled != len(p) {
t.Errorf("Expected more values in catalog")
}
if !testEq(p, env.expected[0:chunkLen], numFilled) {
t.Errorf("Expected catalog first chunk err")
}
lastRepo := p[len(p)-1]
numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo)
if err == io.EOF || numFilled != len(p) {
t.Errorf("Expected more values in catalog")
}
if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) {
t.Errorf("Expected catalog second chunk err")
}
lastRepo = p[len(p)-1]
numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo)
if err != io.EOF {
t.Errorf("Catalog has more values which we aren't expecting")
}
if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) {
t.Errorf("Expected catalog third chunk err")
}
}
func testEq(a, b []string, size int) bool {
for cnt := 0; cnt < size-1; cnt++ {
if a[cnt] != b[cnt] {
return false
}
}
return true
}

3
docs/storage/doc.go Normal file
View File

@ -0,0 +1,3 @@
// Package storage contains storage services for use in the registry
// application. It should be considered an internal package, as of Go 1.4.
package storage

482
docs/storage/driver/azure/azure.go Normal file
View File

@ -0,0 +1,482 @@
// Package azure provides a storagedriver.StorageDriver implementation to
// store blobs in Microsoft Azure Blob Storage Service.
package azure
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
azure "github.com/Azure/azure-sdk-for-go/storage"
)
const driverName = "azure"
const (
paramAccountName = "accountname"
paramAccountKey = "accountkey"
paramContainer = "container"
paramRealm = "realm"
maxChunkSize = 4 * 1024 * 1024
)
type driver struct {
client azure.BlobStorageClient
container string
}
type baseEmbed struct{ base.Base }
// Driver is a storagedriver.StorageDriver implementation backed by
// Microsoft Azure Blob Storage Service.
type Driver struct{ baseEmbed }
func init() {
factory.Register(driverName, &azureDriverFactory{})
}
type azureDriverFactory struct{}
func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters)
}
// FromParameters constructs a new Driver with a given parameters map.
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
accountName, ok := parameters[paramAccountName]
if !ok || fmt.Sprint(accountName) == "" {
return nil, fmt.Errorf("No %s parameter provided", paramAccountName)
}
accountKey, ok := parameters[paramAccountKey]
if !ok || fmt.Sprint(accountKey) == "" {
return nil, fmt.Errorf("No %s parameter provided", paramAccountKey)
}
container, ok := parameters[paramContainer]
if !ok || fmt.Sprint(container) == "" {
return nil, fmt.Errorf("No %s parameter provided", paramContainer)
}
realm, ok := parameters[paramRealm]
if !ok || fmt.Sprint(realm) == "" {
realm = azure.DefaultBaseURL
}
return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm))
}
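// Example (illustrative only, not part of this commit): the parameters map as
// it would arrive from the registry configuration; all values are placeholders.
//
//	d, err := FromParameters(map[string]interface{}{
//	    "accountname": "myaccount",
//	    "accountkey":  "bXlrZXk=",
//	    "container":   "registry",
//	    // "realm" is optional and defaults to azure.DefaultBaseURL
//	})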
// New constructs a new Driver with the given Azure Storage Account credentials
func New(accountName, accountKey, container, realm string) (*Driver, error) {
api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true)
if err != nil {
return nil, err
}
blobClient := api.GetBlobService()
// Create registry container
if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil {
return nil, err
}
d := &driver{
client: blobClient,
container: container}
return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
}
// Implement the storagedriver.StorageDriver interface.
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
blob, err := d.client.GetBlob(d.container, path)
if err != nil {
if is404(err) {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
defer blob.Close()
return ioutil.ReadAll(blob)
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil {
return err
}
writer, err := d.Writer(ctx, path, false)
if err != nil {
return err
}
defer writer.Close()
_, err = writer.Write(contents)
if err != nil {
return err
}
return writer.Commit()
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
if ok, err := d.client.BlobExists(d.container, path); err != nil {
return nil, err
} else if !ok {
return nil, storagedriver.PathNotFoundError{Path: path}
}
info, err := d.client.GetBlobProperties(d.container, path)
if err != nil {
return nil, err
}
size := int64(info.ContentLength)
if offset >= size {
return ioutil.NopCloser(bytes.NewReader(nil)), nil
}
bytesRange := fmt.Sprintf("%v-", offset)
resp, err := d.client.GetBlobRange(d.container, path, bytesRange)
if err != nil {
return nil, err
}
return resp, nil
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
blobExists, err := d.client.BlobExists(d.container, path)
if err != nil {
return nil, err
}
var size int64
if blobExists {
if append {
blobProperties, err := d.client.GetBlobProperties(d.container, path)
if err != nil {
return nil, err
}
size = blobProperties.ContentLength
} else {
err := d.client.DeleteBlob(d.container, path)
if err != nil {
return nil, err
}
}
} else {
if append {
return nil, storagedriver.PathNotFoundError{Path: path}
}
err := d.client.PutAppendBlob(d.container, path, nil)
if err != nil {
return nil, err
}
}
return d.newWriter(path, size), nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
// Check if the path is a blob
if ok, err := d.client.BlobExists(d.container, path); err != nil {
return nil, err
} else if ok {
blob, err := d.client.GetBlobProperties(d.container, path)
if err != nil {
return nil, err
}
mtim, err := time.Parse(http.TimeFormat, blob.LastModified)
if err != nil {
return nil, err
}
return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
Path: path,
Size: int64(blob.ContentLength),
ModTime: mtim,
IsDir: false,
}}, nil
}
// Check if path is a virtual container
virtContainerPath := path
if !strings.HasSuffix(virtContainerPath, "/") {
virtContainerPath += "/"
}
blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
Prefix: virtContainerPath,
MaxResults: 1,
})
if err != nil {
return nil, err
}
if len(blobs.Blobs) > 0 {
// path is a virtual container
return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
Path: path,
IsDir: true,
}}, nil
}
// path is not a blob or virtual container
return nil, storagedriver.PathNotFoundError{Path: path}
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
if path == "/" {
path = ""
}
blobs, err := d.listBlobs(d.container, path)
if err != nil {
return blobs, err
}
list := directDescendants(blobs, path)
if path != "" && len(list) == 0 {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return list, nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath)
err := d.client.CopyBlob(d.container, destPath, sourceBlobURL)
if err != nil {
if is404(err) {
return storagedriver.PathNotFoundError{Path: sourcePath}
}
return err
}
return d.client.DeleteBlob(d.container, sourcePath)
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
ok, err := d.client.DeleteBlobIfExists(d.container, path)
if err != nil {
return err
}
if ok {
return nil // was a blob and deleted, return
}
// Not a blob, see if path is a virtual container with blobs
blobs, err := d.listBlobs(d.container, path)
if err != nil {
return err
}
for _, b := range blobs {
if err = d.client.DeleteBlob(d.container, b); err != nil {
return err
}
}
if len(blobs) == 0 {
return storagedriver.PathNotFoundError{Path: path}
}
return nil
}
// URLFor returns a publicly accessible URL for the blob stored at given path
// for specified duration by making use of Azure Storage Shared Access Signatures (SAS).
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration
expires, ok := options["expiry"]
if ok {
t, ok := expires.(time.Time)
if ok {
expiresTime = t
}
}
return d.client.GetBlobSASURI(d.container, path, expiresTime, "r")
}
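// Example (illustrative only): request a SAS URL that expires in one hour
// instead of the 20-minute default; the blob path is a placeholder.
//
//	sasURL, err := d.URLFor(ctx, "/path/to/blob", map[string]interface{}{
//	    "expiry": time.Now().UTC().Add(1 * time.Hour),
//	})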
// directDescendants will find direct descendants (blobs or virtual containers)
// of the given prefix from a list of blob paths and will return their full
// paths. Elements in the blobs list must be prefixed with a "/".
//
// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is
// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"}
func directDescendants(blobs []string, prefix string) []string {
if !strings.HasPrefix(prefix, "/") { // add trailing '/'
prefix = "/" + prefix
}
if !strings.HasSuffix(prefix, "/") { // containerify the path
prefix += "/"
}
out := make(map[string]bool)
for _, b := range blobs {
if strings.HasPrefix(b, prefix) {
rel := b[len(prefix):]
c := strings.Count(rel, "/")
if c == 0 {
out[b] = true
} else {
out[prefix+rel[:strings.Index(rel, "/")]] = true
}
}
}
var keys []string
for k := range out {
keys = append(keys, k)
}
return keys
}
func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
virtPath += "/"
}
out := []string{}
marker := ""
for {
resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
Marker: marker,
Prefix: virtPath,
})
if err != nil {
return out, err
}
for _, b := range resp.Blobs {
out = append(out, b.Name)
}
if len(resp.Blobs) == 0 || resp.NextMarker == "" {
break
}
marker = resp.NextMarker
}
return out, nil
}
func is404(err error) bool {
statusCodeErr, ok := err.(azure.AzureStorageServiceError)
return ok && statusCodeErr.StatusCode == http.StatusNotFound
}
type writer struct {
driver *driver
path string
size int64
bw *bufio.Writer
closed bool
committed bool
cancelled bool
}
func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter {
return &writer{
driver: d,
path: path,
size: size,
bw: bufio.NewWriterSize(&blockWriter{
client: d.client,
container: d.container,
path: path,
}, maxChunkSize),
}
}
func (w *writer) Write(p []byte) (int, error) {
if w.closed {
return 0, fmt.Errorf("already closed")
} else if w.committed {
return 0, fmt.Errorf("already committed")
} else if w.cancelled {
return 0, fmt.Errorf("already cancelled")
}
n, err := w.bw.Write(p)
w.size += int64(n)
return n, err
}
func (w *writer) Size() int64 {
return w.size
}
func (w *writer) Close() error {
if w.closed {
return fmt.Errorf("already closed")
}
w.closed = true
return w.bw.Flush()
}
func (w *writer) Cancel() error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
}
w.cancelled = true
return w.driver.client.DeleteBlob(w.driver.container, w.path)
}
func (w *writer) Commit() error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
} else if w.cancelled {
return fmt.Errorf("already cancelled")
}
w.committed = true
return w.bw.Flush()
}
type blockWriter struct {
client azure.BlobStorageClient
container string
path string
}
func (bw *blockWriter) Write(p []byte) (int, error) {
n := 0
for offset := 0; offset < len(p); offset += maxChunkSize {
chunkSize := maxChunkSize
if offset+chunkSize > len(p) {
chunkSize = len(p) - offset
}
err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize])
if err != nil {
return n, err
}
n += chunkSize
}
return n, nil
}
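A hedged sketch of the writer lifecycle (not part of this commit), mirroring what PutContent above does internally: open a writer, write, then Commit before the deferred Close. Calling Cancel instead of Commit deletes the append blob. The path and payload are placeholders.
w, err := d.Writer(ctx, "/path/to/blob", false) // false: start a fresh append blob
if err != nil {
    // handle error
}
defer w.Close()
if _, err := w.Write(payload); err != nil {
    // handle write failure; w.Cancel() would delete the partial blob
}
if err := w.Commit(); err != nil {
    // handle commit failure
}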

63
docs/storage/driver/azure/azure_test.go Normal file
View File

@ -0,0 +1,63 @@
package azure
import (
"fmt"
"os"
"strings"
"testing"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/testsuites"
. "gopkg.in/check.v1"
)
const (
envAccountName = "AZURE_STORAGE_ACCOUNT_NAME"
envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY"
envContainer = "AZURE_STORAGE_CONTAINER"
envRealm = "AZURE_STORAGE_REALM"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }
func init() {
var (
accountName string
accountKey string
container string
realm string
)
config := []struct {
env string
value *string
}{
{envAccountName, &accountName},
{envAccountKey, &accountKey},
{envContainer, &container},
{envRealm, &realm},
}
missing := []string{}
for _, v := range config {
*v.value = os.Getenv(v.env)
if *v.value == "" {
missing = append(missing, v.env)
}
}
azureDriverConstructor := func() (storagedriver.StorageDriver, error) {
return New(accountName, accountKey, container, realm)
}
// Skip Azure storage driver tests if environment variable parameters are not provided
skipCheck := func() string {
if len(missing) > 0 {
return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", "))
}
return ""
}
testsuites.RegisterSuite(azureDriverConstructor, skipCheck)
}

198
docs/storage/driver/base/base.go Normal file
View File

@ -0,0 +1,198 @@
// Package base provides a base implementation of the storage driver that can
// be used to implement common checks. The goal is to increase the amount of
// code sharing.
//
// The canonical approach to use this class is to embed in the exported driver
// struct such that calls are proxied through this implementation. First,
// declare the internal driver, as follows:
//
// type driver struct { ... internal ...}
//
// The resulting type should implement StorageDriver such that it can be the
// target of a Base struct. The exported type can then be declared as follows:
//
// type Driver struct {
// Base
// }
//
// Because Driver embeds Base, it effectively implements StorageDriver through
// Base. If the driver needs to intercept a call before it reaches Base, Driver
// should implement that method itself. Effectively, Driver can intercept calls
// on the way in, while the internal driver implements the actual logic.
//
// To further shield the embed from other packages, it is recommended to
// employ a private embed struct:
//
// type baseEmbed struct {
// base.Base
// }
//
// Then, declare driver to embed baseEmbed, rather than Base directly:
//
// type Driver struct {
// baseEmbed
// }
//
// The type now implements StorageDriver, proxying through Base, without
// exporting an unnecessary field.
package base
import (
"io"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// Base provides a wrapper around a storagedriver implementation that provides
// common path and bounds checking.
type Base struct {
storagedriver.StorageDriver
}
// Format errors received from the storage driver
func (base *Base) setDriverName(e error) error {
switch actual := e.(type) {
case nil:
return nil
case storagedriver.ErrUnsupportedMethod:
actual.DriverName = base.StorageDriver.Name()
return actual
case storagedriver.PathNotFoundError:
actual.DriverName = base.StorageDriver.Name()
return actual
case storagedriver.InvalidPathError:
actual.DriverName = base.StorageDriver.Name()
return actual
case storagedriver.InvalidOffsetError:
actual.DriverName = base.StorageDriver.Name()
return actual
default:
storageError := storagedriver.Error{
DriverName: base.StorageDriver.Name(),
Enclosed: e,
}
return storageError
}
}
// GetContent wraps GetContent of underlying storage driver.
func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) {
ctx, done := context.WithTrace(ctx)
defer done("%s.GetContent(%q)", base.Name(), path)
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
b, e := base.StorageDriver.GetContent(ctx, path)
return b, base.setDriverName(e)
}
// PutContent wraps PutContent of underlying storage driver.
func (base *Base) PutContent(ctx context.Context, path string, content []byte) error {
ctx, done := context.WithTrace(ctx)
defer done("%s.PutContent(%q)", base.Name(), path)
if !storagedriver.PathRegexp.MatchString(path) {
return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content))
}
// Reader wraps Reader of underlying storage driver.
func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
ctx, done := context.WithTrace(ctx)
defer done("%s.Reader(%q, %d)", base.Name(), path, offset)
if offset < 0 {
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()}
}
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
rc, e := base.StorageDriver.Reader(ctx, path, offset)
return rc, base.setDriverName(e)
}
// Writer wraps Writer of underlying storage driver.
func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
ctx, done := context.WithTrace(ctx)
defer done("%s.Writer(%q, %v)", base.Name(), path, append)
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
writer, e := base.StorageDriver.Writer(ctx, path, append)
return writer, base.setDriverName(e)
}
// Stat wraps Stat of underlying storage driver.
func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
ctx, done := context.WithTrace(ctx)
defer done("%s.Stat(%q)", base.Name(), path)
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
fi, e := base.StorageDriver.Stat(ctx, path)
return fi, base.setDriverName(e)
}
// List wraps List of underlying storage driver.
func (base *Base) List(ctx context.Context, path string) ([]string, error) {
ctx, done := context.WithTrace(ctx)
defer done("%s.List(%q)", base.Name(), path)
if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
str, e := base.StorageDriver.List(ctx, path)
return str, base.setDriverName(e)
}
// Move wraps Move of underlying storage driver.
func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error {
ctx, done := context.WithTrace(ctx)
defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath)
if !storagedriver.PathRegexp.MatchString(sourcePath) {
return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()}
} else if !storagedriver.PathRegexp.MatchString(destPath) {
return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()}
}
return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath))
}
// Delete wraps Delete of underlying storage driver.
func (base *Base) Delete(ctx context.Context, path string) error {
ctx, done := context.WithTrace(ctx)
defer done("%s.Delete(%q)", base.Name(), path)
if !storagedriver.PathRegexp.MatchString(path) {
return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
return base.setDriverName(base.StorageDriver.Delete(ctx, path))
}
// URLFor wraps URLFor of underlying storage driver.
func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
ctx, done := context.WithTrace(ctx)
defer done("%s.URLFor(%q)", base.Name(), path)
if !storagedriver.PathRegexp.MatchString(path) {
return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
str, e := base.StorageDriver.URLFor(ctx, path, options)
return str, base.setDriverName(e)
}

145
docs/storage/driver/base/regulator.go Normal file
View File

@ -0,0 +1,145 @@
package base
import (
"io"
"sync"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
type regulator struct {
storagedriver.StorageDriver
*sync.Cond
available uint64
}
// NewRegulator wraps the given driver and is used to regulate concurrent calls
// to the given storage driver to a maximum of the given limit. This is useful
// for storage drivers that would otherwise create an unbounded number of OS
// threads if allowed to be called unregulated.
func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver {
return &regulator{
StorageDriver: driver,
Cond: sync.NewCond(&sync.Mutex{}),
available: limit,
}
}
func (r *regulator) enter() {
r.L.Lock()
for r.available == 0 {
r.Wait()
}
r.available--
r.L.Unlock()
}
func (r *regulator) exit() {
r.L.Lock()
// We only need to signal to a waiting FS operation if we're already at the
// limit of threads used
if r.available == 0 {
r.Signal()
}
r.available++
r.L.Unlock()
}
// Name returns the human-readable "name" of the driver, useful in error
// messages and logging. By convention, this will just be the registration
// name, but drivers may provide other information here.
func (r *regulator) Name() string {
r.enter()
defer r.exit()
return r.StorageDriver.Name()
}
// GetContent retrieves the content stored at "path" as a []byte.
// This should primarily be used for small objects.
func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {
r.enter()
defer r.exit()
return r.StorageDriver.GetContent(ctx, path)
}
// PutContent stores the []byte content at a location designated by "path".
// This should primarily be used for small objects.
func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error {
r.enter()
defer r.exit()
return r.StorageDriver.PutContent(ctx, path, content)
}
// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
r.enter()
defer r.exit()
return r.StorageDriver.Reader(ctx, path, offset)
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
// Pass append=true to continue writing to an existing object.
func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
r.enter()
defer r.exit()
return r.StorageDriver.Writer(ctx, path, append)
}
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
r.enter()
defer r.exit()
return r.StorageDriver.Stat(ctx, path)
}
// List returns a list of the objects that are direct descendants of the
// given path.
func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
r.enter()
defer r.exit()
return r.StorageDriver.List(ctx, path)
}
// Move moves an object stored at sourcePath to destPath, removing the
// original object.
// Note: This may be no more efficient than a copy followed by a delete for
// many implementations.
func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error {
r.enter()
defer r.exit()
return r.StorageDriver.Move(ctx, sourcePath, destPath)
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (r *regulator) Delete(ctx context.Context, path string) error {
r.enter()
defer r.exit()
return r.StorageDriver.Delete(ctx, path)
}
// URLFor returns a URL which may be used to retrieve the content stored at
// the given path, possibly using the given options.
// May return an ErrUnsupportedMethod in certain StorageDriver
// implementations.
func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
r.enter()
defer r.exit()
return r.StorageDriver.URLFor(ctx, path, options)
}
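An illustrative sketch (not part of this commit): any StorageDriver can be wrapped this way; the filesystem driver later in this commit does exactly this with its maxthreads parameter. The someDriver name and the limit of 100 are arbitrary examples.
limited := base.NewRegulator(someDriver, 100) // at most 100 concurrent blocking calls
content, err := limited.GetContent(ctx, "/some/path")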

64
docs/storage/driver/factory/factory.go Normal file
View File

@ -0,0 +1,64 @@
package factory
import (
"fmt"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// driverFactories stores an internal mapping between storage driver names and their respective
// factories
var driverFactories = make(map[string]StorageDriverFactory)
// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces
// Storage drivers should call Register() with a factory to make the driver available by name.
// Individual StorageDriver implementations generally register with the factory via the Register
// func (below) in their init() funcs, and as such they should be imported anonymously before use.
// See below for an example of how to register and get a StorageDriver for S3
//
// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws"
// s3Driver, err = factory.Create("s3", storageParams)
// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams
type StorageDriverFactory interface {
// Create returns a new storagedriver.StorageDriver with the given parameters
// Parameters will vary by driver and may be ignored
// Each parameter key must only consist of lowercase letters and numbers
Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error)
}
// Register makes a storage driver available by the provided name.
// If Register is called twice with the same name or if driver factory is nil, it panics.
// Additionally, it is not concurrency safe. Most Storage Drivers call this function
// in their init() functions. See the documentation for StorageDriverFactory for more.
func Register(name string, factory StorageDriverFactory) {
if factory == nil {
panic("Must not provide nil StorageDriverFactory")
}
_, registered := driverFactories[name]
if registered {
panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name))
}
driverFactories[name] = factory
}
// Create a new storagedriver.StorageDriver with the given name and
// parameters. To use a driver, the StorageDriverFactory must first be
// registered with the given name. If no drivers are found, an
// InvalidStorageDriverError is returned
func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
driverFactory, ok := driverFactories[name]
if !ok {
return nil, InvalidStorageDriverError{name}
}
return driverFactory.Create(parameters)
}
// InvalidStorageDriverError records an attempt to construct an unregistered storage driver
type InvalidStorageDriverError struct {
Name string
}
func (err InvalidStorageDriverError) Error() string {
return fmt.Sprintf("StorageDriver not registered: %s", err.Name)
}
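The registration side of the pattern, sketched for illustration; the azure and filesystem drivers in this commit follow it. The driver name, factory type, and newMyDriver constructor are hypothetical.
type myDriverFactory struct{}

func (f *myDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
    return newMyDriver(parameters) // hypothetical constructor
}

func init() {
    factory.Register("mydriver", &myDriverFactory{})
}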

79
docs/storage/driver/fileinfo.go Normal file
View File

@ -0,0 +1,79 @@
package driver
import "time"
// FileInfo returns information about a given path. Inspired by os.FileInfo,
// it elides the base name method for a full path instead.
type FileInfo interface {
// Path provides the full path of the target of this file info.
Path() string
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
Size() int64
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
ModTime() time.Time
// IsDir returns true if the path is a directory.
IsDir() bool
}
// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal
// should only be used by storagedriver implementations. They should moved to
// a "driver" package, similar to database/sql.
// FileInfoFields provides the exported fields for implementing FileInfo
// interface in storagedriver implementations. It should be used with
// InternalFileInfo.
type FileInfoFields struct {
// Path provides the full path of the target of this file info.
Path string
// Size is current length in bytes of the file. The value of this field
// can be used to write to the end of the file at path. The value is
// meaningless if IsDir is set to true.
Size int64
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
ModTime time.Time
// IsDir returns true if the path is a directory.
IsDir bool
}
// FileInfoInternal implements the FileInfo interface. This should only be
// used by storagedriver implementations that don't have a specialized
// FileInfo type.
type FileInfoInternal struct {
FileInfoFields
}
var _ FileInfo = FileInfoInternal{}
var _ FileInfo = &FileInfoInternal{}
// Path provides the full path of the target of this file info.
func (fi FileInfoInternal) Path() string {
return fi.FileInfoFields.Path
}
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi FileInfoInternal) Size() int64 {
return fi.FileInfoFields.Size
}
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi FileInfoInternal) ModTime() time.Time {
return fi.FileInfoFields.ModTime
}
// IsDir returns true if the path is a directory.
func (fi FileInfoInternal) IsDir() bool {
return fi.FileInfoFields.IsDir
}
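How a driver typically satisfies FileInfo, mirroring the azure Stat implementation earlier in this commit; the field values here are placeholders.
fi := storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
    Path:    "/some/path",
    Size:    1024,
    ModTime: time.Now(),
    IsDir:   false,
}}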

440
docs/storage/driver/filesystem/driver.go Normal file
View File

@ -0,0 +1,440 @@
package filesystem
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"strconv"
"time"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
)
const (
driverName = "filesystem"
defaultRootDirectory = "/var/lib/registry"
defaultMaxThreads = uint64(100)
// minThreads is the minimum value for the maxthreads configuration
// parameter. If the driver's parameters are less than this we set
// the parameters to minThreads
minThreads = uint64(25)
)
// DriverParameters represents all configuration options available for the
// filesystem driver
type DriverParameters struct {
RootDirectory string
MaxThreads uint64
}
func init() {
factory.Register(driverName, &filesystemDriverFactory{})
}
// filesystemDriverFactory implements the factory.StorageDriverFactory interface
type filesystemDriverFactory struct{}
func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters)
}
type driver struct {
rootDirectory string
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local
// filesystem. All provided paths will be subpaths of the RootDirectory.
type Driver struct {
baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - rootdirectory
// - maxthreads
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
params, err := fromParametersImpl(parameters)
if err != nil || params == nil {
return nil, err
}
return New(*params), nil
}
func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) {
var (
err error
maxThreads = defaultMaxThreads
rootDirectory = defaultRootDirectory
)
if parameters != nil {
if rootDir, ok := parameters["rootdirectory"]; ok {
rootDirectory = fmt.Sprint(rootDir)
}
// Get maximum number of threads for blocking filesystem operations,
// if specified
threads := parameters["maxthreads"]
switch v := threads.(type) {
case string:
if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil {
return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads)
}
case uint64:
maxThreads = v
case int, int32, int64:
val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int()
// If threads is negative, casting to uint64 would wrap around and
// yield an enormous thread limit. Let's be sensible here.
if val > 0 {
maxThreads = uint64(val)
}
case uint, uint32:
maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint()
case nil:
// do nothing
default:
return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads)
}
if maxThreads < minThreads {
maxThreads = minThreads
}
}
params := &DriverParameters{
RootDirectory: rootDirectory,
MaxThreads: maxThreads,
}
return params, nil
}
// New constructs a new Driver with a given rootDirectory
func New(params DriverParameters) *Driver {
fsDriver := &driver{rootDirectory: params.RootDirectory}
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads),
},
},
}
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
rc, err := d.Reader(ctx, path, 0)
if err != nil {
return nil, err
}
defer rc.Close()
p, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
return p, nil
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error {
writer, err := d.Writer(ctx, subPath, false)
if err != nil {
return err
}
defer writer.Close()
_, err = io.Copy(writer, bytes.NewReader(contents))
if err != nil {
writer.Cancel()
return err
}
return writer.Commit()
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
if err != nil {
if os.IsNotExist(err) {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
seekPos, err := file.Seek(int64(offset), os.SEEK_SET)
if err != nil {
file.Close()
return nil, err
} else if seekPos < int64(offset) {
file.Close()
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
}
return file, nil
}
func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) {
fullPath := d.fullPath(subPath)
parentDir := path.Dir(fullPath)
if err := os.MkdirAll(parentDir, 0777); err != nil {
return nil, err
}
fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
return nil, err
}
var offset int64
if !append {
err := fp.Truncate(0)
if err != nil {
fp.Close()
return nil, err
}
} else {
n, err := fp.Seek(0, os.SEEK_END)
if err != nil {
fp.Close()
return nil, err
}
offset = int64(n)
}
return newFileWriter(fp, offset), nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) {
fullPath := d.fullPath(subPath)
fi, err := os.Stat(fullPath)
if err != nil {
if os.IsNotExist(err) {
return nil, storagedriver.PathNotFoundError{Path: subPath}
}
return nil, err
}
return fileInfo{
path: subPath,
FileInfo: fi,
}, nil
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, subPath string) ([]string, error) {
fullPath := d.fullPath(subPath)
dir, err := os.Open(fullPath)
if err != nil {
if os.IsNotExist(err) {
return nil, storagedriver.PathNotFoundError{Path: subPath}
}
return nil, err
}
defer dir.Close()
fileNames, err := dir.Readdirnames(0)
if err != nil {
return nil, err
}
keys := make([]string, 0, len(fileNames))
for _, fileName := range fileNames {
keys = append(keys, path.Join(subPath, fileName))
}
return keys, nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
source := d.fullPath(sourcePath)
dest := d.fullPath(destPath)
if _, err := os.Stat(source); os.IsNotExist(err) {
return storagedriver.PathNotFoundError{Path: sourcePath}
}
if err := os.MkdirAll(path.Dir(dest), 0755); err != nil {
return err
}
err := os.Rename(source, dest)
return err
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, subPath string) error {
fullPath := d.fullPath(subPath)
_, err := os.Stat(fullPath)
if err != nil && !os.IsNotExist(err) {
return err
} else if err != nil {
return storagedriver.PathNotFoundError{Path: subPath}
}
err = os.RemoveAll(fullPath)
return err
}
// URLFor returns a URL which may be used to retrieve the content stored at the given path.
// May return an UnsupportedMethodErr in certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", storagedriver.ErrUnsupportedMethod{}
}
// fullPath returns the absolute path of a key within the Driver's storage.
func (d *driver) fullPath(subPath string) string {
return path.Join(d.rootDirectory, subPath)
}
type fileInfo struct {
os.FileInfo
path string
}
var _ storagedriver.FileInfo = fileInfo{}
// Path provides the full path of the target of this file info.
func (fi fileInfo) Path() string {
return fi.path
}
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi fileInfo) Size() int64 {
if fi.IsDir() {
return 0
}
return fi.FileInfo.Size()
}
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi fileInfo) ModTime() time.Time {
return fi.FileInfo.ModTime()
}
// IsDir returns true if the path is a directory.
func (fi fileInfo) IsDir() bool {
return fi.FileInfo.IsDir()
}
type fileWriter struct {
file *os.File
size int64
bw *bufio.Writer
closed bool
committed bool
cancelled bool
}
func newFileWriter(file *os.File, size int64) *fileWriter {
return &fileWriter{
file: file,
size: size,
bw: bufio.NewWriter(file),
}
}
func (fw *fileWriter) Write(p []byte) (int, error) {
if fw.closed {
return 0, fmt.Errorf("already closed")
} else if fw.committed {
return 0, fmt.Errorf("already committed")
} else if fw.cancelled {
return 0, fmt.Errorf("already cancelled")
}
n, err := fw.bw.Write(p)
fw.size += int64(n)
return n, err
}
func (fw *fileWriter) Size() int64 {
return fw.size
}
func (fw *fileWriter) Close() error {
if fw.closed {
return fmt.Errorf("already closed")
}
if err := fw.bw.Flush(); err != nil {
return err
}
if err := fw.file.Sync(); err != nil {
return err
}
if err := fw.file.Close(); err != nil {
return err
}
fw.closed = true
return nil
}
func (fw *fileWriter) Cancel() error {
if fw.closed {
return fmt.Errorf("already closed")
}
fw.cancelled = true
fw.file.Close()
return os.Remove(fw.file.Name())
}
func (fw *fileWriter) Commit() error {
if fw.closed {
return fmt.Errorf("already closed")
} else if fw.committed {
return fmt.Errorf("already committed")
} else if fw.cancelled {
return fmt.Errorf("already cancelled")
}
if err := fw.bw.Flush(); err != nil {
return err
}
if err := fw.file.Sync(); err != nil {
return err
}
fw.committed = true
return nil
}
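A hedged round-trip sketch with the filesystem driver (not part of this commit); the root directory and blob path are placeholders, and ctx comes from github.com/docker/distribution/context.
d := New(DriverParameters{
    RootDirectory: "/tmp/registry-scratch", // placeholder
    MaxThreads:    defaultMaxThreads,
})
if err := d.PutContent(ctx, "/repo/blob", []byte("hello")); err != nil {
    // handle error
}
data, err := d.GetContent(ctx, "/repo/blob") // returns []byte("hello")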

113
docs/storage/driver/filesystem/driver_test.go Normal file
View File

@ -0,0 +1,113 @@
package filesystem
import (
"io/ioutil"
"os"
"reflect"
"testing"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/testsuites"
. "gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }
func init() {
root, err := ioutil.TempDir("", "driver-")
if err != nil {
panic(err)
}
defer os.Remove(root)
driver, err := FromParameters(map[string]interface{}{
"rootdirectory": root,
})
if err != nil {
panic(err)
}
testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
return driver, nil
}, testsuites.NeverSkip)
}
func TestFromParametersImpl(t *testing.T) {
tests := []struct {
params map[string]interface{} // technically the yaml can contain anything
expected DriverParameters
pass bool
}{
// check we use default threads and root dirs
{
params: map[string]interface{}{},
expected: DriverParameters{
RootDirectory: defaultRootDirectory,
MaxThreads: defaultMaxThreads,
},
pass: true,
},
// Testing initiation with a string maxThreads which can't be parsed
{
params: map[string]interface{}{
"maxthreads": "fail",
},
expected: DriverParameters{},
pass: false,
},
{
params: map[string]interface{}{
"maxthreads": "100",
},
expected: DriverParameters{
RootDirectory: defaultRootDirectory,
MaxThreads: uint64(100),
},
pass: true,
},
{
params: map[string]interface{}{
"maxthreads": 100,
},
expected: DriverParameters{
RootDirectory: defaultRootDirectory,
MaxThreads: uint64(100),
},
pass: true,
},
// check that we use minimum thread counts
{
params: map[string]interface{}{
"maxthreads": 1,
},
expected: DriverParameters{
RootDirectory: defaultRootDirectory,
MaxThreads: minThreads,
},
pass: true,
},
}
for _, item := range tests {
params, err := fromParametersImpl(item.params)
if !item.pass {
// We only need to assert that expected failures have an error
if err == nil {
t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params)
}
continue
}
if err != nil {
t.Fatalf("unexpected error creating filesystem driver: %s", err)
}
// Note that we get a pointer to params back
if !reflect.DeepEqual(*params, item.expected) {
t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params)
}
}
}

3
docs/storage/driver/gcs/doc.go Normal file
View File

@ -0,0 +1,3 @@
// Package gcs implements the Google Cloud Storage driver backend. Support can be
// enabled by including the "include_gcs" build tag.
package gcs

873
docs/storage/driver/gcs/gcs.go Normal file
View File

@ -0,0 +1,873 @@
// Package gcs provides a storagedriver.StorageDriver implementation to
// store blobs in Google cloud storage.
//
// This package leverages the google.golang.org/cloud/storage client library
// for interfacing with gcs.
//
// Because gcs is a key-value store, the Stat call does not support
// last-modification time for directories (directories are an abstraction
// for key-value stores).
//
// Note that the contents of incomplete uploads are not accessible even though
// Stat returns their length.
//
// +build include_gcs
package gcs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/api/googleapi"
"google.golang.org/cloud"
"google.golang.org/cloud/storage"
"github.com/Sirupsen/logrus"
ctx "github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
)
const (
driverName = "gcs"
dummyProjectID = "<unknown>"
uploadSessionContentType = "application/x-docker-upload-session"
minChunkSize = 256 * 1024
defaultChunkSize = 20 * minChunkSize
maxTries = 5
)
var rangeHeader = regexp.MustCompile(`^bytes=([0-9]+)-([0-9]+)$`) // capture the whole first number, not just its last digit
// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set
type driverParameters struct {
bucket string
config *jwt.Config
email string
privateKey []byte
client *http.Client
rootDirectory string
chunkSize int
}
func init() {
factory.Register(driverName, &gcsDriverFactory{})
}
// gcsDriverFactory implements the factory.StorageDriverFactory interface
type gcsDriverFactory struct{}
// Create StorageDriver from parameters
func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters)
}
// driver is a storagedriver.StorageDriver implementation backed by GCS
// Objects are stored at absolute keys in the provided bucket.
type driver struct {
client *http.Client
bucket string
email string
privateKey []byte
rootDirectory string
chunkSize int
}
// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - bucket
func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
bucket, ok := parameters["bucket"]
if !ok || fmt.Sprint(bucket) == "" {
return nil, fmt.Errorf("No bucket parameter provided")
}
rootDirectory, ok := parameters["rootdirectory"]
if !ok {
rootDirectory = ""
}
chunkSize := defaultChunkSize
chunkSizeParam, ok := parameters["chunksize"]
if ok {
switch v := chunkSizeParam.(type) {
case string:
vv, err := strconv.Atoi(v)
if err != nil {
return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
}
chunkSize = vv
case int, uint, int32, uint32, uint64, int64:
chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int())
default:
return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam)
}
if chunkSize < minChunkSize {
return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
}
if chunkSize%minChunkSize != 0 {
return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize)
}
}
var ts oauth2.TokenSource
jwtConf := new(jwt.Config)
if keyfile, ok := parameters["keyfile"]; ok {
jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile))
if err != nil {
return nil, err
}
jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl)
if err != nil {
return nil, err
}
ts = jwtConf.TokenSource(context.Background())
} else {
var err error
ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl)
if err != nil {
return nil, err
}
}
params := driverParameters{
bucket: fmt.Sprint(bucket),
rootDirectory: fmt.Sprint(rootDirectory),
email: jwtConf.Email,
privateKey: jwtConf.PrivateKey,
client: oauth2.NewClient(context.Background(), ts),
chunkSize: chunkSize,
}
return New(params)
}
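// Example (illustrative only, not part of this commit): a parameters map as it
// would arrive from the registry configuration; the bucket and keyfile values
// are placeholders. When "keyfile" is omitted, Application Default Credentials
// are used instead.
//
//	d, err := FromParameters(map[string]interface{}{
//	    "bucket":        "my-registry-bucket",
//	    "rootdirectory": "/registry",
//	    "keyfile":       "/etc/gcs-key.json",
//	    "chunksize":     5 * minChunkSize, // must be a multiple of minChunkSize
//	})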
// New constructs a new driver
func New(params driverParameters) (storagedriver.StorageDriver, error) {
rootDirectory := strings.Trim(params.rootDirectory, "/")
if rootDirectory != "" {
rootDirectory += "/"
}
if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 {
return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize)
}
d := &driver{
bucket: params.bucket,
rootDirectory: rootDirectory,
email: params.email,
privateKey: params.privateKey,
client: params.client,
chunkSize: params.chunkSize,
}
return &base.Base{
StorageDriver: d,
}, nil
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
// This should primarily be used for small objects.
func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) {
gcsContext := d.context(context)
name := d.pathToKey(path)
var rc io.ReadCloser
err := retry(func() error {
var err error
rc, err = storage.NewReader(gcsContext, d.bucket, name)
return err
})
if err == storage.ErrObjectNotExist {
return nil, storagedriver.PathNotFoundError{Path: path}
}
if err != nil {
return nil, err
}
defer rc.Close()
p, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
return p, nil
}
// PutContent stores the []byte content at a location designated by "path".
// This should primarily be used for small objects.
func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error {
return retry(func() error {
wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path))
wc.ContentType = "application/octet-stream"
return putContentsClose(wc, contents)
})
}
// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) {
res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset)
if err != nil {
if res != nil {
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return nil, storagedriver.PathNotFoundError{Path: path}
}
if res.StatusCode == http.StatusRequestedRangeNotSatisfiable {
res.Body.Close()
obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path))
if err != nil {
return nil, err
}
if offset == int64(obj.Size) {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
}
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
}
}
return nil, err
}
if res.Header.Get("Content-Type") == uploadSessionContentType {
defer res.Body.Close()
return nil, storagedriver.PathNotFoundError{Path: path}
}
return res.Body, nil
}
func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) {
// copied from google.golang.org/cloud/storage#NewReader :
// to set the additional "Range" header
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", bucket, name),
}
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
if offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset))
}
var res *http.Response
err = retry(func() error {
var err error
res, err = client.Do(req)
return err
})
if err != nil {
return nil, err
}
return res, googleapi.CheckMediaResponse(res)
}
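// A note on the Range header above: for offset 100 the request carries
// "Range: bytes=100-", i.e. everything from byte 100 through the end of the
// object. GCS answers 416 (Requested Range Not Satisfiable) when the offset
// lies beyond the object, which Reader turns into either an empty reader or
// an InvalidOffsetError.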
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) {
writer := &writer{
client: d.client,
bucket: d.bucket,
name: d.pathToKey(path),
buffer: make([]byte, d.chunkSize),
}
if append {
err := writer.init(path)
if err != nil {
return nil, err
}
}
return writer, nil
}
type writer struct {
client *http.Client
bucket string
name string
size int64
offset int64
closed bool
sessionURI string
buffer []byte
buffSize int
}
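// Buffering invariants maintained by the methods below (a summary inferred
// from the code rather than stated upstream): buffer holds at most chunkSize
// bytes, of which the first buffSize are valid; offset counts the bytes
// already flushed to the resumable upload session; size is offset plus
// whatever remains buffered once the writer is committed or closed.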
// Cancel removes any written content from this FileWriter.
func (w *writer) Cancel() error {
w.closed = true
err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
if err != nil {
if status, ok := err.(*googleapi.Error); ok {
if status.Code == http.StatusNotFound {
err = nil
}
}
}
return err
}
func (w *writer) Close() error {
if w.closed {
return nil
}
w.closed = true
err := w.writeChunk()
if err != nil {
return err
}
	// Copy the remaining bytes from the buffer to the upload session.
	// Normally buffSize will be smaller than minChunkSize. However, in the
	// unlikely event that the upload session failed to start, this number
	// could be higher. In that case the excess bytes were already reported
	// to the caller as a failed write, so they can safely be clipped to
	// minChunkSize here.
if w.buffSize > minChunkSize {
w.buffSize = minChunkSize
}
// commit the writes by updating the upload session
err = retry(func() error {
wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
wc.ContentType = uploadSessionContentType
wc.Metadata = map[string]string{
"Session-URI": w.sessionURI,
"Offset": strconv.FormatInt(w.offset, 10),
}
return putContentsClose(wc, w.buffer[0:w.buffSize])
})
if err != nil {
return err
}
w.size = w.offset + int64(w.buffSize)
w.buffSize = 0
return nil
}
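// Close does not discard unfinished uploads: the buffered tail and the
// session URI are persisted as a placeholder object whose content type is
// uploadSessionContentType and whose metadata records Session-URI and
// Offset. A later Writer(..., append=true) finds this placeholder in init()
// and resumes where the previous writer stopped, while Reader, Stat, and
// List deliberately hide such placeholders from readers.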
func putContentsClose(wc *storage.Writer, contents []byte) error {
	size := len(contents)
	var nn int
	var err error
	for nn < size {
		// assign (not :=) so the write error is not shadowed and survives
		// the loop for the check below
		var n int
		n, err = wc.Write(contents[nn:size])
		nn += n
		if err != nil {
			break
		}
	}
	if err != nil {
		wc.CloseWithError(err)
		return err
	}
	return wc.Close()
}
// Commit flushes all content written to this FileWriter and makes it
// available for future calls to StorageDriver.GetContent and
// StorageDriver.Reader.
func (w *writer) Commit() error {
if err := w.checkClosed(); err != nil {
return err
}
w.closed = true
	// no session started yet; just perform a simple upload
if w.sessionURI == "" {
err := retry(func() error {
wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
wc.ContentType = "application/octet-stream"
return putContentsClose(wc, w.buffer[0:w.buffSize])
})
if err != nil {
return err
}
w.size = w.offset + int64(w.buffSize)
w.buffSize = 0
return nil
}
size := w.offset + int64(w.buffSize)
var nn int
// loop must be performed at least once to ensure the file is committed even when
// the buffer is empty
for {
n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size)
nn += int(n)
w.offset += n
w.size = w.offset
if err != nil {
w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize])
return err
}
if nn == w.buffSize {
break
}
}
w.buffSize = 0
return nil
}
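// A worked example of what Commit puts on the wire (assuming a session is
// already open): with offset == 0 and buffSize == 1024 the final chunk is
// sent with "Content-Range: bytes 0-1023/1024", which tells GCS the object
// is complete. An empty commit (buffSize == 0) degenerates to
// "Content-Range: bytes */<size>", the protocol's way of finalizing an
// upload without sending data; see putChunk below.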
func (w *writer) checkClosed() error {
if w.closed {
return fmt.Errorf("Writer already closed")
}
return nil
}
func (w *writer) writeChunk() error {
var err error
// chunks can be uploaded only in multiples of minChunkSize
// chunkSize is a multiple of minChunkSize less than or equal to buffSize
chunkSize := w.buffSize - (w.buffSize % minChunkSize)
if chunkSize == 0 {
return nil
}
	// if there is no sessionURI yet, obtain one by starting the session
if w.sessionURI == "" {
w.sessionURI, err = startSession(w.client, w.bucket, w.name)
}
if err != nil {
return err
}
nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1)
w.offset += nn
if w.offset > w.size {
w.size = w.offset
}
// shift the remaining bytes to the start of the buffer
w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize])
return err
}
func (w *writer) Write(p []byte) (int, error) {
err := w.checkClosed()
if err != nil {
return 0, err
}
var nn int
for nn < len(p) {
n := copy(w.buffer[w.buffSize:], p[nn:])
w.buffSize += n
if w.buffSize == cap(w.buffer) {
err = w.writeChunk()
if err != nil {
break
}
}
nn += n
}
return nn, err
}
// Size returns the number of bytes written to this FileWriter.
func (w *writer) Size() int64 {
return w.size
}
func (w *writer) init(path string) error {
res, err := getObject(w.client, w.bucket, w.name, 0)
if err != nil {
return err
}
defer res.Body.Close()
if res.Header.Get("Content-Type") != uploadSessionContentType {
return storagedriver.PathNotFoundError{Path: path}
}
offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64)
if err != nil {
return err
}
buffer, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI")
w.buffSize = copy(w.buffer, buffer)
w.offset = offset
w.size = offset + int64(w.buffSize)
return nil
}
type request func() error
func retry(req request) error {
backoff := time.Second
var err error
for i := 0; i < maxTries; i++ {
err = req()
if err == nil {
return nil
}
status, ok := err.(*googleapi.Error)
		if !ok || (status.Code != http.StatusTooManyRequests && status.Code < http.StatusInternalServerError) {
return err
}
time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond))
if i <= 4 {
backoff = backoff * 2
}
}
return err
}
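// Sketch of the resulting schedule (assuming maxTries leaves room for all of
// these attempts): each sleep is backoff-1s plus up to 1s of jitter, and
// backoff doubles through the first five failures, so successive waits fall
// roughly in the ranges 0-1s, 1-2s, 3-4s, 7-8s, 15-16s, then hold at
// 31-32s. Only 429 and 5xx googleapi errors are retried; anything else is
// returned immediately.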
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) {
var fi storagedriver.FileInfoFields
	// try to get as file
gcsContext := d.context(context)
obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path))
if err == nil {
if obj.ContentType == uploadSessionContentType {
return nil, storagedriver.PathNotFoundError{Path: path}
}
fi = storagedriver.FileInfoFields{
Path: path,
Size: obj.Size,
ModTime: obj.Updated,
IsDir: false,
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
	// try to get as folder
	dirpath := d.pathToDirKey(path)
	query := &storage.Query{}
query.Prefix = dirpath
query.MaxResults = 1
objects, err := storageListObjects(gcsContext, d.bucket, query)
if err != nil {
return nil, err
}
if len(objects.Results) < 1 {
return nil, storagedriver.PathNotFoundError{Path: path}
}
fi = storagedriver.FileInfoFields{
Path: path,
IsDir: true,
}
obj = objects.Results[0]
if obj.Name == dirpath {
fi.Size = obj.Size
fi.ModTime = obj.Updated
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the
// given path.
func (d *driver) List(context ctx.Context, path string) ([]string, error) {
	query := &storage.Query{}
query.Delimiter = "/"
query.Prefix = d.pathToDirKey(path)
list := make([]string, 0, 64)
for {
objects, err := storageListObjects(d.context(context), d.bucket, query)
if err != nil {
return nil, err
}
for _, object := range objects.Results {
// GCS does not guarantee strong consistency between
// DELETE and LIST operations. Check that the object is not deleted,
// and filter out any objects with a non-zero time-deleted
if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType {
list = append(list, d.keyToPath(object.Name))
}
}
for _, subpath := range objects.Prefixes {
subpath = d.keyToPath(subpath)
list = append(list, subpath)
}
query = objects.Next
if query == nil {
break
}
}
if path != "/" && len(list) == 0 {
// Treat empty response as missing directory, since we don't actually
// have directories in Google Cloud Storage.
return nil, storagedriver.PathNotFoundError{Path: path}
}
return list, nil
}
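// Hypothetical example of the delimiter listing above: with rootDirectory
// "registry/" and objects "registry/a/b" and "registry/a/c/d" in the bucket,
// List(ctx, "/a") yields "/a/b" from Results and "/a/c" from Prefixes; the
// deeper key "d" stays hidden behind the "/" delimiter.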
// Move moves an object stored at sourcePath to destPath, removing the
// original object.
func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error {
gcsContext := d.context(context)
_, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil)
if err != nil {
if status, ok := err.(*googleapi.Error); ok {
if status.Code == http.StatusNotFound {
return storagedriver.PathNotFoundError{Path: sourcePath}
}
}
return err
}
err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath))
// if deleting the file fails, log the error, but do not fail; the file was successfully copied,
// and the original should eventually be cleaned when purging the uploads folder.
if err != nil {
logrus.Infof("error deleting file: %v due to %v", sourcePath, err)
}
return nil
}
// listAll recursively lists all names of objects stored at "prefix" and its subpaths.
func (d *driver) listAll(context context.Context, prefix string) ([]string, error) {
list := make([]string, 0, 64)
query := &storage.Query{}
query.Prefix = prefix
query.Versions = false
for {
objects, err := storageListObjects(d.context(context), d.bucket, query)
if err != nil {
return nil, err
}
for _, obj := range objects.Results {
// GCS does not guarantee strong consistency between
// DELETE and LIST operations. Check that the object is not deleted,
// and filter out any objects with a non-zero time-deleted
if obj.Deleted.IsZero() {
list = append(list, obj.Name)
}
}
query = objects.Next
if query == nil {
break
}
}
return list, nil
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(context ctx.Context, path string) error {
prefix := d.pathToDirKey(path)
gcsContext := d.context(context)
keys, err := d.listAll(gcsContext, prefix)
if err != nil {
return err
}
if len(keys) > 0 {
sort.Sort(sort.Reverse(sort.StringSlice(keys)))
for _, key := range keys {
err := storageDeleteObject(gcsContext, d.bucket, key)
// GCS only guarantees eventual consistency, so listAll might return
// paths that no longer exist. If this happens, just ignore any not
// found error
if status, ok := err.(*googleapi.Error); ok {
if status.Code == http.StatusNotFound {
err = nil
}
}
if err != nil {
return err
}
}
return nil
}
err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path))
if err != nil {
if status, ok := err.(*googleapi.Error); ok {
if status.Code == http.StatusNotFound {
return storagedriver.PathNotFoundError{Path: path}
}
}
}
return err
}
func storageDeleteObject(context context.Context, bucket string, name string) error {
return retry(func() error {
return storage.DeleteObject(context, bucket, name)
})
}
func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) {
var obj *storage.Object
err := retry(func() error {
var err error
obj, err = storage.StatObject(context, bucket, name)
return err
})
return obj, err
}
func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) {
var objs *storage.Objects
err := retry(func() error {
var err error
objs, err = storage.ListObjects(context, bucket, q)
return err
})
return objs, err
}
func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) {
var obj *storage.Object
err := retry(func() error {
var err error
obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs)
return err
})
return obj, err
}
// URLFor returns a URL which may be used to retrieve the content stored at
// the given path, possibly using the given options.
// Returns ErrUnsupportedMethod if this driver has no privateKey
func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) {
if d.privateKey == nil {
return "", storagedriver.ErrUnsupportedMethod{}
}
name := d.pathToKey(path)
methodString := "GET"
method, ok := options["method"]
if ok {
methodString, ok = method.(string)
if !ok || (methodString != "GET" && methodString != "HEAD") {
return "", storagedriver.ErrUnsupportedMethod{}
}
}
expiresTime := time.Now().Add(20 * time.Minute)
expires, ok := options["expiry"]
if ok {
et, ok := expires.(time.Time)
if ok {
expiresTime = et
}
}
opts := &storage.SignedURLOptions{
GoogleAccessID: d.email,
PrivateKey: d.privateKey,
Method: methodString,
Expires: expiresTime,
}
return storage.SignedURL(d.bucket, name, opts)
}
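// Illustrative call (path and expiry hypothetical): a registry frontend
// might redirect a blob download with
//
//	url, err := d.URLFor(ctx, "/path/to/blob", map[string]interface{}{
//		"method": "GET",
//		"expiry": time.Now().Add(5 * time.Minute),
//	})
//
// which signs the key with the driver's private key; without a keyfile the
// driver has no privateKey and ErrUnsupportedMethod is returned instead.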
func startSession(client *http.Client, bucket string, name string) (uri string, err error) {
u := &url.URL{
Scheme: "https",
Host: "www.googleapis.com",
Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket),
RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name),
}
err = retry(func() error {
req, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return err
}
req.Header.Set("X-Upload-Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", "0")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
err = googleapi.CheckMediaResponse(resp)
if err != nil {
return err
}
uri = resp.Header.Get("Location")
return nil
})
return uri, err
}
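// This POST is the opening leg of the GCS resumable-upload protocol: the
// 200 response carries a Location header naming the session, and that URI
// is the only state needed to PUT chunks later (see putChunk) or to park an
// unfinished upload in Close. One caveat, observed from the code rather
// than documented upstream: name is interpolated into RawQuery unescaped,
// so this assumes object keys never contain characters that are special in
// query strings.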
func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) {
bytesPut := int64(0)
err := retry(func() error {
req, err := http.NewRequest("PUT", sessionURI, bytes.NewReader(chunk))
if err != nil {
return err
}
length := int64(len(chunk))
to := from + length - 1
size := "*"
if totalSize >= 0 {
size = strconv.FormatInt(totalSize, 10)
}
req.Header.Set("Content-Type", "application/octet-stream")
if from == to+1 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size))
}
req.Header.Set("Content-Length", strconv.FormatInt(length, 10))
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
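		// Status 308 ("Resume Incomplete") means GCS persisted only part of
		// the chunk; the response Range header, e.g. "bytes=0-262143",
		// reveals how far the session actually got.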
if totalSize < 0 && resp.StatusCode == 308 {
groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range"))
end, err := strconv.ParseInt(groups[2], 10, 64)
if err != nil {
return err
}
bytesPut = end - from + 1
return nil
}
err = googleapi.CheckMediaResponse(resp)
if err != nil {
return err
}
bytesPut = to - from + 1
return nil
})
return bytesPut, err
}
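// Content-Range arithmetic, by example (values hypothetical): an
// intermediate 256 KiB chunk starting at offset 0 with the total still
// unknown is sent as "Content-Range: bytes 0-262143/*"; the final chunk of
// a 1 MiB object occupying bytes 786432-1048575 is sent as
// "Content-Range: bytes 786432-1048575/1048576".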
func (d *driver) context(context ctx.Context) context.Context {
return cloud.WithContext(context, dummyProjectID, d.client)
}
func (d *driver) pathToKey(path string) string {
return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/")
}
func (d *driver) pathToDirKey(path string) string {
return d.pathToKey(path) + "/"
}
func (d *driver) keyToPath(key string) string {
return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/")
}
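// Key mapping, by example: with rootDirectory "registry/" the path "/a/b"
// becomes the object key "registry/a/b" (pathToKey) or the directory key
// "registry/a/b/" (pathToDirKey), and keyToPath maps either form back to
// "/a/b".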

View File

@ -0,0 +1,311 @@
// +build include_gcs

package gcs
import (
"io/ioutil"
"os"
"testing"
"fmt"
ctx "github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/testsuites"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/cloud/storage"
"gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
var skipGCS func() string
func init() {
bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET")
credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
// Skip GCS storage driver tests if environment variable parameters are not provided
skipGCS = func() string {
if bucket == "" || credentials == "" {
return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS"
}
return ""
}
if skipGCS() != "" {
return
}
root, err := ioutil.TempDir("", "driver-")
if err != nil {
panic(err)
}
defer os.Remove(root)
var ts oauth2.TokenSource
var email string
var privateKey []byte
ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl)
if err != nil {
// Assume that the file contents are within the environment variable since it exists
// but does not contain a valid file path
jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl)
if err != nil {
panic(fmt.Sprintf("Error reading JWT config : %s", err))
}
email = jwtConfig.Email
privateKey = []byte(jwtConfig.PrivateKey)
if len(privateKey) == 0 {
panic("Error reading JWT config : missing private_key property")
}
if email == "" {
panic("Error reading JWT config : missing client_email property")
}
ts = jwtConfig.TokenSource(ctx.Background())
}
gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) {
parameters := driverParameters{
bucket: bucket,
rootDirectory: root,
email: email,
privateKey: privateKey,
client: oauth2.NewClient(ctx.Background(), ts),
chunkSize: defaultChunkSize,
}
return New(parameters)
}
testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
return gcsDriverConstructor(root)
}, skipGCS)
}
// Test Committing a FileWriter without having called Write
func TestCommitEmpty(t *testing.T) {
if skipGCS() != "" {
t.Skip(skipGCS())
}
validRoot, err := ioutil.TempDir("", "driver-")
if err != nil {
t.Fatalf("unexpected error creating temporary directory: %v", err)
}
defer os.Remove(validRoot)
driver, err := gcsDriverConstructor(validRoot)
if err != nil {
t.Fatalf("unexpected error creating rooted driver: %v", err)
}
filename := "/test"
ctx := ctx.Background()
writer, err := driver.Writer(ctx, filename, false)
defer driver.Delete(ctx, filename)
if err != nil {
t.Fatalf("driver.Writer: unexpected error: %v", err)
}
err = writer.Commit()
if err != nil {
t.Fatalf("writer.Commit: unexpected error: %v", err)
}
err = writer.Close()
if err != nil {
t.Fatalf("writer.Close: unexpected error: %v", err)
}
if writer.Size() != 0 {
t.Fatalf("writer.Size: %d != 0", writer.Size())
}
readContents, err := driver.GetContent(ctx, filename)
if err != nil {
t.Fatalf("driver.GetContent: unexpected error: %v", err)
}
if len(readContents) != 0 {
t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents))
}
}
// Test Committing a FileWriter after having written exactly
// defaultChunkSize bytes.
func TestCommit(t *testing.T) {
if skipGCS() != "" {
t.Skip(skipGCS())
}
validRoot, err := ioutil.TempDir("", "driver-")
if err != nil {
t.Fatalf("unexpected error creating temporary directory: %v", err)
}
defer os.Remove(validRoot)
driver, err := gcsDriverConstructor(validRoot)
if err != nil {
t.Fatalf("unexpected error creating rooted driver: %v", err)
}
filename := "/test"
ctx := ctx.Background()
contents := make([]byte, defaultChunkSize)
writer, err := driver.Writer(ctx, filename, false)
defer driver.Delete(ctx, filename)
if err != nil {
t.Fatalf("driver.Writer: unexpected error: %v", err)
}
_, err = writer.Write(contents)
if err != nil {
t.Fatalf("writer.Write: unexpected error: %v", err)
}
err = writer.Commit()
if err != nil {
t.Fatalf("writer.Commit: unexpected error: %v", err)
}
err = writer.Close()
if err != nil {
t.Fatalf("writer.Close: unexpected error: %v", err)
}
if writer.Size() != int64(len(contents)) {
t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents))
}
readContents, err := driver.GetContent(ctx, filename)
if err != nil {
t.Fatalf("driver.GetContent: unexpected error: %v", err)
}
if len(readContents) != len(contents) {
t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents))
}
}
func TestRetry(t *testing.T) {
if skipGCS() != "" {
t.Skip(skipGCS())
}
assertError := func(expected string, observed error) {
observedMsg := "<nil>"
if observed != nil {
observedMsg = observed.Error()
}
if observedMsg != expected {
t.Fatalf("expected %v, observed %v\n", expected, observedMsg)
}
}
err := retry(func() error {
return &googleapi.Error{
Code: 503,
Message: "google api error",
}
})
assertError("googleapi: Error 503: google api error", err)
err = retry(func() error {
return &googleapi.Error{
Code: 404,
Message: "google api error",
}
})
assertError("googleapi: Error 404: google api error", err)
err = retry(func() error {
return fmt.Errorf("error")
})
assertError("error", err)
}
func TestEmptyRootList(t *testing.T) {
if skipGCS() != "" {
t.Skip(skipGCS())
}
validRoot, err := ioutil.TempDir("", "driver-")
if err != nil {
t.Fatalf("unexpected error creating temporary directory: %v", err)
}
defer os.Remove(validRoot)
rootedDriver, err := gcsDriverConstructor(validRoot)
if err != nil {
t.Fatalf("unexpected error creating rooted driver: %v", err)
}
emptyRootDriver, err := gcsDriverConstructor("")
if err != nil {
t.Fatalf("unexpected error creating empty root driver: %v", err)
}
slashRootDriver, err := gcsDriverConstructor("/")
if err != nil {
t.Fatalf("unexpected error creating slash root driver: %v", err)
}
filename := "/test"
contents := []byte("contents")
ctx := ctx.Background()
err = rootedDriver.PutContent(ctx, filename, contents)
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
defer func() {
err := rootedDriver.Delete(ctx, filename)
if err != nil {
t.Fatalf("failed to remove %v due to %v\n", filename, err)
}
}()
	keys, err := emptyRootDriver.List(ctx, "/")
	if err != nil {
		t.Fatalf("unexpected error listing empty root content: %v", err)
	}
for _, path := range keys {
if !storagedriver.PathRegexp.MatchString(path) {
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
}
}
	keys, err = slashRootDriver.List(ctx, "/")
	if err != nil {
		t.Fatalf("unexpected error listing slash root content: %v", err)
	}
for _, path := range keys {
if !storagedriver.PathRegexp.MatchString(path) {
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
}
}
}
// TestMoveDirectory checks that moving a directory returns an error.
func TestMoveDirectory(t *testing.T) {
if skipGCS() != "" {
t.Skip(skipGCS())
}
validRoot, err := ioutil.TempDir("", "driver-")
if err != nil {
t.Fatalf("unexpected error creating temporary directory: %v", err)
}
defer os.Remove(validRoot)
driver, err := gcsDriverConstructor(validRoot)
if err != nil {
t.Fatalf("unexpected error creating rooted driver: %v", err)
}
ctx := ctx.Background()
contents := []byte("contents")
// Create a regular file.
err = driver.PutContent(ctx, "/parent/dir/foo", contents)
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
defer func() {
err := driver.Delete(ctx, "/parent")
if err != nil {
t.Fatalf("failed to remove /parent due to %v\n", err)
}
}()
err = driver.Move(ctx, "/parent/dir", "/parent/other")
if err == nil {
t.Fatalf("Moving directory /parent/dir /parent/other should have return a non-nil error\n")
}
}

View File

@ -0,0 +1,312 @@
package inmemory
import (
"fmt"
"io"
"io/ioutil"
"sync"
"time"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
)
const driverName = "inmemory"
func init() {
factory.Register(driverName, &inMemoryDriverFactory{})
}
// inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
type inMemoryDriverFactory struct{}
func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return New(), nil
}
type driver struct {
root *dir
mutex sync.RWMutex
}
// baseEmbed allows us to hide the Base embed.
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local map.
// Intended solely for example and testing purposes.
type Driver struct {
baseEmbed // embedded, hidden base driver.
}
var _ storagedriver.StorageDriver = &Driver{}
// New constructs a new Driver.
func New() *Driver {
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: &driver{
root: &dir{
common: common{
p: "/",
mod: time.Now(),
},
},
},
},
},
}
}
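// Minimal usage sketch (illustrative, not part of the upstream file; ctx
// stands for any context.Context): the in-memory driver needs no
// configuration, which is what makes it convenient in tests.
//
//	d := New()
//	if err := d.PutContent(ctx, "/greeting", []byte("hello")); err != nil {
//		// handle error
//	}
//	b, _ := d.GetContent(ctx, "/greeting") // b == []byte("hello")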
// Implement the storagedriver.StorageDriver interface.
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
rc, err := d.Reader(ctx, path, 0)
if err != nil {
return nil, err
}
defer rc.Close()
return ioutil.ReadAll(rc)
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error {
d.mutex.Lock()
defer d.mutex.Unlock()
normalized := normalize(p)
f, err := d.root.mkfile(normalized)
if err != nil {
// TODO(stevvooe): Again, we need to clarify when this is not a
// directory in StorageDriver API.
return fmt.Errorf("not a file")
}
f.truncate()
f.WriteAt(contents, 0)
return nil
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
if offset < 0 {
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
}
normalized := normalize(path)
found := d.root.find(normalized)
if found.path() != normalized {
return nil, storagedriver.PathNotFoundError{Path: path}
}
if found.isdir() {
return nil, fmt.Errorf("%q is a directory", path)
}
return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
d.mutex.Lock()
defer d.mutex.Unlock()
normalized := normalize(path)
f, err := d.root.mkfile(normalized)
if err != nil {
return nil, fmt.Errorf("not a file")
}
if !append {
f.truncate()
}
return d.newWriter(f), nil
}
// Stat returns info about the provided path.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
normalized := normalize(path)
found := d.root.find(normalized)
if found.path() != normalized {
return nil, storagedriver.PathNotFoundError{Path: path}
}
fi := storagedriver.FileInfoFields{
Path: path,
IsDir: found.isdir(),
ModTime: found.modtime(),
}
if !fi.IsDir {
fi.Size = int64(len(found.(*file).data))
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
normalized := normalize(path)
found := d.root.find(normalized)
if !found.isdir() {
return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this...
}
entries, err := found.(*dir).list(normalized)
if err != nil {
switch err {
case errNotExists:
return nil, storagedriver.PathNotFoundError{Path: path}
case errIsNotDir:
return nil, fmt.Errorf("not a directory")
default:
return nil, err
}
}
return entries, nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
d.mutex.Lock()
defer d.mutex.Unlock()
normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath)
err := d.root.move(normalizedSrc, normalizedDst)
switch err {
case errNotExists:
return storagedriver.PathNotFoundError{Path: destPath}
default:
return err
}
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
d.mutex.Lock()
defer d.mutex.Unlock()
normalized := normalize(path)
err := d.root.delete(normalized)
switch err {
case errNotExists:
return storagedriver.PathNotFoundError{Path: path}
default:
return err
}
}
// URLFor returns a URL which may be used to retrieve the content stored at the given path.
// May return ErrUnsupportedMethod in certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", storagedriver.ErrUnsupportedMethod{}
}
type writer struct {
d *driver
f *file
closed bool
committed bool
cancelled bool
}
func (d *driver) newWriter(f *file) storagedriver.FileWriter {
return &writer{
d: d,
f: f,
}
}
func (w *writer) Write(p []byte) (int, error) {
if w.closed {
return 0, fmt.Errorf("already closed")
} else if w.committed {
return 0, fmt.Errorf("already committed")
} else if w.cancelled {
return 0, fmt.Errorf("already cancelled")
}
w.d.mutex.Lock()
defer w.d.mutex.Unlock()
return w.f.WriteAt(p, int64(len(w.f.data)))
}
func (w *writer) Size() int64 {
w.d.mutex.RLock()
defer w.d.mutex.RUnlock()
return int64(len(w.f.data))
}
func (w *writer) Close() error {
if w.closed {
return fmt.Errorf("already closed")
}
w.closed = true
return nil
}
func (w *writer) Cancel() error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
}
w.cancelled = true
w.d.mutex.Lock()
defer w.d.mutex.Unlock()
return w.d.root.delete(w.f.path())
}
func (w *writer) Commit() error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
} else if w.cancelled {
return fmt.Errorf("already cancelled")
}
w.committed = true
return nil
}

View File

@ -0,0 +1,19 @@
package inmemory
import (
"testing"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/testsuites"
"gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
func init() {
inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) {
return New(), nil
}
testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip)
}

Some files were not shown because too many files have changed in this diff.