commit c02f1a5507353fff1b430f9c1dc90fca0ba70df1
Author: Stephen J Day
Date:   Tue Jan 6 10:37:27 2015 -0800

    Move registry package out of repo root

    Since the repo is no longer just the registry, we are moving the registry
    web application package out of the repo root into a sub-package. We may
    break down the registry package further to separate webapp components and
    bring the client package under it. This change accomplishes the task of
    freeing up the repo root for a distribution-oriented package. A stub
    doc.go file is left in place to declare intent.

    Signed-off-by: Stephen J Day

diff --git a/docs/api_test.go b/docs/api_test.go
new file mode 100644
index 00000000..b0f3bb2b
--- /dev/null
+++ b/docs/api_test.go
@@ -0,0 +1,541 @@
+package registry
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"testing"
+
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/configuration"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	_ "github.com/docker/distribution/storagedriver/inmemory"
+	"github.com/docker/distribution/testutil"
+	"github.com/docker/libtrust"
+	"github.com/gorilla/handlers"
+)
+
+// TestCheckAPI hits the base endpoint (/v2/) and ensures we return the
+// specified 200 OK response.
+func TestCheckAPI(t *testing.T) {
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+		},
+	}
+
+	app := NewApp(config)
+	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+
+	if err != nil {
+		t.Fatalf("error creating url builder: %v", err)
+	}
+
+	baseURL, err := builder.BuildBaseURL()
+	if err != nil {
+		t.Fatalf("unexpected error building base url: %v", err)
+	}
+
+	resp, err := http.Get(baseURL)
+	if err != nil {
+		t.Fatalf("unexpected error issuing request: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "issuing api base check", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Type":   []string{"application/json; charset=utf-8"},
+		"Content-Length": []string{"2"},
+	})
+
+	p, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("unexpected error reading response body: %v", err)
+	}
+
+	if string(p) != "{}" {
+		t.Fatalf("unexpected response body: %v", string(p))
+	}
+}
+
+// TestLayerAPI conducts a full test of the layer API.
+func TestLayerAPI(t *testing.T) {
+	// TODO(stevvooe): This test code is complete junk but it should cover the
+	// complete flow. This must be broken down and checked against the
+	// specification *before* we submit the final to docker core.
+ + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + + app := NewApp(config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := v2.NewURLBuilderFromString(server.URL) + + if err != nil { + t.Fatalf("error creating url builder: %v", err) + } + + imageName := "foo/bar" + // "build" our layer file + layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + layerDigest := digest.Digest(tarSumStr) + + // ----------------------------------- + // Test fetch for non-existent content + layerURL, err := builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) + + // ------------------------------------------ + // Test head request for non-existent content + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) + + // ------------------------------------------ + // Upload a layer + layerUploadURL, err := builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("error building upload url: %v", err) + } + + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("error starting layer upload: %v", err) + } + + checkResponse(t, "starting layer upload", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + }) + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerFile.Seek(0, os.SEEK_SET) + + // TODO(sday): Cancel the layer upload here and restart. + + uploadURLBase := startPushLayer(t, builder, imageName) + pushLayer(t, builder, imageName, layerDigest, uploadURLBase, layerFile) + + // ------------------------ + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking head on existing layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + }) + + // ---------------- + // Fetch the layer! + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + }) + + // Verify the body + verifier := digest.NewDigestVerifier(layerDigest) + io.Copy(verifier, resp.Body) + + if !verifier.Verified() { + t.Fatalf("response body did not pass verification") + } + + // Missing tests: + // - Upload the same tarsum file under and different repository and + // ensure the content remains uncorrupted. 
+} + +func TestManifestAPI(t *testing.T) { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + + app := NewApp(config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := v2.NewURLBuilderFromString(server.URL) + if err != nil { + t.Fatalf("unexpected error creating url builder: %v", err) + } + + imageName := "foo/bar" + tag := "thetag" + + manifestURL, err := builder.BuildManifestURL(imageName, tag) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + + // TODO(stevvooe): Shoot. The error setup is not working out. The content- + // type headers are being set after writing the status code. + // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { + // t.Fatalf("unexpected content type: %v != 'application/json'", + // resp.Header.Get("Content-Type")) + // } + dec := json.NewDecoder(resp.Body) + + var respErrs v2.Errors + if err := dec.Decode(&respErrs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(respErrs.Errors) == 0 { + t.Fatalf("expected errors in response") + } + + if respErrs.Errors[0].Code != v2.ErrorCodeManifestUnknown { + t.Fatalf("expected manifest unknown error: got %v", respErrs) + } + + tagsURL, err := builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&respErrs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(respErrs.Errors) == 0 { + t.Fatalf("expected errors in response") + } + + if respErrs.Errors[0].Code != v2.ErrorCodeNameUnknown { + t.Fatalf("expected respository unknown error: got %v", respErrs) + } + + // -------------------------------- + // Attempt to push unsigned manifest with missing layers + unsignedManifest := &manifest.Manifest{ + Name: imageName, + Tag: tag, + FSLayers: []manifest.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + defer resp.Body.Close() + checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&respErrs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + var unverified int + var missingLayers int + var invalidDigests int + + for _, err := range respErrs.Errors { + switch err.Code { + case v2.ErrorCodeManifestUnverified: + unverified++ + case v2.ErrorCodeBlobUnknown: + missingLayers++ + case v2.ErrorCodeDigestInvalid: + // TODO(stevvooe): This error isn't quite descriptive enough -- + // the layer with an invalid digest isn't 
identified. + invalidDigests++ + default: + t.Fatalf("unexpected error: %v", err) + } + } + + if unverified != 1 { + t.Fatalf("should have received one unverified manifest error: %v", respErrs) + } + + if missingLayers != 2 { + t.Fatalf("should have received two missing layer errors: %v", respErrs) + } + + if invalidDigests != 2 { + t.Fatalf("should have received two invalid digest errors: %v", respErrs) + } + + // TODO(stevvooe): Add a test case where we take a mostly valid registry, + // tamper with the content and ensure that we get a unverified manifest + // error. + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase := startPushLayer(t, builder, imageName) + pushLayer(t, builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the signed manifest with all layers pushed. + signedManifest, err := manifest.Sign(unsignedManifest, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) + + checkResponse(t, "putting signed manifest", resp, http.StatusOK) + + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + + var fetchedManifest manifest.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { + t.Fatalf("manifests do not match") + } + + // Ensure that the tag is listed. 
+ resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } +} + +func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { + var body []byte + if sm, ok := v.(*manifest.SignedManifest); ok { + body = sm.Raw + } else { + var err error + body, err = json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling %v: %v", v, err) + } + } + + req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) + if err != nil { + t.Fatalf("error creating request for %s: %v", msg, err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error doing put request while %s: %v", msg, err) + } + + return resp +} + +func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string { + layerUploadURL, err := ub.BuildBlobUploadURL(name) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name), resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location") +} + +// pushLayer pushes the layer content returning the url on success. +func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string { + rsLength, _ := rs.Seek(0, os.SEEK_END) + rs.Seek(0, os.SEEK_SET) + + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + + "digest": []string{dgst.String()}, + + // TODO(stevvooe): Layer upload can be completed with and without size + // argument. We'll need to add a test that checks the latter path. 
+ "size": []string{fmt.Sprint(rsLength)}, + }.Encode() + + uploadURL := u.String() + + // Just do a monolithic upload + req, err := http.NewRequest("PUT", uploadURL, rs) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error doing put: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + expectedLayerURL, err := ub.BuildBlobURL(name, dgst) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location") +} + +func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { + if resp.StatusCode != expectedStatus { + t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) + maybeDumpResponse(t, resp) + + t.FailNow() + } +} + +func maybeDumpResponse(t *testing.T, resp *http.Response) { + if d, err := httputil.DumpResponse(resp, true); err != nil { + t.Logf("error dumping response: %v", err) + } else { + t.Logf("response:\n%s", string(d)) + } +} + +// matchHeaders checks that the response has at least the headers. If not, the +// test will fail. If a passed in header value is "*", any non-zero value will +// suffice as a match. +func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { + for k, vs := range headers { + if resp.Header.Get(k) == "" { + t.Fatalf("response missing header %q", k) + } + + for _, v := range vs { + if v == "*" { + // Just ensure there is some value. + if len(resp.Header[k]) > 0 { + continue + } + } + + for _, hv := range resp.Header[k] { + if hv != v { + t.Fatalf("header value not matched in response: %q != %q", hv, v) + } + } + } + } +} diff --git a/docs/app.go b/docs/app.go new file mode 100644 index 00000000..fefeb084 --- /dev/null +++ b/docs/app.go @@ -0,0 +1,263 @@ +package registry + +import ( + "fmt" + "net/http" + + "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/auth" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/storage" + "github.com/docker/distribution/storagedriver" + "github.com/docker/distribution/storagedriver/factory" + + log "github.com/Sirupsen/logrus" + "github.com/gorilla/mux" +) + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + Config configuration.Configuration + + router *mux.Router + + // driver maintains the app global storage driver instance. + driver storagedriver.StorageDriver + + // services contains the main services instance for the application. + services *storage.Services + + tokenProvider tokenProvider + + accessController auth.AccessController +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. +func NewApp(configuration configuration.Configuration) *App { + app := &App{ + Config: configuration, + router: v2.Router(), + } + + // Register the handler dispatchers. 
+ app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) + app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, layerDispatcher) + app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) + + driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) + + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. + panic(err) + } + + app.driver = driver + app.services = storage.NewServices(app.driver) + app.tokenProvider = newHMACTokenProvider(configuration.HTTP.Secret) + + authType := configuration.Auth.Type() + + if authType != "" { + accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) + if err != nil { + panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + } + app.accessController = accessController + } + + return app +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + app.router.ServeHTTP(w, r) +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. +func (app *App) register(routeName string, dispatch dispatchFunc) { + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// singleStatusResponseWriter only allows the first status to be written to be +// the valid request status. The current use case of this class should be +// factored out. +type singleStatusResponseWriter struct { + http.ResponseWriter + status int +} + +func (ssrw *singleStatusResponseWriter) WriteHeader(status int) { + if ssrw.status != 0 { + return + } + ssrw.status = status + ssrw.ResponseWriter.WriteHeader(status) +} + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. +func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + context := app.context(r) + + if err := app.authorized(w, r, context); err != nil { + return + } + + context.log = log.WithField("name", context.Name) + handler := dispatch(context, r) + + ssrw := &singleStatusResponseWriter{ResponseWriter: w} + context.log.Infoln("handler", resolveHandlerName(r.Method, handler)) + handler.ServeHTTP(ssrw, r) + + // Automated error response handling here. 
Handlers may return their + // own errors if they need different behavior (such as range errors + // for layer upload). + if context.Errors.Len() > 0 { + if ssrw.status == 0 { + w.WriteHeader(http.StatusBadRequest) + } + serveJSON(w, context.Errors) + } + }) +} + +// context constructs the context object for the application. This only be +// called once per request. +func (app *App) context(r *http.Request) *Context { + vars := mux.Vars(r) + context := &Context{ + App: app, + Name: vars["name"], + urlBuilder: v2.NewURLBuilderFromRequest(r), + } + + // Store vars for underlying handlers. + context.vars = vars + + return context +} + +// authorized checks if the request can proceed with with request access- +// level. If it cannot, the method will return an error. +func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { + if app.accessController == nil { + return nil // access controller is not enabled. + } + + var accessRecords []auth.Access + + if context.Name != "" { + resource := auth.Resource{ + Type: "repository", + Name: context.Name, + } + + switch r.Method { + case "GET", "HEAD": + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "pull", + }) + case "POST", "PUT", "PATCH": + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "pull", + }, + auth.Access{ + Resource: resource, + Action: "push", + }) + case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + } else { + // Only allow the name not to be set on the base route. + route := mux.CurrentRoute(r) + + if route == nil || route.GetName() != v2.RouteNameBase { + // For this to be properly secured, context.Name must always be set + // for a resource that may make a modification. The only condition + // under which name is not set and we still allow access is when the + // base route is accessed. This section prevents us from making that + // mistake elsewhere in the code, allowing any operation to proceed. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + + var errs v2.Errors + errs.Push(v2.ErrorCodeUnauthorized) + serveJSON(w, errs) + } + } + + if err := app.accessController.Authorized(r, accessRecords...); err != nil { + switch err := err.(type) { + case auth.Challenge: + w.Header().Set("Content-Type", "application/json; charset=utf-8") + err.ServeHTTP(w, r) + + var errs v2.Errors + errs.Push(v2.ErrorCodeUnauthorized, accessRecords) + serveJSON(w, errs) + default: + // This condition is a potential security problem either in + // the configuration or whatever is backing the access + // controller. Just return a bad request with no information + // to avoid exposure. The request should not proceed. + context.log.Errorf("error checking authorization: %v", err) + w.WriteHeader(http.StatusBadRequest) + } + + return err + } + + return nil +} + +// apiBase implements a simple yes-man for doing overall checks against the +// api. This can support auth roundtrips to support docker login. +func apiBase(w http.ResponseWriter, r *http.Request) { + const emptyJSON = "{}" + // Provide a simple /v2/ 200 OK response with empty json response. 
+ w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) + + fmt.Fprint(w, emptyJSON) +} diff --git a/docs/app_test.go b/docs/app_test.go new file mode 100644 index 00000000..4d9535f7 --- /dev/null +++ b/docs/app_test.go @@ -0,0 +1,194 @@ +package registry + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/distribution/api/v2" + _ "github.com/docker/distribution/auth/silly" + "github.com/docker/distribution/configuration" +) + +// TestAppDispatcher builds an application with a test dispatcher and ensures +// that requests are properly dispatched and the handlers are constructed. +// This only tests the dispatch mechanism. The underlying dispatchers must be +// tested individually. +func TestAppDispatcher(t *testing.T) { + app := &App{ + Config: configuration.Configuration{}, + router: v2.Router(), + } + server := httptest.NewServer(app) + router := v2.Router() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { + return func(ctx *Context, r *http.Request) http.Handler { + // Always checks the same name context + if ctx.Name != ctx.vars["name"] { + t.Fatalf("unexpected name: %q != %q", ctx.Name, "foo/bar") + } + + // Check that we have all that is expected + for expectedK, expectedV := range expectedVars { + if ctx.vars[expectedK] != expectedV { + t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.vars[expectedK], expectedV) + } + } + + // Check that we only have variables that are expected + for k, v := range ctx.vars { + _, ok := expectedVars[k] + + if !ok { // name is checked on context + // We have an unexpected key, fail + t.Fatalf("unexpected key %q in vars with value %q", k, v) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + } + } + + // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string + unflatten := func(vars []string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(vars)-1; i = i + 2 { + m[vars[i]] = vars[i+1] + } + + return m + } + + for _, testcase := range []struct { + endpoint string + vars []string + }{ + { + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "foo/bar", + "tag", "sometag", + }, + }, + { + endpoint: v2.RouteNameTags, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "foo/bar", + "digest", "tarsum.v1+bogus:abcdef0123456789", + }, + }, + { + endpoint: v2.RouteNameBlobUpload, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUploadChunk, + vars: []string{ + "name", "foo/bar", + "uuid", "theuuid", + }, + }, + } { + app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) + route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) + u, err := route.URL(testcase.vars...) + + if err != nil { + t.Fatal(err) + } + + resp, err := http.Get(u.String()) + + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) + } + } +} + +// TestNewApp covers the creation of an application via NewApp with a +// configuration. 
+func TestNewApp(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": nil, + }, + Auth: configuration.Auth{ + // For now, we simply test that new auth results in a viable + // application. + "silly": { + "realm": "realm-test", + "service": "service-test", + }, + }, + } + + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. We might want to tweak this + // behavior. + app := NewApp(config) + + server := httptest.NewServer(app) + builder, err := v2.NewURLBuilderFromString(server.URL) + if err != nil { + t.Fatalf("error creating urlbuilder: %v", err) + } + + baseURL, err := builder.BuildBaseURL() + if err != nil { + t.Fatalf("error creating baseURL: %v", err) + } + + // TODO(stevvooe): The rest of this test might belong in the API tests. + + // Just hit the app and make sure we get a 401 Unauthorized error. + req, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer req.Body.Close() + + if req.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected status code during request: %v", err) + } + + if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { + t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") + } + + expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" + if req.Header.Get("Authorization") != expectedAuthHeader { + t.Fatalf("unexpected authorization header: %q != %q", req.Header.Get("Authorization"), expectedAuthHeader) + } + + var errs v2.Errors + dec := json.NewDecoder(req.Body) + if err := dec.Decode(&errs); err != nil { + t.Fatalf("error decoding error response: %v", err) + } + + if errs.Errors[0].Code != v2.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized) + } +} diff --git a/docs/context.go b/docs/context.go new file mode 100644 index 00000000..88193cda --- /dev/null +++ b/docs/context.go @@ -0,0 +1,32 @@ +package registry + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/api/v2" +) + +// Context should contain the request specific context for use in across +// handlers. Resources that don't need to be shared across handlers should not +// be on this object. +type Context struct { + // App points to the application structure that created this context. + *App + + // Name is the prefix for the current request. Corresponds to the + // namespace/repository associated with the image. + Name string + + // Errors is a collection of errors encountered during the request to be + // returned to the client API. If errors are added to the collection, the + // handler *must not* start the response via http.ResponseWriter. + Errors v2.Errors + + // vars contains the extracted gorilla/mux variables that can be used for + // assignment. + vars map[string]string + + // log provides a context specific logger. + log *logrus.Entry + + urlBuilder *v2.URLBuilder +} diff --git a/docs/helpers.go b/docs/helpers.go new file mode 100644 index 00000000..6bcb4ae8 --- /dev/null +++ b/docs/helpers.go @@ -0,0 +1,32 @@ +package registry + +import ( + "encoding/json" + "io" + "net/http" +) + +// serveJSON marshals v and sets the content-type header to +// 'application/json'. If a different status code is required, call +// ResponseWriter.WriteHeader before this function. 
+func serveJSON(w http.ResponseWriter, v interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + enc := json.NewEncoder(w) + + if err := enc.Encode(v); err != nil { + return err + } + + return nil +} + +// closeResources closes all the provided resources after running the target +// handler. +func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, closer := range closers { + defer closer.Close() + } + handler.ServeHTTP(w, r) + }) +} diff --git a/docs/images.go b/docs/images.go new file mode 100644 index 00000000..a6b55859 --- /dev/null +++ b/docs/images.go @@ -0,0 +1,114 @@ +package registry + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/storage" + "github.com/gorilla/handlers" +) + +// imageManifestDispatcher takes the request context and builds the +// appropriate handler for handling image manifest requests. +func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { + imageManifestHandler := &imageManifestHandler{ + Context: ctx, + Tag: ctx.vars["tag"], + } + + imageManifestHandler.log = imageManifestHandler.log.WithField("tag", imageManifestHandler.Tag) + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "PUT": http.HandlerFunc(imageManifestHandler.PutImageManifest), + "DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest), + } +} + +// imageManifestHandler handles http operations on image manifests. +type imageManifestHandler struct { + *Context + + Tag string +} + +// GetImageManifest fetches the image manifest from the storage backend, if it exists. +func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { + manifests := imh.services.Manifests() + manifest, err := manifests.Get(imh.Name, imh.Tag) + + if err != nil { + imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) + w.WriteHeader(http.StatusNotFound) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(manifest.Raw))) + w.Write(manifest.Raw) +} + +// PutImageManifest validates and stores and image in the registry. +func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { + manifests := imh.services.Manifests() + dec := json.NewDecoder(r.Body) + + var manifest manifest.SignedManifest + if err := dec.Decode(&manifest); err != nil { + imh.Errors.Push(v2.ErrorCodeManifestInvalid, err) + w.WriteHeader(http.StatusBadRequest) + return + } + + if err := manifests.Put(imh.Name, imh.Tag, &manifest); err != nil { + // TODO(stevvooe): These error handling switches really need to be + // handled by an app global mapper. + switch err := err.(type) { + case storage.ErrManifestVerification: + for _, verificationError := range err { + switch verificationError := verificationError.(type) { + case storage.ErrUnknownLayer: + imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer) + case storage.ErrManifestUnverified: + imh.Errors.Push(v2.ErrorCodeManifestUnverified) + default: + if verificationError == digest.ErrDigestInvalidFormat { + // TODO(stevvooe): We need to really need to move all + // errors to types. Its much more straightforward. 
+ imh.Errors.Push(v2.ErrorCodeDigestInvalid) + } else { + imh.Errors.PushErr(verificationError) + } + } + } + default: + imh.Errors.PushErr(err) + } + + w.WriteHeader(http.StatusBadRequest) + return + } +} + +// DeleteImageManifest removes the image with the given tag from the registry. +func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { + manifests := imh.services.Manifests() + if err := manifests.Delete(imh.Name, imh.Tag); err != nil { + switch err := err.(type) { + case storage.ErrUnknownManifest: + imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) + w.WriteHeader(http.StatusNotFound) + default: + imh.Errors.Push(v2.ErrorCodeUnknown, err) + w.WriteHeader(http.StatusBadRequest) + } + return + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/docs/layer.go b/docs/layer.go new file mode 100644 index 00000000..a7c46c31 --- /dev/null +++ b/docs/layer.go @@ -0,0 +1,62 @@ +package registry + +import ( + "net/http" + + "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/storage" + "github.com/gorilla/handlers" +) + +// layerDispatcher uses the request context to build a layerHandler. +func layerDispatcher(ctx *Context, r *http.Request) http.Handler { + dgst, err := digest.ParseDigest(ctx.vars["digest"]) + + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) + }) + } + + layerHandler := &layerHandler{ + Context: ctx, + Digest: dgst, + } + + layerHandler.log = layerHandler.log.WithField("digest", dgst) + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(layerHandler.GetLayer), + "HEAD": http.HandlerFunc(layerHandler.GetLayer), + } +} + +// layerHandler serves http layer requests. +type layerHandler struct { + *Context + + Digest digest.Digest +} + +// GetLayer fetches the binary data from backend storage returns it in the +// response. +func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { + layers := lh.services.Layers() + + layer, err := layers.Fetch(lh.Name, lh.Digest) + + if err != nil { + switch err := err.(type) { + case storage.ErrUnknownLayer: + w.WriteHeader(http.StatusNotFound) + lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer) + default: + lh.Errors.Push(v2.ErrorCodeUnknown, err) + } + return + } + defer layer.Close() + + http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer) +} diff --git a/docs/layerupload.go b/docs/layerupload.go new file mode 100644 index 00000000..b694a677 --- /dev/null +++ b/docs/layerupload.go @@ -0,0 +1,245 @@ +package registry + +import ( + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/storage" + "github.com/gorilla/handlers" +) + +// layerUploadDispatcher constructs and returns the layer upload handler for +// the given request context. 
+func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { + luh := &layerUploadHandler{ + Context: ctx, + UUID: ctx.vars["uuid"], + } + + handler := http.Handler(handlers.MethodHandler{ + "POST": http.HandlerFunc(luh.StartLayerUpload), + "GET": http.HandlerFunc(luh.GetUploadStatus), + "HEAD": http.HandlerFunc(luh.GetUploadStatus), + "PUT": http.HandlerFunc(luh.PutLayerChunk), + "DELETE": http.HandlerFunc(luh.CancelLayerUpload), + }) + + if luh.UUID != "" { + luh.log = luh.log.WithField("uuid", luh.UUID) + + state, err := ctx.tokenProvider.layerUploadStateFromToken(r.FormValue("_state")) + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logrus.Infof("error resolving upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + }) + } + + layers := ctx.services.Layers() + upload, err := layers.Resume(state) + if err != nil && err != storage.ErrLayerUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logrus.Infof("error resolving upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + }) + } + + luh.Upload = upload + handler = closeResources(handler, luh.Upload) + } + + return handler +} + +// layerUploadHandler handles the http layer upload process. +type layerUploadHandler struct { + *Context + + // UUID identifies the upload instance for the current request. + UUID string + + Upload storage.LayerUpload +} + +// StartLayerUpload begins the layer upload process and allocates a server- +// side upload session. +func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) { + layers := luh.services.Layers() + upload, err := layers.Upload(luh.Name) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + luh.Upload = upload + defer luh.Upload.Close() + + if err := luh.layerUploadResponse(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by uuid. +func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + } + + if err := luh.layerUploadResponse(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +// PutLayerChunk receives a layer chunk during the layer upload process, +// possible completing the upload with a checksum and length. +func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + } + + var finished bool + + // TODO(stevvooe): This is woefully incomplete. Missing stuff: + // + // 1. Extract information from range header, if present. + // 2. Check offset of current layer. + // 3. Emit correct error responses. 
+ + // Read in the chunk + io.Copy(luh.Upload, r.Body) + + if err := luh.maybeCompleteUpload(w, r); err != nil { + if err != errNotReadyToComplete { + switch err := err.(type) { + case storage.ErrLayerInvalidSize: + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeSizeInvalid, err) + return + case storage.ErrLayerInvalidDigest: + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + return + default: + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + } + } + + if err := luh.layerUploadResponse(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + if finished { + w.WriteHeader(http.StatusCreated) + } else { + w.WriteHeader(http.StatusAccepted) + } +} + +// CancelLayerUpload cancels an in-progress upload of a layer. +func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + } + +} + +// layerUploadResponse provides a standard request for uploading layers and +// chunk responses. This sets the correct headers but the response status is +// left to the caller. +func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error { + values := make(url.Values) + stateToken, err := luh.Context.tokenProvider.layerUploadStateToToken(storage.LayerUploadState{Name: luh.Upload.Name(), UUID: luh.Upload.UUID(), Offset: luh.Upload.Offset()}) + if err != nil { + logrus.Infof("error building upload state token: %s", err) + return err + } + values.Set("_state", stateToken) + uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(luh.Upload.Name(), luh.Upload.UUID(), values) + if err != nil { + logrus.Infof("error building upload url: %s", err) + return err + } + + w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", luh.Upload.Offset())) + + return nil +} + +var errNotReadyToComplete = fmt.Errorf("not ready to complete upload") + +// maybeCompleteUpload tries to complete the upload if the correct parameters +// are available. Returns errNotReadyToComplete if not ready to complete. +func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error { + // If we get a digest and length, we can finish the upload. + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + sizeStr := r.FormValue("size") + + if dgstStr == "" { + return errNotReadyToComplete + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + return err + } + + var size int64 + if sizeStr != "" { + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return err + } + } else { + size = -1 + } + + luh.completeUpload(w, r, size, dgst) + return nil +} + +// completeUpload finishes out the upload with the correct response. 
+func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) { + layer, err := luh.Upload.Finish(size, dgst) + if err != nil { + luh.Errors.Push(v2.ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest()) + if err != nil { + luh.Errors.Push(v2.ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set("Location", layerURL) + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusCreated) +} diff --git a/docs/tags.go b/docs/tags.go new file mode 100644 index 00000000..18f6add2 --- /dev/null +++ b/docs/tags.go @@ -0,0 +1,60 @@ +package registry + +import ( + "encoding/json" + "net/http" + + "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/storage" + "github.com/gorilla/handlers" +) + +// tagsDispatcher constructs the tags handler api endpoint. +func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { + tagsHandler := &tagsHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(tagsHandler.GetTags), + } +} + +// tagsHandler handles requests for lists of tags under a repository name. +type tagsHandler struct { + *Context +} + +type tagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// GetTags returns a json list of tags for a specific image name. +func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + manifests := th.services.Manifests() + + tags, err := manifests.Tags(th.Name) + if err != nil { + switch err := err.(type) { + case storage.ErrUnknownRepository: + w.WriteHeader(404) + th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Name}) + default: + th.Errors.PushErr(err) + } + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + enc := json.NewEncoder(w) + if err := enc.Encode(tagsAPIResponse{ + Name: th.Name, + Tags: tags, + }); err != nil { + th.Errors.PushErr(err) + return + } +} diff --git a/docs/tokens.go b/docs/tokens.go new file mode 100644 index 00000000..276b896e --- /dev/null +++ b/docs/tokens.go @@ -0,0 +1,65 @@ +package registry + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/docker/distribution/storage" +) + +// tokenProvider contains methods for serializing and deserializing state from token strings. +type tokenProvider interface { + // layerUploadStateFromToken retrieves the LayerUploadState for a given state token. + layerUploadStateFromToken(stateToken string) (storage.LayerUploadState, error) + + // layerUploadStateToToken returns a token string representing the given LayerUploadState. 
+ layerUploadStateToToken(layerUploadState storage.LayerUploadState) (string, error) +} + +type hmacTokenProvider struct { + secret string +} + +func newHMACTokenProvider(secret string) tokenProvider { + return &hmacTokenProvider{secret: secret} +} + +// layerUploadStateFromToken deserializes the given HMAC stateToken and validates the prefix HMAC +func (ts *hmacTokenProvider) layerUploadStateFromToken(stateToken string) (storage.LayerUploadState, error) { + var lus storage.LayerUploadState + + tokenBytes, err := base64.URLEncoding.DecodeString(stateToken) + if err != nil { + return lus, err + } + mac := hmac.New(sha256.New, []byte(ts.secret)) + + if len(tokenBytes) < mac.Size() { + return lus, fmt.Errorf("Invalid token") + } + + macBytes := tokenBytes[:mac.Size()] + messageBytes := tokenBytes[mac.Size():] + + mac.Write(messageBytes) + if !hmac.Equal(mac.Sum(nil), macBytes) { + return lus, fmt.Errorf("Invalid token") + } + + if err := json.Unmarshal(messageBytes, &lus); err != nil { + return lus, err + } + + return lus, nil +} + +// layerUploadStateToToken serializes the given LayerUploadState to JSON with an HMAC prepended +func (ts *hmacTokenProvider) layerUploadStateToToken(lus storage.LayerUploadState) (string, error) { + mac := hmac.New(sha256.New, []byte(ts.secret)) + stateJSON := fmt.Sprintf("{\"Name\": \"%s\", \"UUID\": \"%s\", \"Offset\": %d}", lus.Name, lus.UUID, lus.Offset) + mac.Write([]byte(stateJSON)) + return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), stateJSON...)), nil +} diff --git a/docs/tokens_test.go b/docs/tokens_test.go new file mode 100644 index 00000000..a447438a --- /dev/null +++ b/docs/tokens_test.go @@ -0,0 +1,121 @@ +package registry + +import ( + "testing" + + "github.com/docker/distribution/storage" +) + +var layerUploadStates = []storage.LayerUploadState{ + { + Name: "hello", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "hello-world", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "h3ll0_w0rld", + UUID: "abcd-1234-qwer-0987", + Offset: 1337, + }, + { + Name: "ABCDEFG", + UUID: "ABCD-1234-QWER-0987", + Offset: 1234567890, + }, + { + Name: "this-is-A-sort-of-Long-name-for-Testing", + UUID: "dead-1234-beef-0987", + Offset: 8675309, + }, +} + +var secrets = []string{ + "supersecret", + "12345", + "a", + "SuperSecret", + "Sup3r... S3cr3t!", + "This is a reasonably long secret key that is used for the purpose of testing.", + "\u2603+\u2744", // snowman+snowflake +} + +// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and +// validates that the tokens can be used to reconstruct the proper upload state. +func TestLayerUploadTokens(t *testing.T) { + tokenProvider := newHMACTokenProvider("supersecret") + + for _, testcase := range layerUploadStates { + token, err := tokenProvider.layerUploadStateToToken(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := tokenProvider.layerUploadStateFromToken(token) + if err != nil { + t.Fatal(err) + } + + assertLayerUploadStateEquals(t, testcase, lus) + } +} + +// TestHMACValidate ensures that any HMAC token providers are compatible if and +// only if they share the same secret. 
+func TestHMACValidation(t *testing.T) { + for _, secret := range secrets { + tokenProvider1 := newHMACTokenProvider(secret) + tokenProvider2 := newHMACTokenProvider(secret) + badTokenProvider := newHMACTokenProvider("DifferentSecret") + + for _, testcase := range layerUploadStates { + token, err := tokenProvider1.layerUploadStateToToken(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := tokenProvider2.layerUploadStateFromToken(token) + if err != nil { + t.Fatal(err) + } + + assertLayerUploadStateEquals(t, testcase, lus) + + _, err = badTokenProvider.layerUploadStateFromToken(token) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) + } + + badToken, err := badTokenProvider.layerUploadStateToToken(testcase) + if err != nil { + t.Fatal(err) + } + + _, err = tokenProvider1.layerUploadStateFromToken(badToken) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) + } + + _, err = tokenProvider2.layerUploadStateFromToken(badToken) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) + } + } + } +} + +func assertLayerUploadStateEquals(t *testing.T, expected storage.LayerUploadState, received storage.LayerUploadState) { + if expected.Name != received.Name { + t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) + } + if expected.UUID != received.UUID { + t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) + } + if expected.Offset != received.Offset { + t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) + } +} diff --git a/docs/util.go b/docs/util.go new file mode 100644 index 00000000..976ddf31 --- /dev/null +++ b/docs/util.go @@ -0,0 +1,27 @@ +package registry + +import ( + "net/http" + "reflect" + "runtime" + + "github.com/gorilla/handlers" +) + +// functionName returns the name of the function fn. +func functionName(fn interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +// resolveHandlerName attempts to resolve a nice, pretty name for the passed +// in handler. +func resolveHandlerName(method string, handler http.Handler) string { + switch v := handler.(type) { + case handlers.MethodHandler: + return functionName(v[method]) + case http.HandlerFunc: + return functionName(v) + default: + return functionName(handler.ServeHTTP) + } +}
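
For readers picking up the relocated package, the tests above double as usage documentation: the registry web application is simply an http.Handler built from a configuration. What follows is a minimal sketch of that flow, modeled on TestCheckAPI and TestNewApp rather than on any blessed entrypoint. The import path for the registry sub-package is an assumption (the diff shows the files under docs/, while the commit message describes moving them into a sub-package), and error handling is reduced to panics for brevity.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/docker/distribution/api/v2"
	"github.com/docker/distribution/configuration"
	"github.com/docker/distribution/registry" // assumed import path for the new sub-package
	_ "github.com/docker/distribution/storagedriver/inmemory"
)

func main() {
	// Configure the app with the in-memory storage driver, as the tests do.
	config := configuration.Configuration{
		Storage: configuration.Storage{
			"inmemory": configuration.Parameters{},
		},
	}

	// NewApp wires up the v2 router, the storage services and, if configured,
	// the access controller. The result is a plain http.Handler.
	app := registry.NewApp(config)

	// Serve it behind any server; the tests use httptest.
	server := httptest.NewServer(app)
	defer server.Close()

	// Build the /v2/ base URL and check for the "{}" 200 OK response that
	// apiBase returns.
	builder, err := v2.NewURLBuilderFromString(server.URL)
	if err != nil {
		panic(err)
	}
	baseURL, err := builder.BuildBaseURL()
	if err != nil {
		panic(err)
	}

	resp, err := http.Get(baseURL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("GET /v2/ ->", resp.Status) // expect "200 OK" with body "{}"
}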
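
Similarly, the layer upload flow exercised by TestLayerAPI reduces to two HTTP calls: a POST to the blob upload endpoint to open a session, then a single PUT to the returned Location with digest and size query parameters attached, preserving the _state token the server handed back. A condensed client-side sketch, using hypothetical helper names (startUpload, monolithicPush) and placeholder arguments, might look like this:

package registryclient // hypothetical helper package, for illustration only

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"

	"github.com/docker/distribution/digest"
)

// startUpload POSTs to /v2/<name>/blobs/uploads/ and returns the upload
// Location, mirroring startPushLayer in the tests above.
func startUpload(uploadBase string) (string, error) {
	resp, err := http.Post(uploadBase, "", nil)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return "", fmt.Errorf("unexpected status starting upload: %v", resp.Status)
	}
	return resp.Header.Get("Location"), nil
}

// monolithicPush completes the upload in one PUT by attaching the digest and
// size query parameters, as pushLayer does.
func monolithicPush(uploadURL string, dgst digest.Digest, rs io.ReadSeeker) error {
	size, _ := rs.Seek(0, os.SEEK_END)
	rs.Seek(0, os.SEEK_SET)

	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}

	q := u.Query() // keeps the server-issued _state token intact
	q.Set("digest", dgst.String())
	q.Set("size", fmt.Sprint(size))
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), rs)
	if err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status completing upload: %v", resp.Status)
	}
	return nil
}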