diff --git a/docs/client/repository.go b/docs/client/repository.go
index cd93cd1a..a806aea4 100644
--- a/docs/client/repository.go
+++ b/docs/client/repository.go
@@ -321,7 +321,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
-	dgstr := digest.NewCanonicalDigester()
+	dgstr := digest.Canonical.New()
 	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
 	if err != nil {
 		return distribution.Descriptor{}, err
diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go
index 1a41cfb8..5132f72e 100644
--- a/docs/handlers/api_test.go
+++ b/docs/handlers/api_test.go
@@ -213,7 +213,7 @@ func TestBlobAPI(t *testing.T) {
 
 	// Now, push just a chunk
 	layerFile.Seek(0, 0)
-	canonicalDigester := digest.NewCanonicalDigester()
+	canonicalDigester := digest.Canonical.New()
 	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
 		t.Fatalf("error copying to digest: %v", err)
 	}
@@ -637,7 +637,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges
 
 // pushLayer pushes the layer content returning the url on success.
 func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
-	digester := digest.NewCanonicalDigester()
+	digester := digest.Canonical.New()
 
 	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash()))
 	if err != nil {
@@ -702,7 +702,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp
 
 	uploadURL := u.String()
 
-	digester := digest.NewCanonicalDigester()
+	digester := digest.Canonical.New()
 
 	req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash()))
 	if err != nil {
diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go
index 40841a5e..6a37e81d 100644
--- a/docs/storage/blobwriter.go
+++ b/docs/storage/blobwriter.go
@@ -1,6 +1,7 @@
 package storage
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"time"
@@ -13,7 +14,7 @@ import (
 )
 
 var (
-	errResumableDigestNotAvailable = fmt.Errorf("resumable digest not available")
+	errResumableDigestNotAvailable = errors.New("resumable digest not available")
 )
 
 // layerWriter is used to control the various aspects of resumable
@@ -197,7 +198,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 		// the same, we don't need to read the data from the backend. This is
 		// because we've written the entire file in the lifecycle of the
 		// current instance.
-		if bw.written == bw.size && digest.CanonicalAlgorithm == desc.Digest.Algorithm() {
+		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
 			canonical = bw.digester.Digest()
 			verified = desc.Digest == canonical
 		}
@@ -206,7 +207,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 	// paths. We may be able to make the size-based check a stronger
 	// guarantee, so this may be defensive.
 	if !verified {
-		digester := digest.NewCanonicalDigester()
+		digester := digest.Canonical.New()
 
 		digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
 		if err != nil {
diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index ceb53fa8..cb06e354 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -164,7 +164,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string
 		blobStore: lbs,
 		id:        uuid,
 		startedAt: startedAt,
-		digester:  digest.NewCanonicalDigester(),
+		digester:  digest.Canonical.New(),
 		bufferedFileWriter: *fw,
 	}
 
diff --git a/docs/storage/paths.go b/docs/storage/paths.go
index 9e150d3b..35debddf 100644
--- a/docs/storage/paths.go
+++ b/docs/storage/paths.go
@@ -262,7 +262,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 		if v.list {
 			offset = "" // Limit to the prefix for listing offsets.
 		}
-		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", v.alg, offset)...), nil
+		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
 	case repositoriesRootPathSpec:
 		return path.Join(repoPrefix...), nil
 	default:
@@ -447,7 +447,7 @@ func (uploadStartedAtPathSpec) pathSpec() {}
 type uploadHashStatePathSpec struct {
 	name   string
 	id     string
-	alg    string
+	alg    digest.Algorithm
 	offset int64
 	list   bool
 }
@@ -479,7 +479,7 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 		return nil, err
 	}
 
-	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
+	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
 	hex := dgst.Hex()
 	prefix := []string{algorithm}
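
Note: the diff above replaces the removed digest.NewCanonicalDigester() constructor with the typed digest.Canonical algorithm value and its New() method, and changes the upload hash-state path fields from plain strings to digest.Algorithm. A minimal sketch of the new digester usage, assuming only the package API visible in this diff (the standalone program, import path, and sample payload are illustrative, not part of the change):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest" // import path assumed for this revision of the tree
)

func main() {
	// digest.Canonical is a typed Algorithm value; its New() method replaces
	// the removed digest.NewCanonicalDigester() constructor.
	digester := digest.Canonical.New()

	// Tee the payload through the digester's hash while copying it elsewhere,
	// mirroring how blobs.Put and the blob writer accumulate the canonical digest.
	payload := strings.NewReader("example blob contents")
	var sink bytes.Buffer
	if _, err := io.Copy(&sink, io.TeeReader(payload, digester.Hash())); err != nil {
		panic(err)
	}

	// Digest() yields a value such as "sha256:..."; Algorithm() now returns the
	// typed digest.Algorithm, which is why paths.go converts it with string().
	dgst := digester.Digest()
	fmt.Println(dgst.Algorithm(), dgst.Hex())
}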