Refactor specification of supported digests

To make the definition of supported digests clearer, we have refactored the
digest package around a dedicated Algorithm type. This type represents the
digest's prefix, and the supported hash implementations are associated with it
through method calls.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
Stephen J Day 2015-05-21 18:44:08 -07:00
parent a0d242d9df
commit bdaed4c789
5 changed files with 12 additions and 11 deletions
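
For context, here is a minimal sketch of what an Algorithm-centric digest package could look like, consistent with the call sites in the hunks below (digest.Canonical.New(), Hash(), Digest(), and the explicit string conversions). The constant set, the algorithms map, and the Digester/digester names are assumptions for illustration, not the exact contents of this commit.

// digest package sketch (illustrative only, not this commit's exact code):
// an Algorithm is the digest prefix, and the supported hash implementations
// are reached through methods on it.
package digest

import (
	"crypto"
	_ "crypto/sha256" // registers crypto.SHA256
	"fmt"
	"hash"
)

// Algorithm identifies a supported digest prefix such as "sha256".
type Algorithm string

const (
	SHA256 Algorithm = "sha256"

	// Canonical is the algorithm the registry uses for digests it computes itself.
	Canonical = SHA256
)

// algorithms associates each supported Algorithm with a hash implementation.
var algorithms = map[Algorithm]crypto.Hash{
	SHA256: crypto.SHA256,
}

// Available reports whether the algorithm's hash function is linked into the binary.
func (a Algorithm) Available() bool {
	h, ok := algorithms[a]
	return ok && h.Available()
}

// Hash returns a new hash.Hash for the algorithm; an unsupported algorithm
// panics here, so callers should check Available first.
func (a Algorithm) Hash() hash.Hash {
	return algorithms[a].New()
}

// Digest is a serialized digest of the form "<algorithm>:<hex>".
type Digest string

// Digester is the running-hash handle seen at the call sites below:
// Hash() feeds data in, Digest() finalizes to "<algorithm>:<hex>".
type Digester interface {
	Hash() hash.Hash
	Digest() Digest
}

// New replaces constructors such as NewCanonicalDigester with a method on
// the algorithm itself, e.g. digest.Canonical.New().
func (a Algorithm) New() Digester {
	return &digester{alg: a, hash: a.Hash()}
}

type digester struct {
	alg  Algorithm
	hash hash.Hash
}

func (d *digester) Hash() hash.Hash { return d.hash }

func (d *digester) Digest() Digest {
	return Digest(fmt.Sprintf("%s:%x", d.alg, d.hash.Sum(nil)))
}

With an API along these lines, the call-site changes below are mechanical: digest.NewCanonicalDigester() becomes digest.Canonical.New(), fields that held an algorithm name as a plain string become digest.Algorithm, and places that need the raw string (such as path components) convert explicitly with string(...).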

@@ -321,7 +321,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
-	dgstr := digest.NewCanonicalDigester()
+	dgstr := digest.Canonical.New()
 	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
 	if err != nil {
 		return distribution.Descriptor{}, err

@@ -213,7 +213,7 @@ func TestBlobAPI(t *testing.T) {
 	// Now, push just a chunk
 	layerFile.Seek(0, 0)
 
-	canonicalDigester := digest.NewCanonicalDigester()
+	canonicalDigester := digest.Canonical.New()
 	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
 		t.Fatalf("error copying to digest: %v", err)
 	}
@@ -637,7 +637,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges
 
 // pushLayer pushes the layer content returning the url on success.
 func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
-	digester := digest.NewCanonicalDigester()
+	digester := digest.Canonical.New()
 
 	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash()))
 	if err != nil {
@@ -702,7 +702,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp
 
 	uploadURL := u.String()
 
-	digester := digest.NewCanonicalDigester()
+	digester := digest.Canonical.New()
 
 	req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash()))
 	if err != nil {

@@ -1,6 +1,7 @@
 package storage
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"time"
@@ -13,7 +14,7 @@ import (
 )
 
 var (
-	errResumableDigestNotAvailable = fmt.Errorf("resumable digest not available")
+	errResumableDigestNotAvailable = errors.New("resumable digest not available")
 )
 
 // layerWriter is used to control the various aspects of resumable
@@ -197,7 +198,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 	// the same, we don't need to read the data from the backend. This is
 	// because we've written the entire file in the lifecycle of the
 	// current instance.
-	if bw.written == bw.size && digest.CanonicalAlgorithm == desc.Digest.Algorithm() {
+	if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
 		canonical = bw.digester.Digest()
 		verified = desc.Digest == canonical
 	}
@@ -206,7 +207,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 	// paths. We may be able to make the size-based check a stronger
 	// guarantee, so this may be defensive.
 	if !verified {
-		digester := digest.NewCanonicalDigester()
+		digester := digest.Canonical.New()
 
 		digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
 		if err != nil {

@@ -164,7 +164,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string
 		blobStore:          lbs,
 		id:                 uuid,
 		startedAt:          startedAt,
-		digester:           digest.NewCanonicalDigester(),
+		digester:           digest.Canonical.New(),
 		bufferedFileWriter: *fw,
 	}

@@ -262,7 +262,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 		if v.list {
 			offset = "" // Limit to the prefix for listing offsets.
 		}
-		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", v.alg, offset)...), nil
+		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
 	case repositoriesRootPathSpec:
 		return path.Join(repoPrefix...), nil
 	default:
@@ -447,7 +447,7 @@ func (uploadStartedAtPathSpec) pathSpec() {}
 type uploadHashStatePathSpec struct {
 	name   string
 	id     string
-	alg    string
+	alg    digest.Algorithm
 	offset int64
 	list   bool
 }
@@ -479,7 +479,7 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 		return nil, err
 	}
 
-	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
+	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
 	hex := dgst.Hex()
 	prefix := []string{algorithm}