digest: migrate to opencontainers/go-digest

Signed-off-by: Stephen J Day <stephen.day@docker.com>

parent 2d500932f2
commit 532ec9f036

blobs.go (2 changes)
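
For packages that consumed the old library, the heart of this migration is an import-path swap; the API surface is nearly identical. A minimal sketch of a caller after the change (the payload is illustrative; crypto/sha256 must be linked in so the canonical algorithm is available):

package main

import (
	_ "crypto/sha256" // registers sha256 so digest.Canonical is available
	"fmt"

	// Previously "github.com/docker/distribution/digest".
	"github.com/opencontainers/go-digest"
)

func main() {
	// FromBytes and Parse behave as before under the new import path.
	dgst := digest.FromBytes([]byte("hello world"))
	fmt.Println(dgst)

	if _, err := digest.Parse(dgst.String()); err != nil {
		fmt.Println("unexpected:", err)
	}
}
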
@ -8,8 +8,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -7,8 +7,8 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/version"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -1,149 +0,0 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Algorithm identifies an implementation of a digester by an identifier.
// Note that this defines both the hash algorithm used and the string
|
||||
// encoding.
|
||||
type Algorithm string
|
||||
|
||||
// supported digest types
|
||||
const (
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding
|
||||
|
||||
// Canonical is the primary digest algorithm used with the distribution
|
||||
// project. Other digests may be used but this one is the primary storage
|
||||
// digest.
|
||||
Canonical = SHA256
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(stevvooe): Follow the pattern of the standard crypto package for
|
||||
// registration of digests. Effectively, we are a registerable set and
|
||||
// common symbol access.
|
||||
|
||||
// algorithms maps values to hash.Hash implementations. Other algorithms
|
||||
// may be available but they cannot be calculated by the digest package.
|
||||
algorithms = map[Algorithm]crypto.Hash{
|
||||
SHA256: crypto.SHA256,
|
||||
SHA384: crypto.SHA384,
|
||||
SHA512: crypto.SHA512,
|
||||
}
|
||||
)
|
||||
|
||||
// Available returns true if the digest type is available for use. If this
|
||||
// returns false, New and Hash will return nil.
|
||||
func (a Algorithm) Available() bool {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// check availability of the hash, as well
|
||||
return h.Available()
|
||||
}
|
||||
|
||||
func (a Algorithm) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// Size returns number of bytes returned by the hash.
|
||||
func (a Algorithm) Size() int {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
return h.Size()
|
||||
}
|
||||
|
||||
// Set implemented to allow use of Algorithm as a command line flag.
|
||||
func (a *Algorithm) Set(value string) error {
|
||||
if value == "" {
|
||||
*a = Canonical
|
||||
} else {
|
||||
// just do a type conversion, support is queried with Available.
|
||||
*a = Algorithm(value)
|
||||
}
|
||||
|
||||
if !a.Available() {
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Digester returns a new digester for the specified algorithm. If the algorithm
|
||||
// does not have a digester implementation, nil will be returned. This can be
|
||||
// checked by calling Available before calling New.
|
||||
func (a Algorithm) Digester() Digester {
|
||||
return &digester{
|
||||
alg: a,
|
||||
hash: a.Hash(),
|
||||
}
|
||||
}
|
||||
|
||||
// New is deprecated. Use Algorithm.Digester.
|
||||
func (a Algorithm) New() Digester {
|
||||
return a.Digester()
|
||||
}
|
||||
|
||||
// Hash returns a new hash as used by the algorithm. If not available, the
|
||||
// method will panic. Check Algorithm.Available() before calling.
|
||||
func (a Algorithm) Hash() hash.Hash {
|
||||
if !a.Available() {
|
||||
// Empty algorithm string is invalid
|
||||
if a == "" {
|
||||
panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
|
||||
}
|
||||
|
||||
// NOTE(stevvooe): A missing hash is usually a programming error that
|
||||
// must be resolved at compile time. We don't import in the digest
|
||||
// package to allow users to choose their hash implementation (such as
|
||||
// when using stevvooe/resumable or a hardware accelerated package).
|
||||
//
|
||||
// Applications that may want to resolve the hash at runtime should
|
||||
// call Algorithm.Available before calling Algorithm.Hash().
|
||||
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
|
||||
}
|
||||
|
||||
return algorithms[a].New()
|
||||
}
|
||||
|
||||
// FromReader returns the digest of the reader using the algorithm.
|
||||
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
|
||||
digester := a.New()
|
||||
|
||||
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return digester.Digest(), nil
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func (a Algorithm) FromBytes(p []byte) Digest {
|
||||
digester := a.New()
|
||||
|
||||
if _, err := digester.Hash().Write(p); err != nil {
|
||||
// Writes to a Hash should never fail. None of the existing
|
||||
// hash implementations in the stdlib or hashes vendored
|
||||
// here can return errors from Write. Having a panic in this
|
||||
// condition instead of having FromBytes return an error value
|
||||
// avoids unnecessary error handling paths in all callers.
|
||||
panic("write to hash function returned error: " + err.Error())
|
||||
}
|
||||
|
||||
return digester.Digest()
|
||||
}
|
||||
|
||||
// FromString digests the string input and returns a Digest.
|
||||
func (a Algorithm) FromString(s string) Digest {
|
||||
return a.FromBytes([]byte(s))
|
||||
}
|
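
The Algorithm helpers removed above (FromReader, FromBytes, FromString) carry over to go-digest unchanged; a small sketch of digesting a stream with a non-canonical algorithm (the input text is illustrative):

package main

import (
	_ "crypto/sha512" // registers sha512 so the algorithm reports Available()
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	content := "some layer content"

	fromReader, err := digest.SHA512.FromReader(strings.NewReader(content))
	if err != nil {
		panic(err)
	}

	// FromBytes and FromString are convenience wrappers over the same hash.
	fmt.Println(fromReader == digest.SHA512.FromString(content)) // true
}
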
@ -1,100 +0,0 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFlagInterface(t *testing.T) {
|
||||
var (
|
||||
alg Algorithm
|
||||
flagSet flag.FlagSet
|
||||
)
|
||||
|
||||
flagSet.Var(&alg, "algorithm", "set the digest algorithm")
|
||||
for _, testcase := range []struct {
|
||||
Name string
|
||||
Args []string
|
||||
Err error
|
||||
Expected Algorithm
|
||||
}{
|
||||
{
|
||||
Name: "Invalid",
|
||||
Args: []string{"-algorithm", "bean"},
|
||||
Err: ErrDigestUnsupported,
|
||||
},
|
||||
{
|
||||
Name: "Default",
|
||||
Args: []string{"unrelated"},
|
||||
Expected: "sha256",
|
||||
},
|
||||
{
|
||||
Name: "Other",
|
||||
Args: []string{"-algorithm", "sha512"},
|
||||
Expected: "sha512",
|
||||
},
|
||||
} {
|
||||
t.Run(testcase.Name, func(t *testing.T) {
|
||||
alg = Canonical
|
||||
if err := flagSet.Parse(testcase.Args); err != testcase.Err {
|
||||
if testcase.Err == nil {
|
||||
t.Fatal("unexpected error", err)
|
||||
}
|
||||
|
||||
// check that flag package returns correct error
|
||||
if !strings.Contains(err.Error(), testcase.Err.Error()) {
|
||||
t.Fatalf("unexpected error: %v != %v", err, testcase.Err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if alg != testcase.Expected {
|
||||
t.Fatalf("unexpected algorithm: %v != %v", alg, testcase.Expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFroms(t *testing.T) {
|
||||
p := make([]byte, 1<<20)
|
||||
rand.Read(p)
|
||||
|
||||
for alg := range algorithms {
|
||||
h := alg.Hash()
|
||||
h.Write(p)
|
||||
expected := Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
|
||||
readerDgst, err := alg.FromReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
t.Fatalf("error calculating hash from reader: %v", err)
|
||||
}
|
||||
|
||||
dgsts := []Digest{
|
||||
alg.FromBytes(p),
|
||||
alg.FromString(string(p)),
|
||||
readerDgst,
|
||||
}
|
||||
|
||||
if alg == Canonical {
|
||||
readerDgst, err := FromReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
t.Fatalf("error calculating hash from reader: %v", err)
|
||||
}
|
||||
|
||||
dgsts = append(dgsts,
|
||||
FromBytes(p),
|
||||
FromString(string(p)),
|
||||
readerDgst)
|
||||
}
|
||||
for _, dgst := range dgsts {
|
||||
if dgst != expected {
|
||||
t.Fatalf("unexpected digest %v != %v", dgst, expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
digest/digest.go (151 changes)
@ -1,151 +0,0 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
|
||||
DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
|
||||
// Digest allows simple protection of hex formatted digest strings, prefixed
|
||||
// by their algorithm. Strings of type Digest have some guarantee of being in
|
||||
// the correct format and it provides quick access to the components of a
|
||||
// digest string.
|
||||
//
|
||||
// The following is an example of the contents of Digest types:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// This allows us to abstract the digest behind this type and work only in those
|
||||
// terms.
|
||||
type Digest string
|
||||
|
||||
// NewDigest returns a Digest from alg and a hash.Hash object.
|
||||
func NewDigest(alg Algorithm, h hash.Hash) Digest {
|
||||
return NewDigestFromBytes(alg, h.Sum(nil))
|
||||
}
|
||||
|
||||
// NewDigestFromBytes returns a new digest from the byte contents of p.
|
||||
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
|
||||
// functions. This is also useful for rebuilding digests from binary
|
||||
// serializations.
|
||||
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%x", alg, p))
|
||||
}
|
||||
|
||||
// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
|
||||
func NewDigestFromHex(alg, hex string) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%s", alg, hex))
|
||||
}
|
||||
|
||||
// DigestRegexp matches valid digest types.
|
||||
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
|
||||
|
||||
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
|
||||
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
|
||||
|
||||
var (
|
||||
// ErrDigestInvalidFormat is returned when the digest format is invalid.
|
||||
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
|
||||
|
||||
// ErrDigestInvalidLength is returned when the digest has an invalid length.
|
||||
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
|
||||
|
||||
// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
|
||||
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
|
||||
)
|
||||
|
||||
// Parse parses s and returns the validated digest object. An error will
|
||||
// be returned if the format is invalid.
|
||||
func Parse(s string) (Digest, error) {
|
||||
d := Digest(s)
|
||||
return d, d.Validate()
|
||||
}
|
||||
|
||||
// ParseDigest is deprecated. Use Parse.
|
||||
func ParseDigest(s string) (Digest, error) {
|
||||
return Parse(s)
|
||||
}
|
||||
|
||||
// FromReader returns the most valid digest for the underlying content using
|
||||
// the canonical digest algorithm.
|
||||
func FromReader(rd io.Reader) (Digest, error) {
|
||||
return Canonical.FromReader(rd)
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func FromBytes(p []byte) Digest {
|
||||
return Canonical.FromBytes(p)
|
||||
}
|
||||
|
||||
// FromString digests the input and returns a Digest.
|
||||
func FromString(s string) Digest {
|
||||
return Canonical.FromString(s)
|
||||
}
|
||||
|
||||
// Validate checks that the contents of d form a valid digest, returning an
|
||||
// error if not.
|
||||
func (d Digest) Validate() error {
|
||||
s := string(d)
|
||||
|
||||
i := strings.Index(s, ":")
|
||||
|
||||
// validate i then run through regexp
|
||||
if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
algorithm := Algorithm(s[:i])
|
||||
if !algorithm.Available() {
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
|
||||
// Digests must always be hex-encoded, ensuring that their hex portion will
|
||||
// always be size*2
|
||||
if algorithm.Size()*2 != len(s[i+1:]) {
|
||||
return ErrDigestInvalidLength
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Algorithm returns the algorithm portion of the digest. This will panic if
|
||||
// the underlying digest is not in a valid format.
|
||||
func (d Digest) Algorithm() Algorithm {
|
||||
return Algorithm(d[:d.sepIndex()])
|
||||
}
|
||||
|
||||
// Verifier returns a writer object that can be used to verify a stream of
|
||||
// content against the digest. If the digest is invalid, the method will panic.
|
||||
func (d Digest) Verifier() Verifier {
|
||||
return hashVerifier{
|
||||
hash: d.Algorithm().Hash(),
|
||||
digest: d,
|
||||
}
|
||||
}
|
||||
|
||||
// Hex returns the hex digest portion of the digest. This will panic if the
|
||||
// underlying digest is not in a valid format.
|
||||
func (d Digest) Hex() string {
|
||||
return string(d[d.sepIndex()+1:])
|
||||
}
|
||||
|
||||
func (d Digest) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
func (d Digest) sepIndex() int {
|
||||
i := strings.Index(string(d), ":")
|
||||
|
||||
if i < 0 {
|
||||
panic(fmt.Sprintf("no ':' separator in digest %q", d))
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
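
Parsing and validation also move over as-is, with Parse replacing the now-deprecated ParseDigest; a minimal sketch (the digest literals are illustrative):

package main

import (
	_ "crypto/sha256" // needed so sha256 digests validate as supported
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	d, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm(), len(d.Hex())) // sha256 64

	// Validate rejects malformed or unsupported digest strings.
	if err := digest.Digest("bean:cafe").Validate(); err != nil {
		fmt.Println("rejected:", err)
	}
}
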
@ -1,92 +0,0 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseDigest(t *testing.T) {
|
||||
for _, testcase := range []struct {
|
||||
input string
|
||||
err error
|
||||
algorithm Algorithm
|
||||
hex string
|
||||
}{
|
||||
{
|
||||
input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
|
||||
algorithm: "sha256",
|
||||
hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
|
||||
},
|
||||
{
|
||||
input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
|
||||
algorithm: "sha384",
|
||||
hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
|
||||
},
|
||||
{
|
||||
// empty hex
|
||||
input: "sha256:",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// empty hex
|
||||
input: ":",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// just hex
|
||||
input: "d41d8cd98f00b204e9800998ecf8427e",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// not hex
|
||||
input: "sha256:d41d8cd98f00b204e9800m98ecf8427e",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// too short
|
||||
input: "sha256:abcdef0123456789",
|
||||
err: ErrDigestInvalidLength,
|
||||
},
|
||||
{
|
||||
// too short (from different algorithm)
|
||||
input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
|
||||
err: ErrDigestInvalidLength,
|
||||
},
|
||||
{
|
||||
input: "foo:d41d8cd98f00b204e9800998ecf8427e",
|
||||
err: ErrDigestUnsupported,
|
||||
},
|
||||
} {
|
||||
digest, err := Parse(testcase.input)
|
||||
if err != testcase.err {
|
||||
t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err)
|
||||
}
|
||||
|
||||
if testcase.err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if digest.Algorithm() != testcase.algorithm {
|
||||
t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm)
|
||||
}
|
||||
|
||||
if digest.Hex() != testcase.hex {
|
||||
t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex)
|
||||
}
|
||||
|
||||
// Parse string return value and check equality
|
||||
newParsed, err := Parse(digest.String())
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err)
|
||||
}
|
||||
|
||||
if newParsed != digest {
|
||||
t.Fatalf("expected equal: %q != %q", newParsed, digest)
|
||||
}
|
||||
|
||||
newFromHex := NewDigestFromHex(newParsed.Algorithm().String(), newParsed.Hex())
|
||||
if newFromHex != digest {
|
||||
t.Fatalf("%v != %v", newFromHex, digest)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,25 +0,0 @@
|
||||
package digest
|
||||
|
||||
import "hash"
|
||||
|
||||
// Digester calculates the digest of written data. Writes should go directly
|
||||
// to the return value of Hash, while calling Digest will return the current
|
||||
// value of the digest.
|
||||
type Digester interface {
|
||||
Hash() hash.Hash // provides direct access to underlying hash instance.
|
||||
Digest() Digest
|
||||
}
|
||||
|
||||
// digester provides a simple digester definition that embeds a hasher.
|
||||
type digester struct {
|
||||
alg Algorithm
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (d *digester) Hash() hash.Hash {
|
||||
return d.hash
|
||||
}
|
||||
|
||||
func (d *digester) Digest() Digest {
|
||||
return NewDigest(d.alg, d.hash)
|
||||
}
|
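
The Digester shape (a hash plus a Digest method) is preserved in go-digest; a sketch of streaming content through one while copying it to a sink, the same tee pattern the registry code below uses (buffer and payload names are illustrative):

package main

import (
	"bytes"
	_ "crypto/sha256" // registers the canonical algorithm
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	digester := digest.Canonical.Digester()

	var sink bytes.Buffer
	payload := strings.NewReader("layer bytes go here")
	if _, err := io.Copy(&sink, io.TeeReader(payload, digester.Hash())); err != nil {
		panic(err)
	}

	fmt.Println(digester.Digest(), sink.Len())
}
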
@ -1,42 +0,0 @@
|
||||
// Package digest provides a generalized type to opaquely represent message
|
||||
// digests and their operations within the registry. The Digest type is
|
||||
// designed to serve as a flexible identifier in a content-addressable system.
|
||||
// More importantly, it provides tools and wrappers to work with
|
||||
// hash.Hash-based digests with little effort.
|
||||
//
|
||||
// Basics
|
||||
//
|
||||
// The format of a digest is simply a string with two parts, dubbed the
|
||||
// "algorithm" and the "digest", separated by a colon:
|
||||
//
|
||||
// <algorithm>:<digest>
|
||||
//
|
||||
// An example of a sha256 digest representation follows:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// In this case, the string "sha256" is the algorithm and the hex bytes are
|
||||
// the "digest".
|
||||
//
|
||||
// Because the Digest type is simply a string, once a valid Digest is
|
||||
// obtained, comparisons are cheap, quick and simple to express with the
|
||||
// standard equality operator.
|
||||
//
|
||||
// Verification
|
||||
//
|
||||
// The main benefit of using the Digest type is simple verification against a
|
||||
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
|
||||
// interface, provides a common write sink for digest verification. After
|
||||
// writing is complete, calling the Verifier.Verified method will indicate
|
||||
// whether or not the stream of bytes matches the target digest.
|
||||
//
|
||||
// Missing Features
|
||||
//
|
||||
// In addition to the above, we intend to add the following features to this
|
||||
// package:
|
||||
//
|
||||
// 1. A Digester type that supports write sink digest calculation.
|
||||
//
|
||||
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
|
||||
//
|
||||
package digest
|
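
The verification flow described in the package documentation above is unchanged by the migration; only the constructor moves from NewDigestVerifier(d) to the d.Verifier() method. A minimal sketch:

package main

import (
	"bytes"
	_ "crypto/sha256" // registers sha256 so the verifier can hash
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

func main() {
	content := []byte("some content to verify")
	dgst := digest.FromBytes(content)

	verifier := dgst.Verifier()
	if _, err := io.Copy(verifier, bytes.NewReader(content)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // verified: true
}
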
@ -1,36 +0,0 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Verifier presents a general verification interface to be used with message
|
||||
// digests and other byte stream verifications. Users instantiate a Verifier
|
||||
// from one of the various methods, write the data under test to it then check
|
||||
// the result with the Verified method.
|
||||
type Verifier interface {
|
||||
io.Writer
|
||||
|
||||
// Verified will return true if the content written to Verifier matches
|
||||
// the digest.
|
||||
Verified() bool
|
||||
}
|
||||
|
||||
// NewDigestVerifier is deprecated. Please use Digest.Verifier.
|
||||
func NewDigestVerifier(d Digest) (Verifier, error) {
|
||||
return d.Verifier(), nil
|
||||
}
|
||||
|
||||
type hashVerifier struct {
|
||||
digest Digest
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (hv hashVerifier) Write(p []byte) (n int, err error) {
|
||||
return hv.hash.Write(p)
|
||||
}
|
||||
|
||||
func (hv hashVerifier) Verified() bool {
|
||||
return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
|
||||
}
|
@ -1,66 +0,0 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDigestVerifier(t *testing.T) {
|
||||
p := make([]byte, 1<<20)
|
||||
rand.Read(p)
|
||||
digest := FromBytes(p)
|
||||
|
||||
verifier := digest.Verifier()
|
||||
|
||||
io.Copy(verifier, bytes.NewReader(p))
|
||||
|
||||
if !verifier.Verified() {
|
||||
t.Fatalf("bytes not verified")
|
||||
}
|
||||
}
|
||||
|
||||
// TestVerifierUnsupportedDigest ensures that unsupported digest validation is
|
||||
// flowing through verifier creation.
|
||||
func TestVerifierUnsupportedDigest(t *testing.T) {
|
||||
for _, testcase := range []struct {
|
||||
Name string
|
||||
Digest Digest
|
||||
Expected interface{} // expected panic target
|
||||
}{
|
||||
{
|
||||
Name: "Empty",
|
||||
Digest: "",
|
||||
Expected: "no ':' separator in digest \"\"",
|
||||
},
|
||||
{
|
||||
Name: "EmptyAlg",
|
||||
Digest: ":",
|
||||
Expected: "empty digest algorithm, validate before calling Algorithm.Hash()",
|
||||
},
|
||||
{
|
||||
Name: "Unsupported",
|
||||
Digest: Digest("bean:0123456789abcdef"),
|
||||
Expected: "bean not available (make sure it is imported)",
|
||||
},
|
||||
{
|
||||
Name: "Garbage",
|
||||
Digest: Digest("sha256-garbage:pure"),
|
||||
Expected: "sha256-garbage not available (make sure it is imported)",
|
||||
},
|
||||
} {
|
||||
t.Run(testcase.Name, func(t *testing.T) {
|
||||
expected := testcase.Expected
|
||||
defer func() {
|
||||
recovered := recover()
|
||||
if !reflect.DeepEqual(recovered, expected) {
|
||||
t.Fatalf("unexpected recover: %v != %v", recovered, expected)
|
||||
}
|
||||
}()
|
||||
|
||||
_ = testcase.Digest.Verifier()
|
||||
})
|
||||
}
|
||||
}
|
@ -1,10 +1,12 @@
|
||||
package digest
|
||||
package digestset
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -44,7 +46,7 @@ func NewSet() *Set {
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
@ -64,7 +66,7 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (Digest, error) {
|
||||
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
@ -72,11 +74,11 @@ func (dst *Set) Lookup(d string) (Digest, error) {
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg Algorithm
|
||||
alg digest.Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := Parse(d)
|
||||
if err == ErrDigestInvalidFormat {
|
||||
dgst, err := digest.Parse(d)
|
||||
if err == digest.ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
@ -108,7 +110,7 @@ func (dst *Set) Lookup(d string) (Digest, error) {
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d Digest) error {
|
||||
func (dst *Set) Add(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -139,7 +141,7 @@ func (dst *Set) Add(d Digest) error {
|
||||
// Remove removes the given digest from the set. An error will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d Digest) error {
|
||||
func (dst *Set) Remove(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -167,10 +169,10 @@ func (dst *Set) Remove(d Digest) error {
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []Digest {
|
||||
func (dst *Set) All() []digest.Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]Digest, len(dst.entries))
|
||||
retValues := make([]digest.Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
@ -183,10 +185,10 @@ func (dst *Set) All() []Digest {
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[Digest]string {
|
||||
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[Digest]string, len(dst.entries))
|
||||
m := make(map[digest.Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
@ -222,9 +224,9 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string {
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg Algorithm
|
||||
alg digest.Algorithm
|
||||
val string
|
||||
digest Digest
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
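
With the rename, the set type now lives in its own digestset package and traffics in go-digest values. A sketch of adding a digest and resolving a short prefix, assuming the new package lands at github.com/docker/distribution/digestset (the digest values are illustrative):

package main

import (
	_ "crypto/sha256" // needed so the sha256 digests validate
	"fmt"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

func main() {
	dset := digestset.NewSet()

	full := digest.Digest("sha256:1234111111111111111111111111111111111111111111111111111111111111")
	if err := dset.Add(full); err != nil {
		panic(err)
	}

	// Lookup accepts a full digest or an unambiguous short prefix.
	found, err := dset.Lookup("12341")
	if err != nil {
		panic(err)
	}
	fmt.Println(found == full) // true
}
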
@ -1,20 +1,23 @@
|
||||
package digest
|
||||
package digestset
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func assertEqualDigests(t *testing.T, d1, d2 Digest) {
|
||||
func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) {
|
||||
if d1 != d2 {
|
||||
t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookup(t *testing.T) {
|
||||
digests := []Digest{
|
||||
digests := []digest.Digest{
|
||||
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
|
||||
@ -88,7 +91,7 @@ func TestLookup(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddDuplication(t *testing.T) {
|
||||
digests := []Digest{
|
||||
digests := []digest.Digest{
|
||||
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
|
||||
@ -110,7 +113,7 @@ func TestAddDuplication(t *testing.T) {
|
||||
t.Fatal("Invalid dset size")
|
||||
}
|
||||
|
||||
if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil {
|
||||
if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -118,7 +121,7 @@ func TestAddDuplication(t *testing.T) {
|
||||
t.Fatal("Duplicate digest insert allowed")
|
||||
}
|
||||
|
||||
if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil {
|
||||
if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -170,7 +173,7 @@ func TestAll(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
all := map[Digest]struct{}{}
|
||||
all := map[digest.Digest]struct{}{}
|
||||
for _, dgst := range dset.All() {
|
||||
all[dgst] = struct{}{}
|
||||
}
|
||||
@ -194,7 +197,7 @@ func assertEqualShort(t *testing.T, actual, expected string) {
|
||||
}
|
||||
|
||||
func TestShortCodeTable(t *testing.T) {
|
||||
digests := []Digest{
|
||||
digests := []digest.Digest{
|
||||
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
|
||||
@ -227,15 +230,15 @@ func TestShortCodeTable(t *testing.T) {
|
||||
assertEqualShort(t, dump[digests[7]], "653")
|
||||
}
|
||||
|
||||
func createDigests(count int) ([]Digest, error) {
|
||||
func createDigests(count int) ([]digest.Digest, error) {
|
||||
r := rand.New(rand.NewSource(25823))
|
||||
digests := make([]Digest, count)
|
||||
digests := make([]digest.Digest, count)
|
||||
for i := range digests {
|
||||
h := sha256.New()
|
||||
if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
digests[i] = NewDigest("sha256", h)
|
||||
digests[i] = digest.NewDigest("sha256", h)
|
||||
}
|
||||
return digests, nil
|
||||
}
|
@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// ErrAccessDenied is returned when an access to a requested resource is
|
||||
|
@ -6,8 +6,8 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// MediaTypeManifestList specifies the mediaType for manifest lists.
|
||||
|
@ -9,10 +9,10 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type diffID digest.Digest
|
||||
|
@ -9,9 +9,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type mockBlobService struct {
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -6,10 +6,10 @@ import (
|
||||
"errors"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// referenceManifestBuilder is a type for constructing manifests from schema1
|
||||
|
@ -4,10 +4,10 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest {
|
||||
|
@ -3,7 +3,7 @@ package schema2
|
||||
import (
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// builder is a type for constructing manifests.
|
||||
|
@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type mockBlobService struct {
|
||||
|
@ -6,8 +6,8 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"mime"
|
||||
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// Manifest represents a registry object specifying a set of
|
||||
|
@ -6,9 +6,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type bridge struct {
|
||||
|
@ -4,12 +4,12 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// ManifestListener describes a set of methods for listening to events related to manifests.
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
@ -16,6 +15,7 @@ import (
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func TestListener(t *testing.T) {
|
||||
|
@ -27,7 +27,7 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func TestReferenceParse(t *testing.T) {
|
||||
|
@ -4,9 +4,9 @@ import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -15,12 +15,12 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
|
||||
@ -661,7 +661,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
dgstr := digest.Canonical.New()
|
||||
dgstr := digest.Canonical.Digester()
|
||||
n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
|
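
The only call-site change in hunks like the one above is the renamed digester constructor: digest.Canonical.New() becomes digest.Canonical.Digester(). An illustrative, compilable helper (not part of this commit) showing the new spelling of the same tee-and-digest pattern:

package blobutil

import (
	"bytes"
	_ "crypto/sha256" // registers the canonical algorithm
	"io"

	"github.com/opencontainers/go-digest"
)

// putBlob copies p to w while computing its canonical digest, the way the
// client blob store does above.
func putBlob(w io.Writer, p []byte) (digest.Digest, int64, error) {
	dgstr := digest.Canonical.Digester() // formerly digest.Canonical.New()
	n, err := io.Copy(w, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil {
		return "", 0, err
	}
	return dgstr.Digest(), n, nil
}
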
@ -16,7 +16,6 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
@ -25,6 +24,7 @@ import (
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func testServer(rrm testutil.RequestResponseMap) (string, func()) {
|
||||
|
@ -21,7 +21,6 @@ import (
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/configuration"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
@ -33,12 +32,18 @@ import (
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var headerConfig = http.Header{
|
||||
"X-Content-Type-Options": []string{"nosniff"},
|
||||
}
|
||||
|
||||
const (
|
||||
// digestSha256EmptyTar is the canonical sha256 digest of empty data
|
||||
digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
|
||||
// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified
|
||||
// 200 OK response.
|
||||
func TestCheckAPI(t *testing.T) {
|
||||
@ -514,7 +519,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
|
||||
// Now, push just a chunk
|
||||
layerFile.Seek(0, 0)
|
||||
|
||||
canonicalDigester := digest.Canonical.New()
|
||||
canonicalDigester := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
|
||||
t.Fatalf("error copying to digest: %v", err)
|
||||
}
|
||||
@ -672,7 +677,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
|
||||
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
|
||||
|
||||
layerFile.Seek(0, os.SEEK_SET)
|
||||
canonicalDigester := digest.Canonical.New()
|
||||
canonicalDigester := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
|
||||
t.Fatalf("error copying to digest: %v", err)
|
||||
}
|
||||
@ -832,7 +837,7 @@ func TestManifestDeleteDisabled(t *testing.T) {
|
||||
}
|
||||
|
||||
func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) {
|
||||
ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar)
|
||||
ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar)
|
||||
manifestURL, err := env.builder.BuildManifestURL(ref)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting manifest url: %v", err)
|
||||
@ -2072,7 +2077,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst dig
|
||||
|
||||
// pushLayer pushes the layer content returning the url on success.
|
||||
func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
|
||||
digester := digest.Canonical.New()
|
||||
digester := digest.Canonical.Digester()
|
||||
|
||||
resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash()))
|
||||
if err != nil {
|
||||
@ -2139,7 +2144,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp
|
||||
|
||||
uploadURL := u.String()
|
||||
|
||||
digester := digest.Canonical.New()
|
||||
digester := digest.Canonical.Digester()
|
||||
|
||||
req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash()))
|
||||
if err != nil {
|
||||
@ -2405,7 +2410,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
|
||||
checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
|
||||
|
||||
// Blob Delete
|
||||
ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar)
|
||||
ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar)
|
||||
blobURL, err := env.builder.BuildBlobURL(ref)
|
||||
resp, err = httpDelete(blobURL)
|
||||
checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
|
||||
|
@ -5,10 +5,10 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// blobDispatcher uses the request context to build a blobHandler.
|
||||
|
@ -7,12 +7,12 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/storage"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// blobUploadDispatcher constructs and returns the blob upload handler for the
|
||||
|
@ -7,10 +7,10 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
@ -8,7 +8,6 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
@ -17,6 +16,7 @@ import (
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// These constants determine which architecture and OS to choose from a
|
||||
|
@ -9,9 +9,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/proxy/scheduler"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// todo(richardscothern): from cache control header or config file
|
||||
|
@ -11,13 +11,13 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/proxy/scheduler"
|
||||
"github.com/docker/distribution/registry/storage"
|
||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
||||
"github.com/docker/distribution/registry/storage/driver/filesystem"
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var sbsMu sync.Mutex
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/proxy/scheduler"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// todo(richardscothern): from cache control header or config
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
@ -19,6 +18,7 @@ import (
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type statsManifest struct {
|
||||
|
@ -13,11 +13,11 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
||||
"github.com/docker/distribution/registry/storage/driver/testdriver"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// TestWriteSeek tests that the current file size can be
|
||||
@ -530,7 +530,7 @@ func TestLayerUploadZeroLength(t *testing.T) {
|
||||
}
|
||||
bs := repository.Blobs(ctx)
|
||||
|
||||
simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar)
|
||||
simpleUpload(t, bs, []byte{}, digestSha256EmptyTar)
|
||||
}
|
||||
|
||||
func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
|
||||
|
@ -7,8 +7,8 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// TODO(stevvooe): This should configurable in the future.
|
||||
|
@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// blobStore implements the read side of the blob store interface over a
|
||||
|
@ -10,14 +10,19 @@ import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
errResumableDigestNotAvailable = errors.New("resumable digest not available")
|
||||
)
|
||||
|
||||
const (
|
||||
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
|
||||
digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
|
||||
// blobWriter is used to control the various aspects of resumable
|
||||
// blob upload.
|
||||
type blobWriter struct {
|
||||
@ -234,7 +239,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
|
||||
// paths. We may be able to make the size-based check a stronger
|
||||
// guarantee, so this may be defensive.
|
||||
if !verified {
|
||||
digester := digest.Canonical.New()
|
||||
digester := digest.Canonical.Digester()
|
||||
verifier := desc.Digest.Verifier()
|
||||
|
||||
// Read the file from the backend driver and validate it.
|
||||
@ -318,7 +323,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
|
||||
// a zero-length blob into a nonzero-length blob location. To
|
||||
// prevent this horrid thing, we employ the hack of only allowing
// this to happen for the digest of an empty tar.
|
||||
if desc.Digest == digest.DigestSha256EmptyTar {
|
||||
if desc.Digest == digestSha256EmptyTar {
|
||||
return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
|
||||
}
|
||||
|
||||
|
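
DigestSha256EmptyTar no longer ships with the digest package, so blobwriter.go above redeclares it as a package-local constant. A sketch confirming the constant is just the canonical digest of zero bytes of input:

package main

import (
	_ "crypto/sha256"
	"fmt"

	"github.com/opencontainers/go-digest"
)

// digestSha256EmptyTar mirrors the local constant introduced above: the
// sha256 of empty input.
const digestSha256EmptyTar = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

func main() {
	fmt.Println(digest.FromBytes(nil) == digestSha256EmptyTar) // true
}
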
registry/storage/cache/cachecheck/suite.go (2 changes, vendored)
@ -6,8 +6,8 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// CheckBlobDescriptorCache takes a cache implementation through a common set
|
||||
|
@ -2,7 +2,7 @@ package cache
|
||||
|
||||
import (
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
)
|
||||
|
registry/storage/cache/memory/memory.go (2 changes, vendored)
@ -5,9 +5,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type inMemoryBlobDescriptorCacheProvider struct {
|
||||
|
registry/storage/cache/redis/redis.go (2 changes, vendored)
@ -5,10 +5,10 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// redisBlobStatService provides an implementation of
|
||||
|
@ -8,12 +8,12 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type setupEnv struct {
|
||||
|
@ -1,10 +1,11 @@
|
||||
// +build !noresumabledigest
|
||||
|
||||
package digest
|
||||
package storage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/stevvooe/resumable"
|
||||
_ "github.com/stevvooe/resumable/sha256"
|
||||
)
|
||||
@ -13,7 +14,7 @@ import (
|
||||
// is exposed through the digester type, which is just a hash plus a Digest
|
||||
// method.
|
||||
func TestResumableDetection(t *testing.T) {
|
||||
d := Canonical.New()
|
||||
d := digest.Canonical.Digester()
|
||||
|
||||
if _, ok := d.Hash().(resumable.Hash); !ok {
|
||||
t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash())
|
@ -8,8 +8,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func TestSimpleRead(t *testing.T) {
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func emit(format string, a ...interface{}) {
|
||||
|
@ -7,12 +7,12 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type image struct {
|
||||
|
@ -8,10 +8,10 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// linkPathFunc describes a function that can resolve a link based on the
|
||||
@ -321,7 +321,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string
|
||||
blobStore: lbs,
|
||||
id: uuid,
|
||||
startedAt: startedAt,
|
||||
digester: digest.Canonical.New(),
|
||||
digester: digest.Canonical.Digester(),
|
||||
fileWriter: fw,
|
||||
driver: lbs.driver,
|
||||
path: path,
|
||||
|
@ -9,7 +9,7 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/testutil"
|
||||
|
@ -6,8 +6,8 @@ import (
|
||||
"encoding/json"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.
|
||||
|
@ -6,11 +6,11 @@ import (
|
||||
"encoding/json"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// A ManifestHandler gets and puts manifests of a particular type.
|
||||
|
@ -8,7 +8,6 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
@ -17,6 +16,7 @@ import (
|
||||
"github.com/docker/distribution/registry/storage/driver/inmemory"
|
||||
"github.com/docker/distribution/testutil"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type manifestStoreTestEnv struct {
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -3,7 +3,7 @@ package storage
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func TestPathMapper(t *testing.T) {
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -6,10 +6,10 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It
|
||||
|
@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var _ distribution.TagService = &tagStore{}
|
||||
|
@ -4,8 +4,8 @@ import (
|
||||
"path"
|
||||
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// vacuum contains functions for cleaning up repositories and blobs
|
||||
|
@ -5,12 +5,12 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// MakeManifestList constructs a manifest list out of a list of manifest digests
|
||||
|
@ -10,7 +10,7 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// CreateRandomTarFile creates a random tarfile, returning it as an
|
||||
|