package storage

import (
	"time"

	"code.google.com/p/go-uuid/uuid"

	"github.com/docker/distribution"
	ctxu "github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)
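
// layerStore provides a layer-oriented view of a repository's blob store.
// Layers are addressed by digest and resolved through per-repository links,
// so a repository can only reach blobs it has linked.
//
// A minimal usage sketch (hypothetical caller; assumes the enclosing
// *repository exposes this store as its distribution.LayerService and that
// distribution.Layer is closeable):
//
//	var layers distribution.LayerService // e.g. obtained from a Repository
//	if ok, err := layers.Exists(dgst); err == nil && ok {
//		layer, err := layers.Fetch(dgst)
//		if err == nil {
//			defer layer.Close()
//			// read layer content...
//		}
//	}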
type layerStore struct {
	repository *repository
}
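
// Exists returns true if a layer with the given digest is linked into the
// repository and false if the layer is unknown; any other error encountered
// while resolving the layer is returned as-is.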
func (ls *layerStore) Exists(dgst digest.Digest) (bool, error) {
	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists")

	// Because this implementation just follows blob links, an existence
	// check is cheap: simply start and close a fetch.
	_, err := ls.Fetch(dgst)

	if err != nil {
		switch err.(type) {
		case distribution.ErrUnknownLayer:
			return false, nil
		}

		return false, err
	}

	return true, nil
}
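
// Fetch returns a distribution.Layer for reading the layer content addressed
// by dgst, or distribution.ErrUnknownLayer if no such layer is linked into
// the repository.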
func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) {
	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch")
	bp, err := ls.path(dgst)
	if err != nil {
		return nil, err
	}

	fr, err := newFileReader(ls.repository.driver, bp)
	if err != nil {
		return nil, err
	}

	return &layerReader{
		fileReader: *fr,
		digest:     dgst,
	}, nil
}

// Upload begins a layer upload, returning a handle. Note that a layer that
// is already present or already being uploaded is not rejected here; see the
// note on concurrency below.
func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload")

	// NOTE(stevvooe): Consider the issues with allowing concurrent uploads
	// of the same layer. Should it be disallowed? For now, we allow both
	// parties to proceed and the first one to complete uploads the layer.

	uuid := uuid.New()
	startedAt := time.Now().UTC()

	path, err := ls.repository.registry.pm.path(uploadDataPathSpec{
		name: ls.repository.Name(),
		uuid: uuid,
	})

	if err != nil {
		return nil, err
	}

	startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{
		name: ls.repository.Name(),
		uuid: uuid,
	})

	if err != nil {
		return nil, err
	}

	// Write a startedat file for this upload.
	if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
		return nil, err
	}

	return ls.newLayerUpload(uuid, path, startedAt)
}
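
// A hypothetical end-to-end flow for the handles returned by Upload and
// Resume (the method names on distribution.LayerUpload are assumed here,
// not defined in this file):
//
//	upload, _ := layers.Upload()
//	_, _ = io.Copy(upload, content) // stream the layer bytes
//	layer, _ := upload.Finish(dgst) // verify the digest and link the layer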

// Resume continues an in-progress layer upload, returning the current state
// of the upload.
func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) {
	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume")
	startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{
		name: ls.repository.Name(),
		uuid: uuid,
	})

	if err != nil {
		return nil, err
	}

	startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			return nil, distribution.ErrLayerUploadUnknown
		default:
			return nil, err
		}
	}

	startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
	if err != nil {
		return nil, err
	}

	path, err := ls.repository.registry.pm.path(uploadDataPathSpec{
		name: ls.repository.Name(),
		uuid: uuid,
	})

	if err != nil {
		return nil, err
	}

	return ls.newLayerUpload(uuid, path, startedAt)
}

// newLayerUpload allocates a new upload controller with the given state.
func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) {
	fw, err := newFileWriter(ls.repository.driver, path)
	if err != nil {
		return nil, err
	}

	return &layerWriter{
		layerStore:         ls,
		uuid:               uuid,
		startedAt:          startedAt,
		bufferedFileWriter: *fw,
	}, nil
}
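
// path returns the storage path of the blob backing the layer with the given
// digest. The lookup goes through the repository-scoped layer link so that a
// repository can only resolve blobs it owns a link to; a missing link yields
// distribution.ErrUnknownLayer.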
func (ls *layerStore) path(dgst digest.Digest) (string, error) {
	// We must traverse this path through the link to enforce ownership.
	layerLinkPath, err := ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst})
	if err != nil {
		return "", err
	}

	blobPath, err := ls.repository.blobStore.resolve(layerLinkPath)

	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			return "", distribution.ErrUnknownLayer{
				FSLayer: manifest.FSLayer{BlobSum: dgst},
			}
		default:
			return "", err
		}
	}

	return blobPath, nil
}