package storage
import (
"time"
"code.google.com/p/go-uuid/uuid"
"github.com/docker/distribution"
ctxu "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
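
// layerStore provides layer read access and upload management for a single
// repository, resolving layer digests through the repository's blob links.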
type layerStore struct {
repository *repository
}
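
// Exists returns true if a layer with the given digest is linked into this
// repository.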
func (ls *layerStore) Exists(digest digest.Digest) (bool, error) {
ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists")
// Because this implementation just follows blob links, an existence check
// is pretty cheap by starting and closing a fetch.
_, err := ls.Fetch(digest)
if err != nil {
switch err.(type) {
case distribution.ErrUnknownLayer:
return false, nil
}
return false, err
}
return true, nil
}
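
// Fetch returns a handle for reading the layer identified by dgst, resolving
// the digest through the repository's layer link to the underlying blob.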
func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) {
ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch")
bp, err := ls.path(dgst)
if err != nil {
return nil, err
}
fr, err := newFileReader(ls.repository.driver, bp)
if err != nil {
return nil, err
}
return &layerReader{
fileReader: *fr,
digest: dgst,
}, nil
}
// Upload begins a layer upload, returning a handle. If the layer upload
// is already in progress or the layer has already been uploaded, this
// will return an error.
func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload")
// NOTE(stevvooe): Consider the issues with allowing concurrent upload of
// the same two layers. Should it be disallowed? For now, we allow both
	// parties to proceed and the first one uploads the layer.
uuid := uuid.New()
startedAt := time.Now().UTC()
path, err := ls.repository.pm.path(uploadDataPathSpec{
name: ls.repository.Name(),
uuid: uuid,
})
if err != nil {
return nil, err
}
startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{
name: ls.repository.Name(),
uuid: uuid,
})
if err != nil {
return nil, err
}
// Write a startedat file for this upload
if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
return nil, err
}
return ls.newLayerUpload(uuid, path, startedAt)
}
// Resume continues an in progress layer upload, returning the current
// state of the upload.
func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) {
ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume")
startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{
name: ls.repository.Name(),
uuid: uuid,
})
if err != nil {
return nil, err
}
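
	// Read back the startedat marker; a missing marker means the upload is
	// unknown.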
startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
return nil, distribution.ErrLayerUploadUnknown
default:
return nil, err
}
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return nil, err
}
path, err := ls.repository.pm.path(uploadDataPathSpec{
name: ls.repository.Name(),
uuid: uuid,
})
if err != nil {
return nil, err
}
return ls.newLayerUpload(uuid, path, startedAt)
}
// newLayerUpload allocates a new upload controller with the given state.
func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) {
fw, err := newFileWriter(ls.repository.driver, path)
if err != nil {
return nil, err
}
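
	// Wrap the buffered file writer in a layerWriter that carries the upload's
	// uuid and start time.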
lw := &layerWriter{
layerStore: ls,
uuid: uuid,
startedAt: startedAt,
bufferedFileWriter: *fw,
}
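
	// Set up the resumable digester, if supported, so the digest of the upload
	// can be resumed without rehashing from the beginning.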
lw.setupResumableDigester()
return lw, nil
}
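
// path returns the storage path of the blob identified by dgst, traversing the
// repository's layer link to enforce that the layer belongs to this repository.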
func (ls *layerStore) path(dgst digest.Digest) (string, error) {
// We must traverse this path through the link to enforce ownership.
layerLinkPath, err := ls.repository.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst})
if err != nil {
return "", err
}
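
	// Resolve the link to the canonical blob path in the backend.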
blobPath, err := ls.repository.blobStore.resolve(layerLinkPath)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
return "", distribution.ErrUnknownLayer{
FSLayer: manifest.FSLayer{BlobSum: dgst},
}
default:
return "", err
}
}
return blobPath, nil
}