package client

import (
	"fmt"
	"io"

	"github.com/docker/distribution/storage"

	log "github.com/Sirupsen/logrus"
)

// simultaneousLayerPullWindow is the size of the parallel layer pull window.
// A layer may not be pulled until the layer preceding it by the length of the
// pull window has been successfully pulled.
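// For example, with a window of 4, the pull of layer 4 (zero-indexed) may
// not begin until the pull of layer 0 has completed.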
const simultaneousLayerPullWindow = 4

// Pull implements a client pull workflow for the image defined by the given
// name and tag pair, using the given ObjectStore for local manifest and layer
// storage.
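//
// A hypothetical invocation (c and store are illustrative values, not
// provided by this package):
//
//	if err := Pull(c, store, "library/ubuntu", "14.04"); err != nil {
//		log.Fatal(err)
//	}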
func Pull(c Client, objectStore ObjectStore, name, tag string) error {
	manifest, err := c.GetImageManifest(name, tag)
	if err != nil {
		return err
	}
	log.WithField("manifest", manifest).Info("Pulled manifest")

	if len(manifest.FSLayers) != len(manifest.History) {
		return fmt.Errorf("Length of history not equal to number of layers")
	}
	if len(manifest.FSLayers) == 0 {
		return fmt.Errorf("Image has no layers")
	}

	errChans := make([]chan error, len(manifest.FSLayers))
	for i := range manifest.FSLayers {
		errChans[i] = make(chan error)
	}

	// To avoid leaking goroutines we must notify the pullLayer goroutines
	// of a cancellation; otherwise they will block forever.
	cancelCh := make(chan struct{})

	// Iterate over each layer in the manifest, simultaneously pulling no more
	// than simultaneousLayerPullWindow layers at a time. If an error is
	// received from a layer pull, we abort the pull.
	for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ {
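		// The loop runs simultaneousLayerPullWindow iterations past the last
		// layer so that every in-flight pull is waited on before returning.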
		dependentLayer := i - simultaneousLayerPullWindow
		if dependentLayer >= 0 {
			err := <-errChans[dependentLayer]
			if err != nil {
				log.WithField("error", err).Warn("Pull aborted")
				close(cancelCh)
				return err
			}
		}

		if i < len(manifest.FSLayers) {
			go func(i int) {
				select {
				case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]):
				case <-cancelCh: // no chance to recv until cancelCh is closed
				}
			}(i)
		}
	}
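
	// The manifest is written only after every layer has been pulled, so a
	// partially pulled image never gains a manifest in the local store.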
	err = objectStore.WriteManifest(name, tag, manifest)
	if err != nil {
		log.WithFields(log.Fields{
			"error":    err,
			"manifest": manifest,
		}).Warn("Unable to write image manifest")
		return err
	}

	return nil
}

func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.FSLayer) error {
	log.WithField("layer", fsLayer).Info("Pulling layer")

	layer, err := objectStore.Layer(fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to open local layer")
		return err
	}

	layerWriter, err := layer.Writer()
	if err == ErrLayerAlreadyExists {
		log.WithField("layer", fsLayer).Info("Layer already exists")
		return nil
	}
	if err == ErrLayerLocked {
		log.WithField("layer", fsLayer).Info("Layer download in progress, waiting")
		layer.Wait()
		return nil
	}
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to write local layer")
		return err
	}
	defer layerWriter.Close()

	if layerWriter.CurrentSize() > 0 {
		log.WithFields(log.Fields{
			"layer":       fsLayer,
			"currentSize": layerWriter.CurrentSize(),
			"size":        layerWriter.Size(),
		}).Info("Layer partially downloaded, resuming")
	}
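
	// Requesting the blob from the current local size resumes a partial
	// download rather than re-fetching bytes that are already on disk.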
	layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, layerWriter.CurrentSize())
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to download layer")
		return err
	}
	defer layerReader.Close()
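
	// The length returned by GetBlob here covers the remaining bytes (the
	// request started at CurrentSize), so the expected final size is the
	// bytes already written plus length.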
	layerWriter.SetSize(layerWriter.CurrentSize() + length)

	_, err = io.Copy(layerWriter, layerReader)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to download layer")
		return err
	}
	if layerWriter.CurrentSize() != layerWriter.Size() {
		log.WithFields(log.Fields{
			"size":        layerWriter.Size(),
			"currentSize": layerWriter.CurrentSize(),
			"layer":       fsLayer,
		}).Warn("Layer invalid size")
		return fmt.Errorf(
			"Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d",
			fsLayer, layerWriter.Size(), layerWriter.CurrentSize(),
		)
	}
	return nil
}