Example #1
0
def download_layers(self, repo_name, digest=None, destination=None):
    ''' download layers is a wrapper to do the following for a client loaded
        with a manifest for an image:

        1. use the manifests to retrieve list of digests (get_digests)
        2. atomically download the list to destination (get_layers)

        This function uses the MultiProcess client to download layers
        at the same time.

        Parameters
        ==========
        repo_name: the repository name to download layers for
        digest: optional image digest; passed through to _get_manifests
        destination: optional download folder; resolved by _get_download_cache

        Returns
        =======
        layers: list of local .tar.gz layer paths (cached or downloaded),
                with the metadata tar appended when one is created
    '''
    from sregistry.main.workers import ( Workers, download_task )

    # 1. Get manifests if not retrieved
    if not hasattr(self, 'manifests'):
        self._get_manifests(repo_name, digest)

    # Obtain list of digests, and destination for download
    digests = self._get_digests()
    destination = self._get_download_cache(destination)

    # Create multiprocess download client
    workers = Workers()

    # Download each layer atomically.
    # Use a distinct loop variable: reusing "digest" would shadow the
    # function parameter of the same name.
    tasks = []
    layers = []
    for layer_digest in digests:

        targz = "%s/%s.tar.gz" % (destination, layer_digest)

        # Only download if not in cache already
        if not os.path.exists(targz):
            url = "%s/%s/blobs/%s" % (self.base, repo_name, layer_digest)
            tasks.append((url, self.headers, targz))
        layers.append(targz)

    # Download layers with multiprocess workers. The return value is not
    # needed — "layers" already holds the expected local paths — and
    # binding it to "download_layers" would shadow this function's name.
    if tasks:
        workers.run(func=download_task, tasks=tasks)

    # Create the metadata tar
    metadata = self._create_metadata_tar(destination)
    if metadata is not None:
        layers.append(metadata)

    return layers
Example #2
0
def download_layers(self, repo_name, digest=None, destination=None):
    ''' download layers is a wrapper to do the following for a client loaded
        with a manifest for an image:

        1. use the manifests to retrieve list of digests (get_digests)
        2. atomically download the list to destination (get_layers)

        This function uses the MultiProcess client to download layers
        at the same time.

        Parameters
        ==========
        repo_name: the repository name to download layers for
        digest: optional image digest; passed to _get_manifest/_get_digests
        destination: optional download folder; resolved by _get_download_cache

        Returns
        =======
        (layers, url): the list of local .tar.gz layer paths, and the blob
        url of the last digest processed (None when there are no digests)
    '''
    from sregistry.main.workers import Workers
    from sregistry.main.workers.aws import download_task

    # Obtain list of digests, and destination for download
    self._get_manifest(repo_name, digest)
    digests = self._get_digests(repo_name, digest)
    destination = self._get_download_cache(destination)

    # Create multiprocess download client
    workers = Workers()

    # Download each layer atomically
    tasks = []
    layers = []

    # Start with a fresh token
    self._update_token()

    # Bug fix: url must be initialized before the loop — otherwise the
    # final "return layers, url" raises UnboundLocalError when digests
    # is empty.
    url = None

    # Use a distinct loop variable so the "digest" parameter is not shadowed.
    for entry in digests:

        targz = "%s/%s.tar.gz" % (destination, entry['digest'])
        url = '%s/%s/blobs/%s' % (self.base, repo_name, entry['digest'])

        # Only download if not in cache already
        if not os.path.exists(targz):
            tasks.append((url, self.headers, targz))
        layers.append(targz)

    # Download layers with multiprocess workers. The result is unused and
    # binding it to "download_layers" would shadow this function's name.
    if tasks:
        workers.run(func=download_task, tasks=tasks)

    return layers, url