Example #1
def get_layer(self, image_id, repo_name, download_folder=None):
    '''download an image layer (.tar.gz) to a specified download folder.

       Parameters
       ==========
       image_id: the id of the layer to download
       repo_name: the image name (library/ubuntu) to retrieve
       download_folder: download to this folder. If not set, uses temp.

    '''
    url = self._get_layerLink(repo_name, image_id)

    bot.verbose("Downloading layers from %s" % url)

    download_folder = get_tmpdir(download_folder)
    download_folder = "%s/%s.tar.gz" % (download_folder, image_id)

    # Update user what we are doing
    bot.debug("Downloading layer %s" % image_id)

    # Step 1: Download the layer atomically
    file_name = "%s.%s" % (download_folder,
                           next(tempfile._get_candidate_names()))

    tar_download = self.download(url, file_name)

    try:
        shutil.move(tar_download, download_folder)
    except Exception:
        msg = "Cannot move layer %s to %s," % (tar_download, download_folder)
        msg += " was there a problem with download?"
        bot.error(msg)
        sys.exit(1)
    return download_folder
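
The pattern above downloads to a uniquely named temporary file and only then moves it to the final path, so an interrupted download never leaves a partial file at the destination. A minimal standalone sketch of that idea (urllib is used here only for illustration; it is not how self.download is assumed to work):

import shutil
import tempfile
import urllib.request

def atomic_fetch(url, destination):
    # download to a unique temporary name next to the destination
    tmp_name = "%s.%s" % (destination, next(tempfile._get_candidate_names()))
    urllib.request.urlretrieve(url, tmp_name)
    # rename into place; on the same filesystem this is effectively atomic
    shutil.move(tmp_name, destination)
    return destination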
Example #2
def create_build_package(package_files, working_dir=None):
    """given a list of files, copy them to a temporary folder,
       compress into a .tar.gz, and rename based on the file hash.
       Return the full path to the .tar.gz in the temporary folder.

       Parameters
       ==========
       package_files: a list of files to include in the tar.gz
       working_dir: if set, the archive paths for the recipe and other
                    files are made relative to this directory.

    """
    # Ensure package files all exist
    for package_file in package_files:
        if not os.path.exists(package_file):
            bot.exit("Cannot find %s." % package_file)

    bot.log("Generating build package for %s files..." % len(package_files))
    build_dir = get_tmpdir(prefix="sregistry-build")
    build_tar = "%s/build.tar.gz" % build_dir
    tar = tarfile.open(build_tar, "w:gz")

    # Create the tar.gz, making sure relative to working_dir
    for package_file in package_files:

        # Get a relative path
        relative_path = get_relative_path(package_file, working_dir)
        tar.add(package_file, arcname=relative_path)
    tar.close()

    # Get hash (sha256), and rename file
    sha256 = get_file_hash(build_tar)
    hash_tar = "%s/%s.tar.gz" % (build_dir, sha256)
    shutil.move(build_tar, hash_tar)
    return hash_tar
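
get_relative_path is an sregistry helper; a plausible minimal version (an assumption, not the library's actual implementation) wraps os.path.relpath and falls back to the basename when no working directory is given:

import os

def get_relative_path(package_file, working_dir=None):
    # e.g. recipes/Singularity instead of /tmp/build/recipes/Singularity
    if working_dir is not None:
        return os.path.relpath(package_file, working_dir)
    # default: flatten to the file's basename
    return os.path.basename(package_file)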
Example #3
def create_build_package(package_files):
    '''given a list of files, copy them to a temporary folder,
       compress into a .tar.gz, and rename based on the file hash.
       Return the full path to the .tar.gz in the temporary folder.

       Parameters
       ==========
       package_files: a list of files to include in the tar.gz

    '''
    # Ensure package files all exist
    for package_file in package_files:
        if not os.path.exists(package_file):
            bot.exit('Cannot find %s.' % package_file)

    bot.log('Generating build package for %s files...' % len(package_files))
    build_dir = get_tmpdir(prefix="sregistry-build")
    build_tar = '%s/build.tar.gz' % build_dir
    tar = tarfile.open(build_tar, "w:gz")

    # Create the tar.gz
    for package_file in package_files:
        tar.add(package_file)
    tar.close()

    # Get hash (sha256), and rename file
    sha256 = get_file_hash(build_tar)
    hash_tar = "%s/%s.tar.gz" % (build_dir, sha256)
    shutil.move(build_tar, hash_tar)
    return hash_tar
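
Hypothetical usage (the file names are placeholders): the returned archive lives in a temporary "sregistry-build" folder and is named after its sha256 hash.

package = create_build_package(["Singularity", "runscript.sh"])
print(package)  # e.g. /tmp/sregistry-build.xxxxxx/<sha256>.tar.gz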
Example #4
def test_get_tmpdir_tmpfile():
    print("Testing utils.get_tmpdir, get_tmpfile")
    import os
    import shutil
    from sregistry.utils import get_tmpdir, get_tmpfile
    tmpdir = get_tmpdir()
    assert os.path.exists(tmpdir)
    assert os.path.basename(tmpdir).startswith('sregistry')
    shutil.rmtree(tmpdir)
    tmpdir = get_tmpdir(prefix='name')
    assert os.path.basename(tmpdir).startswith('name')
    shutil.rmtree(tmpdir)
    tmpfile = get_tmpfile()
    assert 'sregistry' in tmpfile
    os.remove(tmpfile)
    tmpfile = get_tmpfile(prefix="pancakes")
    assert 'pancakes' in tmpfile
    os.remove(tmpfile)
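
Minimal sketches of the two helpers, inferred only from what the test asserts (a "sregistry"-prefixed temporary directory or file, with an overridable prefix); sregistry's real implementations may differ:

import os
import tempfile

def get_tmpdir(requested_tmpdir=None, prefix=""):
    # create a uniquely named folder under the requested (or system) temp dir
    base = requested_tmpdir or tempfile.gettempdir()
    name = "%s.%s" % (prefix or "sregistry", next(tempfile._get_candidate_names()))
    tmpdir = os.path.join(base, name)
    os.makedirs(tmpdir)
    return tmpdir

def get_tmpfile(tmpdir=None, prefix=""):
    # create (and keep) a uniquely named temporary file
    fd, tmpfile = tempfile.mkstemp(prefix=prefix or "sregistry", dir=tmpdir)
    os.close(fd)
    return tmpfile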
Example #5
def get_download_cache(self, destination, subfolder="docker"):
    """determine the folder to use for the atomic download of layers. If
       the user has set a Singularity cache directory, honor it. Otherwise,
       use the Singularity default (or a temporary folder if caching is
       disabled).
    """
    # First priority after user specification is Singularity Cache
    if destination is None:
        destination = self._get_setting("SINGULARITY_CACHEDIR", SINGULARITY_CACHE)

        # If the setting is also empty, the user has disabled the cache (use tmp)
        destination = get_tmpdir(destination)

    if not destination.endswith(subfolder):
        destination = "%s/%s" % (destination, subfolder)

    # Create the subfolder, if it doesn't exist
    mkdir_p(destination)
    return destination
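
Hypothetical call from a docker client instance (the client object is an assumption): destination=None falls back to the SINGULARITY_CACHEDIR setting (or a temporary folder if caching is disabled), and the subfolder is appended in every case.

cache_dir = client.get_download_cache(destination=None, subfolder="docker")
# e.g. $SINGULARITY_CACHEDIR/docker when the cache directory is configured,
# otherwise a temporary folder ending in /docker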
Example #6
def _pull(self,
          file_name,
          names,
          save=True,
          force=False,
          uri="docker://",
          **kwargs):
    """pull an image from Docker Hub. This is a (less than ideal) workaround
       that actually does the following:

       - creates a sandbox folder
       - adds docker layers, metadata folder, and custom metadata to it
       - converts to a squashfs image with build

       The docker manifests are stored with registry metadata.

       Parameters
       ==========
       names: refers to the uri given by the user to pull in the format
              <collection>/<namespace>. You should have an API that is able to
              retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    """

    # Use Singularity to build the image, based on user preference
    if file_name is None:
        file_name = self._get_storage_name(names)

    # Determine if the user already has the image
    if os.path.exists(file_name) and force is False:
        bot.exit("Image exists! Remove first, or use --force to overwrite")

    digest = names["version"] or names["tag"]

    # Create a sandbox folder to build from (temp dir prefixed "sregistry-sandbox")
    sandbox = get_tmpdir(prefix="sregistry-sandbox")

    # First effort, get image via Sregistry
    layers = self._download_layers(names["url"], digest)

    # This is the url where the manifests were obtained
    url = self._get_manifest_selfLink(names["url"], digest)

    # Add environment to the layers
    envtar = self._get_environment_tar()
    layers = [envtar] + layers

    # Explode the layers into the (initially empty) sandbox folder
    for layer in layers:
        bot.info("Exploding %s" % layer)
        result = extract_tar(layer, sandbox, handle_whiteout=True)
        if result["return_code"] != 0:
            bot.exit(result["message"])

    sudo = kwargs.get("sudo", False)

    # Build from a sandbox (recipe) into the image_file (squashfs)
    image_file = Singularity.build(image=file_name, recipe=sandbox, sudo=sudo)

    # Fall back to using Singularity
    if image_file is None:
        bot.info("Downloading with native Singularity, please wait...")
        image = file_name.replace("docker://", uri)
        image_file = Singularity.pull(image, pull_folder=sandbox)

    # Save to local storage
    if save is True:

        # Did we get the manifests?
        manifests = {}
        if hasattr(self, "manifests"):
            manifests = self.manifests

        container = self.add(image_path=image_file,
                             image_uri=names["uri"],
                             metadata=manifests,
                             url=url)

        # When the container is created, this is the path to the image
        image_file = container.image

    if os.path.exists(image_file):
        bot.debug("Retrieved image file %s" % image_file)
        bot.custom(prefix="Success!", message=image_file)

    # Clean up sandbox
    shutil.rmtree(sandbox)

    return image_file
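
Hypothetical call (client is a placeholder for the docker client instance; the names dictionary is assumed here to come from sregistry's parse_image_name helper and to carry the "url", "uri", "tag", and "version" fields referenced above):

from sregistry.utils import parse_image_name, remove_uri

names = parse_image_name(remove_uri("docker://library/ubuntu:latest"))
image_path = client._pull(file_name=None, names=names, save=True)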
Example #7
def _pull(self, file_name, names, save=True, force=False, **kwargs):
    '''pull an image from AWS. This is a (less than ideal) workaround
       that actually does the following:

       - creates a sandbox folder
       - adds docker layers from S3
       - converts to a squashfs image with build

       Parameters
       ==========
       names: refers to the uri given by the user to pull in the format
              <collection>/<namespace>. You should have an API that is able to
              retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''

    # Use Singularity to build the image, based on user preference
    if file_name is None:
        file_name = self._get_storage_name(names)

    # Determine if the user already has the image
    if os.path.exists(file_name) and force is False:
        bot.error('Image exists! Remove first, or use --force to overwrite')
        sys.exit(1)

    digest = names['version'] or names['tag']

    # Build from sandbox
    sandbox = get_tmpdir(prefix="sregistry-sandbox")

    # First effort, get image via Sregistry
    layers, url = self._download_layers(names['url'], digest)

    # Add environment to the layers
    envtar = self._get_environment_tar()
    layers = [envtar] + layers

    # Explode the layers into the (initially empty) sandbox folder
    for layer in layers:
        bot.info('Exploding %s' % layer)
        result = extract_tar(layer, sandbox, handle_whiteout=True)
        if result['return_code'] != 0:
            bot.error(result['message'])
            sys.exit(1)

    sudo = kwargs.get('sudo', False)

    # Build from a sandbox (recipe) into the image_file (squashfs)
    image_file = Singularity.build(image=file_name, recipe=sandbox, sudo=sudo)

    # Fall back to using Singularity
    if image_file is None:
        bot.info('Downloading with native Singularity, please wait...')
        image = file_name.replace('aws://', 'docker://')
        image_file = Singularity.pull(image, pull_folder=sandbox)

    # Save to local storage
    if save is True:

        # Did we get the manifests?
        manifest = {}
        if hasattr(self, 'manifest'):
            manifest = self.manifest

        container = self.add(image_path=image_file,
                             image_uri=names['uri'],
                             metadata=manifest,
                             url=url)

        # When the container is created, this is the path to the image
        image_file = container.image

    if os.path.exists(image_file):
        bot.debug('Retrieved image file %s' % image_file)
        bot.custom(prefix="Success!", message=image_file)

    # Clean up sandbox
    shutil.rmtree(sandbox)

    return image_file