Example 1
    def append(self):
        # Package the build context: export the notebook if running inside
        # one, otherwise tar up the current directory.
        if notebook_helper.is_in_notebook():
            notebook_helper.export_notebook_to_tar_gz(
                self.notebook_file,
                TEMP_TAR_GZ_FILENAME,
                converted_filename=self.get_python_entrypoint())
        else:
            utils.generate_context_tarball(".", TEMP_TAR_GZ_FILENAME)
        transport = transport_pool.Http(httplib2.Http, size=_THREADS)
        # Pull the base image and append the context tarball as a new layer.
        src = docker_name.Tag(self.base_image)
        creds = docker_creds.DefaultKeychain.Resolve(src)
        with v2_2_image.FromRegistry(src, creds, transport) as src_image:
            with open(TEMP_TAR_GZ_FILENAME, 'rb') as f:
                new_img = append.Layer(src_image, f.read())
        if self.image_tag is None:
            self.image_tag = new_img.digest().split(":")[1]
        # Push the appended image, mounting layers from the base repository.
        dst = docker_name.Tag(self.full_image_name())
        creds = docker_creds.DefaultKeychain.Resolve(dst)
        with docker_session.Push(dst,
                                 creds,
                                 transport,
                                 threads=_THREADS,
                                 mount=[src.as_repository()]) as session:
            logger.warning("Uploading {}".format(self.full_image_name()))
            session.upload(new_img)
        os.remove(TEMP_TAR_GZ_FILENAME)
        logger.warning("Pushed image {}".format(self.full_image_name()))
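This method, like most snippets in this listing, relies on module-level imports and constants that are not shown. Below is a minimal sketch of the assumed preamble: the containerregistry import paths are the library's actual modules, while TEMP_TAR_GZ_FILENAME, _THREADS and logger are placeholders inferred from the code (the real values in the original source may differ), and the fairing-specific helpers (notebook_helper, utils) are omitted.

import logging
import os

import httplib2

from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import append
from containerregistry.client.v2_2 import docker_image as v2_2_image
from containerregistry.client.v2_2 import docker_session
from containerregistry.transport import transport_pool

# Assumed module-level constants used by the method above; the actual values
# in the original source may differ.
TEMP_TAR_GZ_FILENAME = '/tmp/fairing.layer.tar.gz'
_THREADS = 8

logger = logging.getLogger(__name__)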
Example 2
def main():
    args = parser.parse_args()

    if not args.src_image or not args.tarball or not args.dst_image:
        raise Exception('--src-image, --dst-image and --tarball are required '
                        'arguments.')

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    src = docker_name.Tag(args.src_image)
    dst = docker_name.Tag(args.dst_image)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(src)
    with v2_2_image.FromRegistry(src, creds, transport) as src_image:
        with open(args.tarball, 'rb') as f:
            new_img = append.Layer(src_image, f.read())

    creds = docker_creds.DefaultKeychain.Resolve(dst)
    with docker_session.Push(dst, creds, transport,
                             threads=_THREADS) as session:
        session.upload(new_img)
Example 3
def main():
    logging_setup.DefineCommandLineArgs(parser)
    args = parser.parse_args()
    logging_setup.Init(args=args)

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # This library can support push-by-digest, but the likelihood of a user
    # correctly providing us with the digest without using this library
    # directly is essentially nil.
    src = docker_name.Tag(args.src_image)
    dst = docker_name.Tag(args.dst_image)

    # Resolve the appropriate credential to use based on the standard Docker
    # client logic.
    creds = docker_creds.DefaultKeychain.Resolve(src)
    logging.info('Pulling v2.2 image from %r ...', src)
    with v2_2_image.FromRegistry(src, creds, transport) as src_image:
        with open(args.tarball, 'rb') as f:
            new_img = append.Layer(src_image, f.read())

    creds = docker_creds.DefaultKeychain.Resolve(dst)
    with docker_session.Push(dst,
                             creds,
                             transport,
                             threads=_THREADS,
                             mount=[src.as_repository()]) as session:
        logging.info('Starting upload ...')
        session.upload(new_img)
        digest = new_img.digest()

        print('{name} was published with digest: {digest}'.format(
            name=dst, digest=digest))
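Examples 2 and 3 parse flags from a module-level parser that is not shown (Example 3 additionally registers the library's logging flags via logging_setup, presumably containerregistry.tools.logging_setup). Below is a minimal sketch of the flag definitions the code implies; the flag names follow from the args attributes and the error message above, while the help strings are assumptions.

import argparse

parser = argparse.ArgumentParser(
    description='Append a tarball to a docker image as a new layer.')
parser.add_argument('--src-image', action='store',
                    help='The name of the image to append to.')
parser.add_argument('--dst-image', action='store',
                    help='The name of the resulting image.')
parser.add_argument('--tarball', action='store',
                    help='The path of the tarball to append as a layer.')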
Example 4
    def CreatePackageBase(self, base_image, cache, use_cache=True):
        """Override."""
        # Figure out if we need to override entrypoint.
        # Save the overrides for later to avoid adding an extra layer.
        pj_contents = {}
        if self._ctx.Contains(_PACKAGE_JSON):
            pj_contents = json.loads(self._ctx.GetFile(_PACKAGE_JSON))
        entrypoint = parse_entrypoint(pj_contents)
        overrides = metadata.Overrides(entrypoint=entrypoint)

        descriptor = None
        for f in [_PACKAGE_LOCK, _PACKAGE_JSON]:
            if self._ctx.Contains(f):
                descriptor = f
                descriptor_contents = self._ctx.GetFile(f)
                break

        if not descriptor:
            logging.info('No package descriptor found. No packages installed.')

            # Add the overrides now.
            return append.Layer(base_image, tar_gz=None, overrides=overrides)

        checksum = hashlib.sha256(descriptor_contents).hexdigest()
        if use_cache:
            hit = cache.Get(base_image, _NODE_NAMESPACE, checksum)
            if hit:
                logging.info('Found cached dependency layer for %s' % checksum)
                return hit
            else:
                logging.info('No cached dependency layer for %s' % checksum)
        else:
            logging.info('Skipping checking cache for dependency layer %s'
                         % checksum)

        layer, sha = self._gen_package_tar(descriptor, descriptor_contents)

        with append.Layer(
          base_image, layer, diff_id=sha, overrides=overrides) as dep_image:
            if use_cache:
                logging.info('Storing layer %s in cache.', sha)
                cache.Store(base_image, _NODE_NAMESPACE, checksum, dep_image)
            else:
                logging.info('Skipping storing layer %s in cache.', sha)
            return dep_image
Example 5
    def _build(self, transport, src):
        # Build the context tarball and derive the image tag from its hash.
        file, hash = self.preprocessor.context_tar_gz()
        self.context_file, self.context_hash = file, hash
        self.image_tag = self.full_image_name(self.context_hash)
        creds = docker_creds.DefaultKeychain.Resolve(src)
        # Pull the base image and append the context tarball as a new layer.
        with v2_2_image.FromRegistry(src, creds, transport) as src_image:
            with open(self.context_file, 'rb') as f:
                new_img = append.Layer(src_image, f.read())
        return new_img
Example 6
  def CreatePackageBase(self, base_image, cache):
    """Override."""
    # Check the cache for a dependency layer keyed by the requirements file.
    descriptor = self._ctx.GetFile('requirements.txt')
    checksum = hashlib.sha256(descriptor).hexdigest()
    hit = cache.Get(base_image, _PYTHON_NAMESPACE, checksum)
    if hit:
      return hit

    # Build a gzipped tarball containing the resolved wheels and their scripts.
    buf = cStringIO.StringIO()
    with tarfile.open(fileobj=buf, mode='w:gz') as out:
      for whl in self._ResolveWHLs(descriptor):
        self._AddWHLFiles(whl, out)
        self._AddWHLScripts(whl, out)
    layer = buf.getvalue()

    # Append the dependency layer, store it in the cache, and return it.
    with append.Layer(base_image, layer) as dep_image:
      cache.Store(base_image, _PYTHON_NAMESPACE, checksum, dep_image)
    return append.Layer(base_image, layer)
Example 7
    def _build(self, transport, src):
        # Build the context tarball and derive the image tag from its hash.
        file, hash = self.preprocessor.context_tar_gz()  # pylint:disable=redefined-builtin
        self.context_file, self.context_hash = file, hash
        self.image_tag = self.full_image_name(self.context_hash)
        creds = docker_creds.DefaultKeychain.Resolve(src)
        # Append the context as a new layer, overriding cmd, user and env.
        with v2_2_image.FromRegistry(src, creds, transport) as src_image:
            with open(self.context_file, 'rb') as f:
                new_img = append.Layer(src_image, f.read(), overrides=metadata.Overrides(
                    cmd=self.preprocessor.get_command(),
                    user='******', env={"FAIRING_RUNTIME": "1"}))
        return new_img
Example 8
def handle_app(pm, cache_dir):
    packages = pm.get_package_list()
    img = pm.base_image()
    # This loop could be parallelized.
    for pkg in packages:
        contents = get_layer_cache(pkg, cache_dir)
        if not contents:
            contents = pm.build_layer_for_one_package(pkg)
            set_layer_cache(pkg, contents, cache_dir)
        # Append the layer whether it came from the cache or was just built.
        img = append.Layer(img, contents)
    return img
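handle_app depends on get_layer_cache and set_layer_cache helpers that are not shown. A sketch of what a simple file-based implementation could look like, assuming the cache key is derived from the package's string form (every name and detail here is hypothetical):

import hashlib
import os


def _cache_path(pkg, cache_dir):
    # Hypothetical: key each cache entry on a hash of the package's repr.
    key = hashlib.sha256(str(pkg).encode('utf-8')).hexdigest()
    return os.path.join(cache_dir, key + '.tar.gz')


def get_layer_cache(pkg, cache_dir):
    path = _cache_path(pkg, cache_dir)
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return f.read()
    return None


def set_layer_cache(pkg, contents, cache_dir):
    with open(_cache_path(pkg, cache_dir), 'wb') as f:
        f.write(contents)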
Example 9
def AppendLayersIntoImage(imgs):
    with Timing('Stitching layers into final image'):
        for i, img in enumerate(imgs):
            if i == 0:
                result_image = img
                continue
            diff_ids = img.diff_ids()
            for diff_id in diff_ids:
                lyr = img.blob(img._diff_id_to_digest(diff_id))
                overrides = CfgDctToOverrides(json.loads(img.config_file()))
                result_image = append.Layer(result_image,
                                            lyr,
                                            diff_id=diff_id,
                                            overrides=overrides)
        return result_image
Example 10
    def AppendLayersIntoImage(self, lyr_imgs):
        for i, lyr_img in enumerate(lyr_imgs):
            if i == 0:
                result_image = lyr_img
                continue
            img = lyr_img.GetImage()
            diff_ids = img.diff_ids()
            for diff_id in diff_ids:
                lyr = img.blob(img._diff_id_to_digest(diff_id))
                overrides = ftl_util.CfgDctToOverrides(
                    json.loads(img.config_file()))

                result_image = append.Layer(result_image,
                                            lyr,
                                            diff_id=diff_id,
                                            overrides=overrides)
        return result_image
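In both Example 9 and Example 10 the first element of the input list is used as the starting image, and every subsequent image contributes its layers (with its config overrides) on top, in order. A minimal usage sketch, with all variable names being placeholders:

# Hypothetical: base_img, deps_img and app_img are v2.2 images obtained
# elsewhere (e.g. via v2_2_image.FromRegistry or earlier append.Layer calls).
final_image = AppendLayersIntoImage([base_img, deps_img, app_img])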
Example 11
def main(args):
    args = parser.parse_args(args)
    logging.getLogger().setLevel(_LEVEL_MAP[args.verbosity])
    logging.basicConfig(
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d,%H:%M:%S')
    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)

    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)

    ctx = context.Workspace(args.directory)
    cash = cache.Registry(target_image.as_repository(),
                          target_creds,
                          transport,
                          threads=_THREADS,
                          mount=[base_name])
    bldr = builder.From(ctx)
    with docker_image.FromRegistry(base_name, base_creds,
                                   transport) as base_image:

        # Create (or pull from cache) the base image with the
        # package descriptor installation overlaid.
        logging.info('Generating dependency layer...')
        with bldr.CreatePackageBase(base_image, cash, args.cache) as deps:
            # Construct the application layer from the context.
            logging.info('Generating app layer...')
            app_layer, diff_id = bldr.BuildAppLayer()
            with append.Layer(deps, app_layer, diff_id=diff_id) as app_image:
                if args.output_path:
                    with tarfile.open(name=args.output_path, mode='w') as tar:
                        save.tarball(target_image, app_image, tar)
                    logging.info("{0} tarball located at {1}".format(
                        str(target_image), args.output_path))
                    return
                with docker_session.Push(target_image,
                                         target_creds,
                                         transport,
                                         threads=_THREADS,
                                         mount=[base_name]) as session:
                    logging.info('Pushing final image...')
                    session.upload(app_image)
Example 12
def main():
    args = parser.parse_args()

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)

    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)

    with context.Workspace(args.directory) as ctx:
        with cache.Registry(target_image.as_repository(),
                            target_creds,
                            transport,
                            threads=_THREADS,
                            mount=[base_name]) as cash:
            with builder.From(ctx) as bldr:
                with docker_image.FromRegistry(base_name, base_creds,
                                               transport) as base_image:

                    # Create (or pull from cache) the base image with the package
                    # descriptor installation overlaid.
                    with bldr.CreatePackageBase(base_image,
                                                cash) as base_with_deps:
                        # Construct the application layer from the context.
                        app_layer = bldr.BuildAppLayer()

                        with append.Layer(base_with_deps,
                                          app_layer) as app_image:
                            with docker_session.Push(target_image,
                                                     target_creds,
                                                     transport,
                                                     threads=_THREADS,
                                                     mount=[base_name]) as session:
                                session.upload(app_image)

    print("Hi, One Build.")
Example 13
def main(args):
    args = parser.parse_args(args)
    logging.getLogger().setLevel(_LEVEL_MAP[args.verbosity])

    transport = transport_pool.Http(httplib2.Http, size=_THREADS)

    # TODO(mattmoor): Support digest base images.
    base_name = docker_name.Tag(args.base)
    base_creds = docker_creds.DefaultKeychain.Resolve(base_name)

    target_image = docker_name.Tag(args.name)
    target_creds = docker_creds.DefaultKeychain.Resolve(target_image)

    ctx = context.Workspace(args.directory)
    cash = cache.Registry(target_image.as_repository(),
                          target_creds,
                          transport,
                          threads=_THREADS,
                          mount=[base_name])
    bldr = builder.From(ctx)
    with docker_image.FromRegistry(base_name, base_creds,
                                   transport) as base_image:

        # Create (or pull from cache) the base image with the
        # package descriptor installation overlaid.
        logging.info('Generating dependency layer...')
        with bldr.CreatePackageBase(base_image, cash) as deps:
            # Construct the application layer from the context.
            logging.info('Generating app layer...')
            app_layer, diff_id = bldr.BuildAppLayer()
            with append.Layer(deps, app_layer, diff_id=diff_id) as app_image:
                with docker_session.Push(target_image,
                                         target_creds,
                                         transport,
                                         threads=_THREADS,
                                         mount=[base_name]) as session:
                    logging.info('Pushing final image...')
                    session.upload(app_image)
Example 14
    def CreatePackageBase(self, base_image, cache):
        """Override."""
        # Figure out if we need to override entrypoint.
        # Save the overrides for later to avoid adding an extra layer.
        pj_contents = {}
        if self._ctx.Contains(_PACKAGE_JSON):
            pj_contents = json.loads(self._ctx.GetFile(_PACKAGE_JSON))
        entrypoint = parse_entrypoint(pj_contents)
        overrides = metadata.Overrides(entrypoint=entrypoint)

        descriptor = None
        for f in [_PACKAGE_LOCK, _PACKAGE_JSON]:
            if self._ctx.Contains(f):
                descriptor = f
                descriptor_contents = self._ctx.GetFile(f)
                break

        if not descriptor:
            logging.info('No package descriptor found. No packages installed.')

            # Add the overrides now.
            return append.Layer(base_image, tar_gz=None, overrides=overrides)

        checksum = hashlib.sha256(descriptor_contents).hexdigest()
        hit = cache.Get(base_image, _NODE_NAMESPACE, checksum)
        if hit:
            logging.info('Found cached dependency layer for %s' % checksum)
            return hit
        else:
            logging.info('No cached dependency layer for %s' % checksum)

        # We want the node_modules directory rooted at /app/node_modules in
        # the final image.
        # So we build a hierarchy like:
        # /$tmp/app/node_modules
        # And use the -C flag to tar to root the tarball at /$tmp.
        tmp = tempfile.mkdtemp()
        app_dir = os.path.join(tmp, 'app')
        os.mkdir(app_dir)

        # Copy out the relevant package descriptors to a tempdir.
        with open(os.path.join(app_dir, descriptor), 'w') as f:
            f.write(descriptor_contents)

        tar_path = tempfile.mktemp()
        check_gcp_build(json.loads(self._ctx.GetFile(_PACKAGE_JSON)), app_dir)
        subprocess.check_call(
            ['rm', '-rf', os.path.join(app_dir, 'node_modules')])
        subprocess.check_call(['npm', 'install', '--production', '--no-cache'],
                              cwd=app_dir)
        subprocess.check_call(['tar', '-C', tmp, '-cf', tar_path, '.'])

        # We need the sha of the unzipped and zipped tarball.
        # So for performance, tar, sha, zip, sha.
        # We use gzip for performance instead of python's zip.
        sha = 'sha256:' + hashlib.sha256(open(tar_path, 'rb').read()).hexdigest()
        subprocess.check_call(['gzip', tar_path])
        layer = open(os.path.join(tmp, tar_path + '.gz'), 'rb').read()

        with append.Layer(base_image, layer, diff_id=sha,
                          overrides=overrides) as dep_image:
            logging.info('Storing layer %s in cache.', sha)
            cache.Store(base_image, _NODE_NAMESPACE, checksum, dep_image)
            return dep_image