def process_main(self, item=None):
    """
    For each layer that we need to save, create the ancestry file, compress
    layer.tar into the "layer" file, and then call the parent class to finish
    processing.

    :param item: A docker image unit
    :type  item: pulp_docker.plugins.models.Image
    """
    # Write out the ancestry file
    ancestry = tarutils.get_ancestry(item.image_id, self.parent.metadata)
    layer_dir = os.path.join(self.get_working_dir(), item.image_id)
    with open(os.path.join(layer_dir, 'ancestry'), 'w') as ancestry_fp:
        json.dump(ancestry, ancestry_fp)

    # compress layer.tar to a new file called layer
    layer_src_path = os.path.join(self.get_working_dir(), item.image_id, 'layer.tar')
    layer_dest_path = os.path.join(self.get_working_dir(), item.image_id, 'layer')
    with open(layer_src_path) as layer_src:
        with contextlib.closing(gzip.open(layer_dest_path, 'w')) as layer_dest:
            # these can be big files, so we chunk them
            reader = functools.partial(layer_src.read, 4096)
            for chunk in iter(reader, ''):
                layer_dest.write(chunk)
    # we don't need layer.tar anymore
    os.remove(layer_src_path)

    super(AddImages, self).process_main(item=item)
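# get_ancestry() is used above but not shown. A minimal sketch of the expected
# behavior, assuming the metadata dict maps each image ID to a dict with an
# optional 'parent' key; this is an illustration, not the tarutils
# implementation itself:
def _ancestry_sketch(image_id, metadata):
    """Return the image ID and all of its ancestors, child first."""
    ancestry = []
    while image_id:
        ancestry.append(image_id)
        # walk up the parent chain until an image has no parent (the base layer)
        image_id = metadata[image_id].get('parent')
    return ancestry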
def process_main(self, item=None):
    """
    For each layer that we need to save, create the ancestry file, then call
    the parent class to finish processing.

    :param item: A docker image unit
    :type  item: pulp_docker.plugins.db.models.DockerImage
    """
    # Write out the ancestry file
    ancestry = tarutils.get_ancestry(item.image_id, self.parent.metadata)
    layer_dir = os.path.join(self.get_working_dir(), item.image_id)
    with open(os.path.join(layer_dir, 'ancestry'), 'w') as ancestry_fp:
        json.dump(ancestry, ancestry_fp)

    super(AddDockerUnits, self).process_main(item=item)
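# For illustration only: the ancestry file written above contains the
# JSON-serialized list returned by get_ancestry(), i.e. image IDs ordered from
# the image itself back to its base layer. The IDs below are shortened and
# hypothetical.
import json

example_ancestry = ['c59ad6dd3fce', '2744b6855d24', '511136ea3c5a']  # child -> parent -> base
print(json.dumps(example_ancestry))
# prints: ["c59ad6dd3fce", "2744b6855d24", "511136ea3c5a"]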
def upload_unit(self, repo, type_id, unit_key, metadata, file_path, conduit, config):
    """
    Upload a docker image. The file should be the product of "docker save".
    This will import all images in that tarfile into the specified repository,
    each as an individual unit. This will also update the repo's tags to
    reflect the tags present in the tarfile.

    The following is copied from the superclass.

    :param repo:      metadata describing the repository
    :type  repo:      pulp.plugins.model.Repository
    :param type_id:   type of unit being uploaded
    :type  type_id:   str
    :param unit_key:  identifier for the unit, specified by the user
    :type  unit_key:  dict
    :param metadata:  any user-specified metadata for the unit
    :type  metadata:  dict
    :param file_path: path on the Pulp server's filesystem to the temporary
                      location of the uploaded file; may be None in the event
                      that a unit is comprised entirely of metadata and has no
                      bits associated
    :type  file_path: str
    :param conduit:   provides access to relevant Pulp functionality
    :type  conduit:   pulp.plugins.conduits.unit_add.UnitAddConduit
    :param config:    plugin configuration for the repository
    :type  config:    pulp.plugins.config.PluginCallConfiguration

    :return: A dictionary describing the success or failure of the upload. It
             must contain the following keys:
               'success_flag': bool. Indicates whether the upload was successful
               'summary':      json-serializable object, providing summary
               'details':      json-serializable object, providing details
    :rtype:  dict
    """
    # retrieve metadata from the tarball
    metadata = tarutils.get_metadata(file_path)
    # turn that metadata into a collection of models
    mask_id = config.get(constants.CONFIG_KEY_MASK_ID)
    models = upload.get_models(metadata, mask_id)
    ancestry = tarutils.get_ancestry(models[0].image_id, metadata)
    # save those models as units in pulp
    upload.save_models(conduit, models, ancestry, file_path)
    upload.update_tags(repo.id, file_path)
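# Note that the docstring above requires a report dict, while the body has no
# explicit return statement. A minimal sketch of the shape the docstring
# describes; the values are assumptions for illustration, not taken from the
# plugin's actual behavior:
report = {
    'success_flag': True,  # bool: whether the upload succeeded
    'summary': {},         # json-serializable summary object
    'details': {},         # json-serializable details object
}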
def test_from_busybox(self):
    metadata = tarutils.get_metadata(busybox_tar_path)
    ancestry = tarutils.get_ancestry(busybox_ids[0], metadata)

    self.assertEqual(ancestry, busybox_ids)