def __get_or_create_manifest(self, vol_container, file_size):
    _, bucket, key = self.args['s3_service'].resolve_url_to_location(
        vol_container['image']['importManifestUrl'])
    manifest_s3path = '/'.join((bucket, key))
    try:
        # Try to fetch an existing manifest into a memory-backed temp file
        # that spills to disk past roughly 1 MB.
        with tempfile.SpooledTemporaryFile(max_size=1024000) as \
                manifest_destfile:
            get_req = GetObject.from_other(
                self, service=self.args['s3_service'],
                auth=self.args['s3_auth'], source=manifest_s3path,
                dest=manifest_destfile, show_progress=False)
            get_req.main()
            self.log.info('using existing import manifest from the server')
            manifest_destfile.seek(0)
            manifest = ImportManifest.read_from_fileobj(manifest_destfile)
    except ServerError as err:
        if err.status_code == 404:
            # No manifest on the server yet; generate one locally and
            # upload it so later invocations can reuse it.
            self.log.info('creating new import manifest')
            manifest = self.__generate_manifest(vol_container, file_size)
            tempdir = tempfile.mkdtemp()
            manifest_filename = os.path.join(tempdir,
                                             os.path.basename(key))
            with open(manifest_filename, 'w') as manifest_file:
                manifest.dump_to_fileobj(manifest_file, pretty_print=True)
            put_req = PutObject.from_other(
                get_req, source=manifest_filename, dest=manifest_s3path,
                show_progress=False)
            put_req.main()
            os.remove(manifest_filename)
            os.rmdir(tempdir)
        else:
            raise
    return manifest

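
# The method above is a get-or-create round trip: download the manifest if it
# already exists, and only generate and upload a new one when the server
# responds with 404.  A minimal, self-contained sketch of that pattern
# follows; fetch, create, and store are hypothetical callables standing in
# for GetObject.main, __generate_manifest, and PutObject.main, and the error
# type is assumed to expose the HTTP status as .status_code.
class _ServerError(Exception):
    """Hypothetical stand-in for the request layer's error type."""

    def __init__(self, status_code):
        Exception.__init__(self, status_code)
        self.status_code = status_code


def _get_or_create(fetch, create, store):
    """Return the remote object if it exists; otherwise create and upload it."""
    try:
        return fetch()
    except _ServerError as err:
        if err.status_code != 404:
            raise          # anything other than "not found" is a real failure
        obj = create()     # build the object locally
        store(obj)         # upload it so later runs can reuse it
        return obj
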
def upload_bundle_file(self, source, dest, show_progress=False,
                       **putobj_kwargs):
    if self.args.get('upload_policy'):
        if show_progress:
            # PostObject does not yet support show_progress
            print source, 'uploading...'
        if self.args.get('security_token'):
            postobj_kwargs = \
                {'x-amz-security-token': self.args['security_token']}
        else:
            postobj_kwargs = {}
        postobj_kwargs.update(putobj_kwargs)
        req = PostObject.from_other(
            self, source=source, dest=dest,
            acl=self.args.get('acl') or 'aws-exec-read',
            Policy=self.args['upload_policy'],
            Signature=self.args['upload_policy_signature'],
            AWSAccessKeyId=self.args['key_id'], **postobj_kwargs)
    else:
        req = PutObject.from_other(
            self, source=source, dest=dest,
            acl=self.args.get('acl') or 'aws-exec-read',
            retries=self.args.get('retries') or 0,
            show_progress=show_progress, **putobj_kwargs)
    req.main()

def upload_bundle_file(self, source, dest, show_progress=False,
                       **putobj_kwargs):
    if self.args.get('upload_policy'):
        if show_progress:
            # PostObject does not yet support show_progress
            print source, 'uploading...'
        req = PostObject.from_other(
            self, source=source, dest=dest,
            acl=self.args.get('acl') or 'aws-exec-read',
            Policy=self.args['upload_policy'],
            Signature=self.args['upload_policy_signature'],
            AWSAccessKeyId=self.args['key_id'], **putobj_kwargs)
    else:
        req = PutObject.from_other(
            self, source=source, dest=dest,
            acl=self.args.get('acl') or 'aws-exec-read',
            retries=self.args.get('retries') or 0,
            show_progress=show_progress, **putobj_kwargs)
    req.main()

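
# Both variants of upload_bundle_file above choose between two upload styles:
# a policy-signed POST when an upload policy is supplied, and an authenticated
# PUT otherwise.  The sketch below only illustrates how the POST keyword
# arguments are assembled and merged with caller-supplied overrides; the field
# names simply mirror the keywords passed to PostObject above and are not a
# statement about PostObject's own interface.
def _build_post_fields(policy, signature, access_key_id,
                       security_token=None, **extra_fields):
    """Assemble keyword arguments for a policy-signed POST upload (a sketch)."""
    fields = {
        'Policy': policy,
        'Signature': signature,
        'AWSAccessKeyId': access_key_id,
    }
    if security_token:
        # Mirrors the conditional x-amz-security-token handling above.
        fields['x-amz-security-token'] = security_token
    fields.update(extra_fields)  # caller-supplied values win on conflict
    return fields
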
def main(self):
    key_prefix = self.get_bundle_key_prefix()
    self.ensure_dest_bucket_exists()
    manifest = BundleManifest.read_from_file(self.args['manifest'])
    part_dir = (self.args.get('directory') or
                os.path.dirname(self.args['manifest']))
    for part in manifest.image_parts:
        part.filename = os.path.join(part_dir, part.filename)
        if not os.path.isfile(part.filename):
            raise ValueError("no such part: '{0}'".format(part.filename))

    # manifest -> upload
    part_out_r, part_out_w = multiprocessing.Pipe(duplex=False)
    part_gen = multiprocessing.Process(target=_generate_bundle_parts,
                                       args=(manifest, part_out_w))
    part_gen.start()
    part_out_w.close()

    # Drive the upload process by feeding in part info
    self.upload_bundle_parts(part_out_r, key_prefix,
                             show_progress=self.args.get('show_progress'))
    part_gen.join()

    # (conditionally) upload the manifest
    if not self.args.get('skip_manifest'):
        manifest_dest = (key_prefix +
                         os.path.basename(self.args['manifest']))
        req = PutObject(source=self.args['manifest'], dest=manifest_dest,
                        acl=self.args.get('acl') or 'aws-exec-read',
                        retries=self.args.get('retries') or 0,
                        service=self.service, config=self.config)
        req.main()
    else:
        manifest_dest = None
    return {'parts': tuple({'filename': part.filename,
                            'key': (key_prefix +
                                    os.path.basename(part.filename))}
                           for part in manifest.image_parts),
            'manifests': ({'filename': self.args['manifest'],
                           'key': manifest_dest},)}

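
# main() above hands the manifest to a child process and streams generated
# part descriptions back through a one-way multiprocessing.Pipe.  The sketch
# below shows the same producer/consumer arrangement using only the standard
# library; _produce and consume_all are hypothetical helpers, not part of
# this codebase.
import multiprocessing


def _produce(items, conn):
    """Producer: send each item down the pipe, then close the write end."""
    try:
        for item in items:
            conn.send(item)
    finally:
        conn.close()  # closing the write end lets the reader see EOFError


def consume_all(conn):
    """Consumer: read items until the producer closes its end of the pipe."""
    results = []
    try:
        while True:
            results.append(conn.recv())
    except EOFError:
        pass
    return results


if __name__ == '__main__':
    reader, writer = multiprocessing.Pipe(duplex=False)
    producer = multiprocessing.Process(target=_produce,
                                       args=(list(range(5)), writer))
    producer.start()
    writer.close()  # the parent must drop its copy of the write end too
    print(consume_all(reader))  # [0, 1, 2, 3, 4]
    producer.join()
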
def __upload_part(self, part, part_s3path, pbar_label_template):
    self.log.info('Uploading part %s (bytes %i-%i)', part_s3path,
                  part.start, part.end)
    part_pbar_label = pbar_label_template.format(
        fname=os.path.basename(part.key), index=(part.index + 1))
    with open(self.args['source']) as source:
        # Seek to this part's offset; the byte range is inclusive, so the
        # upload size is end - start + 1.
        source.seek(part.start)
        put_req = PutObject.from_other(
            self, service=self.args['s3_service'],
            auth=self.args['s3_auth'], source=source, dest=part_s3path,
            size=(part.end - part.start + 1),
            show_progress=self.args.get('show_progress', False),
            progressbar_label=part_pbar_label)
        return put_req.main()

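
# __upload_part above uploads one slice of the source file by seeking to the
# part's start offset and sending an inclusive byte range, hence the
# size=(part.end - part.start + 1).  Below is a sketch of reading exactly that
# window with plain file I/O; read_part is a hypothetical helper, and
# PutObject is assumed to do something equivalent internally when given a
# size.
def read_part(path, start, end, chunk_size=64 * 1024):
    """Yield the inclusive byte range [start, end] of a file in chunks."""
    remaining = end - start + 1
    with open(path, 'rb') as source:
        source.seek(start)
        while remaining > 0:
            chunk = source.read(min(chunk_size, remaining))
            if not chunk:
                break  # file ended early; stop rather than loop forever
            remaining -= len(chunk)
            yield chunk
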
def main(self):
    key_prefix = self.get_bundle_key_prefix()
    self.ensure_dest_bucket_exists()
    manifest = BundleManifest.read_from_file(self.args['manifest'])
    part_dir = (self.args.get('directory') or
                os.path.dirname(self.args['manifest']))
    for part in manifest.image_parts:
        part.filename = os.path.join(part_dir, part.filename)
        if not os.path.isfile(part.filename):
            raise ValueError("no such part: '{0}'".format(part.filename))

    # manifest -> upload
    part_out_r, part_out_w = multiprocessing.Pipe(duplex=False)
    part_gen = multiprocessing.Process(target=_generate_bundle_parts,
                                       args=(manifest, part_out_w))
    part_gen.start()
    part_out_w.close()

    # Drive the upload process by feeding in part info
    self.upload_bundle_parts(part_out_r, key_prefix,
                             show_progress=self.args.get('show_progress'))
    part_gen.join()

    # (conditionally) upload the manifest
    if not self.args.get('skipmanifest'):
        manifest_dest = (key_prefix +
                         os.path.basename(self.args['manifest']))
        req = PutObject.from_other(
            self, source=self.args['manifest'], dest=manifest_dest,
            acl=self.args.get('acl') or 'aws-exec-read',
            retries=self.args.get('retries') or 0)
        req.main()
    else:
        manifest_dest = None
    return {'parts': tuple({'filename': part.filename,
                            'key': (key_prefix +
                                    os.path.basename(part.filename))}
                           for part in manifest.image_parts),
            'manifests': ({'filename': self.args['manifest'],
                           'key': manifest_dest},)}