def azure_run_ore(build, args):
    """
    Execute ore to upload the vhd image in blob format

    See:
    - https://github.com/coreos/mantle/#azure
    - https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction

    :param args: The command line arguments
    :type args: argparse.Namespace
    :param build: Build instance to use
    :type build: Build
    """
    vhd_name = f"{build.image_name_base}.vhd"

    # Assemble the ore invocation; --overwrite is only added on --force.
    cmd = [
        'ore',
        '--log-level', args.log_level,
        'azure', 'upload-blob',
        '--azure-auth', args.auth,
        '--azure-location', args.location,
        '--azure-profile', args.profile,
        '--blob-name', vhd_name,
        '--file', f"{build.image_path}",
        '--container', args.container,
        '--resource-group', args.resource_group,
        '--storage-account', args.storage_account,
    ]
    if args.force:
        cmd += ['--overwrite']
    run_verbose(cmd)

    # Record the (quoted) public blob URL in the build metadata.
    url_path = urllib.parse.quote(
        (f"{args.storage_account}.blob.core.windows.net/"
         f"{args.container}/{vhd_name}"))
    build.meta['azure'] = {
        'image': vhd_name,
        'url': f"https://{url_path}",
    }
    build.meta_write()  # update build metadata
def mutate_image(self):
    """
    Convert the working qcow2 into the final image format.

    mutate_image is broken out separately to allow other classes to
    override the behavior.  Post-processing of the working image is done
    via the ``self.mutate_callback`` attribute (when set) before the
    image is committed to the final location.  To see how this is done,
    look at cosalib.vmware.VMwareOVA.mutate_image.

    :raises ImageError: if qemu-img produced a format other than the
        requested ``self.image_format``
    """
    work_img = os.path.join(self._tmpdir,
                            f"{self.image_name_base}.{self.image_format}")
    final_img = os.path.join(os.path.abspath(self.build_dir),
                             self.image_name)
    log.info(f"Staging temp image: {work_img}")
    self.set_platform()
    cmd = ['qemu-img', 'convert', '-f', 'qcow2', '-O',
           self.image_format, self.tmp_image]
    for k, v in self.convert_options.items():
        cmd.extend([k, v])
    cmd.extend([work_img])
    run_verbose(cmd)
    # Sanity-check that qemu-img actually emitted the requested format.
    img_info = image_info(work_img)
    if self.image_format != img_info.get("format"):
        raise ImageError((f"{work_img} format mismatch"
                          f" expected: '{self.image_format}'"
                          f" found: '{img_info.get('format')}'"))
    if self.mutate_callback:
        log.info("Processing work image callback")
        self.mutate_callback(work_img)
    if self.tar_members:
        # Python does not create sparse Tarfiles, so we have to do it via
        # the CLI here.
        tarlist = []
        for member in self.tar_members:
            member_name = os.path.basename(member)
            # In the case of several clouds, the disk is named
            # `disk.raw` or `disk.vmdk`. When creating a tarball, we
            # rename the disk to the in-tar name if the name does not
            # match the default naming.
            if member_name.endswith(('.raw', '.vmdk')):
                if member_name != os.path.basename(work_img):
                    shutil.move(work_img,
                                os.path.join(self._tmpdir, member_name))
            tarlist.append(member_name)
        tar_cmd = ['tar', '--owner=0', '--group=0', '-C', self._tmpdir]
        tar_cmd.extend(self.tar_flags)
        tar_cmd.extend(['-f', final_img])
        tar_cmd.extend(tarlist)
        run_verbose(tar_cmd)
    else:
        log.info(f"Moving {work_img} to {final_img}")
        shutil.move(work_img, final_img)
def remove_gcp_image(gcp_id, json_key, project):
    """Delete a GCP image via ore; raise on any failure."""
    print(f"GCP: removing image {gcp_id}")
    cmd = [
        'ore', 'gcloud', 'delete-images', gcp_id,
        '--json-key', json_key,
        '--project', project,
    ]
    try:
        run_verbose(cmd)
    except SystemExit:
        raise Exception("Failed to remove image")
def mutate_digitalocean(path):
    """Gzip the artifact in place so DigitalOcean can import it by URL."""
    # DigitalOcean can import custom images directly from a URL, and
    # supports .gz and .bz2 compression but not .xz. .bz2 is a bit tighter
    # but isn't used for any other artifact. Manually gzip the artifact
    # here. cmd-compress will skip recompressing it later.
    gz_path = f"{path}.gz"
    with open(gz_path, "wb") as out:
        run_verbose(['gzip', '-9c', path], stdout=out)
    os.rename(gz_path, path)
def remove_aliyun_image(aliyun_id, region):
    """
    Delete an uploaded image from aliyun.

    :param aliyun_id: the image id to delete
    :type aliyun_id: str
    :param region: the aliyun region the image lives in
    :type region: str
    :raises Exception: if ore exits non-zero
    """
    print(f"aliyun: removing image {aliyun_id} in {region}")
    try:
        # BUG FIX: `region` was accepted (and printed) but never passed to
        # ore, so the deletion always targeted ore's default region.
        run_verbose([
            'ore', 'aliyun', '--log-level', 'debug',
            '--region', region,
            'delete-image',
            '--id', aliyun_id,
            '--force'
        ])
    except SystemExit:
        raise Exception("Failed to remove image")
def test_image_info(tmpdir):
    """Verify image_info reports the format qemu-img actually wrote."""
    qcow2_path = f"{tmpdir}/test.qcow2"
    cmdlib.run_verbose(
        ["qemu-img", "create", "-f", "qcow2", qcow2_path, "10M"])
    assert cmdlib.image_info(qcow2_path).get('format') == "qcow2"

    vpc_path = f"{tmpdir}/test.vpc"
    cmdlib.run_verbose([
        "qemu-img", "create", "-f", "vpc",
        '-o', 'force_size,subformat=fixed',
        vpc_path, "10M"
    ])
    assert cmdlib.image_info(vpc_path).get('format') == "vpc"
def ibmcloud_run_ore(build, args):
    """
    Execute ore to upload the image to IBM Cloud object storage and
    record the resulting object in the build metadata.
    """
    ore_args = ['ore']
    if args.log_level:
        ore_args += ['--log-level', args.log_level]

    # Fall back to us-east when no region was requested on the CLI.
    region = args.region[0] if args.region else "us-east"

    platform = args.target
    if args.cloud_object_storage is not None:
        cos_instance = args.cloud_object_storage
    else:
        cos_instance = f"coreos-dev-image-{platform}"

    object_name = f"{build.build_name}-{build.build_id}-{build.basearch}-{build.platform}"
    if platform == "powervs":
        # powervs requires the image name to have an extension and also does not
        # tolerate dots in the name. It affects the internal import from IBMCloud
        # to the PowerVS systems
        object_name = object_name.replace(".", "-") + ".ova.gz"

    ore_args += [
        'ibmcloud', 'upload',
        '--region', f"{region}",
        '--cloud-object-storage', f"{cos_instance}",
        '--bucket', f"{args.bucket}",
        '--name', object_name,
        '--file', f"{build.image_path}",
    ]
    if args.credentials_file is not None:
        ore_args += ['--credentials-file', f"{args.credentials_file}"]
    if args.force:
        ore_args += ['--force']
    run_verbose(ore_args)

    # Publish the HTTPS URL of the uploaded object in the metadata.
    url_path = urllib.parse.quote(
        (f"s3.{region}.cloud-object-storage.appdomain.cloud/"
         f"{args.bucket}/{object_name}"))
    build.meta[platform] = [{
        'object': object_name,
        'bucket': args.bucket,
        'region': region,
        'url': f"https://{url_path}",
    }]
    build.meta_write()  # update build metadata
def remove_azure_image(image, resource_group, auth, profile):
    """Delete an Azure image via ore; raise on any failure."""
    print(f"Azure: removing image {image}")
    cmd = [
        'ore', 'azure',
        '--azure-auth', auth,
        '--azure-profile', profile,
        'delete-image',
        '--image-name', image,
        '--resource-group', resource_group,
    ]
    try:
        run_verbose(cmd)
    except SystemExit:
        raise Exception("Failed to remove image")
def test_run_verbose():
    """
    Verify run_verbose returns expected information
    """
    out = cmdlib.run_verbose(['echo', 'hi'])
    assert out.stdout is None

    with pytest.raises(FileNotFoundError):
        cmdlib.run_verbose(['idonotexist'])

    # capture_output only exists on Python >= 3.7; skip the check otherwise.
    if PY_MAJOR == 3 and PY_MINOR >= 7:
        out = cmdlib.run_verbose(['echo', 'hi'], capture_output=True)
        assert out.stdout == b'hi\n'
def mutate_image(self, callback=None):
    """
    Convert the working qcow2 into the final image format.

    mutate_image is broken out separately to allow other classes to
    override the behavior.  The callback parameter is used to do
    post-processing on the working image before committing it to the
    final location.  To see how this is done, look at
    cosalib.vmware.VMwareOVA.mutate_image.

    :param callback: callback function for extra processing image
    :type callback: function
    """
    work_img = os.path.join(self._tmpdir,
                            f"{self.image_name_base}.{self.image_format}")
    final_img = os.path.join(os.path.abspath(self.build_dir),
                             self.image_name)
    log.info(f"Staging temp image: {work_img}")
    self.set_platform()
    cmd = [
        'qemu-img', 'convert',
        '-f', 'qcow2',
        '-O', self.image_format,
        self.tmp_image
    ]
    for k, v in self.convert_options.items():
        cmd.extend([k, v])
    cmd.extend([work_img])
    run_verbose(cmd)
    if callback:
        log.info("Processing work image callback")
        callback(work_img)
    if self.tar_members:
        # Pick the tarfile write mode from the target's extension.
        wmode = 'w'
        if final_img.endswith('gz'):
            wmode = 'w:gz'
        elif final_img.endswith('xz'):
            wmode = 'w:xz'
        log.info(f"Preparing tarball with mode '{wmode}': {final_img}")
        with tarfile.open(final_img, wmode) as tar:
            # BUG FIX: this used to pop(0) from self.tar_members,
            # destructively mutating shared instance state; unpack
            # instead so tar_members survives the call intact.
            base_disk, *extra_members = self.tar_members
            base_name = os.path.basename(base_disk)
            log.info(f" - adding base disk '{work_img}' as '{base_name}'")
            tar.add(work_img, arcname=base_name)
            for te in extra_members:
                te_name = os.path.basename(te)
                log.info(f" - adding additional file: {te_name}")
                tar.add(te, arcname=te_name)
    else:
        log.info(f"Moving {work_img} to {final_img}")
        shutil.move(work_img, final_img)
def gcp_run_ore(build, args):
    """
    Execute ore to upload the tarball and register the image

    :param build: Build instance to use
    :type build: Build
    :param args: The command line arguments
    :type args: argparse.Namespace
    :raises Exception: when a required argument is missing or the image
        name violates GCP naming rules
    """
    arg_exp_str = "parameter '--{}' or envVar '{}' must be defined"
    if args.bucket is None:
        raise Exception(arg_exp_str.format("bucket", "GCP_BUCKET"))
    if args.json_key is None:
        raise Exception(arg_exp_str.format("json-key", "GCP_JSON_AUTH"))
    if args.project is None:
        raise Exception(arg_exp_str.format("project", "GCP_PROJECT"))

    ore_args = ['ore']
    if args.log_level == "DEBUG":
        ore_args.extend(['--log-level', "DEBUG"])

    # GCP image names may not contain underscores or dots.
    gcp_name = re.sub(r'[_\.]', '-', build.image_name_base)
    if not re.fullmatch(GCP_NAMING_RE, gcp_name):
        # BUG FIX: the message used to read "does match", inverting its meaning
        raise Exception(f"{gcp_name} does not match the naming rule: file a bug")

    urltmp = os.path.join(build.tmpdir, "gcp-url")
    ore_args.extend([
        'gcloud',
        '--project', args.project,
        '--basename', build.build_name,
        'upload',
        '--force',  # We want to support restarting the pipeline
        '--bucket', f'{args.bucket}',
        '--json-key', args.json_key,
        '--name', gcp_name,
        '--file', f"{build.image_path}",
        '--write-url', urltmp,
    ])
    if args.family:
        ore_args.extend(['--family', args.family])
    if args.description:
        ore_args.extend(['--description', args.description])
    run_verbose(ore_args)

    # BUG FIX: read the URL with a context manager instead of leaking the
    # handle from a bare open().read().
    with open(urltmp) as f:
        gcp_url = f.read().strip()
    build.meta['gcp'] = {'image': gcp_name, 'url': gcp_url}
    build.meta_write()
def gcp_run_ore(build, args):
    """
    Execute ore to upload the tarball and register the image

    :param build: Build instance to use
    :type build: Build
    :param args: The command line arguments
    :type args: argparse.Namespace
    :raises Exception: when a required argument is missing or the image
        name violates GCP naming rules
    """
    arg_exp_str = "parameter '--{}' or envVar '{}' must be defined"
    if args.bucket is None:
        raise Exception(arg_exp_str.format("bucket", "GCP_BUCKET"))
    if args.json_key is None:
        raise Exception(arg_exp_str.format("json-key", "GCP_JSON_AUTH"))
    if args.project is None:
        raise Exception(arg_exp_str.format("project", "GCP_PROJECT"))

    ore_args = ['ore']
    if args.log_level == "DEBUG":
        ore_args.extend(['--log-level', "DEBUG"])

    # GCP image names may not contain underscores or dots.
    gcp_name = re.sub(r'[_\.]', '-', build.image_name_base)
    if not re.fullmatch(GCP_NAMING_RE, gcp_name):
        # BUG FIX: the message used to read "does match", inverting its meaning
        raise Exception(f"{gcp_name} does not match the naming rule: file a bug")

    ore_args.extend([
        'gcloud',
        '--project', args.project,
        '--basename', build.build_name,
        'upload',
        '--force',  # We want to support restarting the pipeline
        '--board=""',
        '--bucket', f'gs://{args.bucket}/{build.build_name}',
        '--json-key', args.json_key,
        '--name', gcp_name,
        '--file', f"{build.image_path}",
    ])
    run_verbose(ore_args)

    # Record the (quoted) public storage URL in the build metadata.
    url_path = urllib.parse.quote(
        ("storage.googleapis.com/"
         f"{args.bucket}/{build.build_name}/{build.image_name}"))
    build.meta['gcp'] = {
        'image': gcp_name,
        'url': f"https://{url_path}",
    }
    build.meta_write()
def generate_ovf_parameters(self, vmdk, cpu=2, memory=4096,
                            system_type="vmx-13", os_type="rhel7_64Guest",
                            scsi="VirtualSCSI", network="VmxNet3"):
    """
    Returns a dictionary with the parameters needed to create an OVF
    file based on the qemu, vmdk, and info from the build metadata
    """
    # Ask qemu-img for the virtual (provisioned) size of the vmdk.
    qemu_info = run_verbose(
        ["qemu-img", "info", vmdk, "--output", "json"],
        capture_output=True)
    disk_size = json.loads(qemu_info.stdout)['virtual-size']
    # Allocated on-disk size in bytes (stat reports 512-byte blocks).
    vmdk_size = str(os.stat(vmdk).st_blocks * 512)

    return {
        'ovf_cpu_count': cpu,
        'ovf_memory_mb': memory,
        'vsphere_image_name': self.summary,
        'vsphere_product_name': f'{self.meta["name"]} {self.summary}',
        'vsphere_product_vendor_name': self.meta['name'],
        'vsphere_product_version': self.meta['ostree-version'],
        'vsphere_virtual_system_type': system_type,
        'vsphere_os_type': os_type,
        'vsphere_scsi_controller_type': scsi,
        'vsphere_network_controller_type': network,
        'virtual_disk_size': disk_size,
        'vmdk_size': vmdk_size,
    }
def set_platform(self):
    """Stamp this build's platform ID into the temp image via gf-platformid."""
    cmd = [
        '/usr/lib/coreos-assembler/gf-platformid',
        self.image_qemu,
        self.tmp_image,
        self.platform,
    ]
    run_verbose(cmd)
def gcp_run_ore(build, args):
    """
    Execute ore to upload the tarball and register the image,
    optionally deprecating it and/or adding it to an image family.

    :param build: Build instance to use
    :type build: Build
    :param args: The command line arguments
    :type args: argparse.Namespace
    :raises Exception: when a required argument is missing or the image
        name violates GCP naming rules
    """
    arg_exp_str = "parameter '--{}' or envVar '{}' must be defined"
    if args.bucket is None:
        raise Exception(arg_exp_str.format("bucket", "GCP_BUCKET"))
    if args.json_key is None:
        raise Exception(arg_exp_str.format("json-key", "GCP_JSON_AUTH"))
    if args.project is None:
        raise Exception(arg_exp_str.format("project", "GCP_PROJECT"))

    # GCP image names may not contain underscores or dots.
    gcp_name = re.sub(r'[_\.]', '-', build.image_name_base)
    if not re.fullmatch(GCP_NAMING_RE, gcp_name):
        # BUG FIX: the message used to read "does match", inverting its meaning
        raise Exception(f"{gcp_name} does not match the naming rule: file a bug")

    urltmp = os.path.join(build.tmpdir, "gcp-url")

    ore_common_args = [
        'ore', 'gcloud',
        '--project', args.project,
        '--json-key', args.json_key,
    ]
    if args.log_level == "DEBUG":
        ore_common_args.extend(['--log-level', "DEBUG"])

    ore_upload_cmd = ore_common_args + [
        'upload',
        '--basename', build.build_name,
        '--force',  # We want to support restarting the pipeline
        '--bucket', f'{args.bucket}',
        '--name', gcp_name,
        '--file', f"{build.image_path}",
        '--write-url', urltmp,
    ]
    if args.description:
        ore_upload_cmd.extend(['--description', args.description])
    if not args.create_image:
        ore_upload_cmd.extend(['--create-image=false'])
    if args.license:
        ore_upload_cmd.extend(['--license', args.license])
    run_verbose(ore_upload_cmd)

    # Run deprecate image to deprecate if requested
    if args.deprecated:
        ore_deprecate_cmd = ore_common_args + [
            'deprecate-image',
            '--image', gcp_name,
            '--state', 'DEPRECATED'
        ]
        run_verbose(ore_deprecate_cmd)

    # Run update-image to add to an image family if requested.
    # We run this as a separate API call because we want to run
    # it AFTER the deprecation if the user passed --deprecated
    if args.family:
        ore_update_cmd = ore_common_args + [
            'update-image',
            '--image', gcp_name,
            '--family', args.family
        ]
        run_verbose(ore_update_cmd)

    # BUG FIX: close the url file deterministically instead of leaking
    # the handle from a bare open().read().
    with open(urltmp) as f:
        gcp_url = f.read().strip()
    build.meta['gcp'] = {
        'image': gcp_name,
        'url': gcp_url,
    }
    build.meta_write()