def __init__(self,
             az_account: 'account.AZAccount',
             resource_id: str,
             name: str,
             region: str,
             zones: Optional[List[str]] = None) -> None:
  """Initialize the AZComputeResource class.

  Args:
    az_account (AZAccount): An Azure account object.
    resource_id (str): The Azure resource ID.
    name (str): The resource's name.
    region (str): The region in which the resource is located.
    zones (List[str]): Optional. Availability zones within the region where
        the resource is located.

  Raises:
    InvalidNameError: If the resource ID is malformed.
  """
  if not common.REGEX_COMPUTE_RESOURCE_ID.match(resource_id):
    raise errors.InvalidNameError(
        'Malformed resource ID: expected {0:s}, got {1:s}'.format(
            common.REGEX_COMPUTE_RESOURCE_ID.pattern, resource_id), __name__)
  self.az_account = az_account
  # Resource IDs have the form: /subscriptions/{id}/resourceGroups/
  # {resource_group_name}/providers/Microsoft.Compute/{resourceType}/
  # {resource}, so the resource group name is the fifth path component.
  self.resource_group_name = resource_id.split('/')[4]
  self.resource_id = resource_id
  self.name = name
  self.region = region
  self.zones = zones
def Snapshot(self,
             snapshot_name: Optional[str] = None,
             tags: Optional[Dict[str, str]] = None) -> 'AZComputeSnapshot':
  """Create a snapshot of the disk.

  Args:
    snapshot_name (str): Optional. A name for the snapshot. If none
        provided, one will be generated based on the disk's name.
    tags (Dict[str, str]): Optional. A dictionary of tags to add to the
        snapshot, for example {'TicketID': 'xxx'}.

  Returns:
    AZComputeSnapshot: A snapshot object.

  Raises:
    InvalidNameError: If the snapshot name does not comply with the RegEx.
    ResourceCreationError: If the snapshot could not be created.
  """
  if not snapshot_name:
    snapshot_name = self.name + '_snapshot'
  # Azure snapshot names are limited to 80 characters; keep one in reserve.
  snapshot_name = snapshot_name[:80 - 1]
  if not common.REGEX_SNAPSHOT_NAME.match(snapshot_name):
    raise errors.InvalidNameError(
        'Snapshot name {0:s} does not comply with {1:s}'.format(
            snapshot_name, common.REGEX_SNAPSHOT_NAME.pattern), __name__)

  snapshot_body = {
      'location': self.region,
      'creation_data': {
          'sourceResourceId': self.resource_id,
          'create_option': models.DiskCreateOption.copy
      }
  }
  if tags:
    snapshot_body['tags'] = tags

  try:
    logger.info('Creating snapshot: {0:s}'.format(snapshot_name))
    operation = self.compute_client.snapshots.create_or_update(
        self.resource_group_name, snapshot_name, snapshot_body)
    # Poll the long-running operation until the snapshot is provisioned.
    while not operation.done():
      sleep(5)  # Wait 5 seconds before checking snapshot status again
    snapshot = operation.result()
    logger.info('Snapshot {0:s} successfully created'.format(snapshot_name))
  except azure_exceptions.CloudError as exception:
    raise errors.ResourceCreationError(
        'Could not create snapshot for disk {0:s}: {1!s}'.format(
            self.resource_id, exception), __name__)

  return AZComputeSnapshot(self.az_account, snapshot.id, snapshot.name,
                           snapshot.location, self)
def CreateStorageAccount(self,
                         storage_account_name: str,
                         region: Optional[str] = None) -> Tuple[str, str]:
  """Create a storage account and returns its ID and access key.

  Args:
    storage_account_name (str): The name for the storage account.
    region (str): Optional. The region in which to create the storage
        account. If not provided, it will be created in the default_region
        associated to the AZAccount object.

  Returns:
    Tuple[str, str]: The storage account ID and its access key.

  Raises:
    InvalidNameError: If the storage account name is invalid.
  """
  if not common.REGEX_ACCOUNT_STORAGE_NAME.match(storage_account_name):
    raise errors.InvalidNameError(
        'Storage account name {0:s} does not comply with {1:s}'.format(
            storage_account_name, common.REGEX_ACCOUNT_STORAGE_NAME.pattern),
        __name__)

  if not region:
    region = self.az_account.default_region

  # https://docs.microsoft.com/en-us/rest/api/storagerp/srp_sku_types
  creation_data = {
      'location': region,
      'sku': {
          'name': 'Standard_RAGRS'
      },
      'kind': 'Storage'
  }

  # pylint: disable=line-too-long
  # https://docs.microsoft.com/en-us/samples/azure-samples/storage-python-manage/storage-python-manage/
  # https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python
  # pylint: enable=line-too-long
  logger.info(
      'Creating storage account: {0:s}'.format(storage_account_name))
  request = self.storage_client.storage_accounts.begin_create(
      self.az_account.default_resource_group_name,
      storage_account_name,
      creation_data)
  # Bug fix: block on the long-running operation BEFORE logging success.
  # Previously success was logged before result() was called, i.e. before
  # the storage account had actually been provisioned.
  storage_account = request.result()
  logger.info('Storage account {0:s} successfully created'.format(
      storage_account_name))
  storage_account_keys = self.storage_client.storage_accounts.list_keys(
      self.az_account.default_resource_group_name, storage_account_name)
  storage_account_keys = {
      key.key_name: key.value for key in storage_account_keys.keys
  }
  storage_account_id = storage_account.id  # type: str
  storage_account_key = storage_account_keys['key1']  # type: str
  return storage_account_id, storage_account_key
def Snapshot(self, tags: Optional[Dict[str, str]] = None) -> 'AWSSnapshot':
  """Create a snapshot of the volume.

  The snapshot name is taken from the 'Name' tag if present, otherwise
  derived from the volume ID, and is truncated to stay within AWS's
  255-character limit.

  Args:
    tags (Dict[str, str]): Optional. A dictionary of tags to add to the
        snapshot, for example {'Name': 'my-snapshot-name', 'TicketID': 'xxx'}.

  Returns:
    AWSSnapshot: A snapshot object.

  Raises:
    ResourceCreationError: If the snapshot could not be created.
  """
  if not tags:
    tags = {}
  snapshot_name = tags.get('Name') or (self.volume_id + '-snapshot')
  # Truncate to stay under the 255-char limit for snapshot names.
  truncate_at = 255 - 1
  snapshot_name = snapshot_name[:truncate_at]
  # Bug fix: the previous 'len(snapshot_name) > 255' check (raising
  # InvalidNameError) was unreachable after the truncation above and has
  # been removed as dead code.
  tags['Name'] = snapshot_name
  client = self.aws_account.ClientApi(common.EC2_SERVICE)
  try:
    snapshot = client.create_snapshot(
        VolumeId=self.volume_id,
        TagSpecifications=[common.CreateTags(common.SNAPSHOT, tags)])
    snapshot_id = snapshot.get('SnapshotId')
    # Wait for snapshot completion
    client.get_waiter('snapshot_completed').wait(
        SnapshotIds=[snapshot_id],
        WaiterConfig={
            'Delay': 30,
            'MaxAttempts': 100
        })
  except (client.exceptions.ClientError,
          botocore.exceptions.WaiterError) as exception:
    raise errors.ResourceCreationError(
        'Could not create snapshot for volume {0:s}: {1:s}'.format(
            self.volume_id, str(exception)), __name__)
  return AWSSnapshot(snapshot_id,
                     self.aws_account,
                     self.aws_account.default_region,
                     self.aws_account.default_availability_zone,
                     self,
                     name=snapshot_name)
def CreateImageFromGcsTarGz(
    self,
    gcs_uri: str,
    name: Optional[str] = None) -> 'GoogleComputeImage':
  """Creates a GCE image from a Gzip compressed Tar archive in GCS.

  Args:
    gcs_uri (str): Path to the compressed image archive (image.tar.gz) in
        Cloud Storage. It must be a gzip compressed tar archive with the
        extension .tar.gz.
        ex: 'https://storage.cloud.google.com/foo/bar.tar.gz'
        'gs://foo/bar.tar.gz'
        'foo/bar.tar.gz'
    name (str): Optional. Name of the image to create. Default is
        [src_disk.name]-[TIMESTAMP('%Y%m%d%H%M%S')].

  Returns:
    GoogleComputeImage: A Google Compute Image object.

  Raises:
    InvalidNameError: If the GCE Image name is invalid.
    ValueError: If the extension of the archived image is invalid.
  """
  if name:
    if not common.REGEX_DISK_NAME.match(name):
      raise errors.InvalidNameError(
          'Image name {0:s} does not comply with {1:s}'.format(
              name, common.REGEX_DISK_NAME.pattern), __name__)
    name = name[:common.COMPUTE_NAME_LIMIT]
  else:
    name = common.GenerateUniqueInstanceName('imported-image',
                                             common.COMPUTE_NAME_LIMIT)

  if not gcs_uri.lower().endswith('.tar.gz'):
    raise ValueError(
        'Image imported from {0:s} must be a GZIP compressed TAR '
        'archive with the extension: .tar.gz'.format(gcs_uri))

  # Normalize the URI into the storage-link form expected by the API.
  gcs_uri = os.path.relpath(gcs_uri, 'gs://')
  if not gcs_uri.startswith(common.STORAGE_LINK_URL):
    gcs_uri = os.path.join(common.STORAGE_LINK_URL, gcs_uri)

  image_body = {'name': name, 'rawDisk': {'source': gcs_uri}}
  images_client = self.GceApi().images()
  operation = images_client.insert(
      project=self.project_id, body=image_body, forceCreate=True)
  self.BlockOperation(operation.execute())
  return GoogleComputeImage(self.project_id, '', name)
def GenerateDiskName(snapshot: 'compute.AZComputeSnapshot',
                     disk_name_prefix: Optional[str] = None) -> str:
  """Generate a new disk name for the disk to be created from the Snapshot.

  The disk name must comply with the following RegEx:
      - ^[\\w]{1-80}$

  i.e., it must be between 1 and 80 chars, and can only contain alphanumeric
  characters and underscores.

  Args:
    snapshot (AZComputeSnapshot): A disk's Snapshot.
    disk_name_prefix (str): Optional. A prefix for the disk name.

  Returns:
    str: A name for the disk.

  Raises:
    InvalidNameError: If the disk name does not comply with the RegEx.
  """
  # Max length of disk names in Azure is 80 characters. A CRC32 over the
  # subscription ID plus source resource ID keeps the name unique per disk.
  subscription_id = snapshot.az_account.subscription_id
  disk_id_crc32 = '{0:08x}'.format(
      binascii.crc32((subscription_id + snapshot.disk.resource_id).encode())
      & 0xffffffff)
  truncate_at = 80 - len(disk_id_crc32) - len('_copy') - 1
  if disk_name_prefix:
    # Unconditional slicing is equivalent to truncating only when too long:
    # slicing never lengthens a string.
    prefix = (disk_name_prefix + '_')[:truncate_at]
    truncate_at -= len(prefix)
    disk_name = '{0:s}{1:s}_{2:s}_copy'.format(
        prefix, snapshot.name[:truncate_at], disk_id_crc32)
  else:
    disk_name = '{0:s}_{1:s}_copy'.format(
        snapshot.name[:truncate_at], disk_id_crc32)
  # Azure doesn't allow dashes in disk names, only underscores. If the
  # name of the source snapshot contained dashes, we need to replace them.
  disk_name = disk_name.replace('-', '_')
  if not REGEX_DISK_NAME.match(disk_name):
    raise errors.InvalidNameError(
        'Disk name {0:s} does not comply with '
        '{1:s}'.format(disk_name, REGEX_DISK_NAME.pattern), __name__)
  return disk_name
def GenerateDiskName(snapshot: 'compute.GoogleComputeSnapshot',
                     disk_name_prefix: Optional[str] = None) -> str:
  """Generate a new disk name for the disk to be created from the Snapshot.

  The disk name must comply with the following RegEx:
      - ^(?=.{1,63}$)[a-z]([-a-z0-9]*[a-z0-9])?$

  i.e., it must be between 1 and 63 chars, the first character must be a
  lowercase letter, and all following characters must be a dash, lowercase
  letter, or digit, except the last character, which cannot be a dash.

  Args:
    snapshot (GoogleComputeSnapshot): A disk's Snapshot.
    disk_name_prefix (str): Optional. A prefix for the disk name.

  Returns:
    str: A name for the disk.

  Raises:
    InvalidNameError: If the disk name does not comply with the RegEx.
  """
  # Max length of disk names in GCP is 63 characters. A CRC32 over the
  # project ID plus source disk name keeps the name unique per disk.
  project_id = snapshot.project_id
  disk_id_crc32 = '{0:08x}'.format(
      binascii.crc32((project_id + snapshot.disk.name).encode())
      & 0xffffffff)
  truncate_at = 63 - len(disk_id_crc32) - len('-copy') - 1
  if disk_name_prefix:
    # Unconditional slicing is equivalent to truncating only when too long:
    # slicing never lengthens a string.
    prefix = (disk_name_prefix + '-')[:truncate_at]
    truncate_at -= len(prefix)
    disk_name = '{0:s}{1:s}-{2:s}-copy'.format(
        prefix, snapshot.name[:truncate_at], disk_id_crc32)
  else:
    disk_name = '{0:s}-{1:s}-copy'.format(
        snapshot.name[:truncate_at], disk_id_crc32)
  if not REGEX_DISK_NAME.match(disk_name):
    raise errors.InvalidNameError(
        'Disk name {0:s} does not comply with {1:s}'.format(
            disk_name, REGEX_DISK_NAME.pattern), __name__)
  return disk_name
def ExportImage(self,
                gcs_output_folder: str,
                output_name: Optional[str] = None) -> None:
  """Export compute image to Google Cloud storage.

  Exported image is compressed and stored in .tar.gz format.

  Args:
    gcs_output_folder (str): Folder path of the exported image.
    output_name (str): Optional. Name of the output file. Name will be
        appended with .tar.gz. Default is [image_name].tar.gz.

  Raises:
    InvalidNameError: If exported image name is invalid.
  """
  if output_name:
    if not common.REGEX_DISK_NAME.match(output_name):
      raise errors.InvalidNameError(
          'Exported image name {0:s} does not comply with {1:s}'.format(
              output_name, common.REGEX_DISK_NAME.pattern), __name__)
  else:
    output_name = self.name
  full_path = '{0:s}.tar.gz'.format(
      os.path.join(gcs_output_folder, output_name))

  # Run the export as a Cloud Build job using the GCE image export tool.
  build_body = {
      'timeout': '86400s',
      'steps': [{
          'args': [
              '-source_image={0:s}'.format(self.name),
              '-destination_uri={0:s}'.format(full_path),
              '-client_id=api',
          ],
          'name': 'gcr.io/compute-image-tools/gce_vm_image_export:release',
          'env': []
      }],
      'tags': ['gce-daisy', 'gce-daisy-image-export']
  }
  cloud_build = build.GoogleCloudBuild(self.project_id)
  cloud_build.BlockOperation(cloud_build.CreateBuild(build_body))
  logger.info('Image {0:s} exported to {1:s}.'.format(self.name, full_path))
def CreateDiskFromImage(self,
                        src_image: 'GoogleComputeImage',
                        zone: str,
                        name: Optional[str] = None) -> 'GoogleComputeDisk':
  """Creates a GCE persistent disk from a GCE image.

  Args:
    src_image (GoogleComputeImage): Source image for the disk.
    zone (str): Zone to create the new disk in.
    name (str): Optional. Name of the disk to create. Default is
        [src_image.name]-[TIMESTAMP('%Y%m%d%H%M%S')].

  Returns:
    GoogleComputeDisk: A Google Compute Disk object.

  Raises:
    InvalidNameError: If GCE disk name is invalid.
  """
  if name:
    if not common.REGEX_DISK_NAME.match(name):
      raise errors.InvalidNameError(
          'Disk name {0:s} does not comply with {1:s}'.format(
              name, common.REGEX_DISK_NAME.pattern), __name__)
    name = name[:common.COMPUTE_NAME_LIMIT]
  else:
    name = common.GenerateUniqueInstanceName(src_image.name,
                                             common.COMPUTE_NAME_LIMIT)

  source_image_path = 'projects/{project_id}/global/images/{src_image}'.format(
      project_id=src_image.project_id, src_image=src_image.name)
  disk_body = {'name': name, 'sourceImage': source_image_path}
  request = self.GceApi().disks().insert(
      project=self.project_id, body=disk_body, zone=zone)
  self.BlockOperation(request.execute(), zone)
  return GoogleComputeDisk(self.project_id, zone, name)
def CreateImageFromDisk(
    self,
    src_disk: 'GoogleComputeDisk',
    name: Optional[str] = None) -> 'GoogleComputeImage':
  """Creates an image from a persistent disk.

  Args:
    src_disk (GoogleComputeDisk): Source disk for the image.
    name (str): Optional. Name of the image to create. Default is
        [src_disk.name]-[TIMESTAMP('%Y%m%d%H%M%S')].

  Returns:
    GoogleComputeImage: A Google Compute Image object.

  Raises:
    InvalidNameError: If the GCE Image name is invalid.
  """
  if name:
    if not common.REGEX_DISK_NAME.match(name):
      raise errors.InvalidNameError(
          'Image name {0:s} does not comply with {1:s}'.format(
              name, common.REGEX_DISK_NAME.pattern), __name__)
    name = name[:common.COMPUTE_NAME_LIMIT]
  else:
    name = common.GenerateUniqueInstanceName(src_disk.name,
                                             common.COMPUTE_NAME_LIMIT)

  source_disk_path = 'projects/{project_id}/zones/{zone}/disks/{src_disk}'.format(
      project_id=src_disk.project_id,
      zone=src_disk.zone,
      src_disk=src_disk.name)
  image_body = {'name': name, 'sourceDisk': source_disk_path}
  request = self.GceApi().images().insert(
      project=self.project_id, body=image_body, forceCreate=True)
  self.BlockOperation(request.execute())
  return GoogleComputeImage(self.project_id, '', name)
def Snapshot(
    self, snapshot_name: Optional[str] = None) -> 'GoogleComputeSnapshot':
  """Create Snapshot of the disk.

  The Snapshot name must comply with the following RegEx:
      - ^(?=.{1,63}$)[a-z]([-a-z0-9]*[a-z0-9])?$

  i.e., it must be between 1 and 63 chars, the first character must be a
  lowercase letter, and all following characters must be a dash, lowercase
  letter, or digit, except the last character, which cannot be a dash.

  Args:
    snapshot_name (str): Optional. Name of the Snapshot.

  Returns:
    GoogleComputeSnapshot: A Snapshot object.

  Raises:
    InvalidNameError: If the name of the snapshot does not comply with the
        RegEx.
  """
  # A timestamp suffix is always appended, whether the caller supplied a
  # name or the disk's own name is used as the base.
  base_name = snapshot_name if snapshot_name else self.name
  snapshot_name = common.GenerateUniqueInstanceName(
      base_name, common.COMPUTE_NAME_LIMIT)
  if not common.REGEX_DISK_NAME.match(snapshot_name):
    raise errors.InvalidNameError(
        'Snapshot name {0:s} does not comply with {1:s}'.format(
            snapshot_name, common.REGEX_DISK_NAME.pattern), __name__)
  logger.info(
      self.FormatLogMessage('New Snapshot: {0:s}'.format(snapshot_name)))
  request = self.GceApi().disks().createSnapshot(
      disk=self.name,
      project=self.project_id,
      zone=self.zone,
      body={'name': snapshot_name})
  self.BlockOperation(request.execute(), zone=self.zone)
  return GoogleComputeSnapshot(disk=self, name=snapshot_name)
def SetTags(self, new_tags: List[str]) -> None:
  """Sets tags for the compute instance.

  Tags are used to configure firewall rules and network routes.

  Args:
    new_tags (List[str]): A list of tags. Each tag must be 1-63 characters
        long, and comply with RFC1035.

  Raises:
    InvalidNameError: If the name of the tags does not comply with RFC1035.
  """
  logger.info(
      self.FormatLogMessage(', adding tags {0!s} to instance '
                            '{1:s}.'.format(new_tags, self.name)))
  # Validate all tags before touching the instance.
  for tag in new_tags:
    if not common.COMPUTE_RFC1035_REGEX.match(tag):
      raise errors.InvalidNameError(
          'Network Tag {0:s} does not comply with {1:s}.'.format(
              tag, common.COMPUTE_RFC1035_REGEX.pattern), __name__)

  # The current fingerprint must be sent back so the API can detect
  # concurrent tag modifications.
  tags_dict = self.GetOperation()['tags']
  request_body = {
      'fingerprint': tags_dict['fingerprint'],
      'items': tags_dict.get('items', []) + new_tags,
  }
  response = self.GceApi().instances().setTags(
      project=self.project_id,
      zone=self.zone,
      instance=self.name,
      body=request_body).execute()
  self.BlockOperation(response, zone=self.zone)
def CreateVolumeFromSnapshot(
    self,
    snapshot: AWSSnapshot,
    volume_name: Optional[str] = None,
    volume_name_prefix: Optional[str] = None,
    volume_type: str = 'gp2',
    kms_key_id: Optional[str] = None,
    tags: Optional[Dict[str, str]] = None) -> AWSVolume:
  """Create a new volume based on a snapshot.

  Args:
    snapshot (AWSSnapshot): Snapshot to use.
    volume_name (str): Optional. String to use as new volume name.
    volume_name_prefix (str): Optional. String to prefix the volume name
        with.
    volume_type (str): Optional. The volume type for the volume to create.
        Can be one of 'standard'|'io1'|'gp2'|'sc1'|'st1'. The default is
        'gp2'.
    kms_key_id (str): Optional. A KMS key id to encrypt the volume with.
    tags (Dict[str, str]): Optional. A dictionary of tags to add to the
        volume, for example {'TicketID': 'xxx'}. An entry for the volume
        name is added by default.

  Returns:
    AWSVolume: An AWS EBS Volume.

  Raises:
    InvalidNameError: If the volume name does not comply with the RegEx
    ValueError: If the volume type is invalid.
    ResourceCreationError: If the volume could not be created.
  """
  if volume_type not in ('standard', 'io1', 'gp2', 'sc1', 'st1'):
    raise ValueError(
        'Volume type must be one of [standard, io1, gp2, sc1, st1]. '
        'Got: {0:s}'.format(volume_type))

  if not volume_name:
    volume_name = self._GenerateVolumeName(
        snapshot, volume_name_prefix=volume_name_prefix)
  if len(volume_name) > 255:
    raise errors.InvalidNameError(
        'Volume name {0:s} is too long (>255 chars)'.format(volume_name),
        __name__)

  if not tags:
    tags = {}
  tags['Name'] = volume_name

  client = self.aws_account.ClientApi(common.EC2_SERVICE)
  create_volume_args = {
      'AvailabilityZone': snapshot.availability_zone,
      'SnapshotId': snapshot.snapshot_id,
      'TagSpecifications': [common.CreateTags(common.VOLUME, tags)],
      'VolumeType': volume_type
  }
  if kms_key_id:
    create_volume_args['Encrypted'] = True
    create_volume_args['KmsKeyId'] = kms_key_id
  if volume_type == 'io1':
    # If using the io1 volume type, we must specify Iops, see
    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/
    # services/ec2.html#EC2.Client.create_volume. io1 volumes allow for a
    # ratio of 50 IOPS per 1 GiB.
    snapshot_size = self.aws_account.ResourceApi(
        common.EC2_SERVICE).Snapshot(snapshot.snapshot_id).volume_size
    create_volume_args['Iops'] = snapshot_size * 50

  try:
    volume = client.create_volume(**create_volume_args)
    volume_id = volume['VolumeId']
    # Wait for volume creation completion
    client.get_waiter('volume_available').wait(VolumeIds=[volume_id])
  except (client.exceptions.ClientError,
          botocore.exceptions.WaiterError) as exception:
    raise errors.ResourceCreationError(
        'Could not create volume {0:s} from snapshot {1:s}: {2!s}'.format(
            volume_name, snapshot.name, exception), __name__)

  return AWSVolume(volume_id,
                   self.aws_account,
                   self.aws_account.default_region,
                   volume['AvailabilityZone'],
                   volume['Encrypted'],
                   name=volume_name)
def AddDenyAllFirewallRules(project_id: str,
                            network: str,
                            deny_ingress_tag: str,
                            deny_egress_tag: str,
                            exempted_src_ips: Optional[List[str]] = None,
                            enable_logging: bool = False) -> None:
  """Add deny-all firewall rules, of highest priority.

  Args:
    project_id (str): Google Cloud Project ID.
    network (str): URL of the network resource for these firewall rules.
    deny_ingress_tag (str): Target tag name to apply deny ingress rule,
        also used as a deny ingress firewall rule name.
    deny_egress_tag (str): Target tag name to apply deny egress rule,
        also used as a deny egress firewall rule name.
    exempted_src_ips (List[str]): List of IPs exempted from the deny-all
        ingress firewall rules, ex: analyst IPs.
    enable_logging (bool): Optional. Enable firewall logging.
        Default is False.

  Raises:
    InvalidNameError: If Tag names are invalid.
  """
  logger.info('Creating deny-all (ingress/egress) '
              'firewall rules in {0:s} network.'.format(network))
  project = gcp_project.GoogleCloudProject(project_id)
  if not common.COMPUTE_RFC1035_REGEX.match(deny_ingress_tag):
    raise errors.InvalidNameError(
        'Deny ingress tag name {0:s} does not comply with {1:s}'.format(
            deny_ingress_tag, common.COMPUTE_RFC1035_REGEX.pattern),
        __name__)
  if not common.COMPUTE_RFC1035_REGEX.match(deny_egress_tag):
    raise errors.InvalidNameError(
        'Deny egress tag name {0:s} does not comply with {1:s}'.format(
            deny_egress_tag, common.COMPUTE_RFC1035_REGEX.pattern),
        __name__)

  source_range = common.GenerateSourceRange(exempted_src_ips)

  def _BaseRule(tag: str, direction: str) -> Dict[str, Any]:
    # Common scaffold shared by the two priority-0 deny rules.
    return {
        'name': tag,
        'network': network,
        'direction': direction,
        'priority': 0,
        'targetTags': [tag],
        'denied': [{'IPProtocol': 'all'}],
        'logConfig': {'enable': enable_logging},
    }

  deny_ingress = _BaseRule(deny_ingress_tag, 'INGRESS')
  deny_ingress['sourceRanges'] = source_range
  deny_egress = _BaseRule(deny_egress_tag, 'EGRESS')
  deny_egress['destinationRanges'] = ['0.0.0.0/0']

  project.compute.InsertFirewallRule(body=deny_ingress)
  project.compute.InsertFirewallRule(body=deny_egress)
def CreateDiskFromGCSImage(
    project_id: str,
    storage_image_path: str,
    zone: str,
    name: Optional[str] = None) -> Dict[str, Any]:
  """Creates a GCE persistent disk from a image in GCS.

  The method supports raw disk images and most virtual disk file formats.
  Valid import formats are:
  [raw (dd), qcow2, qcow , vmdk, vdi, vhd, vhdx, qed, vpc].

  The created GCE disk might be larger than the original raw (dd) image
  stored in GCS to satisfy GCE capacity requirements:
  https://cloud.google.com/compute/docs/disks/#introduction
  However the bytes_count and the md5_hash values of the source image are
  returned with the newly created disk. The md5_hash can be used to verify
  the integrity of the created GCE disk, it must be compared with the hash
  of the created GCE disk from byte 0 to bytes_count. i.e:
  result['md5Hash'] = hash(created_gce_disk,
                           start_byte=0,
                           end_byte=result['bytes_count'])

  Args:
    project_id (str): Google Cloud Project ID.
    storage_image_path (str): Path to the source image in GCS.
    zone (str): Zone to create the new disk in.
    name (str): Optional. Name of the disk to create. Default is
        imported-disk-[TIMESTAMP('%Y%m%d%H%M%S')].

  Returns:
    Dict: A key value describing the imported GCE disk.
        Ex: {
            'project_id': 'fake-project',
            'disk_name': 'fake-imported-disk',
            'zone': 'fake-zone',
            'bytes_count': '1234'  # Content-Length of source image in bytes.
            'md5Hash': 'Source Image MD5 hash string in hex'
        }

  Raises:
    InvalidNameError: If the GCE disk name is invalid.
  """
  if name:
    if not common.REGEX_DISK_NAME.match(name):
      raise errors.InvalidNameError(
          'Disk name {0:s} does not comply with {1:s}'.format(
              name, common.REGEX_DISK_NAME.pattern), __name__)
    name = name[:common.COMPUTE_NAME_LIMIT]
  else:
    name = common.GenerateUniqueInstanceName('imported-disk',
                                             common.COMPUTE_NAME_LIMIT)

  project = gcp_project.GoogleCloudProject(project_id)
  # Import the GCS image as a GCE image, then materialize it as a disk.
  image_object = project.compute.ImportImageFromStorage(storage_image_path)
  disk_object = project.compute.CreateDiskFromImage(
      image_object, zone=zone, name=name)
  # The source object's metadata carries the size and base64 MD5 used to
  # verify the created disk's integrity.
  src_metadata = project.storage.GetObjectMetadata(storage_image_path)
  md5_hash_hex = base64.b64decode(src_metadata['md5Hash']).hex()
  return {
      'project_id': disk_object.project_id,
      'disk_name': disk_object.name,
      'zone': disk_object.zone,
      'bytes_count': src_metadata['size'],
      'md5Hash': md5_hash_hex
  }
def ImportImageFromStorage(self,
                           storage_image_path: str,
                           image_name: Optional[str] = None,
                           bootable: bool = False,
                           os_name: Optional[str] = None,
                           guest_environment: bool = True) -> 'GoogleComputeImage':  # pylint: disable=line-too-long
  """Import GCE image from Cloud storage.

  The import tool supports raw disk images and most virtual disk
  file formats, valid import formats are:
  [raw (dd), qcow2, qcow , vmdk, vdi, vhd, vhdx, qed, vpc].

  Args:
    storage_image_path (str): Path to the source image in Cloud Storage.
    image_name (str): Optional. Name of the imported image,
        default is "imported-image-" appended with a timestamp
        in "%Y%m%d%H%M%S" format.
    bootable (bool): Optional. True if the imported image is bootable.
        Default is False. If True the os_name must be specified.
    os_name (str): Optional. Name of the operating system on the bootable
        image. For supported versions please see:
        https://cloud.google.com/sdk/gcloud/reference/compute/images/import#--os  # pylint: disable=line-too-long
        For known limitations please see:
        https://googlecloudplatform.github.io/compute-image-tools/image-import.html#compatibility-and-known-limitations  # pylint: disable=line-too-long
    guest_environment (bool): Optional. Install Google Guest Environment on a
        bootable image. Relevant only if image is bootable. Default True.

  Returns:
    GoogleComputeImage: A Google Compute Image object.

  Raises:
    ValueError: If bootable is True and os_name not specified.
    InvalidNameError: If imported image name is invalid.
  """
  supported_os = [
      'centos-6', 'centos-7', 'centos-8', 'debian-8', 'debian-9',
      'opensuse-15', 'rhel-6', 'rhel-6-byol', 'rhel-7', 'rhel-7-byol',
      'rhel-8', 'rhel-8-byol', 'sles-12-byol', 'sles-15-byol',
      'ubuntu-1404', 'ubuntu-1604', 'ubuntu-1804', 'windows-10-x64-byol',
      'windows-10-x86-byol', 'windows-2008r2', 'windows-2008r2-byol',
      'windows-2012', 'windows-2012-byol', 'windows-2012r2',
      'windows-2012r2-byol', 'windows-2016', 'windows-2016-byol',
      'windows-2019', 'windows-2019-byol', 'windows-7-x64-byol',
      'windows-7-x86-byol', 'windows-8-x64-byol', 'windows-8-x86-byol'
  ]

  if not bootable:
    img_type = '-data_disk'
  elif not os_name:
    raise ValueError('For bootable images, operating system name'
                     ' (os_name) must be specified.')
  else:
    if os_name not in supported_os:
      logger.warning(
          ('Operating system of the imported image is not within the '
           'supported list:\n{0:s}\nFor the up-to-date list please refer '
           'to:\n{1:s}').format(
              ', '.join(supported_os),
              'https://cloud.google.com/sdk/gcloud/reference/compute/images/import#--os'))  # pylint: disable=line-too-long
    # Bug fix: img_type was previously left unassigned when os_name was not
    # in supported_os, causing an UnboundLocalError when building args_list.
    # After warning we still proceed with the user-supplied OS name, since
    # the supported list above may be out of date.
    img_type = '-os={0:s}'.format(os_name)

  if image_name:
    if not common.REGEX_DISK_NAME.match(image_name):
      raise errors.InvalidNameError(
          'Imported image name {0:s} does not comply with {1:s}'.format(
              image_name, common.REGEX_DISK_NAME.pattern), __name__)
    image_name = image_name[:common.COMPUTE_NAME_LIMIT]
  else:
    image_name = common.GenerateUniqueInstanceName(
        'imported-image', common.COMPUTE_NAME_LIMIT)

  args_list = [
      '-image_name={0:s}'.format(image_name),
      '-source_file={0:s}'.format(storage_image_path),
      '-timeout=86400s',
      '-client_id=api',
      img_type
  ]
  if bootable and not guest_environment:
    args_list.append('-no_guest_environment')
  build_body = {
      'steps': [{
          'args': args_list,
          'name': 'gcr.io/compute-image-tools/gce_vm_image_import:release',
          'env': ['BUILD_ID=$BUILD_ID']
      }],
      'timeout': '86400s',
      'tags': ['gce-daisy', 'gce-daisy-image-import']
  }
  cloud_build = build.GoogleCloudBuild(self.project_id)
  response = cloud_build.CreateBuild(build_body)
  cloud_build.BlockOperation(response)
  logger.info('Image {0:s} imported as GCE image {1:s}.'.format(
      storage_image_path, image_name))
  return GoogleComputeImage(self.project_id, '', image_name)