def CreateImageFromGcsTarGz(
    self, gcs_uri: str, name: Optional[str] = None) -> 'GoogleComputeImage':
  """Create a GCE image out of a .tar.gz image archive stored in GCS.

  Args:
    gcs_uri (str): Cloud Storage path to the compressed image archive.
        The object must be a gzip compressed tar archive with the
        extension .tar.gz.
        ex: 'https://storage.cloud.google.com/foo/bar.tar.gz'
        'gs://foo/bar.tar.gz'
        'foo/bar.tar.gz'
    name (str): Optional. Name of the image to create. When omitted, a
        unique 'imported-image-<timestamp>' name is generated.

  Returns:
    GoogleComputeImage: A Google Compute Image object.

  Raises:
    InvalidNameError: If the GCE Image name is invalid.
    ValueError: If the extension of the archived image is invalid.
  """
  # Resolve the image name: generate one, or validate and truncate the
  # caller-supplied name.
  if not name:
    name = common.GenerateUniqueInstanceName(
        'imported-image', common.COMPUTE_NAME_LIMIT)
  elif not common.REGEX_DISK_NAME.match(name):
    raise errors.InvalidNameError(
        'Image name {0:s} does not comply with {1:s}'.format(
            name, common.REGEX_DISK_NAME.pattern), __name__)
  else:
    name = name[:common.COMPUTE_NAME_LIMIT]

  if not gcs_uri.lower().endswith('.tar.gz'):
    raise ValueError(
        'Image imported from {0:s} must be a GZIP compressed TAR '
        'archive with the extension: .tar.gz'.format(gcs_uri))

  # Normalize the archive path to a full storage link URL.
  source_uri = os.path.relpath(gcs_uri, 'gs://')
  if not source_uri.startswith(common.STORAGE_LINK_URL):
    source_uri = os.path.join(common.STORAGE_LINK_URL, source_uri)

  operation = self.GceApi().images().insert(
      project=self.project_id,
      body={'name': name, 'rawDisk': {'source': source_uri}},
      forceCreate=True)
  # Wait until the image creation finishes before returning the handle.
  self.BlockOperation(operation.execute())
  return GoogleComputeImage(self.project_id, '', name)
def CreateDiskFromImage(self,
                        src_image: 'GoogleComputeImage',
                        zone: str,
                        name: Optional[str] = None) -> 'GoogleComputeDisk':
  """Create a GCE persistent disk out of an existing GCE image.

  Args:
    src_image (GoogleComputeImage): Source image for the disk.
    zone (str): Zone to create the new disk in.
    name (str): Optional. Name of the disk to create. When omitted, a
        unique '[src_image.name]-<timestamp>' name is generated.

  Returns:
    GoogleComputeDisk: A Google Compute Disk object.

  Raises:
    InvalidNameError: If GCE disk name is invalid.
  """
  # Resolve the disk name: generate one, or validate and truncate the
  # caller-supplied name.
  if not name:
    name = common.GenerateUniqueInstanceName(
        src_image.name, common.COMPUTE_NAME_LIMIT)
  elif not common.REGEX_DISK_NAME.match(name):
    raise errors.InvalidNameError(
        'Disk name {0:s} does not comply with {1:s}'.format(
            name, common.REGEX_DISK_NAME.pattern), __name__)
  else:
    name = name[:common.COMPUTE_NAME_LIMIT]

  source_image_path = (
      'projects/{project_id}/global/images/{src_image}'.format(
          project_id=src_image.project_id, src_image=src_image.name))
  request = self.GceApi().disks().insert(
      project=self.project_id,
      body={'name': name, 'sourceImage': source_image_path},
      zone=zone)
  # Wait until the disk creation finishes before returning the handle.
  self.BlockOperation(request.execute(), zone)
  return GoogleComputeDisk(self.project_id, zone, name)
def CreateImageFromDisk(
    self, src_disk: 'GoogleComputeDisk',
    name: Optional[str] = None) -> 'GoogleComputeImage':
  """Create a GCE image out of an existing persistent disk.

  Args:
    src_disk (GoogleComputeDisk): Source disk for the image.
    name (str): Optional. Name of the image to create. When omitted, a
        unique '[src_disk.name]-<timestamp>' name is generated.

  Returns:
    GoogleComputeImage: A Google Compute Image object.

  Raises:
    InvalidNameError: If the GCE Image name is invalid.
  """
  # Resolve the image name: generate one, or validate and truncate the
  # caller-supplied name.
  if not name:
    name = common.GenerateUniqueInstanceName(
        src_disk.name, common.COMPUTE_NAME_LIMIT)
  elif not common.REGEX_DISK_NAME.match(name):
    raise errors.InvalidNameError(
        'Image name {0:s} does not comply with {1:s}'.format(
            name, common.REGEX_DISK_NAME.pattern), __name__)
  else:
    name = name[:common.COMPUTE_NAME_LIMIT]

  source_disk_path = (
      'projects/{project_id}/zones/{zone}/disks/{src_disk}'.format(
          project_id=src_disk.project_id,
          zone=src_disk.zone,
          src_disk=src_disk.name))
  request = self.GceApi().images().insert(
      project=self.project_id,
      body={'name': name, 'sourceDisk': source_disk_path},
      forceCreate=True)
  # Wait until the image creation finishes before returning the handle.
  self.BlockOperation(request.execute())
  return GoogleComputeImage(self.project_id, '', name)
def Snapshot(
    self, snapshot_name: Optional[str] = None) -> 'GoogleComputeSnapshot':
  """Take a snapshot of this disk.

  A unique timestamp suffix is always appended to the base name (the
  caller-supplied name, or the disk name when omitted). The resulting
  Snapshot name must comply with the following RegEx:
    - ^(?=.{1,63}$)[a-z]([-a-z0-9]*[a-z0-9])?$

  i.e., it must be between 1 and 63 chars, the first character must be a
  lowercase letter, and all following characters must be a dash, lowercase
  letter, or digit, except the last character, which cannot be a dash.

  Args:
    snapshot_name (str): Optional. Base name of the Snapshot.

  Returns:
    GoogleComputeSnapshot: A Snapshot object.

  Raises:
    InvalidNameError: If the name of the snapshot does not comply with
        the RegEx.
  """
  # Default to the disk's own name, then make it unique with a timestamp.
  snapshot_name = common.GenerateUniqueInstanceName(
      snapshot_name or self.name, common.COMPUTE_NAME_LIMIT)
  if not common.REGEX_DISK_NAME.match(snapshot_name):
    raise errors.InvalidNameError(
        'Snapshot name {0:s} does not comply with {1:s}'.format(
            snapshot_name, common.REGEX_DISK_NAME.pattern), __name__)

  logger.info(
      self.FormatLogMessage('New Snapshot: {0:s}'.format(snapshot_name)))
  request = self.GceApi().disks().createSnapshot(
      disk=self.name,
      project=self.project_id,
      zone=self.zone,
      body={'name': snapshot_name})
  # Wait until the snapshot operation finishes before returning the handle.
  self.BlockOperation(request.execute(), zone=self.zone)
  return GoogleComputeSnapshot(disk=self, name=snapshot_name)
def CreateDiskFromGCSImage(
    project_id: str,
    storage_image_path: str,
    zone: str,
    name: Optional[str] = None) -> Dict[str, Any]:
  """Create a GCE persistent disk from an image stored in GCS.

  The method supports raw disk images and most virtual disk file formats.
  Valid import formats are:
  [raw (dd), qcow2, qcow , vmdk, vdi, vhd, vhdx, qed, vpc].

  The created GCE disk might be larger than the original raw (dd) image
  stored in GCS to satisfy GCE capacity requirements:
  https://cloud.google.com/compute/docs/disks/#introduction
  However the bytes_count and the md5_hash values of the source image are
  returned with the newly created disk. The md5_hash can be used to verify
  the integrity of the created GCE disk: it must be compared with the hash
  of the created GCE disk from byte 0 to bytes_count, i.e.:
  result['md5Hash'] = hash(created_gce_disk,
                           start_byte=0,
                           end_byte=result['bytes_count'])

  Args:
    project_id (str): Google Cloud Project ID.
    storage_image_path (str): Path to the source image in GCS.
    zone (str): Zone to create the new disk in.
    name (str): Optional. Name of the disk to create. When omitted, a
        unique 'imported-disk-<timestamp>' name is generated.

  Returns:
    Dict: A key value describing the imported GCE disk.
        Ex: {
            'project_id': 'fake-project',
            'disk_name': 'fake-imported-disk',
            'zone': 'fake-zone',
            'bytes_count': '1234'  # Content-Length of source image in bytes.
            'md5Hash': 'Source Image MD5 hash string in hex'
        }

  Raises:
    InvalidNameError: If the GCE disk name is invalid.
  """
  # Resolve the disk name: generate one, or validate and truncate the
  # caller-supplied name.
  if not name:
    name = common.GenerateUniqueInstanceName(
        'imported-disk', common.COMPUTE_NAME_LIMIT)
  elif not common.REGEX_DISK_NAME.match(name):
    raise errors.InvalidNameError(
        'Disk name {0:s} does not comply with {1:s}'.format(
            name, common.REGEX_DISK_NAME.pattern), __name__)
  else:
    name = name[:common.COMPUTE_NAME_LIMIT]

  project = gcp_project.GoogleCloudProject(project_id)
  # Import the GCS object as a GCE image, then materialize it as a disk.
  imported_image = project.compute.ImportImageFromStorage(storage_image_path)
  imported_disk = project.compute.CreateDiskFromImage(
      imported_image, zone=zone, name=name)
  # Pull size + MD5 of the source object so callers can verify the disk.
  object_metadata = project.storage.GetObjectMetadata(storage_image_path)
  return {
      'project_id': imported_disk.project_id,
      'disk_name': imported_disk.name,
      'zone': imported_disk.zone,
      'bytes_count': object_metadata['size'],
      # GCS reports MD5 in base64; expose it as hex for easy comparison.
      'md5Hash': base64.b64decode(object_metadata['md5Hash']).hex()
  }
def SetUp(self,
          analysis_project_name,
          remote_project_name,
          incident_id=None,
          zone='us-central1-f',
          create_analysis_vm=True,
          boot_disk_size=50,
          boot_disk_type='pd-standard',
          cpu_cores=4,
          remote_instance_name=None,
          disk_names=None,
          all_disks=False,
          image_project='ubuntu-os-cloud',
          image_family='ubuntu-1804-lts'):
  """Sets up a Google Cloud Platform(GCP) collector.

  This method creates and starts an analysis VM in the analysis project and
  selects disks to copy from the remote project.

  If analysis_project_name is not specified, analysis_project will be same
  as remote_project.

  If disk_names is specified, it will copy the corresponding disks from the
  project, ignoring disks belonging to any specific instances.

  If remote_instance_name is specified, two behaviors are possible:
  * If no other parameters are specified, it will select the instance's
    boot disk
  * if all_disks is set to True, it will select all disks in the project
    that are attached to the instance

  disk_names takes precedence over instance_names

  Args:
    analysis_project_name (str): Optional. name of the project that
        contains the analysis VM. Default is None.
    remote_project_name (str): name of the remote project where the disks
        must be copied from.
    incident_id (Optional[str]): Optional. Incident identifier on which the
        name of the analysis VM will be based. Default is None, which means
        add no label and format VM name as
        "gcp-forensics-vm-{TIMESTAMP('%Y%m%d%H%M%S')}".
    zone (Optional[str]): Optional. GCP zone in which new resources should
        be created. Default is us-central1-f.
    create_analysis_vm (Optional[bool]): Optional. Create analysis VM in
        the analysis project. Default is True.
    boot_disk_size (Optional[float]): Optional. Size of the analysis VM
        boot disk (in GB). Default is 50.
    boot_disk_type (Optional[str]): Optional. Disk type to use. Default is
        pd-standard.
    cpu_cores (Optional[int]): Optional. Number of CPU cores to create the
        VM with. Default is 4.
    remote_instance_name (Optional[str]): Optional. Name of the instance in
        the remote project containing the disks to be copied.
    disk_names (Optional[str]): Optional. Comma separated disk names to
        copy.
    all_disks (Optional[bool]): Optional. True if all disks attached to the
        source instance should be copied.
    image_project (Optional[str]): Optional. Name of the project where the
        analysis VM image is hosted.
    image_family (Optional[str]): Optional. Name of the image to use to
        create the analysis VM.
  """
  # At least one selection mechanism is required; ModuleError with
  # critical=True is expected to abort processing, but return anyway in
  # case it does not raise.
  if not (remote_instance_name or disk_names):
    self.ModuleError(
        'You need to specify at least an instance name or disks to copy',
        critical=True)
    return

  disk_names = disk_names.split(',') if disk_names else []
  self.remote_project = gcp_project.GoogleCloudProject(
      remote_project_name, default_zone=zone)
  # Fall back to copying within the remote project when no dedicated
  # analysis project was given.
  if analysis_project_name:
    self.analysis_project = gcp_project.GoogleCloudProject(
        analysis_project_name, default_zone=zone)
  else:
    self.analysis_project = self.remote_project

  self.remote_instance_name = remote_instance_name
  self.disk_names = disk_names
  self.all_disks = all_disks
  # NOTE(review): self.incident_id / self._gcp_label are only assigned here
  # when incident_id is truthy; later reads assume they are initialized
  # elsewhere (presumably __init__) — confirm against the class definition.
  if incident_id:
    self.incident_id = incident_id
    self._gcp_label = {'incident_id': self.incident_id}

  # Fail early if the source instance cannot be fetched (missing or no
  # permission). The caught exception object is not surfaced in the
  # user-facing message.
  try:
    if self.remote_instance_name:
      self.remote_project.compute.GetInstance(
          self.remote_instance_name)
  except ResourceNotFoundError as exception:
    self.ModuleError(
        message='Instance "{0:s}" not found or insufficient permissions'
        .format(self.remote_instance_name),
        critical=True)
    return

  if create_analysis_vm:
    # Derive the VM name from the incident id when available, otherwise
    # generate a unique timestamped name.
    if self.incident_id:
      analysis_vm_name = 'gcp-forensics-vm-{0:s}'.format(
          self.incident_id)
    else:
      analysis_vm_name = common.GenerateUniqueInstanceName(
          'gcp-forensics-vm', common.COMPUTE_NAME_LIMIT)

    self.logger.info(
        'Your analysis VM will be: {0:s}'.format(analysis_vm_name))
    self.logger.info('Complimentary gcloud command:')
    self.logger.info(
        'gcloud compute ssh --project {0:s} {1:s} --zone {2:s}'.format(
            self.analysis_project.project_id,
            analysis_vm_name,
            self.analysis_project.default_zone))

    # Record the VM name as a ticket attribute for downstream modules.
    self.state.StoreContainer(
        containers.TicketAttribute(
            name=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_NAME,
            type_=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_TYPE,
            value=analysis_vm_name))

    try:
      # pylint: disable=too-many-function-args
      # pylint: disable=redundant-keyword-arg
      self.analysis_vm, _ = gcp_forensics.StartAnalysisVm(
          self.analysis_project.project_id,
          analysis_vm_name,
          self.analysis_project.default_zone,
          boot_disk_size,
          boot_disk_type,
          int(cpu_cores),
          image_project=image_project,
          image_family=image_family)
      # Propagate the incident label to the VM and its boot disk.
      if self._gcp_label:
        self.analysis_vm.AddLabels(self._gcp_label)
        self.analysis_vm.GetBootDisk().AddLabels(self._gcp_label)
    except (RefreshError, DefaultCredentialsError) as exception:
      msg = (
          'Something is wrong with your Application Default Credentials. '
          'Try running:\n $ gcloud auth application-default login\n')
      msg += str(exception)
      self.ModuleError(msg, critical=True)
def ImportImageFromStorage(self,
                           storage_image_path: str,
                           image_name: Optional[str] = None,
                           bootable: bool = False,
                           os_name: Optional[str] = None,
                           guest_environment: bool = True) -> 'GoogleComputeImage':  # pylint: disable=line-too-long
  """Import GCE image from Cloud storage.

  The import tool supports raw disk images and most virtual disk
  file formats, valid import formats are:
  [raw (dd), qcow2, qcow , vmdk, vdi, vhd, vhdx, qed, vpc].

  Args:
    storage_image_path (str): Path to the source image in Cloud Storage.
    image_name (str): Optional. Name of the imported image, default is
        "imported-image-" appended with a timestamp in "%Y%m%d%H%M%S"
        format.
    bootable (bool): Optional. True if the imported image is bootable.
        Default is False. If True the os_name must be specified.
    os_name (str): Optional. Name of the operating system on the bootable
        image. For supported versions please see:
        https://cloud.google.com/sdk/gcloud/reference/compute/images/import#--os  # pylint: disable=line-too-long
        For known limitations please see:
        https://googlecloudplatform.github.io/compute-image-tools/image-import.html#compatibility-and-known-limitations  # pylint: disable=line-too-long
    guest_environment (bool): Optional. Install Google Guest Environment
        on a bootable image. Relevant only if image is bootable. Default
        True.

  Returns:
    GoogleComputeImage: A Google Compute Image object.

  Raises:
    ValueError: If bootable is True and os_name not specified.
    InvalidNameError: If imported image name is invalid.
  """
  supported_os = [
      'centos-6', 'centos-7', 'centos-8', 'debian-8', 'debian-9',
      'opensuse-15', 'rhel-6', 'rhel-6-byol', 'rhel-7', 'rhel-7-byol',
      'rhel-8', 'rhel-8-byol', 'sles-12-byol', 'sles-15-byol',
      'ubuntu-1404', 'ubuntu-1604', 'ubuntu-1804', 'windows-10-x64-byol',
      'windows-10-x86-byol', 'windows-2008r2', 'windows-2008r2-byol',
      'windows-2012', 'windows-2012-byol', 'windows-2012r2',
      'windows-2012r2-byol', 'windows-2016', 'windows-2016-byol',
      'windows-2019', 'windows-2019-byol', 'windows-7-x64-byol',
      'windows-7-x86-byol', 'windows-8-x64-byol', 'windows-8-x86-byol'
  ]

  if not bootable:
    img_type = '-data_disk'
  elif not os_name:
    raise ValueError('For bootable images, operating system name'
                     ' (os_name) must be specified.')
  else:
    if os_name not in supported_os:
      # The local list may be stale; warn but still attempt the import.
      logger.warning(
          ('Operating system of the imported image is not within the '
           'supported list:\n{0:s}\nFor the up-to-date list please refer '
           'to:\n{1:s}').format(
               ', '.join(supported_os),
               'https://cloud.google.com/sdk/gcloud/reference/compute/images/import#--os'))  # pylint: disable=line-too-long
    # BUGFIX: previously img_type was left unassigned when os_name was not
    # in supported_os, raising UnboundLocalError when building args_list.
    img_type = '-os={0:s}'.format(os_name)

  # Resolve the image name: validate and truncate the caller-supplied name,
  # or generate a unique one.
  if image_name:
    if not common.REGEX_DISK_NAME.match(image_name):
      raise errors.InvalidNameError(
          'Imported image name {0:s} does not comply with {1:s}'.format(
              image_name, common.REGEX_DISK_NAME.pattern), __name__)
    image_name = image_name[:common.COMPUTE_NAME_LIMIT]
  else:
    image_name = common.GenerateUniqueInstanceName(
        'imported-image', common.COMPUTE_NAME_LIMIT)

  # Arguments for the Daisy image-import Cloud Build step.
  args_list = [
      '-image_name={0:s}'.format(image_name),
      '-source_file={0:s}'.format(storage_image_path),
      '-timeout=86400s',
      '-client_id=api',
      img_type
  ]
  if bootable and not guest_environment:
    args_list.append('-no_guest_environment')
  build_body = {
      'steps': [{
          'args': args_list,
          'name': 'gcr.io/compute-image-tools/gce_vm_image_import:release',
          'env': ['BUILD_ID=$BUILD_ID']
      }],
      'timeout': '86400s',
      'tags': ['gce-daisy', 'gce-daisy-image-import']
  }
  cloud_build = build.GoogleCloudBuild(self.project_id)
  response = cloud_build.CreateBuild(build_body)
  # The import can take a long time; block until the build completes.
  cloud_build.BlockOperation(response)
  logger.info('Image {0:s} imported as GCE image {1:s}.'.format(
      storage_image_path, image_name))
  return GoogleComputeImage(self.project_id, '', image_name)