def GetObject(self, gcs_path: str, out_file: Optional[str] = None) -> str:
        """Gets the contents of an object in a Google Cloud Storage bucket.

    Args:
      gcs_path (str): Full path to the object (ie: gs://bucket/dir1/dir2/obj)
      out_file (str): Path to the local file that will be written.
        If not provided, will create a temporary file.

    Returns:
      str: The filename of the written object.

    Raises:
      ResourceCreationError: If the file couldn't be downloaded.
    """
        if not gcs_path.startswith('gs://'):
            gcs_path = 'gs://' + gcs_path
        gcs_objects = self.GcsApi().objects()
        (bucket, filename) = SplitStoragePath(gcs_path)
        request = gcs_objects.get_media(bucket=bucket, object=filename)

        if not out_file:
            outputdir = tempfile.mkdtemp()
            logger.info('Created temporary directory {0:s}'.format(outputdir))
            out_file = os.path.join(outputdir, os.path.basename(filename))

        stat = shutil.disk_usage(os.path.dirname(outputdir))
        om = self.GetObjectMetadata(gcs_path)
        if 'size' not in om:
            logger.warning(
                'Unable to retrieve object metadata before fetching')
        else:
            if int(om['size']) > stat.free:
                raise errors.ResourceCreationError(
                    'Target drive does not have enough space ({0!s} free vs {1!s} needed)'  # pylint: disable=line-too-long
                    .format(stat.free, om['size']),
                    __name__)

        with open(out_file, 'wb') as outputfile:
            downloader = googleapiclient.http.MediaIoBaseDownload(
                outputfile, request)

            done = False
            while not done:
                status, done = downloader.next_chunk()
                if status.total_size > stat.free:
                    raise errors.ResourceCreationError(
                        'Target drive does not have enough space ({0!s} free vs {1!s} needed)'  # pylint: disable=line-too-long
                        .format(stat.free, status.total_size),
                        __name__)
                logger.info('Download {}%.'.format(int(status.progress() *
                                                       100)))
            logger.info('File successfully written to {0:s}'.format(out_file))

        return out_file
Exemple #2
0
    def CreateDiskFromSnapshot(
            self,
            snapshot: 'GoogleComputeSnapshot',
            disk_name: Optional[str] = None,
            disk_name_prefix: Optional[str] = None,
            disk_type: str = 'pd-standard') -> 'GoogleComputeDisk':
        """Create a new disk based on a Snapshot.

    Args:
      snapshot (GoogleComputeSnapshot): Snapshot to use.
      disk_name (str): Optional. String to use as new disk name.
      disk_name_prefix (str): Optional. String to prefix the disk name with.
      disk_type (str): Optional. URL of the disk type resource describing
          which disk type to use to create the disk. Default is pd-standard. Use
          pd-ssd to have a SSD disk. You can list all available disk types by
          running the following command: gcloud compute disk-types list

    Returns:
      GoogleComputeDisk: Google Compute Disk.

    Raises:
      ResourceCreationError: If the disk could not be created.
    """

        if not disk_name:
            disk_name = common.GenerateDiskName(snapshot, disk_name_prefix)
        body = {
            'name':
            disk_name,
            'sourceSnapshot':
            snapshot.GetSourceString(),
            'type':
            'projects/{0:s}/zones/{1:s}/diskTypes/{2:s}'.format(
                self.project_id, self.default_zone, disk_type)
        }
        try:
            gce_disks_client = self.GceApi().disks()
            request = gce_disks_client.insert(project=self.project_id,
                                              zone=self.default_zone,
                                              body=body)
            response = request.execute()
        except HttpError as exception:
            if exception.resp.status == 409:
                raise errors.ResourceCreationError(
                    'Disk {0:s} already exists: {1!s}'.format(
                        disk_name, exception), __name__) from exception
            raise errors.ResourceCreationError(
                'Unknown error occurred when creating disk from Snapshot:'
                ' {0!s}'.format(exception), __name__) from exception
        self.BlockOperation(response, zone=self.default_zone)
        return GoogleComputeDisk(project_id=self.project_id,
                                 zone=self.default_zone,
                                 name=disk_name)
Exemple #3
0
  def GenerateSSHKeyPair(self, vm_name: str) -> Tuple[str, str]:
    """Generate a SSH key pair and returns its name and private key.

    Args:
      vm_name (str): The VM name for which to generate the key pair.

    Returns:
      Tuple[str, str]: A tuple containing the key name and the private key for
          the generated SSH key pair.

    Raises:
      ValueError: If vm_name is None.
      ResourceCreationError: If the key could not be created.
    """

    if not vm_name:
      raise ValueError('Parameter vm_name must not be None.')

    # SSH key names need to be unique, therefore we add a random 10 chars hex
    # string.
    key_name = '{0:s}-{1:s}-ssh'.format(
        vm_name, binascii.b2a_hex(os.urandom(10)).decode('utf-8'))
    client = self.aws_account.ClientApi(common.EC2_SERVICE)
    try:
      key = client.create_key_pair(KeyName=key_name)
    except client.exceptions.ClientError as exception:
      raise errors.ResourceCreationError(
          'Could not create SSH key pair: {0!s}'.format(exception), __name__)
    # If the call was successful, the response contains key information
    return key['KeyName'], key['KeyMaterial']
Exemple #4
0
    def Put(self, s3_path: str, filepath: str) -> None:
        """Upload a local file to an S3 bucket.

    Keeps the local filename intact.

    Args:
      s3_path (str): Path to the target S3 bucket.
          Ex: s3://test/bucket
      filepath (str): Path to the file to be uploaded.
          Ex: /tmp/myfile
    Raises:
      ResourceCreationError: If the object couldn't be uploaded.
    """
        client = self.aws_account.ClientApi(common.S3_SERVICE)
        if not s3_path.startswith('s3://'):
            s3_path = 's3://' + s3_path
        if not s3_path.endswith('/'):
            s3_path = s3_path + '/'
        try:
            (bucket, path) = gcp_storage.SplitStoragePath(s3_path)
            client.upload_file(
                filepath, bucket,
                '{0:s}{1:s}'.format(path, os.path.basename(filepath)))
        except FileNotFoundError as exception:
            raise errors.ResourceNotFoundError(
                'Could not upload file {0:s}: {1:s}'.format(
                    filepath, str(exception)), __name__) from exception
        except client.exceptions.ClientError as exception:
            raise errors.ResourceCreationError(
                'Could not upload file {0:s}: {1:s}'.format(
                    filepath, str(exception)), __name__) from exception
  def Snapshot(self,
               snapshot_name: Optional[str] = None,
               tags: Optional[Dict[str, str]] = None) -> 'AZComputeSnapshot':
    """Create a snapshot of the disk.

    Args:
      snapshot_name (str): Optional. A name for the snapshot. If none
          provided, one will be generated based on the disk's name.
      tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          snapshot, for example {'TicketID': 'xxx'}.

    Returns:
      AZComputeSnapshot: A snapshot object.

    Raises:
      InvalidNameError: If the snapshot name does not comply with the RegEx.
      ResourceCreationError: If the snapshot could not be created.
    """

    if not snapshot_name:
      snapshot_name = self.name + '_snapshot'
      truncate_at = 80 - 1
      snapshot_name = snapshot_name[:truncate_at]
      if not common.REGEX_SNAPSHOT_NAME.match(snapshot_name):
        raise errors.InvalidNameError(
            'Snapshot name {0:s} does not comply with {1:s}'.format(
                snapshot_name, common.REGEX_SNAPSHOT_NAME.pattern), __name__)

    creation_data = {
        'location': self.region,
        'creation_data': {
            'sourceResourceId': self.resource_id,
            'create_option': models.DiskCreateOption.copy
        }
    }

    if tags:
      creation_data['tags'] = tags

    try:
      logger.info('Creating snapshot: {0:s}'.format(snapshot_name))
      request = self.compute_client.snapshots.create_or_update(
          self.resource_group_name,
          snapshot_name,
          creation_data)
      while not request.done():
        sleep(5)  # Wait 5 seconds before checking snapshot status again
      snapshot = request.result()
      logger.info('Snapshot {0:s} successfully created'.format(snapshot_name))
    except azure_exceptions.CloudError as exception:
      raise errors.ResourceCreationError(
          'Could not create snapshot for disk {0:s}: {1!s}'.format(
              self.resource_id, exception), __name__)

    return AZComputeSnapshot(self.az_account,
                             snapshot.id,
                             snapshot.name,
                             snapshot.location,
                             self)
    def CreateBucket(
            self,
            bucket: str,
            labels: Optional[Dict[str, str]] = None,
            predefined_acl: str = 'private',
            predefined_default_object_acl: str = 'private') -> Dict[str, Any]:
        """Creates a Google Cloud Storage bucket in the current project.

    Args:
      bucket (str): Name of the desired bucket.
      labels (Dict[str, str]): Mapping of key/value strings to be applied as a label
        to the bucket.
        Rules for acceptable label values are located at
        https://cloud.google.com/storage/docs/key-terms#bucket-labels
      predefined_acl (str): A predefined set of Access Controls
        to apply to the bucket.
      predefined_default_object_acl (str): A predefined set of Access Controls
        to apply to the objects in the bucket.
      Values listed in https://cloud.google.com/storage/docs/json_api/v1/buckets/insert#parameters  # pylint: disable=line-too-long

    Returns:
      Dict[str, Any]: An API operation object for a Google Cloud Storage bucket.
           https://cloud.google.com/storage/docs/json_api/v1/buckets#resource
    """
        if bucket.startswith('gs://'):
            bucket = bucket[5:]
        gcs_buckets = self.GcsApi().buckets()
        body = {'name': bucket, 'labels': labels}
        request = gcs_buckets.insert(
            project=self.project_id,
            predefinedAcl=predefined_acl,
            predefinedDefaultObjectAcl=predefined_default_object_acl,
            body=body)
        try:
            response = request.execute()  # type: Dict[str, Any]
        except HttpError as exception:
            if exception.resp.status == 409:
                raise errors.ResourceCreationError(
                    'Bucket {0:s} already exists: {1!s}'.format(
                        bucket, exception), __name__) from exception
            raise errors.ResourceCreationError(
                'Unknown error occurred when creating bucket:'
                ' {0!s}'.format(exception), __name__) from exception
        return response
Exemple #7
0
    def Snapshot(self, tags: Optional[Dict[str, str]] = None) -> 'AWSSnapshot':
        """Create a snapshot of the volume.

    Args:
      tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          snapshot, for example {'Name': 'my-snapshot-name', 'TicketID': 'xxx'}.

    Returns:
      AWSSnapshot: A snapshot object.

    Raises:
      InvalidNameError: If the snapshot name does not comply with the RegEx.
      ResourceCreationError: If the snapshot could not be created.
    """

        if not tags:
            tags = {}

        snapshot_name = tags.get('Name') or (self.volume_id + '-snapshot')
        truncate_at = 255 - 1
        snapshot_name = snapshot_name[:truncate_at]
        if len(snapshot_name) > 255:
            raise errors.InvalidNameError(
                'Snapshot name {0:s} is too long (>255 chars)'.format(
                    snapshot_name), __name__)
        tags['Name'] = snapshot_name

        client = self.aws_account.ClientApi(common.EC2_SERVICE)
        try:
            snapshot = client.create_snapshot(
                VolumeId=self.volume_id,
                TagSpecifications=[common.CreateTags(common.SNAPSHOT, tags)])

            snapshot_id = snapshot.get('SnapshotId')
            # Wait for snapshot completion
            client.get_waiter('snapshot_completed').wait(
                SnapshotIds=[snapshot_id],
                WaiterConfig={
                    'Delay': 30,
                    'MaxAttempts': 100
                })
        except (client.exceptions.ClientError,
                botocore.exceptions.WaiterError) as exception:
            raise errors.ResourceCreationError(
                'Could not create snapshot for volume {0:s}: {1:s}'.format(
                    self.volume_id, str(exception)), __name__)

        return AWSSnapshot(snapshot_id,
                           self.aws_account,
                           self.aws_account.default_region,
                           self.aws_account.default_availability_zone,
                           self,
                           name=snapshot_name)
Exemple #8
0
    def CreateBucket(self,
                     name: str,
                     region: Optional[str] = None,
                     acl: str = 'private') -> Dict[str, Any]:
        """Create an S3 storage bucket.

    Args:
      name (str): The name of the bucket.
      region (str): Optional. The region in which the bucket resides.
      acl (str): Optional. The canned ACL with which to create the bucket.
        Default is 'private'.
    Appropriate values for the Canned ACLs are here:
    https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl  # pylint: disable=line-too-long

    Returns:
      Dict: An API operation object for a S3 bucket.
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Bucket.create  # pylint: disable=line-too-long

    Raises:
      ResourceCreationError: If the bucket couldn't be created.
    """

        client = self.aws_account.ClientApi(common.S3_SERVICE)
        try:
            bucket = client.create_bucket(
                Bucket=name,
                ACL=acl,
                CreateBucketConfiguration={
                    'LocationConstraint': region
                    or self.aws_account.default_region
                })  # type: Dict[str, Any]
            return bucket
        except client.exceptions.BucketAlreadyOwnedByYou as exception:
            raise errors.ResourceCreationError(
                'Bucket {0:s} already exists: {1:s}'.format(
                    name, str(exception)), __name__) from exception
        except client.exceptions.ClientError as exception:
            raise errors.ResourceCreationError(
                'Could not create bucket {0:s}: {1:s}'.format(
                    name, str(exception)), __name__) from exception
Exemple #9
0
    def CreateKMSKey(self) -> str:
        """Create a KMS key.

    Returns:
      str: The KMS key ID for the key that was created.

    Raises:
      ResourceCreationError: If the key could not be created.
    """

        client = self.aws_account.ClientApi(common.KMS_SERVICE)
        try:
            kms_key = client.create_key()
        except client.exceptions.ClientError as exception:
            raise errors.ResourceCreationError(
                'Could not create KMS key: {0!s}'.format(exception), __name__)

        # The response contains the key ID
        key_id = kms_key['KeyMetadata']['KeyId']  # type: str
        return key_id
    def CreateDiskFromSnapshot(
            self,
            snapshot: 'AZComputeSnapshot',
            region: Optional[str] = None,
            disk_name: Optional[str] = None,
            disk_name_prefix: Optional[str] = None,
            disk_type: str = 'Standard_LRS') -> 'AZComputeDisk':
        """Create a new disk based on a Snapshot.

    Args:
      snapshot (AZComputeSnapshot): Snapshot to use.
      region (str): Optional. The region in which to create the disk. If not
          provided, the disk will be created in the default_region associated to
          the AZAccount object.
      disk_name (str): Optional. String to use as new disk name.
      disk_name_prefix (str): Optional. String to prefix the disk name with.
      disk_type (str): Optional. The sku name for the disk to create. Can be
          Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. The
          default value is Standard_LRS.

    Returns:
      AZComputeDisk: Azure Compute Disk.

    Raises:
      ResourceCreationError: If the disk could not be created.
    """

        if not disk_name:
            disk_name = common.GenerateDiskName(
                snapshot, disk_name_prefix=disk_name_prefix)

        if not region:
            region = self.az_account.default_region

        creation_data = {
            'location': region,
            'creation_data': {
                'sourceResourceId': snapshot.resource_id,
                'create_option': models.DiskCreateOption.copy
            },
            'sku': {
                'name': disk_type
            }
        }

        try:
            logger.info('Creating disk: {0:s}'.format(disk_name))
            request = self.compute_client.disks.create_or_update(
                self.az_account.default_resource_group_name, disk_name,
                creation_data)
            while not request.done():
                sleep(5)  # Wait 5 seconds before checking disk status again
            disk = request.result()
            logger.info('Disk {0:s} successfully created'.format(disk_name))
        except azure_exceptions.CloudError as exception:
            raise errors.ResourceCreationError(
                'Could not create disk from snapshot {0:s}: {1!s}'.format(
                    snapshot.resource_id, exception), __name__) from exception

        return AZComputeDisk(self.az_account, disk.id, disk.name,
                             disk.location, disk.zones)
Exemple #11
0
    def CreateVolumeFromSnapshot(
            self,
            snapshot: AWSSnapshot,
            volume_name: Optional[str] = None,
            volume_name_prefix: Optional[str] = None,
            volume_type: str = 'gp2',
            kms_key_id: Optional[str] = None,
            tags: Optional[Dict[str, str]] = None) -> AWSVolume:
        """Create a new volume based on a snapshot.

    Args:
      snapshot (AWSSnapshot): Snapshot to use.
      volume_name (str): Optional. String to use as new volume name.
      volume_name_prefix (str): Optional. String to prefix the volume name with.
      volume_type (str): Optional. The volume type for the volume to create.
          Can be one of 'standard'|'io1'|'gp2'|'sc1'|'st1'. The default is
          'gp2'.
      kms_key_id (str): Optional. A KMS key id to encrypt the volume with.
      tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          volume, for example {'TicketID': 'xxx'}. An entry for the volume
          name is added by default.

    Returns:
      AWSVolume: An AWS EBS Volume.

    Raises:
      InvalidNameError: If the volume name does not comply with the RegEx
      ValueError: If the volume type is invalid.
      ResourceCreationError: If the volume could not be created.
    """

        if volume_type not in ['standard', 'io1', 'gp2', 'sc1', 'st1']:
            raise ValueError(
                'Volume type must be one of [standard, io1, gp2, sc1, '
                'st1]. Got: {0:s}'.format(volume_type))

        if not volume_name:
            volume_name = self._GenerateVolumeName(
                snapshot, volume_name_prefix=volume_name_prefix)

        if len(volume_name) > 255:
            raise errors.InvalidNameError(
                'Volume name {0:s} is too long (>255 chars)'.format(
                    volume_name), __name__)

        if not tags:
            tags = {}
        tags['Name'] = volume_name

        client = self.aws_account.ClientApi(common.EC2_SERVICE)
        create_volume_args = {
            'AvailabilityZone': snapshot.availability_zone,
            'SnapshotId': snapshot.snapshot_id,
            'TagSpecifications': [common.CreateTags(common.VOLUME, tags)],
            'VolumeType': volume_type
        }
        if kms_key_id:
            create_volume_args['Encrypted'] = True
            create_volume_args['KmsKeyId'] = kms_key_id
        if volume_type == 'io1':
            # If using the io1 volume type, we must specify Iops, see
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/
            # services/ec2.html#EC2.Client.create_volume. io1 volumes allow for a
            # ratio of 50 IOPS per 1 GiB.
            create_volume_args['Iops'] = self.aws_account.ResourceApi(
                common.EC2_SERVICE).Snapshot(
                    snapshot.snapshot_id).volume_size * 50
        try:
            volume = client.create_volume(**create_volume_args)
            volume_id = volume['VolumeId']
            # Wait for volume creation completion
            client.get_waiter('volume_available').wait(VolumeIds=[volume_id])
        except (client.exceptions.ClientError,
                botocore.exceptions.WaiterError) as exception:
            raise errors.ResourceCreationError(
                'Could not create volume {0:s} from snapshot {1:s}: {2!s}'.
                format(volume_name, snapshot.name, exception), __name__)

        zone = volume['AvailabilityZone']
        encrypted = volume['Encrypted']

        return AWSVolume(volume_id,
                         self.aws_account,
                         self.aws_account.default_region,
                         zone,
                         encrypted,
                         name=volume_name)
Exemple #12
0
    def Copy(
        self,
        kms_key_id: Optional[str] = None,
        delete: bool = False,
        deletion_account: Optional['account.AWSAccount'] = None
    ) -> 'AWSSnapshot':
        """Copy a snapshot.

    Args:
      kms_key_id (str): Optional. A KMS key id to encrypt the snapshot copy
          with. If set to None but the source snapshot is encrypted,
          then the copy will be encrypted too (with the key used by the
          source snapshot).
      delete (bool): Optional. If set to True, the snapshot being copied will
          be deleted prior to returning the copy. Default is False.
      deletion_account (AWSAccount): Optional. An AWSAccount object to use to
          delete the snapshot if 'delete' is set to True. Since accounts operate
          per region, this can be useful when copying snapshots across regions
          (which requires one AWSAccount object per region as per
          boto3.session.Session() requirements) and wanting to delete the source
          snapshot located in a different region than the copy being created.

    Returns:
      AWSSnapshot: A copy of the snapshot.

    Raises:
      ResourceCreationError: If the snapshot could not be copied.
    """

        client = self.aws_account.ClientApi(common.EC2_SERVICE)
        copy_args = {
            'SourceRegion': self.region,
            'SourceSnapshotId': self.snapshot_id
        }  # type: Dict[str, Union[str, bool]]
        if kms_key_id:
            copy_args['Encrypted'] = True
            copy_args['KmsKeyId'] = kms_key_id
        try:
            response = client.copy_snapshot(**copy_args)
        except client.exceptions.ClientError as exception:
            raise errors.ResourceCreationError(
                'Could not copy snapshot {0:s}: {1:s}'.format(
                    self.snapshot_id, str(exception)), __name__)

        snapshot_copy = AWSSnapshot(
            # The response contains the new snapshot ID
            response['SnapshotId'],
            self.aws_account,
            self.aws_account.default_region,
            self.aws_account.default_availability_zone,
            self.volume,
            name='{0:s}-copy'.format(self.snapshot_id))

        # Wait for the copy to be available
        client.get_waiter('snapshot_completed').wait(
            SnapshotIds=[snapshot_copy.snapshot_id],
            WaiterConfig={
                'Delay': 30,
                'MaxAttempts': 100
            })

        if delete:
            if deletion_account:
                self.aws_account = deletion_account
            self.Delete()

        return snapshot_copy
Exemple #13
0
def CreateDiskCopy(
        resource_group_name: str,
        instance_name: Optional[str] = None,
        disk_name: Optional[str] = None,
        disk_type: Optional[str] = None,
        region: str = 'eastus',
        src_profile: Optional[str] = None,
        dst_profile: Optional[str] = None) -> 'compute.AZComputeDisk':
    """Creates a copy of an Azure Compute Disk.

  Args:
    resource_group_name (str): The resource group in which to create the disk
        copy.
    instance_name (str): Optional. Instance name of the instance using the
        disk to be copied. If specified, the boot disk of the instance will be
        copied. If disk_name is also specified, then the disk pointed to by
        disk_name will be copied.
    disk_name (str): Optional. Name of the disk to copy. If not set,
        then instance_name needs to be set and the boot disk will be copied.
    disk_type (str): Optional. The sku name for the disk to create. Can be
        Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. The
        default behavior is to use the same disk type as the source disk.
    region (str): Optional. The region in which to create the disk copy.
        Default is eastus.
    src_profile (str): Optional. The name of the source profile to use for the
        disk copy, i.e. the account information of the Azure account that holds
        the disk. For more information on profiles, see GetCredentials()
        in libcloudforensics.providers.azure.internal.common.py. If not
        provided, credentials will be gathered from environment variables.
    dst_profile (str): Optional. The name of the destination profile to use for
        the disk copy. The disk will be copied into the account linked to
        this profile. If not provided, the default behavior is that the
        destination profile is the same as the source profile.
        For more information on profiles, see GetCredentials() in
        libcloudforensics.providers.azure.internal.common.py

  Returns:
    AZComputeDisk: An Azure Compute Disk object.

  Raises:
    ResourceCreationError: If there are errors copying the disk.
    ValueError: If both instance_name and disk_name are missing.
  """

    if not instance_name and not disk_name:
        raise ValueError(
            'You must specify at least one of [instance_name, disk_name].')

    src_account = account.AZAccount(resource_group_name,
                                    default_region=region,
                                    profile_name=src_profile)
    dst_account = account.AZAccount(resource_group_name,
                                    default_region=region,
                                    profile_name=(dst_profile or src_profile))

    try:
        if disk_name:
            disk_to_copy = src_account.compute.GetDisk(disk_name)
        elif instance_name:
            instance = src_account.compute.GetInstance(instance_name)
            disk_to_copy = instance.GetBootDisk()
        logger.info('Disk copy of {0:s} started...'.format(disk_to_copy.name))

        if not disk_type:
            disk_type = disk_to_copy.GetDiskType()

        snapshot = disk_to_copy.Snapshot()

        subscription_ids = src_account.resource.ListSubscriptionIDs()
        diff_account = dst_account.subscription_id not in subscription_ids
        diff_region = dst_account.default_region != snapshot.region

        # If the destination account is different from the source account or if the
        # destination region is different from the region in which the source
        # disk is, then we need to create the disk from a storage account in
        # which we import the previously created snapshot (cross-region/account
        # sharing).
        if diff_account or diff_region:
            logger.info(
                'Copy requested in a different destination account/region.')
            # Create a link to download the snapshot
            snapshot_uri = snapshot.GrantAccessAndGetURI()
            # Make a snapshot copy in the destination account from the link
            new_disk = dst_account.compute.CreateDiskFromSnapshotURI(
                snapshot,
                snapshot_uri,
                disk_name_prefix=common.DEFAULT_DISK_COPY_PREFIX,
                disk_type=disk_type)
            # Revoke download link and delete the initial copy
            snapshot.RevokeAccessURI()
        else:
            new_disk = dst_account.compute.CreateDiskFromSnapshot(
                snapshot,
                disk_name_prefix=common.DEFAULT_DISK_COPY_PREFIX,
                disk_type=disk_type)
        snapshot.Delete()
        logger.info('Disk {0:s} successfully copied to {1:s}'.format(
            disk_to_copy.name, new_disk.name))
    except (errors.LCFError, RuntimeError) as exception:
        raise errors.ResourceCreationError(
            'Cannot copy disk "{0:s}": {1!s}'.format(str(disk_name),
                                                     exception), __name__)

    return new_disk
Exemple #14
0
def CreateDiskCopy(
    src_proj: str,
    dst_proj: str,
    zone: str,
    instance_name: Optional[str] = None,
    disk_name: Optional[str] = None,
    disk_type: Optional[str] = None) -> 'compute.GoogleComputeDisk':
  """Creates a copy of a Google Compute Disk.

  Args:
    src_proj (str): Name of project that holds the disk to be copied.
    dst_proj (str): Name of project to put the copied disk in.
    zone (str): Zone where the new disk is to be created.
    instance_name (str): Optional. Instance using the disk to be copied.
    disk_name (str): Optional. Name of the disk to copy. If None,
        instance_name must be specified and the boot disk will be copied.
    disk_type (str): Optional. URL of the disk type resource describing
        which disk type to use to create the disk. The default behavior is to
        use the same disk type as the source disk.

  Returns:
    GoogleComputeDisk: A Google Compute Disk object.

  Raises:
    ResourceNotFoundError: If the GCP resource is not found.
    CredentialsConfigurationError: If the library could not authenticate to GCP.
    RuntimeError: If an unknown HttpError is thrown.
    ResourceCreationError: If there are errors copying the disk.
    ValueError: If both instance_name and disk_name are missing.
  """

  if not instance_name and not disk_name:
    raise ValueError(
        'You must specify at least one of [instance_name, disk_name].')

  src_project = gcp_project.GoogleCloudProject(src_proj)
  dst_project = gcp_project.GoogleCloudProject(dst_proj, default_zone=zone)

  try:
    if disk_name:
      disk_to_copy = src_project.compute.GetDisk(disk_name)
    elif instance_name:
      instance = src_project.compute.GetInstance(instance_name)
      disk_to_copy = instance.GetBootDisk()

    if not disk_type:
      disk_type = disk_to_copy.GetDiskType()

    logger.info('Disk copy of {0:s} started...'.format(
        disk_to_copy.name))
    snapshot = disk_to_copy.Snapshot()
    logger.debug('Snapshot created: {0:s}'.format(snapshot.name))
    new_disk = dst_project.compute.CreateDiskFromSnapshot(
        snapshot, disk_name_prefix='evidence', disk_type=disk_type)
    logger.info(
        'Disk {0:s} successfully copied to {1:s}'.format(
            disk_to_copy.name, new_disk.name))
    snapshot.Delete()
    logger.debug('Snapshot {0:s} deleted.'.format(snapshot.name))

  except (RefreshError, DefaultCredentialsError) as exception:
    raise errors.CredentialsConfigurationError(
        'Something is wrong with your Application Default Credentials. Try '
        'running: $ gcloud auth application-default login: {0!s}'.format(
            exception), __name__) from exception
  except HttpError as exception:
    if exception.resp.status == 403:
      raise errors.CredentialsConfigurationError(
          'Make sure you have the appropriate permissions on the project',
          __name__) from exception
    if exception.resp.status == 404:
      raise errors.ResourceNotFoundError(
          'GCP resource not found. Maybe a typo in the project / instance / '
          'disk name?', __name__) from exception
    raise RuntimeError(exception) from exception
  except RuntimeError as exception:
    raise errors.ResourceCreationError(
        'Cannot copy disk "{0:s}": {1!s}'.format(disk_name, exception),
        __name__) from exception

  return new_disk
    def GetOrCreateAnalysisVm(
        self,
        vm_name: str,
        boot_disk_size: int,
        cpu_cores: int,
        memory_in_mb: int,
        ssh_public_key: str,
        region: Optional[str] = None,
        packages: Optional[List[str]] = None,
        tags: Optional[Dict[str, str]] = None
    ) -> Tuple['AZComputeVirtualMachine', bool]:
        """Get or create a new virtual machine for analysis purposes.

    Args:
      vm_name (str): The instance name tag of the virtual machine.
      boot_disk_size (int): The size of the analysis VM boot volume (in GB).
      cpu_cores (int): Number of CPU cores for the analysis VM.
      memory_in_mb (int): The memory size (in MB) for the analysis VM.
      ssh_public_key (str): A SSH public key data to associate with the
          VM. This must be provided as otherwise the VM will not be
          accessible.
      region (str): Optional. The region in which to create the vm. If not
          provided, the vm will be created in the default_region
          associated to the AZAccount object.
      packages (List[str]): Optional. List of packages to install in the VM.
      tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          instance, for example {'TicketID': 'xxx'}. An entry for the
          instance name is added by default.

    Returns:
      Tuple[AZComputeVirtualMachine, bool]: A tuple with an
          AZComputeVirtualMachine object and a boolean indicating if the
          virtual machine was created (True) or reused (False).

    Raises:
      RuntimeError: If the provided SSH key is invalid.
      ResourceCreationError: If the virtual machine cannot be found or created.
    """

        # Re-use instance if it already exists, or create a new one.
        try:
            instance = self.GetInstance(vm_name)
            if instance:
                created = False
                return instance, created
        except errors.ResourceNotFoundError:
            pass

        # Validate SSH public key format
        try:
            sshpubkeys.SSHKey(ssh_public_key, strict=True).parse()
        except sshpubkeys.InvalidKeyError as exception:
            raise RuntimeError('The provided public SSH key is invalid: '
                               '{0:s}'.format(str(exception))) from exception

        instance_type = self._GetInstanceType(cpu_cores, memory_in_mb)
        startup_script = utils.ReadStartupScript()
        if packages:
            startup_script = startup_script.replace('${packages[@]}',
                                                    ' '.join(packages))

        if not region:
            region = self.az_account.default_region

        creation_data = {
            'location': region,
            'properties': {
                'hardwareProfile': {
                    'vmSize': instance_type
                },
                'storageProfile': {
                    'imageReference': {
                        'sku': common.UBUNTU_1804_SKU,
                        'publisher': 'Canonical',
                        'version': 'latest',
                        'offer': 'UbuntuServer'
                    }
                },
                'osDisk': {
                    'caching': 'ReadWrite',
                    'managedDisk': {
                        'storageAccountType': 'Standard_LRS'
                    },
                    'name': 'os-disk-{0:s}'.format(vm_name),
                    'diskSizeGb': boot_disk_size,
                    'createOption': models.DiskCreateOption.from_image
                },
                'osProfile': {
                    'adminUsername':
                    '******',
                    'computerName':
                    vm_name,
                    # Azure requires the startup script to be sent as a b64 string
                    'customData':
                    base64.b64encode(
                        str.encode(startup_script)).decode('utf-8'),
                    'linuxConfiguration': {
                        'ssh': {
                            'publicKeys': [{
                                'path': '/home/AzureUser/.ssh/authorized_keys',
                                'keyData': ssh_public_key
                            }]
                        }
                    }
                },
                'networkProfile': {
                    'networkInterfaces': [
                        # pylint: disable=line-too-long
                        # This is necessary when creating a VM from the SDK.
                        # See https://docs.microsoft.com/en-us/azure/virtual-machines/windows/python
                        # pylint: enable=line-too-long
                        {
                            'id':
                            self.az_account.network.CreateNetworkInterface(
                                vm_name, region)
                        }
                    ]
                }
            }
        }  # type: Dict[str, Any]

        if tags:
            creation_data['tags'] = tags

        try:
            request = self.compute_client.virtual_machines.create_or_update(
                self.az_account.default_resource_group_name, vm_name,
                creation_data)
            while not request.done():
                sleep(5)  # Wait 5 seconds before checking disk status again
            vm = request.result()
        except azure_exceptions.CloudError as exception:
            raise errors.ResourceCreationError(
                'Could not create instance {0:s}: {1!s}'.format(
                    vm_name, exception), __name__) from exception

        instance = AZComputeVirtualMachine(self.az_account,
                                           vm.id,
                                           vm.name,
                                           vm.location,
                                           zones=vm.zones)
        created = True
        return instance, created
    def CreateDiskFromSnapshotURI(
            self,
            snapshot: 'AZComputeSnapshot',
            snapshot_uri: str,
            region: Optional[str] = None,
            disk_name: Optional[str] = None,
            disk_name_prefix: Optional[str] = None,
            disk_type: str = 'Standard_LRS') -> 'AZComputeDisk':
        """Create a new disk based on a SAS snapshot URI.

    This is useful if e.g. one wants to make a copy of a disk in a separate
    Azure account. This method will create a temporary Azure Storage account
    within the destination account, import the snapshot from a downloadable
    link (the source account needs to share the snapshot through a SAS link)
    and then create a disk from the VHD file saved in storage. The Azure
    storage account is then deleted.

    Args:
      snapshot (AZComputeSnapshot): Source snapshot to use.
      snapshot_uri (str): The URI of the snapshot to copy.
      region (str): Optional. The region in which to create the disk. If not
          provided, the disk will be created in the default_region associated to
          the AZAccount object.
      disk_name (str): Optional. String to use as new disk name.
      disk_name_prefix (str): Optional. String to prefix the disk name with.
      disk_type (str): Optional. The sku name for the disk to create. Can be
          Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
          Default is Standard_LRS.

    Returns:
      AZComputeDisk: Azure Compute Disk.

    Raises:
      ResourceCreationError: If the disk could not be created.
    """

        if not region:
            region = self.az_account.default_region

        # Create a temporary Azure account storage to import the snapshot
        storage_account_name = hashlib.sha1(
            snapshot.resource_id.encode('utf-8')).hexdigest()[:23]
        storage_account_url = 'https://{0:s}.blob.core.windows.net'.format(
            storage_account_name)
        # pylint: disable=line-too-long
        storage_account_id, storage_account_access_key = self.az_account.storage.CreateStorageAccount(
            storage_account_name, region=region)
        # pylint: enable=line-too-long
        blob_service_client = blob.BlobServiceClient(
            account_url=storage_account_url,
            credential=storage_account_access_key)

        # Create a container within the Storage to receive the imported snapshot
        container_name = storage_account_name + '-container'
        snapshot_vhd_name = snapshot.name + '.vhd'
        container_client = blob_service_client.get_container_client(
            container_name)
        try:
            logger.info('Creating blob container {0:s}'.format(container_name))
            container_client.create_container()
            logger.info('Blob container {0:s} successfully created'.format(
                container_name))
        except exceptions.ResourceExistsError:
            # The container already exists, so we can re-use it
            logger.warning(
                'Reusing existing container: {0:s}'.format(container_name))

        # Download the snapshot from the URI to the storage
        copied_blob = blob_service_client.get_blob_client(
            container_name, snapshot_vhd_name)
        logger.info(
            'Importing snapshot to container from URI {0:s}. '
            'Depending on the size of the snapshot, this process is going '
            'to take a while.'.format(snapshot_uri))
        copied_blob.start_copy_from_url(snapshot_uri)
        copy_status = copied_blob.get_blob_properties().copy.status
        while copy_status != 'success':
            sleep(
                5
            )  # Wait for the vhd to be imported in the Azure storage container
            copy_status = copied_blob.get_blob_properties().copy.status
            if copy_status in ('aborted', 'failed'):
                raise errors.ResourceCreationError(
                    'Could not import the snapshot from URI {0:s}'.format(
                        snapshot_uri), __name__)
            logger.debug(
                'Importing snapshot from URI {0:s}'.format(snapshot_uri))
        logger.info('Snapshot successfully imported from URI {0:s}'.format(
            snapshot_uri))

        if not disk_name:
            disk_name = common.GenerateDiskName(
                snapshot, disk_name_prefix=disk_name_prefix)

        # Create a new disk from the imported snapshot
        creation_data = {
            'location': region,
            'creation_data': {
                'source_uri': copied_blob.url,
                'storage_account_id': storage_account_id,
                'create_option': models.DiskCreateOption.import_enum
            },
            'sku': {
                'name': disk_type
            }
        }

        try:
            logger.info('Creating disk: {0:s}'.format(disk_name))
            request = self.compute_client.disks.create_or_update(
                self.az_account.default_resource_group_name, disk_name,
                creation_data)
            while not request.done():
                sleep(5)  # Wait 5 seconds before checking disk status again
            disk = request.result()
            logger.info('Disk {0:s} successfully created'.format(disk_name))
        except azure_exceptions.CloudError as exception:
            raise errors.ResourceCreationError(
                'Could not create disk from URI {0:s}: {1!s}'.format(
                    snapshot_uri, exception), __name__) from exception

        # Cleanup the temporary account storage
        self.az_account.storage.DeleteStorageAccount(storage_account_name)

        return AZComputeDisk(self.az_account, disk.id, disk.name,
                             disk.location, disk.zones)
Exemple #17
0
  def GetOrCreateAnalysisVm(
      self,
      vm_name: str,
      boot_volume_size: int,
      ami: str,
      cpu_cores: int,
      boot_volume_type: str = 'gp2',
      packages: Optional[List[str]] = None,
      ssh_key_name: Optional[str] = None,
      tags: Optional[Dict[str, str]] = None) -> Tuple[AWSInstance, bool]:
    """Get or create a new virtual machine for analysis purposes.

    Args:
      vm_name (str): The instance name tag of the virtual machine.
      boot_volume_size (int): The size of the analysis VM boot volume (in GB).
      ami (str): The Amazon Machine Image ID to use to create the VM.
      cpu_cores (int): Number of CPU cores for the analysis VM.
      boot_volume_type (str): Optional. The volume type for the boot volume
          of the VM. Can be one of 'standard'|'io1'|'gp2'|'sc1'|'st1'. The
          default is 'gp2'.
      packages (List[str]): Optional. List of packages to install in the VM.
      ssh_key_name (str): Optional. A SSH key pair name linked to the AWS
          account to associate with the VM. If none provided, the VM can only
          be accessed through in-browser SSH from the AWS management console
          with the EC2 client connection package (ec2-instance-connect). Note
          that if this package fails to install on the target VM, then the VM
          will not be accessible. It is therefore recommended to fill in this
          parameter.
      tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          instance, for example {'TicketID': 'xxx'}. An entry for the instance
          name is added by default.

    Returns:
      Tuple[AWSInstance, bool]: A tuple with an AWSInstance object and a
          boolean indicating if the virtual machine was created (True) or
          reused (False).

    Raises:
      ResourceCreationError: If the virtual machine cannot be created.
    """

    # Re-use instance if it already exists, or create a new one.
    instances = self.GetInstancesByName(vm_name)
    if instances:
      created = False
      return instances[0], created

    instance_type = common.GetInstanceTypeByCPU(cpu_cores)
    startup_script = utils.ReadStartupScript()
    if packages:
      startup_script = startup_script.replace('${packages[@]}', ' '.join(
          packages))

    # Install ec2-instance-connect to allow SSH connections from the browser.
    startup_script = startup_script.replace(
        '(exit ${exit_code})',
        'apt -y install ec2-instance-connect && (exit ${exit_code})')

    if not tags:
      tags = {}
    tags['Name'] = vm_name

    client = self.aws_account.ClientApi(common.EC2_SERVICE)
    vm_args = {
        'BlockDeviceMappings':
            [self._GetBootVolumeConfigByAmi(
                ami, boot_volume_size, boot_volume_type)],
        'ImageId': ami,
        'MinCount': 1,
        'MaxCount': 1,
        'InstanceType': instance_type,
        'TagSpecifications': [common.CreateTags(common.INSTANCE, tags)],
        'UserData': startup_script,
        'Placement': {
            'AvailabilityZone': self.aws_account.default_availability_zone}
    }
    if ssh_key_name:
      vm_args['KeyName'] = ssh_key_name
    # Create the instance in AWS
    try:
      instance = client.run_instances(**vm_args)
      # If the call to run_instances was successful, then the API response
      # contains the instance ID for the new instance.
      instance_id = instance['Instances'][0]['InstanceId']
      # Wait for the instance to be running
      client.get_waiter('instance_running').wait(InstanceIds=[instance_id])
      # Wait for the status checks to pass
      client.get_waiter('instance_status_ok').wait(InstanceIds=[instance_id])
    except (client.exceptions.ClientError,
            botocore.exceptions.WaiterError) as exception:
      raise errors.ResourceCreationError(
          'Could not create instance {0:s}: {1!s}'.format(vm_name, exception),
          __name__)

    instance = AWSInstance(self.aws_account,
                           instance_id,
                           self.aws_account.default_region,
                           self.aws_account.default_availability_zone,
                           name=vm_name)
    created = True
    return instance, created
def CreateVolumeCopy(zone: str,
                     dst_zone: Optional[str] = None,
                     instance_id: Optional[str] = None,
                     volume_id: Optional[str] = None,
                     volume_type: Optional[str] = None,
                     src_profile: Optional[str] = None,
                     dst_profile: Optional[str] = None,
                     tags: Optional[Dict[str, str]] = None) -> 'ebs.AWSVolume':
    """Create a copy of an AWS EBS Volume.

  By default, the volume copy will be created in the same AWS account where
  the source volume sits. If you want the volume copy to be created in a
  different AWS account, you can specify one in the dst_profile parameter.
  The following example illustrates how you should configure your AWS
  credentials file for such a use case.

  # AWS credentials file
  [default] # default account to use with AWS
  aws_access_key_id=foo
  aws_secret_access_key=bar

  [investigation] # source account for a particular volume to be copied from
  aws_access_key_id=foo1
  aws_secret_access_key=bar1

  [forensics] # destination account to create the volume copy in
  aws_access_key_id=foo2
  aws_secret_access_key=bar2

  # Copies the boot volume from instance "instance_id" from the default AWS
  # account to the default AWS account.
  volume_copy = CreateDiskCopy(zone, instance_id='instance_id')

  # Copies the boot volume from instance "instance_id" from the default AWS
  # account to the 'forensics' AWS account.
  volume_copy = CreateDiskCopy(
      zone, instance_id='instance_id', dst_profile='forensics')

  # Copies the boot volume from instance "instance_id" from the
  # 'investigation' AWS account to the 'forensics' AWS account.
  volume_copy = CreateDiskCopy(
      zone,
      instance_id='instance_id',
      src_profile='investigation',
      dst_profile='forensics')

  Args:
    zone (str): The AWS zone in which the volume is located, e.g. 'us-east-2b'.
    dst_zone (str): Optional. The AWS zone in which to create the volume
        copy. By default, this is the same as 'zone'.
    instance_id (str): Optional. Instance ID of the instance using the volume
        to be copied. If specified, the boot volume of the instance will be
        copied. If volume_id is also specified, then the volume pointed by
        that volume_id will be copied.
    volume_id (str): Optional. ID of the volume to copy. If not set,
        then instance_id needs to be set and the boot volume will be copied.
    volume_type (str): Optional. The volume type for the volume to be
        created. Can be one of 'standard'|'io1'|'gp2'|'sc1'|'st1'. The default
        behavior is to use the same volume type as the source volume.
    src_profile (str): Optional. If the AWS account containing the volume
        that needs to be copied is different from the default account
        specified in the AWS credentials file then you can specify a
        different profile name here (see example above).
    dst_profile (str): Optional. If the volume copy needs to be created in a
        different AWS account, you can specify a different profile name here
        (see example above).
    tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          volume copy, for example {'TicketID': 'xxx'}.

  Returns:
    AWSVolume: An AWS EBS Volume object.

  Raises:
    ResourceCreationError: If there are errors copying the volume, or errors
        during KMS key creation/sharing if the target volume is encrypted.
    ValueError: If both instance_id and volume_id are missing, or if AWS
        account information could not be retrieved.
  """

    if not instance_id and not volume_id:
        raise ValueError(
            'You must specify at least one of [instance_id, volume_id].')

    source_account = account.AWSAccount(zone, aws_profile=src_profile)
    destination_account = account.AWSAccount(zone, aws_profile=dst_profile)
    kms_key_id = None

    try:
        if volume_id:
            volume_to_copy = source_account.ebs.GetVolumeById(volume_id)
        elif instance_id:
            instance = source_account.ec2.GetInstanceById(instance_id)
            volume_to_copy = instance.GetBootVolume()

        if not volume_type:
            volume_type = volume_to_copy.GetVolumeType()

        logger.info('Volume copy of {0:s} started...'.format(
            volume_to_copy.volume_id))
        snapshot = volume_to_copy.Snapshot()
        logger.info('Created snapshot: {0:s}'.format(snapshot.snapshot_id))

        source_account_id = source_account.ebs.GetAccountInformation().get(
            'Account')
        destination_account_id = destination_account.ebs.GetAccountInformation(
        ).get('Account')

        if not (source_account_id and destination_account_id):
            raise ValueError(
                'Could not retrieve AWS account ID: source {0!s}, dest: {1!s}'.
                format(source_account_id, destination_account_id))

        if source_account_id != destination_account_id:
            logger.info(
                'External account detected: source account ID is {0:s} and '
                'destination account ID is {1:s}'.format(
                    source_account_id, destination_account_id))
            if volume_to_copy.encrypted:
                logger.info(
                    'Encrypted volume detected, generating one-time use CMK key'
                )
                # Generate one-time use KMS key that will be shared with the
                # destination account.
                kms_key_id = source_account.kms.CreateKMSKey()
                source_account.kms.ShareKMSKeyWithAWSAccount(
                    kms_key_id, destination_account_id)
                # Create a copy of the initial snapshot and encrypts it with the
                # shared key
                snapshot = snapshot.Copy(kms_key_id=kms_key_id, delete=True)
            snapshot.ShareWithAWSAccount(destination_account_id)
            logger.info('Snapshot successfully shared with external account')

        if dst_zone and dst_zone != zone:
            # Assign the new zone to the destination account and assign it to the
            # snapshot so that it can copy it
            destination_account = account.AWSAccount(dst_zone,
                                                     aws_profile=dst_profile)
            snapshot.aws_account = destination_account
            snapshot = snapshot.Copy(delete=True,
                                     deletion_account=source_account)

        if tags and tags.get('Name'):
            new_volume = destination_account.ebs.CreateVolumeFromSnapshot(
                snapshot,
                volume_type=volume_type,
                volume_name=tags['Name'],
                tags=tags)
        else:
            new_volume = destination_account.ebs.CreateVolumeFromSnapshot(
                snapshot,
                volume_type=volume_type,
                volume_name_prefix='evidence',
                tags=tags)

        logger.info('Volume {0:s} successfully copied to {1:s}'.format(
            volume_to_copy.volume_id, new_volume.volume_id))
        logger.info('Cleaning up...')

        snapshot.Delete()
        # Delete the one-time use KMS key, if one was generated
        source_account.kms.DeleteKMSKey(kms_key_id)
        logger.info('Done')
    except (errors.LCFError, RuntimeError) as exception:
        raise errors.ResourceCreationError(
            'Copying volume {0:s}: {1!s}'.format((volume_id or instance_id),
                                                 exception),
            __name__) from exception

    return new_volume
Exemple #19
0
    def CreateNetworkInterface(self,
                               name: str,
                               region: Optional[str] = None) -> str:
        """Create a network interface and returns its ID.

    Args:
      name (str): The name of the network interface.
      region (str): Optional. The region in which to create the network
          interface. Default uses default_region of the AZAccount object.

    Returns:
      str: The id of the created network interface.

    Raises:
      ValueError: if name is not provided.
      ResourceCreationError: If no network interface could be created.
    """
        if not name:
            raise ValueError(
                'name must be specified. Provided: {0!s}'.format(name))

        if not region:
            region = self.az_account.default_region

        network_interface_name = '{0:s}-nic'.format(name)
        ip_config_name = '{0:s}-ipconfig'.format(name)

        # Check if the network interface already exists, and returns its ID if so.
        try:
            nic = self.network_client.network_interfaces.get(
                self.az_account.default_resource_group_name,
                network_interface_name)
            nic_id = nic.id  # type: str
            return nic_id
        except azure_exceptions.ResourceNotFoundError as exception:
            # NIC doesn't exist, ignore the error as we create it later on.
            pass
        except azure_exceptions.AzureError as exception:
            raise errors.ResourceCreationError(
                'Could not create network interface: {0!s}'.format(exception),
                __name__) from exception

        # pylint: disable=unbalanced-tuple-unpacking
        # IP address, virtual network, subnet, network security group
        public_ip, _, subnet, nsg = self._CreateNetworkInterfaceElements(
            name, region=region)
        # pylint: enable=unbalanced-tuple-unpacking

        creation_data = {
            'location':
            region,
            'ip_configurations': [{
                'name': ip_config_name,
                'public_ip_address': public_ip,
                'subnet': {
                    'id': subnet.id
                }
            }],
            'networkSecurityGroup':
            nsg
        }

        try:
            request = self.network_client.network_interfaces.begin_create_or_update(
                self.az_account.default_resource_group_name,
                network_interface_name, creation_data)
            request.wait()
        except azure_exceptions.AzureError as exception:
            raise errors.ResourceCreationError(
                'Could not create network interface: {0!s}'.format(exception),
                __name__) from exception

        network_interface_id = request.result().id  # type: str
        return network_interface_id
Exemple #20
0
    def _CreateNetworkInterfaceElements(
            self,
            name_prefix: str,
            region: Optional[str] = None) -> Tuple[Any, ...]:
        """Creates required elements for creating a network interface.

    Args:
      name_prefix (str): A name prefix to use for the network interface
          elements to create.
      region (str): Optional. The region in which to create the elements.
          Default uses default_region of the AZAccount object.

    Returns:
      Tuple[Any, Any, Any, Any]: A tuple containing a public IP address object,
          a virtual network object, a subnet object and a network security
          group object.

    Raises:
      ResourceCreationError: If the elements could not be created.
    """

        if not region:
            region = self.az_account.default_region

        # IP address
        public_ip_name = '{0:s}-public-ip'.format(name_prefix)
        # Virtual Network
        vnet_name = '{0:s}-vnet'.format(name_prefix)
        # Subnet
        subnet_name = '{0:s}-subnet'.format(name_prefix)
        # Network security group
        nsg_name = '{0:s}-nsg'.format(name_prefix)

        client_to_creation_data = {
            self.network_client.public_ip_addresses: {
                'resource_group_name':
                self.az_account.default_resource_group_name,
                'public_ip_address_name': public_ip_name,
                'parameters': {
                    'location': region,
                    'public_ip_allocation_method': 'Dynamic'
                }
            },
            self.network_client.virtual_networks: {
                'resource_group_name':
                self.az_account.default_resource_group_name,
                'virtual_network_name': vnet_name,
                'parameters': {
                    'location': region,
                    'address_space': {
                        'address_prefixes': ['10.0.0.0/16']
                    }
                }
            },
            self.network_client.subnets: {
                'resource_group_name':
                self.az_account.default_resource_group_name,
                'virtual_network_name': vnet_name,
                'subnet_name': subnet_name,
                'subnet_parameters': {
                    'address_prefix': '10.0.0.0/24'
                }
            },
            self.network_client.network_security_groups: {
                'resource_group_name':
                self.az_account.default_resource_group_name,
                'network_security_group_name': nsg_name,
                'parameters': {
                    'location':
                    region,
                    # Allow SSH traffic
                    'security_rules': [{
                        'name': 'Allow-SSH',
                        'direction': 'Inbound',
                        'protocol': 'TCP',
                        'source_address_prefix': '*',
                        'destination_address_prefix': '*',
                        'source_port_range': '*',
                        'destination_port_range': 22,
                        'access': 'Allow',
                        'priority': 300
                    }]
                }
            }
        }  # type: Dict[str, Any]

        result = []
        try:
            for client in client_to_creation_data:
                request = common.ExecuteRequest(
                    client, 'begin_create_or_update',
                    client_to_creation_data[client])[0]
                request.wait()
                result.append(request.result())
        except azure_exceptions.AzureError as exception:
            raise errors.ResourceCreationError(
                'Could not create network interface elements: {0!s}'.format(
                    exception), __name__) from exception
        return tuple(result)