Example #1
def GCSToS3(args: 'argparse.Namespace') -> None:
  """Transfer a file from GCS to an S3 bucket.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  aws_account = account.AWSAccount(args.zone)
  aws_account.s3.GCSToS3(args.project, args.gcs_path, args.s3_path)

  logger.info('File successfully transferred.')
Example #2
def UploadToBucket(args: 'argparse.Namespace') -> None:
  """Upload a file to an S3 bucket.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  aws_account = account.AWSAccount(args.zone)
  aws_account.s3.Put(args.bucket, args.filepath)

  logger.info('File successfully uploaded.')
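
These CLI wrappers read only a handful of attributes from the parsed arguments, so they can be exercised with a hand-built namespace. A minimal sketch, assuming this wrapper's module-level imports and logger are in place, AWS credentials for the default profile, and hypothetical zone/bucket/file values:

import argparse

# Hypothetical values; the wrapper only reads .zone, .bucket and .filepath.
fake_args = argparse.Namespace(
    zone='us-east-2b',
    bucket='my-evidence-bucket',
    filepath='/tmp/evidence.bin')
UploadToBucket(fake_args)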
Example #3
def StartAnalysisVm(args: 'argparse.Namespace') -> None:
  """Start forensic analysis VM.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  if args.attach_volumes and len(args.attach_volumes.split(',')) > 11:
    logger.error('--attach_volumes must contain at most 11 volumes')
    return

  attach_volumes = []
  if args.attach_volumes:
    volumes = args.attach_volumes.split(',')
    # Check if volumes parameter exists and if there
    # are any empty entries.
    if not (volumes and all(elements for elements in volumes)):
      logger.error('parameter --attach_volumes: {0:s}'.format(
          args.attach_volumes))
      return

    # AWS recommends using device names that are within /dev/sd[f-p].
    device_letter = ord('f')
    for volume in volumes:
      attach = (volume, '/dev/sd'+chr(device_letter))
      attach_volumes.append(attach)
      device_letter = device_letter + 1

  key_name = args.ssh_key_name
  if args.generate_ssh_key_pair:
    logger.info('Generating SSH key pair for the analysis VM.')
    aws_account = account.AWSAccount(args.zone)
    key_name, private_key = aws_account.ec2.GenerateSSHKeyPair(
        args.instance_name)
    path = os.path.join(os.getcwd(), key_name + '.pem')
    with open(path, 'w') as f:
      f.write(private_key)
    logger.info(
        'Created key pair {0:s} in AWS. Your private key is saved in: '
        '{1:s}'.format(key_name, path))

  logger.info('Starting analysis VM...')
  vm = forensics.StartAnalysisVm(vm_name=args.instance_name,
                                 default_availability_zone=args.zone,
                                 boot_volume_size=int(args.boot_volume_size),
                                 boot_volume_type=args.boot_volume_type,
                                 cpu_cores=int(args.cpu_cores),
                                 ami=args.ami,
                                 ssh_key_name=key_name,
                                 attach_volumes=attach_volumes,
                                 dst_profile=args.dst_profile)

  logger.info('Analysis VM started.')
  logger.info('Name: {0:s}, Started: {1:s}, Region: {2:s}'.format(vm[0].name,
                                                                  str(vm[1]),
                                                                  vm[0].region))
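
The attach-volume handling above maps each volume ID to the next device letter, starting at /dev/sdf. A standalone sketch reproducing that mapping for a hypothetical --attach_volumes value:

# Reproduces the device-name mapping from the loop above for a
# hypothetical comma-separated volume list.
volumes = 'vol-0aaa,vol-0bbb,vol-0ccc'.split(',')
attach_volumes = []
device_letter = ord('f')
for volume in volumes:
  attach_volumes.append((volume, '/dev/sd' + chr(device_letter)))
  device_letter += 1
print(attach_volumes)
# [('vol-0aaa', '/dev/sdf'), ('vol-0bbb', '/dev/sdg'), ('vol-0ccc', '/dev/sdh')]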
Example #4
def CreateBucket(args: 'argparse.Namespace') -> None:
  """Create an S3 bucket.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

  aws_account = account.AWSAccount(args.zone)
  bucket = aws_account.s3.CreateBucket(args.name)

  logger.info('Bucket created: {0:s}'.format(bucket['Location']))
Example #5
File: e2e.py Project: bbhunt-2020/Canivete
  def testListImages(self):
    """End to end test on AWS.

    Test listing AMI images with a filter.
    """

    aws_account = account.AWSAccount(self.zone)
    qfilter = [{'Name': 'name', 'Values': ['Ubuntu 18.04*']}]
    images = aws_account.ec2.ListImages(qfilter)

    self.assertGreater(len(images), 0)
    self.assertIn('Name', images[0])
Example #6
def StartAnalysisVm(
    vm_name: str,
    default_availability_zone: str,
    boot_volume_size: int,
    ami: str = UBUNTU_1804_AMI,
    cpu_cores: int = 4,
    attach_volumes: Optional[List[Tuple[str, str]]] = None,
    dst_profile: Optional[str] = None,
    ssh_key_name: Optional[str] = None) -> Tuple['ec2.AWSInstance', bool]:
  """Start a virtual machine for analysis purposes.

  Look for an existing AWS instance with tag name vm_name. If found,
  this instance will be started and used as analysis VM. If not found, then a
  new vm with that name will be created, started and returned.

  Args:
    vm_name (str): The name for the virtual machine.
    default_availability_zone (str): Default zone within the region to create
        new resources in.
    boot_volume_size (int): The size of the analysis VM boot volume (in GB).
    ami (str): Optional. The Amazon Machine Image ID to use to create the VM.
        Default is a version of Ubuntu 18.04.
    cpu_cores (int): Optional. The number of CPU cores to create the machine
        with. Default is 4.
    attach_volumes (List[Tuple[str, str]]): Optional. List of tuples
        containing the volume IDs (str) to attach and their respective device
        name (str, e.g. /dev/sdf). Note that it is mandatory to provide a
        unique device name per volume to attach.
    dst_profile (str): Optional. The AWS account in which to create the
        analysis VM. This is the profile name that is defined in your AWS
        credentials file.
    ssh_key_name (str): Optional. A SSH key pair name linked to the AWS
        account to associate with the VM. If none provided, the VM can only
        be accessed through in-browser SSH from the AWS management console
        with the EC2 client connection package (ec2-instance-connect). Note
        that if this package fails to install on the target VM, then the VM
        will not be accessible. It is therefore recommended to fill in this
        parameter.

  Returns:
    Tuple[AWSInstance, bool]: a tuple with a virtual machine object
        and a boolean indicating if the virtual machine was created or not.
  """
  aws_account = account.AWSAccount(default_availability_zone,
                                   aws_profile=dst_profile)
  analysis_vm, created = aws_account.GetOrCreateAnalysisVm(
      vm_name, boot_volume_size, ami, cpu_cores, ssh_key_name=ssh_key_name)
  for volume_id, device_name in (attach_volumes or []):
    analysis_vm.AttachVolume(aws_account.GetVolumeById(volume_id),
                             device_name)
  return analysis_vm, created
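
A hedged usage sketch of this helper, using only the parameters defined above; the zone, volume ID, key pair name and 'forensics' profile are hypothetical and must exist in your AWS account and credentials file:

from libcloudforensics.providers.aws import forensics

# attach_volumes takes (volume_id, device_name) tuples.
# All IDs and names below are hypothetical.
vm, created = forensics.StartAnalysisVm(
    vm_name='analysis-vm-case-1234',
    default_availability_zone='us-east-2b',
    boot_volume_size=50,
    cpu_cores=4,
    attach_volumes=[('vol-0aaa', '/dev/sdf')],
    dst_profile='forensics',
    ssh_key_name='my-ssh-key')
print(vm.name, created)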
Example #7
def ListVolumes(args: 'argparse.Namespace') -> None:
  """List EBS volumes in AWS account.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

  aws_account = account.AWSAccount(args.zone)
  volumes = aws_account.ListVolumes()

  print('Volumes found:')
  for volume in volumes:
    print('Name: {0:s}, Zone: {1:s}'.format(
        volume, volumes[volume].availability_zone))
Example #8
def ListInstances(args: 'argparse.Namespace') -> None:
  """List EC2 instances in AWS account.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

  aws_account = account.AWSAccount(args.zone)
  instances = aws_account.ListInstances()

  print('Instances found:')
  for instance in instances:
    boot_volume = instances[instance].GetBootVolume().volume_id
    print('Name: {0:s}, Boot volume: {1:s}'.format(instance, boot_volume))
Example #9
def ListImages(args: 'argparse.Namespace') -> None:
  """List AMI images and filter on AMI image 'name'.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  aws_account = account.AWSAccount(args.zone)

  qfilter = [{'Name': 'name', 'Values': [args.filter]}]

  images = aws_account.ListImages(qfilter)

  for image in images:
    print('Name: {0:s}, ImageId: {1:s}, Location: {2:s}'.format(
        image['Name'], image['ImageId'], image['ImageLocation']))
Example #10
File: e2e.py Project: bbhunt-2020/Canivete
  @classmethod
  def setUpClass(cls):
    try:
      project_info = utils.ReadProjectInfo(['instance', 'zone'])
    except (OSError, RuntimeError, ValueError) as exception:
      raise unittest.SkipTest(str(exception))
    cls.instance_to_analyse = project_info['instance']
    cls.zone = project_info['zone']
    cls.dst_zone = project_info.get('destination_zone', None)
    cls.volume_to_copy = project_info.get('volume_id', None)
    cls.encrypted_volume_to_copy = project_info.get('encrypted_volume_id', None)
    cls.aws = account.AWSAccount(cls.zone)
    cls.analysis_vm_name = 'new-vm-for-analysis'
    cls.analysis_vm, _ = forensics.StartAnalysisVm(cls.analysis_vm_name,
                                                   cls.zone,
                                                   10)
    cls.volumes = []  # List of (AWSAccount, AWSVolume) tuples
Example #11
File: e2e.py Project: bbhunt-2020/Canivete
  def testVolumeCopyToOtherZone(self):
    """End to end test on AWS.

    Test copying a specific volume to a different AWS availability zone.
    """

    if not (self.volume_to_copy and self.dst_zone):
      return

    volume_copy = forensics.CreateVolumeCopy(
        self.zone, dst_zone=self.dst_zone, volume_id=self.volume_to_copy)
    # The volume should be created in AWS
    aws_account = account.AWSAccount(self.dst_zone)
    aws_volume = aws_account.ResourceApi(EC2_SERVICE).Volume(
        volume_copy.volume_id)
    self.assertEqual(aws_volume.volume_id, volume_copy.volume_id)
    self._StoreVolumeForCleanup(aws_account, aws_volume)
Example #12
def QueryLogs(args: 'argparse.Namespace') -> None:
  """Query AWS CloudTrail log events.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """
  ct = aws_log.AWSCloudTrail(account.AWSAccount(args.zone))

  params = {}
  if args.filter:
    params['qfilter'] = args.filter
  if args.start:
    params['starttime'] = datetime.strptime(args.start, '%Y-%m-%d %H:%M:%S')
  if args.end:
    params['endtime'] = datetime.strptime(args.end, '%Y-%m-%d %H:%M:%S')

  result = ct.LookupEvents(**params)

  if result:
    print('Log events found: {0:d}'.format(len(result)))
    for event in result:
      print(event)
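
The start and end filters above must match the '%Y-%m-%d %H:%M:%S' format expected by strptime. A minimal sketch of the underlying CloudTrail lookup with explicit datetimes (the zone and dates are hypothetical):

from datetime import datetime

from libcloudforensics.providers.aws.internal import account
from libcloudforensics.providers.aws.internal import log as aws_log

ct = aws_log.AWSCloudTrail(account.AWSAccount('us-east-2b'))
events = ct.LookupEvents(
    starttime=datetime(2021, 1, 1, 0, 0, 0),
    endtime=datetime(2021, 1, 2, 0, 0, 0))
print('Log events found: {0:d}'.format(len(events)))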
Example #13
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aws module."""
import typing
import unittest

import mock

from libcloudforensics.providers.aws.internal import account, common, ebs, ec2
from libcloudforensics.providers.aws.internal import log as aws_log
from libcloudforensics.providers.aws import forensics

FAKE_AWS_ACCOUNT = account.AWSAccount(default_availability_zone='fake-zone-2b')
FAKE_INSTANCE = ec2.AWSInstance(FAKE_AWS_ACCOUNT, 'fake-instance-id',
                                'fake-zone-2', 'fake-zone-2b')
FAKE_INSTANCE_WITH_NAME = ec2.AWSInstance(FAKE_AWS_ACCOUNT,
                                          'fake-instance-with-name-id',
                                          'fake-zone-2',
                                          'fake-zone-2b',
                                          name='fake-instance')
FAKE_VOLUME = ebs.AWSVolume('fake-volume-id', FAKE_AWS_ACCOUNT, 'fake-zone-2',
                            'fake-zone-2b', False)
FAKE_BOOT_VOLUME = ebs.AWSVolume('fake-boot-volume-id',
                                 FAKE_AWS_ACCOUNT,
                                 'fake-zone-2',
                                 'fake-zone-2b',
                                 False,
                                 name='fake-boot-volume')
Example #14
def CreateVolumeCopy(zone: str,
                     dst_zone: Optional[str] = None,
                     instance_id: Optional[str] = None,
                     volume_id: Optional[str] = None,
                     volume_type: Optional[str] = None,
                     src_profile: Optional[str] = None,
                     dst_profile: Optional[str] = None,
                     tags: Optional[Dict[str, str]] = None) -> 'ebs.AWSVolume':
  """Create a copy of an AWS EBS Volume.

  By default, the volume copy will be created in the same AWS account where
  the source volume sits. If you want the volume copy to be created in a
  different AWS account, you can specify one in the dst_profile parameter.
  The following example illustrates how you should configure your AWS
  credentials file for such a use case.

  # AWS credentials file
  [default] # default account to use with AWS
  aws_access_key_id=foo
  aws_secret_access_key=bar

  [investigation] # source account for a particular volume to be copied from
  aws_access_key_id=foo1
  aws_secret_access_key=bar1

  [forensics] # destination account to create the volume copy in
  aws_access_key_id=foo2
  aws_secret_access_key=bar2

  # Copies the boot volume from instance "instance_id" from the default AWS
  # account to the default AWS account.
  volume_copy = CreateVolumeCopy(zone, instance_id='instance_id')

  # Copies the boot volume from instance "instance_id" from the default AWS
  # account to the 'forensics' AWS account.
  volume_copy = CreateVolumeCopy(
      zone, instance_id='instance_id', dst_profile='forensics')

  # Copies the boot volume from instance "instance_id" from the
  # 'investigation' AWS account to the 'forensics' AWS account.
  volume_copy = CreateVolumeCopy(
      zone,
      instance_id='instance_id',
      src_profile='investigation',
      dst_profile='forensics')

  Args:
    zone (str): The AWS zone in which the volume is located, e.g. 'us-east-2b'.
    dst_zone (str): Optional. The AWS zone in which to create the volume
        copy. By default, this is the same as 'zone'.
    instance_id (str): Optional. Instance ID of the instance using the volume
        to be copied. If specified, the boot volume of the instance will be
        copied. If volume_id is also specified, then the volume pointed to by
        that volume_id will be copied.
    volume_id (str): Optional. ID of the volume to copy. If not set,
        then instance_id needs to be set and the boot volume will be copied.
    volume_type (str): Optional. The volume type for the volume to be
        created. Can be one of 'standard'|'io1'|'gp2'|'sc1'|'st1'. The default
        behavior is to use the same volume type as the source volume.
    src_profile (str): Optional. If the AWS account containing the volume
        that needs to be copied is different from the default account
        specified in the AWS credentials file then you can specify a
        different profile name here (see example above).
    dst_profile (str): Optional. If the volume copy needs to be created in a
        different AWS account, you can specify a different profile name here
        (see example above).
    tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          volume copy, for example {'TicketID': 'xxx'}.

  Returns:
    AWSVolume: An AWS EBS Volume object.

  Raises:
    RuntimeError: If there are errors copying the volume, or errors during
        KMS key creation/sharing if the target volume is encrypted.
    ValueError: If both instance_id and volume_id are missing.
  """

  if not instance_id and not volume_id:
    raise ValueError(
        'You must specify at least one of [instance_id, volume_id].')

  source_account = account.AWSAccount(zone, aws_profile=src_profile)
  destination_account = account.AWSAccount(zone, aws_profile=dst_profile)
  kms_key_id = None

  try:
    if volume_id:
      volume_to_copy = source_account.GetVolumeById(volume_id)
    elif instance_id:
      instance = source_account.GetInstanceById(instance_id)
      volume_to_copy = instance.GetBootVolume()

    if not volume_type:
      volume_type = volume_to_copy.GetVolumeType()

    logger.info('Volume copy of {0:s} started...'.format(
        volume_to_copy.volume_id))
    snapshot = volume_to_copy.Snapshot()
    logger.info('Created snapshot: {0:s}'.format(snapshot.snapshot_id))

    source_account_id = source_account.GetAccountInformation('Account')
    destination_account_id = destination_account.GetAccountInformation(
        'Account')

    if source_account_id != destination_account_id:
      logger.info('External account detected: source account ID is {0:s} and '
                  'destination account ID is {1:s}'.format(
                      source_account_id, destination_account_id))
      if volume_to_copy.encrypted:
        logger.info(
            'Encrypted volume detected, generating one-time use CMK key')
        # Generate one-time use KMS key that will be shared with the
        # destination account.
        kms_key_id = source_account.CreateKMSKey()
        source_account.ShareKMSKeyWithAWSAccount(
            kms_key_id, destination_account_id)
        # Create a copy of the initial snapshot and encrypt it with the
        # shared key.
        snapshot = snapshot.Copy(kms_key_id=kms_key_id, delete=True)
      snapshot.ShareWithAWSAccount(destination_account_id)
      logger.info('Snapshot successfully shared with external account')

    if dst_zone and dst_zone != zone:
      # Re-create the destination account in the new zone and assign it to
      # the snapshot so that the copy is made in that zone.
      destination_account = account.AWSAccount(
          dst_zone, aws_profile=dst_profile)
      snapshot.aws_account = destination_account
      snapshot = snapshot.Copy(delete=True, deletion_account=source_account)

    if tags and tags.get('Name'):
      new_volume = destination_account.CreateVolumeFromSnapshot(
          snapshot,
          volume_type=volume_type,
          volume_name=tags['Name'],
          tags=tags)
    else:
      new_volume = destination_account.CreateVolumeFromSnapshot(
          snapshot,
          volume_type=volume_type,
          volume_name_prefix='evidence',
          tags=tags)

    logger.info('Volume {0:s} successfully copied to {1:s}'.format(
        volume_to_copy.volume_id, new_volume.volume_id))
    logger.info('Cleaning up...')

    snapshot.Delete()
    # Delete the one-time use KMS key, if one was generated
    source_account.DeleteKMSKey(kms_key_id)
    logger.info('Done')
  except RuntimeError as exception:
    error_msg = 'Copying volume {0:s}: {1!s}'.format(
        (volume_id or instance_id), exception)
    raise RuntimeError(error_msg)

  return new_volume
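
Building on the docstring examples, a hedged sketch of a cross-zone copy with tags, exercising the dst_zone and tags branches above (the zones, volume ID and ticket value are hypothetical):

from libcloudforensics.providers.aws import forensics

volume_copy = forensics.CreateVolumeCopy(
    'us-east-2b',
    dst_zone='us-west-2a',
    volume_id='vol-0aaa',
    volume_type='gp2',
    tags={'Name': 'evidence-case-1234', 'TicketID': 'case-1234'})
print(volume_copy.volume_id)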
Example #15
def StartAnalysisVm(
    vm_name: str,
    default_availability_zone: str,
    boot_volume_size: int,
    boot_volume_type: str = 'gp2',
    ami: Optional[str] = None,
    cpu_cores: int = 4,
    attach_volumes: Optional[List[Tuple[str, str]]] = None,
    dst_profile: Optional[str] = None,
    ssh_key_name: Optional[str] = None,
    tags: Optional[Dict[str, str]] = None) -> Tuple['ec2.AWSInstance', bool]:
  """Start a virtual machine for analysis purposes.

  Look for an existing AWS instance with tag name vm_name. If found,
  this instance will be started and used as analysis VM. If not found, then a
  new vm with that name will be created, started and returned.

  Args:
    vm_name (str): The name for the virtual machine.
    default_availability_zone (str): Default zone within the region to create
        new resources in.
    boot_volume_size (int): The size of the analysis VM boot volume (in GB).
    boot_volume_type (str): Optional. The volume type for the boot volume
          of the VM. Can be one of 'standard'|'io1'|'gp2'|'sc1'|'st1'. The
          default is 'gp2'.
    ami (str): Optional. The Amazon Machine Image ID to use to create the VM.
        Default is a version of Ubuntu 18.04.
    cpu_cores (int): Optional. The number of CPU cores to create the machine
        with. Default is 4.
    attach_volumes (List[Tuple[str, str]]): Optional. List of tuples
        containing the volume IDs (str) to attach and their respective device
        name (str, e.g. /dev/sdf). Note that it is mandatory to provide a
        unique device name per volume to attach.
    dst_profile (str): Optional. The AWS account in which to create the
        analysis VM. This is the profile name that is defined in your AWS
        credentials file.
    ssh_key_name (str): Optional. A SSH key pair name linked to the AWS
        account to associate with the VM. If none provided, the VM can only
        be accessed through in-browser SSH from the AWS management console
        with the EC2 client connection package (ec2-instance-connect). Note
        that if this package fails to install on the target VM, then the VM
        will not be accessible. It is therefore recommended to fill in this
        parameter.
    tags (Dict[str, str]): Optional. A dictionary of tags to add to the
          instance, for example {'TicketID': 'xxx'}. An entry for the instance
          name is added by default.

  Returns:
    Tuple[AWSInstance, bool]: a tuple with a virtual machine object
        and a boolean indicating if the virtual machine was created or not.

  Raises:
    RuntimeError: When multiple AMI images are returned.
  """

  aws_account = account.AWSAccount(
      default_availability_zone, aws_profile=dst_profile)

  # If no AMI ID is given we use the default Ubuntu 18.04
  # in the region requested.
  if not ami:
    logger.info('No AMI provided, fetching one for Ubuntu 18.04')
    qfilter = [{'Name': 'name', 'Values': [UBUNTU_1804_FILTER]}]
    ami_list = aws_account.ListImages(qfilter)
    # We should only get 1 AMI image back, if we get multiple we
    # have no way of knowing which one to use.
    if len(ami_list) > 1:
      image_names = [image['Name'] for image in ami_list]
      raise RuntimeError('error - ListImages returns >1 AMI image: [{0:s}]'
                         .format(', '.join(image_names)))
    ami = ami_list[0]['ImageId']
  assert ami  # Mypy: assert that ami is not None

  logger.info('Starting analysis VM {0:s}'.format(vm_name))
  analysis_vm, created = aws_account.GetOrCreateAnalysisVm(
      vm_name,
      boot_volume_size,
      ami,
      cpu_cores,
      boot_volume_type=boot_volume_type,
      ssh_key_name=ssh_key_name,
      tags=tags)
  logger.info('VM started.')
  for volume_id, device_name in (attach_volumes or []):
    logger.info('Attaching volume {0:s} to device {1:s}'.format(
        volume_id, device_name))
    analysis_vm.AttachVolume(aws_account.GetVolumeById(volume_id), device_name)
  logger.info('VM ready.')
  return analysis_vm, created
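
Compared with example #6, this variant resolves a default Ubuntu 18.04 AMI when none is given and additionally accepts boot_volume_type and tags. A hedged usage sketch relying on that AMI lookup (the zone and tag values are hypothetical):

from libcloudforensics.providers.aws import forensics

# Omitting ami triggers the Ubuntu 18.04 lookup shown above.
vm, created = forensics.StartAnalysisVm(
    vm_name='analysis-vm-case-1234',
    default_availability_zone='us-east-2b',
    boot_volume_size=50,
    boot_volume_type='gp2',
    cpu_cores=4,
    tags={'TicketID': 'case-1234'})
print(vm.name, created)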
Example #16
File: aws.py Project: Ctfbuster/dftimewolf
  def SetUp(self,
            remote_profile_name,
            remote_zone,
            incident_id,
            remote_instance_id=None,
            volume_ids=None,
            all_volumes=False,
            analysis_profile_name=None,
            analysis_zone=None,
            boot_volume_size=50,
            cpu_cores=16,
            ami=None):
    """Sets up an Amazon web Services (AWS) collector.

    This method creates and starts an analysis VM in the AWS account and
    selects volumes to copy from the target instance / list of volumes passed
    in parameter.

    If volume_ids is specified, it will copy the corresponding volumes from the
    account, ignoring volumes belonging to any specific instances.

    If remote_instance_id is specified, two behaviors are possible:
    * If no other parameters are specified, it will select the instance's boot
      volume.
    * If all_volumes is set to True, it will select all volumes in the account
      that are attached to the instance.

    volume_ids takes precedence over remote_instance_id.

    Args:
      remote_profile_name (str): The AWS account in which the
          volume(s) exist(s). This is the profile name that is defined in
          your AWS credentials file.
      remote_zone (str): The AWS zone in which the source volume(s) exist(s).
      incident_id (str): Incident identifier used to name the analysis VM.
      remote_instance_id (str): Optional. Instance ID that needs forensicating.
      volume_ids (str): Optional. Comma-separated list of volume IDs to
          copy.
      all_volumes (bool): Optional. True if all volumes attached to the source
          instance should be copied.
      analysis_profile_name (str): Optional. The AWS account in which to
          create the analysis VM. This is the profile name that is defined in
          your AWS credentials file.
      analysis_zone (str): Optional. The AWS zone in which to create the VM.
          If not specified, the VM will be created in the same zone where the
          volume(s) exist(s).
      boot_volume_size (int): Optional. The size (in GB) of the boot volume
          for the analysis VM. Default is 50 GB.
      cpu_cores (int): Optional. The number of CPU cores to use for the
          analysis VM. Default is 16.
      ami (str): Optional. The Amazon Machine Image ID to use to create the
          analysis VM. If not specified, will default to selecting Ubuntu 18.04
          LTS.
    """

    if not (remote_instance_id or volume_ids):
      self.ModuleError(
          'You need to specify at least an instance ID or volume IDs to copy',
          critical=True)
      return

    if not (remote_profile_name and remote_zone):
      self.ModuleError('You must specify "remote_profile_name" and '
                       '"remote_zone" parameters', critical=True)
      return

    self.remote_profile_name = remote_profile_name
    self.remote_zone = remote_zone
    self.source_account = aws_account.AWSAccount(
        self.remote_zone, aws_profile=self.remote_profile_name)

    self.incident_id = incident_id
    self.remote_instance_id = remote_instance_id

    self.volume_ids = volume_ids.split(',') if volume_ids else []
    self.all_volumes = all_volumes
    self.analysis_zone = analysis_zone or remote_zone
    self.analysis_profile_name = analysis_profile_name or remote_profile_name

    analysis_vm_name = 'aws-forensics-vm-{0:s}'.format(self.incident_id)
    print('Your analysis VM will be: {0:s}'.format(analysis_vm_name))
    self.state.StoreContainer(
        containers.TicketAttribute(
            name=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_NAME,
            type_=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_TYPE,
            value=analysis_vm_name))
    self.analysis_vm, _ = aws_forensics.StartAnalysisVm(
        analysis_vm_name,
        self.analysis_zone,
        boot_volume_size,
        ami=ami,
        cpu_cores=cpu_cores,
        dst_profile=self.analysis_profile_name,
    )