Code example #1
File: gcloud_e2e.py | Project: someguyiknow/dftimewolf
def CleanUp(project_id, zone, instance_name):
    """Clean up GCP project.

  Remove the instance [instance_name] in the GCP project [project_id] and its
  disks that were created as part of the end-to-end test.

  Args:
    project_id (str): the project id of the GCP project.
    zone (str): the zone for the project.
    instance_name (str): the name of the analysis VM to remove.
  """

    gcp_client = common.GoogleCloudComputeClient(project_id=project_id)
    project = gcp_project.GoogleCloudProject(project_id, zone)
    disks = compute.GoogleComputeInstance(project.project_id, zone,
                                          instance_name).ListDisks()

    # delete the created forensics VMs
    log.info('Deleting analysis instance: {0:s}.'.format(instance_name))
    gce_instances_client = gcp_client.GceApi().instances()
    request = gce_instances_client.delete(project=project.project_id,
                                          zone=project.default_zone,
                                          instance=instance_name)
    try:
        request.execute()
    except HttpError:
        # GceOperation triggers a while(True) loop that checks on the
        # operation ID. Sometimes it loops one more time right when the
        # operation has finished and thus the associated ID doesn't exist
        # anymore, throwing an HttpError. We can ignore this.
        pass
    log.info('Instance {0:s} successfully deleted.'.format(instance_name))

    # delete the copied disks
    # we ignore the disk that was created for the analysis VM (disks[0]) as
    # it is deleted in the previous operation
    gce_disks_client = gcp_client.GceApi().disks()
    for disk in list(disks.keys())[1:]:
        log.info('Deleting disk: {0:s}.'.format(disk))
        while True:
            try:
                request = gce_disks_client.delete(project=project.project_id,
                                                  zone=project.default_zone,
                                                  disk=disk)
                request.execute()
                break
            except HttpError as exception:
                # GceApi() will throw a 400 error until the analysis VM deletion is
                # correctly propagated. When the disk is finally deleted, it will
                # throw a 404 not found if it looped again after deletion.
                if exception.resp.status == 404:
                    break
                if exception.resp.status != 400:
                    log.warning(
                        'Could not delete the disk {0:s}: {1:s}'.format(
                            disk, str(exception)))
                # Throttle the requests to one every 10 seconds
                time.sleep(10)

        log.info('Disk {0:s} successfully deleted.'.format(disk))
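
A minimal invocation sketch for the cleanup helper above; the project, zone, and instance values are placeholders, and the module-level imports (common, gcp_project, compute, log) are assumed to be in scope:

# Hypothetical values; substitute the project and VM used by your test run.
CleanUp(project_id='my-test-project',
        zone='us-central1-f',
        instance_name='gcp-forensics-vm-e2e')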
Code example #2
File: google_cloud.py | Project: rgayon/turbinia
def PostprocessDetachDisk(disk_name, local_path):
    """Detaches Google Cloud Disk from an instance.

  Args:
    disk_name(str): The name of the Cloud Disk to detach.
    local_path(str): The local path to the block device to detach.
  """
    # TODO: can local_path be something different than the /dev/disk/by-id/google*
    if local_path:
        path = local_path
    else:
        path = '/dev/disk/by-id/google-{0:s}'.format(disk_name)

    if not IsBlockDevice(path):
        log.info('Disk {0:s} already detached!'.format(disk_name))
        return

    config.LoadConfig()
    instance_name = GetLocalInstanceName()
    project = gcp_project.GoogleCloudProject(config.TURBINIA_PROJECT,
                                             default_zone=config.TURBINIA_ZONE)
    instance = project.compute.GetInstance(instance_name,
                                           zone=config.TURBINIA_ZONE)
    disk = instance.GetDisk(disk_name)
    log.info('Detaching disk {0:s} from instance {1:s}'.format(
        disk_name, instance_name))
    instance.DetachDisk(disk)

    # Make sure device is Detached
    for _ in range(RETRY_MAX):
        if not os.path.exists(path):
            log.info('Block device {0:s} is no longer attached'.format(path))
            break
        time.sleep(5)
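
A usage sketch, assuming the Turbinia config is already in place on the instance; passing None as local_path lets the function derive the /dev/disk/by-id/google-<disk_name> path itself. The disk name is a placeholder:

# 'evidence-disk-1' is a hypothetical disk name.
PostprocessDetachDisk('evidence-disk-1', None)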
Code example #3
    def SetUp(self,
              source_project_name,
              source_disk_name,
              gcs_output_location,
              analysis_project_name=None,
              exported_disk_name=None):
        """Sets up a Google Cloud Platform (GCP) Disk Export.

    This method creates the required objects to initialize
    the GoogleCloudDiskExport class attributes.

    If the analysis_project_name is not specified it will use the
    source_project_name instead.

    Args:
      source_project_name (str): Source project ID containing
          the disk to export.
      source_disk_name (str): Disk that needs to be exported.
      gcs_output_location (str): Google Cloud Storage parent bucket/folder
          path of the exported image.
      analysis_project_name (Optional[str]): Project ID where the
          disk image is created then exported. If not set,
          source_project_name will be used.
      exported_disk_name (Optional[str]): Name of the output file; it must
          comply with ^[A-Za-z0-9-]*$ and '.tar.gz' will be appended to the
          name. If not set, a random name will be generated.
    """
        self.source_project = gcp_project.GoogleCloudProject(
            source_project_name)
        if analysis_project_name:
            self.analysis_project = gcp_project.GoogleCloudProject(
                analysis_project_name)
        else:
            self.analysis_project = self.source_project
        self.source_disk = self.source_project.compute.GetDisk(
            source_disk_name)
        self.gcs_output_location = gcs_output_location
        self._image_name = '{0:s}-image-df-export-temp'.format(
            self.source_disk.name)
        if exported_disk_name:
            self.exported_disk_name = exported_disk_name
        else:
            self.exported_disk_name = self._image_name
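
A hedged sketch of driving this SetUp by hand, following the dftimewolf test pattern shown in code example #14; the state constructor and all names are assumptions, not part of the snippet above:

# Assumes the imports from code example #14.
test_state = state.DFTimewolfState(config.Config)  # hypothetical test state
exporter = gce_disk_export.GoogleCloudDiskExport(test_state)
exporter.SetUp(
    source_project_name='fake-source-project',
    source_disk_name='fake-source-disk',
    gcs_output_location='gs://fake-bucket/exports',
    analysis_project_name=None,  # falls back to the source project
    exported_disk_name=None)     # falls back to '<disk>-image-df-export-temp'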
Code example #4
File: google_cloud.py | Project: jaegeral/turbinia
def PreprocessAttachDisk(disk_name):
  """Attaches Google Cloud Disk to an instance.

  Args:
    disk_name(str): The name of the Cloud Disk to attach.

  Returns:
    (str, list(str)): a tuple consisting of the path to the 'disk' block device
      and a list of paths to partition block devices. For example:
      (
       '/dev/disk/by-id/google-disk0',
       ['/dev/disk/by-id/google-disk0-part1', '/dev/disk/by-id/google-disk0-p2']
      )

  Raises:
    TurbiniaException: If the device is not a block device.
  """
  path = '/dev/disk/by-id/google-{0:s}'.format(disk_name)
  if IsBlockDevice(path):
    log.info('Disk {0:s} already attached!'.format(disk_name))
    return (path, sorted(glob.glob('{0:s}-part*'.format(path))))

  config.LoadConfig()
  instance_name = GetLocalInstanceName()
  project = gcp_project.GoogleCloudProject(
      config.TURBINIA_PROJECT, default_zone=config.TURBINIA_ZONE)
  instance = project.compute.GetInstance(instance_name)

  disk = project.compute.GetDisk(disk_name)
  log.info(
      'Attaching disk {0:s} to instance {1:s}'.format(disk_name, instance_name))
  instance.AttachDisk(disk)

  # Make sure we have a proper block device
  for _ in range(RETRY_MAX):
    if IsBlockDevice(path):
      log.info('Block device {0:s} successfully attached'.format(path))
      break
    if os.path.exists(path):
      log.info(
          'Block device {0:s} mode is {1}'.format(path,
                                                  os.stat(path).st_mode))
    time.sleep(1)

  message = None
  if not os.path.exists(path):
    turbinia_nonexisting_disk_path.inc()
    message = 'Device path {0:s} does not exist'.format(path)
  elif not IsBlockDevice(path):
    message = 'Device path {0:s} is not a block device'.format(path)
  if message:
    log.error(message)
    raise TurbiniaException(message)

  return (path, sorted(glob.glob('{0:s}-part*'.format(path))))
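
A sketch of consuming the returned tuple; the disk name is a placeholder:

device_path, partition_paths = PreprocessAttachDisk('evidence-disk-1')
log.info('Disk device: {0:s}'.format(device_path))
for partition_path in partition_paths:
  log.info('Partition device: {0:s}'.format(partition_path))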
Code example #5
def ListDisks(args: 'argparse.Namespace') -> None:
    """List GCE disks in GCP project.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

    project = gcp_project.GoogleCloudProject(args.project)
    disks = project.compute.ListDisks()
    logger.info('Disks found:')
    for disk in disks:
        logger.info('Name: {0:s}, Zone: {1:s}'.format(disk, disks[disk].zone))
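
Outside a full CLI, the args namespace can be built directly; a minimal sketch with a placeholder project ID:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('project', help='ID of the GCP project to inspect.')
ListDisks(parser.parse_args(['my-forensics-project']))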
Code example #6
def VMRemoveServiceAccount(project_id: str,
                           instance_name: str,
                           leave_stopped: bool = False) -> bool:
  """
  Remove a service account attachment from a GCP VM.

  Service account attachments to VMs allow the VM to obtain credentials
  via the instance metadata service to perform API actions. Removing
  the service account attachment will prevent credentials being issued.

  Note that the instance will be powered down for this action, if it is not
  already.

  Args:
    project_id (str): Google Cloud Project ID.
    instance_name (str): The name of the virtual machine.
    leave_stopped (bool): Optional. True to leave the machine powered off.

  Returns:
    bool: True if the service account was successfully removed, False otherwise.
  """
  logger.info('Removing service account attachment from "{0:s}",'
              ' in project {1:s}'.format(instance_name, project_id))

  valid_starting_states = ['RUNNING', 'STOPPING', 'TERMINATED']

  project = gcp_project.GoogleCloudProject(project_id)
  instance = project.compute.GetInstance(instance_name)

  # Get the initial powered state of the instance
  initial_state = instance.GetPowerState()

  if initial_state not in valid_starting_states:
    logger.error(
        'Instance "{0:s}" is currently {1:s}, which is an invalid '
        'state for this operation'.format(instance_name, initial_state))
    return False

  try:
    # Stop the instance if it is not already (or on the way)....
    if initial_state not in ('TERMINATED', 'STOPPING'):
      instance.Stop()

    # Remove the service account
    instance.DetachServiceAccount()

    # If the instance was running initially, and the option has been set,
    # start up the instance again
    if initial_state == 'RUNNING' and not leave_stopped:
      instance.Start()
  except errors.LCFError as exception:
    logger.error('Fatal exception encountered: {0:s}'.format(str(exception)))
    return False

  return True
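
A sketch of calling the helper and acting on its boolean result; the project and instance names are placeholders:

if VMRemoveServiceAccount('my-project', 'compromised-vm', leave_stopped=True):
  logger.info('Service account detached; VM left powered off.')
else:
  logger.error('Service account removal failed or was skipped.')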
Code example #7
def ListInstances(args: 'argparse.Namespace') -> None:
  """List GCE instances in GCP project.

  Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
  """

  project = gcp_project.GoogleCloudProject(args.project)
  instances = project.compute.ListInstances()

  print('Instances found:')
  for instance in instances:
    bootdisk = instances[instance].GetBootDisk()
    if bootdisk:
      print('Name: {0:s}, Bootdisk: {1:s}'.format(instance, bootdisk.name))
Code example #8
def InstanceNetworkQuarantine(project_id: str,
                              instance_name: str,
                              exempted_src_ips: Optional[List[str]] = None,
                              enable_logging: bool = False) -> None:
  """Put a Google Cloud instance in network quarantine.

  Network quarantine is imposed via applying deny-all
  ingress/egress firewall rules on each network interface.

  Args:
    project_id (str): Google Cloud Project ID.
    instance_name (str): The name of the virtual machine.
    exempted_src_ips (List[str]): List of IPs exempted from the deny-all
        ingress firewall rules, ex: analyst IPs.
    enable_logging (bool): Optional. Enable firewall logging.
        Default is False.
  """
  logger.info('Putting instance "{0:s}", in project {1:s}, in network '
              'quarantine.'.format(instance_name, project_id))
  project = gcp_project.GoogleCloudProject(project_id)
  instance = project.compute.GetInstance(instance_name)
  get_operation = instance.GetOperation()
  network_interfaces = get_operation['networkInterfaces']
  target_tags = []
  for interface in network_interfaces:
    network_url = interface['network']
    # Adding a random suffix to the tag to avoid name collisions,
    # tags are used as firewall rule names, which need to be unique.
    tag_suffix = random.randint(10**19, 10**20 - 1)
    deny_ingress_tag = 'deny-ingress-tag-' + str(tag_suffix)
    deny_egress_tag = 'deny-egress-tag-' + str(tag_suffix)
    AddDenyAllFirewallRules(
        project_id,
        network_url,
        deny_ingress_tag,
        deny_egress_tag,
        exempted_src_ips,
        enable_logging)
    target_tags.append(deny_ingress_tag)
    target_tags.append(deny_egress_tag)
  instance.SetTags(target_tags)
  if exempted_src_ips:
    logger.info('From a host with an exempted IP, '
        'connect to the quarantined instance using:\n'
        'gcloud compute ssh --zone "{0:s}" "{1:s}" --project "{2:s}"\n'
        'Connecting from the browser via GCP console will not work.'.format(
              instance.zone, instance_name, project_id))
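
A usage sketch; the IP below is a documentation placeholder (TEST-NET-2) standing in for an analyst workstation that should keep access:

InstanceNetworkQuarantine(
    'my-project',
    'compromised-vm',
    exempted_src_ips=['198.51.100.42'],  # hypothetical analyst IP
    enable_logging=True)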
Code example #9
def StartAnalysisVm(
    project: str,
    vm_name: str,
    zone: str,
    boot_disk_size: int,
    boot_disk_type: str,
    cpu_cores: int,
    attach_disks: Optional[List[str]] = None,
    image_project: str = 'ubuntu-os-cloud',
    image_family: str = 'ubuntu-1804-lts') -> Tuple['compute.GoogleComputeInstance', bool]:  # pylint: disable=line-too-long
    """Start a virtual machine for analysis purposes.

  Args:
    project (str): Project id for virtual machine.
    vm_name (str): The name of the virtual machine.
    zone (str): Zone for the virtual machine.
    boot_disk_size (int): The size of the analysis VM boot disk (in GB).
    boot_disk_type (str): URL of the disk type resource describing
        which disk type to use to create the disk. Use pd-standard for a
        standard disk and pd-ssd for a SSD disk.
    cpu_cores (int): The number of CPU cores to create the machine with.
    attach_disks (List[str]): Optional. List of disk names to attach.
    image_project (str): Optional. Name of the project where the analysis VM
        image is hosted.
    image_family (str): Optional. Name of the image to use to create the
        analysis VM.

  Returns:
    Tuple(GoogleComputeInstance, bool): A tuple with a virtual machine object
        and a boolean indicating if the virtual machine was created or not.
  """

    proj = gcp_project.GoogleCloudProject(project, default_zone=zone)
    logger.info('Starting analysis VM {0:s}'.format(vm_name))
    analysis_vm, created = proj.compute.GetOrCreateAnalysisVm(
        vm_name,
        boot_disk_size,
        disk_type=boot_disk_type,
        cpu_cores=cpu_cores,
        image_project=image_project,
        image_family=image_family)
    logger.info('VM started.')
    for disk_name in (attach_disks or []):
        logger.info('Attaching disk {0:s}'.format(disk_name))
        analysis_vm.AttachDisk(proj.compute.GetDisk(disk_name))
    logger.info('VM ready.')
    return analysis_vm, created
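
A sketch showing how the returned tuple might be consumed; all values are placeholders:

vm, created = StartAnalysisVm(
    project='my-analysis-project',
    vm_name='analysis-vm-001',
    zone='us-central1-f',
    boot_disk_size=50,
    boot_disk_type='pd-ssd',
    cpu_cores=4,
    attach_disks=['evidence-disk-copy'])
logger.info('{0:s} VM {1:s}'.format('Created' if created else 'Reused', vm.name))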
Code example #10
File: google_cloud.py | Project: rgayon/turbinia
def PreprocessAttachDisk(disk_name):
    """Attaches Google Cloud Disk to an instance.

  Args:
    disk_name(str): The name of the Cloud Disk to attach.

  Returns:
    (str, list(str)): a tuple consisting of the path to the 'disk' block device
      and a list of paths to partition block devices. For example:
      (
       '/dev/disk/by-id/google-disk0',
       ['/dev/disk/by-id/google-disk0-part1', '/dev/disk/by-id/google-disk0-p2']
      )
  """
    path = '/dev/disk/by-id/google-{0:s}'.format(disk_name)
    if IsBlockDevice(path):
        log.info('Disk {0:s} already attached!'.format(disk_name))
        return (path, glob.glob('{0:s}-part*'.format(path)))

    config.LoadConfig()
    instance_name = GetLocalInstanceName()
    project = gcp_project.GoogleCloudProject(config.TURBINIA_PROJECT,
                                             default_zone=config.TURBINIA_ZONE)
    instance = project.compute.GetInstance(instance_name,
                                           zone=config.TURBINIA_ZONE)

    disk = instance.GetDisk(disk_name)
    log.info('Attaching disk {0:s} to instance {1:s}'.format(
        disk_name, instance_name))
    instance.AttachDisk(disk)

    # Make sure we have a proper block device
    for _ in range(RETRY_MAX):
        if IsBlockDevice(path):
            log.info('Block device {0:s} successfully attached'.format(path))
            break
        if os.path.exists(path):
            log.info('Block device {0:s} mode is {1}'.format(
                path,
                os.stat(path).st_mode))
        time.sleep(1)

    return (path, glob.glob('{0:s}-part*'.format(path)))
Code example #11
 def setUpClass(cls):
   try:
     project_info = utils.ReadProjectInfo(['project_id', 'instance', 'zone'])
   except (OSError, RuntimeError, ValueError) as exception:
     raise unittest.SkipTest(str(exception))
   cls.project_id = project_info['project_id']
   cls.instance_to_analyse = project_info['instance']
   # Optional: test a disk other than the boot disk
   cls.disk_to_forensic = project_info.get('disk', None)
   cls.zone = project_info['zone']
   cls.gcp = gcp_project.GoogleCloudProject(cls.project_id, cls.zone)
   cls.analysis_vm_name = 'new-vm-for-analysis'
   # Create and start the analysis VM
   cls.analysis_vm, _ = forensics.StartAnalysisVm(project=cls.project_id,
                                                  vm_name=cls.analysis_vm_name,
                                                  zone=cls.zone,
                                                  boot_disk_size=10,
                                                  boot_disk_type='pd-ssd',
                                                  cpu_cores=4)
Code example #12
def CreateDiskCopy(
        src_proj: str,
        dst_proj: str,
        instance_name: str,
        zone: str,
        disk_name: Optional[str] = None,
        disk_type: str = 'pd-standard') -> 'compute.GoogleComputeDisk':
    """Creates a copy of a Google Compute Disk.

  Args:
    src_proj (str): Name of project that holds the disk to be copied.
    dst_proj (str): Name of project to put the copied disk in.
    instance_name (str): Instance using the disk to be copied.
    zone (str): Zone where the new disk is to be created.
    disk_name (str): Optional. Name of the disk to copy. If None, boot disk
        will be copied.
    disk_type (str): Optional. URL of the disk type resource describing
        which disk type to use to create the disk. Default is pd-standard. Use
        pd-ssd to have a SSD disk.

  Returns:
    GoogleComputeDisk: A Google Compute Disk object.

  Raises:
    RuntimeError: If there are errors copying the disk.
  """

    src_project = gcp_project.GoogleCloudProject(src_proj)
    dst_project = gcp_project.GoogleCloudProject(dst_proj, default_zone=zone)
    instance = src_project.compute.GetInstance(
        instance_name) if instance_name else None

    try:
        if disk_name:
            disk_to_copy = src_project.compute.GetDisk(disk_name)
        else:
            disk_to_copy = instance.GetBootDisk()  # type: ignore

        common.LOGGER.info('Disk copy of {0:s} started...'.format(
            disk_to_copy.name))
        snapshot = disk_to_copy.Snapshot()
        new_disk = dst_project.compute.CreateDiskFromSnapshot(
            snapshot, disk_name_prefix='evidence', disk_type=disk_type)
        snapshot.Delete()
        common.LOGGER.info('Disk {0:s} successfully copied to {1:s}'.format(
            disk_to_copy.name, new_disk.name))

    except RefreshError as exception:
        error_msg = ('Something is wrong with your gcloud access token: '
                     '{0!s}.').format(exception)
        raise RuntimeError(error_msg)
    except DefaultCredentialsError as exception:
        error_msg = ('Something is wrong with your Application Default '
                     'Credentials. '
                     'Try running:\n  $ gcloud auth application-default login')
        raise RuntimeError(error_msg)
    except HttpError as exception:
        if exception.resp.status == 403:
            raise RuntimeError(
                'Make sure you have the appropriate permissions on the project'
            )
        if exception.resp.status == 404:
            raise RuntimeError(
                'GCP resource not found. Maybe a typo in the project / instance / '
                'disk name?')
        raise RuntimeError(exception)
    except RuntimeError as exception:
        error_msg = 'Cannot copy disk "{0:s}": {1!s}'.format(
            disk_name, exception)
        raise RuntimeError(error_msg)

    return new_disk
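
A sketch of copying a named disk across projects with this signature; every name below is a placeholder:

copied_disk = CreateDiskCopy(
    src_proj='incident-project',
    dst_proj='analysis-project',
    instance_name='affected-vm',
    zone='us-central1-f',
    disk_name='affected-vm-data-disk',
    disk_type='pd-ssd')
common.LOGGER.info('Evidence disk ready: {0:s}'.format(copied_disk.name))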
Code example #13
    def SetUp(self,
              analysis_project_name,
              remote_project_name,
              incident_id,
              zone,
              boot_disk_size,
              boot_disk_type,
              cpu_cores,
              remote_instance_name=None,
              disk_names=None,
              all_disks=False,
              image_project='ubuntu-os-cloud',
              image_family='ubuntu-1804-lts'):
        """Sets up a Google Cloud Platform(GCP) collector.

    This method creates and starts an analysis VM in the analysis project and
    selects disks to copy from the remote project.

    If disk_names is specified, it will copy the corresponding disks from the
    project, ignoring disks belonging to any specific instances.

    If remote_instance_name is specified, two behaviors are possible:
    * If no other parameters are specified, it will select the instance's boot
      disk
    * If all_disks is set to True, it will select all disks in the project
      that are attached to the instance

    disk_names takes precedence over remote_instance_name.

    Args:
      analysis_project_name (str): name of the project that contains
          the analysis VM.
      remote_project_name (str): name of the remote project where the disks
          must be copied from.
      incident_id (str): incident identifier on which the name of the analysis
          VM will be based.
      zone (str): GCP zone in which new resources should be created.
      boot_disk_size (float): size of the analysis VM boot disk (in GB).
      boot_disk_type (str): Disk type to use [pd-standard, pd-ssd]
      cpu_cores (int): number of CPU cores to create the VM with.
      remote_instance_name (Optional[str]): name of the instance in
          the remote project containing the disks to be copied.
      disk_names (Optional[str]): Comma separated disk names to copy.
      all_disks (Optional[bool]): True if all disks attached to the source
          instance should be copied.
      image_project (Optional[str]): name of the project where the analysis
          VM image is hosted.
      image_family (Optional[str]): name of the image to use to create the
          analysis VM.
    """
        if not (remote_instance_name or disk_names):
            self.state.AddError(
                'You need to specify at least an instance name or disks to copy',
                critical=True)
            return

        disk_names = disk_names.split(',') if disk_names else []

        self.analysis_project = gcp_project.GoogleCloudProject(
            analysis_project_name, default_zone=zone)
        self.remote_project = gcp_project.GoogleCloudProject(
            remote_project_name)

        self.remote_instance_name = remote_instance_name
        self.disk_names = disk_names
        self.incident_id = incident_id
        self.all_disks = all_disks
        self._gcp_label = {'incident_id': self.incident_id}

        analysis_vm_name = 'gcp-forensics-vm-{0:s}'.format(self.incident_id)

        self.logger.info(
            'Your analysis VM will be: {0:s}'.format(analysis_vm_name))
        self.logger.info('Complementary gcloud command:')
        self.logger.info(
            'gcloud compute ssh --project {0:s} {1:s} --zone {2:s}'.format(
                self.analysis_project.project_id, analysis_vm_name, zone))

        self.state.StoreContainer(
            containers.TicketAttribute(
                name=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_NAME,
                type_=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_TYPE,
                value=analysis_vm_name))

        try:
            # TODO: Make creating an analysis VM optional
            # pylint: disable=too-many-function-args
            # pylint: disable=redundant-keyword-arg
            self.analysis_vm, _ = gcp_forensics.StartAnalysisVm(
                self.analysis_project.project_id,
                analysis_vm_name,
                zone,
                boot_disk_size,
                boot_disk_type,
                int(cpu_cores),
                image_project=image_project,
                image_family=image_family)
            self.analysis_vm.AddLabels(self._gcp_label)
            self.analysis_vm.GetBootDisk().AddLabels(self._gcp_label)

        except (RefreshError, DefaultCredentialsError) as exception:
            msg = (
                'Something is wrong with your Application Default Credentials. '
                'Try running:\n  $ gcloud auth application-default login\n')
            msg += str(exception)
            self.ModuleError(msg, critical=True)
Code example #14
"""Tests the GoogleCloudDiskExport."""

import os
import unittest

import mock
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import compute

from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib.containers import containers
from dftimewolf.lib.exporters import gce_disk_export


FAKE_SOURCE_PROJECT = gcp_project.GoogleCloudProject(
    'fake-source-project', 'fake-zone')
FAKE_DISK = compute.GoogleComputeDisk(
    FAKE_SOURCE_PROJECT.project_id,
    'fake_zone',
    'fake-source-disk')
FAKE_IMAGE = compute.GoogleComputeImage(
    FAKE_SOURCE_PROJECT.project_id,
    'fake-zone',
    'fake-source-disk-image-df-export-temp')


class GoogleCloudDiskExportTest(unittest.TestCase):
  """Tests for the Google Cloud disk exporter."""

  def testInitialization(self):
    """Tests that the disk exporter can be initialized."""
Code example #15
    def SetUp(self,
              analysis_project_name,
              remote_project_name,
              incident_id=None,
              zone='us-central1-f',
              create_analysis_vm=True,
              boot_disk_size=50,
              boot_disk_type='pd-standard',
              cpu_cores=4,
              remote_instance_name=None,
              disk_names=None,
              all_disks=False,
              image_project='ubuntu-os-cloud',
              image_family='ubuntu-1804-lts'):
        """Sets up a Google Cloud Platform(GCP) collector.

    This method creates and starts an analysis VM in the analysis project and
    selects disks to copy from the remote project.

    If analysis_project_name is not specified, analysis_project will be same
    as remote_project.

    If disk_names is specified, it will copy the corresponding disks from the
    project, ignoring disks belonging to any specific instances.

    If remote_instance_name is specified, two behaviors are possible:
    * If no other parameters are specified, it will select the instance's boot
      disk
    * If all_disks is set to True, it will select all disks in the project
      that are attached to the instance

    disk_names takes precedence over remote_instance_name.

    Args:
      analysis_project_name (Optional[str]): Name of the project that contains
          the analysis VM. Default is None.
      remote_project_name (str): name of the remote project where the disks
          must be copied from.
      incident_id (Optional[str]): Optional. Incident identifier on which the
          name of the analysis VM will be based. Default is None, which means
          add no label and format VM name as
          "gcp-forensics-vm-{TIMESTAMP('%Y%m%d%H%M%S')}".
      zone (Optional[str]): Optional. GCP zone in which new resources should
          be created. Default is us-central1-f.
      create_analysis_vm (Optional[bool]): Optional. Create analysis VM in
          the analysis project. Default is True.
      boot_disk_size (Optional[float]): Optional. Size of the analysis VM boot
          disk (in GB). Default is 50.
      boot_disk_type (Optional[str]): Optional. Disk type to use.
          Default is pd-standard.
      cpu_cores (Optional[int]): Optional. Number of CPU cores to
          create the VM with. Default is 4.
      remote_instance_name (Optional[str]): Optional. Name of the instance in
          the remote project containing the disks to be copied.
      disk_names (Optional[str]): Optional. Comma separated disk names to copy.
      all_disks (Optional[bool]): Optional. True if all disks attached to the
          source instance should be copied.
      image_project (Optional[str]): Optional. Name of the project where the
          analysis VM image is hosted.
      image_family (Optional[str]): Optional. Name of the image to use to
          create the analysis VM.
    """
        if not (remote_instance_name or disk_names):
            self.ModuleError(
                'You need to specify at least an instance name or disks to copy',
                critical=True)
            return

        disk_names = disk_names.split(',') if disk_names else []
        self.remote_project = gcp_project.GoogleCloudProject(
            remote_project_name, default_zone=zone)
        if analysis_project_name:
            self.analysis_project = gcp_project.GoogleCloudProject(
                analysis_project_name, default_zone=zone)
        else:
            self.analysis_project = self.remote_project

        self.remote_instance_name = remote_instance_name
        self.disk_names = disk_names
        self.all_disks = all_disks
        if incident_id:
            self.incident_id = incident_id
            self._gcp_label = {'incident_id': self.incident_id}
        else:
            # Ensure the attributes exist for the checks further below.
            self.incident_id = None
            self._gcp_label = {}

        try:
            if self.remote_instance_name:
                self.remote_project.compute.GetInstance(
                    self.remote_instance_name)
        except ResourceNotFoundError as exception:
            self.ModuleError(
                message='Instance "{0:s}" not found or insufficient permissions'
                .format(self.remote_instance_name),
                critical=True)
            return

        if create_analysis_vm:
            if self.incident_id:
                analysis_vm_name = 'gcp-forensics-vm-{0:s}'.format(
                    self.incident_id)
            else:
                analysis_vm_name = common.GenerateUniqueInstanceName(
                    'gcp-forensics-vm', common.COMPUTE_NAME_LIMIT)

            self.logger.info(
                'Your analysis VM will be: {0:s}'.format(analysis_vm_name))
            self.logger.info('Complementary gcloud command:')
            self.logger.info(
                'gcloud compute ssh --project {0:s} {1:s} --zone {2:s}'.format(
                    self.analysis_project.project_id, analysis_vm_name,
                    self.analysis_project.default_zone))

            self.state.StoreContainer(
                containers.TicketAttribute(
                    name=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_NAME,
                    type_=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_TYPE,
                    value=analysis_vm_name))

            try:
                # pylint: disable=too-many-function-args
                # pylint: disable=redundant-keyword-arg
                self.analysis_vm, _ = gcp_forensics.StartAnalysisVm(
                    self.analysis_project.project_id,
                    analysis_vm_name,
                    self.analysis_project.default_zone,
                    boot_disk_size,
                    boot_disk_type,
                    int(cpu_cores),
                    image_project=image_project,
                    image_family=image_family)
                if self._gcp_label:
                    self.analysis_vm.AddLabels(self._gcp_label)
                    self.analysis_vm.GetBootDisk().AddLabels(self._gcp_label)

            except (RefreshError, DefaultCredentialsError) as exception:
                msg = (
                    'Something is wrong with your Application Default Credentials. '
                    'Try running:\n  $ gcloud auth application-default login\n'
                )
                msg += str(exception)
                self.ModuleError(msg, critical=True)
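
A hedged driver sketch for this collector, following the dftimewolf test pattern in code example #17; the collector class name, state constructor, and all project/instance names are assumptions:

# Assumes the imports from code example #17.
test_state = state.DFTimewolfState(config.Config)  # hypothetical test state
collector = gcloud.GoogleCloudCollector(test_state)
collector.SetUp(
    'analysis-project',   # analysis_project_name
    'incident-project',   # remote_project_name
    incident_id='20240101-inc42',
    zone='us-central1-f',
    remote_instance_name='compromised-vm',
    all_disks=True)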
Code example #16
    def SetUp(self,
              source_project_name,
              gcs_output_location,
              analysis_project_name=None,
              source_disk_names=None,
              remote_instance_name=None,
              all_disks=False,
              exported_image_name=None):
        """Sets up a Google Cloud Platform (GCP) Disk Export.

    This method creates the required objects to initialize
    the GoogleCloudDiskExport class attributes.

    If the analysis_project_name is not specified it will use the
    source_project_name instead.

    If source_disk_names is specified, it will copy the corresponding disks from
    the project, ignoring disks belonging to any specific instances.

    If remote_instance_name is specified, two behaviors are possible:
    * If no other parameters are specified, it will select the instance's boot
      disk
    * If all_disks is set to True, it will select all disks in the project
      that are attached to the instance

    Args:
      source_project_name (str): Source project ID containing
          the disk to export.
      gcs_output_location (str): Google Cloud Storage parent bucket/folder
          path of the exported image.
      analysis_project_name (Optional[str]): Optional. Project ID where the
          disk image is created then exported. If not specified,
          source_project_name will be used.
      source_disk_names (Optional[str]): Optional. Comma separated disk names
          to copy. Default is None.
      remote_instance_name (Optional[str]): Optional. Instance in the source
          project whose disks should be exported. If not set,
          source_disk_names will be used.
      all_disks (Optional[bool]): Optional. True if all disks attached to
          the source instance should be copied. Default is False. If False
          and remote_instance_name is provided, only the instance's boot
          disk is selected.
      exported_image_name (Optional[str]): Optional. Name of the output file;
          it must comply with ^[A-Za-z0-9-]*$ and '.tar.gz' will be appended
          to the name. If not set, or if more than one disk is selected, the
          exported image is named
          "exported-image-{TIMESTAMP('%Y%m%d%H%M%S')}".
    """
        self.source_project = gcp_project.GoogleCloudProject(
            source_project_name)
        if analysis_project_name:
            self.analysis_project = gcp_project.GoogleCloudProject(
                analysis_project_name)
        else:
            self.analysis_project = self.source_project

        if not (remote_instance_name or source_disk_names):
            self.ModuleError(
                'You need to specify at least an instance name or disks to copy',
                critical=True)

        self.remote_instance_name = remote_instance_name
        if source_disk_names:
            source_disk_names = source_disk_names.split(',')
        else:
            source_disk_names = []
        self.source_disk_names = source_disk_names
        self.all_disks = all_disks

        self.source_disks = self._FindDisksToCopy()
        self.gcs_output_location = gcs_output_location
        if exported_image_name and len(self.source_disks) == 1:
            self.exported_image_name = exported_image_name
Code example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the GoogleCloudCollector."""

import unittest

import mock
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import compute

from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib.containers import containers
from dftimewolf.lib.collectors import gcloud

FAKE_PROJECT = gcp_project.GoogleCloudProject('test-target-project-name',
                                              'fake_zone')
FAKE_ANALYSIS_VM = compute.GoogleComputeInstance(FAKE_PROJECT.project_id,
                                                 'fake_zone',
                                                 'fake-analysis-vm')
FAKE_INSTANCE = compute.GoogleComputeInstance(FAKE_PROJECT.project_id,
                                              'fake_zone', 'fake-instance')
FAKE_DISK = compute.GoogleComputeDisk(FAKE_PROJECT.project_id, 'fake_zone',
                                      'disk1')
FAKE_BOOT_DISK = compute.GoogleComputeDisk(FAKE_PROJECT.project_id,
                                           'fake_zone', 'bootdisk')
FAKE_SNAPSHOT = compute.GoogleComputeSnapshot(FAKE_DISK, 'fake_snapshot')
FAKE_DISK_COPY = compute.GoogleComputeDisk(FAKE_PROJECT.project_id,
                                           'fake_zone', 'disk1-copy')


class GoogleCloudCollectorTest(unittest.TestCase):
Code example #18
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCP mocks used across tests."""

import re

# pylint: disable=line-too-long
from libcloudforensics.providers.gcp.internal import build as gcp_build
from libcloudforensics.providers.gcp.internal import compute
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import log as gcp_log
from libcloudforensics.providers.gcp.internal import monitoring as gcp_monitoring
from libcloudforensics.providers.gcp.internal import storage as gcp_storage
# pylint: enable=line-too-long

FAKE_ANALYSIS_PROJECT = gcp_project.GoogleCloudProject('fake-target-project',
                                                       'fake-zone')

FAKE_ANALYSIS_VM = compute.GoogleComputeInstance(
    FAKE_ANALYSIS_PROJECT.project_id, 'fake-zone', 'fake-analysis-vm')

FAKE_IMAGE = compute.GoogleComputeImage(FAKE_ANALYSIS_PROJECT.project_id, '',
                                        'fake-image')

# Source project with the instance that needs forensicating
FAKE_SOURCE_PROJECT = gcp_project.GoogleCloudProject('fake-source-project',
                                                     'fake-zone')

FAKE_INSTANCE = compute.GoogleComputeInstance(FAKE_SOURCE_PROJECT.project_id,
                                              'fake-zone', 'fake-instance')

FAKE_DISK = compute.GoogleComputeDisk(FAKE_SOURCE_PROJECT.project_id,
Code example #19
def CreateDiskFromGCSImage(
    project_id: str,
    storage_image_path: str,
    zone: str,
    name: Optional[str] = None) -> Dict[str, Any]:
  """Creates a GCE persistent disk from a image in GCS.

  The method supports raw disk images and most virtual disk
  file formats. Valid import formats are:
  [raw (dd), qcow2, qcow, vmdk, vdi, vhd, vhdx, qed, vpc].

  The created GCE disk might be larger than the original raw (dd)
  image stored in GCS to satisfy GCE capacity requirements:
  https://cloud.google.com/compute/docs/disks/#introduction
  However, the bytes_count and the md5_hash values of the source
  image are returned with the newly created disk.
  The md5_hash can be used to verify the integrity of the
  created GCE disk; it must be compared with the hash of the
  created GCE disk from byte 0 to bytes_count, i.e.:
  result['md5Hash'] = hash(created_gce_disk,
                            start_byte=0,
                            end_byte=result['bytes_count'])

  Args:
    project_id (str): Google Cloud Project ID.
    storage_image_path (str): Path to the source image in GCS.
    zone (str): Zone to create the new disk in.
    name (str): Optional. Name of the disk to create. Default
        is imported-disk-[TIMESTAMP('%Y%m%d%H%M%S')].

  Returns:
    Dict: A key value describing the imported GCE disk.
        Ex: {
          'project_id': 'fake-project',
          'disk_name': 'fake-imported-disk',
          'zone': 'fake-zone',
          'bytes_count': '1234',  # Content-Length of source image in bytes.
          'md5Hash': 'Source Image MD5 hash string in hex'
        }

  Raises:
    InvalidNameError: If the GCE disk name is invalid.
  """

  if name:
    if not common.REGEX_DISK_NAME.match(name):
      raise errors.InvalidNameError(
          'Disk name {0:s} does not comply with {1:s}'.format(
              name, common.REGEX_DISK_NAME.pattern), __name__)
    name = name[:common.COMPUTE_NAME_LIMIT]
  else:
    name = common.GenerateUniqueInstanceName('imported-disk',
                                             common.COMPUTE_NAME_LIMIT)

  project = gcp_project.GoogleCloudProject(project_id)
  image_object = project.compute.ImportImageFromStorage(storage_image_path)
  disk_object = project.compute.CreateDiskFromImage(
      image_object, zone=zone, name=name)
  storage_object_md = project.storage.GetObjectMetadata(storage_image_path)
  md5_hash_hex = base64.b64decode(storage_object_md['md5Hash']).hex()
  result = {
      'project_id': disk_object.project_id,
      'disk_name': disk_object.name,
      'zone': disk_object.zone,
      'bytes_count': storage_object_md['size'],
      'md5Hash': md5_hash_hex
  }
  return result
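
A sketch of consuming the returned metadata, including the integrity check the docstring describes; the bucket path and names are placeholders:

result = CreateDiskFromGCSImage(
    'my-project',
    'gs://my-bucket/images/evidence.qcow2',
    'us-central1-f',
    name='imported-evidence-disk')
print('Created disk {0:s} in zone {1:s}'.format(
    result['disk_name'], result['zone']))
# To verify integrity, hash the first result['bytes_count'] bytes of the new
# GCE disk and compare the hex digest against result['md5Hash'].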
Code example #20
def CreateDiskCopy(
    src_proj: str,
    dst_proj: str,
    zone: str,
    instance_name: Optional[str] = None,
    disk_name: Optional[str] = None,
    disk_type: Optional[str] = None) -> 'compute.GoogleComputeDisk':
  """Creates a copy of a Google Compute Disk.

  Args:
    src_proj (str): Name of project that holds the disk to be copied.
    dst_proj (str): Name of project to put the copied disk in.
    zone (str): Zone where the new disk is to be created.
    instance_name (str): Optional. Instance using the disk to be copied.
    disk_name (str): Optional. Name of the disk to copy. If None,
        instance_name must be specified and the boot disk will be copied.
    disk_type (str): Optional. URL of the disk type resource describing
        which disk type to use to create the disk. The default behavior is to
        use the same disk type as the source disk.

  Returns:
    GoogleComputeDisk: A Google Compute Disk object.

  Raises:
    ResourceNotFoundError: If the GCP resource is not found.
    CredentialsConfigurationError: If the library could not authenticate to GCP.
    RuntimeError: If an unknown HttpError is thrown.
    ResourceCreationError: If there are errors copying the disk.
    ValueError: If both instance_name and disk_name are missing.
  """

  if not instance_name and not disk_name:
    raise ValueError(
        'You must specify at least one of [instance_name, disk_name].')

  src_project = gcp_project.GoogleCloudProject(src_proj)
  dst_project = gcp_project.GoogleCloudProject(dst_proj, default_zone=zone)

  try:
    if disk_name:
      disk_to_copy = src_project.compute.GetDisk(disk_name)
    elif instance_name:
      instance = src_project.compute.GetInstance(instance_name)
      disk_to_copy = instance.GetBootDisk()

    if not disk_type:
      disk_type = disk_to_copy.GetDiskType()

    logger.info('Disk copy of {0:s} started...'.format(
        disk_to_copy.name))
    snapshot = disk_to_copy.Snapshot()
    logger.debug('Snapshot created: {0:s}'.format(snapshot.name))
    new_disk = dst_project.compute.CreateDiskFromSnapshot(
        snapshot, disk_name_prefix='evidence', disk_type=disk_type)
    logger.info(
        'Disk {0:s} successfully copied to {1:s}'.format(
            disk_to_copy.name, new_disk.name))
    snapshot.Delete()
    logger.debug('Snapshot {0:s} deleted.'.format(snapshot.name))

  except (RefreshError, DefaultCredentialsError) as exception:
    raise errors.CredentialsConfigurationError(
        'Something is wrong with your Application Default Credentials. Try '
        'running: $ gcloud auth application-default login: {0!s}'.format(
            exception), __name__) from exception
  except HttpError as exception:
    if exception.resp.status == 403:
      raise errors.CredentialsConfigurationError(
          'Make sure you have the appropriate permissions on the project',
          __name__) from exception
    if exception.resp.status == 404:
      raise errors.ResourceNotFoundError(
          'GCP resource not found. Maybe a typo in the project / instance / '
          'disk name?', __name__) from exception
    raise RuntimeError(exception) from exception
  except RuntimeError as exception:
    raise errors.ResourceCreationError(
        'Cannot copy disk "{0:s}": {1!s}'.format(disk_name, exception),
        __name__) from exception

  return new_disk
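
Unlike the older variant in code example #12, zone is the third positional argument here and both instance_name and disk_name are optional keywords. A boot-disk copy sketch with placeholder names:

boot_disk_copy = CreateDiskCopy(
    'incident-project',
    'analysis-project',
    'us-central1-f',
    instance_name='affected-vm')
# disk_name was omitted, so the instance's boot disk is copied, reusing the
# source disk's type.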
Code example #21
def AddDenyAllFirewallRules(project_id: str,
                            network: str,
                            deny_ingress_tag: str,
                            deny_egress_tag: str,
                            exempted_src_ips: Optional[List[str]] = None,
                            enable_logging: bool = False) -> None:
  """Add deny-all firewall rules, of highest priority.

  Args:
    project_id (str): Google Cloud Project ID.
    network (str): URL of the network resource for these firewall rules.
    deny_ingress_tag (str): Target tag name to apply deny ingress rule,
        also used as a deny ingress firewall rule name.
    deny_egress_tag (str): Target tag name to apply deny egress rule,
        also used as a deny egress firewall rule name.
    exempted_src_ips (List[str]): List of IPs exempted from the deny-all
      ingress firewall rules, ex: analyst IPs.
    enable_logging (bool): Optional. Enable firewall logging.
        Default is False.

  Raises:
    InvalidNameError: If Tag names are invalid.
  """

  logger.info('Creating deny-all (ingress/egress) '
              'firewall rules in {0:s} network.'.format(network))
  project = gcp_project.GoogleCloudProject(project_id)
  if not common.COMPUTE_RFC1035_REGEX.match(deny_ingress_tag):
    raise errors.InvalidNameError(
        'Deny ingress tag name {0:s} does not comply with {1:s}'.format(
            deny_ingress_tag, common.COMPUTE_RFC1035_REGEX.pattern), __name__)
  if not common.COMPUTE_RFC1035_REGEX.match(deny_egress_tag):
    raise errors.InvalidNameError(
        'Deny egress tag name {0:s} does not comply with {1:s}'.format(
            deny_egress_tag, common.COMPUTE_RFC1035_REGEX.pattern), __name__)

  source_range = common.GenerateSourceRange(exempted_src_ips)

  deny_ingress = {
    'name': deny_ingress_tag,
    'network': network,
    'direction': 'INGRESS',
    'priority': 0,
    'targetTags': [
      deny_ingress_tag
    ],
    'denied': [
      {
        'IPProtocol': 'all'
      }
    ],
    'logConfig': {
      'enable': enable_logging
    },
    'sourceRanges': source_range
  }
  deny_egress = {
    'name': deny_egress_tag,
    'network': network,
    'direction': 'EGRESS',
    'priority': 0,
    'targetTags': [
      deny_egress_tag
    ],
    'denied': [
      {
        'IPProtocol': 'all'
      }
    ],
    'logConfig': {
      'enable': enable_logging
    },
    'destinationRanges': [
      '0.0.0.0/0'
    ]
  }
  project.compute.InsertFirewallRule(body=deny_ingress)
  project.compute.InsertFirewallRule(body=deny_egress)
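
A direct-call sketch; code example #8 generates the tag names with a random suffix, which the hypothetical values below imitate:

AddDenyAllFirewallRules(
    'my-project',
    'https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default',
    'deny-ingress-tag-12345678901234567890',
    'deny-egress-tag-12345678901234567890',
    exempted_src_ips=['198.51.100.42'],  # hypothetical analyst IP
    enable_logging=True)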