Example #1
import logging
import time

from googleapiclient.errors import HttpError

from libcloudforensics import gcp

# Imports assumed by this standalone snippet: HttpError and gcp match the
# imports shown in examples #5 and #6; log is assumed to be a standard logger.
log = logging.getLogger(__name__)


def CleanUp(project_id, zone, instance_name):
    """Cleans up a GCP project.

    Removes the instance [instance_name] in the GCP project [project_id] and
    its disks that were created as part of the end-to-end test.

    Args:
      project_id (str): the project ID of the GCP project.
      zone (str): the zone for the project.
      instance_name (str): the name of the analysis VM to remove.
    """

    gcp_project = gcp.GoogleCloudProject(project_id, zone)
    disks = gcp.GoogleComputeInstance(gcp_project, zone,
                                      instance_name).ListDisks()

    # delete the created forensics VMs
    log.info('Deleting analysis instance: {0:s}.'.format(instance_name))
    gce_instances_client = gcp_project.GceApi().instances()
    request = gce_instances_client.delete(project=gcp_project.project_id,
                                          zone=gcp_project.default_zone,
                                          instance=instance_name)
    try:
        request.execute()
    except HttpError:
        # GceOperation triggers a while(True) loop that checks on the
        # operation ID. Sometimes it loops one more time right when the
        # operation has finished, so the associated ID no longer exists and
        # an HttpError is thrown. We can ignore this.
        pass
    log.info('Instance {0:s} successfully deleted.'.format(instance_name))

    # delete the copied disks
    # we ignore the disk that was created for the analysis VM (disks[0]) as
    # it is deleted in the previous operation
    gce_disks_client = gcp_project.GceApi().disks()
    for disk in disks[1:]:
        log.info('Deleting disk: {0:s}.'.format(disk))
        while True:
            try:
                request = gce_disks_client.delete(
                    project=gcp_project.project_id,
                    zone=gcp_project.default_zone,
                    disk=disk)
                request.execute()
                break
            except HttpError as exception:
                # The GCE API will throw a 400 error until the analysis VM's
                # deletion has correctly propagated. When the disk is finally
                # deleted, it will throw a 404 Not Found if the loop ran one
                # more time after deletion.
                if exception.resp.status == 404:
                    break
                if exception.resp.status != 400:
                    log.warning(
                        'Could not delete the disk {0:s}: {1:s}'.format(
                            disk, str(exception)))
                # Throttle the requests to one every 10 seconds
                time.sleep(10)

        log.info('Disk {0:s} successfully deleted.'.format(disk))
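A minimal sketch of invoking this helper; the project, zone, and instance values below are placeholders, not taken from the original:

if __name__ == '__main__':
    # Placeholder values; substitute the project, zone, and analysis VM name
    # used by your end-to-end test run.
    CleanUp('my-forensics-project', 'us-central1-f', 'gcp-forensics-vm-ir42')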
Example #2
    def __init__(self, *args, **kwargs):
        super(EndToEndTest, self).__init__(*args, **kwargs)
        try:
            project_info = ReadProjectInfo()
        except (OSError, RuntimeError, ValueError) as exception:
            self.error_msg = str(exception)
            return
        self.project_id = project_info['project_id']
        self.instance_to_analyse = project_info['instance']
        # Optional: test a disk other than the boot disk
        self.disk_to_forensic = project_info.get('disk', None)
        self.zone = project_info['zone']
        self.gcp = gcp.GoogleCloudProject(self.project_id, self.zone)
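The keys accessed above imply the shape of the dict that ReadProjectInfo() returns. A minimal sketch of such a helper, assuming the project information lives in a JSON file; the file name, environment variable, and error messages are hypothetical:

import json
import os


def ReadProjectInfo():
    """Reads and validates project information for the end-to-end test.

    Returns:
      dict: with keys 'project_id', 'instance', 'zone' and, optionally,
          'disk'.

    Raises:
      OSError: if the file cannot be opened.
      RuntimeError: if a required key is missing.
      ValueError: if the file does not contain valid JSON.
    """
    # Hypothetical location; the original snippet does not show where the
    # project information comes from.
    project_info_file = os.environ.get('PROJECT_INFO', 'project_info.json')
    with open(project_info_file) as json_file:
        # json.JSONDecodeError is a subclass of ValueError.
        project_info = json.load(json_file)
    for key in ('project_id', 'instance', 'zone'):
        if key not in project_info:
            raise RuntimeError('Missing key {0:s} in project info'.format(key))
    return project_info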
Example #3
    def tearDown(self):
        project = gcp.GoogleCloudProject(project_id=self.project_id,
                                         default_zone=self.zone)

        disks = self.analysis_vm.ListDisks()

        # delete the created forensics VMs
        log.info('Deleting analysis instance: {0:s}.'.format(
            self.analysis_vm.name))
        operation = project.GceApi().instances().delete(
            project=project.project_id,
            zone=self.zone,
            instance=self.analysis_vm.name).execute()
        try:
            project.GceOperation(operation, block=True)
        except HttpError:
            # GceOperation triggers a while(True) loop that checks on the
            # operation ID. Sometimes it loops one more time right when the
            # operation has finished, so the associated ID no longer exists
            # and an HttpError is thrown. We can ignore this.
            pass
        log.info('Instance {0:s} successfully deleted.'.format(
            self.analysis_vm.name))

        # delete the copied disks
        # we ignore the disk that was created for the analysis VM (disks[0]) as
        # it is deleted in the previous operation
        for disk in disks[1:]:
            log.info('Deleting disk: {0:s}.'.format(disk))
            while True:
                try:
                    operation = project.GceApi().disks().delete(
                        project=project.project_id, zone=self.zone,
                        disk=disk).execute()
                    project.GceOperation(operation, block=True)
                    break
                except HttpError as exception:
                    # The GCE API will throw a 400 error until the analysis
                    # VM's deletion has correctly propagated. When the disk is
                    # finally deleted, it will throw a 404 Not Found if the
                    # loop ran one more time after deletion.
                    if exception.resp.status == 404:
                        break
                    if exception.resp.status != 400:
                        log.warning(
                            'Could not delete the disk {0:s}: {1:s}'.format(
                                disk, str(exception)))
                    # Throttle the requests to one every 10 seconds
                    time.sleep(10)

            log.info('Disk {0:s} successfully deleted.'.format(disk))
Example #4
    @classmethod
    def setUpClass(cls):
        try:
            project_info = ReadProjectInfo()
        except (OSError, RuntimeError, ValueError) as exception:
            raise unittest.SkipTest(str(exception))
        cls.project_id = project_info['project_id']
        cls.instance_to_analyse = project_info['instance']
        # Optional: test a disk other than the boot disk
        cls.disk_to_forensic = project_info.get('disk', None)
        cls.zone = project_info['zone']
        cls.gcp = gcp.GoogleCloudProject(cls.project_id, cls.zone)
        cls.analysis_vm_name = 'new-vm-for-analysis'
        # Create and start the analysis VM
        cls.analysis_vm, _ = gcp.StartAnalysisVm(
            project=cls.project_id,
            vm_name=cls.analysis_vm_name,
            zone=cls.zone,
            boot_disk_size=10,
            boot_disk_type='pd-ssd',
            cpu_cores=4)
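A minimal sketch of a test that could follow this setup; GetBootDisk() is borrowed from its usage in example #7, and the assertion itself is illustrative, not from the original:

    def testAnalysisVmHasBootDisk(self):
        """Checks that the analysis VM was created with a boot disk."""
        boot_disk = self.analysis_vm.GetBootDisk()
        self.assertIsNotNone(boot_disk)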
Example #5
from __future__ import unicode_literals

import os
import unittest
import re

from googleapiclient.errors import HttpError

import mock
import six

from libcloudforensics import gcp

# For the forensics analysis
FAKE_ANALYSIS_PROJECT = gcp.GoogleCloudProject('fake-target-project',
                                               'fake-zone')
FAKE_ANALYSIS_VM = gcp.GoogleComputeInstance(FAKE_ANALYSIS_PROJECT,
                                             'fake-zone', 'fake-analysis-vm')

# Source project with the instance that needs forensicating
FAKE_SOURCE_PROJECT = gcp.GoogleCloudProject('fake-source-project',
                                             'fake-zone')
FAKE_INSTANCE = gcp.GoogleComputeInstance(FAKE_SOURCE_PROJECT, 'fake-zone',
                                          'fake-instance')
FAKE_DISK = gcp.GoogleComputeDisk(FAKE_SOURCE_PROJECT, 'fake-zone',
                                  'fake-disk')
FAKE_BOOT_DISK = gcp.GoogleComputeDisk(FAKE_SOURCE_PROJECT, 'fake-zone',
                                       'fake-boot-disk')
FAKE_SNAPSHOT = gcp.GoogleComputeSnapshot(FAKE_DISK, 'fake-snapshot')
FAKE_SNAPSHOT_LONG_NAME = gcp.GoogleComputeSnapshot(
    FAKE_DISK,
    # The snapshot name was truncated in the original snippet; any
    # sufficiently long placeholder name serves the same purpose.
    'fake-snapshot-with-a-name-long-enough-to-require-truncation')
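A minimal sketch of exercising fakes like these in a unit test; the test class and assertion are illustrative, and only the .name attribute usage is taken from example #3:

class GoogleComputeInstanceTest(unittest.TestCase):
    """Illustrative test against the fake objects defined above."""

    def testName(self):
        # GoogleComputeInstance keeps the name it was constructed with
        # (example #3 reads analysis_vm.name the same way).
        self.assertEqual('fake-instance', FAKE_INSTANCE.name)


if __name__ == '__main__':
    unittest.main()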
Example #6
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the GoogleCloudCollector."""

from __future__ import unicode_literals

import unittest

import mock
from libcloudforensics import gcp

from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib.collectors import gcloud

FAKE_PROJECT = gcp.GoogleCloudProject('test-target-project-name', 'fake_zone')
FAKE_ANALYSIS_VM = gcp.GoogleComputeInstance(FAKE_PROJECT, 'fake_zone',
                                             'fake-analysis-vm')
FAKE_INSTANCE = gcp.GoogleComputeInstance(FAKE_PROJECT, 'fake_zone',
                                          'fake-instance')
FAKE_DISK = gcp.GoogleComputeDisk(FAKE_PROJECT, 'fake_zone', 'disk1')
FAKE_BOOT_DISK = gcp.GoogleComputeDisk(FAKE_PROJECT, 'fake_zone', 'bootdisk')
FAKE_SNAPSHOT = gcp.GoogleComputeSnapshot(FAKE_DISK, 'fake-snapshot')
FAKE_DISK_COPY = gcp.GoogleComputeDisk(FAKE_PROJECT, 'fake_zone', 'disk1-copy')


def ReturnFakeDisk(disk_name):
    """Generates a fake GoogleComputeDisk object with the provided name."""
    return gcp.GoogleComputeDisk(FAKE_PROJECT, 'fake_zone', disk_name)
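ReturnFakeDisk reads like a mock side_effect helper. A minimal sketch of wiring it up; the patch target libcloudforensics.gcp.CreateDiskCopy is an assumption, not confirmed by this snippet:

class GoogleCloudCollectorTest(unittest.TestCase):
    """Illustrative only."""

    @mock.patch('libcloudforensics.gcp.CreateDiskCopy')
    def testDiskCopyName(self, mock_create_disk_copy):
        # Route the mocked copy call through the fake-disk factory above.
        mock_create_disk_copy.side_effect = (
            lambda *args, **kwargs: ReturnFakeDisk('disk1-copy'))
        disk = mock_create_disk_copy()
        # Assumes GoogleComputeDisk exposes .name the way
        # GoogleComputeInstance does in example #3.
        self.assertEqual('disk1-copy', disk.name)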

Example #7
    def SetUp(self,
              analysis_project_name,
              remote_project_name,
              incident_id,
              zone,
              boot_disk_size,
              cpu_cores,
              remote_instance_name=None,
              disk_names=None,
              all_disks=False,
              image_project='ubuntu-os-cloud',
              image_family='ubuntu-1804-lts'):
        """Sets up a Google Cloud (GCP) collector.

    This method creates and starts an analysis VM in the analysis project and
    selects disks to copy from the remote project.

    If disk_names is specified, it will copy the corresponding disks from the
    project, ignoring disks belonging to any specific instances.

    If remote_instance_name is specified, two behaviors are possible:
    * If no other parameters are specified, it will select the instance's boot
      disk
    * if all_disks is set to True, it will select all disks in the project
      that are attached to the instance

    disk_names takes precedence over instance_names

    Args:
      analysis_project_name (str): name of the project that contains
          the analysis VM.
      remote_project_name (str): name of the remote project where the disks
          must be copied from.
      incident_id (str): incident identifier on which the name of the analysis
          VM will be based.
      zone (str): GCP zone in which new resources should be created.
      boot_disk_size (float): size of the analysis VM boot disk (in GB).
      cpu_cores (int): number of CPU cores to create the VM with.
      remote_instance_name (Optional[str]): name of the instance in
          the remote project containing the disks to be copied.
      disk_names (Optional[str]): Comma separated disk names to copy.
      all_disks (Optional[bool]): True if all disks attached to the source
          instance should be copied.
      image_project (Optional[str]): name of the project where the analysis
          VM image is hosted.
      image_family (Optional[str]): name of the image to use to create the
          analysis VM.
    """
        disk_names = disk_names.split(',') if disk_names else []

        self.analysis_project = gcp.GoogleCloudProject(analysis_project_name,
                                                       default_zone=zone)
        self.remote_project = gcp.GoogleCloudProject(remote_project_name)

        self.remote_instance_name = remote_instance_name
        self.disk_names = disk_names
        self.incident_id = incident_id
        self.all_disks = all_disks
        self._gcp_label = {'incident_id': self.incident_id}

        analysis_vm_name = 'gcp-forensics-vm-{0:s}'.format(self.incident_id)

        print('Your analysis VM will be: {0:s}'.format(analysis_vm_name))
        print('Complementary gcloud command:')
        print('gcloud compute ssh --project {0:s} {1:s} --zone {2:s}'.format(
            self.analysis_project.project_id, analysis_vm_name, zone))

        self.state.StoreContainer(
            containers.TicketAttribute(
                name=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_NAME,
                type_=self._ANALYSIS_VM_CONTAINER_ATTRIBUTE_TYPE,
                value=analysis_vm_name))

        try:
            # TODO: Make creating an analysis VM optional
            # pylint: disable=too-many-function-args

            self.analysis_vm, _ = gcp.StartAnalysisVm(
                self.analysis_project.project_id,
                analysis_vm_name,
                zone,
                boot_disk_size,
                int(cpu_cores),
                attach_disk=None,
                image_project=image_project,
                image_family=image_family)
            self.analysis_vm.AddLabels(self._gcp_label)
            self.analysis_vm.GetBootDisk().AddLabels(self._gcp_label)

        except (AccessTokenRefreshError,
                ApplicationDefaultCredentialsError) as exception:
            self.state.AddError(
                'Something is wrong with your Application Default Credentials. '
                'Try running:\n  $ gcloud auth application-default login')
            self.state.AddError(exception, critical=True)
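A minimal sketch of how this SetUp might be invoked, assuming the module follows dftimewolf's convention of constructing collectors with a state object (names come from example #6's imports; all argument values are placeholders):

collector = gcloud.GoogleCloudCollector(state.DFTimewolfState(config.Config))
collector.SetUp(
    analysis_project_name='analysis-project',
    remote_project_name='incident-project',
    incident_id='20200101-ir42',
    zone='us-central1-f',
    boot_disk_size=50.0,
    cpu_cores=16,
    remote_instance_name='compromised-vm',
    all_disks=True)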
Example #8
    def setUp(self):
        super(GoogleCloudProjectTest, self).setUp()
        self.project = gcp.GoogleCloudProject('test-project')
        self.project.GceApi = mock.MagicMock()
        self.project.GceOperation = mock.MagicMock()
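A minimal sketch of a test built on this setUp; the canned empty response and the assertion are illustrative:

    def testDeleteExecutesRequest(self):
        """With GceApi mocked, canned responses can be injected."""
        instances = self.project.GceApi().instances()
        instances.delete.return_value.execute.return_value = {}
        request = instances.delete(
            project='test-project', zone='fake-zone', instance='fake-instance')
        self.assertEqual({}, request.execute())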