    def test_get_utc_now_microtimestamp(self):
        # 2018/01/01 00:00:00.123456
        mock_date = datetime(2018, 1, 1, 0, 0, 0, 123456)
        result = date_time.get_utc_now_microtimestamp(mock_date)
        # Expected timestamp = 1514764800 * 1000000 + 123456
        expected_result = 1514764800123456
        self.assertEqual(expected_result, result)
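
For context, the helper exercised above converts a UTC datetime into microseconds since the Unix epoch. A minimal stand-in consistent with the expected value in the test (not the actual Forseti implementation, which lives in google.cloud.forseti.common.util.date_time) could look like this:

# Illustrative stand-in only; the real helper is date_time.get_utc_now_microtimestamp.
from calendar import timegm
from datetime import datetime


def get_utc_now_microtimestamp(now_utc=None):
    """Return the given (or current) UTC datetime as microseconds since the epoch."""
    if now_utc is None:
        now_utc = datetime.utcnow()
    # timegm() interprets the struct_time as UTC, so no timezone shift is applied.
    return timegm(now_utc.timetuple()) * 1000000 + now_utc.microsecond


assert get_utc_now_microtimestamp(
    datetime(2018, 1, 1, 0, 0, 0, 123456)) == 1514764800123456
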
Example #2
    def test_scanner_index_create(self, mock_date_time):
        """`ScannerIndex` create() works as expected."""
        utc_now = datetime.utcnow()
        mock_date_time.return_value = utc_now
        expected_id = date_time.get_utc_now_microtimestamp(utc_now)
        db_row = scanner_dao.ScannerIndex.create(expected_id)
        self.assertEqual(expected_id, db_row.id)
        self.assertEqual(utc_now, db_row.created_at_datetime)
        self.assertEqual(IndexState.CREATED, db_row.scanner_status)

    def test_init_scanner_index(self, mock_date_time):
        utc_now = datetime.utcnow()
        mock_date_time.return_value = utc_now
        scanner.init_scanner_index(self.session, self.inv_index_id3)
        expected_id = date_time.get_utc_now_microtimestamp(utc_now)
        db_row = (self.session.query(scanner_dao.ScannerIndex).filter(
            scanner_dao.ScannerIndex.id == expected_id).one())
        self.assertEqual(IndexState.RUNNING, db_row.scanner_status)
        self.assertEqual(utc_now, db_row.created_at_datetime)
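
The tests above receive a mock_date_time argument, but the snippets omit the decorator that supplies it. A sketch of how such a fixture is typically wired up with unittest.mock (the patch target path below is an assumption, not taken from the source):

import unittest
from datetime import datetime
from unittest import mock


# Assumed patch target; the real tests patch whichever module-level alias of
# get_utc_now_datetime() the code under test imports.
@mock.patch(
    'google.cloud.forseti.common.util.date_time.get_utc_now_datetime',
    autospec=True)
class ScannerDaoTest(unittest.TestCase):

    def test_created_at_is_patched(self, mock_date_time):
        fixed = datetime(2018, 1, 1)
        mock_date_time.return_value = fixed
        # Any code under test that calls get_utc_now_datetime() now sees `fixed`.
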
Example #4
    def test_scanner_index_complete(self, mock_date_time):
        """`ScannerIndex` complete() works as expected."""
        start = datetime.utcnow()
        end = start + timedelta(minutes=5)
        # get_utc_now_datetime() is called once by create() and once by complete().
        mock_date_time.side_effect = [start, end]
        expected_id = date_time.get_utc_now_microtimestamp(start)

        db_row = scanner_dao.ScannerIndex.create(expected_id)
        self.assertEqual(expected_id, db_row.id)
        db_row.complete()
        self.assertEqual(end, db_row.completed_at_datetime)
        self.assertEqual(IndexState.SUCCESS, db_row.scanner_status)
Example #5
    @classmethod
    def create(cls):
        """Create a new inventory index row.

        Returns:
            InventoryIndex: InventoryIndex row object.
        """
        utc_now = date_time.get_utc_now_datetime()
        micro_timestamp = date_time.get_utc_now_microtimestamp(utc_now)
        return InventoryIndex(id=micro_timestamp,
                              created_at_datetime=utc_now,
                              completed_at_datetime=None,
                              inventory_status=IndexState.CREATED,
                              schema_version=CURRENT_SCHEMA,
                              counter=0)

    def _enforce_projects(self,
                          project_policies,
                          prechange_callback=None,
                          new_result_callback=None,
                          add_rule_callback=None):
        """Do a single enforcement run on the projects.

        Args:
          project_policies (iterable): An iterable of
              (project_id, firewall_policy) tuples to enforce.
          prechange_callback (Callable): See docstring for self.Run().
          new_result_callback (Callable): See docstring for self.Run().
          add_rule_callback (Callable): See docstring for self.Run().

        Returns:
          int: The number of projects that were enforced.
        """
        # Get a 64 bit int to use as the unique batch ID for this run.
        batch_id = date_time.get_utc_now_microtimestamp()
        self.enforcement_log.summary.batch_id = batch_id

        projects_enforced_count = 0
        future_to_key = {}
        with (concurrent.futures.ThreadPoolExecutor(
                max_workers=self._concurrent_workers)) as executor:
            for (project_id, firewall_policy) in project_policies:
                future = executor.submit(self._enforce_project, project_id,
                                         firewall_policy, prechange_callback,
                                         add_rule_callback)
                future_to_key[future] = project_id

            for future in concurrent.futures.as_completed(future_to_key):
                project_id = future_to_key[future]
                LOGGER.debug('Project %s finished enforcement run.',
                             project_id)
                projects_enforced_count += 1

                result = self.enforcement_log.results.add()
                result.CopyFrom(future.result())

                # Make sure all results have the current batch_id set
                result.batch_id = batch_id
                result.run_context = enforcer_log_pb2.ENFORCER_BATCH

                if new_result_callback:
                    new_result_callback(result)

        return projects_enforced_count
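
Stripped of the Forseti specifics, _enforce_projects() uses the standard submit/as_completed pattern from concurrent.futures. A self-contained illustration of that pattern (the square() worker and its inputs are placeholders):

import concurrent.futures


def square(value):
    return value * value


future_to_key = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    # Submit one task per key and remember which key each future belongs to ...
    for key in (1, 2, 3):
        future_to_key[executor.submit(square, key)] = key

    # ... then harvest results in completion order, not submission order.
    for future in concurrent.futures.as_completed(future_to_key):
        print('key %s finished with result %s' % (future_to_key[future],
                                                  future.result()))
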
Example #7
    @classmethod
    def create(cls, inv_index_id):
        """Create a new scanner index row.

        Args:
            inv_index_id (str): Id of the inventory index.

        Returns:
            object: ScannerIndex row object.
        """
        utc_now = date_time.get_utc_now_datetime()
        micro_timestamp = date_time.get_utc_now_microtimestamp(utc_now)
        return ScannerIndex(id=micro_timestamp,
                            inventory_index_id=inv_index_id,
                            created_at_datetime=utc_now,
                            scanner_status=IndexState.CREATED,
                            schema_version=CURRENT_SCHEMA)

    def __init__(self,
                 project_id,
                 global_configs=None,
                 compute_client=None,
                 dry_run=False,
                 project_sema=None,
                 max_running_operations=0):
        """Initialize.

        Args:
            project_id (str): The project id for the project to enforce.
            global_configs (dict): Global configurations.
            compute_client (ComputeClient): A Compute API client.
                If not provided, one will be created using the default
                credentials.
            dry_run (bool): Set to true to ensure no actual changes are made to
                the project. EnforcePolicy will still return a ProjectResult
                proto showing the changes that would have been made.
            project_sema (threading.BoundedSemaphore): An optional semaphore
                object, used to limit the number of concurrent projects getting
                written to.
            max_running_operations (int): [DEPRECATED] Used to limit the number
                of concurrent running operations on an API.
        """
        self.project_id = project_id

        if not compute_client:
            compute_client = compute.ComputeClient(global_configs,
                                                   dry_run=dry_run)

        self.compute_client = compute_client

        self.result = enforcer_log_pb2.ProjectResult()
        self.result.status = STATUS_UNSPECIFIED
        self.result.project_id = self.project_id
        self.result.timestamp_sec = date_time.get_utc_now_microtimestamp()

        self._dry_run = dry_run

        self._project_sema = project_sema
        if max_running_operations:
            LOGGER.warning(
                'Max running operations is deprecated. Argument ignored.')

        self._operation_sema = None
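
The project_sema argument documented above caps how many projects are written to concurrently. A self-contained sketch of that idea with threading.BoundedSemaphore (the worker function and project names are invented for illustration):

import threading

# At most two projects are written to at any one time.
project_sema = threading.BoundedSemaphore(value=2)


def write_project(project_id):
    with project_sema:
        print('writing project %s' % project_id)


threads = [threading.Thread(target=write_project, args=('project-%d' % i,))
           for i in range(5)]
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
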
Example #9
    def test_get_latest_scanner_index_id(self, mock_date_time):
        """The method under test returns the newest `ScannerIndex` row."""
        time1 = datetime.utcnow()
        time2 = time1 + timedelta(minutes=5)
        time3 = time1 + timedelta(minutes=7)
        mock_date_time.side_effect = [time1, time2, time3]

        expected_id = date_time.get_utc_now_microtimestamp(time2)

        self.session.add(scanner_dao.ScannerIndex.create(expected_id))
        index2 = scanner_dao.ScannerIndex.create(expected_id)
        index2.scanner_status = IndexState.SUCCESS
        self.session.add(index2)
        self.session.add(scanner_dao.ScannerIndex.create(expected_id))
        self.session.flush()
        self.assertEqual(
            expected_id,
            scanner_dao.get_latest_scanner_index_id(self.session, expected_id))

    def run(self,
            project_policies,
            prechange_callback=None,
            new_result_callback=None,
            add_rule_callback=None):
        """Runs the enforcer over all projects passed in to the function.

        Args:
          project_policies (iterable): An iterable of
              (project_id, firewall_policy) tuples to enforce or a dictionary
              in the format {project_id: firewall_policy}

          prechange_callback (Callable): A callback function that will get
              called if the firewall policy for a project does not match the
              expected policy, before any changes are actually applied. If the
              callback returns False then no changes will be made to the
              project. If it returns True then the changes will be pushed.

              See FirewallEnforcer.apply_firewall() docstring for more details.

          new_result_callback (Callable): An optional function to call with each
              new result proto message as they are returned from a
              ProjectEnforcer thread.

          add_rule_callback (Callable): A callback function that checks whether
              a firewall rule should be applied. If the callback returns False,
              that rule will not be modified.

        Returns:
          enforcer_log_pb2.EnforcerLog: The EnforcerLog proto for the last run,
          including individual results for each project, and a summary of all
          results.
        """
        if self._dry_run:
            LOGGER.info('Simulating changes')

        if isinstance(project_policies, dict):
            project_policies = list(project_policies.items())

        self.enforcement_log.Clear()
        self.enforcement_log.summary.projects_total = len(project_policies)

        started_time = date_time.get_utc_now_datetime()
        LOGGER.info('starting enforcement wave: %s', started_time)

        projects_enforced_count = self._enforce_projects(
            project_policies, prechange_callback, new_result_callback,
            add_rule_callback)

        finished_time = date_time.get_utc_now_datetime()

        started_timestamp = date_time.get_utc_now_unix_timestamp(started_time)
        finished_timestamp = date_time.get_utc_now_unix_timestamp(
            finished_time)
        total_time = finished_timestamp - started_timestamp

        LOGGER.info('finished wave in %i seconds', total_time)

        self.enforcement_log.summary.timestamp_start_msec = (
            date_time.get_utc_now_microtimestamp(started_time))
        self.enforcement_log.summary.timestamp_end_msec = (
            date_time.get_utc_now_microtimestamp(finished_time))

        self._summarize_results()

        if not projects_enforced_count:
            LOGGER.warning('No projects enforced on the last run, exiting.')

        return self.enforcement_log
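
run() records both the elapsed wall-clock seconds and microsecond start/end timestamps for the summary. Assuming the date_time helpers behave as in the first example, the bookkeeping reduces to this self-contained arithmetic:

from calendar import timegm
from datetime import datetime, timedelta

started_time = datetime(2018, 1, 1, 0, 0, 0)
finished_time = started_time + timedelta(seconds=42)

# Elapsed seconds, computed from the two Unix timestamps.
total_time = timegm(finished_time.timetuple()) - timegm(started_time.timetuple())
assert total_time == 42

# Microsecond timestamps stored in the summary proto.
timestamp_start = timegm(started_time.timetuple()) * 1000000 + started_time.microsecond
timestamp_end = timegm(finished_time.timetuple()) * 1000000 + finished_time.microsecond
assert timestamp_end - timestamp_start == 42 * 1000000
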
Example #11
import shutil
import tempfile
import unittest

from datetime import datetime

from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.dao import create_engine
from google.cloud.forseti.services.dao import ModelManager
from google.cloud.forseti.services.model.importer import importer
from google.cloud.forseti.services.model.importer.importer import InventoryImporter

FAKE_DATETIME = datetime(2018, 1, 28, 10, 20, 30, 0)
FAKE_DATETIME_TIMESTAMP = date_time.get_utc_now_microtimestamp(FAKE_DATETIME)


class ServiceConfig(object):
    """Helper class to implement dependency injection to Forseti Server services.
    """
    def __init__(self, db_connect_string):
        engine = create_engine(db_connect_string, echo=False)
        self.model_manager = ModelManager(engine)

    def run_in_background(self, function):
        """Runs a function in a thread pool in the background."""
        return function()


def get_db_file_path(db_name):