def main():
    """Create inventory and copy to test_data."""

    def create_inventory(client):
        """Create inventory from mock data.

        Args:
            client: Forseti API client used to start the inventory run.
        """
        # Drain the progress stream so the foreground run completes.
        for progress in client.inventory.create(background=False,
                                                import_as=''):
            continue

    fake_time = importer_test.FAKE_DATETIME
    # Freeze "now" so generated inventory timestamps are deterministic.
    mock.patch.object(
        date_time, 'get_utc_now_datetime', return_value=fake_time).start()

    # Ensure test data doesn't get deleted
    mock.patch.object(os, 'unlink', autospec=True).start()

    mock_copy_file_from_gcs = mock.patch.object(
        file_loader, 'copy_file_from_gcs', autospec=True).start()

    # Mock copy_file_from_gcs to return correct test data file
    def _copy_file_from_gcs(file_path, *args, **kwargs):
        """Fake copy_file_from_gcs.

        Args:
            file_path (str): The GCS path requested by the crawler.
            *args: Unused, matches the real function's signature.
            **kwargs: Unused, matches the real function's signature.

        Returns:
            str: Local path of the matching mock dump file.

        Raises:
            ValueError: If the path matches no known mock dump file.
        """
        if 'resource' in file_path:
            return os.path.join(TEST_RESOURCE_DIR_PATH,
                                'mock_cai_resources.dump')
        elif 'iam_policy' in file_path:
            return os.path.join(TEST_RESOURCE_DIR_PATH,
                                'mock_cai_iam_policies.dump')
        # Fail loudly instead of silently returning None.
        raise ValueError('Unexpected GCS path: %s' % file_path)

    mock_copy_file_from_gcs.side_effect = _copy_file_from_gcs

    # Build the single-model test database.
    engine, tmpfile = create_test_engine_with_file()
    config = TestServiceConfig(engine)
    with gcp_api_mocks.mock_gcp():
        runner = ApiTestRunner(config, [GrpcInventoryFactory])
        runner.run(create_inventory)
    time.sleep(5)
    copy_db_file_to_test(tmpfile, 'forseti-test.db')

    # Build the composite-root test database.
    engine, tmpfile = create_test_engine_with_file()
    config = TestCompositeServiceConfig(engine)
    with gcp_api_mocks.mock_gcp():
        runner = ApiTestRunner(config, [GrpcInventoryFactory])
        runner.run(create_inventory)
    time.sleep(5)
    copy_db_file_to_test(tmpfile, 'forseti-composite-test.db')
def main():
    """Create inventory and copy to test_data."""

    def create_inventory(client):
        """Create inventory from mock data.

        Args:
            client: Forseti API client used to start the inventory run.
        """
        # Drain the progress stream so the foreground run completes.
        for progress in client.inventory.create(background=False,
                                                import_as=''):
            continue

    fake_time = importer_test.FAKE_DATETIME
    # Freeze "now" so generated inventory timestamps are deterministic.
    mock.patch.object(date_time,
                      'get_utc_now_datetime',
                      return_value=fake_time).start()

    # Ensure test data doesn't get deleted
    mock.patch.object(os, 'unlink', autospec=True).start()

    # Mock download to return correct test data file
    def _fake_download(full_bucket_path, output_file):
        """Fake copy_file_from_gcs.

        Args:
            full_bucket_path (str): The GCS path requested by the crawler.
            output_file (file): Writable binary file to receive the data.

        Raises:
            ValueError: If the path matches no known mock dump file.
        """
        if 'resource' in full_bucket_path:
            fake_file = os.path.join(TEST_RESOURCE_DIR_PATH,
                                     'mock_cai_resources.dump')
        elif 'iam_policy' in full_bucket_path:
            fake_file = os.path.join(TEST_RESOURCE_DIR_PATH,
                                     'mock_cai_iam_policies.dump')
        else:
            # Without this, an unmatched path raises a confusing
            # UnboundLocalError on the open() below.
            raise ValueError(
                'Unexpected GCS path: %s' % full_bucket_path)
        with open(fake_file, 'rb') as f:
            output_file.write(f.read())

    # Build the single-model test database.
    engine, tmpfile = create_test_engine_with_file()
    config = TestServiceConfig(engine)
    with gcp_api_mocks.mock_gcp() as gcp_mocks:
        gcp_mocks.mock_storage.download.side_effect = _fake_download
        runner = ApiTestRunner(config, [GrpcInventoryFactory])
        runner.run(create_inventory)
    engine.dispose()
    time.sleep(5)
    copy_db_file_to_test(tmpfile, 'forseti-test.db')

    # Build the composite-root test database.
    engine, tmpfile = create_test_engine_with_file()
    config = TestCompositeServiceConfig(engine)
    with gcp_api_mocks.mock_gcp() as gcp_mocks:
        gcp_mocks.mock_storage.download.side_effect = _fake_download
        runner = ApiTestRunner(config, [GrpcInventoryFactory])
        runner.run(create_inventory)
    engine.dispose()
    time.sleep(5)
    copy_db_file_to_test(tmpfile, 'forseti-composite-test.db')
def _run_crawler(self, config):
    """Runs the crawler with a specific InventoryConfig.

    Args:
        config (InventoryConfig): The configuration to test.

    Returns:
        dict: the resource counts returned by the crawler.
    """
    # Mock download to return correct test data file
    def _fake_download(full_bucket_path, output_file):
        """Fake GCS download that serves the matching mock dump file.

        Raises:
            ValueError: If the path matches no known mock dump file.
        """
        if 'resource' in full_bucket_path:
            fake_file = os.path.join(TEST_RESOURCE_DIR_PATH,
                                     'mock_cai_resources.dump')
        elif 'iam_policy' in full_bucket_path:
            fake_file = os.path.join(TEST_RESOURCE_DIR_PATH,
                                     'mock_cai_iam_policies.dump')
        else:
            # Without this, an unmatched path raises a confusing
            # UnboundLocalError on the open() below.
            raise ValueError(
                'Unexpected GCS path: %s' % full_bucket_path)
        with open(fake_file, 'rb') as f:
            output_file.write(f.read())

    with MemoryStorage() as storage:
        progresser = NullProgresser()
        with gcp_api_mocks.mock_gcp() as gcp_mocks:
            gcp_mocks.mock_storage.download.side_effect = _fake_download
            run_crawler(storage,
                        progresser,
                        config,
                        parallel=False,
                        threads=1)

        self.assertEqual(0,
                         progresser.errors,
                         'No errors should have occurred')

        return self._get_resource_counts_from_storage(storage)
def _run_crawler(self, config, has_org_access=True):
    """Runs the crawler with a specific InventoryConfig.

    Args:
        config (InventoryConfig): The configuration to test.
        has_org_access (bool): True if crawler has access to the org
            resource.

    Returns:
        dict: the resource counts returned by the crawler.
    """
    # NOTE: the previous docstring documented a `client` parameter that
    # does not exist in this signature; it has been removed.
    with MemoryStorage() as storage:
        progresser = NullProgresser()
        with gcp_api_mocks.mock_gcp(has_org_access=has_org_access):
            run_crawler(storage,
                        progresser,
                        config,
                        parallel=False,
                        threads=1)

        self.assertEqual(0,
                         progresser.errors,
                         'No errors should have occurred')

        return self._get_resource_counts_from_storage(storage)
def _run_crawler(self, config, has_org_access=True, session=None):
    """Run the crawler for one InventoryConfig and tally its resources.

    Args:
        config (InventoryConfig): The configuration to test.
        has_org_access (bool): True if crawler has access to the org
            resource.
        session (object): An existing sql session, required for testing
            Cloud Asset API integration.

    Returns:
        dict: the resource counts returned by the crawler.
    """
    with MemoryStorage(session=session) as storage:
        tracker = NullProgresser()
        with gcp_api_mocks.mock_gcp(has_org_access=has_org_access):
            run_crawler(storage, tracker, config, parallel=True)

        # A clean run records zero errors on the progresser.
        self.assertEqual(
            0, tracker.errors, 'No errors should have occurred')

        return self._get_resource_counts_from_storage(storage)
def main():
    """Create CAI dump files from fake data."""
    logger.enable_console_log()
    config = InventoryConfig(
        gcp_api_mocks.ORGANIZATION_ID,
        '',
        {},
        '',
        {'enabled': False})
    service_config = TestServiceConfig('sqlite', config)
    config.set_service_config(service_config)

    def _append_extra_lines(path, target):
        """Append non-comment, stripped lines from a file to target."""
        with open(path, 'r') as f:
            for line in f:
                # Lines starting with '#' are comments in the data files.
                if line.startswith('#'):
                    continue
                target.append(line.strip())

    resources = []
    iam_policies = []

    with MemoryStorage() as storage:
        progresser = NullProgresser()
        with gcp_api_mocks.mock_gcp():
            run_crawler(storage, progresser, config, parallel=False)

        # Convert each crawled item into CAI asset lines.
        for item in storage.mem.values():
            (resource, iam_policy) = convert_item_to_assets(item)
            if resource:
                resources.append(resource)
            if iam_policy:
                iam_policies.append(iam_policy)

    # Merge in hand-maintained extra assets before writing the dumps.
    _append_extra_lines(ADDITIONAL_RESOURCES_FILE, resources)
    _append_extra_lines(ADDITIONAL_IAM_POLCIIES_FILE, iam_policies)

    write_data(resources, RESOURCE_DUMP_FILE)
    write_data(iam_policies, IAM_POLICY_DUMP_FILE)
def test_basic_background(self):
    """Test: Create inventory, background & no import."""

    def test(client):
        """API test callback."""
        progress = None
        inventory_index = None
        # Kick off a background inventory run and drain its progress.
        for progress in client.inventory.create(background=True,
                                                import_as=''):
            continue

        # Poll until the background run has produced an inventory.
        while True:
            # Give background running time to complete.
            time.sleep(5)
            if list(client.inventory.list()):
                break

        self.assertGreater(len(list(client.inventory.list())),
                           0,
                           'Assert list not empty')
        for inventory_index in client.inventory.list():
            self.assertTrue(inventory_index.id == progress.id)
            fetched = client.inventory.get(inventory_index.id)
            self.assertEqual(inventory_index.id, fetched.inventory.id)
            removed = client.inventory.delete(inventory_index.id)
            self.assertEqual(inventory_index.id, removed.inventory.id)
        # Everything was deleted, so the list is empty again.
        self.assertEqual([], list(client.inventory.list()))

    with gcp_api_mocks.mock_gcp():
        setup = create_tester()
        setup.run(test)
def test_basic(self):
    """Test: Create inventory, foreground & no import."""

    def test(client):
        """API test callback."""
        progress = None
        inventory_index = None
        # Run a foreground inventory and drain its progress stream.
        for progress in client.inventory.create(background=False,
                                                import_as=''):
            continue

        self.assertGreater(len(list(client.inventory.list())),
                           0,
                           'Assert list not empty')
        for inventory_index in client.inventory.list():
            self.assertTrue(inventory_index.id == progress.id)
            fetched = client.inventory.get(inventory_index.id).inventory
            self.assertEqual(inventory_index, fetched)
            removed = client.inventory.delete(inventory_index.id).inventory
            self.assertEqual(inventory_index, removed)
        # Everything was deleted, so the list is empty again.
        self.assertEqual([], list(client.inventory.list()))

    with gcp_api_mocks.mock_gcp():
        setup = create_tester()
        setup.run(test)
def test_error(self):
    """Test: Create inventory, foreground, exception raised."""

    def test(client):
        """API test callback."""
        progress = None
        inventory_index = None
        # Run a foreground inventory; the patched write raises.
        for progress in client.inventory.create(background=False,
                                                import_as=''):
            continue

        for inventory_index in client.inventory.list():
            self.assertTrue(inventory_index.id == progress.id)
            result = client.inventory.get(inventory_index.id).inventory
            # Ensure inventory failure.
            self.assertEqual('FAILURE', result.status)
            self.assertIn('Boom!', result.errors)

    # Make Storage.write blow up so the run records a failure.
    with unittest.mock.patch.object(
            Storage, 'write', side_effect=Exception('Boom!')):
        with gcp_api_mocks.mock_gcp():
            setup = create_tester()
            setup.run(test)
def test_crawl_cai_data_with_asset_types(self):
    """Validate including asset_types in the CAI inventory config works."""
    asset_types = [
        'cloudresourcemanager.googleapis.com/Folder',
        'cloudresourcemanager.googleapis.com/Organization',
        'cloudresourcemanager.googleapis.com/Project'
    ]

    inventory_config = InventoryConfig(
        gcp_api_mocks.ORGANIZATION_ID,
        '',
        {},
        0,
        {
            'enabled': True,
            'gcs_path': 'gs://test-bucket',
            'asset_types': asset_types
        })
    inventory_config.set_service_config(FakeServerConfig('fake_engine'))

    # Create subsets of the mock resource dumps that only contain the
    # filtered asset types.
    def _filter_dump(dump_name):
        """Return the lines of a mock dump mentioning any asset type."""
        kept = []
        path = os.path.join(TEST_RESOURCE_DIR_PATH, dump_name)
        with open(path, 'r') as f:
            for line in f:
                if any('"%s"' % asset_type in line
                       for asset_type in asset_types):
                    kept.append(line)
        return ''.join(kept)

    filtered_assets = _filter_dump('mock_cai_resources.dump')
    filtered_iam = _filter_dump('mock_cai_iam_policies.dump')
    filtered_org = _filter_dump('mock_cai_org_policies.dump')
    filtered_access = _filter_dump('mock_cai_access_policies.dump')

    with unittest_utils.create_temp_file(filtered_assets) as resources, \
            unittest_utils.create_temp_file(filtered_iam) as iam_policies, \
            unittest_utils.create_temp_file(filtered_org) as org_policies, \
            unittest_utils.create_temp_file(
                filtered_access) as access_policies:

        # Mock download to return correct test data file.
        def _fake_download(full_bucket_path, output_file):
            """Copy the matching filtered temp file into output_file.

            Raises:
                ValueError: If the path matches no known dump file.
            """
            if 'resource' in full_bucket_path:
                fake_file = resources
            elif 'iam_policy' in full_bucket_path:
                fake_file = iam_policies
            elif 'org_policy' in full_bucket_path:
                fake_file = org_policies
            elif 'access_policy' in full_bucket_path:
                fake_file = access_policies
            else:
                # Without this, an unmatched path raises a confusing
                # UnboundLocalError on the open() below.
                raise ValueError(
                    'Unexpected GCS path: %s' % full_bucket_path)
            with open(fake_file, 'rb') as f:
                output_file.write(f.read())

        with MemoryStorage() as storage:
            progresser = NullProgresser()
            with gcp_api_mocks.mock_gcp() as gcp_mocks:
                gcp_mocks.mock_storage.download.side_effect = (
                    _fake_download)
                run_crawler(storage,
                            progresser,
                            inventory_config)

                # Validate export_assets called with asset_types for
                # every content type.
                expected_calls = [
                    mock.call(gcp_api_mocks.ORGANIZATION_ID,
                              output_config=mock.ANY,
                              content_type='RESOURCE',
                              asset_types=asset_types,
                              blocking=mock.ANY,
                              timeout=mock.ANY),
                    mock.call(gcp_api_mocks.ORGANIZATION_ID,
                              output_config=mock.ANY,
                              content_type='IAM_POLICY',
                              asset_types=asset_types,
                              blocking=mock.ANY,
                              timeout=mock.ANY),
                    mock.call(gcp_api_mocks.ORGANIZATION_ID,
                              output_config=mock.ANY,
                              content_type='ORG_POLICY',
                              asset_types=asset_types,
                              blocking=mock.ANY,
                              timeout=mock.ANY),
                    mock.call(gcp_api_mocks.ORGANIZATION_ID,
                              output_config=mock.ANY,
                              content_type='ACCESS_POLICY',
                              asset_types=asset_types,
                              blocking=mock.ANY,
                              timeout=mock.ANY)
                ]
                (gcp_mocks.mock_cloudasset.export_assets.
                 assert_has_calls(expected_calls, any_order=True))

            self.assertEqual(0,
                             progresser.errors,
                             'No errors should have occurred')

            result_counts = self._get_resource_counts_from_storage(
                storage)

    expected_counts = {
        'crm_access_level': {'resource': 3},
        'crm_access_policy': {'resource': 1},
        'crm_org_policy': {'resource': 3},
        'crm_service_perimeter': {'resource': 1},
        'folder': {'iam_policy': 3, 'resource': 3},
        'gsuite_group': {'resource': 4},
        'gsuite_group_member': {'resource': 1},
        'gsuite_groups_settings': {'resource': 4},
        'gsuite_user': {'resource': 4},
        'gsuite_user_member': {'resource': 3},
        'lien': {'resource': 1},
        'organization': {'iam_policy': 1, 'resource': 1},
        'project': {
            'billing_info': 4,
            'enabled_apis': 4,
            'iam_policy': 4,
            'resource': 4
        },
        'role': {'resource': 18},
        'sink': {'resource': 6},
    }
    self.assertEqual(expected_counts, result_counts)
def test_crawl_cai_data_with_asset_types(self):
    """Validate including asset_types in the CAI inventory config works."""
    asset_types = [
        'cloudresourcemanager.googleapis.com/Folder',
        'cloudresourcemanager.googleapis.com/Organization',
        'cloudresourcemanager.googleapis.com/Project'
    ]

    inventory_config = InventoryConfig(
        gcp_api_mocks.ORGANIZATION_ID,
        '',
        {},
        0,
        {
            'enabled': True,
            'gcs_path': 'gs://test-bucket',
            'asset_types': asset_types
        })
    inventory_config.set_service_config(FakeServerConfig(self.engine))

    # Create subsets of the mock resource dumps that only contain the
    # filtered asset types.
    def _filter_dump(dump_name):
        """Return the lines of a mock dump mentioning any asset type."""
        kept = []
        path = os.path.join(TEST_RESOURCE_DIR_PATH, dump_name)
        with open(path, 'r') as f:
            for line in f:
                if any('"%s"' % asset_type in line
                       for asset_type in asset_types):
                    kept.append(line)
        return ''.join(kept)

    filtered_assets = _filter_dump('mock_cai_resources.dump')
    filtered_iam = _filter_dump('mock_cai_iam_policies.dump')

    with unittest_utils.create_temp_file(filtered_assets) as resources, \
            unittest_utils.create_temp_file(filtered_iam) as iam_policies:

        def _copy_file_from_gcs(file_path, *args, **kwargs):
            """Fake copy_file_from_gcs.

            Raises:
                ValueError: If the path matches no known dump file.
            """
            del args, kwargs
            if 'resource' in file_path:
                return resources
            elif 'iam_policy' in file_path:
                return iam_policies
            # Fail loudly instead of silently returning None.
            raise ValueError('Unexpected GCS path: %s' % file_path)

        self.mock_copy_file_from_gcs.side_effect = _copy_file_from_gcs

        with MemoryStorage(session=self.session) as storage:
            progresser = NullProgresser()
            with gcp_api_mocks.mock_gcp() as gcp_mocks:
                run_crawler(storage,
                            progresser,
                            inventory_config)

                # Validate export_assets called with asset_types for
                # both content types.
                expected_calls = [
                    mock.call(gcp_api_mocks.ORGANIZATION_ID,
                              mock.ANY,
                              content_type='RESOURCE',
                              asset_types=asset_types,
                              blocking=mock.ANY,
                              timeout=mock.ANY),
                    mock.call(gcp_api_mocks.ORGANIZATION_ID,
                              mock.ANY,
                              content_type='IAM_POLICY',
                              asset_types=asset_types,
                              blocking=mock.ANY,
                              timeout=mock.ANY)
                ]
                (gcp_mocks.mock_cloudasset.export_assets.
                 assert_has_calls(expected_calls, any_order=True))

            self.assertEqual(0,
                             progresser.errors,
                             'No errors should have occurred')

            result_counts = self._get_resource_counts_from_storage(
                storage)

    expected_counts = {
        'crm_org_policy': {'resource': 5},
        'folder': {'iam_policy': 3, 'resource': 3},
        'gsuite_group': {'resource': 4},
        'gsuite_group_member': {'resource': 1},
        'gsuite_groups_settings': {'resource': 4},
        'gsuite_user': {'resource': 4},
        'gsuite_user_member': {'resource': 3},
        'kubernetes_cluster': {'resource': 1, 'service_config': 1},
        'lien': {'resource': 1},
        'organization': {'iam_policy': 1, 'resource': 1},
        'project': {
            'billing_info': 4,
            'enabled_apis': 4,
            'iam_policy': 4,
            'resource': 4
        },
        'role': {'resource': 18},
        'sink': {'resource': 6},
    }
    self.assertEqual(expected_counts, result_counts)