def testSyncInventoryGroupsToNdbHostGroupConfig(self):
  """Tests that inventory groups are synced to HostGroupConfig entities.

  Pre-existing group configs with stale parent_groups ('foo_jump',
  'foo_bar') should be replaced by the groups derived from the inventory
  data during the sync.
  """
  # Seed stale group configs that the sync is expected to overwrite.
  ndb.put_multi([
      datastore_entities.HostGroupConfig(
          id='foo_jump', lab_name='foo',
          parent_groups=['foo_all', 'foo_bar']),
      datastore_entities.HostGroupConfig(
          id='foo_bar', lab_name='foo', parent_groups=['foo_all']),
  ])
  config_syncer_gcs_to_ndb.SyncInventoryGroupsToNDB()
  ndb.get_context().clear_cache()
  res = datastore_entities.HostGroupConfig.query(
      datastore_entities.HostGroupConfig.lab_name == 'foo').fetch()
  # Every synced group must have a name. The original code asserted this
  # obliquely with `if g.name is None: self.assertIsNone(g)` -- which can
  # never pass for a real entity -- so assert the condition directly.
  for g in res:
    self.assertIsNotNone(g.name)
  group_map = {g.name: g for g in res}
  self.assertLen(group_map, 10)
  self.assertSameElements(group_map['all'].parent_groups, [])
  self.assertSameElements(group_map['jump'].parent_groups, ['server'])
  self.assertSameElements(group_map['dhcp'].parent_groups, ['server'])
  self.assertSameElements(group_map['pxe'].parent_groups, ['server'])
  self.assertSameElements(group_map['server'].parent_groups, [])
  self.assertSameElements(group_map['dtf'].parent_groups, ['tf'])
  self.assertSameElements(group_map['storage_tf'].parent_groups, ['tf'])
def testSyncInventoryGroupVarAccountsToNDB(self):
  """Verifies group-var account principals are merged into NDB."""
  datastore_entities.LabConfig(
      id='foo', lab_name='foo', owners=['foo-admin']).put()
  # Seed a group that already has some principals; the sync should merge
  # the inventory group vars on top of them.
  seeded_principals = {'foo': {'principals': ['user1', 'user2']}}
  datastore_entities.HostGroupConfig(
      id='foo_dhcp',
      name='dhcp',
      lab_name='foo',
      account_principals=seeded_principals).put()

  config_syncer_gcs_to_ndb.SyncInventoryGroupVarAccountsToNDB()
  ndb.get_context().clear_cache()

  group = datastore_entities.HostGroupConfig.get_by_id('foo_dhcp')
  expected = {
      'android-test-admin': {
          'enable_sudo': 'true',
          'principals': [
              'group/group-one', 'group/group-two', 'user1', 'user2'
          ],
      },
      'android-test': {
          'principals': ['group/group-three', 'user3', 'user4'],
      },
  }
  self.assertEqual(group.account_principals, expected)
def setUp(self):
  """Creates the test app, a request, and one command to monitor."""
  super(CommandAttemptMonitorTest, self).setUp()
  self.testapp = webtest.TestApp(command_attempt_monitor.APP)
  self.plugin_patcher = mock.patch('__main__.env_config.CONFIG.plugin')
  self.plugin_patcher.start()

  request_info = datastore_entities.CommandInfo(
      command_line='command_line',
      cluster='cluster',
      run_target='run_target')
  self.request = request_manager.CreateRequest(
      request_id='1001', user='******', command_infos=[request_info])

  command_info = datastore_entities.CommandInfo(
      command_line='long command line',
      cluster='foobar',
      run_target='foo',
      run_count=1,
      shard_count=1)
  commands = command_manager.CreateCommands(
      request_id=self.request.key.id(),
      command_infos=[command_info],
      shard_indexes=list(range(1)),
      request_plugin_data={
          'ants_invocation_id': 'i123',
          'ants_work_unit_id': 'w123'
      })
  self.command = commands[0]
  # Clear Datastore cache
  ndb.get_context().clear_cache()
def testUpdateHostConfigs(self):
  """Tests that check host configs are updated."""
  self._CreateHostConfigEntity(
      'homer-atc1', tf_global_config_path='old_path.xml')
  lab_config_pb = config_syncer_gcs_to_ndb.GetLabConfigFromGCS(
      config_syncer_gcs_to_ndb.LAB_CONFIG_DIR_PATH + LAB_CONFIG_FILE)
  config_syncer_gcs_to_ndb._UpdateHostConfigs(
      lab_config_pb.cluster_configs[0].host_configs,
      lab_config_pb.cluster_configs[0], lab_config_pb)
  ndb.get_context().clear_cache()

  # Both hosts inherit the same owner set from the lab + cluster configs.
  expected_owners = ['lab_user1', 'user1', 'owner1', 'owner2']

  # homer-atc1's stale config path is overridden by the GCS config.
  host = datastore_entities.HostConfig.get_by_id('homer-atc1')
  self.assertEqual(host.hostname, 'homer-atc1')
  self.assertEqual(host.tf_global_config_path,
                   'configs/homer-atc1/config.xml')
  self.assertEqual(host.lab_name, 'lab1')
  self.assertEqual(host.cluster_name, 'cluster1')
  self.assertCountEqual(host.owners, expected_owners)
  self.assertTrue(host.graceful_shutdown)
  self.assertTrue(host.enable_ui_update)
  self.assertEqual(host.shutdown_timeout_sec, 1000)

  host = datastore_entities.HostConfig.get_by_id('homer-atc2')
  self.assertEqual(host.hostname, 'homer-atc2')
  self.assertEqual(host.tf_global_config_path,
                   'configs/homer-atc2/config.xml')
  self.assertEqual(host.lab_name, 'lab1')
  self.assertCountEqual(host.owners, expected_owners)
  self.assertTrue(host.graceful_shutdown)
  self.assertFalse(host.enable_ui_update)
  self.assertEqual(host.shutdown_timeout_sec, 1000)
def setUp(self):
  """Creates the test app and two identical pending requests."""
  super(CommandMonitorTest, self).setUp()
  self.testapp = webtest.TestApp(command_monitor.APP)
  self.plugin_patcher = mock.patch('__main__.env_config.CONFIG.plugin')
  self.plugin_patcher.start()

  def _CreateRequest(request_id):
    # Both requests share the same command info; only the id differs.
    return request_manager.CreateRequest(
        request_id=request_id,
        user='******',
        command_infos=[
            datastore_entities.CommandInfo(
                command_line='command_line',
                cluster='cluster',
                run_target='run_target')
        ])

  self.request = _CreateRequest('1001')
  self.request_2 = _CreateRequest('1002')
  # Clear Datastore cache
  ndb.get_context().clear_cache()
def testUpdateLabConfig(self):
  """Tests that check lab config is updated."""
  lab_config_pb = config_syncer_gcs_to_ndb.GetLabConfigFromGCS(
      config_syncer_gcs_to_ndb.LAB_CONFIG_DIR_PATH + LAB_CONFIG_FILE)
  config_syncer_gcs_to_ndb._UpdateLabConfig(lab_config_pb)
  ndb.get_context().clear_cache()

  lab_config = datastore_entities.LabConfig.get_by_id('lab1')
  self.assertEqual('lab1', lab_config.lab_name)
  self.assertEqual(['lab_user1', 'user1'], lab_config.owners)
  # The sync also creates a corresponding LabInfo entity.
  lab_info = datastore_entities.LabInfo.get_by_id('lab1')
  self.assertEqual('lab1', lab_info.lab_name)
def testSyncToNDB(self):
  """test SyncToNDB."""
  config_syncer_gcs_to_ndb.SyncToNDB()
  ndb.get_context().clear_cache()

  lab = datastore_entities.LabConfig.get_by_id('lab1')
  self.assertEqual('lab1', lab.lab_name)
  # Every cluster and host from the lab config file should be synced.
  for cluster_id in ('cluster1', 'cluster2'):
    cluster = datastore_entities.ClusterConfig.get_by_id(cluster_id)
    self.assertEqual(cluster_id, cluster.cluster_name)
  for host_id in ('homer-atc1', 'homer-atc2', 'homer-atc3', 'homer-atc4'):
    host = datastore_entities.HostConfig.get_by_id(host_id)
    self.assertEqual(host_id, host.hostname)
def _AddTask(queue_name,
             payload,
             target=None,
             name=None,
             eta=None,
             transactional=False):
  """Add a task using a selected task scheduler implementation.

  Args:
    queue_name: a queue name.
    payload: a task payload.
    target: a target module name.
    name: a task name.
    eta: a ETA for task execution.
    transactional: a flag to indicate whether this task should be tied to
      datastore transaction.
  Returns:
    A Task object.
  Raises:
    TaskTooLargeError: if a task is too large.
  """
  if MAX_TASK_SIZE_BYTES < len(payload):
    raise TaskTooLargeError("Task %s-%s is larger than %s bytes" %
                            (queue_name, name, MAX_TASK_SIZE_BYTES))
  if transactional and not name:
    # Transactional tasks are scheduled on commit; generate a name now so
    # the Task object returned below can reference it.
    name = str(uuid.uuid1())

  def Callback():
    """A callback function to add a task."""
    try:
      return _GetTaskScheduler().AddTask(
          queue_name=queue_name,
          payload=payload,
          target=target,
          task_name=name,
          eta=eta)
    except Exception as e:
      # Log at error level with the stack trace (logging.exception), and
      # chain the original exception so callers see the root cause.
      logging.exception("Failed to add task")
      raise Error(e) from e

  if transactional:
    # Defer scheduling until the surrounding datastore transaction commits.
    ndb.get_context().call_on_commit(Callback)
    return plugins_base.Task(name=name, payload=payload, eta=eta)
  return Callback()
def testSyncInventoryGroupsToNdbHostConfig(self):
  """Checks that host inventory_groups are populated after the sync."""
  config_syncer_gcs_to_ndb.SyncInventoryGroupsToNDB()
  ndb.get_context().clear_cache()

  first = datastore_entities.HostConfig.get_by_id('dhcp1.ntc-tpkd.google.com')
  self.assertEqual(first.lab_name, 'foo')
  self.assertEqual(first.hostname, 'dhcp1.ntc-tpkd.google.com')

  # Expected inventory group membership per host id.
  expected_groups_by_host = {
      'dhcp1.ntc-tpkd.google.com': {'jump', 'dhcp', 'pxe', 'server'},
      'dhcp2.ntc-tpkd.google.com': {'jump', 'dhcp', 'pxe', 'server'},
      'tim-test.ntc-tpkd.google.com': {'pixellab'},
      'tfpu00101.ntc-tpkd.google.com': {'dtf', 'tf'},
      'tfpu00201.ntc-tpkd.google.com': {'storage_tf', 'tf'},
  }
  for host_id, expected_groups in expected_groups_by_host.items():
    host = datastore_entities.HostConfig.get_by_id(host_id)
    self.assertSetEqual(set(host.inventory_groups), expected_groups)
def testUpdateClusterConfigs_withDuplicateClusterConfig(self):
  """Tests that check lab configs with duplicated clusters are updated."""
  config_file = 'lab-config-with-duplicated-cluster.yaml'
  gcs_path = config_syncer_gcs_to_ndb.LAB_CONFIG_DIR_PATH + config_file
  # Copy the local fixture file into the mock GCS storage.
  with self.mock_file_storage.OpenFile(gcs_path, 'w') as storage_file:
    with open(_GetTestFilePath(config_file), 'r') as f:
      for line in f:
        storage_file.write(six.ensure_binary(line))
  self._CreateClusterConfigEntity(
      'cluster1', tf_global_config_path='old_tf_global_path.xml')

  lab_config_pb = config_syncer_gcs_to_ndb.GetLabConfigFromGCS(gcs_path)
  config_syncer_gcs_to_ndb._UpdateClusterConfigs(
      lab_config_pb.cluster_configs)
  ndb.get_context().clear_cache()

  # cluster1 is overridden by the duplicated cluster definition.
  cluster = datastore_entities.ClusterConfig.get_by_id('cluster1')
  self.assertEqual('cluster1', cluster.cluster_name)
  self.assertEqual('configs/cluster1_duplicated/config.xml',
                   cluster.tf_global_config_path)
def testUpdateClusterConfigs(self):
  """Tests that check cluster configs are updated."""
  self._CreateClusterConfigEntity(
      'cluster1', tf_global_config_path='old_tf_global_path.xml')
  lab_config_pb = config_syncer_gcs_to_ndb.GetLabConfigFromGCS(
      config_syncer_gcs_to_ndb.LAB_CONFIG_DIR_PATH + LAB_CONFIG_FILE)
  config_syncer_gcs_to_ndb._UpdateClusterConfigs(
      lab_config_pb.cluster_configs)
  ndb.get_context().clear_cache()

  # (cluster id, login name, owners, config path) expectations; cluster1's
  # stale config path should be overridden by the GCS config.
  expectations = [
      ('cluster1', 'login_user1', ['owner1', 'owner2'],
       'configs/cluster1/config.xml'),
      ('cluster2', 'login_user2', ['owner1'],
       'configs/cluster2/config.xml'),
  ]
  for cluster_id, login_name, owners, config_path in expectations:
    cluster = datastore_entities.ClusterConfig.get_by_id(cluster_id)
    self.assertEqual(cluster_id, cluster.cluster_name)
    self.assertEqual(login_name, cluster.host_login_name)
    self.assertEqual(owners, cluster.owners)
    self.assertEqual(config_path, cluster.tf_global_config_path)
def setUp(self):
  """Populates hosts and devices used by the device monitor tests."""
  testbed_dependent_test.TestbedDependentTest.setUp(self)

  # Host 'atl-01.mtv' in lab 'alab' with five devices in various states.
  self.host1 = datastore_test_util.CreateHost(
      'free', 'atl-01.mtv', lab_name='alab')
  self.device_1 = datastore_test_util.CreateDevice(
      'free', 'atl-01.mtv', 'serial_1', run_target='shamu',
      state='Allocated')
  self.device_2 = datastore_test_util.CreateDevice(
      'free', 'atl-01.mtv', 'serial_2', run_target='shamu',
      state='Allocated')
  self.device_3 = datastore_test_util.CreateDevice(
      'free', 'atl-01.mtv', 'serial_3', run_target='shamu',
      state='Available')
  self.device_4 = datastore_test_util.CreateDevice(
      'free', 'atl-01.mtv', 'serial_4', run_target='hammerhead',
      state='Available')
  self.device_5 = datastore_test_util.CreateDevice(
      'free', 'atl-01.mtv', 'serial_5', run_target='hammerhead',
      state='Ignored')
  self.host1_devices = [
      self.device_1, self.device_2, self.device_3, self.device_4,
      self.device_5
  ]
  device_manager._CountDeviceForHost('atl-01.mtv')

  # Host 'atl-02.mtv' with four devices, one of them hidden.
  datastore_test_util.CreateHost('presubmit', 'atl-02.mtv', lab_name='alab')
  self.device_6 = datastore_test_util.CreateDevice(
      'presubmit', 'atl-02.mtv', 'serial_6', run_target='hammerhead',
      state='Allocated')
  self.device_7 = datastore_test_util.CreateDevice(
      'presubmit', 'atl-02.mtv', 'serial_7', run_target='hammerhead',
      state='Unavailable')
  self.device_8 = datastore_test_util.CreateDevice(
      'presubmit', 'atl-02.mtv', 'serial_8', run_target='bullhead',
      state='Gone')
  self.device_9 = datastore_test_util.CreateDevice(
      'presubmit', 'atl-02.mtv', 'serial_9', run_target='angler',
      state='Allocated', hidden=True)
  device_manager._CountDeviceForHost('atl-02.mtv')

  # A cloud host with a single null device.
  self.cloud_host = datastore_test_util.CreateHost(
      'dockerized-tf-gke', 'cloud-tf-1234', lab_name='cloud-tf')
  self.device_10 = datastore_test_util.CreateDevice(
      'dockerized-tf-gke', 'cloud-tf-1234', 'null-device-0',
      run_target='NullDevice')
  device_manager._CountDeviceForHost('cloud-tf-1234')

  # A mobile-harness host with no devices.
  datastore_test_util.CreateHost('', 'mh.host', lab_name='mh')

  self.testapp = webtest.TestApp(device_monitor.APP)
  # Clear Datastore cache
  ndb.get_context().clear_cache()