def test_repr():
    """Check that ``Config.__repr__`` reports values, loaded paths and id."""
    cfg = Config()
    actual = repr(cfg)
    expected = (f"Current values: \n {cfg.current_config} \n"
                f"Current paths: \n {cfg._loaded_config_files} \n"
                f"{super(Config, cfg).__repr__()}")
    assert actual == expected
def test_set_guid_workstatio_code(ws, monkeypatch):
    """Valid work station codes are saved; invalid ones leave config alone."""
    monkeypatch.setattr('builtins.input', lambda x: str(ws))
    # remember the value present before the interactive update runs
    original_ws = Config().current_config['GUID_components']['work_station']
    with protected_config():
        set_guid_work_station_code()
        updated = Config().current_config['GUID_components']['work_station']
        if 0 < ws < 16777216:
            assert updated == ws
        else:
            # out-of-range input must not modify the stored code
            assert updated == original_ws
def test_set_guid_location_code(loc, monkeypatch):
    """Valid location codes are saved; invalid ones leave config alone."""
    monkeypatch.setattr('builtins.input', lambda x: str(loc))
    # remember the value present before the interactive update runs
    original_loc = Config().current_config['GUID_components']['location']
    with protected_config():
        set_guid_location_code()
        updated = Config().current_config['GUID_components']['location']
        if 0 < loc < 257:
            assert updated == loc
        else:
            # out-of-range input must not modify the stored code
            assert updated == original_loc
def test_update_from_path(path_to_config_file_on_disk):
    """Updating from a path applies its values without losing defaults."""
    with default_config():
        cfg = Config()

        # before the update the default value must still be in place
        assert cfg["core"]["db_debug"] is False

        cfg.update_config(path=path_to_config_file_on_disk)
        assert cfg['core']['db_debug'] is True

        # settings NOT present in the on-disk file keep their defaults
        assert cfg['gui']['notebook'] is True
        assert cfg['station']['default_folder'] == '.'

        assert cfg.current_config_path == os.path.join(
            path_to_config_file_on_disk, 'qcodesrc.json')
def test_generate_guid(loc, stat, smpl):
    """Write GUID components to the config, generate a guid, parse it back."""
    with protected_config():
        cfg = Config()
        cfg['GUID_components']['location'] = loc
        cfg['GUID_components']['work_station'] = stat
        cfg['GUID_components']['sample'] = smpl
        cfg.save_to_home()

        guid = generate_guid()
        gen_time = int(np.round(time.time() * 1000))
        parsed = parse_guid(guid)

        # a sample id of 0 is replaced by the placeholder 0xaaaaaaaa
        expected_smpl = int('a' * 8, base=16) if smpl == 0 else smpl

        assert parsed['location'] == loc
        assert parsed['work_station'] == stat
        assert parsed['sample'] == expected_smpl
        assert parsed['time'] - gen_time < 2
def set_guid_location_code() -> None:
    """
    Interactive function to set the location code.
    """
    cfg = Config()
    old_loc = cfg['GUID_components']['location']
    print(f'Updating GUID location code. Current location code is: {old_loc}')
    if old_loc != 0:
        # warn before overwriting a value someone already customized
        print('That is a non-default location code. Perhaps you should not '
              'change it? Re-enter that code to leave it unchanged.')
    loc_str = input('Please enter the new location code (1-256): ')
    try:
        location = int(loc_str)
    except ValueError:
        print('The location code must be an integer. No update performed.')
        return
    if location < 1 or location > 256:
        print('The location code must be between 1 and 256 (both included). '
              'No update performed')
        return
    cfg['GUID_components']['location'] = location
    cfg.save_to_home()
def location_and_station_set_to(location: int, work_station: int):
    """
    Temporarily set the GUID location and work station codes in the home
    config, restoring the previous configuration on exit.
    """
    cfg = Config()
    saved_config = deepcopy(cfg.current_config)
    cfg['GUID_components']['location'] = location
    cfg['GUID_components']['work_station'] = work_station
    cfg.save_to_home()
    try:
        yield
    finally:
        # put the pre-existing configuration back, even on test failure
        cfg.current_config = saved_config
        cfg.save_to_home()
def default_config(user_config: Optional[str] = None):
    """
    Context manager to temporarily establish default config settings.
    This is achieved by overwriting the config paths of the user-,
    environment-, and current directory-config files with the path of the
    config file in the qcodes repository.
    Additionally the current config object `qcodes.config` gets copied and
    reestablished.

    Args:
        user_config: represents the user config file content.
    """
    # Stash the class-level path attributes so they can be restored on exit.
    home_file_name = Config.home_file_name
    schema_home_file_name = Config.schema_home_file_name
    env_file_name = Config.env_file_name
    schema_env_file_name = Config.schema_env_file_name
    cwd_file_name = Config.cwd_file_name
    schema_cwd_file_name = Config.schema_cwd_file_name
    # Blank the home path up front so no real user config can be picked up.
    Config.home_file_name = ''
    with tempfile.TemporaryDirectory() as tmpdirname:
        file_name = os.path.join(tmpdirname, 'user_config.json')
        file_name_schema = os.path.join(tmpdirname,
                                        'user_config_schema.json')
        if user_config is not None:
            # materialize the supplied config content as the "home" file
            with open(file_name, 'w') as f:
                f.write(user_config)
        # Point every config-discovery path at the temp dir (or nowhere).
        Config.home_file_name = file_name
        Config.schema_home_file_name = file_name_schema
        Config.env_file_name = ''
        Config.schema_env_file_name = ''
        Config.cwd_file_name = ''
        Config.schema_cwd_file_name = ''

        # Keep a deep copy of the live config values for restoration.
        default_config_obj: Optional[DotDict] = copy.\
            deepcopy(qcodes.config.current_config)
        qcodes.config = Config()

        try:
            yield
        finally:
            # Restore all class-level paths and the saved config values.
            Config.home_file_name = home_file_name
            Config.schema_home_file_name = schema_home_file_name
            Config.env_file_name = env_file_name
            Config.schema_env_file_name = schema_env_file_name
            Config.cwd_file_name = cwd_file_name
            Config.schema_cwd_file_name = schema_cwd_file_name
            # NOTE(review): only .current_config is restored here; the
            # qcodes.config object itself remains the fresh Config() created
            # above — confirm this identity swap is intended.
            qcodes.config.current_config = default_config_obj
def test_add_and_describe():
    """
    Test that a key can be added and described
    """
    with default_config():
        cfg = Config()
        cfg.add(key='newkey',
                value='testvalue',
                value_type='string',
                description='A test',
                default='testdefault')

        # describe() should render description, value, type and default
        expected_desc = ("A test.\nCurrent value: testvalue. "
                         "Type: string. Default: testdefault.")
        assert cfg.describe('user.newkey') == expected_desc
def set_guid_work_station_code() -> None:
    """
    Interactive function to set the work station code
    """
    cfg = Config()
    old_ws = cfg['GUID_components']['work_station']
    print('Updating GUID work station code. '
          f'Current work station code is: {old_ws}')
    if old_ws != 0:
        print('That is a non-default work station code. Perhaps you should not'
              ' change it? Re-enter that code to leave it unchanged.')
    # The work station code occupies 3 bytes in the GUID, so the largest
    # representable value is 0xFFFFFF == 16777215 (the prompt previously
    # said 16777216, which the range check below rejects).
    ws_str = input('Please enter the new work station code (1-16777215): ')
    try:
        work_station = int(ws_str)
    except ValueError:
        print('The work station code must be an integer. No update performed.')
        return
    if not (16777216 > work_station > 0):
        # Fixed: message previously claimed "between 1 and 256" (copied from
        # the location-code function) and misspelt "staion".
        print('The work station code must be between 1 and 16777215 '
              '(both included). No update performed')
        return
    cfg['GUID_components']['work_station'] = work_station
    cfg.save_to_home()
def generate_guid(timeint: Union[int, None] = None,
                  sampleint: Union[int, None] = None) -> str:
    """
    Generate a guid string to go into the GUID column of the runs table.
    The GUID is based on the GUID-components in the qcodesrc file.
    The generated string is of the format
    '12345678-1234-1234-1234-123456789abc', where the first eight hex numbers
    comprise the 4 byte sample code, the next 2 hex numbers comprise the 1
    byte location, the next 2+4 hex numbers are the 3 byte work station code,
    and the final 4+12 hex number give the 8 byte integer time in ms since
    epoch time

    Args:
        timeint: An integer of milliseconds since unix epoch time
        sampleint: A code for the sample

    Raises:
        RuntimeError: if the config contains no GUID_components section.
    """
    cfg = Config()

    try:
        guid_comp = cfg['GUID_components']
    except KeyError as err:
        # chain the original KeyError so the root cause stays visible
        raise RuntimeError('Invalid QCoDeS config file! No GUID_components '
                           'specified. Can not proceed.') from err

    location = guid_comp['location']
    station = guid_comp['work_station']

    if timeint is None:
        # ms resolution, checked on windows
        timeint = int(np.round(time.time() * 1000))
    if sampleint is None:
        sampleint = guid_comp['sample']

    if sampleint == 0:
        # 0 means "no sample"; use the recognizable placeholder 0xaaaaaaaa
        sampleint = int('a' * 8, base=16)

    loc_str = f'{location:02x}'
    stat_str = f'{station:06x}'
    smpl_str = f'{sampleint:08x}'
    time_str = f'{timeint:016x}'

    guid = (f'{smpl_str}-{loc_str}{stat_str[:2]}-{stat_str[2:]}-'
            f'{time_str[:4]}-{time_str[4:]}')

    return guid
def protected_config():
    """
    Context manager to be used in all tests that modify the config to ensure
    that the config is left untouched even if the tests fail
    """
    backup = deepcopy(Config().current_config)
    try:
        yield
    finally:
        # write the saved snapshot back regardless of what happened inside
        cfg = Config()
        cfg.current_config = backup
        cfg.save_to_home()
def setUp(self):
    # Create a fresh Config instance for each test so state cannot leak
    # between test cases.
    self.conf = Config()
class TestConfig(TestCase):
    """Unit tests for Config loading, validation and updating, using mocks
    to stand in for the on-disk config and schema files."""

    def setUp(self):
        # fresh Config per test so state cannot leak between cases
        self.conf = Config()

    def test_missing_config_file(self):
        # loading a non-existent path must raise, not silently succeed
        with self.assertRaises(FileNotFoundError):
            self.conf.load_config("./missing.json")

    # Decorators apply bottom-up, so the mock arguments arrive in the order
    # isfile, load_config, env, schema.
    @patch.object(Config, 'current_schema', new_callable=PropertyMock)
    @patch.object(Config, 'env_file_name', new_callable=PropertyMock)
    @patch.object(Config, 'load_config')
    @patch('os.path.isfile')
    @unittest.skipIf(Path.cwd() == Path.home(),
                     'This test requires that working dir is different from'
                     'homedir.')
    def test_default_config_files(self, isfile, load_config, env, schema):
        # don't try to load custom schemas
        self.conf.schema_cwd_file_name = None
        self.conf.schema_home_file_name = None
        self.conf.schema_env_file_name = None
        schema.return_value = SCHEMA
        env.return_value = ENV_KEY
        isfile.return_value = True
        # serve per-path config content from the GOOD map
        load_config.side_effect = partial(side_effect, GOOD_CONFIG_MAP)
        # NOTE(review): this assigns to self.defaults_schema (the TestCase),
        # not self.conf.defaults_schema — confirm that is intended.
        self.conf.defaults, self.defaults_schema = self.conf.load_default()
        config = self.conf.update_config()
        self.assertEqual(config, CONFIG)

    @patch.object(Config, 'current_schema', new_callable=PropertyMock)
    @patch.object(Config, 'env_file_name', new_callable=PropertyMock)
    @patch.object(Config, 'load_config')
    @patch('os.path.isfile')
    @unittest.skipIf(Path.cwd() == Path.home(),
                     'This test requires that working dir is different from'
                     'homedir.')
    def test_bad_config_files(self, isfile, load_config, env, schema):
        # don't try to load custom schemas
        self.conf.schema_cwd_file_name = None
        self.conf.schema_home_file_name = None
        self.conf.schema_env_file_name = None
        schema.return_value = SCHEMA
        env.return_value = ENV_KEY
        isfile.return_value = True
        # serve invalid config content: validation must fail
        load_config.side_effect = partial(side_effect, BAD_CONFIG_MAP)
        with self.assertRaises(jsonschema.exceptions.ValidationError):
            self.conf.defaults, self.defaults_schema = self.conf.load_default()
            self.conf.update_config()

    @patch.object(Config, 'current_schema', new_callable=PropertyMock)
    @patch.object(Config, 'env_file_name', new_callable=PropertyMock)
    @patch.object(Config, 'load_config')
    @patch('os.path.isfile')
    @patch("builtins.open", mock_open(read_data=USER_SCHEMA))
    @unittest.skipIf(Path.cwd() == Path.home(),
                     'This test requires that working dir is different from'
                     'homedir.')
    def test_user_schema(self, isfile, load_config, env, schema):
        # open() is mocked so a custom user schema appears to exist on disk
        schema.return_value = copy.deepcopy(SCHEMA)
        env.return_value = ENV_KEY
        isfile.return_value = True
        load_config.side_effect = partial(side_effect, GOOD_CONFIG_MAP)
        self.conf.defaults, self.defaults_schema = self.conf.load_default()
        config = self.conf.update_config()
        self.assertEqual(config, CONFIG)

    @patch.object(Config, 'current_schema', new_callable=PropertyMock)
    @patch.object(Config, 'env_file_name', new_callable=PropertyMock)
    @patch.object(Config, 'load_config')
    @patch('os.path.isfile')
    @patch("builtins.open", mock_open(read_data=USER_SCHEMA))
    def test_bad_user_schema(self, isfile, load_config, env, schema):
        schema.return_value = copy.deepcopy(SCHEMA)
        env.return_value = ENV_KEY
        isfile.return_value = True
        # invalid config against the user schema: validation must fail
        load_config.side_effect = partial(side_effect, BAD_CONFIG_MAP)
        with self.assertRaises(jsonschema.exceptions.ValidationError):
            self.conf.defaults, self.defaults_schema = self.conf.load_default()
            self.conf.update_config()

    @patch.object(Config, "current_config", new_callable=PropertyMock)
    def test_update_user_config(self, config):
        # deep copy because we mutate state
        config.return_value = copy.deepcopy(CONFIG)
        self.conf.add("foo", "bar")
        self.assertEqual(self.conf.current_config, UPDATED_CONFIG)

    @patch.object(Config, 'current_schema', new_callable=PropertyMock)
    @patch.object(Config, "current_config", new_callable=PropertyMock)
    def test_update_and_validate_user_config(self, config, schema):
        self.maxDiff = None
        schema.return_value = copy.deepcopy(SCHEMA)
        # deep copy because we mutate state
        config.return_value = copy.deepcopy(CONFIG)
        # adding with full metadata must update both config and schema
        self.conf.add("foo", "bar", "string", "foo", "bar")
        self.assertEqual(self.conf.current_config, UPDATED_CONFIG)
        self.assertEqual(self.conf.current_schema, UPDATED_SCHEMA)
def test_filter_guid(locs, stats, smpls):
    """Generate four guids differing in one component each and check that
    filter_guids_by_parts selects exactly the matching subset."""

    def make_test_guid(cfg, loc: int, smpl: int, stat: int):
        # write the requested components, generate a guid and sanity-check
        # that parsing it returns the same components
        cfg['GUID_components']['location'] = loc
        cfg['GUID_components']['work_station'] = stat
        cfg['GUID_components']['sample'] = smpl
        cfg.save_to_home()

        guid = generate_guid()
        gen_time = int(np.round(time.time() * 1000))
        comps = parse_guid(guid)

        assert comps['location'] == loc
        assert comps['work_station'] == stat
        assert comps['sample'] == smpl
        assert comps['time'] - gen_time < 2

        return guid

    with protected_config():
        guids = []
        cfg = Config()

        # sample id 0 is stored as the placeholder int('a'*8, 16)
        corrected_smpls = [
            smpl if smpl != 0 else int('a' * 8, base=16)
            for smpl in smpls
        ]
        # there is a possibility that we could generate 0 and 2863311530,
        # which are considered equivalent since
        # int('a' * 8, base=16) == 2863311530.
        # We want unique samples, so we exclude this case.
        assume(corrected_smpls[0] != corrected_smpls[1])

        # first we generate a guid that we are going to match against
        guids.append(make_test_guid(cfg, locs[0],
                                    corrected_smpls[0], stats[0]))

        # now generate some guids that will not match because one of the
        # components changed
        guids.append(make_test_guid(cfg, locs[1],
                                    corrected_smpls[0], stats[0]))
        guids.append(make_test_guid(cfg, locs[0],
                                    corrected_smpls[1], stats[0]))
        guids.append(make_test_guid(cfg, locs[0],
                                    corrected_smpls[0], stats[1]))
        assert len(guids) == 4

        # first filter on all parts. This should give exactly one
        # matching guid
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0],
                                               sample_id=corrected_smpls[0],
                                               work_station=stats[0])
        assert len(filtered_guids) == 1
        assert filtered_guids[0] == guids[0]

        # now filter on 2 components
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0],
                                               sample_id=corrected_smpls[0])
        assert len(filtered_guids) == 2
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[3]

        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0],
                                               work_station=stats[0])
        assert len(filtered_guids) == 2
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[2]

        filtered_guids = filter_guids_by_parts(guids,
                                               sample_id=corrected_smpls[0],
                                               work_station=stats[0])
        assert len(filtered_guids) == 2
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[1]

        # now filter on 1 component
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0])
        assert len(filtered_guids) == 3
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[2]
        assert filtered_guids[2] == guids[3]

        filtered_guids = filter_guids_by_parts(guids,
                                               work_station=stats[0])
        assert len(filtered_guids) == 3
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[1]
        assert filtered_guids[2] == guids[2]

        filtered_guids = filter_guids_by_parts(
            guids,
            sample_id=corrected_smpls[0],
        )
        assert len(filtered_guids) == 3
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[1]
        assert filtered_guids[2] == guids[3]
def _make_config():
    """Yield a fresh Config instance (generator-style fixture helper)."""
    yield Config()
def test_combine_runs(two_empty_temp_db_connections,
                      empty_temp_db_connection,
                      some_interdeps):
    """
    Test that datasets that are exported in random order from 2 datasets
    can be reloaded by the original captured_run_id and the experiment name.
    """
    source_conn_1, source_conn_2 = two_empty_temp_db_connections
    target_conn = empty_temp_db_connection

    # two experiments, ten datasets each, in two separate source databases
    source_1_exp = Experiment(conn=source_conn_1,
                              name='exp1',
                              sample_name='no_sample')
    source_1_datasets = [DataSet(conn=source_conn_1,
                                 exp_id=source_1_exp.exp_id)
                         for i in range(10)]

    source_2_exp = Experiment(conn=source_conn_2,
                              name='exp2',
                              sample_name='no_sample')
    source_2_datasets = [DataSet(conn=source_conn_2,
                                 exp_id=source_2_exp.exp_id)
                         for i in range(10)]

    source_all_datasets = source_1_datasets + source_2_datasets

    # shuffle so insertion order into the target db is random
    shuffled_datasets = source_all_datasets.copy()
    random.shuffle(shuffled_datasets)

    # give every dataset one result row so it can be completed
    for ds in source_all_datasets:
        ds.set_interdependencies(some_interdeps[1])
        ds.mark_started()
        ds.add_results([{name: 0.0
                         for name in some_interdeps[1].names}])
        ds.mark_completed()

    # now let's insert all datasets in random order
    for ds in shuffled_datasets:
        extract_runs_into_db(ds.conn.path_to_dbfile,
                             target_conn.path_to_dbfile, ds.run_id)

    # every dataset must be reloadable by captured_run_id + experiment name
    for ds in source_all_datasets:
        loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id,
                                     experiment_name=ds.exp_name,
                                     conn=target_conn)
        assert ds.the_same_dataset_as(loaded_ds)

    # NOTE(review): this passes captured_counter as the captured_run_id
    # argument — presumably the two coincide for this fixture; confirm it
    # is not meant to be load_by_run_spec(captured_counter=...).
    for ds in source_all_datasets:
        loaded_ds = load_by_run_spec(captured_run_id=ds.captured_counter,
                                     experiment_name=ds.exp_name,
                                     conn=target_conn)
        assert ds.the_same_dataset_as(loaded_ds)

    # Now test that we generate the correct table for the guids above
    # this could be split out into its own test
    # but the test above has the useful side effect of
    # setting up datasets for this test.
    guids = [ds.guid for ds in source_all_datasets]

    table = generate_dataset_table(guids, conn=target_conn)
    lines = table.split('\n')
    # header row is split on whitespace to map column names to values
    headers = re.split(r'\s+', lines[0].strip())

    cfg = Config()
    guid_comp = cfg['GUID_components']

    # borrowed fallback logic from generate_guid
    sampleint = guid_comp['sample']
    if sampleint == 0:
        sampleint = int('a' * 8, base=16)

    # rows start at index 2 (index 1 is the header separator line)
    for i in range(2, len(lines)):
        split_line = re.split(r'\s+', lines[i].strip())
        mydict = {headers[j]: split_line[j]
                  for j in range(len(split_line))}

        ds = load_by_guid(guids[i - 2], conn=target_conn)
        assert ds.captured_run_id == int(mydict['captured_run_id'])
        assert ds.captured_counter == int(mydict['captured_counter'])
        assert ds.exp_name == mydict['experiment_name']
        assert ds.sample_name == mydict['sample_name']
        assert int(mydict['sample_id']) == sampleint
        assert guid_comp['location'] == int(mydict['location'])
        assert guid_comp['work_station'] == int(mydict['work_station'])