def test_get_configs(self):
    """Check that each config-selection mode yields the expected count."""
    validator = Validator(self.scen, self.trajectory, self.output_rh, self.rng)
    # mode -> expected number of configurations returned by _get_configs
    expected_counts = {"def": 1, "inc": 1, "def+inc": 2, "time": 9, "all": 9}
    for mode, expected in expected_counts.items():
        self.assertEqual(expected, len(validator._get_configs(mode)))
def test_passed_runhistory(self):
    """Test if a passed runhistory ends up in the resulting runhistory."""
    scen = Scenario(self.scen_fn,
                    cmd_args={'run_obj': 'quality',
                              'instances': self.train_insts,
                              'test_instances': self.test_insts})
    scen.instance_specific = self.inst_specs
    validator = Validator(scen, self.trajectory, self.rng)
    # Add a few runs and check, if they are correctly processed
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory(average_cost)
    half = len(old_configs) // 2
    seeds = [127] * half
    seeds[-1] = 126  # Test instance_seed-structure in validation
    # Pair each config with its seed directly instead of the former
    # O(n^2) ``old_configs.index(config)`` lookup, which also picked the
    # wrong seed whenever the same incumbent appeared twice in the
    # trajectory (``index`` always returns the first occurrence).
    for config, seed in zip(old_configs[:half], seeds):
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                   seed=seed)
    configs = validator._get_configs('all')
    insts = validator._get_instances('train')
    runs_w_rh = validator.get_runs(configs, insts, repetitions=2,
                                   runhistory=old_rh)
    runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
    # The 4 pre-existing runs are deducted from the to-do list ...
    self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
    # ... and show up in the returned runhistory
    self.assertEqual(len(runs_w_rh[1].data), 4)
    self.assertEqual(len(runs_wo_rh[1].data), 0)
def test_passed_runhistory_deterministic(self):
    """Test if passed runhistory is in resulting runhistory."""
    scen = Scenario(self.scen_fn,
                    cmd_options={'run_obj': 'quality',
                                 'train_insts': self.train_insts,
                                 'deterministic': True})
    scen.instance_specific = self.inst_specs
    validator = Validator(scen, self.trajectory, self.rng)
    # Seed a runhistory with runs for the first half of the incumbents
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory()
    half = len(old_configs) // 2
    for config in old_configs[:half]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')
    configs = validator._get_configs('all')
    insts = validator._get_instances('train')
    runs_w_rh = validator._get_runs(configs, insts, repetitions=2,
                                    runhistory=old_rh)
    runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
    # The 4 pre-existing runs are deducted from the to-do list ...
    self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
    # ... and appear in the returned runhistory
    self.assertEqual(4, len(runs_w_rh[1].data))
    self.assertEqual(0, len(runs_wo_rh[1].data))
def test_passed_runhistory(self):
    """Test if passed runhistory is in resulting runhistory."""
    self.scen.train_insts = self.train_insts
    self.scen.test_insts = self.test_insts
    validator = Validator(self.scen, self.trajectory, self.output_rh,
                          self.rng)
    # Populate a runhistory with runs for the first half of the incumbents
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory(average_cost)
    num_old = len(old_configs) // 2
    for config in old_configs[:num_old]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                   seed=127)
    configs = validator._get_configs('all')
    insts = validator._get_instances('train')
    runs_w_rh = validator.get_runs(configs, insts, repetitions=2,
                                   runhistory=old_rh)
    runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
    # Four runs are already covered by the passed runhistory
    self.assertEqual(len(runs_w_rh), len(runs_wo_rh) - 4)
def test_get_configs(self):
    """Check config counts for every selection mode, incl. time limits."""
    scen = Scenario(self.scen_fn, cmd_args={'run_obj': 'quality'})
    validator = Validator(scen, self.trajectory, self.rng)
    # mode -> expected number of configurations
    for mode, expected in [("def", 1), ("inc", 1), ("def+inc", 2),
                           ("wallclock_time", 7), ("cpu_time", 8),
                           ("all", 10)]:
        self.assertEqual(expected, len(validator._get_configs(mode)))
    # Using maxtime
    validator.scen.wallclock_limit = 65
    validator.scen.algo_runs_timelimit = 33
    self.assertEqual(8, len(validator._get_configs("wallclock_time")))
    self.assertEqual(9, len(validator._get_configs("cpu_time")))
    # Invalid mode strings must raise
    self.assertRaises(ValueError, validator._get_configs, "notanoption")
    self.assertRaises(ValueError, validator._get_instances, "notanoption")
def test_passed_runhistory_no_insts(self):
    """Test passed runhistory, without instances."""
    scen = Scenario(self.scen_fn, cmd_args={'run_obj': 'quality'})
    scen.instance_specific = self.inst_specs
    validator = Validator(scen, self.trajectory, self.rng)
    # Register runs for the first half of the incumbents (no instance_id)
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory(average_cost)
    for config in old_configs[:len(old_configs) // 2]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, seed=127)
    configs = validator._get_configs('all')
    insts = validator._get_instances('train')
    runs_w_rh = validator.get_runs(configs, insts, repetitions=2,
                                   runhistory=old_rh)
    runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
    # The 4 pre-existing runs are deducted from the to-do list ...
    self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
    # ... and appear in the returned runhistory
    self.assertEqual(len(runs_w_rh[1].data), 4)
    self.assertEqual(len(runs_wo_rh[1].data), 0)