def test_validate(self):
        ''' test validation '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality',
                                  'instances' : self.train_insts,
                                  'test_instances': self.test_insts})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Test basic usage
        rh = validator.validate(config_mode='def', instance_mode='test',
                                repetitions=3)
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 9)

        rh = validator.validate(config_mode='inc', instance_mode='train+test')
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 6)

        rh = validator.validate(config_mode='wallclock_time', instance_mode='train')
        self.assertEqual(len(rh.get_all_configs()), 7)
        self.assertEqual(sum([len(rh.get_runs_for_config(c)) for c in
                              rh.get_all_configs()]), 21)

        # Test with backend multiprocessing
        rh = validator.validate(config_mode='def', instance_mode='test',
                                repetitions=3, backend='multiprocessing')
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 9)
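
All of these snippets reference fixtures (self.scen_fn, self.trajectory, self.rng, self.train_insts, self.test_insts, self.inst_specs) that are created in a setUp method not shown on this page. A minimal sketch of what such a setUp could look like, assuming an older SMAC3 module layout; the scenario path, configuration space and trajectory contents below are hypothetical:

import unittest

import numpy as np
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

from smac.scenario.scenario import Scenario
from smac.runhistory.runhistory import RunHistory
from smac.tae.execute_ta_run import StatusType      # module path differs across SMAC3 versions
from smac.optimizer.objective import average_cost   # only needed by older RunHistory signatures
from smac.utils.validate import Validator


class ValidationTest(unittest.TestCase):

    def setUp(self):
        # Hypothetical scenario file; the real test suite ships its own test data.
        self.scen_fn = 'test/test_files/validation/scenario.txt'
        self.rng = np.random.RandomState(seed=42)
        self.train_insts = ['0', '1', '2']
        self.test_insts = ['3', '4', '5']
        self.inst_specs = {'0': 'null', '1': 'one', '2': 'two',
                           '3': 'three', '4': 'four', '5': 'five'}
        # The Validator mainly needs the "incumbent" (a Configuration) of each
        # trajectory entry; real trajectories also carry cost and time stamps.
        cs = ConfigurationSpace()
        cs.add_hyperparameter(UniformFloatHyperparameter('x', 0.0, 1.0))
        self.trajectory = [{'incumbent': cs.sample_configuration(),
                            'wallclock_time': float(i)} for i in range(8)]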

    def test_passed_runhistory(self):
        ''' test if passed runhistory is in resulting runhistory '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality',
                                  'instances' : self.train_insts,
                                  'test_instances': self.test_insts})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check that they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        seeds = [127 for i in range(int(len(old_configs)/2))]
        seeds[-1] = 126  # Test instance_seed-structure in validation
        for config in old_configs[:int(len(old_configs)/2)]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                       seed=seeds[old_configs.index(config)])

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator.get_runs(configs, insts, repetitions=2,
                                       runhistory=old_rh)
        runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)
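
The last three assertions encode the contract of get_runs when a runhistory is passed in: the returned pair consists of the runs that still have to be executed and a RunHistory holding the entries that could be reused from old_rh. A short illustration with the names from the test above, assuming that contract:

        to_run, reused = validator.get_runs(configs, insts, repetitions=2,
                                            runhistory=old_rh)
        # Runs already covered by old_rh are removed from the to-do list ...
        self.assertEqual(len(to_run), len(runs_wo_rh[0]) - 4)
        # ... and are returned as reusable results instead.
        self.assertEqual(len(reused.data), 4)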
Example #3
    def test_inst_no_feat(self):
        ''' test if scenarios are treated correctly when no features are
        specified.'''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'test_insts': self.test_insts
                        })
        self.assertTrue(scen.feature_array is None)
        self.assertEqual(len(scen.feature_dict), 0)

        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check that they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory()
        for config in old_configs[:int(len(old_configs) / 2)]:
            old_rh.add(config,
                       1,
                       1,
                       StatusType.SUCCESS,
                       instance_id='0',
                       seed=127)
        rh = validator.validate_epm('all', 'train+test', 1, old_rh)
        self.assertEqual(len(old_rh.get_all_configs()), 4)
        self.assertEqual(len(rh.get_all_configs()), 10)
Example #4
    def test_passed_runhistory_deterministic(self):
        ''' test if passed runhistory is in resulting runhistory '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'deterministic': True
                        })
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check that they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory()
        for config in old_configs[:int(len(old_configs) / 2)]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator._get_runs(configs,
                                        insts,
                                        repetitions=2,
                                        runhistory=old_rh)
        runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)

    def test_validate_deterministic(self):
        ''' deterministic ta '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj': 'quality',
                                  'instances': self.train_insts,
                                  'deterministic': True})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        rh = validator.validate(config_mode='def+inc',
                                instance_mode='train', repetitions=3)
        self.assertEqual(len(rh.get_all_configs()), 2)
        self.assertEqual(sum([len(rh.get_runs_for_config(c)) for c in
                              rh.get_all_configs()]), 6)
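
The assertions imply that with a deterministic target only one repetition per (configuration, instance) pair is scheduled: the requested repetitions=3 collapse to one, so 'def+inc' (default plus incumbent, i.e. 2 configurations) on the 3 training instances yields 6 runs. A tiny check of that arithmetic:

n_configs, n_insts, n_reps = 2, 3, 1   # repetitions collapse to 1 for deterministic targets
assert n_configs * n_insts * n_reps == 6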
Example #6
    def test_get_runs(self):
        ''' test if the runs are generated as expected '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'train_insts': self.train_insts,
                                     'test_insts': self.test_insts})
        scen.instance_specific = self.inst_specs

        validator = Validator(scen, self.trajectory, self.rng)
        # Get multiple configs
        self.maxDiff = None
        expected = [_Run(config='config1', inst='3', seed=1608637542, inst_specs='three'),
                    _Run(config='config2', inst='3', seed=1608637542, inst_specs='three'),
                    _Run(config='config1', inst='3', seed=1273642419, inst_specs='three'),
                    _Run(config='config2', inst='3', seed=1273642419, inst_specs='three'),
                    _Run(config='config1', inst='4', seed=1935803228, inst_specs='four'),
                    _Run(config='config2', inst='4', seed=1935803228, inst_specs='four'),
                    _Run(config='config1', inst='4', seed=787846414, inst_specs='four'),
                    _Run(config='config2', inst='4', seed=787846414, inst_specs='four'),
                    _Run(config='config1', inst='5', seed=996406378, inst_specs='five'),
                    _Run(config='config2', inst='5', seed=996406378, inst_specs='five'),
                    _Run(config='config1', inst='5', seed=1201263687, inst_specs='five'),
                    _Run(config='config2', inst='5', seed=1201263687, inst_specs='five')]

        runs = validator._get_runs(['config1', 'config2'], scen.test_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Only train
        expected = [_Run(config='config1', inst='0', seed=423734972, inst_specs='null'),
                    _Run(config='config1', inst='0', seed=415968276, inst_specs='null'),
                    _Run(config='config1', inst='1', seed=670094950, inst_specs='one'),
                    _Run(config='config1', inst='1', seed=1914837113, inst_specs='one'),
                    _Run(config='config1', inst='2', seed=669991378, inst_specs='two'),
                    _Run(config='config1', inst='2', seed=429389014, inst_specs='two')]

        runs = validator._get_runs(['config1'], scen.train_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Test and train
        expected = [_Run(config='config1', inst='0', seed=249467210, inst_specs='null'),
                    _Run(config='config1', inst='1', seed=1972458954, inst_specs='one'),
                    _Run(config='config1', inst='2', seed=1572714583, inst_specs='two'),
                    _Run(config='config1', inst='3', seed=1433267572, inst_specs='three'),
                    _Run(config='config1', inst='4', seed=434285667, inst_specs='four'),
                    _Run(config='config1', inst='5', seed=613608295, inst_specs='five')]

        insts = self.train_insts
        insts.extend(self.test_insts)
        runs = validator._get_runs(['config1'], insts, repetitions=1)
        self.assertEqual(runs[0], expected)
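
The seed values in the expected runs are not arbitrary constants: they are consecutive draws from the numpy RandomState handed to the Validator, so the expected lists stay reproducible as long as the same RNG seed is used. A hedged sketch of where such numbers come from (the exact bound and call SMAC uses internally may differ):

import numpy as np

rng = np.random.RandomState(42)
seeds = rng.randint(2 ** 31 - 1, size=12)
print(seeds)   # should reproduce the seeds of the first expected list above if the bound matches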

    def test_get_runs(self):
        ''' test if the runs are generated as expected '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality',
                                  'instances' : self.train_insts,
                                  'test_instances': self.test_insts})
        scen.instance_specific = self.inst_specs

        validator = Validator(scen, self.trajectory, self.rng)
        # Get multiple configs
        expected = [Run(inst_specs='three', seed=1608637542, inst='3', config='config1'),
                    Run(inst_specs='three', seed=1608637542, inst='3', config='config2'),
                    Run(inst_specs='three', seed=1935803228, inst='3', config='config1'),
                    Run(inst_specs='three', seed=1935803228, inst='3', config='config2'),
                    Run(inst_specs='four',  seed=996406378, inst='4', config='config1'),
                    Run(inst_specs='four',  seed=996406378, inst='4', config='config2'),
                    Run(inst_specs='four',  seed=423734972,  inst='4', config='config1'),
                    Run(inst_specs='four',  seed=423734972,  inst='4', config='config2'),
                    Run(inst_specs='five',  seed=670094950,  inst='5', config='config1'),
                    Run(inst_specs='five',  seed=670094950,  inst='5', config='config2'),
                    Run(inst_specs='five',  seed=669991378, inst='5', config='config1'),
                    Run(inst_specs='five',  seed=669991378, inst='5', config='config2')]

        runs = validator.get_runs(['config1', 'config2'], scen.test_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Only train
        expected = [Run(inst_specs='null', seed=249467210,  inst='0', config='config1'),
                    Run(inst_specs='null', seed=1572714583,  inst='0', config='config1'),
                    Run(inst_specs='one',  seed=434285667,  inst='1', config='config1'),
                    Run(inst_specs='one',  seed=893664919, inst='1', config='config1'),
                    Run(inst_specs='two',  seed=88409749,  inst='2', config='config1'),
                    Run(inst_specs='two',  seed=2018247425,  inst='2', config='config1')]

        runs = validator.get_runs(['config1'], scen.train_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Test and train
        expected = [Run(inst='0', seed=1427830251,  config='config1', inst_specs='null' ),
                    Run(inst='1', seed=911989541, config='config1', inst_specs='one'  ),
                    Run(inst='2', seed=780932287, config='config1', inst_specs='two'  ),
                    Run(inst='3', seed=787716372, config='config1', inst_specs='three'),
                    Run(inst='4', seed=1306710475,  config='config1', inst_specs='four' ),
                    Run(inst='5', seed=106328085,  config='config1', inst_specs='five' )]
        insts = self.train_insts
        insts.extend(self.test_insts)
        runs = validator.get_runs(['config1'], insts, repetitions=1)
        self.assertEqual(runs[0], expected)

    def test_validate_epm(self):
        ''' test using epm to validate '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj': 'quality',
                                  'instances': self.train_insts,
                                  'test_instances': self.test_insts,
                                  'features': self.feature_dict})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check that they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        for config in old_configs[:int(len(old_configs) / 2)]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                       seed=127)
        validator.validate_epm('all', 'train', 1, old_rh)
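
test_validate_epm references self.feature_dict, which is not among the fixtures used by the other snippets: validate_epm estimates costs with an empirical performance model, and the model can use per-instance features when the scenario passes them via the 'features' option. A hypothetical shape for that fixture (one numeric feature per instance, values made up), which would be assigned in setUp:

import numpy as np

feature_dict = {inst: np.array([float(inst)])
                for inst in ['0', '1', '2', '3', '4', '5']}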

    def test_passed_runhistory_no_insts(self):
        ''' test passed runhistory, without instances '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality'})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check that they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        for config in old_configs[:int(len(old_configs)/2)]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, seed=127)

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator.get_runs(configs, insts, repetitions=2,
                                       runhistory=old_rh)
        runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)
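
A runnable file assembled from these snippets would close with the usual unittest entry point:

if __name__ == '__main__':
    unittest.main()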