Example #1
    def test_passed_runhistory_deterministic(self):
        ''' test if passed runhistory is in resulting runhistory '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'deterministic': True
                        })
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check if they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory()
        for config in old_configs[:len(old_configs) // 2]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator._get_runs(configs,
                                        insts,
                                        repetitions=2,
                                        runhistory=old_rh)
        runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)
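What the assertions encode: _get_runs returns a pair whose first element is the list of runs still to execute and whose second is a runhistory holding the results recovered from the passed-in runhistory, so each recovered run shrinks the to-do list by one. A minimal sketch of that split, as an illustrative mock rather than the SMAC implementation:

    from collections import namedtuple

    # Illustrative mock, not the SMAC implementation.
    Run = namedtuple('Run', ['config', 'inst', 'seed', 'inst_specs'])

    def split_runs(planned, old_results):
        """Split planned runs into 'still to do' and 'recovered'.

        old_results maps (config, instance) to a cost; for deterministic
        runs that pair is enough to identify a result.
        """
        to_do, recovered = [], {}
        for run in planned:
            key = (run.config, run.inst)
            if key in old_results:
                recovered[key] = old_results[key]  # reuse, do not re-run
            else:
                to_do.append(run)
        return to_do, recovered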
Example #2
    def test_passed_runhistory(self):
        ''' test if passed runhistory is in resulting runhistory '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'test_insts': self.test_insts
                        })
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check if they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        seeds = [127] * (len(old_configs) // 2)
        seeds[-1] = 126  # Test instance_seed-structure in validation
        for config, seed in zip(old_configs[:len(old_configs) // 2], seeds):
            old_rh.add(config, 1, 1, StatusType.SUCCESS,
                       instance_id='0', seed=seed)

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator._get_runs(configs,
                                        insts,
                                        repetitions=2,
                                        runhistory=old_rh)
        runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)
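The two seed values (127 for all but the last config, 126 for the last) exercise the per-instance seed bookkeeping mentioned in the comment. A hypothetical reconstruction of that structure, assuming the validator groups old runs by instance and then by seed:

    # Hypothetical sketch of the instance -> seeds structure that the
    # differing last seed (126 vs. 127) is meant to exercise.
    old_runs = [('0', 127), ('0', 127), ('0', 126)]
    instance_seeds = {}
    for inst, seed in old_runs:
        instance_seeds.setdefault(inst, set()).add(seed)
    print(instance_seeds)  # {'0': {126, 127}}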
Example #3
    def test_get_runs(self):
        ''' test if the runs are generated as expected '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'train_insts': self.train_insts,
                                     'test_insts': self.test_insts})
        scen.instance_specific = self.inst_specs

        validator = Validator(scen, self.trajectory, self.rng)
        # Get multiple configs
        self.maxDiff = None
        expected = [_Run(config='config1', inst='3', seed=1608637542, inst_specs='three'),
                    _Run(config='config2', inst='3', seed=1608637542, inst_specs='three'),
                    _Run(config='config1', inst='3', seed=1273642419, inst_specs='three'),
                    _Run(config='config2', inst='3', seed=1273642419, inst_specs='three'),
                    _Run(config='config1', inst='4', seed=1935803228, inst_specs='four'),
                    _Run(config='config2', inst='4', seed=1935803228, inst_specs='four'),
                    _Run(config='config1', inst='4', seed=787846414, inst_specs='four'),
                    _Run(config='config2', inst='4', seed=787846414, inst_specs='four'),
                    _Run(config='config1', inst='5', seed=996406378, inst_specs='five'),
                    _Run(config='config2', inst='5', seed=996406378, inst_specs='five'),
                    _Run(config='config1', inst='5', seed=1201263687, inst_specs='five'),
                    _Run(config='config2', inst='5', seed=1201263687, inst_specs='five')]

        runs = validator._get_runs(['config1', 'config2'], scen.test_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Only train
        expected = [_Run(config='config1', inst='0', seed=423734972, inst_specs='null'),
                    _Run(config='config1', inst='0', seed=415968276, inst_specs='null'),
                    _Run(config='config1', inst='1', seed=670094950, inst_specs='one'),
                    _Run(config='config1', inst='1', seed=1914837113, inst_specs='one'),
                    _Run(config='config1', inst='2', seed=669991378, inst_specs='two'),
                    _Run(config='config1', inst='2', seed=429389014, inst_specs='two')]

        runs = validator._get_runs(['config1'], scen.train_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Test and train
        expected = [_Run(config='config1', inst='0', seed=249467210, inst_specs='null'),
                    _Run(config='config1', inst='1', seed=1972458954, inst_specs='one'),
                    _Run(config='config1', inst='2', seed=1572714583, inst_specs='two'),
                    _Run(config='config1', inst='3', seed=1433267572, inst_specs='three'),
                    _Run(config='config1', inst='4', seed=434285667, inst_specs='four'),
                    _Run(config='config1', inst='5', seed=613608295, inst_specs='five')]

        # Concatenate rather than extend, to avoid mutating the fixture list
        insts = self.train_insts + self.test_insts
        runs = validator._get_runs(['config1'], insts, repetitions=1)
        self.assertEqual(runs[0], expected)
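The expected seeds follow a clear pattern: one RNG draw per (instance, repetition) pair, shared by every config. A small sketch of that pattern; the concrete seed values depend on how self.rng is seeded in the test fixture, so the seed below is only illustrative:

    import numpy as np

    rng = np.random.RandomState(1)  # illustrative seed, not the fixture's
    for inst in ['3', '4', '5']:
        for rep in range(2):
            seed = rng.randint(0, 2 ** 31 - 1)  # one draw per (inst, rep)
            print('instance %s, repetition %d: seed %d' % (inst, rep, seed))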
Example #4
    def test_get_runs_capped(self):
        ''' test if capped, crashed and aborted runs are ignored
            during rh-recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'instances': ['0']})

        validator = Validator(scen, self.trajectory, self.rng)

        # Get runhistory
        old_configs = [Configuration(scen.cs, values={'x1': i, 'x2': i}) for i in range(1, 7)]
        old_rh = RunHistory()
        old_rh.add(old_configs[0], 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
        old_rh.add(old_configs[1], 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[2], 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
        old_rh.add(old_configs[3], 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add(old_configs[4], 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[5], 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Get multiple configs
        expected = [_Run(inst_specs='0', seed=0, inst='0', config=old_configs[2]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[3]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[5])]

        runs = validator._get_runs(old_configs, ['0'], repetitions=1, runhistory=old_rh)
        self.assertEqual(runs[0], expected)
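The expected list is the complement of the trusted results: SUCCESS, TIMEOUT and MEMOUT entries are recovered from the old runhistory, while CRASHED, ABORT and CAPPED entries are ignored, so only those three configs are scheduled again. As an illustrative filter (not the SMAC implementation):

    # Statuses whose old results are ignored during recovery, so their
    # configs must be validated again.
    statuses = ['SUCCESS', 'TIMEOUT', 'CRASHED', 'ABORT', 'MEMOUT', 'CAPPED']
    ignored = {'CRASHED', 'ABORT', 'CAPPED'}
    to_rerun = [i for i, s in enumerate(statuses) if s in ignored]
    print(to_rerun)  # [2, 3, 5] -- matches old_configs[2], [3] and [5]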
Example #5
    def test_passed_runhistory_no_insts(self):
        ''' test passed runhistory, without instances '''
        scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check if they are processed correctly
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        for config in old_configs[:len(old_configs) // 2]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, seed=127)

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator._get_runs(configs,
                                        insts,
                                        repetitions=2,
                                        runhistory=old_rh)
        runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)
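As in Examples #1 and #2, four runs are recovered and the to-do list shrinks by four. Since this scenario defines no instances, the old entries are presumably matched on configuration and seed alone, which is why instance_id is omitted from old_rh.add here.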
Example #6
    def test_get_runs_capped(self):
        ''' test if capped, crashed and aborted runs are ignored
            during rh-recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'instances': ['0']
                        })

        validator = Validator(scen, self.trajectory, self.rng)

        # Get runhistory
        old_configs = ['config1', 'config2', 'config3',
                       'config4', 'config5', 'config6']
        old_rh = RunHistory(average_cost)
        old_rh.add('config1', 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
        old_rh.add('config2', 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
        old_rh.add('config3', 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
        old_rh.add('config4', 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add('config5', 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add('config6', 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Get multiple configs
        expected = [_Run(inst_specs='0', seed=0, inst='0', config='config3'),
                    _Run(inst_specs='0', seed=0, inst='0', config='config4'),
                    _Run(inst_specs='0', seed=0, inst='0', config='config6')]

        runs = validator._get_runs(old_configs, ['0'], repetitions=1, runhistory=old_rh)
        self.assertEqual(runs[0], expected)