Example #1
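The snippets below assume imports along these lines; the exact module
paths vary across SMAC versions, so treat them as an assumption:

    from ConfigSpace import Configuration
    from smac.runhistory.runhistory import RunHistory
    from smac.scenario.scenario import Scenario
    from smac.tae.execute_ta_run import StatusType
    from smac.utils.validate import Validator, _Run
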
    def test_get_runs_capped(self):
        ''' test that capped, crashed and aborted runs are ignored
            during runhistory recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'instances': ['0']})

        validator = Validator(scen, self.trajectory, self.rng)

        # Build an old runhistory with one run per status type
        old_configs = [Configuration(scen.cs, values={'x1': i, 'x2': i}) for i in range(1, 7)]
        old_rh = RunHistory()
        old_rh.add(old_configs[0], 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
        old_rh.add(old_configs[1], 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[2], 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
        old_rh.add(old_configs[3], 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add(old_configs[4], 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[5], 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Only the crashed, aborted and capped entries are distrusted,
        # so only those runs are expected to be scheduled again
        expected = [_Run(inst_specs='0', seed=0, inst='0', config=old_configs[2]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[3]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[5])]

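        # _get_runs is assumed to return the pending runs as the first
        # element of its result, hence runs[0] below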
        runs = validator._get_runs(old_configs, ['0'], repetitions=1, runhistory=old_rh)
        self.assertEqual(runs[0], expected)
Example #2
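This variant exercises the same behaviour against what appears to be an
older RunHistory signature: the constructor takes an aggregation function,
and the configurations are plain placeholder strings rather than
Configuration objects.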
    def test_get_runs_capped(self):
        ''' test that capped, crashed and aborted runs are ignored
            during runhistory recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'instances': ['0']
                        })

        validator = Validator(scen, self.trajectory, self.rng)

        # Build an old runhistory with one run per status type
        old_configs = [
            'config1', 'config2', 'config3', 'config4', 'config5', 'config6'
        ]
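        # NOTE: `average_cost` is assumed to come from the legacy
        # smac.optimizer.objective module; older RunHistory versions
        # required an aggregation function at construction time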
        old_rh = RunHistory(average_cost)
        old_rh.add('config1', 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
        old_rh.add('config2', 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
        old_rh.add('config3', 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
        old_rh.add('config4', 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add('config5', 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add('config6', 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Only the crashed, aborted and capped entries are distrusted,
        # so only those runs are expected to be scheduled again
        expected = [
            _Run(inst_specs='0', seed=0, inst='0', config='config3'),
            _Run(inst_specs='0', seed=0, inst='0', config='config4'),
            _Run(inst_specs='0', seed=0, inst='0', config='config6')
        ]

        runs = validator._get_runs(old_configs, ['0'],
                                   repetitions=1,
                                   runhistory=old_rh)
        self.assertEqual(runs[0], expected)
Example #3
    def test_get_runs(self):
        ''' test if the runs are generated as expected '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'test_insts': self.test_insts
                        })
        scen.instance_specific = self.inst_specs

        validator = Validator(scen, self.trajectory, self.rng)
        # Get multiple configs
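        # Disable unittest's diff truncation, since the expected list is long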
        self.maxDiff = None
        expected = [
            _Run(config='config1', inst='3', seed=1608637542, inst_specs='three'),
            _Run(config='config2', inst='3', seed=1608637542, inst_specs='three'),
            _Run(config='config1', inst='3', seed=1273642419, inst_specs='three'),
            _Run(config='config2', inst='3', seed=1273642419, inst_specs='three'),
            _Run(config='config1', inst='4', seed=1935803228, inst_specs='four'),
            _Run(config='config2', inst='4', seed=1935803228, inst_specs='four'),
            _Run(config='config1', inst='4', seed=787846414, inst_specs='four'),
            _Run(config='config2', inst='4', seed=787846414, inst_specs='four'),
            _Run(config='config1', inst='5', seed=996406378, inst_specs='five'),
            _Run(config='config2', inst='5', seed=996406378, inst_specs='five'),
            _Run(config='config1', inst='5', seed=1201263687, inst_specs='five'),
            _Run(config='config2', inst='5', seed=1201263687, inst_specs='five')
        ]

        runs = validator._get_runs(['config1', 'config2'],
                                   scen.test_insts,
                                   repetitions=2)
        self.assertEqual(runs[0], expected)

        # Only train
        expected = [
            _Run(config='config1', inst='0', seed=423734972, inst_specs='null'),
            _Run(config='config1', inst='0', seed=415968276, inst_specs='null'),
            _Run(config='config1', inst='1', seed=670094950, inst_specs='one'),
            _Run(config='config1', inst='1', seed=1914837113, inst_specs='one'),
            _Run(config='config1', inst='2', seed=669991378, inst_specs='two'),
            _Run(config='config1', inst='2', seed=429389014, inst_specs='two')
        ]

        runs = validator._get_runs(['config1'],
                                   scen.train_insts,
                                   repetitions=2)
        self.assertEqual(runs[0], expected)

        # Test and train
        expected = [
            _Run(config='config1', inst='0', seed=249467210, inst_specs='null'),
            _Run(config='config1', inst='1', seed=1972458954, inst_specs='one'),
            _Run(config='config1', inst='2', seed=1572714583, inst_specs='two'),
            _Run(config='config1', inst='3', seed=1433267572, inst_specs='three'),
            _Run(config='config1', inst='4', seed=434285667, inst_specs='four'),
            _Run(config='config1', inst='5', seed=613608295, inst_specs='five')
        ]

        # Concatenate instead of extending in place, so the fixture's
        # train_insts list is not mutated by this test
        insts = self.train_insts + self.test_insts
        runs = validator._get_runs(['config1'], insts, repetitions=1)
        self.assertEqual(runs[0], expected)
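
Note: all three tests rely on a shared unittest fixture (self.scen_fn,
self.trajectory, self.rng, self.train_insts, self.test_insts,
self.inst_specs). A minimal sketch of a compatible setUp follows; the
concrete paths and seed value are illustrative assumptions, not taken from
the examples above, while the instance lists and specifics mirror the
expected runs in Example #3:

    import numpy as np

    def setUp(self):
        # Illustrative fixture values -- adjust paths to your own setup
        self.rng = np.random.RandomState(seed=42)  # assumed seed
        self.scen_fn = 'test/test_files/validation/scenario.txt'  # assumed path
        self.trajectory = []  # these tests never replay a trajectory
        self.train_insts = ['0', '1', '2']
        self.test_insts = ['3', '4', '5']
        # instance -> instance-specific string, matching inst_specs above
        self.inst_specs = {'0': 'null', '1': 'one', '2': 'two',
                           '3': 'three', '4': 'four', '5': 'five'}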