Example #1
 def test_pass_tae(self):
     scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
     tae = ExecuteTARunOld(ta=scen.ta, stats=self.stats)
     validator = Validator(scen, self.trajectory)
     rh_mock = mock.Mock()
     with mock.patch.object(
             Validator,
             "_validate_parallel",
             return_value=[
                 mock.MagicMock(),
                 mock.MagicMock(),
                 mock.MagicMock(),
                 mock.MagicMock()
             ],
     ) as validate_parallel_mock:
         with mock.patch.object(
                 Validator,
                 "_get_runs",
                 return_value=[[
                     mock.Mock(),
                     mock.Mock(),
                     mock.Mock(),
                     mock.Mock()
                 ], rh_mock],
         ):
             validator.validate(tae=tae)
             self.assertIs(validate_parallel_mock.call_args[0][0], tae)
             self.assertEqual(rh_mock.add.call_count, 4)
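
The test above only checks that a user-supplied TAE is handed through to _validate_parallel. For orientation, here is a minimal non-mocked sketch of the same call pattern; it is an assumption rather than code from any of the listed projects, the file paths are placeholders, and exact signatures (for instance the Validator constructor) differ between SMAC3 versions.

    # Hedged sketch of the call pattern exercised by the test above
    # (placeholder paths; argument details vary across SMAC3 versions).
    from smac.scenario.scenario import Scenario
    from smac.tae.execute_ta_run_old import ExecuteTARunOld
    from smac.utils.io.traj_logging import TrajLogger
    from smac.utils.validate import Validator

    scen = Scenario("scenario.txt", cmd_options={'run_obj': 'quality'})
    trajectory = TrajLogger.read_traj_aclib_format(
        fn="smac-output/run_1/traj_aclib2.json", cs=scen.cs)

    tae = ExecuteTARunOld(ta=scen.ta)        # re-runs the real target algorithm
    validator = Validator(scen, trajectory, rng=42)
    # Validate default and incumbent configurations on the training instances
    rh = validator.validate(config_mode='def+inc', instance_mode='train',
                            repetitions=1, tae=tae)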
Example #2
File: smbo.py Project: midasc/SMAC3
    def validate(self, config_mode='inc', instance_mode='train+test',
                 repetitions=1, n_jobs=-1, backend='threading'):
        """Create validator-object and run validation, using
        scenario-information, runhistory from smbo and tae_runner from intensify

        Parameters
        ----------
        config_mode: string
            what configurations to validate
            from [def, inc, def+inc, time, all], time means evaluation at
            timesteps 2^-4, 2^-3, 2^-2, 2^-1, 2^0, 2^1, ...
        instance_mode: string
            what instances to use for validation, from [train, test, train+test]
        repetitions: int
            number of repetitions for nondeterministic algorithms (for
            deterministic algorithms this is fixed to 1)
        n_jobs: int
            number of parallel processes used by joblib

        Returns
        -------
        runhistory: RunHistory
            runhistory containing all specified runs
        """
        traj_fn = os.path.join(self.scenario.output_dir, "traj_aclib2.json")
        trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=self.scenario.cs)
        new_rh_path = os.path.join(self.scenario.output_dir, "validated_runhistory.json")

        validator = Validator(self.scenario, trajectory, new_rh_path, self.rng)
        new_rh = validator.validate(config_mode, instance_mode, repetitions, n_jobs,
                                    backend, self.runhistory,
                                    self.intensifier.tae_runner)
        return new_rh
Example #3
 def test_parallel(self):
     ''' test parallel '''
     scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
     validator = Validator(scen, self.trajectory, self.rng)
     validator.validate(config_mode='all',
                        instance_mode='train+test',
                        n_jobs=-1)
Example #4
    def test_get_runs_capped(self):
        ''' test if capped, crashed and aborted runs are ignored
            during rh-recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'instances': ['0']})

        validator = Validator(scen, self.trajectory, self.rng)

        # Get runhistory
        old_configs = [Configuration(scen.cs, values={'x1': i, 'x2': i}) for i in range(1, 7)]
        old_rh = RunHistory()
        old_rh.add(old_configs[0], 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
        old_rh.add(old_configs[1], 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[2], 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
        old_rh.add(old_configs[3], 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add(old_configs[4], 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[5], 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Get multiple configs
        expected = [_Run(inst_specs='0', seed=0, inst='0', config=old_configs[2]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[3]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[5])]

        runs = validator._get_runs(old_configs, ['0'], repetitions=1, runhistory=old_rh)
        self.assertEqual(runs[0], expected)
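
Both versions of this test (see also Example #20 below) pin down the recovery rule itself: runs that ended as SUCCESS, TIMEOUT or MEMOUT are taken over from the old runhistory, while CRASHED, ABORT and CAPPED entries are scheduled to be run again. A self-contained sketch of that rule follows; it is an illustration only, not SMAC's actual implementation.

    # Illustration of the recovery rule checked above (not SMAC's code):
    # reusable statuses keep their old result, everything else is re-run.
    REUSABLE = {'SUCCESS', 'TIMEOUT', 'MEMOUT'}

    def needs_rerun(status: str) -> bool:
        return status not in REUSABLE

    for status in ('SUCCESS', 'TIMEOUT', 'CRASHED', 'ABORT', 'MEMOUT', 'CAPPED'):
        print(status, '-> re-run' if needs_rerun(status) else '-> reuse old result')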
Example #5
    def test_inst_no_feat(self):
        ''' test if scenarios are treated correctly if no features are
        specified.'''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'test_insts': self.test_insts
                        })
        self.assertTrue(scen.feature_array is None)
        self.assertEqual(len(scen.feature_dict), 0)

        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check if they are correctly processed
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory()
        for config in old_configs[:int(len(old_configs) / 2)]:
            old_rh.add(config,
                       1,
                       1,
                       StatusType.SUCCESS,
                       instance_id='0',
                       seed=127)
        rh = validator.validate_epm('all', 'train+test', 1, old_rh)
        self.assertEqual(len(old_rh.get_all_configs()), 4)
        self.assertEqual(len(rh.get_all_configs()), 10)
Example #6
def optimize(optimizer, scenario, trajectory=None):
    then = time.time()
    best_conf = optimizer.optimize()
    print(best_conf)
    print('training   time:', time.time() - then)

    traj_logger = TrajLogger(None, Stats(scenario))
    trajectory = trajectory or traj_logger.read_traj_aclib_format(
        "smac-output/run_1/traj_aclib2.json", scenario.cs)
    validator = Validator(scenario, trajectory, rng=np.random.RandomState(42))

    # evaluate on test instances and calculate cpu time
    then = time.time()
    runhis_dev = validator.validate(config_mode="def", instance_mode="test")
    runhis_inc = validator.validate(config_mode="inc", instance_mode="test")
    print('validating time:', time.time() - then)

    default_conf = runhis_dev.ids_config[1]
    incumbent_conf = runhis_inc.ids_config[1]
    dev_vals = get_instance_costs_for_config(runhis_dev, default_conf)
    inc_vals = get_instance_costs_for_config(runhis_inc, incumbent_conf)

    # ###### Filter runs for plotting #######
    dev_x = []
    inc_x = []
    for key in set(dev_vals.keys()) & set(inc_vals.keys()):
        dev_x.append(dev_vals[key])
        inc_x.append(inc_vals[key])

    # print(dev_vals)
    # print(inc_vals)
    print(dev_x)
    print(inc_x)

    print('PAR10:', np.mean(inc_x), '/', np.mean(dev_x))
    max_x = 1000.0
    par1er = lambda xx: np.mean([(x / 10 if x == max_x else x) for x in xx])
    print('PAR1 :', par1er(inc_x), '/', par1er(dev_x))
    to_counter = lambda xx: len([x for x in xx if x == max_x])
    print('TOs  :', to_counter(inc_x), '/', to_counter(dev_x))
    print('wins :', len([i for i in range(len(dev_x)) if dev_x[i] > inc_x[i]]),
          '/', len(dev_x))

    fig, ax = plt.subplots()
    ax.scatter(dev_x, inc_x, marker="x")
    ax.set_xlabel("Default Configuration")
    ax.set_ylabel("Incumbent Configuration")

    lims = [
        np.min([ax.get_xlim(), ax.get_ylim()]),
        np.max([ax.get_xlim(), ax.get_ylim()])
    ]
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    # ax.set_xlim(lims)
    # ax.set_ylim(lims)

    ax.set_xscale('log')
    ax.set_yscale('log')

    fig.savefig("fig-smac.png")
Example #7
 def test_parallel(self):
     ''' test parallel '''
     validator = Validator(self.scen, self.trajectory, self.output_rh,
                           self.rng)
     validator.validate(config_mode='all',
                        instance_mode='train+test',
                        n_jobs=-1)
Example #8
 def test_pass_tae(self):
     scen = Scenario(self.scen_fn, cmd_args={'run_obj':'quality'})
     tae = ExecuteTARunOld(ta=scen.ta)
     validator = Validator(scen, self.trajectory)
     with mock.patch.object(Validator, "_validate_parallel",
                            return_value=[(1,2,3,4)]):
         self.assertEqual(1, len(validator.validate(tae=tae).data))
Example #9
    def _init_pimp_and_validator(self, rh, alternative_output_dir=None):
        """Create ParameterImportance-object and use it's trained model for  validation and further predictions
        We pass validated runhistory, so that the returned model will be based on as much information as possible

        Parameters
        ----------
        rh: RunHistory
            runhistory used to build EPM
        alternative_output_dir: str
            e.g. for budgets we want pimp to use an alternative output-dir (subfolders per budget)
        """
        self.logger.debug(
            "Using '%s' as output for pimp", alternative_output_dir
            if alternative_output_dir else self.output_dir)
        self.pimp = Importance(
            scenario=copy.deepcopy(self.scenario),
            runhistory=rh,
            incumbent=self.default,  # Inject correct incumbent later
            parameters_to_evaluate=4,
            save_folder=alternative_output_dir
            if alternative_output_dir else self.output_dir,
            seed=self.rng.randint(1, 100000),
            max_sample_size=self.pimp_max_samples,
            fANOVA_pairwise=self.fanova_pairwise,
            preprocess=False)
        self.model = self.pimp.model

        # Validator (initialize without trajectory)
        self.validator = Validator(self.scenario, None, None)
        self.validator.epm = self.model
Example #10
    def _init_pimp_and_validator(
        self,
        alternative_output_dir=None,
    ):
        """Create ParameterImportance-object and use it's trained model for validation and further predictions.
        We pass a combined (original + validated) runhistory, so that the returned model will be based on as much
        information as possible

        Parameters
        ----------
        alternative_output_dir: str
            e.g. for budgets we want pimp to use an alternative output-dir (subfolders per budget)
        """
        self.logger.debug(
            "Using '%s' as output for pimp", alternative_output_dir
            if alternative_output_dir else self.output_dir)
        self.pimp = Importance(
            scenario=copy.deepcopy(self.scenario),
            runhistory=self.combined_runhistory,
            incumbent=self.incumbent if self.incumbent else self.default,
            save_folder=alternative_output_dir
            if alternative_output_dir is not None else self.output_dir,
            seed=self.rng.randint(1, 100000),
            max_sample_size=self.options['fANOVA'].getint("pimp_max_samples"),
            fANOVA_pairwise=self.options['fANOVA'].getboolean(
                "fanova_pairwise"),
            preprocess=False,
            verbose=1,  # disable progressbars
        )
        # Validator (initialize without trajectory)
        self.validator = Validator(self.scenario, None, None)
        self.validator.epm = self.pimp.model
Example #11
File: smbo.py Project: maxc01/SMAC3
    def validate(self,
                 config_mode: typing.Union[str, typing.List[Configuration]] = 'inc',
                 instance_mode: typing.Union[str, typing.List[str]] = 'train+test',
                 repetitions: int = 1,
                 use_epm: bool = False,
                 n_jobs: int = -1,
                 backend: str = 'threading') -> RunHistory:
        """Create validator-object and run validation, using
        scenario-information, runhistory from smbo and tae_runner from intensify

        Parameters
        ----------
        config_mode: str or list<Configuration>
            string or directly a list of Configuration
            str from [def, inc, def+inc, wallclock_time, cpu_time, all]
            time evaluates at cpu- or wallclock-timesteps of:
            [max_time/2^0, max_time/2^1, max_time/2^2, ..., default]
            with max_time being the highest recorded time
        instance_mode: string
            what instances to use for validation, from [train, test, train+test]
        repetitions: int
            number of repetitions for nondeterministic algorithms (for
            deterministic algorithms this is fixed to 1)
        use_epm: bool
            whether to use an EPM instead of evaluating all runs with the TAE
        n_jobs: int
            number of parallel processes used by joblib

        Returns
        -------
        runhistory: RunHistory
            runhistory containing all specified runs
        """
        if isinstance(config_mode, str):
            assert self.scenario.output_dir_for_this_run is not None  # Please mypy
            traj_fn = os.path.join(self.scenario.output_dir_for_this_run, "traj_aclib2.json")
            trajectory = (
                TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=self.config_space)
            )  # type: typing.Optional[typing.List[typing.Dict[str, typing.Union[float, int, Configuration]]]]
        else:
            trajectory = None
        if self.scenario.output_dir_for_this_run:
            new_rh_path = os.path.join(self.scenario.output_dir_for_this_run, "validated_runhistory.json")  # type: typing.Optional[str] # noqa E501
        else:
            new_rh_path = None

        validator = Validator(self.scenario, trajectory, self.rng)
        if use_epm:
            new_rh = validator.validate_epm(config_mode=config_mode,
                                            instance_mode=instance_mode,
                                            repetitions=repetitions,
                                            runhistory=self.runhistory,
                                            output_fn=new_rh_path)
        else:
            new_rh = validator.validate(config_mode, instance_mode, repetitions,
                                        n_jobs, backend, self.runhistory,
                                        self.intensifier.tae_runner,
                                        output_fn=new_rh_path)
        return new_rh
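
For orientation, a hedged usage sketch of this wrapper follows; it is not taken from either project. The smbo argument stands for an SMBO object like the one defining validate() above (for example the solver attribute of a SMAC facade), and the parameter values are illustrative assumptions.

    # Hedged sketch: driving both validation branches after a finished SMAC run.
    def validate_both_ways(smbo):
        # TAE-based validation: actually re-runs the target algorithm
        rh_tae = smbo.validate(config_mode='inc', instance_mode='test',
                               repetitions=3, use_epm=False, n_jobs=2)
        # EPM-based validation: predicts costs with the surrogate model instead
        rh_epm = smbo.validate(config_mode='wallclock_time',
                               instance_mode='train+test', use_epm=True)
        return rh_tae, rh_epm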
Example #12
 def test_rng(self):
     scen = Scenario(self.scen_fn, cmd_args={'run_obj':'quality'})
     validator = Validator(scen, self.trajectory, 42)
     self.assertTrue(isinstance(validator.rng, np.random.RandomState))
     validator = Validator(scen, self.trajectory)
     self.assertTrue(isinstance(validator.rng, np.random.RandomState))
     validator = Validator(scen, self.trajectory, np.random.RandomState())
     self.assertTrue(isinstance(validator.rng, np.random.RandomState))
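
This test asserts that the constructor accepts an int seed, None, or an existing numpy RandomState and always ends up with a RandomState on validator.rng. A generic, self-contained sketch of that normalization pattern is shown below; it is an illustration, not SMAC's actual helper.

    import numpy as np

    def to_random_state(rng=None):
        # None -> fresh RandomState, int -> seeded RandomState, RandomState -> unchanged
        if rng is None:
            return np.random.RandomState()
        if isinstance(rng, (int, np.integer)):
            return np.random.RandomState(seed=int(rng))
        if isinstance(rng, np.random.RandomState):
            return rng
        raise TypeError("rng must be None, an int seed or a numpy RandomState")

    assert isinstance(to_random_state(42), np.random.RandomState)
    assert isinstance(to_random_state(), np.random.RandomState)
    assert isinstance(to_random_state(np.random.RandomState()), np.random.RandomState)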
Example #13
 def test_no_feature_dict(self):
     scen = Scenario(self.scen_fn, cmd_args={'run_obj':'quality'})
     scen.feature_array = None
     validator = Validator(scen, self.trajectory)
     old_rh = RunHistory(average_cost)
     for config in [e["incumbent"] for e in self.trajectory]:
         old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                    seed=127)
     validator.validate_epm(runhistory=old_rh)
Example #14
 def test_validate_no_insts(self):
     ''' no instances '''
     scen = Scenario(self.scen_fn,
                     cmd_args={'run_obj':'quality'})
     validator = Validator(scen, self.trajectory, self.rng)
     rh = validator.validate(config_mode='def+inc', instance_mode='train',
                             repetitions=3, output_fn=self.output_rh)
     self.assertEqual(len(rh.get_all_configs()), 2)
     self.assertEqual(sum([len(rh.get_runs_for_config(c)) for c in
                           rh.get_all_configs()]), 6)
Example #15
 def test_objective_runtime(self):
     ''' test if everything is ok with objective runtime (imputing!) '''
     scen = Scenario(self.scen_fn, cmd_args={'run_obj' : 'runtime',
                                             'cutoff_time' : 5})
     validator = Validator(scen, self.trajectory, self.rng)
     old_configs = [entry["incumbent"] for entry in self.trajectory]
     old_rh = RunHistory(average_cost)
     for config in old_configs[:int(len(old_configs)/2)]:
         old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')
     validator.validate_epm('all', 'train', 1, old_rh)
Example #16
 def test_validate_no_insts(self):
     ''' no instances '''
     validator = Validator(self.scen, self.trajectory, self.output_rh,
                           self.rng)
     rh = validator.validate(config_mode='def+inc',
                             instance_mode='train',
                             repetitions=3)
     self.assertEqual(len(rh.get_all_configs()), 2)
     self.assertEqual(
         sum([len(rh.get_runs_for_config(c))
              for c in rh.get_all_configs()]), 6)
Example #17
 def test_validate_deterministic(self):
     ''' deterministic ta '''
     self.scen.deterministic = True
     self.scen.train_insts = self.train_insts
     validator = Validator(self.scen, self.trajectory, self.output_rh,
                           self.rng)
     rh = validator.validate(config_mode='def+inc',
                             instance_mode='train',
                             repetitions=3)
     self.assertEqual(len(rh.get_all_configs()), 2)
     self.assertEqual(
         sum([len(rh.get_runs_for_config(c))
              for c in rh.get_all_configs()]), 6)
Example #18
 def test_validate_deterministic(self):
     ''' deterministic ta '''
     scen = Scenario(self.scen_fn,
                     cmd_args={'run_obj':'quality',
                               'instances' : self.train_insts,
                               'deterministic': True})
     scen.instance_specific = self.inst_specs
     validator = Validator(scen, self.trajectory, self.rng)
     rh = validator.validate(config_mode='def+inc',
                             instance_mode='train', repetitions=3)
     self.assertEqual(len(rh.get_all_configs()), 2)
     self.assertEqual(sum([len(rh.get_runs_for_config(c)) for c in
                           rh.get_all_configs()]), 6)
Example #19
    def test_get_runs(self):
        ''' test if the runs are generated as expected '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'train_insts': self.train_insts,
                                     'test_insts': self.test_insts})
        scen.instance_specific = self.inst_specs

        validator = Validator(scen, self.trajectory, self.rng)
        # Get multiple configs
        self.maxDiff = None
        expected = [_Run(config='config1', inst='3', seed=1608637542, inst_specs='three'),
                    _Run(config='config2', inst='3', seed=1608637542, inst_specs='three'),
                    _Run(config='config1', inst='3', seed=1273642419, inst_specs='three'),
                    _Run(config='config2', inst='3', seed=1273642419, inst_specs='three'),
                    _Run(config='config1', inst='4', seed=1935803228, inst_specs='four'),
                    _Run(config='config2', inst='4', seed=1935803228, inst_specs='four'),
                    _Run(config='config1', inst='4', seed=787846414, inst_specs='four'),
                    _Run(config='config2', inst='4', seed=787846414, inst_specs='four'),
                    _Run(config='config1', inst='5', seed=996406378, inst_specs='five'),
                    _Run(config='config2', inst='5', seed=996406378, inst_specs='five'),
                    _Run(config='config1', inst='5', seed=1201263687, inst_specs='five'),
                    _Run(config='config2', inst='5', seed=1201263687, inst_specs='five')]

        runs = validator._get_runs(['config1', 'config2'], scen.test_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Only train
        expected = [_Run(config='config1', inst='0', seed=423734972, inst_specs='null'),
                    _Run(config='config1', inst='0', seed=415968276, inst_specs='null'),
                    _Run(config='config1', inst='1', seed=670094950, inst_specs='one'),
                    _Run(config='config1', inst='1', seed=1914837113, inst_specs='one'),
                    _Run(config='config1', inst='2', seed=669991378, inst_specs='two'),
                    _Run(config='config1', inst='2', seed=429389014, inst_specs='two')]

        runs = validator._get_runs(['config1'], scen.train_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Test and train
        expected = [_Run(config='config1', inst='0', seed=249467210, inst_specs='null'),
                    _Run(config='config1', inst='1', seed=1972458954, inst_specs='one'),
                    _Run(config='config1', inst='2', seed=1572714583, inst_specs='two'),
                    _Run(config='config1', inst='3', seed=1433267572, inst_specs='three'),
                    _Run(config='config1', inst='4', seed=434285667, inst_specs='four'),
                    _Run(config='config1', inst='5', seed=613608295, inst_specs='five')]

        insts = self.train_insts
        insts.extend(self.test_insts)
        runs = validator._get_runs(['config1'], insts, repetitions=1)
        self.assertEqual(runs[0], expected)
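
The expected lists above encode the run layout: per instance, `repetitions` seeds are drawn, and every configuration shares each (instance, seed) pair, so configurations are compared on identical inputs. A rough, self-contained sketch of that layout follows; it is an illustration only and does not reproduce SMAC's exact seed values.

    import numpy as np
    from collections import namedtuple

    Run = namedtuple('Run', ['config', 'inst', 'seed'])

    def layout_runs(configs, instances, repetitions, rng):
        runs = []
        for inst in instances:
            for _ in range(repetitions):
                seed = rng.randint(0, 2 ** 31 - 1)   # one seed shared by all configs
                runs.extend(Run(c, inst, seed) for c in configs)
        return runs

    for run in layout_runs(['config1', 'config2'], ['3', '4'],
                           repetitions=2, rng=np.random.RandomState(42)):
        print(run)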
Example #20
    def test_get_runs_capped(self):
        ''' test if capped, crashed and aborted runs are ignored
            during rh-recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'instances': ['0']
                        })

        validator = Validator(scen, self.trajectory, self.rng)

        # Get runhistory
        old_configs = [
            'config1', 'config2', 'config3', 'config4', 'config5', 'config6'
        ]
        old_rh = RunHistory(average_cost)
        old_rh.add('config1',
                   1,
                   1,
                   StatusType.SUCCESS,
                   instance_id='0',
                   seed=0)
        old_rh.add('config2',
                   1,
                   1,
                   StatusType.TIMEOUT,
                   instance_id='0',
                   seed=0)
        old_rh.add('config3',
                   1,
                   1,
                   StatusType.CRASHED,
                   instance_id='0',
                   seed=0)
        old_rh.add('config4', 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add('config5', 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add('config6', 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Get multiple configs
        expected = [
            _Run(inst_specs='0', seed=0, inst='0', config='config3'),
            _Run(inst_specs='0', seed=0, inst='0', config='config4'),
            _Run(inst_specs='0', seed=0, inst='0', config='config6')
        ]

        runs = validator._get_runs(old_configs, ['0'],
                                   repetitions=1,
                                   runhistory=old_rh)
        self.assertEqual(runs[0], expected)
Example #21
    def test_get_runs(self):
        ''' test if the runs are generated as expected '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality',
                                  'instances' : self.train_insts,
                                  'test_instances': self.test_insts})
        scen.instance_specific = self.inst_specs

        validator = Validator(scen, self.trajectory, self.rng)
        # Get multiple configs
        expected = [Run(inst_specs='three', seed=1608637542, inst='3', config='config1'),
                    Run(inst_specs='three', seed=1608637542, inst='3', config='config2'),
                    Run(inst_specs='three', seed=1935803228, inst='3', config='config1'),
                    Run(inst_specs='three', seed=1935803228, inst='3', config='config2'),
                    Run(inst_specs='four',  seed=996406378, inst='4', config='config1'),
                    Run(inst_specs='four',  seed=996406378, inst='4', config='config2'),
                    Run(inst_specs='four',  seed=423734972,  inst='4', config='config1'),
                    Run(inst_specs='four',  seed=423734972,  inst='4', config='config2'),
                    Run(inst_specs='five',  seed=670094950,  inst='5', config='config1'),
                    Run(inst_specs='five',  seed=670094950,  inst='5', config='config2'),
                    Run(inst_specs='five',  seed=669991378, inst='5', config='config1'),
                    Run(inst_specs='five',  seed=669991378, inst='5', config='config2')]

        runs = validator.get_runs(['config1', 'config2'], scen.test_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Only train
        expected = [Run(inst_specs='null', seed=249467210,  inst='0', config='config1'),
                    Run(inst_specs='null', seed=1572714583,  inst='0', config='config1'),
                    Run(inst_specs='one',  seed=434285667,  inst='1', config='config1'),
                    Run(inst_specs='one',  seed=893664919, inst='1', config='config1'),
                    Run(inst_specs='two',  seed=88409749,  inst='2', config='config1'),
                    Run(inst_specs='two',  seed=2018247425,  inst='2', config='config1')]

        runs = validator.get_runs(['config1'], scen.train_insts, repetitions=2)
        self.assertEqual(runs[0], expected)

        # Test and train
        expected = [Run(inst='0', seed=1427830251,  config='config1', inst_specs='null' ),
                    Run(inst='1', seed=911989541, config='config1', inst_specs='one'  ),
                    Run(inst='2', seed=780932287, config='config1', inst_specs='two'  ),
                    Run(inst='3', seed=787716372, config='config1', inst_specs='three'),
                    Run(inst='4', seed=1306710475,  config='config1', inst_specs='four' ),
                    Run(inst='5', seed=106328085,  config='config1', inst_specs='five' )]
        insts = self.train_insts
        insts.extend(self.test_insts)
        runs = validator.get_runs(['config1'], insts, repetitions=1)
        self.assertEqual(runs[0], expected)
Example #22
 def test_validate_epm(self):
     ''' test using epm to validate '''
     scen = Scenario(self.scen_fn,
                     cmd_args={'run_obj':'quality',
                               'instances' : self.train_insts,
                               'test_instances': self.test_insts,
                               'features': self.feature_dict})
     scen.instance_specific = self.inst_specs
     validator = Validator(scen, self.trajectory, self.rng)
     # Add a few runs and check if they are correctly processed
     old_configs = [entry["incumbent"] for entry in self.trajectory]
     old_rh = RunHistory(average_cost)
     for config in old_configs[:int(len(old_configs)/2)]:
         old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                    seed=127)
     validator.validate_epm('all', 'train', 1, old_rh)
Example #23
 def test_epm_reuse_rf(self):
     """ if no runhistory is passed to epm, but there was a model trained
     before, that model should be reused! (if reuse_epm flag is set) """
     scen = Scenario(self.scen_fn, cmd_args={'run_obj':'quality'})
     scen.feature_array = None
     validator = Validator(scen, self.trajectory)
     old_rh = RunHistory(average_cost)
     for config in [e["incumbent"] for e in self.trajectory]:
         old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                    seed=127)
     self.assertTrue(isinstance(validator.validate_epm(runhistory=old_rh),
                                RunHistory))
     self.assertTrue(isinstance(validator.validate_epm(
                                 output_fn="test/test_files/validation/"),
                                 RunHistory))
     self.assertRaises(ValueError, validator.validate_epm, reuse_epm=False)
Example #24
 def test_validate_no_insts(self):
     ''' no instances '''
     scen = Scenario(self.scen_fn,
                     cmd_options={
                         'run_obj': 'quality',
                         'save-instantly': False,
                         'deterministic': False,
                     })
     validator = Validator(scen, self.trajectory, self.rng)
     rh = validator.validate(config_mode='def+inc',
                             instance_mode='train',
                             repetitions=3,
                             output_fn=self.output_rh)
     self.assertEqual(len(rh.get_all_configs()), 2)
     self.assertEqual(
         sum([
             len(rh.get_runs_for_config(c, only_max_observed_budget=True))
             for c in rh.get_all_configs()
         ]), 6)
Example #25
 def test_get_configs(self):
     scen = Scenario(self.scen_fn, cmd_args={'run_obj':'quality'})
     validator = Validator(scen, self.trajectory, self.rng)
     self.assertEqual(1, len(validator._get_configs("def")))
     self.assertEqual(1, len(validator._get_configs("inc")))
     self.assertEqual(2, len(validator._get_configs("def+inc")))
     self.assertEqual(7, len(validator._get_configs("wallclock_time")))
     self.assertEqual(8, len(validator._get_configs("cpu_time")))
     self.assertEqual(10, len(validator._get_configs("all")))
     # Using maxtime
     validator.scen.wallclock_limit = 65
     validator.scen.algo_runs_timelimit = 33
     self.assertEqual(8, len(validator._get_configs("wallclock_time")))
     self.assertEqual(9, len(validator._get_configs("cpu_time")))
     # Exceptions
     self.assertRaises(ValueError, validator._get_configs, "notanoption")
     self.assertRaises(ValueError, validator._get_instances, "notanoption")
Example #26
    def test_validate(self):
        ''' test validation '''
        self.scen.train_insts = self.train_insts
        self.scen.test_insts = self.test_insts
        validator = Validator(self.scen, self.trajectory, self.output_rh,
                              self.rng)
        # Test basic usage
        rh = validator.validate(config_mode='def',
                                instance_mode='test',
                                repetitions=3)
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])),
                         9)

        rh = validator.validate(config_mode='inc', instance_mode='train+test')
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])),
                         6)

        rh = validator.validate(config_mode='time', instance_mode='train')
        self.assertEqual(len(rh.get_all_configs()), 9)
        self.assertEqual(
            sum([len(rh.get_runs_for_config(c))
                 for c in rh.get_all_configs()]), 27)

        # Test with backend multiprocessing
        rh = validator.validate(config_mode='def',
                                instance_mode='test',
                                repetitions=3,
                                backend='multiprocessing')
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])),
                         9)
Example #27
    def test_passed_runhistory(self):
        ''' test if passed runhistory is in resulting runhistory '''
        self.scen.train_insts = self.train_insts
        self.scen.test_insts = self.test_insts
        validator = Validator(self.scen, self.trajectory, self.output_rh,
                              self.rng)
        # Add a few runs and check if they are correctly processed
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        for config in old_configs[:int(len(old_configs) / 2)]:
            old_rh.add(config,
                       1,
                       1,
                       StatusType.SUCCESS,
                       instance_id='0',
                       seed=127)

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator.get_runs(configs,
                                       insts,
                                       repetitions=2,
                                       runhistory=old_rh)
        runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh), len(runs_wo_rh) - 4)
Example #28
    def test_validate(self):
        ''' test validation '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality',
                                  'instances' : self.train_insts,
                                  'test_instances': self.test_insts})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Test basic usage
        rh = validator.validate(config_mode='def', instance_mode='test',
                                repetitions=3)
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 9)

        rh = validator.validate(config_mode='inc', instance_mode='train+test')
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 6)

        rh = validator.validate(config_mode='wallclock_time', instance_mode='train')
        self.assertEqual(len(rh.get_all_configs()), 7)
        self.assertEqual(sum([len(rh.get_runs_for_config(c)) for c in
                              rh.get_all_configs()]), 21)

        # Test with backend multiprocessing
        rh = validator.validate(config_mode='def', instance_mode='test',
                                repetitions=3, backend='multiprocessing')
        self.assertEqual(len(rh.get_all_configs()), 1)
        self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 9)
Example #29
    def test_passed_runhistory_deterministic(self):
        ''' test if passed runhistory is in resulting runhistory '''
        scen = Scenario(self.scen_fn,
                        cmd_options={
                            'run_obj': 'quality',
                            'train_insts': self.train_insts,
                            'deterministic': True
                        })
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check if they are correctly processed
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory()
        for config in old_configs[:int(len(old_configs) / 2)]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator._get_runs(configs,
                                        insts,
                                        repetitions=2,
                                        runhistory=old_rh)
        runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)
Example #30
    def test_passed_runhistory(self):
        ''' test if passed runhistory is in resulting runhistory '''
        scen = Scenario(self.scen_fn,
                        cmd_args={'run_obj':'quality',
                                  'instances' : self.train_insts,
                                  'test_instances': self.test_insts})
        scen.instance_specific = self.inst_specs
        validator = Validator(scen, self.trajectory, self.rng)
        # Add a few runs and check if they are correctly processed
        old_configs = [entry["incumbent"] for entry in self.trajectory]
        old_rh = RunHistory(average_cost)
        seeds = [127 for i in range(int(len(old_configs)/2))]
        seeds[-1] = 126  # Test instance_seed-structure in validation
        for config in old_configs[:int(len(old_configs)/2)]:
            old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                       seed=seeds[old_configs.index(config)])

        configs = validator._get_configs('all')
        insts = validator._get_instances('train')
        runs_w_rh = validator.get_runs(configs, insts, repetitions=2,
                                       runhistory=old_rh)
        runs_wo_rh = validator.get_runs(configs, insts, repetitions=2)
        self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
        self.assertEqual(len(runs_w_rh[1].data), 4)
        self.assertEqual(len(runs_wo_rh[1].data), 0)