Example #1
    def test_datasets(self):
        for getter in get_dataset_getters():
            testname = '%s_%s' % (os.path.basename(__file__).replace(
                '.pyc', '').replace('.py', ''), getter.__name__)
            with self.subTest(testname):
                D = getter()
                output_directory = os.path.join(os.path.dirname(__file__),
                                                '.%s' % testname)
                self.output_directories.append(output_directory)
                # Full cross-validation: each run must yield a finite loss and
                # set the indices of all ten folds.
                err = np.zeros([N_TEST_RUNS])
                for i in range(N_TEST_RUNS):
                    D_ = copy.deepcopy(D)
                    evaluator = CVEvaluator(D_, output_directory, None)

                    err[i] = evaluator.fit_predict_and_loss()[0]

                    self.assertTrue(np.isfinite(err[i]))
                    self.assertEqual(err[i], 1.0)
                    for model_idx in range(10):
                        indices = evaluator.indices[model_idx]
                        self.assertIsNotNone(indices)

                    # Partial cross-validation: only folds 0-4 are fitted, so
                    # the indices of folds 5-9 must remain None.
                    D_ = copy.deepcopy(D)
                    evaluator = CVEvaluator(D_, output_directory, None)
                    for j in range(5):
                        evaluator.partial_fit_predict_and_loss(j)
                        indices = evaluator.indices[j]
                        self.assertIsNotNone(indices)
                    for j in range(5, 10):
                        indices = evaluator.indices[j]
                        self.assertIsNone(indices)
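This test drives the CVEvaluator over every dataset returned by get_dataset_getters(): a full run of fit_predict_and_loss() must yield a finite loss (here exactly 1.0) and set the indices of all ten folds, while a second evaluator that only calls partial_fit_predict_and_loss() for folds 0-4 must leave the indices of folds 5-9 as None. The per-subtest output directories are collected in self.output_directories, presumably so the test class can delete them afterwards; a minimal cleanup sketch, assuming a standard unittest tearDown on the same class, could look like this:

    def tearDown(self):
        # Hypothetical cleanup: remove the per-subtest output directories
        # collected in test_datasets().
        import shutil
        for output_directory in self.output_directories:
            shutil.rmtree(output_directory, ignore_errors=True)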
Example #2
def make_mode_cv(data, seed, configuration, num_run, folds):
    global evaluator
    evaluator = CVEvaluator(data,
                            configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            **_get_base_dict())
    evaluator.fit()
    # Ignore SIGTERM (signal 15) from here on so finish_up() can complete.
    signal.signal(15, empty_signal_handler)
    evaluator.finish_up()
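Here the evaluator is built for full cross-validation (cv_folds=folds) and fitted, and finish_up() then writes out the results; an empty SIGTERM handler (signal 15) is installed in between so a termination signal no longer aborts the process during that last step. Both empty_signal_handler and _get_base_dict() are defined elsewhere in the module; a minimal sketch of the handler, consistent only with how it is used here, might be:

def empty_signal_handler(signum, frame):
    # Deliberately ignore the signal so that finish_up() can run to completion.
    pass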
Example #3
def make_mode_cv(data, seed, configuration, num_run, folds, output_dir):
    global evaluator
    evaluator = CVEvaluator(data,
                            output_dir,
                            configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            all_scoring_functions=False,
                            **_get_base_dict())
    loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()
    evaluator.finish_up(loss, opt_pred, valid_pred, test_pred)
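This variant additionally takes an output_dir, disables all_scoring_functions, and passes the loss together with the optimization, validation, and test predictions returned by fit_predict_and_loss() on to finish_up(), instead of calling finish_up() without arguments as in Example #2.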
Example #4
def make_mode_partial_cv(data, seed, configuration, num_run, metric, fold,
                         folds, output_dir):
    global evaluator
    evaluator = CVEvaluator(data,
                            output_dir,
                            configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            all_scoring_functions=False,
                            **_get_base_dict())

    loss, opt_pred, valid_pred, test_pred = \
        evaluator.partial_fit_predict_and_loss(fold)
    duration = time.time() - evaluator.starttime

    additional_run_info = 'duration: ' + str(duration)

    print(metric, loss, additional_run_info)
    print('Result for ParamILS: %s, %f, 1, %f, %d, %s' %
          ('SAT', abs(duration), loss, evaluator.seed, additional_run_info))
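Unlike the full-CV functions above, this one evaluates a single fold via partial_fit_predict_and_loss(fold), computes the wall-clock duration from evaluator.starttime, and prints the result following the ParamILS result-line convention: status ('SAT'), runtime, runlength (fixed to 1 here), quality (the loss), seed, and additional run information.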
Example #5
def make_mode_partial_cv(data, seed, configuration, num_run, metric, fold,
                         folds):
    global evaluator
    evaluator = CVEvaluator(data,
                            configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            **_get_base_dict())
    evaluator.partial_fit(fold)
    # Ignore SIGTERM (signal 15) before the predictions are scored.
    signal.signal(15, empty_signal_handler)
    scores, _, _, _ = evaluator.predict()
    duration = time.time() - evaluator.starttime

    score = scores[metric]
    additional_run_info = ';'.join(
        ['%s: %s' % (m_, value) for m_, value in scores.items()])
    additional_run_info += ';' + 'duration: ' + str(duration)

    print(metric, score, additional_run_info)
    print('Result for ParamILS: %s, %f, 1, %f, %d, %s' %
          ('SAT', abs(duration), score, evaluator.seed, additional_run_info))
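This partial-CV variant instead fits one fold with partial_fit(fold), scores the predictions with predict(), and reports the value of the requested metric; the additional run information string concatenates every metric score plus the duration. A self-contained illustration of how that string is assembled, using made-up scores and duration, is:

# Hypothetical values; in the function above they come from evaluator.predict()
# and evaluator.starttime.
scores = {'acc_metric': 0.1, 'bac_metric': 0.12}
duration = 3.2

additional_run_info = ';'.join(
    ['%s: %s' % (m_, value) for m_, value in scores.items()])
additional_run_info += ';' + 'duration: ' + str(duration)

print(additional_run_info)
# acc_metric: 0.1;bac_metric: 0.12;duration: 3.2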