Code Example #1
import signal
import time


def make_mode_test(data, seed, configuration, metric):
    global evaluator
    evaluator = TestEvaluator(data, configuration, seed=seed,
                              all_scoring_functions=True, with_predictions=True)
    evaluator.fit()
    # 15 is SIGTERM; ignore it while predictions are collected so the run
    # can still report its result.
    signal.signal(15, empty_signal_handler)
    scores, _, _, _ = evaluator.predict()
    duration = time.time() - evaluator.starttime

    score = scores[metric]
    additional_run_info = ";".join(
        ["%s: %s" % (m_, value) for m_, value in scores.items()])
    additional_run_info += ";duration: " + str(duration)

    # ParamILS expects: status, runtime, runlength, quality, seed, additional info.
    print(
        "Result for ParamILS: %s, %f, 1, %f, %d, %s"
        % ("SAT", abs(duration), score, evaluator.seed, additional_run_info)
    )
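
The example registers empty_signal_handler for signal 15 (SIGTERM) but never defines it. A minimal sketch, assuming the handler only needs to absorb the signal so the result line can still be printed:

import signal

def empty_signal_handler(signum, frame):
    # Intentionally do nothing: a SIGTERM received during prediction is ignored.
    pass

# Registered the same way as in the example above.
signal.signal(signal.SIGTERM, empty_signal_handler)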
Code Example #2
    def test_datasets(self):
        for getter in get_dataset_getters():
            # Name each sub-test after this test file and the dataset getter.
            testname = '%s_%s' % (
                os.path.basename(__file__).replace('.pyc', '').replace('.py', ''),
                getter.__name__)
            with self.subTest(testname):
                D = getter()
                output_directory = os.path.join(os.path.dirname(__file__),
                                                '.%s' % testname)
                self.output_directories.append(output_directory)
                err = np.zeros([N_TEST_RUNS])
                for i in range(N_TEST_RUNS):
                    D_ = copy.deepcopy(D)
                    evaluator = TestEvaluator(D_, output_directory, None)

                    # Every run must yield a finite loss.
                    err[i] = evaluator.fit_predict_and_loss()[0]
                    self.assertTrue(np.isfinite(err[i]))
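
The loop relies on helpers from the surrounding test module (get_dataset_getters, N_TEST_RUNS) that are not shown here. The subTest pattern itself is standard unittest; a minimal, standalone sketch (the class and values are hypothetical):

import unittest

class SubTestDemo(unittest.TestCase):
    def test_many_inputs(self):
        # Each iteration is reported as its own sub-test, so one failing
        # value does not abort the remaining iterations.
        for value in [1, 2, 3]:
            with self.subTest(value=value):
                self.assertGreater(value, 0)

if __name__ == '__main__':
    unittest.main()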
Code Example #3
def make_mode_test(data, seed, configuration, metric):
    global evaluator
    evaluator = TestEvaluator(data,
                              configuration,
                              seed=seed,
                              all_scoring_functions=True,
                              with_predictions=True)
    evaluator.fit()
    # 15 is SIGTERM; see the note in Code Example #1.
    signal.signal(15, empty_signal_handler)
    scores, _, _, _ = evaluator.predict()
    duration = time.time() - evaluator.starttime

    score = scores[metric]
    additional_run_info = ';'.join(
        ['%s: %s' % (m_, value) for m_, value in scores.items()])
    additional_run_info += ';duration: ' + str(duration)

    print('Result for ParamILS: %s, %f, 1, %f, %d, %s' %
          ('SAT', abs(duration), score, evaluator.seed, additional_run_info))
Code Example #4
    def test_datasets(self):
        for getter in get_dataset_getters():
            testname = '%s_%s' % (
                os.path.basename(__file__).replace('.pyc', '').replace('.py', ''),
                getter.__name__)

            with self.subTest(testname):
                # Replace the real Backend with a spec'd mock pointing at a
                # throw-away model directory name.
                backend_mock = unittest.mock.Mock(spec=Backend)
                backend_mock.get_model_dir.return_value = 'dutirapbdxvltcrpbdlcatepdeau'
                D = getter()
                D_ = copy.deepcopy(D)
                # Flatten single-column 2-d targets to 1-d.
                y = D.data['Y_train']
                if len(y.shape) == 2 and y.shape[1] == 1:
                    D_.data['Y_train'] = y.flatten()
                queue_ = multiprocessing.Queue()
                evaluator = TestEvaluator(D_, backend_mock, queue_)

                evaluator.fit_predict_and_loss()
                # The evaluator reports its result through the queue.
                duration, result, seed, run_info, status = \
                    evaluator.queue.get(timeout=1)
                self.assertTrue(np.isfinite(result))
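
Code Example #4 swaps the real Backend for unittest.mock.Mock(spec=Backend). A small standalone illustration of how a spec'd mock behaves, using a hypothetical stand-in class rather than the real Backend:

import unittest.mock

class FakeBackend:
    def get_model_dir(self):
        raise NotImplementedError

backend = unittest.mock.Mock(spec=FakeBackend)
backend.get_model_dir.return_value = '/tmp/models'
print(backend.get_model_dir())   # -> '/tmp/models'
# backend.no_such_method()       # would raise AttributeError: spec limits the mock's API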