def testEnsembleSelection():
    """
    Makes sure ensemble selection fit method creates an ensemble correctly
    """

    ensemble = EnsembleSelection(ensemble_size=10,
                                 task_type=REGRESSION,
                                 random_state=0,
                                 metric=root_mean_squared_error)

    # We create a problem such that we encourage the addition of members to the ensemble
    # Fundamentally, the average of 10 sequential numbers is 5.5
    y_true = np.full((100), 5.5)
    predictions = []
    for i in range(1, 20):
        pred = np.full((100), i, dtype=np.float32)
        pred[i*5:5*(i+1)] = 5.5 * i
        predictions.append(pred)

    ensemble.fit(predictions, y_true, identifiers=[(i, i, i) for i in range(20)])

    np.testing.assert_array_equal(ensemble.weights_,
                                  np.array([0.1, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1,
                                           0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
                                           0.,  0., 0.]))

    assert ensemble.identifiers_ == [(i, i, i) for i in range(20)]

    np.testing.assert_array_almost_equal(np.array(ensemble.trajectory_),
                                         np.array([3.462296925452813, 2.679202306657711,
                                                  2.2748626436960375, 2.065717187806695,
                                                  1.7874562615598728, 1.6983448128441783,
                                                  1.559451106330085, 1.5316326052614575,
                                                  1.3801950121782542, 1.3554980575295374]))
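
For intuition, the fitted weights can be applied by hand: stacking the per-model predictions and taking a weighted average along the model axis mirrors what predict() returns (the classification example below asserts exactly this weighted-average behaviour). A minimal sketch reusing the predictions list and ensemble from the test above:

import numpy as np

# Combine the 19 per-model prediction vectors with the fitted weights.
stacked = np.stack(predictions)              # shape (19, 100)
weights = np.asarray(ensemble.weights_)      # the array asserted above
manual_ensemble_pred = np.average(stacked, axis=0, weights=weights)
# manual_ensemble_pred should match ensemble.predict(stacked) for this
# regression ensemble (a sanity check, not part of the original test).
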
Example #2
    def testPredict(self):
        # Test that ensemble prediction applies weights correctly to given
        # predictions. There are two possible cases:
        # 1) predictions.shape[0] == len(self.weights_). In this case,
        # predictions include those made by zero-weighted models. Therefore,
        # we simply apply each weight to the corresponding model's predictions.
        # 2) predictions.shape[0] < len(self.weights_). In this case,
        # predictions exclude those made by zero-weighted models. Therefore,
        # we first exclude all occurrences of zero in self.weights_, and then
        # apply the weights.
        # If neither of the above cases applies, predict() raises a ValueError.
        ensemble = EnsembleSelection(
            ensemble_size=3,
            task_type=1,
            random_state=np.random.RandomState(0),
            metric=accuracy,
        )
        # Test for case 1. Create (3, 2, 2) predictions.
        per_model_pred = np.array([[[0.9, 0.1], [0.4, 0.6]],
                                   [[0.8, 0.2], [0.3, 0.7]],
                                   [[1.0, 0.0], [0.1, 0.9]]])
        # Weights of 3 hypothetical models
        ensemble.weights_ = [0.7, 0.2, 0.1]
        pred = ensemble.predict(per_model_pred)
        truth = np.array([
            [0.89, 0.11],  # This should be the true prediction.
            [0.35, 0.65]
        ])
        self.assertTrue(np.allclose(pred, truth))

        # Test for case 2.
        per_model_pred = np.array([[[0.9, 0.1], [0.4, 0.6]],
                                   [[0.8, 0.2], [0.3, 0.7]],
                                   [[1.0, 0.0], [0.1, 0.9]]])
        # The third model now has a weight of zero.
        ensemble.weights_ = [0.7, 0.2, 0.0, 0.1]
        pred = ensemble.predict(per_model_pred)
        truth = np.array([[0.89, 0.11], [0.35, 0.65]])
        self.assertTrue(np.allclose(pred, truth))

        # Test for error case.
        per_model_pred = np.array([[[0.9, 0.1], [0.4, 0.6]],
                                   [[0.8, 0.2], [0.3, 0.7]],
                                   [[1.0, 0.0], [0.1, 0.9]]])
        # Now the weights contain 2 zero weights and 2 non-zero weights, so
        # neither case matches the 3 given prediction sets.
        ensemble.weights_ = [0.6, 0.0, 0.0, 0.4]

        with self.assertRaises(ValueError):
            ensemble.predict(per_model_pred)
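
The truth arrays in the test above can be reproduced by hand, since the ensemble prediction is the weight-weighted sum over the model axis. A small standalone check using the same numbers:

import numpy as np

per_model_pred = np.array([[[0.9, 0.1], [0.4, 0.6]],
                           [[0.8, 0.2], [0.3, 0.7]],
                           [[1.0, 0.0], [0.1, 0.9]]])
weights = np.array([0.7, 0.2, 0.1])

# Contract the model axis: sum_k weights[k] * per_model_pred[k]
manual = np.tensordot(weights, per_model_pred, axes=1)
print(manual)  # [[0.89 0.11]
               #  [0.35 0.65]]
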
def evaluate(input_directory, validation_files, test_files, ensemble_size=50):

    backend = create(input_directory,
                     input_directory + "_output",
                     delete_tmp_folder_after_terminate=False,
                     delete_output_folder_after_terminate=False,
                     shared_mode=True)

    valid_labels = backend.load_targets_ensemble()
    D = backend.load_datamanager()
    test_labels = D.data["Y_test"]

    score = balanced_accuracy

    # Read the modification time of the predictions file and
    # compute the interval to the first prediction file.
    # This interval will be added to the time we needed to build the ensemble
    time_function_evaluation = validation_files[-1][-1]

    # Build the ensemble
    start = time.time()
    ensemble_selection = EnsembleSelection(
        ensemble_size=ensemble_size,
        task_type=D.info['task'],
        metric=score,
        random_state=np.random.RandomState())

    validation_predictions = np.array([v[0] for v in validation_files])
    test_predictions = np.array([t[0] for t in test_files])

    ensemble_selection.fit(validation_predictions,
                           valid_labels,
                           identifiers=None)
    y_hat_ensemble = ensemble_selection.predict(
        np.array(validation_predictions))
    y_hat_test = ensemble_selection.predict(np.array(test_predictions))

    # Compute validation error
    ensemble_error = 1 - score(valid_labels, y_hat_ensemble)

    # Compute test error
    ensemble_test_error = 1 - score(test_labels, y_hat_test)

    ensemble_time = time.time() - start

    rval = {
        'ensemble_time': ensemble_time,
        'time_function_evaluation': time_function_evaluation,
        'ensemble_error': ensemble_error,
        'ensemble_test_error': ensemble_test_error
    }

    return rval
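
The exact layout of validation_files and test_files is not shown in this snippet; from the indexing above (v[0] holds a prediction array, [-1][-1] the time offset of the last prediction file) they appear to be tuples of (predictions, ..., time offset). A hypothetical invocation with made-up file names and a placeholder start time:

import glob
import os

import numpy as np

start_time = 0.0  # placeholder: start of the underlying optimization run (assumption)

validation_files = [(np.load(path), os.path.getmtime(path) - start_time)
                    for path in sorted(glob.glob("predictions_valid_*.npy"))]
test_files = [(np.load(path), os.path.getmtime(path) - start_time)
              for path in sorted(glob.glob("predictions_test_*.npy"))]

results = evaluate("my_autosklearn_run", validation_files, test_files, ensemble_size=50)
print(results["ensemble_error"], results["ensemble_test_error"])
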
Example #4
    def main(self):

        watch = StopWatch()
        watch.start_task('ensemble_builder')

        used_time = 0
        time_iter = 0
        index_run = 0
        num_iteration = 0
        current_num_models = 0
        last_hash = None
        current_hash = None

        backend = Backend(self.output_dir, self.autosklearn_tmp_dir)
        dir_ensemble = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                                    'predictions_ensemble')
        dir_valid = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                                 'predictions_valid')
        dir_test = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                                'predictions_test')
        paths_ = [dir_ensemble, dir_valid, dir_test]

        dir_ensemble_list_mtimes = []

        self.logger.debug(
            'Starting main loop with %f seconds and %d iterations '
            'left.' % (self.limit - used_time, num_iteration))
        while used_time < self.limit or (self.max_iterations > 0 and
                                         self.max_iterations >= num_iteration):
            num_iteration += 1
            self.logger.debug('Time left: %f', self.limit - used_time)
            self.logger.debug('Time last ensemble building: %f', time_iter)

            # Reload the ensemble targets every iteration; this is important because
            # cv may update the ensemble targets in the course of running auto-sklearn
            # TODO update cv in order to not need this any more!
            targets_ensemble = backend.load_targets_ensemble()

            # Load the predictions from the models
            exists = [os.path.isdir(dir_) for dir_ in paths_]
            if not exists[0]:  # all(exists):
                self.logger.debug('Prediction directory %s does not exist!' %
                                  dir_ensemble)
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            if self.shared_mode is False:
                dir_ensemble_list = sorted(
                    glob.glob(
                        os.path.join(
                            dir_ensemble,
                            'predictions_ensemble_%s_*.npy' % self.seed)))
                if exists[1]:
                    dir_valid_list = sorted(
                        glob.glob(
                            os.path.join(
                                dir_valid,
                                'predictions_valid_%s_*.npy' % self.seed)))
                else:
                    dir_valid_list = []
                if exists[2]:
                    dir_test_list = sorted(
                        glob.glob(
                            os.path.join(
                                dir_test,
                                'predictions_test_%s_*.npy' % self.seed)))
                else:
                    dir_test_list = []
            else:
                dir_ensemble_list = sorted(os.listdir(dir_ensemble))
                dir_valid_list = sorted(
                    os.listdir(dir_valid)) if exists[1] else []
                dir_test_list = sorted(
                    os.listdir(dir_test)) if exists[2] else []

            # Check the modification times because predictions can be updated
            # over time!
            old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
            dir_ensemble_list_mtimes = []

            for dir_ensemble_file in dir_ensemble_list:
                if dir_ensemble_file.endswith("/"):
                    dir_ensemble_file = dir_ensemble_file[:-1]
                basename = os.path.basename(dir_ensemble_file)
                dir_ensemble_file = os.path.join(dir_ensemble, basename)
                mtime = os.path.getmtime(dir_ensemble_file)
                dir_ensemble_list_mtimes.append(mtime)

            if len(dir_ensemble_list) == 0:
                self.logger.debug('Directories are empty')
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            if len(dir_ensemble_list) <= current_num_models and \
                    old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
                self.logger.debug('Nothing has changed since the last time')
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            watch.start_task('index_run' + str(index_run))
            watch.start_task('ensemble_iter_' + str(num_iteration))

            # List of num_runs (which are in the filename) which will be included
            #  later
            include_num_runs = []
            backup_num_runs = []
            model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
            if self.ensemble_nbest is not None:
                # Keeps track of the single scores of each model in our ensemble
                scores_nbest = []
                # The indices of the models that are currently in our ensemble
                indices_nbest = []
                # The names of the models
                model_names = []

            model_names_to_scores = dict()

            model_idx = 0
            for model_name in dir_ensemble_list:
                if model_name.endswith("/"):
                    model_name = model_name[:-1]
                basename = os.path.basename(model_name)

                try:
                    if self.precision is "16":
                        predictions = np.load(
                            os.path.join(dir_ensemble,
                                         basename)).astype(dtype=np.float16)
                    elif self.precision is "32":
                        predictions = np.load(
                            os.path.join(dir_ensemble,
                                         basename)).astype(dtype=np.float32)
                    elif self.precision is "64":
                        predictions = np.load(
                            os.path.join(dir_ensemble,
                                         basename)).astype(dtype=np.float64)
                    else:
                        predictions = np.load(
                            os.path.join(dir_ensemble, basename))

                    score = calculate_score(targets_ensemble, predictions,
                                            self.task_type, self.metric,
                                            predictions.shape[1])

                except Exception as e:
                    self.logger.warning('Error loading %s: %s', basename, e)
                    score = -1

                model_names_to_scores[model_name] = score
                match = model_and_automl_re.search(model_name)
                automl_seed = int(match.group(1))
                num_run = int(match.group(2))

                if self.ensemble_nbest is not None:
                    if score <= 0.001:
                        self.logger.error('Model only predicts at random: ' +
                                          model_name + ' has score: ' +
                                          str(score))
                        backup_num_runs.append((automl_seed, num_run))
                    # If we have fewer models in our ensemble than ensemble_nbest, add
                    # the current model if it is better than random
                    elif len(scores_nbest) < self.ensemble_nbest:
                        scores_nbest.append(score)
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        model_names.append(model_name)
                    else:
                        # Take the worst performing model in our ensemble so far
                        idx = np.argmin(np.array([scores_nbest]))

                        # If the current model is better than the worst model in
                        # our ensemble replace it by the current model
                        if scores_nbest[idx] < score:
                            self.logger.debug(
                                'Worst model in our ensemble: %s with '
                                'score %f will be replaced by model %s '
                                'with score %f', model_names[idx],
                                scores_nbest[idx], model_name, score)
                            # Exclude the old model
                            del scores_nbest[idx]
                            scores_nbest.append(score)
                            del include_num_runs[idx]
                            del indices_nbest[idx]
                            indices_nbest.append(model_idx)
                            include_num_runs.append((automl_seed, num_run))
                            del model_names[idx]
                            model_names.append(model_name)

                        # Otherwise exclude the current model from the ensemble
                        else:
                            # include_num_runs.append(True)
                            pass

                else:
                    # Load all predictions that are better than random
                    if score <= 0.001:
                        # include_num_runs.append(True)
                        self.logger.error('Model only predicts at random: ' +
                                          model_name + ' has score: ' +
                                          str(score))
                        backup_num_runs.append((automl_seed, num_run))
                    else:
                        include_num_runs.append((automl_seed, num_run))

                model_idx += 1

            # If there is no model better than random guessing, we have to use
            # all models which do random guessing
            if len(include_num_runs) == 0:
                include_num_runs = backup_num_runs

            indices_to_model_names = dict()
            indices_to_run_num = dict()
            for i, model_name in enumerate(dir_ensemble_list):
                match = model_and_automl_re.search(model_name)
                automl_seed = int(match.group(1))
                num_run = int(match.group(2))
                if (automl_seed, num_run) in include_num_runs:
                    num_indices = len(indices_to_model_names)
                    indices_to_model_names[num_indices] = model_name
                    indices_to_run_num[num_indices] = (automl_seed, num_run)

            try:
                all_predictions_train, all_predictions_valid, all_predictions_test =\
                    self.get_all_predictions(dir_ensemble, dir_ensemble_list,
                                             dir_valid, dir_valid_list,
                                             dir_test, dir_test_list,
                                             include_num_runs,
                                             model_and_automl_re,
                                             self.precision)
            except IOError:
                self.logger.error('Could not load the predictions.')
                continue

            if len(include_num_runs) == 0:
                self.logger.error('All models do just random guessing')
                time.sleep(2)
                continue

            else:
                ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                             task_type=self.task_type,
                                             metric=self.metric)

                try:
                    ensemble.fit(all_predictions_train, targets_ensemble,
                                 include_num_runs)
                    self.logger.info(ensemble)

                except ValueError as e:
                    self.logger.error('Caught ValueError: ' + str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue
                except IndexError as e:
                    self.logger.error('Caught IndexError: ' + str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue
                except Exception as e:
                    self.logger.error('Caught error! %s', str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue

                # Output the score
                self.logger.info('Training performance: %f' %
                                 ensemble.train_score_)

                self.logger.info(
                    'Building the ensemble took %f seconds' %
                    watch.wall_elapsed('ensemble_iter_' + str(num_iteration)))

            # Set this variable here to avoid re-running the ensemble builder
            # every two seconds in case the ensemble did not change
            current_num_models = len(dir_ensemble_list)

            ensemble_predictions = ensemble.predict(all_predictions_train)
            if sys.version_info[0] == 2:
                ensemble_predictions.flags.writeable = False
                current_hash = hash(ensemble_predictions.data)
            else:
                current_hash = hash(ensemble_predictions.data.tobytes())

            # Only output a new ensemble and new predictions if the output of the
            # ensemble would actually change!
            # TODO this is not safe (hash collisions; it only tests the ensemble
            # predictions, not the ensemble itself); implement a hash function for
            # each possible ensemble builder.
            if last_hash is not None:
                if current_hash == last_hash:
                    self.logger.info('Ensemble output did not change.')
                    time.sleep(2)
                    continue
                else:
                    last_hash = current_hash
            else:
                last_hash = current_hash

            # Save the ensemble for later use in the main auto-sklearn module!
            backend.save_ensemble(ensemble, index_run, self.seed)

            # Save predictions for valid and test data set
            if len(dir_valid_list) == len(dir_ensemble_list):
                all_predictions_valid = np.array(all_predictions_valid)
                ensemble_predictions_valid = ensemble.predict(
                    all_predictions_valid)
                if self.task_type == BINARY_CLASSIFICATION:
                    ensemble_predictions_valid = ensemble_predictions_valid[:,
                                                                            1]
                if self.low_precision:
                    if self.task_type in [
                            BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION,
                            MULTILABEL_CLASSIFICATION
                    ]:
                        ensemble_predictions_valid[
                            ensemble_predictions_valid < 1e-4] = 0.
                    if self.metric in [BAC_METRIC, F1_METRIC]:
                        bin_array = np.zeros(ensemble_predictions_valid.shape,
                                             dtype=np.int32)
                        if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                                ensemble_predictions_valid.shape[1] == 1):
                            bin_array[ensemble_predictions_valid >= 0.5] = 1
                        else:
                            sample_num = ensemble_predictions_valid.shape[0]
                            for i in range(sample_num):
                                j = np.argmax(ensemble_predictions_valid[i, :])
                                bin_array[i, j] = 1
                        ensemble_predictions_valid = bin_array
                    if self.task_type in CLASSIFICATION_TASKS:
                        if ensemble_predictions_valid.size < (20000 * 20):
                            precision = 3
                        else:
                            precision = 2
                    else:
                        if ensemble_predictions_valid.size > 1000000:
                            precision = 4
                        else:
                            # File size is at most about 2.1 MB
                            precision = 6

                backend.save_predictions_as_txt(ensemble_predictions_valid,
                                                'valid',
                                                index_run,
                                                prefix=self.dataset_name,
                                                precision=precision)
            else:
                self.logger.info(
                    'Could not find as many validation set predictions (%d) '
                    'as ensemble predictions (%d)!', len(dir_valid_list),
                    len(dir_ensemble_list))

            del all_predictions_valid

            if len(dir_test_list) == len(dir_ensemble_list):
                all_predictions_test = np.array(all_predictions_test)
                ensemble_predictions_test = ensemble.predict(
                    all_predictions_test)
                if self.task_type == BINARY_CLASSIFICATION:
                    ensemble_predictions_test = ensemble_predictions_test[:, 1]
                if self.low_precision:
                    if self.task_type in [
                            BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION,
                            MULTILABEL_CLASSIFICATION
                    ]:
                        ensemble_predictions_test[
                            ensemble_predictions_test < 1e-4] = 0.
                    if self.metric in [BAC_METRIC, F1_METRIC]:
                        bin_array = np.zeros(ensemble_predictions_test.shape,
                                             dtype=np.int32)
                        if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                                ensemble_predictions_test.shape[1] == 1):
                            bin_array[ensemble_predictions_test >= 0.5] = 1
                        else:
                            sample_num = ensemble_predictions_test.shape[0]
                            for i in range(sample_num):
                                j = np.argmax(ensemble_predictions_test[i, :])
                                bin_array[i, j] = 1
                        ensemble_predictions_test = bin_array
                    if self.task_type in CLASSIFICATION_TASKS:
                        if ensemble_predictions_test.size < (20000 * 20):
                            precision = 3
                        else:
                            precision = 2
                    else:
                        if ensemble_predictions_test.size > 1000000:
                            precision = 4
                        else:
                            precision = 6

                backend.save_predictions_as_txt(ensemble_predictions_test,
                                                'test',
                                                index_run,
                                                prefix=self.dataset_name,
                                                precision=precision)
            else:
                self.logger.info(
                    'Could not find as many test set predictions (%d) as '
                    'ensemble predictions (%d)!', len(dir_test_list),
                    len(dir_ensemble_list))

            del all_predictions_test

            current_num_models = len(dir_ensemble_list)
            watch.stop_task('index_run' + str(index_run))
            time_iter = watch.get_wall_dur('index_run' + str(index_run))
            used_time = watch.wall_elapsed('ensemble_builder')
            index_run += 1
        return
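
The change-detection step above hashes the bytes of the ensemble predictions so that an unchanged ensemble is not written out again every two seconds. A minimal standalone sketch of that check (Python 3 branch only):

import numpy as np

def predictions_changed(ensemble_predictions: np.ndarray, last_hash):
    """Return (changed, new_hash), mirroring the bytes-based hash above.

    Note: hash() on bytes is process-local (hash randomization), so this
    only detects changes within a single run, exactly like the loop above.
    """
    current_hash = hash(ensemble_predictions.data.tobytes())
    return last_hash is None or current_hash != last_hash, current_hash
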
Example #5
    def main(self):

        watch = StopWatch()
        watch.start_task('ensemble_builder')

        used_time = 0
        time_iter = 0
        index_run = 0
        num_iteration = 0
        current_num_models = 0
        last_hash = None
        current_hash = None

        backend = Backend(self.output_dir, self.autosklearn_tmp_dir)
        dir_ensemble = os.path.join(self.autosklearn_tmp_dir,
                                    '.auto-sklearn',
                                    'predictions_ensemble')
        dir_valid = os.path.join(self.autosklearn_tmp_dir,
                                 '.auto-sklearn',
                                 'predictions_valid')
        dir_test = os.path.join(self.autosklearn_tmp_dir,
                                '.auto-sklearn',
                                'predictions_test')
        paths_ = [dir_ensemble, dir_valid, dir_test]

        dir_ensemble_list_mtimes = []

        self.logger.debug('Starting main loop with %f seconds and %d iterations '
                          'left.' % (self.limit - used_time, num_iteration))
        while used_time < self.limit or (self.max_iterations > 0 and
                                         self.max_iterations >= num_iteration):
            num_iteration += 1
            self.logger.debug('Time left: %f', self.limit - used_time)
            self.logger.debug('Time last ensemble building: %f', time_iter)

            # Reload the ensemble targets every iteration; this is important because
            # cv may update the ensemble targets in the course of running auto-sklearn
            # TODO update cv in order to not need this any more!
            targets_ensemble = backend.load_targets_ensemble()

            # Load the predictions from the models
            exists = [os.path.isdir(dir_) for dir_ in paths_]
            if not exists[0]:  # all(exists):
                self.logger.debug('Prediction directory %s does not exist!' %
                              dir_ensemble)
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            if self.shared_mode is False:
                dir_ensemble_list = sorted(glob.glob(os.path.join(
                    dir_ensemble, 'predictions_ensemble_%s_*.npy' % self.seed)))
                if exists[1]:
                    dir_valid_list = sorted(glob.glob(os.path.join(
                        dir_valid, 'predictions_valid_%s_*.npy' % self.seed)))
                else:
                    dir_valid_list = []
                if exists[2]:
                    dir_test_list = sorted(glob.glob(os.path.join(
                        dir_test, 'predictions_test_%s_*.npy' % self.seed)))
                else:
                    dir_test_list = []
            else:
                dir_ensemble_list = sorted(os.listdir(dir_ensemble))
                dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
                dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

            # Check the modification times because predictions can be updated
            # over time!
            old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
            dir_ensemble_list_mtimes = []

            for dir_ensemble_file in dir_ensemble_list:
                if dir_ensemble_file.endswith("/"):
                    dir_ensemble_file = dir_ensemble_file[:-1]
                basename = os.path.basename(dir_ensemble_file)
                dir_ensemble_file = os.path.join(dir_ensemble, basename)
                mtime = os.path.getmtime(dir_ensemble_file)
                dir_ensemble_list_mtimes.append(mtime)

            if len(dir_ensemble_list) == 0:
                self.logger.debug('Directories are empty')
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            if len(dir_ensemble_list) <= current_num_models and \
                    old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
                self.logger.debug('Nothing has changed since the last time')
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            watch.start_task('index_run' + str(index_run))
            watch.start_task('ensemble_iter_' + str(num_iteration))

            # List of num_runs (which are in the filename) which will be included
            #  later
            include_num_runs = []
            backup_num_runs = []
            model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
            if self.ensemble_nbest is not None:
                # Keeps track of the single scores of each model in our ensemble
                scores_nbest = []
                # The indices of the models that are currently in our ensemble
                indices_nbest = []
                # The names of the models
                model_names = []

            model_names_to_scores = dict()

            model_idx = 0
            for model_name in dir_ensemble_list:
                if model_name.endswith("/"):
                    model_name = model_name[:-1]
                basename = os.path.basename(model_name)

                try:
                    if self.precision is "16":
                        predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float16)
                    elif self.precision is "32":
                        predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float32)
                    elif self.precision is "64":
                        predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float64)
                    else:
                        predictions = np.load(os.path.join(dir_ensemble, basename))

                    score = calculate_score(targets_ensemble, predictions,
                                            self.task_type, self.metric,
                                            predictions.shape[1])

                except Exception as e:
                    self.logger.warning('Error loading %s: %s', basename, e)
                    score = -1

                model_names_to_scores[model_name] = score
                match = model_and_automl_re.search(model_name)
                automl_seed = int(match.group(1))
                num_run = int(match.group(2))

                if self.ensemble_nbest is not None:
                    if score <= 0.001:
                        self.logger.error('Model only predicts at random: ' +
                                      model_name + ' has score: ' + str(score))
                        backup_num_runs.append((automl_seed, num_run))
                    # If we have fewer models in our ensemble than ensemble_nbest, add
                    # the current model if it is better than random
                    elif len(scores_nbest) < self.ensemble_nbest:
                        scores_nbest.append(score)
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        model_names.append(model_name)
                    else:
                        # Take the worst performing model in our ensemble so far
                        idx = np.argmin(np.array([scores_nbest]))

                        # If the current model is better than the worst model in
                        # our ensemble replace it by the current model
                        if scores_nbest[idx] < score:
                            self.logger.debug('Worst model in our ensemble: %s with '
                                          'score %f will be replaced by model %s '
                                          'with score %f', model_names[idx],
                                          scores_nbest[idx], model_name, score)
                            # Exclude the old model
                            del scores_nbest[idx]
                            scores_nbest.append(score)
                            del include_num_runs[idx]
                            del indices_nbest[idx]
                            indices_nbest.append(model_idx)
                            include_num_runs.append((automl_seed, num_run))
                            del model_names[idx]
                            model_names.append(model_name)

                        # Otherwise exclude the current model from the ensemble
                        else:
                            # include_num_runs.append(True)
                            pass

                else:
                    # Load all predictions that are better than random
                    if score <= 0.001:
                        # include_num_runs.append(True)
                        self.logger.error('Model only predicts at random: ' +
                                      model_name + ' has score: ' + str(score))
                        backup_num_runs.append((automl_seed, num_run))
                    else:
                        include_num_runs.append((automl_seed, num_run))

                model_idx += 1

            # If there is no model better than random guessing, we have to use
            # all models which do random guessing
            if len(include_num_runs) == 0:
                include_num_runs = backup_num_runs

            indices_to_model_names = dict()
            indices_to_run_num = dict()
            for i, model_name in enumerate(dir_ensemble_list):
                match = model_and_automl_re.search(model_name)
                automl_seed = int(match.group(1))
                num_run = int(match.group(2))
                if (automl_seed, num_run) in include_num_runs:
                    num_indices = len(indices_to_model_names)
                    indices_to_model_names[num_indices] = model_name
                    indices_to_run_num[num_indices] = (automl_seed, num_run)

            try:
                all_predictions_train, all_predictions_valid, all_predictions_test =\
                    self.get_all_predictions(dir_ensemble, dir_ensemble_list,
                                             dir_valid, dir_valid_list,
                                             dir_test, dir_test_list,
                                             include_num_runs,
                                             model_and_automl_re,
                                             self.precision)
            except IOError:
                self.logger.error('Could not load the predictions.')
                continue

            if len(include_num_runs) == 0:
                self.logger.error('All models do just random guessing')
                time.sleep(2)
                continue

            else:
                ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                             task_type=self.task_type,
                                             metric=self.metric)

                try:
                    ensemble.fit(all_predictions_train, targets_ensemble,
                                 include_num_runs)
                    self.logger.info(ensemble)

                except ValueError as e:
                    self.logger.error('Caught ValueError: ' + str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue
                except IndexError as e:
                    self.logger.error('Caught IndexError: ' + str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue
                except Exception as e:
                    self.logger.error('Caught error! %s', str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue

                # Output the score
                self.logger.info('Training performance: %f' % ensemble.train_score_)

                self.logger.info('Building the ensemble took %f seconds' %
                            watch.wall_elapsed('ensemble_iter_' + str(num_iteration)))

            # Set this variable here to avoid re-running the ensemble builder
            # every two seconds in case the ensemble did not change
            current_num_models = len(dir_ensemble_list)

            ensemble_predictions = ensemble.predict(all_predictions_train)
            if sys.version_info[0] == 2:
                ensemble_predictions.flags.writeable = False
                current_hash = hash(ensemble_predictions.data)
            else:
                current_hash = hash(ensemble_predictions.data.tobytes())

            # Only output a new ensemble and new predictions if the output of the
            # ensemble would actually change!
            # TODO this is not safe (hash collisions; it only tests the ensemble
            # predictions, not the ensemble itself); implement a hash function for
            # each possible ensemble builder.
            if last_hash is not None:
                if current_hash == last_hash:
                    self.logger.info('Ensemble output did not change.')
                    time.sleep(2)
                    continue
                else:
                    last_hash = current_hash
            else:
                last_hash = current_hash

            # Save the ensemble for later use in the main auto-sklearn module!
            backend.save_ensemble(ensemble, index_run, self.seed)

            # Save predictions for valid and test data set
            if len(dir_valid_list) == len(dir_ensemble_list):
                all_predictions_valid = np.array(all_predictions_valid)
                ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
                if self.task_type == BINARY_CLASSIFICATION:
                    ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
                if self.low_precision:
                    if self.task_type in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION]:
                        ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.
                    if self.metric in [BAC_METRIC, F1_METRIC]:
                        bin_array = np.zeros(ensemble_predictions_valid.shape, dtype=np.int32)
                        if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                            ensemble_predictions_valid.shape[1] == 1):
                            bin_array[ensemble_predictions_valid >= 0.5] = 1
                        else:
                            sample_num = ensemble_predictions_valid.shape[0]
                            for i in range(sample_num):
                                j = np.argmax(ensemble_predictions_valid[i, :])
                                bin_array[i, j] = 1
                        ensemble_predictions_valid = bin_array
                    if self.task_type in CLASSIFICATION_TASKS:
                        if ensemble_predictions_valid.size < (20000 * 20):
                            precision = 3
                        else:
                            precision = 2
                    else:
                        if ensemble_predictions_valid.size > 1000000:
                            precision = 4
                        else:
                            # File size is at most about 2.1 MB
                            precision = 6

                backend.save_predictions_as_txt(ensemble_predictions_valid,
                                                'valid', index_run, prefix=self.dataset_name,
                                                precision=precision)
            else:
                self.logger.info('Could not find as many validation set predictions (%d) '
                                 'as ensemble predictions (%d)!',
                                 len(dir_valid_list), len(dir_ensemble_list))

            del all_predictions_valid

            if len(dir_test_list) == len(dir_ensemble_list):
                all_predictions_test = np.array(all_predictions_test)
                ensemble_predictions_test = ensemble.predict(all_predictions_test)
                if self.task_type == BINARY_CLASSIFICATION:
                    ensemble_predictions_test = ensemble_predictions_test[:, 1]
                if self.low_precision:
                    if self.task_type in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION]:
                        ensemble_predictions_test[ensemble_predictions_test < 1e-4] = 0.
                    if self.metric in [BAC_METRIC, F1_METRIC]:
                        bin_array = np.zeros(ensemble_predictions_test.shape,
                                             dtype=np.int32)
                        if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                                    ensemble_predictions_test.shape[1] == 1):
                            bin_array[ensemble_predictions_test >= 0.5] = 1
                        else:
                            sample_num = ensemble_predictions_test.shape[0]
                            for i in range(sample_num):
                                j = np.argmax(ensemble_predictions_test[i, :])
                                bin_array[i, j] = 1
                        ensemble_predictions_test = bin_array
                    if self.task_type in CLASSIFICATION_TASKS:
                        if ensemble_predictions_test.size < (20000 * 20):
                            precision = 3
                        else:
                            precision = 2
                    else:
                        if ensemble_predictions_test.size > 1000000:
                            precision = 4
                        else:
                            precision = 6

                backend.save_predictions_as_txt(ensemble_predictions_test,
                                                'test', index_run, prefix=self.dataset_name,
                                                precision=precision)
            else:
                self.logger.info('Could not find as many test set predictions (%d) as '
                                 'ensemble predictions (%d)!',
                                 len(dir_test_list), len(dir_ensemble_list))

            del all_predictions_test

            current_num_models = len(dir_ensemble_list)
            watch.stop_task('index_run' + str(index_run))
            time_iter = watch.get_wall_dur('index_run' + str(index_run))
            used_time = watch.wall_elapsed('ensemble_builder')
            index_run += 1
        return
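
When low_precision is set, the number of decimal places passed to save_predictions_as_txt follows a small size-based ladder. The same thresholds, written out as a helper (a sketch copied from the branches above):

def output_precision(n_values: int, is_classification: bool) -> int:
    # Classification: 3 decimals for small prediction matrices, 2 otherwise.
    if is_classification:
        return 3 if n_values < 20000 * 20 else 2
    # Regression: drop to 4 decimals only for very large outputs;
    # 6 decimals keeps the file size at roughly 2.1 MB at most.
    return 4 if n_values > 1000000 else 6
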
Example #6
    def fit_ensemble(self, selected_keys: list):
        """
            fit ensemble 
            
            Parameters
            ---------
            selected_keys: list
                list of selected keys of self.read_preds
                
            Returns
            -------
            ensemble: EnsembleSelection
                trained Ensemble
        """

        predictions_train = np.array(
            [self.read_preds[k][Y_ENSEMBLE] for k in selected_keys])
        include_num_runs = [(self.read_preds[k]["seed"],
                             self.read_preds[k]["num_run"])
                            for k in selected_keys]

        # check hash if ensemble training data changed
        current_hash = hash(predictions_train.data.tobytes())
        if self.last_hash == current_hash:
            self.logger.debug(
                "No new model predictions selected -- skip ensemble building "
                "-- current performance: %f",
                self.validation_performance_,
            )
            return None
        self.last_hash = current_hash

        ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                     task_type=self.task_type,
                                     metric=self.metric)

        try:
            self.logger.debug(
                "Fitting the ensemble on %d models.",
                len(predictions_train),
            )
            start_time = time.time()
            ensemble.fit(predictions_train, self.y_true_ensemble,
                         include_num_runs)
            end_time = time.time()
            self.logger.debug(
                "Fitting the ensemble took %.2f seconds.",
                end_time - start_time,
            )
            self.logger.info(ensemble)
            self.validation_performance_ = min(
                self.validation_performance_,
                ensemble.get_validation_performance(),
            )

        except ValueError as e:
            self.logger.error('Caught ValueError: %s', traceback.format_exc())
            time.sleep(self.sleep_duration)
            return None
        except IndexError as e:
            self.logger.error('Caught IndexError: %s', traceback.format_exc())
            time.sleep(self.sleep_duration)
            return None

        return ensemble
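
fit_ensemble() expects self.read_preds to map a prediction-file key to a dict holding the ensemble-set predictions (under the Y_ENSEMBLE constant) plus the run's seed and num_run. A purely illustrative sketch of that structure; the key name, array shape, and the placeholder value of Y_ENSEMBLE are assumptions:

import numpy as np

Y_ENSEMBLE = 0  # placeholder; the real constant is imported by the module above

read_preds = {
    "predictions_ensemble_1_0001.npy": {    # hypothetical file-name key
        Y_ENSEMBLE: np.zeros((100, 2)),     # ensemble-set predictions for one model
        "seed": 1,
        "num_run": 1,
    },
}
# ensemble = builder.fit_ensemble(selected_keys=list(read_preds.keys()))
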
Example #7
def main(autosklearn_tmp_dir,
         dataset_name,
         task_type,
         metric,
         limit,
         output_dir,
         ensemble_size=None,
         ensemble_nbest=None,
         seed=1,
         shared_mode=False,
         max_iterations=-1,
         precision="32"):

    watch = StopWatch()
    watch.start_task('ensemble_builder')

    used_time = 0
    time_iter = 0
    index_run = 0
    num_iteration = 0
    current_num_models = 0

    backend = Backend(output_dir, autosklearn_tmp_dir)
    dir_ensemble = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                                'predictions_ensemble')
    dir_valid = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                             'predictions_valid')
    dir_test = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                            'predictions_test')
    paths_ = [dir_ensemble, dir_valid, dir_test]

    dir_ensemble_list_mtimes = []

    while used_time < limit or (max_iterations > 0 and max_iterations >= num_iteration):
        num_iteration += 1
        logger.debug('Time left: %f', limit - used_time)
        logger.debug('Time last iteration: %f', time_iter)

        # Reload the ensemble targets every iteration; this is important because
        # cv may update the ensemble targets in the course of running auto-sklearn
        # TODO update cv in order to not need this any more!
        targets_ensemble = backend.load_targets_ensemble()

        # Load the predictions from the models
        exists = [os.path.isdir(dir_) for dir_ in paths_]
        if not exists[0]:  # all(exists):
            logger.debug('Prediction directory %s does not exist!' %
                          dir_ensemble)
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if shared_mode is False:
            dir_ensemble_list = sorted(glob.glob(os.path.join(
                dir_ensemble, 'predictions_ensemble_%s_*.npy' % seed)))
            if exists[1]:
                dir_valid_list = sorted(glob.glob(os.path.join(
                    dir_valid, 'predictions_valid_%s_*.npy' % seed)))
            else:
                dir_valid_list = []
            if exists[2]:
                dir_test_list = sorted(glob.glob(os.path.join(
                    dir_test, 'predictions_test_%s_*.npy' % seed)))
            else:
                dir_test_list = []
        else:
            dir_ensemble_list = sorted(os.listdir(dir_ensemble))
            dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
            dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

        # Check the modification times because predictions can be updated
        # over time!
        old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
        dir_ensemble_list_mtimes = []

        for dir_ensemble_file in dir_ensemble_list:
            if dir_ensemble_file.endswith("/"):
                dir_ensemble_file = dir_ensemble_file[:-1]
            basename = os.path.basename(dir_ensemble_file)
            dir_ensemble_file = os.path.join(dir_ensemble, basename)
            mtime = os.path.getmtime(dir_ensemble_file)
            dir_ensemble_list_mtimes.append(mtime)

        if len(dir_ensemble_list) == 0:
            logger.debug('Directories are empty')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if len(dir_ensemble_list) <= current_num_models and \
                old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
            logger.debug('Nothing has changed since the last time')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('ensemble_iter_' + str(index_run))

        # List of num_runs (which are in the filename) which will be included
        #  later
        include_num_runs = []
        backup_num_runs = []
        model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
        if ensemble_nbest is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the models that are currently in our ensemble
            indices_nbest = []
            # The names of the models
            model_names = []

        model_names_to_scores = dict()

        model_idx = 0
        for model_name in dir_ensemble_list:
            if model_name.endswith("/"):
                model_name = model_name[:-1]
            basename = os.path.basename(model_name)

            if precision is "16":
                predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float16)
            elif precision is "32":
                predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float32)
            elif precision is "64":
                predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float64)
            else:
                predictions = np.load(os.path.join(dir_ensemble, basename))

            try:
                score = calculate_score(targets_ensemble, predictions,
                                        task_type, metric,
                                        predictions.shape[1])
            except Exception:
                score = -1

            model_names_to_scores[model_name] = score
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))

            if ensemble_nbest is not None:
                if score <= 0.001:
                    logger.error('Model only predicts at random: ' +
                                  model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                # If we have fewer models in our ensemble than ensemble_nbest, add
                # the current model if it is better than random
                elif len(scores_nbest) < ensemble_nbest:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    include_num_runs.append((automl_seed, num_run))
                    model_names.append(model_name)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))

                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        logger.debug('Worst model in our ensemble: %s with '
                                      'score %f will be replaced by model %s '
                                      'with score %f', model_names[idx],
                                      scores_nbest[idx], model_name, score)
                        # Exclude the old model
                        del scores_nbest[idx]
                        scores_nbest.append(score)
                        del include_num_runs[idx]
                        del indices_nbest[idx]
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        del model_names[idx]
                        model_names.append(model_name)

                    # Otherwise exclude the current model from the ensemble
                    else:
                        # include_num_runs.append(True)
                        pass

            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    # include_num_runs.append(True)
                    logger.error('Model only predicts at random: ' +
                                  model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                else:
                    include_num_runs.append((automl_seed, num_run))

            model_idx += 1

        # If there is no model better than random guessing, we have to use
        # all models which do random guessing
        if len(include_num_runs) == 0:
            include_num_runs = backup_num_runs

        indices_to_model_names = dict()
        indices_to_run_num = dict()
        for i, model_name in enumerate(dir_ensemble_list):
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))
            if (automl_seed, num_run) in include_num_runs:
                num_indices = len(indices_to_model_names)
                indices_to_model_names[num_indices] = model_name
                indices_to_run_num[num_indices] = (automl_seed, num_run)

        try:
            all_predictions_train, all_predictions_valid, all_predictions_test =\
                get_all_predictions(dir_ensemble, dir_ensemble_list,
                                    dir_valid, dir_valid_list,
                                    dir_test, dir_test_list,
                                    include_num_runs,
                                    model_and_automl_re,
                                    precision)
        except IOError:
            logger.error('Could not load the predictions.')
            continue

        if len(include_num_runs) == 0:
            logger.error('All models do just random guessing')
            time.sleep(2)
            continue

        else:
            ensemble = EnsembleSelection(ensemble_size=ensemble_size,
                                         task_type=task_type,
                                         metric=metric)

            try:
                ensemble.fit(all_predictions_train, targets_ensemble,
                             include_num_runs)
                logger.info(ensemble)

            except ValueError as e:
                logger.error('Caught ValueError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except IndexError as e:
                logger.error('Caught IndexError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except Exception as e:
                logger.error('Caught error! %s', str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue

            # Output the score
            logger.info('Training performance: %f', ensemble.train_score_)

        # Save the ensemble for later use in the main auto-sklearn module!
        backend.save_ensemble(ensemble, index_run, seed)

        # Save predictions for valid and test data set
        if len(dir_valid_list) == len(dir_ensemble_list):
            all_predictions_valid = np.array(all_predictions_valid)
            ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
            backend.save_predictions_as_txt(ensemble_predictions_valid,
                                            'valid', index_run, prefix=dataset_name)
        else:
            logger.info('Could not find as many validation set predictions (%d) '
                        'as ensemble predictions (%d)!',
                        len(dir_valid_list), len(dir_ensemble_list))

        del all_predictions_valid

        if len(dir_test_list) == len(dir_ensemble_list):
            all_predictions_test = np.array(all_predictions_test)
            ensemble_predictions_test = ensemble.predict(all_predictions_test)
            backend.save_predictions_as_txt(ensemble_predictions_test,
                                            'test', index_run, prefix=dataset_name)
        else:
            logger.info('Could not find as many test set predictions (%d) as '
                         'ensemble predictions (%d)!',
                        len(dir_test_list), len(dir_ensemble_list))

        del all_predictions_test

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('ensemble_iter_' + str(index_run))
        time_iter = watch.get_wall_dur('ensemble_iter_' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return
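
# The replacement step above keeps several parallel lists (scores, indices,
# run identifiers, model names) in sync. Below is a minimal, self-contained
# sketch of that n-best bookkeeping; `consider_model` and `nbest` are
# illustrative names, not part of auto-sklearn.
def consider_model(nbest, candidate, n=3):
    """nbest is a list of (score, identifier) tuples; higher scores are better."""
    if len(nbest) < n:
        nbest.append(candidate)
        return True
    # Index of the worst member currently kept
    worst_idx = min(range(len(nbest)), key=lambda i: nbest[i][0])
    if nbest[worst_idx][0] < candidate[0]:
        del nbest[worst_idx]      # drop the worst member
        nbest.append(candidate)   # and keep the better candidate instead
        return True
    return False                  # candidate rejected


nbest = []
for cand in [(0.4, 'run_1'), (0.9, 'run_2'), (0.5, 'run_3'), (0.7, 'run_4')]:
    consider_model(nbest, cand, n=3)
print(nbest)  # [(0.9, 'run_2'), (0.5, 'run_3'), (0.7, 'run_4')]
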
def ensemble_loop(iterations, starttime, info,
                  ensemble_size, valid_labels, test_labels):

    written_first_line = False
    all_predictions = []
    all_test_predictions = []
    identifiers = []
    csv_writer_list = []

    # Time of the "current" (most recent) model, measured from the start time
    time_function_evaluation = os.path.getmtime(iterations[-1]) - starttime

    ids = os.path.basename(iterations[-1]).split(".")[0].split('_')[-1]
    print(ids)

    for index, iters in enumerate(iterations):
        test_fname = iters.replace('ensemble', 'valid')

        if not os.path.isfile(test_fname):
            continue

        predictions = np.load(iters)
        all_predictions.append(predictions)

        identifiers.append(index)
        test_predictions = np.load(test_fname)
        all_test_predictions.append(test_predictions)

    # Build the ensemble
    start = time.time()

    es_cls = EnsembleSelection(ensemble_size=ensemble_size,
                               task_type=info["task"],
                               metric=info["metric"])
    es_cls.fit(np.array(all_predictions), valid_labels, identifiers)
    order = es_cls.indices_

    # Compute validation error
    s1 = time.time()
    ensemble_error = 1 - calculate_score(
        valid_labels,
        np.nanmean(np.array(all_predictions)[order], axis=0),
        info["task"], info["metric"], info['label_num'])

    # Compute test error
    ensemble_test_error = 1 - calculate_score(
        test_labels,
        np.nanmean(np.array(all_test_predictions)[order], axis=0),
        info["task"], info["metric"], info['label_num'])

    ensemble_time = time.time() - start

    # We have to add an additional row for the first iteration
    if len(iterations) == 1:
        csv_writer_list.append({'Time': 0,
                                'Training (Empirical) Performance': ensemble_error,
                                'Test Set Performance': ensemble_test_error,
                                'AC Overhead Time': 0,
                                'Validation Configuration ID': 0})
        written_first_line = True

    csv_writer_list.append({'Time': ensemble_time + time_function_evaluation,
                            'Training (Empirical) Performance': ensemble_error,
                            'Test Set Performance': ensemble_test_error,
                            'AC Overhead Time': 0,
                            'Validation Configuration ID': ids})

    return csv_writer_list
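
# A hedged usage sketch for ensemble_loop above: assuming the prediction files
# follow a 'predictions_ensemble_*.npy' naming scheme and that `info`,
# `valid_labels` and `test_labels` are already in scope, the returned rows can
# be written with csv.DictWriter. The file layout and output path here are
# assumptions, not taken from the original code.
import csv
import glob
import os

fieldnames = ['Time', 'Training (Empirical) Performance',
              'Test Set Performance', 'AC Overhead Time',
              'Validation Configuration ID']

iterations = sorted(glob.glob('predictions_ensemble_*.npy'), key=os.path.getmtime)
rows = ensemble_loop(iterations, starttime=os.path.getmtime(iterations[0]),
                     info=info, ensemble_size=50,
                     valid_labels=valid_labels, test_labels=test_labels)

with open('ensemble_trajectory.csv', 'w', newline='') as fh:
    writer = csv.DictWriter(fh, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(rows)
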
Ejemplo n.º 9
    def fit_ensemble(self, selected_keys: list):
        """
        Fit an ensemble on the predictions of the selected models.

        Parameters
        ----------
        selected_keys: list
            List of selected keys of self.read_preds.

        Returns
        -------
        ensemble: EnsembleSelection
            The trained ensemble, or None if fitting was skipped or failed.
        """

        predictions_train = np.array([self.read_preds[k][Y_ENSEMBLE] for k in selected_keys])
        include_num_runs = [(self.read_preds[k]["seed"], self.read_preds[k]["num_run"]) for k in selected_keys]
        
        # Compare hashes to check whether the ensemble training data changed
        current_hash = hash(predictions_train.data.tobytes())
        if self.last_hash == current_hash:
            self.logger.debug(
                "No new model predictions selected -- skip ensemble building "
                "-- current performance: %f",
                self.validation_performance_,
            )
            return None
        self.last_hash = current_hash
        
        ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                     task_type=self.task_type,
                                     metric=self.metric)
        
        try:
            self.logger.debug(
                "Fitting the ensemble on %d models.",
                len(predictions_train),
            )
            start_time = time.time()
            ensemble.fit(predictions_train, self.y_true_ensemble,
                         include_num_runs)
            end_time = time.time()
            self.logger.debug(
                "Fitting the ensemble took %.2f seconds.",
                end_time - start_time,
            )
            self.logger.info(ensemble)
            self.validation_performance_ = min(
                self.validation_performance_,
                ensemble.get_validation_performance(),
            )

        except ValueError as e:
            self.logger.error('Caught ValueError: %s', traceback.format_exc())
            time.sleep(self.sleep_duration)
            return None
        except IndexError as e:
            self.logger.error('Caught IndexError: %s', traceback.format_exc())
            time.sleep(self.sleep_duration)
            return None
        
        return ensemble
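
# The hash check in fit_ensemble above is a cheap change-detection trick:
# fitting is skipped whenever the stacked prediction array is byte-identical
# to the one used for the previous fit. A standalone sketch of that pattern
# follows (class and method names are illustrative, not auto-sklearn API).
import numpy as np


class CachedFitter:
    def __init__(self):
        self.last_hash = None

    def maybe_fit(self, predictions_train):
        current_hash = hash(predictions_train.data.tobytes())
        if current_hash == self.last_hash:
            return False   # nothing changed -- skip the (expensive) fit
        self.last_hash = current_hash
        # ... the expensive ensemble fitting would happen here ...
        return True


fitter = CachedFitter()
preds = np.random.rand(3, 100, 2)
assert fitter.maybe_fit(preds) is True    # first call fits
assert fitter.maybe_fit(preds) is False   # identical data, fit is skipped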