def main(self):
    """Run the ensemble-building loop.

    Repeatedly scans the prediction directories below
    ``self.autosklearn_tmp_dir/.auto-sklearn``, scores every model's
    ensemble-set predictions, fits an ``EnsembleSelection`` over the
    selected models and saves the ensemble (plus validation/test set
    predictions when they are complete) through the ``Backend``.

    The loop runs while less than ``self.limit`` seconds have elapsed,
    or — when ``self.max_iterations`` is positive — until that many
    iterations have been performed.
    """
    watch = StopWatch()
    watch.start_task('ensemble_builder')

    used_time = 0
    time_iter = 0
    index_run = 0
    num_iteration = 0
    current_num_models = 0
    last_hash = None
    current_hash = None

    backend = Backend(self.output_dir, self.autosklearn_tmp_dir)
    dir_ensemble = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                                'predictions_ensemble')
    dir_valid = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                             'predictions_valid')
    dir_test = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                            'predictions_test')
    paths_ = [dir_ensemble, dir_valid, dir_test]
    dir_ensemble_list_mtimes = []

    self.logger.debug('Starting main loop with %f seconds and %d iterations '
                      'left.' % (self.limit - used_time, num_iteration))
    while used_time < self.limit or (self.max_iterations > 0 and
                                     self.max_iterations >= num_iteration):
        num_iteration += 1
        self.logger.debug('Time left: %f', self.limit - used_time)
        self.logger.debug('Time last ensemble building: %f', time_iter)

        # Reload the ensemble targets every iteration, important, because cv
        # may update the ensemble targets in the cause of running auto-sklearn
        # TODO update cv in order to not need this any more!
        targets_ensemble = backend.load_targets_ensemble()

        # Load the predictions from the models
        exists = [os.path.isdir(dir_) for dir_ in paths_]
        if not exists[0]:  # all(exists):
            self.logger.debug('Prediction directory %s does not exist!'
                              % dir_ensemble)
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if self.shared_mode is False:
            # Only pick up prediction files written by our own seed.
            dir_ensemble_list = sorted(glob.glob(os.path.join(
                dir_ensemble, 'predictions_ensemble_%s_*.npy' % self.seed)))
            if exists[1]:
                dir_valid_list = sorted(glob.glob(os.path.join(
                    dir_valid, 'predictions_valid_%s_*.npy' % self.seed)))
            else:
                dir_valid_list = []
            if exists[2]:
                dir_test_list = sorted(glob.glob(os.path.join(
                    dir_test, 'predictions_test_%s_*.npy' % self.seed)))
            else:
                dir_test_list = []
        else:
            # Shared mode: use every prediction file, regardless of seed.
            dir_ensemble_list = sorted(os.listdir(dir_ensemble))
            dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
            dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

        # Check the modification times because predictions can be updated
        # over time!
        old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
        dir_ensemble_list_mtimes = []
        for dir_ensemble_file in dir_ensemble_list:
            if dir_ensemble_file.endswith("/"):
                dir_ensemble_file = dir_ensemble_file[:-1]
            basename = os.path.basename(dir_ensemble_file)
            dir_ensemble_file = os.path.join(dir_ensemble, basename)
            mtime = os.path.getmtime(dir_ensemble_file)
            dir_ensemble_list_mtimes.append(mtime)

        if len(dir_ensemble_list) == 0:
            self.logger.debug('Directories are empty')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if len(dir_ensemble_list) <= current_num_models and \
                old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
            self.logger.debug('Nothing has changed since the last time')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('index_run' + str(index_run))
        watch.start_task('ensemble_iter_' + str(num_iteration))

        # List of num_runs (which are in the filename) which will be included
        # later
        include_num_runs = []
        backup_num_runs = []
        model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
        if self.ensemble_nbest is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the model that are currently in our ensemble
            indices_nbest = []
            # The names of the models
            model_names = []

        model_names_to_scores = dict()

        model_idx = 0
        for model_name in dir_ensemble_list:
            if model_name.endswith("/"):
                model_name = model_name[:-1]
            basename = os.path.basename(model_name)

            try:
                # BUGFIX: use '==' for string comparison; 'is' tests object
                # identity and only works by accident of CPython interning.
                if self.precision == "16":
                    predictions = np.load(os.path.join(
                        dir_ensemble, basename)).astype(dtype=np.float16)
                elif self.precision == "32":
                    predictions = np.load(os.path.join(
                        dir_ensemble, basename)).astype(dtype=np.float32)
                elif self.precision == "64":
                    predictions = np.load(os.path.join(
                        dir_ensemble, basename)).astype(dtype=np.float64)
                else:
                    predictions = np.load(os.path.join(dir_ensemble, basename))
                score = calculate_score(targets_ensemble, predictions,
                                        self.task_type, self.metric,
                                        predictions.shape[1])
            except Exception as e:
                # Best effort: an unreadable file is scored as worse than
                # random so that it is only used as a last resort below.
                self.logger.warning('Error loading %s: %s', basename, e)
                score = -1

            model_names_to_scores[model_name] = score
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))

            if self.ensemble_nbest is not None:
                if score <= 0.001:
                    self.logger.error('Model only predicts at random: ' +
                                      model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                # If we have less models in our ensemble than ensemble_nbest
                # add the current model if it is better than random
                elif len(scores_nbest) < self.ensemble_nbest:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    include_num_runs.append((automl_seed, num_run))
                    model_names.append(model_name)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))

                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        self.logger.debug(
                            'Worst model in our ensemble: %s with '
                            'score %f will be replaced by model %s '
                            'with score %f',
                            model_names[idx], scores_nbest[idx],
                            model_name, score)
                        # Exclude the old model
                        del scores_nbest[idx]
                        scores_nbest.append(score)
                        del include_num_runs[idx]
                        del indices_nbest[idx]
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        del model_names[idx]
                        model_names.append(model_name)
                    # Otherwise exclude the current model from the ensemble
                    else:
                        # include_num_runs.append(True)
                        pass
            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    # include_num_runs.append(True)
                    self.logger.error('Model only predicts at random: ' +
                                      model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                else:
                    include_num_runs.append((automl_seed, num_run))

            model_idx += 1

        # If there is no model better than random guessing, we have to use
        # all models which do random guessing
        if len(include_num_runs) == 0:
            include_num_runs = backup_num_runs

        indices_to_model_names = dict()
        indices_to_run_num = dict()
        for i, model_name in enumerate(dir_ensemble_list):
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))
            if (automl_seed, num_run) in include_num_runs:
                num_indices = len(indices_to_model_names)
                indices_to_model_names[num_indices] = model_name
                indices_to_run_num[num_indices] = (automl_seed, num_run)

        try:
            all_predictions_train, all_predictions_valid, all_predictions_test =\
                self.get_all_predictions(dir_ensemble, dir_ensemble_list,
                                         dir_valid, dir_valid_list,
                                         dir_test, dir_test_list,
                                         include_num_runs,
                                         model_and_automl_re,
                                         self.precision)
        except IOError:
            # NOTE(review): used_time is not refreshed on this path; the loop
            # condition uses a slightly stale value — confirm intentional.
            self.logger.error('Could not load the predictions.')
            continue

        if len(include_num_runs) == 0:
            self.logger.error('All models do just random guessing')
            time.sleep(2)
            continue
        else:
            ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                         task_type=self.task_type,
                                         metric=self.metric)
            try:
                ensemble.fit(all_predictions_train, targets_ensemble,
                             include_num_runs)
                self.logger.info(ensemble)
            except ValueError as e:
                self.logger.error('Caught ValueError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except IndexError as e:
                self.logger.error('Caught IndexError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except Exception as e:
                self.logger.error('Caught error! %s', str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue

        # Output the score
        self.logger.info('Training performance: %f' % ensemble.train_score_)
        self.logger.info('Building the ensemble took %f seconds' %
                         watch.wall_elapsed('ensemble_iter_' +
                                            str(num_iteration)))

        # Set this variable here to avoid re-running the ensemble builder
        # every two seconds in case the ensemble did not change
        current_num_models = len(dir_ensemble_list)

        ensemble_predictions = ensemble.predict(all_predictions_train)
        if sys.version_info[0] == 2:
            # Python 2 cannot hash a writable buffer.
            ensemble_predictions.flags.writeable = False
            current_hash = hash(ensemble_predictions.data)
        else:
            current_hash = hash(ensemble_predictions.data.tobytes())

        # Only output a new ensemble and new predictions if the output of the
        # ensemble would actually change!
        # TODO this is neither safe (collisions, tests only with the ensemble
        # prediction, but not the ensemble), implement a hash function for
        # each possible ensemble builder.
        if last_hash is not None:
            if current_hash == last_hash:
                self.logger.info('Ensemble output did not change.')
                time.sleep(2)
                continue
            else:
                last_hash = current_hash
        else:
            last_hash = current_hash

        # Save the ensemble for later use in the main auto-sklearn module!
        backend.save_ensemble(ensemble, index_run, self.seed)

        # Save predictions for valid and test data set
        if len(dir_valid_list) == len(dir_ensemble_list):
            all_predictions_valid = np.array(all_predictions_valid)
            ensemble_predictions_valid = ensemble.predict(
                all_predictions_valid)
            if self.task_type == BINARY_CLASSIFICATION:
                ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
            if self.low_precision:
                if self.task_type in [BINARY_CLASSIFICATION,
                                      MULTICLASS_CLASSIFICATION,
                                      MULTILABEL_CLASSIFICATION]:
                    # Flush tiny probabilities to exactly zero.
                    ensemble_predictions_valid[
                        ensemble_predictions_valid < 1e-4] = 0.
                if self.metric in [BAC_METRIC, F1_METRIC]:
                    # Binarize: thresholding, or one-hot of the argmax for
                    # multiclass output with more than one column.
                    bin_array = np.zeros(ensemble_predictions_valid.shape,
                                         dtype=np.int32)
                    if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                            ensemble_predictions_valid.shape[1] == 1):
                        bin_array[ensemble_predictions_valid >= 0.5] = 1
                    else:
                        sample_num = ensemble_predictions_valid.shape[0]
                        for i in range(sample_num):
                            j = np.argmax(ensemble_predictions_valid[i, :])
                            bin_array[i, j] = 1
                    ensemble_predictions_valid = bin_array
            # Choose the number of decimal digits from the output size to
            # bound the written file size.
            if self.task_type in CLASSIFICATION_TASKS:
                if ensemble_predictions_valid.size < (20000 * 20):
                    precision = 3
                else:
                    precision = 2
            else:
                if ensemble_predictions_valid.size > 1000000:
                    precision = 4
                else:
                    # File size maximally 2.1MB
                    precision = 6
            backend.save_predictions_as_txt(ensemble_predictions_valid,
                                            'valid', index_run,
                                            prefix=self.dataset_name,
                                            precision=precision)
        else:
            # BUGFIX: the two literals used to concatenate to '(%d)as'.
            self.logger.info('Could not find as many validation set '
                             'predictions (%d) as ensemble predictions '
                             '(%d)!.', len(dir_valid_list),
                             len(dir_ensemble_list))

        del all_predictions_valid

        if len(dir_test_list) == len(dir_ensemble_list):
            all_predictions_test = np.array(all_predictions_test)
            ensemble_predictions_test = ensemble.predict(all_predictions_test)
            if self.task_type == BINARY_CLASSIFICATION:
                ensemble_predictions_test = ensemble_predictions_test[:, 1]
            if self.low_precision:
                if self.task_type in [BINARY_CLASSIFICATION,
                                      MULTICLASS_CLASSIFICATION,
                                      MULTILABEL_CLASSIFICATION]:
                    ensemble_predictions_test[
                        ensemble_predictions_test < 1e-4] = 0.
                if self.metric in [BAC_METRIC, F1_METRIC]:
                    bin_array = np.zeros(ensemble_predictions_test.shape,
                                         dtype=np.int32)
                    if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                            ensemble_predictions_test.shape[1] == 1):
                        bin_array[ensemble_predictions_test >= 0.5] = 1
                    else:
                        sample_num = ensemble_predictions_test.shape[0]
                        for i in range(sample_num):
                            j = np.argmax(ensemble_predictions_test[i, :])
                            bin_array[i, j] = 1
                    ensemble_predictions_test = bin_array
            if self.task_type in CLASSIFICATION_TASKS:
                if ensemble_predictions_test.size < (20000 * 20):
                    precision = 3
                else:
                    precision = 2
            else:
                if ensemble_predictions_test.size > 1000000:
                    precision = 4
                else:
                    precision = 6
            backend.save_predictions_as_txt(ensemble_predictions_test,
                                            'test', index_run,
                                            prefix=self.dataset_name,
                                            precision=precision)
        else:
            self.logger.info('Could not find as many test set predictions '
                             '(%d) as ensemble predictions (%d)!',
                             len(dir_test_list), len(dir_ensemble_list))

        del all_predictions_test

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('index_run' + str(index_run))
        time_iter = watch.get_wall_dur('index_run' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return
def main(self):
    """Run the ensemble-building loop.

    Repeatedly scans the prediction directories below
    ``self.autosklearn_tmp_dir/.auto-sklearn``, scores every model's
    ensemble-set predictions, fits an ``EnsembleSelection`` over the
    selected models and saves the ensemble (plus validation/test set
    predictions when they are complete) through the ``Backend``.

    The loop runs while less than ``self.limit`` seconds have elapsed,
    or — when ``self.max_iterations`` is positive — until that many
    iterations have been performed.
    """
    watch = StopWatch()
    watch.start_task('ensemble_builder')

    used_time = 0
    time_iter = 0
    index_run = 0
    num_iteration = 0
    current_num_models = 0
    last_hash = None
    current_hash = None

    backend = Backend(self.output_dir, self.autosklearn_tmp_dir)
    dir_ensemble = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                                'predictions_ensemble')
    dir_valid = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                             'predictions_valid')
    dir_test = os.path.join(self.autosklearn_tmp_dir, '.auto-sklearn',
                            'predictions_test')
    paths_ = [dir_ensemble, dir_valid, dir_test]
    dir_ensemble_list_mtimes = []

    self.logger.debug('Starting main loop with %f seconds and %d iterations '
                      'left.' % (self.limit - used_time, num_iteration))
    while used_time < self.limit or (self.max_iterations > 0 and
                                     self.max_iterations >= num_iteration):
        num_iteration += 1
        self.logger.debug('Time left: %f', self.limit - used_time)
        self.logger.debug('Time last ensemble building: %f', time_iter)

        # Reload the ensemble targets every iteration, important, because cv
        # may update the ensemble targets in the cause of running auto-sklearn
        # TODO update cv in order to not need this any more!
        targets_ensemble = backend.load_targets_ensemble()

        # Load the predictions from the models
        exists = [os.path.isdir(dir_) for dir_ in paths_]
        if not exists[0]:  # all(exists):
            self.logger.debug('Prediction directory %s does not exist!'
                              % dir_ensemble)
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if self.shared_mode is False:
            # Only pick up prediction files written by our own seed.
            dir_ensemble_list = sorted(glob.glob(os.path.join(
                dir_ensemble, 'predictions_ensemble_%s_*.npy' % self.seed)))
            if exists[1]:
                dir_valid_list = sorted(glob.glob(os.path.join(
                    dir_valid, 'predictions_valid_%s_*.npy' % self.seed)))
            else:
                dir_valid_list = []
            if exists[2]:
                dir_test_list = sorted(glob.glob(os.path.join(
                    dir_test, 'predictions_test_%s_*.npy' % self.seed)))
            else:
                dir_test_list = []
        else:
            # Shared mode: use every prediction file, regardless of seed.
            dir_ensemble_list = sorted(os.listdir(dir_ensemble))
            dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
            dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

        # Check the modification times because predictions can be updated
        # over time!
        old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
        dir_ensemble_list_mtimes = []
        for dir_ensemble_file in dir_ensemble_list:
            if dir_ensemble_file.endswith("/"):
                dir_ensemble_file = dir_ensemble_file[:-1]
            basename = os.path.basename(dir_ensemble_file)
            dir_ensemble_file = os.path.join(dir_ensemble, basename)
            mtime = os.path.getmtime(dir_ensemble_file)
            dir_ensemble_list_mtimes.append(mtime)

        if len(dir_ensemble_list) == 0:
            self.logger.debug('Directories are empty')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if len(dir_ensemble_list) <= current_num_models and \
                old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
            self.logger.debug('Nothing has changed since the last time')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('index_run' + str(index_run))
        watch.start_task('ensemble_iter_' + str(num_iteration))

        # List of num_runs (which are in the filename) which will be included
        # later
        include_num_runs = []
        backup_num_runs = []
        model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
        if self.ensemble_nbest is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the model that are currently in our ensemble
            indices_nbest = []
            # The names of the models
            model_names = []

        model_names_to_scores = dict()

        model_idx = 0
        for model_name in dir_ensemble_list:
            if model_name.endswith("/"):
                model_name = model_name[:-1]
            basename = os.path.basename(model_name)

            try:
                # BUGFIX: use '==' for string comparison; 'is' tests object
                # identity and only works by accident of CPython interning.
                if self.precision == "16":
                    predictions = np.load(os.path.join(
                        dir_ensemble, basename)).astype(dtype=np.float16)
                elif self.precision == "32":
                    predictions = np.load(os.path.join(
                        dir_ensemble, basename)).astype(dtype=np.float32)
                elif self.precision == "64":
                    predictions = np.load(os.path.join(
                        dir_ensemble, basename)).astype(dtype=np.float64)
                else:
                    predictions = np.load(os.path.join(dir_ensemble, basename))
                score = calculate_score(targets_ensemble, predictions,
                                        self.task_type, self.metric,
                                        predictions.shape[1])
            except Exception as e:
                # Best effort: an unreadable file is scored as worse than
                # random so that it is only used as a last resort below.
                self.logger.warning('Error loading %s: %s', basename, e)
                score = -1

            model_names_to_scores[model_name] = score
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))

            if self.ensemble_nbest is not None:
                if score <= 0.001:
                    self.logger.error('Model only predicts at random: ' +
                                      model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                # If we have less models in our ensemble than ensemble_nbest
                # add the current model if it is better than random
                elif len(scores_nbest) < self.ensemble_nbest:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    include_num_runs.append((automl_seed, num_run))
                    model_names.append(model_name)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))

                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        self.logger.debug(
                            'Worst model in our ensemble: %s with '
                            'score %f will be replaced by model %s '
                            'with score %f',
                            model_names[idx], scores_nbest[idx],
                            model_name, score)
                        # Exclude the old model
                        del scores_nbest[idx]
                        scores_nbest.append(score)
                        del include_num_runs[idx]
                        del indices_nbest[idx]
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        del model_names[idx]
                        model_names.append(model_name)
                    # Otherwise exclude the current model from the ensemble
                    else:
                        # include_num_runs.append(True)
                        pass
            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    # include_num_runs.append(True)
                    self.logger.error('Model only predicts at random: ' +
                                      model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                else:
                    include_num_runs.append((automl_seed, num_run))

            model_idx += 1

        # If there is no model better than random guessing, we have to use
        # all models which do random guessing
        if len(include_num_runs) == 0:
            include_num_runs = backup_num_runs

        indices_to_model_names = dict()
        indices_to_run_num = dict()
        for i, model_name in enumerate(dir_ensemble_list):
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))
            if (automl_seed, num_run) in include_num_runs:
                num_indices = len(indices_to_model_names)
                indices_to_model_names[num_indices] = model_name
                indices_to_run_num[num_indices] = (automl_seed, num_run)

        try:
            all_predictions_train, all_predictions_valid, all_predictions_test =\
                self.get_all_predictions(dir_ensemble, dir_ensemble_list,
                                         dir_valid, dir_valid_list,
                                         dir_test, dir_test_list,
                                         include_num_runs,
                                         model_and_automl_re,
                                         self.precision)
        except IOError:
            # NOTE(review): used_time is not refreshed on this path; the loop
            # condition uses a slightly stale value — confirm intentional.
            self.logger.error('Could not load the predictions.')
            continue

        if len(include_num_runs) == 0:
            self.logger.error('All models do just random guessing')
            time.sleep(2)
            continue
        else:
            ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                         task_type=self.task_type,
                                         metric=self.metric)
            try:
                ensemble.fit(all_predictions_train, targets_ensemble,
                             include_num_runs)
                self.logger.info(ensemble)
            except ValueError as e:
                self.logger.error('Caught ValueError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except IndexError as e:
                self.logger.error('Caught IndexError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except Exception as e:
                self.logger.error('Caught error! %s', str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue

        # Output the score
        self.logger.info('Training performance: %f' % ensemble.train_score_)
        self.logger.info('Building the ensemble took %f seconds' %
                         watch.wall_elapsed('ensemble_iter_' +
                                            str(num_iteration)))

        # Set this variable here to avoid re-running the ensemble builder
        # every two seconds in case the ensemble did not change
        current_num_models = len(dir_ensemble_list)

        ensemble_predictions = ensemble.predict(all_predictions_train)
        if sys.version_info[0] == 2:
            # Python 2 cannot hash a writable buffer.
            ensemble_predictions.flags.writeable = False
            current_hash = hash(ensemble_predictions.data)
        else:
            current_hash = hash(ensemble_predictions.data.tobytes())

        # Only output a new ensemble and new predictions if the output of the
        # ensemble would actually change!
        # TODO this is neither safe (collisions, tests only with the ensemble
        # prediction, but not the ensemble), implement a hash function for
        # each possible ensemble builder.
        if last_hash is not None:
            if current_hash == last_hash:
                self.logger.info('Ensemble output did not change.')
                time.sleep(2)
                continue
            else:
                last_hash = current_hash
        else:
            last_hash = current_hash

        # Save the ensemble for later use in the main auto-sklearn module!
        backend.save_ensemble(ensemble, index_run, self.seed)

        # Save predictions for valid and test data set
        if len(dir_valid_list) == len(dir_ensemble_list):
            all_predictions_valid = np.array(all_predictions_valid)
            ensemble_predictions_valid = ensemble.predict(
                all_predictions_valid)
            if self.task_type == BINARY_CLASSIFICATION:
                ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
            if self.low_precision:
                if self.task_type in [BINARY_CLASSIFICATION,
                                      MULTICLASS_CLASSIFICATION,
                                      MULTILABEL_CLASSIFICATION]:
                    # Flush tiny probabilities to exactly zero.
                    ensemble_predictions_valid[
                        ensemble_predictions_valid < 1e-4] = 0.
                if self.metric in [BAC_METRIC, F1_METRIC]:
                    # Binarize: thresholding, or one-hot of the argmax for
                    # multiclass output with more than one column.
                    bin_array = np.zeros(ensemble_predictions_valid.shape,
                                         dtype=np.int32)
                    if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                            ensemble_predictions_valid.shape[1] == 1):
                        bin_array[ensemble_predictions_valid >= 0.5] = 1
                    else:
                        sample_num = ensemble_predictions_valid.shape[0]
                        for i in range(sample_num):
                            j = np.argmax(ensemble_predictions_valid[i, :])
                            bin_array[i, j] = 1
                    ensemble_predictions_valid = bin_array
            # Choose the number of decimal digits from the output size to
            # bound the written file size.
            if self.task_type in CLASSIFICATION_TASKS:
                if ensemble_predictions_valid.size < (20000 * 20):
                    precision = 3
                else:
                    precision = 2
            else:
                if ensemble_predictions_valid.size > 1000000:
                    precision = 4
                else:
                    # File size maximally 2.1MB
                    precision = 6
            backend.save_predictions_as_txt(ensemble_predictions_valid,
                                            'valid', index_run,
                                            prefix=self.dataset_name,
                                            precision=precision)
        else:
            # BUGFIX: the two literals used to concatenate to '(%d)as'.
            self.logger.info('Could not find as many validation set '
                             'predictions (%d) as ensemble predictions '
                             '(%d)!.', len(dir_valid_list),
                             len(dir_ensemble_list))

        del all_predictions_valid

        if len(dir_test_list) == len(dir_ensemble_list):
            all_predictions_test = np.array(all_predictions_test)
            ensemble_predictions_test = ensemble.predict(all_predictions_test)
            if self.task_type == BINARY_CLASSIFICATION:
                ensemble_predictions_test = ensemble_predictions_test[:, 1]
            if self.low_precision:
                if self.task_type in [BINARY_CLASSIFICATION,
                                      MULTICLASS_CLASSIFICATION,
                                      MULTILABEL_CLASSIFICATION]:
                    ensemble_predictions_test[
                        ensemble_predictions_test < 1e-4] = 0.
                if self.metric in [BAC_METRIC, F1_METRIC]:
                    bin_array = np.zeros(ensemble_predictions_test.shape,
                                         dtype=np.int32)
                    if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                            ensemble_predictions_test.shape[1] == 1):
                        bin_array[ensemble_predictions_test >= 0.5] = 1
                    else:
                        sample_num = ensemble_predictions_test.shape[0]
                        for i in range(sample_num):
                            j = np.argmax(ensemble_predictions_test[i, :])
                            bin_array[i, j] = 1
                    ensemble_predictions_test = bin_array
            if self.task_type in CLASSIFICATION_TASKS:
                if ensemble_predictions_test.size < (20000 * 20):
                    precision = 3
                else:
                    precision = 2
            else:
                if ensemble_predictions_test.size > 1000000:
                    precision = 4
                else:
                    precision = 6
            backend.save_predictions_as_txt(ensemble_predictions_test,
                                            'test', index_run,
                                            prefix=self.dataset_name,
                                            precision=precision)
        else:
            self.logger.info('Could not find as many test set predictions '
                             '(%d) as ensemble predictions (%d)!',
                             len(dir_test_list), len(dir_ensemble_list))

        del all_predictions_test

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('index_run' + str(index_run))
        time_iter = watch.get_wall_dur('index_run' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return
def main(autosklearn_tmp_dir, basename, task_type, metric, limit, output_dir,
         ensemble_size=None, ensemble_nbest=None, seed=1, shared_mode=False,
         max_iterations=-1, precision="32"):
    """Build ensembles from stored per-model predictions until a limit is hit.

    Scans the ``.auto-sklearn`` prediction directories below
    *autosklearn_tmp_dir*, scores each model's ensemble-set predictions,
    runs ``ensemble_selection`` over the candidates and saves the ensemble
    weights plus (when complete) validation/test set predictions through
    the ``Backend``.

    Parameters
    ----------
    autosklearn_tmp_dir : temporary directory holding the prediction files.
    basename : dataset name, used as prefix when saving predictions.
    task_type, metric : forwarded to ``calculate_score`` / selection.
    limit : wall-clock time budget in seconds.
    output_dir : directory handed to ``Backend`` for output.
    ensemble_size : size parameter for ``ensemble_selection``.
    ensemble_nbest : if not None, keep only the n best single models.
    seed : auto-sklearn seed used to filter files when not in shared mode.
    shared_mode : if True, use every prediction file regardless of seed.
    max_iterations : if > 0, also keep looping until this many iterations ran.
    precision : "16"/"32"/"64" selects the dtype predictions are cast to.
    """
    watch = StopWatch()
    watch.start_task('ensemble_builder')

    used_time = 0
    time_iter = 0
    index_run = 0
    num_iteration = 0
    current_num_models = 0

    backend = Backend(output_dir, autosklearn_tmp_dir)
    dir_ensemble = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                                'predictions_ensemble')
    dir_valid = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                             'predictions_valid')
    dir_test = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                            'predictions_test')
    paths_ = [dir_ensemble, dir_valid, dir_test]

    targets_ensemble = backend.load_targets_ensemble()
    dir_ensemble_list_mtimes = []

    while used_time < limit or (max_iterations > 0 and
                                max_iterations >= num_iteration):
        num_iteration += 1
        logger.debug('Time left: %f', limit - used_time)
        logger.debug('Time last iteration: %f', time_iter)

        # Load the predictions from the models
        exists = [os.path.isdir(dir_) for dir_ in paths_]
        if not exists[0]:  # all(exists):
            logger.debug('Prediction directory %s does not exist!' %
                         dir_ensemble)
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if shared_mode is False:
            # Only pick up prediction files written by our own seed.
            dir_ensemble_list = sorted(glob.glob(os.path.join(
                dir_ensemble, 'predictions_ensemble_%s_*.npy' % seed)))
            if exists[1]:
                dir_valid_list = sorted(glob.glob(os.path.join(
                    dir_valid, 'predictions_valid_%s_*.npy' % seed)))
            else:
                dir_valid_list = []
            if exists[2]:
                dir_test_list = sorted(glob.glob(os.path.join(
                    dir_test, 'predictions_test_%s_*.npy' % seed)))
            else:
                dir_test_list = []
        else:
            dir_ensemble_list = sorted(os.listdir(dir_ensemble))
            dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
            dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

        # Check the modification times because predictions can be updated
        # over time!
        old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
        dir_ensemble_list_mtimes = []
        for dir_ensemble_file in dir_ensemble_list:
            dir_ensemble_file = os.path.join(dir_ensemble, dir_ensemble_file)
            mtime = os.path.getmtime(dir_ensemble_file)
            dir_ensemble_list_mtimes.append(mtime)

        if len(dir_ensemble_list) == 0:
            logger.debug('Directories are empty')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if len(dir_ensemble_list) <= current_num_models and \
                old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
            logger.debug('Nothing has changed since the last time')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('ensemble_iter_' + str(index_run))

        # List of num_runs (which are in the filename) which will be included
        # later
        include_num_runs = []
        backup_num_runs = []
        model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
        if ensemble_nbest is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the model that are currently in our ensemble
            indices_nbest = []
            # The names of the models
            model_names = []

        model_names_to_scores = dict()

        model_idx = 0
        for model_name in dir_ensemble_list:
            # BUGFIX: use '==' for string comparison; 'is' tests object
            # identity and only works by accident of CPython interning.
            if precision == "16":
                predictions = np.load(os.path.join(
                    dir_ensemble, model_name)).astype(dtype=np.float16)
            elif precision == "32":
                predictions = np.load(os.path.join(
                    dir_ensemble, model_name)).astype(dtype=np.float32)
            elif precision == "64":
                predictions = np.load(os.path.join(
                    dir_ensemble, model_name)).astype(dtype=np.float64)
            else:
                predictions = np.load(os.path.join(dir_ensemble, model_name))

            score = calculate_score(targets_ensemble, predictions,
                                    task_type, metric,
                                    predictions.shape[1])
            model_names_to_scores[model_name] = score
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))

            if ensemble_nbest is not None:
                if score <= 0.001:
                    # include_num_runs.append(True)
                    logger.error('Model only predicts at random: ' +
                                 model_name + ' has score: ' + str(score))
                    # BUGFIX: store the (seed, run) tuple like every other
                    # path; a bare num_run could never match the
                    # '(automl_seed, num_run) in include_num_runs' test below.
                    backup_num_runs.append((automl_seed, num_run))
                # If we have less models in our ensemble than ensemble_nbest
                # add the current model if it is better than random
                elif len(scores_nbest) < ensemble_nbest:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    include_num_runs.append((automl_seed, num_run))
                    model_names.append(model_name)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))

                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        logger.debug('Worst model in our ensemble: %s with '
                                     'score %f will be replaced by model %s '
                                     'with score %f',
                                     model_names[idx], scores_nbest[idx],
                                     model_name, score)
                        # Exclude the old model
                        del scores_nbest[idx]
                        scores_nbest.append(score)
                        del include_num_runs[idx]
                        del indices_nbest[idx]
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        del model_names[idx]
                        model_names.append(model_name)
                    # Otherwise exclude the current model from the ensemble
                    else:
                        # include_num_runs.append(True)
                        pass
            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    # include_num_runs.append(True)
                    logger.error('Model only predicts at random: ' +
                                 model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                else:
                    include_num_runs.append((automl_seed, num_run))

            model_idx += 1

        # If there is no model better than random guessing, we have to use
        # all models which do random guessing
        if len(include_num_runs) == 0:
            include_num_runs = backup_num_runs

        indices_to_model_names = dict()
        indices_to_run_num = dict()
        for i, model_name in enumerate(dir_ensemble_list):
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))
            if (automl_seed, num_run) in include_num_runs:
                num_indices = len(indices_to_model_names)
                indices_to_model_names[num_indices] = model_name
                indices_to_run_num[num_indices] = (automl_seed, num_run)

        # logging.info("Indices to model names:")
        # logging.info(indices_to_model_names)

        # for i, item in enumerate(sorted(model_names_to_scores.items(),
        #                                 key=lambda t: t[1])):
        #     logging.info("%d: %s", i, item)

        include_num_runs = set(include_num_runs)

        all_predictions_train = get_predictions(dir_ensemble,
                                                dir_ensemble_list,
                                                include_num_runs,
                                                model_and_automl_re,
                                                precision)

        # if len(all_predictions_train) == len(all_predictions_test) == len(
        #         all_predictions_valid) == 0:
        if len(include_num_runs) == 0:
            logger.error('All models do just random guessing')
            time.sleep(2)
            continue
        else:
            try:
                indices, trajectory = ensemble_selection(
                    np.array(all_predictions_train), targets_ensemble,
                    ensemble_size, task_type, metric)
                logger.info('Trajectory and indices!')
                logger.info(trajectory)
                logger.info(indices)
            except ValueError as e:
                logger.error('Caught ValueError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue
            except Exception as e:
                # BUGFIX: 'e.message' only exists on Python 2 and not on all
                # exception types; str(e) is always safe.
                logger.error('Caught error! %s', str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                time.sleep(2)
                continue

        # Output the score
        logger.info('Training performance: %f' % trajectory[-1])

        # Print the ensemble members:
        ensemble_members_run_numbers = dict()
        ensemble_members = Counter(indices).most_common()
        ensemble_members_string = 'Ensemble members:\n'
        logger.info(ensemble_members)
        for ensemble_member in ensemble_members:
            # Weight of a member = its selection frequency in `indices`.
            weight = float(ensemble_member[1]) / len(indices)
            ensemble_members_string += \
                ('    %s; weight: %10f; performance: %10f\n' %
                 (indices_to_model_names[ensemble_member[0]],
                  weight,
                  model_names_to_scores[
                      indices_to_model_names[ensemble_member[0]]]))
            ensemble_members_run_numbers[
                indices_to_run_num[ensemble_member[0]]] = weight
        logger.info(ensemble_members_string)

        # Save the ensemble indices for later use!
        backend.save_ensemble_indices_weights(ensemble_members_run_numbers,
                                              index_run, seed)

        all_predictions_valid = get_predictions(dir_valid,
                                                dir_valid_list,
                                                include_num_runs,
                                                model_and_automl_re,
                                                precision)

        # Save predictions for valid and test data set
        if len(dir_valid_list) == len(dir_ensemble_list):
            all_predictions_valid = np.array(all_predictions_valid)
            # Ensemble prediction = mean over the (repeated) selected models.
            ensemble_predictions_valid = np.mean(
                all_predictions_valid[indices.astype(int)], axis=0)
            backend.save_predictions_as_txt(ensemble_predictions_valid,
                                            'valid', index_run,
                                            prefix=basename)
        else:
            # BUGFIX: the two literals used to concatenate to '(%d)as'.
            logger.info('Could not find as many validation set predictions '
                        '(%d) as ensemble predictions (%d)!.',
                        len(dir_valid_list), len(dir_ensemble_list))

        del all_predictions_valid

        all_predictions_test = get_predictions(dir_test,
                                               dir_test_list,
                                               include_num_runs,
                                               model_and_automl_re,
                                               precision)

        if len(dir_test_list) == len(dir_ensemble_list):
            all_predictions_test = np.array(all_predictions_test)
            ensemble_predictions_test = np.mean(
                all_predictions_test[indices.astype(int)], axis=0)
            backend.save_predictions_as_txt(ensemble_predictions_test,
                                            'test', index_run,
                                            prefix=basename)
        else:
            logger.info('Could not find as many test set predictions (%d) as '
                        'ensemble predictions (%d)!',
                        len(dir_test_list), len(dir_ensemble_list))

        del all_predictions_test

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('ensemble_iter_' + str(index_run))
        time_iter = watch.get_wall_dur('ensemble_iter_' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return
def main(autosklearn_tmp_dir,
         dataset_name,
         task_type,
         metric,
         limit,
         output_dir,
         ensemble_size=None,
         ensemble_nbest=None,
         seed=1,
         shared_mode=False,
         max_iterations=-1,
         precision="32"):
    """Continuously build ensembles from per-model prediction dumps.

    Polls the ``predictions_ensemble``/``predictions_valid``/
    ``predictions_test`` directories under ``autosklearn_tmp_dir``, scores
    every model's ensemble-set predictions, selects candidate models
    (optionally only the ``ensemble_nbest`` best ones), fits an
    ``EnsembleSelection`` and persists the ensemble and its valid/test
    predictions through the ``Backend``.

    The loop keeps running while less than ``limit`` seconds of wall-clock
    time were used, or — if ``max_iterations`` is positive — while at most
    that many iterations have run.

    Parameters
    ----------
    autosklearn_tmp_dir : str
        Temporary directory containing the ``.auto-sklearn`` prediction dirs.
    dataset_name : str
        Prefix used when saving valid/test ensemble predictions.
    task_type : int
        Task-type constant forwarded to scoring and ensemble fitting.
    metric : str
        Metric name forwarded to ``calculate_score``.
    limit : float
        Wall-clock time budget in seconds.
    output_dir : str
        Output directory handed to the ``Backend``.
    ensemble_size : int, optional
        Size of the ensemble built by ``EnsembleSelection``.
    ensemble_nbest : int, optional
        If given, keep only the best ``ensemble_nbest`` models as candidates.
    seed : int
        Seed selecting this run's prediction files (non-shared mode).
    shared_mode : bool
        If True, consider prediction files written by every seed.
    max_iterations : int
        If > 0, additional upper bound on the number of loop iterations.
    precision : str
        One of "16"/"32"/"64"; loaded predictions are cast to that float
        width. Any other value leaves the stored dtype untouched.
    """
    watch = StopWatch()
    watch.start_task('ensemble_builder')

    used_time = 0
    time_iter = 0
    index_run = 0
    num_iteration = 0
    current_num_models = 0

    backend = Backend(output_dir, autosklearn_tmp_dir)
    dir_ensemble = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                                'predictions_ensemble')
    dir_valid = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                             'predictions_valid')
    dir_test = os.path.join(autosklearn_tmp_dir, '.auto-sklearn',
                            'predictions_test')
    paths_ = [dir_ensemble, dir_valid, dir_test]

    dir_ensemble_list_mtimes = []

    # BUGFIX: the original compared strings with `is` ("precision is '16'"),
    # which tests object identity, not equality, and is only accidentally
    # true for interned literals.  Use an explicit mapping instead.
    dtype_for_precision = {"16": np.float16,
                           "32": np.float32,
                           "64": np.float64}

    while used_time < limit or (max_iterations > 0 and
                                max_iterations >= num_iteration):
        num_iteration += 1
        logger.debug('Time left: %f', limit - used_time)
        logger.debug('Time last iteration: %f', time_iter)

        # Reload the ensemble targets every iteration, important, because cv
        # may update the ensemble targets in the course of running
        # auto-sklearn.  TODO update cv in order to not need this any more!
        targets_ensemble = backend.load_targets_ensemble()

        # Load the predictions from the models
        exists = [os.path.isdir(dir_) for dir_ in paths_]
        if not exists[0]:  # all(exists):
            logger.debug('Prediction directory %s does not exist!' %
                         dir_ensemble)
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if shared_mode is False:
            dir_ensemble_list = sorted(glob.glob(os.path.join(
                dir_ensemble, 'predictions_ensemble_%s_*.npy' % seed)))
            if exists[1]:
                dir_valid_list = sorted(glob.glob(os.path.join(
                    dir_valid, 'predictions_valid_%s_*.npy' % seed)))
            else:
                dir_valid_list = []
            if exists[2]:
                dir_test_list = sorted(glob.glob(os.path.join(
                    dir_test, 'predictions_test_%s_*.npy' % seed)))
            else:
                dir_test_list = []
        else:
            dir_ensemble_list = sorted(os.listdir(dir_ensemble))
            dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
            dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

        # Check the modification times because predictions can be updated
        # over time!
        old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
        dir_ensemble_list_mtimes = []
        for dir_ensemble_file in dir_ensemble_list:
            if dir_ensemble_file.endswith("/"):
                dir_ensemble_file = dir_ensemble_file[:-1]
            basename = os.path.basename(dir_ensemble_file)
            dir_ensemble_file = os.path.join(dir_ensemble, basename)
            mtime = os.path.getmtime(dir_ensemble_file)
            dir_ensemble_list_mtimes.append(mtime)

        if len(dir_ensemble_list) == 0:
            logger.debug('Directories are empty')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        if len(dir_ensemble_list) <= current_num_models and \
                old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
            logger.debug('Nothing has changed since the last time')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('ensemble_iter_' + str(index_run))

        # List of (automl_seed, num_run) tuples (taken from the filenames)
        # which will be included in the ensemble later.
        include_num_runs = []
        # Fallback candidates: models that only predict at random; used when
        # nothing beats random guessing.
        backup_num_runs = []
        model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
        if ensemble_nbest is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the model that are currently in our ensemble
            indices_nbest = []
            # The names of the models
            model_names = []

        model_names_to_scores = dict()

        for model_idx, model_name in enumerate(dir_ensemble_list):
            if model_name.endswith("/"):
                model_name = model_name[:-1]
            basename = os.path.basename(model_name)

            predictions = np.load(os.path.join(dir_ensemble, basename))
            dtype = dtype_for_precision.get(precision)
            if dtype is not None:
                predictions = predictions.astype(dtype=dtype)

            try:
                score = calculate_score(targets_ensemble, predictions,
                                        task_type, metric,
                                        predictions.shape[1])
            # BUGFIX: narrowed the original bare `except:` which also
            # swallowed KeyboardInterrupt/SystemExit.  Best effort is kept: a
            # model whose predictions cannot be scored is treated like a
            # random-guessing model below.
            except Exception:
                score = -1

            model_names_to_scores[model_name] = score
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))

            if ensemble_nbest is not None:
                if score <= 0.001:
                    logger.error('Model only predicts at random: ' +
                                 model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                # If we have less models in our ensemble than ensemble_nbest
                # add the current model if it is better than random
                elif len(scores_nbest) < ensemble_nbest:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    include_num_runs.append((automl_seed, num_run))
                    model_names.append(model_name)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))
                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        logger.debug('Worst model in our ensemble: %s with '
                                     'score %f will be replaced by model %s '
                                     'with score %f', model_names[idx],
                                     scores_nbest[idx], model_name, score)
                        # Exclude the old model
                        del scores_nbest[idx]
                        scores_nbest.append(score)
                        del include_num_runs[idx]
                        del indices_nbest[idx]
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        del model_names[idx]
                        model_names.append(model_name)
                    # Otherwise exclude the current model from the ensemble
            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    logger.error('Model only predicts at random: ' +
                                 model_name + ' has score: ' + str(score))
                    backup_num_runs.append((automl_seed, num_run))
                else:
                    include_num_runs.append((automl_seed, num_run))

        # If there is no model better than random guessing, we have to use
        # all models which do random guessing
        if len(include_num_runs) == 0:
            include_num_runs = backup_num_runs

        indices_to_model_names = dict()
        indices_to_run_num = dict()
        for i, model_name in enumerate(dir_ensemble_list):
            match = model_and_automl_re.search(model_name)
            automl_seed = int(match.group(1))
            num_run = int(match.group(2))
            if (automl_seed, num_run) in include_num_runs:
                num_indices = len(indices_to_model_names)
                indices_to_model_names[num_indices] = model_name
                indices_to_run_num[num_indices] = (automl_seed, num_run)

        try:
            all_predictions_train, all_predictions_valid, all_predictions_test = \
                get_all_predictions(dir_ensemble, dir_ensemble_list,
                                    dir_valid, dir_valid_list,
                                    dir_test, dir_test_list,
                                    include_num_runs,
                                    model_and_automl_re,
                                    precision)
        except IOError:
            logger.error('Could not load the predictions.')
            continue

        if len(include_num_runs) == 0:
            logger.error('All models do just random guessing')
            time.sleep(2)
            continue

        ensemble = EnsembleSelection(ensemble_size=ensemble_size,
                                     task_type=task_type,
                                     metric=metric)
        try:
            ensemble.fit(all_predictions_train, targets_ensemble,
                         include_num_runs)
            logger.info(ensemble)
        except ValueError as e:
            logger.error('Caught ValueError: ' + str(e))
            used_time = watch.wall_elapsed('ensemble_builder')
            time.sleep(2)
            continue
        except IndexError as e:
            logger.error('Caught IndexError: ' + str(e))
            used_time = watch.wall_elapsed('ensemble_builder')
            time.sleep(2)
            continue
        except Exception as e:
            # BUGFIX: exceptions have no `.message` attribute on Python 3;
            # str(e) works on both Python 2 and 3.
            logger.error('Caught error! %s', str(e))
            used_time = watch.wall_elapsed('ensemble_builder')
            time.sleep(2)
            continue

        # Output the score
        logger.info('Training performance: %f' % ensemble.train_score_)

        # Save the ensemble for later use in the main auto-sklearn module!
        backend.save_ensemble(ensemble, index_run, seed)

        # Save predictions for valid and test data set
        if len(dir_valid_list) == len(dir_ensemble_list):
            all_predictions_valid = np.array(all_predictions_valid)
            ensemble_predictions_valid = ensemble.predict(
                all_predictions_valid)
            backend.save_predictions_as_txt(ensemble_predictions_valid,
                                            'valid', index_run,
                                            prefix=dataset_name)
        else:
            logger.info('Could not find as many validation set predictions (%d)'
                        'as ensemble predictions (%d)!.',
                        len(dir_valid_list), len(dir_ensemble_list))

        del all_predictions_valid

        if len(dir_test_list) == len(dir_ensemble_list):
            all_predictions_test = np.array(all_predictions_test)
            ensemble_predictions_test = ensemble.predict(all_predictions_test)
            backend.save_predictions_as_txt(ensemble_predictions_test,
                                            'test', index_run,
                                            prefix=dataset_name)
        else:
            logger.info('Could not find as many test set predictions (%d) as '
                        'ensemble predictions (%d)!',
                        len(dir_test_list), len(dir_ensemble_list))

        del all_predictions_test

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('ensemble_iter_' + str(index_run))
        time_iter = watch.get_wall_dur('ensemble_iter_' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return
def main(logger, predictions_dir, basename, task_type, metric, limit,
         output_dir, ensemble_size=None):
    """Build weighted ensembles from per-model prediction dumps.

    Polls ``predictions_ensemble``/``predictions_valid``/``predictions_test``
    under ``predictions_dir``, scores each model against the stored ensemble
    labels, excludes random-guessing models (and, if ``ensemble_size`` is
    given, everything but the best ``ensemble_size`` models), computes
    ensemble weights via ``weighted_ensemble`` and writes the resulting
    valid/test predictions to ``output_dir``.  Runs until ``limit`` seconds
    of wall-clock time are used.

    Parameters
    ----------
    logger : logging.Logger
        Logger used for progress and error reporting.
    predictions_dir : str
        Directory holding the prediction sub-directories and
        ``true_labels_ensemble.npy``.
    basename : str
        Prefix for the written ``.predict`` files.
    task_type : int
        Task-type constant forwarded to ``calculate_score``.
    metric : str
        Metric name forwarded to ``calculate_score``.
    limit : float
        Wall-clock time budget in seconds.
    output_dir : str
        Directory the prediction files are placed in.
    ensemble_size : int, optional
        If given, keep at most this many best-scoring models.
    """
    watch = StopWatch()
    watch.start_task('ensemble_builder')

    used_time = 0
    time_iter = 0
    index_run = 0
    current_num_models = 0

    while used_time < limit:
        logger.debug('Time left: %f' % (limit - used_time))
        logger.debug('Time last iteration: %f' % time_iter)

        # Load the true labels of the validation data
        true_labels = np.load(os.path.join(predictions_dir,
                                           'true_labels_ensemble.npy'))

        # Load the predictions from the models
        all_predictions_train = []
        dir_ensemble = os.path.join(predictions_dir, 'predictions_ensemble/')
        dir_valid = os.path.join(predictions_dir, 'predictions_valid/')
        dir_test = os.path.join(predictions_dir, 'predictions_test/')

        if not os.path.isdir(dir_ensemble) or not os.path.isdir(dir_valid) or \
                not os.path.isdir(dir_test):
            logger.debug('Prediction directory does not exist')
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        dir_ensemble_list = sorted(os.listdir(dir_ensemble))
        dir_valid_list = sorted(os.listdir(dir_valid))
        dir_test_list = sorted(os.listdir(dir_test))

        if check_data(logger, len(dir_ensemble_list), len(dir_valid_list),
                      len(dir_test_list), current_num_models):
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('ensemble_iter_' + str(index_run))

        # Binary mask where True indicates that the corresponding model will
        # be excluded from the ensemble
        exclude_mask = []
        if ensemble_size is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the model that are currently in our ensemble
            indices_nbest = []

        model_idx = 0
        for f in dir_ensemble_list:
            predictions = np.load(os.path.join(dir_ensemble, f))
            score = calculate_score(true_labels, predictions,
                                    task_type, metric,
                                    predictions.shape[1])

            if ensemble_size is not None:
                if score <= 0.001:
                    exclude_mask.append(True)
                    logger.error('Model only predicts at random: ' + f +
                                 ' has score: ' + str(score))
                # If we have less model in our ensemble than ensemble_size add
                # the current model if it is better than random
                elif len(scores_nbest) < ensemble_size:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    exclude_mask.append(False)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))
                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        logger.debug(
                            'Worst model in our ensemble: %d with score %f '
                            'will be replaced by model %d with score %f' %
                            (idx, scores_nbest[idx], model_idx, score))
                        scores_nbest[idx] = score
                        # Exclude the old model
                        exclude_mask[int(indices_nbest[idx])] = True
                        indices_nbest[idx] = model_idx
                        exclude_mask.append(False)
                    # Otherwise exclude the current model from the ensemble
                    else:
                        exclude_mask.append(True)
            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    exclude_mask.append(True)
                    logger.error('Model only predicts at random: ' + f +
                                 ' has score: ' + str(score))
                else:
                    exclude_mask.append(False)
                    all_predictions_train.append(predictions)

            model_idx += 1

        # BUGFIX: was a stray `print(exclude_mask)` writing diagnostics to
        # stdout; route it through the logger like everything else.
        logger.debug(exclude_mask)

        all_predictions_valid = get_predictions(dir_valid,
                                                dir_valid_list,
                                                exclude_mask)
        all_predictions_test = get_predictions(dir_test,
                                               dir_test_list,
                                               exclude_mask)

        if len(all_predictions_train) == len(all_predictions_test) == len(
                all_predictions_valid) == 0:
            logger.error('All models do just random guessing')
            time.sleep(2)
            continue

        if len(all_predictions_train) == 1:
            logger.debug('Only one model so far we just copy its predictions')
            Y_valid = all_predictions_valid[0]
            Y_test = all_predictions_test[0]
        else:
            try:
                # Compute the weights for the ensemble
                # Use equally initialized weights
                n_models = len(all_predictions_train)
                init_weights = np.ones([n_models]) / n_models
                weights = weighted_ensemble(logger.debug,
                                            np.array(all_predictions_train),
                                            true_labels, task_type, metric,
                                            init_weights)
            except ValueError:
                logger.error('Caught ValueError!')
                used_time = watch.wall_elapsed('ensemble_builder')
                continue
            except Exception:
                logger.error('Caught error!')
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            # Compute the ensemble predictions for the valid data
            Y_valid = ensemble_prediction(np.array(all_predictions_valid),
                                          weights)

            # Compute the ensemble predictions for the test data
            Y_test = ensemble_prediction(np.array(all_predictions_test),
                                         weights)

        # Save predictions for valid and test data set
        filename_test = os.path.join(
            output_dir,
            basename + '_valid_' + str(index_run).zfill(3) + '.predict')
        save_predictions(os.path.join(predictions_dir, filename_test),
                         Y_valid)

        filename_test = os.path.join(
            output_dir,
            basename + '_test_' + str(index_run).zfill(3) + '.predict')
        save_predictions(os.path.join(predictions_dir, filename_test),
                         Y_test)

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('ensemble_iter_' + str(index_run))
        time_iter = watch.get_wall_dur('ensemble_iter_' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return
def main(logger, predictions_dir, basename, task_type, metric, limit,
         output_dir, ensemble_size=None, seed=1, indices_output_dir='.'):
    """Build ensembles via ensemble selection from per-seed prediction dumps.

    Polls ``predictions_ensemble_<seed>``/``predictions_valid_<seed>``/
    ``predictions_test_<seed>`` under ``predictions_dir``, scores each model
    against the stored true labels, drops random-guessing models (and, if
    ``ensemble_size`` is given, everything but the ``ensemble_size`` best),
    runs ``ensemble_selection`` and writes both the member weights (pickled
    into ``indices_output_dir``) and the averaged valid/test predictions.
    Runs until ``limit`` seconds of wall-clock time are used.

    Parameters
    ----------
    logger : logging.Logger
        Logger used for progress and error reporting.
    predictions_dir : str
        Directory holding the prediction sub-directories and
        ``true_labels_ensemble.npy``.
    basename : str
        Prefix for the written ``.predict`` files.
    task_type : str
        Task-type name; translated via ``STRING_TO_TASK_TYPES``.
    metric : str
        Metric name forwarded to ``calculate_score``.
    limit : float
        Wall-clock time budget in seconds.
    output_dir : str
        Directory the prediction files are placed in.
    ensemble_size : int, optional
        Size of the ensemble; also bounds the candidate pool.
    seed : int
        Seed selecting this run's prediction directories.
    indices_output_dir : str
        Directory receiving the pickled member-weight files.
    """
    watch = StopWatch()
    watch.start_task('ensemble_builder')

    task_type = STRING_TO_TASK_TYPES[task_type]

    used_time = 0
    time_iter = 0
    index_run = 0
    current_num_models = 0

    dir_ensemble = join(predictions_dir, 'predictions_ensemble_%s/' % seed)
    dir_valid = join(predictions_dir, 'predictions_valid_%s/' % seed)
    dir_test = join(predictions_dir, 'predictions_test_%s/' % seed)
    paths_ = [dir_ensemble, dir_valid, dir_test]

    tru_labels_path = join(predictions_dir, 'true_labels_ensemble.npy')

    while used_time < limit:
        logger.debug('Time left: %f', limit - used_time)
        logger.debug('Time last iteration: %f', time_iter)

        # Load the true labels of the validation data
        true_labels = np.load(tru_labels_path)

        # Load the predictions from the models
        exists = [os.path.isdir(dir_) for dir_ in paths_]
        if not exists[0]:  # all(exists):
            logger.debug('Prediction directory %s does not exist!' %
                         dir_ensemble)
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        dir_ensemble_list = sorted(os.listdir(dir_ensemble))
        dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
        dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

        if check_data(logger, len(dir_ensemble_list), current_num_models):
            time.sleep(2)
            used_time = watch.wall_elapsed('ensemble_builder')
            continue

        watch.start_task('ensemble_iter_' + str(index_run))

        # List of num_runs (which are in the filename) which will be included
        # later
        include_num_runs = []
        re_num_run = re.compile(r'_([0-9]*)\.npy$')
        if ensemble_size is not None:
            # Keeps track of the single scores of each model in our ensemble
            scores_nbest = []
            # The indices of the model that are currently in our ensemble
            indices_nbest = []
            # The names of the models
            model_names = []
            # The num run of the models
            num_runs = []

        model_names_to_scores = dict()
        model_idx = 0
        for model_name in dir_ensemble_list:
            predictions = np.load(os.path.join(dir_ensemble, model_name))
            score = calculate_score(true_labels, predictions,
                                    task_type, metric,
                                    predictions.shape[1])
            model_names_to_scores[model_name] = score
            num_run = int(re_num_run.search(model_name).group(1))

            if ensemble_size is not None:
                if score <= 0.001:
                    logger.error('Model only predicts at random: ' +
                                 model_name + ' has score: ' + str(score))
                # If we have less models in our ensemble than ensemble_size
                # add the current model if it is better than random
                elif len(scores_nbest) < ensemble_size:
                    scores_nbest.append(score)
                    indices_nbest.append(model_idx)
                    include_num_runs.append(num_run)
                    model_names.append(model_name)
                    num_runs.append(num_run)
                else:
                    # Take the worst performing model in our ensemble so far
                    idx = np.argmin(np.array([scores_nbest]))
                    # If the current model is better than the worst model in
                    # our ensemble replace it by the current model
                    if scores_nbest[idx] < score:
                        logger.debug('Worst model in our ensemble: %s with '
                                     'score %f will be replaced by model %s '
                                     'with score %f', model_names[idx],
                                     scores_nbest[idx], model_name, score)
                        # Exclude the old model
                        del scores_nbest[idx]
                        scores_nbest.append(score)
                        del include_num_runs[idx]
                        del indices_nbest[idx]
                        indices_nbest.append(model_idx)
                        include_num_runs.append(num_run)
                        del model_names[idx]
                        model_names.append(model_name)
                        del num_runs[idx]
                        num_runs.append(num_run)
                    # Otherwise exclude the current model from the ensemble
            else:
                # Load all predictions that are better than random
                if score <= 0.001:
                    logger.error('Model only predicts at random: ' +
                                 model_name + ' has score: ' + str(score))
                else:
                    include_num_runs.append(num_run)

            model_idx += 1

        indices_to_model_names = dict()
        indices_to_run_num = dict()
        for i, model_name in enumerate(dir_ensemble_list):
            num_run = int(re_num_run.search(model_name).group(1))
            if num_run in include_num_runs:
                num_indices = len(indices_to_model_names)
                indices_to_model_names[num_indices] = model_name
                indices_to_run_num[num_indices] = num_run

        include_num_runs = set(include_num_runs)

        all_predictions_train = get_predictions(dir_ensemble,
                                                dir_ensemble_list,
                                                include_num_runs,
                                                re_num_run)
        all_predictions_valid = get_predictions(dir_valid,
                                                dir_valid_list,
                                                include_num_runs,
                                                re_num_run)
        all_predictions_test = get_predictions(dir_test,
                                               dir_test_list,
                                               include_num_runs,
                                               re_num_run)

        if len(all_predictions_train) == len(all_predictions_test) == len(
                all_predictions_valid) == 0:
            logger.error('All models do just random guessing')
            time.sleep(2)
            continue

        elif len(all_predictions_train) == 1:
            logger.debug('Only one model so far we just copy its predictions')
            ensemble_members_run_numbers = {0: 1.0}
            # BUGFIX: the averaging code below indexes with `indices`; without
            # defining it here the single-model branch raised a NameError (or
            # silently reused a stale value from a previous iteration).  The
            # only remaining model is index 0.
            indices = np.array([0])

            # Output the score
            # BUGFIX: max() instead of np.max() — np.max cannot reduce a
            # Python-3 dict view.
            logger.info('Training performance: %f' %
                        max(model_names_to_scores.values()))
        else:
            try:
                indices, trajectory = ensemble_selection(
                    np.array(all_predictions_train), true_labels,
                    ensemble_size, task_type, metric)

                logger.info('Trajectory and indices!')
                logger.info(trajectory)
                logger.info(indices)
            except ValueError as e:
                logger.error('Caught ValueError: ' + str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                continue
            except Exception as e:
                # BUGFIX: exceptions have no `.message` attribute on Python 3;
                # str(e) works on both Python 2 and 3.
                logger.error('Caught error! %s', str(e))
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            # Output the score
            logger.info('Training performance: %f' % trajectory[-1])

            # Print the ensemble members:
            ensemble_members_run_numbers = dict()
            ensemble_members = Counter(indices).most_common()
            ensemble_members_string = 'Ensemble members:\n'
            logger.info(ensemble_members)
            for ensemble_member in ensemble_members:
                weight = float(ensemble_member[1]) / len(indices)
                ensemble_members_string += \
                    ('    %s; weight: %10f; performance: %10f\n' %
                     (indices_to_model_names[ensemble_member[0]],
                      weight,
                      model_names_to_scores[
                          indices_to_model_names[ensemble_member[0]]]))
                ensemble_members_run_numbers[
                    indices_to_run_num[ensemble_member[0]]] = weight
            logger.info(ensemble_members_string)

        # Save the ensemble indices for later use!
        filename_indices = os.path.join(indices_output_dir,
                                        str(index_run).zfill(5) + '.indices')

        logger.info(ensemble_members_run_numbers)
        # BUGFIX: pickle requires a binary file handle; opening with 'w'
        # raises TypeError on Python 3. 'wb' works on Python 2 as well.
        with open(filename_indices, 'wb') as fh:
            pickle.dump(ensemble_members_run_numbers, fh)

        # Save predictions for valid and test data set
        if len(dir_valid_list) == len(dir_ensemble_list):
            ensemble_predictions_valid = np.mean(
                all_predictions_valid[indices.astype(int)], axis=0)
            filename_test = os.path.join(
                output_dir,
                basename + '_valid_' + str(index_run).zfill(3) + '.predict')
            save_predictions(os.path.join(predictions_dir, filename_test),
                             ensemble_predictions_valid)
        else:
            logger.info('Could not find as many validation set predictions '
                        'as ensemble predictions!.')

        if len(dir_test_list) == len(dir_ensemble_list):
            ensemble_predictions_test = np.mean(
                all_predictions_test[indices.astype(int)], axis=0)
            filename_test = os.path.join(
                output_dir,
                basename + '_test_' + str(index_run).zfill(3) + '.predict')
            save_predictions(os.path.join(predictions_dir, filename_test),
                             ensemble_predictions_test)
        else:
            logger.info('Could not find as many test set predictions as '
                        'ensemble predictions!')

        current_num_models = len(dir_ensemble_list)
        watch.stop_task('ensemble_iter_' + str(index_run))
        time_iter = watch.get_wall_dur('ensemble_iter_' + str(index_run))
        used_time = watch.wall_elapsed('ensemble_builder')
        index_run += 1
    return