def select_by_round_robin(tensor_run, column_states, current_f_list, col_list, steps, use_sum=True):
    """Spend `steps` labeling actions, always picking the least-labeled column.

    Mutates `column_states` in place and appends one entry per step to
    `col_list` (chosen column id) and `current_f_list` (total F1 after the
    step); the two lists are also returned for convenience.

    Args:
        tensor_run: per-column results consumed by calc_total_f1.
        column_states: per-column counters of labeling actions taken so far.
        current_f_list: running list of total-F1 values.
        col_list: running list of chosen column ids.
        steps: number of selection steps to perform.
        use_sum: unused here; kept for a uniform signature across the
            select_by_* strategies.

    Returns:
        (current_f_list, col_list) after `steps` selections.
    """
    for _ in range(steps):
        # Fix: np.argmin already returns the (first) index of the minimum as
        # a scalar, so the former np.min(np.argmin(...)) wrapper was a no-op.
        col_id = int(np.argmin(column_states))
        column_states[col_id] += 1
        col_list.append(col_id)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def select_by_estimated_max_recall_impact(tensor_run, column_states, current_f_list, col_list, estimated_scores, steps, use_sum=True):
    """Greedy selection by estimated recall impact.

    Each step recomputes the per-column impact estimate via
    calculateEstimatedRecallImpacts and labels the column with the highest
    value. `column_states`, `col_list` and `current_f_list` are updated in
    place and also returned. `use_sum` is unused; it keeps the signature
    uniform with the other select_by_* strategies.
    """
    for _ in range(steps):
        impacts = calculateEstimatedRecallImpacts(column_states, estimated_scores)
        best_col = np.argmax(impacts)
        column_states[best_col] += 1
        col_list.append(best_col)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def select_by_max_prediction_change(tensor_run, column_states, current_f_list, col_list, matrix_change_sum, steps, use_sum=True):
    """Greedy selection by largest prediction-change score.

    Each step labels the column whose next state has the highest value in
    `matrix_change_sum[col][column_states[col]]`. Updates `column_states`,
    `col_list` and `current_f_list` in place and returns the two lists.
    `use_sum` is unused; kept for a uniform select_by_* signature.

    Fix: the previous sentinel scan (max_change_sum = -1.0, id = -1) silently
    incremented column_states[-1] whenever every candidate value was <= -1.0;
    np.argmax over the explicit score vector removes that failure mode while
    preserving the original first-maximum tie-breaking.
    """
    for _ in range(steps):
        scores = [matrix_change_sum[c][column_states[c]] for c in range(len(column_states))]
        chosen = int(np.argmax(scores))
        column_states[chosen] += 1
        col_list.append(chosen)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def select_by_random(tensor_run, column_states, current_f_list, col_list, steps, use_sum=True):
    """Baseline strategy: label a uniformly random column each step.

    Mutates `column_states` in place and appends the chosen column id to
    `col_list` and the resulting total F1 to `current_f_list`; both lists
    are returned. `use_sum` is unused (uniform select_by_* signature).
    """
    num_cols = len(column_states)
    for _ in range(steps):
        pick = np.random.randint(num_cols)
        column_states[pick] += 1
        col_list.append(pick)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def select_by_max_uncertainty_all(tensor_run, column_states, current_f_list, col_list, matrix_all_certainty_sum, steps, use_sum=True):
    """Greedy selection by maximum uncertainty (= minimum certainty sum).

    Each step labels the column whose next state has the smallest value in
    `matrix_all_certainty_sum[col][column_states[col]]`. Updates
    `column_states`, `col_list` and `current_f_list` in place and returns
    the two lists. `use_sum` is unused; kept for a uniform select_by_*
    signature.

    Fix: the previous sentinel scan (threshold 10000000.0, id = -1) silently
    incremented column_states[-1] whenever every candidate value was at or
    above the sentinel; np.argmin over the explicit vector removes that
    failure mode while preserving the original first-minimum tie-breaking.
    """
    for _ in range(steps):
        certainties = [matrix_all_certainty_sum[c][column_states[c]] for c in range(len(column_states))]
        chosen = int(np.argmin(certainties))
        column_states[chosen] += 1
        col_list.append(chosen)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def find_max_total_f_new(tensor_run, column_states, current_f_list, col_list, steps, done=None, use_sum=True):
    """Depth-limited exhaustive search for the best labeling sequence.

    Tries every eligible column at each depth (up to `steps` levels),
    recursing on a copy of the state, and returns the (f_list, col_list)
    pair of the branch with the best score: the sum of its F1 trajectory
    when `use_sum` is true, otherwise its maximum.

    Args:
        tensor_run: per-column results consumed by calc_total_f1/calc_column_f1.
        column_states: per-column counters; copied, never mutated here.
        current_f_list: F1 trajectory accumulated so far.
        col_list: column choices accumulated so far.
        steps: remaining search depth.
        done: columns frozen out of the search (already at perfect F1).
        use_sum: branch-scoring mode (sum vs. max of the F1 trajectory).

    Fixes:
      * `done` used a mutable default argument ({}); replaced with the
        None-sentinel idiom.
      * the recursive call dropped `use_sum`, so a caller passing
        use_sum=False silently got sum-scoring at every depth below the
        root; it is now propagated.
    """
    if done is None:
        done = {}
    if steps <= 0:
        return current_f_list, col_list
    all_list = []
    all_col_list = []
    all_sum = []
    for col in range(len(column_states)):
        # A column is eligible while another labeling action fits in
        # tensor_run[col] and the column has not been frozen.
        if (column_states[col] + 1) < len(tensor_run[col]) and col not in done:
            new_column_states = column_states.copy()
            new_column_states[col] += 1
            new_current_f_list = list(current_f_list)
            new_current_f_list.append(calc_total_f1(tensor_run, new_column_states))
            new_col_list = list(col_list)
            new_col_list.append(col)
            # Freeze columns that already reach perfect per-column F1 for
            # the rest of this branch of the search.
            new_done = dict(done)
            if calc_column_f1(tensor_run, new_column_states, col) == 1.0:
                new_done[col] = True
            result_list, result_col_list = find_max_total_f_new(
                tensor_run, new_column_states, new_current_f_list,
                new_col_list, steps - 1, new_done, use_sum)
            all_list.append(result_list)
            all_col_list.append(result_col_list)
            all_sum.append(np.sum(result_list) if use_sum else np.max(result_list))
    if not all_sum:
        # No eligible column: this node is a leaf.
        return current_f_list, col_list
    best = int(np.argmax(all_sum))
    return all_list[best], all_col_list[best]
def select_by_max_false_prediction(tensor_run, column_states, current_f_list, col_list, fpfn, steps, use_sum=True):
    """Greedy selection by largest false-prediction (FP+FN) score.

    Each step labels the column whose next state has the highest value in
    `fpfn[col][column_states[col]]`. Updates `column_states`, `col_list`
    and `current_f_list` in place and returns the two lists. `use_sum` is
    unused; kept for a uniform select_by_* signature.

    Fix: the previous sentinel scan (max_false_pred = -1.0, id = -1)
    silently incremented column_states[-1] whenever every candidate value
    was <= -1.0; np.argmax over the explicit score vector removes that
    failure mode while preserving the original first-maximum tie-breaking.
    """
    for _ in range(steps):
        scores = [fpfn[c][column_states[c]] for c in range(len(column_states))]
        chosen = int(np.argmax(scores))
        column_states[chosen] += 1
        col_list.append(chosen)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def select_by_min_cross_val(tensor_run, column_states, current_f_list, col_list, matrix_crossval, steps, use_sum=True):
    """Greedy selection by minimum cross-validation score.

    Each step labels the column whose next state has the smallest value in
    `matrix_crossval[col][column_states[col]]`. Updates `column_states`,
    `col_list` and `current_f_list` in place and returns the two lists.
    `use_sum` is unused; kept for a uniform select_by_* signature.

    Fix: the previous sentinel scan (threshold 10000000.0, id = -1) silently
    incremented column_states[-1] whenever every candidate value was at or
    above the sentinel; np.argmin over the explicit vector removes that
    failure mode while preserving the original first-minimum tie-breaking.
    """
    for _ in range(steps):
        scores = [matrix_crossval[c][column_states[c]] for c in range(len(column_states))]
        chosen = int(np.argmin(scores))
        column_states[chosen] += 1
        col_list.append(chosen)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list
def select_by_max_uncertainty_all_prob(tensor_run, column_states, current_f_list, col_list, matrix_all_certainty_sum, steps, use_sum=True):
    """Probabilistic variant of uncertainty selection.

    Each step draws the next column at random with probability proportional
    to its shifted certainty value (certainty - 1), normalized over all
    columns. Updates `column_states`, `col_list` and `current_f_list` in
    place and returns the two lists. `use_sum` is unused; kept for a uniform
    select_by_* signature.

    Fixes: removed two leftover Python-2 debug `print` statements (which
    also made the function fail to parse under Python 3) and dead
    commented-out code; no selection logic changed.

    NOTE(review): np.random.choice requires non-negative weights summing to
    one, so this implicitly assumes the (certainty - 1) values share a sign
    and do not all vanish — confirm against how matrix_all_certainty_sum is
    built.
    """
    for _ in range(steps):
        weights = np.zeros(len(column_states))
        for col_i in range(len(column_states)):
            weights[col_i] = matrix_all_certainty_sum[col_i][column_states[col_i]]
        # Shift by one, then normalize into a probability distribution.
        weights -= np.ones(len(column_states))
        weights = weights / np.sum(weights)
        chosen = np.random.choice(len(weights), 1, p=weights)[0]
        column_states[chosen] += 1
        col_list.append(chosen)
        current_f_list.append(calc_total_f1(tensor_run, column_states))
    return current_f_list, col_list