def __init__(self):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # one small network per cluster
    model1 = NNSmall().to(device)
    model2 = NNSmall().to(device)
    model3 = NNSmall().to(device)
    self.models = [model1, model2, model3]

    opt1 = torch.optim.Adam(model1.parameters())
    opt2 = torch.optim.Adam(model2.parameters())
    opt3 = torch.optim.Adam(model3.parameters())
    self.optimizers = [opt1, opt2, opt3]

    schedule_matrix_load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/' + str(
        self.num_schedules) + 'matrixes.pkl'
    self.matrices = pickle.load(open(schedule_matrix_load_directory, "rb"))
    self.kmeans_model, self.label = self.cluster_matrices(self.matrices, self.num_schedules)
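# `cluster_matrices` is defined elsewhere in the repo; the sketch below is a
# guess at its behavior (k-means over the flattened, normalized per-schedule
# count matrices, one cluster per network above). The 3-cluster count, the
# sklearn usage, and the (2048 x 20) shape are assumptions, not repo code.
from sklearn.cluster import KMeans
import numpy as np

def cluster_matrices_sketch(matrices, num_schedules):
    # flatten each (2048 x 20) state-action count matrix into one row,
    # normalizing so schedules of different lengths are comparable
    rows = []
    for m in matrices:
        m = np.asarray(m, dtype=np.float64)
        total = m.sum()
        rows.append((m / total if total > 0 else m).reshape(-1))
    stacked = np.stack(rows)  # shape: (num_schedules, 2048 * 20)
    kmeans = KMeans(n_clusters=3, random_state=0).fit(stacked)
    return kmeans, kmeans.labels_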
def generate_test_data(self):
    """
    Generates a bunch of counterfactual data (poorly done)
    :return: pairwise feature-difference matrix and matching 0/1 labels
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    data_matrix = []
    output_matrix = []

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        while step < schedule_bounds[1]:
            # NOTE: the original set ``truth`` once per schedule and indexed
            # ``self.X`` (the training features) with test-set indices; we
            # assume the truth was meant to be refreshed per set of twenty and
            # the freshly loaded test ``X`` was the intended feature matrix,
            # matching every other loop in this file.
            truth = Y[step]
            # feature vector of the action actually taken
            phi_i_num = truth + step
            phi_i_numpy = np.asarray(X[phi_i_num])

            # positive examples: true action minus each alternative
            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j_numpy = np.asarray(X[counter])
                data_matrix.append(list(phi_i_numpy - phi_j_numpy))
                output_matrix.append(1)

            # negative examples: each alternative minus the true action
            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j_numpy = np.asarray(X[counter])
                data_matrix.append(list(phi_j_numpy - phi_i_numpy))
                output_matrix.append(0)

            step += 20

    return data_matrix, output_matrix
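# The loops above emit every pairwise comparison twice, once per direction:
# (phi_true - phi_other, 1) and (phi_other - phi_true, 0). A toy 2-feature
# illustration of that convention (the numbers are made up):
import numpy as np

phi_true = np.array([2.0, 0.5])   # feature vector of the chosen task
phi_other = np.array([1.0, 1.5])  # one of the 19 alternatives

positive_example = (list(phi_true - phi_other), 1)   # ([1.0, -1.0], 1)
negative_example = (list(phi_other - phi_true), 0)   # ([-1.0, 1.0], 0)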
def __init__(self, num_schedules):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_inf_hetero_deadline_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.model = LSTMSmall().to(device)
    print(self.model.state_dict())

    params = list(self.model.parameters())
    self.opt = torch.optim.Adam(params)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
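# LSTMSmall's definition is not shown in this section; the sketch below is a
# minimal stand-in consistent with how it is called later (forward takes a
# hidden-state tuple, and ``reinitialize_hidden_to_random`` resets it).
# The hidden size and output head are assumptions.
import torch
import torch.nn as nn

class LSTMSmallSketch(nn.Module):
    def __init__(self, input_dim=13, hidden_dim=32):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim, batch_first=True)
        self.head = nn.Linear(hidden_dim, 2)
        self.reinitialize_hidden_to_random()

    def reinitialize_hidden_to_random(self):
        # (h_0, c_0), each of shape (num_layers, batch, hidden_dim)
        self.hidden = (torch.randn(1, 1, self.hidden_dim),
                       torch.randn(1, 1, self.hidden_dim))

    def forward(self, x, hidden):
        out, self.hidden = self.lstm(x.view(1, 1, -1), hidden)
        return torch.softmax(self.head(out.view(1, -1)), dim=1)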
def __init__(self):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.model = NNwEmbedding().to(device)
    print(self.model.state_dict())

    # network weights and the embedding get different learning rates
    self.opt = torch.optim.Adam(
        [{'params': list(self.model.parameters())[:-1]},
         {'params': self.model.EmbeddingList.parameters(), 'lr': .01}],
        lr=.001)
    # separate optimizer used when only the embedding is tuned
    self.embedding_optimizer = torch.optim.SGD(self.model.EmbeddingList.parameters(), lr=.01)

    self.embedding_list = [torch.ones(3) * 1 / 3 for _ in range(self.num_schedules)]
def __init__(self, alpha):
    self.arguments = Logger()
    self.alpha = alpha
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/scheduling_environment/new_data_pickle/' + str(
        self.num_schedules) + 'pairwise.pkl'
    self.X = None
    self.Y = None
    self.schedule_array = None
    bayesian_embedding_dim = 14
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = self.create_sets_of_20_from_x_for_pairwise_comparisions()
    self.embedding_list = [torch.ones(bayesian_embedding_dim) * 1 / 3 for _ in range(self.num_schedules)]

    use_gpu = True
    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=16,
                          output_dim=2,
                          bayesian_embedding_dim=bayesian_embedding_dim,
                          alpha=1.5,
                          use_gpu=use_gpu,
                          vectorized=True,
                          is_value=False)
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())

    params = list(self.model.parameters())
    # exclude the Bayesian embedding from the main parameter group;
    # it gets its own learning rate below
    del params[0]
    self.opt = torch.optim.RMSprop(
        [{'params': params},
         {'params': self.model.bayesian_embedding, 'lr': .001}])

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
def __init__(self):
    num_schedules = 150
    self.num_schedules = num_schedules

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(num_schedules, self.data)
    self.start_of_each_set_twenty = self.create_sets_of_20_from_x_for_pairwise_comparisions()
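# `create_sets_of_20_from_x_for_pairwise_comparisions` is used throughout but
# not shown here; given that each timestep contributes a block of twenty
# candidate tasks, a plausible implementation is the one-liner below
# (an assumption inferred from the call sites, not the repo's code):
def create_sets_of_20_from_x_for_pairwise_comparisions_sketch(X):
    # index that opens each consecutive block of 20 rows of X
    return list(range(0, len(X), 20))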
def __init__(self, num_schedules):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_inf_hetero_deadline_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    self.model_will_deepen = ProLoNet(input_dim=len(self.X[0]),
                                      weights=None,
                                      comparators=None,
                                      leaves=16,
                                      output_dim=2,
                                      bayesian_embedding_dim=8,
                                      alpha=1.5,
                                      use_gpu=True,
                                      vectorized=True,
                                      is_value=False)
    self.model_will_not_deepen = ProLoNet(input_dim=len(self.X[0]),
                                          weights=None,
                                          comparators=None,
                                          leaves=16,
                                          output_dim=2,
                                          bayesian_embedding_dim=8,
                                          alpha=1.5,
                                          use_gpu=True,
                                          vectorized=True,
                                          is_value=False)

    use_gpu = True
    if use_gpu:
        # NOTE: the original referenced an undefined ``self.model`` here; we
        # assume both networks were meant to be moved to the GPU.
        self.model_will_deepen = self.model_will_deepen.cuda()
        self.model_will_not_deepen = self.model_will_not_deepen.cuda()
    print(self.model_will_deepen.state_dict())

    # one optimizer per network, each excluding its Bayesian embedding from
    # the main group so the embedding can use its own learning rate
    # (addresses the original "TODO: make optimizers separate")
    self.optimizers = []
    for model in (self.model_will_deepen, self.model_will_not_deepen):
        params = list(model.parameters())
        del params[0]  # the Bayesian embedding parameter
        self.optimizers.append(torch.optim.RMSprop(
            [{'params': params},
             {'params': model.bayesian_embedding, 'lr': .001}]))

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    self.max_depth = 10
    self.embedding_list = [torch.ones(8) * 1 / 3 for _ in range(self.num_schedules)]
def __init__(self, bayesian_dim):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 200  # test on 40, train on 160
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_BDFIL_hetero_deadline_pairwise.pkl'
    self.bayesian_embedding_dim = int(bayesian_dim)
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    # NOTE: the original passed self.schedule_array here; every other call
    # site passes the feature matrix X, so we assume that was intended.
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)
    self.embedding_list = [
        torch.ones(self.bayesian_embedding_dim) * 1 / 3
        for _ in range(self.num_schedules)
    ]

    use_gpu = True
    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=16,
                          output_dim=2,
                          bayesian_embedding_dim=self.bayesian_embedding_dim,
                          alpha=1.5,
                          use_gpu=use_gpu,
                          vectorized=True,
                          is_value=False)
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())

    params = list(self.model.parameters())
    # delete embedding parameter from the main group
    del params[0]
    self.opt = torch.optim.RMSprop(params, lr=.0001)
    # optimizer for the embedding
    self.embedding_optimizer = torch.optim.Adam(
        [{'params': self.model.bayesian_embedding, 'lr': .01}])

    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 2000
    self.distribution_epsilon = .0001
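# Positional deletion (``del params[0]``) is brittle: reordering layers would
# silently exclude the wrong parameter. A name-based split, assuming the
# embedding is registered under a name containing 'bayesian_embedding', is a
# sturdier alternative (a sketch, not the repo's approach):
import torch

def split_embedding_params(model, embedding_name='bayesian_embedding'):
    # split parameters into (network weights, embedding params) by name
    net_params, emb_params = [], []
    for name, p in model.named_parameters():
        (emb_params if embedding_name in name else net_params).append(p)
    return net_params, emb_params

# usage sketch:
# net_params, emb_params = split_embedding_params(self.model)
# self.opt = torch.optim.RMSprop(net_params, lr=.0001)
# self.embedding_optimizer = torch.optim.Adam(emb_params, lr=.01)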
def __init__(self, num_schedules):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'high_low_hetero_deadline_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.model = NNSmall().to(device)
    print(self.model.state_dict())

    self.opt = torch.optim.SGD(self.model.parameters(), lr=.0001)  # TODO: tune weight decay
def __init__(self):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_homog_deadline_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = self.create_sets_of_20_from_x_for_pairwise_comparisions()

    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=64,
                          output_dim=2,
                          bayesian_embedding_dim=None,
                          alpha=1.5,
                          use_gpu=True,
                          vectorized=True,
                          is_value=False)
    use_gpu = True
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())

    params = list(self.model.parameters())
    self.opt = torch.optim.RMSprop(params)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
def __init__(self, num_schedules):
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_BDFIL_hetero_deadline_pairwise.pkl'
    self.data = pickle.load(open(load_directory, "rb"))
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.model = BNNSmall().to(device)
    print(self.model.state_dict())

    bnn_params = list(self.model.parameters())
    # drop parameter index 10 (presumably the embedding in this architecture)
    # from the main optimizer; the embedding is trained separately below
    del bnn_params[10]
    self.opt = torch.optim.Adam([{'params': bnn_params}])  # TODO: try together and not together
    self.embedding_optimizer = torch.optim.Adam(self.model.EmbeddingList.parameters(), lr=.001)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    self.embedding_list = [torch.ones(1, 8) * 1 / 3 for _ in range(self.num_schedules)]
def test_again(self, embedding_list):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    # NOTE: the original assigned a fresh SGD optimizer to ``self.opt`` but
    # then stepped ``self.embedding_optimizer`` below; we assume the fresh,
    # lower-learning-rate optimizer was the one intended for test-time tuning.
    self.embedding_optimizer = torch.optim.SGD(self.model.EmbeddingList.parameters(), lr=.0001)
    criterion = torch.nn.BCELoss()

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        self.model.set_bayesian_embedding(embedding_list[j])
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net to get pairwise preference
                    preference_prob = self.model.forward(feature_input)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete; score each task by its row sum
            column_vec = np.sum(probability_matrix, axis=1)

            # top 1 (break ties uniformly at random)
            highest_val = max(column_vec)
            all_indexes_that_have_highest_val = [i for i, e in enumerate(list(column_vec)) if e == highest_val]
            if len(all_indexes_that_have_highest_val) > 1:
                print('length of indexes greater than 1: ', all_indexes_that_have_highest_val)
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            # then do the embedding-update loop
            phi_i_num = truth + step
            phi_i = X[phi_i_num]
            phi_i_numpy = np.asarray(phi_i)
            # iterate over pairwise comparisons
            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j = X[counter]
                phi_j_numpy = np.asarray(phi_j)
                feature_input = phi_i_numpy - phi_j_numpy
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    label = Variable(torch.ones((1, 1)).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    label = Variable(torch.ones((1, 1)))

                output = self.model(feature_input)
                loss = criterion(output, label)
                # prepare optimizer, compute gradient, update params
                self.embedding_optimizer.zero_grad()
                loss.backward()
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                self.embedding_optimizer.step()
            print(self.model.EmbeddingList.state_dict())

            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j = X[counter]
                phi_j_numpy = np.asarray(phi_j)
                feature_input = phi_j_numpy - phi_i_numpy
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    label = Variable(torch.zeros((1, 1)).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    label = Variable(torch.zeros((1, 1)))

                output = self.model.forward(feature_input)
                self.embedding_optimizer.zero_grad()
                loss = criterion(output, label)
                loss.backward()
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                self.embedding_optimizer.step()

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    print('top1_mean: ', np.mean(percentage_accuracy_top1))
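# To make the row-sum ranking above concrete, here is a toy 3-alternative
# version (the probabilities are made up). Entry [m][n] is the model's
# probability that task m is preferred over task n; the diagonal stays zero.
import numpy as np

probability_matrix = np.array([
    [0.0, 0.8, 0.6],
    [0.2, 0.0, 0.3],
    [0.4, 0.7, 0.0],
])
column_vec = probability_matrix.sum(axis=1)  # [1.4, 0.5, 1.1]
print(np.argmax(column_vec))                 # 0 -> task 0 is the top-1 prediction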
def evaluate_on_test_data(self, models, schedules_trained_on, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    autoencoder_class = AutoEncoderTrain(150)
    checkpoint = torch.load('/home/ghost/PycharmProjects/bayesian_prolo/saved_models/Autoencoder150.tar')
    autoencoder_class.model.load_state_dict(checkpoint['nn_state_dict'])
    states = self.create_iterables()

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        100) + 'test_dist_early_hili_naive.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X_naive, Y_naive, schedule_array = create_new_dataset(data, 100)
    for i, each_element in enumerate(X_naive):
        X_naive[i] = each_element + list(range(20))

    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    # per-feature means of the encoded inputs, used to binarize encodings
    mean_input = [1.3277743, 0.32837677, 1.4974482, -1.3519306, -0.64621973,
                  0.10534518, -2.338118, -2.7345326, 1.7558736, -3.0746384, -3.485554]

    for j in range(num_schedules):
        current_schedule_matrix = np.zeros((2048, 20))
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))

            # pick the cluster from the (normalized) state-action counts;
            # an all-zero matrix cannot be normalized, so it is used as-is
            if current_schedule_matrix.sum() == 0:
                cluster_num = self.kmeans_model.predict(current_schedule_matrix.reshape(1, -1))
            else:
                matrix = np.divide(current_schedule_matrix, current_schedule_matrix.sum())
                cluster_num = self.kmeans_model.predict(matrix.reshape(1, -1))

            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through the selected cluster's net to get preferences
                    preference_prob = self.models[int(cluster_num)].forward(feature_input)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)

            # top 1 (break ties uniformly at random)
            highest_val = max(column_vec)
            all_indexes_that_have_highest_val = [i for i, e in enumerate(list(column_vec)) if e == highest_val]
            if len(all_indexes_that_have_highest_val) > 1:
                print('length of indexes greater than 1: ', all_indexes_that_have_highest_val)
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            # update the schedule's state-action counts for the next timestep
            embedding_copy = np.zeros((1, 11))
            input_element = autoencoder_class.model.forward_only_encoding(
                Variable(torch.Tensor(np.asarray(X_naive[int(step / 20)]).reshape(1, 242)).cuda()))
            for z, each_element in enumerate(mean_input):
                if each_element > input_element[0][z].item():
                    embedding_copy[0][z] = 0
                else:
                    embedding_copy[0][z] = 1
            index = self.pass_in_embedding_out_state_ID(states, embedding_copy[0])
            action = Y[step]
            current_schedule_matrix[index][int(action)] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'pairwise_NN_kmeans.pkl')
    return percentage_accuracy_top1
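# `create_iterables` and `pass_in_embedding_out_state_ID` are defined
# elsewhere; a plausible sketch, in which the 2048 matrix rows correspond to
# all 2^11 binary encodings and the state ID is the encoding's position in
# that enumeration (an assumption consistent with the 11-dim mean_input and
# the (2048, 20) matrix above):
import itertools

def create_iterables_sketch():
    # all 2^11 = 2048 binary state vectors, matching the 2048-row matrix
    return list(itertools.product([0, 1], repeat=11))

def pass_in_embedding_out_state_ID_sketch(states, binary_embedding):
    # the state ID is the vector's index in the enumeration
    return states.index(tuple(int(b) for b in binary_embedding))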
def evaluate_on_test_data(self, models, schedules_trained_on):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 75
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_high_low_hetero_deadline_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    # side step: cluster the test schedules so each can be routed to a model
    autoencoder_class = AutoEncoderTrain(num_schedules)
    autoencoder_class.model.load('/home/ghost/PycharmProjects/bayesian_prolo/saved_models/Autoencoder'
                                 + str(schedules_trained_on) + '.tar')
    autoencoder_class.compute_mean()
    autoencoder_class.create_iterables()
    autoencoder_class.round_each_encoding_and_create_array()
    autoencoder_class.populate_a_matrix_per_schedule()
    test_matrices = autoencoder_class.save_matrices()
    gmm_model, labels = self.cluster_matrices(test_matrices, num_schedules)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        self.model = models[labels[j]]
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net
                    preference_prob = self.model.forward(feature_input)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)
            # top 1
            choice = np.argmax(column_vec)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3,
                             'HIFI_LIFI_gmm_nn_small_pairwise' + str(schedules_trained_on) + '.pkl')
def evaluate_on_test_data(self, model, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    This is tested on 20% of the data and will be stored in a text file.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    if load_in_model:
        model.load_state_dict(torch.load(
            '/home/ghost/PycharmProjects/bayesian_prolo/saved_models/pairwise_saved_models/model_homog.tar'
        )['nn_state_dict'])

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        while step < schedule_bounds[1]:
            probability_vector = np.zeros((1, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                feature_input = phi_i_numpy
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                # push through net; pointwise scores go through a sigmoid
                preference_prob = model.forward(feature_input)
                sig = torch.nn.Sigmoid()
                preference_prob = sig(preference_prob)
                probability_vector[0][m] = preference_prob[0].data.detach()[0].item()
            print(probability_vector)

            # top 1 (break ties uniformly at random)
            highest_val = max(probability_vector[0])
            all_indexes_that_have_highest_val = [
                i for i, e in enumerate(list(probability_vector[0])) if e == highest_val
            ]
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(probability_vector), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    self.save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3,
                                  'DDT_pointwise' + str(self.num_schedules))
def evaluate_on_test_data(self, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 75
    loss_func = AlphaLoss()
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/test/' + str(
        num_schedules) + '_inf_hetero_deadline_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)
    start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(X)

    # optimizer that only updates the Bayesian embedding
    embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding, 'lr': .001}])
    embedding_list = [torch.ones(8) * 1 / 3 for _ in range(num_schedules)]

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    if load_in_model:
        self.model.load_state_dict(torch.load(
            '/home/ghost/PycharmProjects/bayesian_prolo/saved_models/pairwise_saved_models/model_homog.tar'
        )['nn_state_dict'])

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        load_in_embedding(self.model, embedding_list, j)
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net
                    preference_prob = self.model.forward(feature_input)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)
            # top 1
            choice = np.argmax(column_vec)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            # tune the embedding on this set of twenty
            phi_i_num = truth + step  # old method: set_of_twenty[0] + truth
            phi_i = X[phi_i_num]
            phi_i_numpy = np.asarray(phi_i)
            # iterate over pairwise comparisons
            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j = X[counter]
                phi_j_numpy = np.asarray(phi_j)
                feature_input = phi_i_numpy - phi_j_numpy
                # label = add_noise_pairwise(label, self.noise_percentage)
                feature_input, P = transform_into_torch_vars(
                    feature_input, self.distribution_epsilon, True, torch.cuda.is_available())
                output = self.model(feature_input)
                loss = loss_func.forward(P, output, self.alpha)
                # skip degenerate or exploding losses; otherwise update the
                # embedding (the original duplicated this check; deduplicated)
                if loss.item() < .001 or loss.item() > 50:
                    pass
                else:
                    embedding_optimizer.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                    embedding_optimizer.step()

            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j = X[counter]
                phi_j_numpy = np.asarray(phi_j)
                feature_input = phi_j_numpy - phi_i_numpy
                feature_input, P = transform_into_torch_vars(
                    feature_input, self.distribution_epsilon, False, torch.cuda.is_available())
                output = self.model(feature_input)
                loss = loss_func.forward(P, output, self.alpha)
                # skip degenerate or exploding losses; otherwise update the embedding
                if loss.item() < .001 or loss.item() > 50:
                    pass
                else:
                    embedding_optimizer.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                    embedding_optimizer.step()

            embedding_list = store_embedding_back(self.model, embedding_list, j)
            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    print('top1_mean for ', self.alpha, ' is : ', np.mean(percentage_accuracy_top1))
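# `transform_into_torch_vars` is not shown in this section; from its call
# sites (and the inline equivalent in the BLSTM evaluation below), a plausible
# implementation wraps the 13-dim feature difference as a torch Variable and
# builds the epsilon-smoothed target distribution P, with the boolean flag
# choosing which class gets the (1 - epsilon) mass. A sketch:
import torch
from torch.autograd import Variable

def transform_into_torch_vars_sketch(feature_input, distribution_epsilon, positive, use_cuda):
    feature_input = torch.Tensor(feature_input.reshape(1, 13))
    if positive:
        # true action listed first: nearly all mass on class 0
        P = torch.Tensor([1 - distribution_epsilon, distribution_epsilon])
    else:
        P = torch.Tensor([distribution_epsilon, 1 - distribution_epsilon])
    if use_cuda:
        feature_input, P = feature_input.cuda(), P.cuda()
    return Variable(feature_input), Variable(P)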
def evaluate(self, clf):
    """
    Evaluate performance of a DT
    :return:
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        while step < schedule_bounds[1]:
            probability_vector = np.zeros((1, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                feature_input = phi_i_numpy
                # push through the decision tree
                preference_prob = clf.predict(feature_input.reshape(1, -1))
                probability_vector[0][m] = preference_prob[0]

            # set of twenty is complete; top 1 with random tie-breaking
            highest_val = max(probability_vector[0])
            all_indexes_that_have_highest_val = [
                i for i, e in enumerate(list(probability_vector[0])) if e == highest_val
            ]
            choice = np.random.choice(all_indexes_that_have_highest_val)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        prediction_accuracy = [0, 0]

    print(np.mean(percentage_accuracy_top1))
    print(np.std(percentage_accuracy_top1))
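# A hypothetical driver tying generate_test_data and evaluate together with a
# scikit-learn decision tree. ``PairwiseDTTrainer`` is an illustrative name
# for the class holding those two methods, not a name from the repo.
from sklearn import tree

trainer = PairwiseDTTrainer()  # hypothetical class name
data_matrix, output_matrix = trainer.generate_test_data()
clf = tree.DecisionTreeClassifier()
clf = clf.fit(data_matrix, output_matrix)
trainer.evaluate(clf)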
def evaluate_on_test_data(self, model):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    # embedding_optimizer = torch.optim.SGD(model.EmbeddingList.parameters(), lr=.4)
    criterion = torch.nn.BCELoss()
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)
    embedding_list = [torch.ones(3) * 1 / 3 for _ in range(num_schedules)]

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        # for each schedule, set the embedding
        model.set_bayesian_embedding(embedding_list[j])
        while step < schedule_bounds[1]:
            probability_vector = np.zeros((1, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                feature_input = phi_i_numpy
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                # push through net
                preference_prob = model.forward(feature_input)
                probability_vector[0][m] = preference_prob[0].data.detach()[0].item()
                # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # given all inputs and their likelihood of being scheduled, predict the output
            highest_val = max(probability_vector[0])
            all_indexes_that_have_highest_val = [
                i for i, e in enumerate(list(probability_vector[0])) if e == highest_val
            ]
            if len(all_indexes_that_have_highest_val) > 1:
                print('length of indexes greater than 1: ', all_indexes_that_have_highest_val)
            # top 1 (break ties uniformly at random)
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(probability_vector), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            # update loop: tune the embedding on the revealed truth
            phi_i_num = truth + step  # old method: set_of_twenty[0] + truth
            for counter in range(step, step + 20):
                label = torch.ones((1, 1)) if counter == phi_i_num else torch.zeros((1, 1))
                phi_j = X[counter]
                feature_input = np.asarray(phi_j)
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    label = Variable(label.cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    label = Variable(label.reshape(1, 1))

                output = model.forward(feature_input)
                self.embedding_optimizer.zero_grad()
                loss = criterion(output, label)
                loss.backward()
                # torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
                self.embedding_optimizer.step()
            print(model.EmbeddingList.state_dict())

            # store the tuned embedding back (very ugly)
            embedding_list[j] = torch.Tensor(model.get_bayesian_embedding().detach().cpu().numpy()[0])

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'pointwise_NN_unimodal.pkl')
    return percentage_accuracy_top1
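# `set_bayesian_embedding` / `get_bayesian_embedding` come from the model
# classes, which are not shown here; the sketch below is a minimal stand-in
# for the pattern (a network holding one trainable per-schedule embedding that
# is swapped in and out between schedules). Dimensions and layers are assumed.
import torch
import torch.nn as nn

class EmbeddingHeadSketch(nn.Module):
    def __init__(self, input_dim=13, embedding_dim=3):
        super().__init__()
        self.EmbeddingList = nn.ParameterList(
            [nn.Parameter(torch.ones(1, embedding_dim) / 3)])
        self.net = nn.Sequential(
            nn.Linear(input_dim + embedding_dim, 32), nn.ReLU(),
            nn.Linear(32, 1), nn.Sigmoid())

    def set_bayesian_embedding(self, embedding):
        # overwrite the stored embedding in place (no gradient recorded)
        with torch.no_grad():
            self.EmbeddingList[0].copy_(embedding.reshape(1, -1))

    def get_bayesian_embedding(self):
        return self.EmbeddingList[0]

    def forward(self, x):
        # concatenate the input features with the current embedding
        return self.net(torch.cat([x, self.EmbeddingList[0]], dim=1))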
def test_again_crisp(self, model, test_embeddings):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    This is tested on 20% of the data and will be stored in a text file.
    Note this function is called after training convergence
    :return:
    """
    # NOTE: the original mixed ``model`` and ``self.model`` below; we assume
    # the crisp (discretized) network is the one under evaluation throughout.
    self.model = convert_to_crisp(model, None)

    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    sig = torch.nn.Sigmoid()
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    embedding_optimizer = torch.optim.SGD([{'params': self.model.bayesian_embedding.parameters()}], lr=.01)
    criterion = torch.nn.BCELoss()
    embedding_list = test_embeddings

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        self.model.set_bayesian_embedding(embedding_list[j])
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net; crisp outputs go through a sigmoid
                    preference_prob = sig(self.model.forward(feature_input))
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)
            embedding_list[j] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy())  # very ugly

            # top 1: given all inputs and their likelihood of being scheduled,
            # predict the output (break ties uniformly at random)
            highest_val = max(column_vec)
            all_indexes_that_have_highest_val = [i for i, e in enumerate(list(column_vec)) if e == highest_val]
            if len(all_indexes_that_have_highest_val) > 1:
                print('length of indexes greater than 1: ', all_indexes_that_have_highest_val)
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        embedding_list[j] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy())  # very ugly
        prediction_accuracy = [0, 0]

    # NOTE: the original printed np.mean(prediction_accuracy[0]), which had
    # just been reset to 0; the per-schedule top-1 mean was surely intended.
    print(np.mean(percentage_accuracy_top1))
def evaluate_on_test_data(model, schedules_trained_on, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net
                    preference_prob = model.forward(feature_input)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)

            # top 1: given all inputs and their likelihood of being scheduled,
            # predict the output (break ties uniformly at random)
            highest_val = max(column_vec)
            all_indexes_that_have_highest_val = [
                i for i, e in enumerate(list(column_vec)) if e == highest_val
            ]
            if len(all_indexes_that_have_highest_val) > 1:
                print('length of indexes greater than 1: ', all_indexes_that_have_highest_val)
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'NN_pairwise.pkl')
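# The ranking block above is repeated nearly verbatim across these evaluation
# functions; if refactoring, the shared logic could live in one helper. A
# sketch, with random tie-breaking matching the originals:
import numpy as np
import torch

def rank_set_of_twenty(probability_matrix):
    # return (top1_choice, top3_indices) from a 20x20 preference matrix
    column_vec = probability_matrix.sum(axis=1)
    highest_val = column_vec.max()
    ties = np.flatnonzero(column_vec == highest_val)
    choice = np.random.choice(ties)  # break ties uniformly at random
    _, top_three = torch.topk(torch.Tensor(column_vec), 3)
    return choice, top_three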
def evaluate_on_test_data(self, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 75
    loss_func = AlphaLoss()
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/test/' + str(
        num_schedules) + '_inf_hetero_deadline_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)
    start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(X)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    # optimizer that only updates the embedding
    embedding_optimizer = torch.optim.Adam(self.model.EmbeddingList.parameters(), lr=.001)
    embedding_list = [torch.ones(1, 8) * 1 / 3 for _ in range(num_schedules)]

    if load_in_model:  # TODO: somehow get the string when the update_model flag is true
        self.model.load_state_dict(torch.load(
            '/home/ghost/PycharmProjects/bayesian_prolo/saved_models/pairwise_saved_models/NN_homog.tar'
        )['nn_state_dict'])

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        load_in_embedding_bnn(self.model, embedding_list, j)
        self.model.reinitialize_hidden_to_random()
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            previous_hidden_state = tuple([t.detach().cuda() for t in self.model.hidden])

            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net
                    preference_prob = self.model.forward(feature_input, previous_hidden_state)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)
            # top 1
            choice = np.argmax(column_vec)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            # tune the embedding on this set of twenty
            phi_i_num = truth + step  # old method: set_of_twenty[0] + truth
            phi_i = X[phi_i_num]
            phi_i_numpy = np.asarray(phi_i)
            # iterate over pairwise comparisons
            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j = X[counter]
                phi_j_numpy = np.asarray(phi_j)
                feature_input = phi_i_numpy - phi_j_numpy
                # label = add_noise_pairwise(label, self.noise_percentage)
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    P = Variable(torch.Tensor([1 - self.distribution_epsilon, self.distribution_epsilon]).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    P = Variable(torch.Tensor([1 - self.distribution_epsilon, self.distribution_epsilon]))
                output = self.model(feature_input, previous_hidden_state)
                loss = loss_func.forward(P, output, self.alpha)
                # skip degenerate or exploding losses; otherwise update the embedding
                if loss.item() < .001 or loss.item() > 50:
                    pass
                else:
                    embedding_optimizer.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                    embedding_optimizer.step()

            for counter in range(step, step + 20):
                if counter == phi_i_num:
                    continue
                phi_j = X[counter]
                phi_j_numpy = np.asarray(phi_j)
                feature_input = phi_j_numpy - phi_i_numpy
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    P = Variable(torch.Tensor([self.distribution_epsilon, 1 - self.distribution_epsilon]).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    P = Variable(torch.Tensor([self.distribution_epsilon, 1 - self.distribution_epsilon]))
                output = self.model(feature_input, previous_hidden_state)
                loss = loss_func.forward(P, output, self.alpha)
                # skip degenerate or exploding losses; otherwise update the embedding
                if loss.item() < .001 or loss.item() > 50:
                    pass
                else:
                    embedding_optimizer.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                    embedding_optimizer.step()

            store_embedding_back_bnn(self.model, embedding_list, j)
            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    self.save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3,
                                  'inf_blstm_small_' + str(self.num_schedules))
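# AlphaLoss is the project's alpha-divergence criterion and its exact form is
# not shown in this section. For reference only, a common formulation of the
# alpha divergence between a target distribution P and predicted distribution
# Q is D_alpha(P || Q) = (sum_i p_i^alpha * q_i^(1-alpha) - 1) / (alpha * (alpha - 1));
# the repo's AlphaLoss may differ in normalization or clamping.
import torch

class AlphaLossSketch(torch.nn.Module):
    def forward(self, P, Q, alpha):
        eps = 1e-8  # guard against pow of zero
        P = P.clamp(min=eps)
        Q = Q.clamp(min=eps)
        return ((P ** alpha * Q ** (1 - alpha)).sum() - 1.0) / (alpha * (alpha - 1.0))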
def evaluate_on_test_data(self, model, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    This is tested on 20% of the data and will be stored in a text file.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 75
    loss_func = AlphaLoss()
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/test/' + str(
        num_schedules) + '_inf_hetero_deadline_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)
    start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(X)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    if load_in_model:
        model.load_state_dict(torch.load(
            '/home/ghost/PycharmProjects/bayesian_prolo/saved_models/pairwise_saved_models/model_homog.tar'
        )['nn_state_dict'])

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        while step < schedule_bounds[1]:
            probability_matrix = np.zeros((20, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                # for each set of twenty
                for n, second_counter in enumerate(range(step, step + 20)):
                    # fill the matrix; the diagonal stays zero
                    if second_counter == counter:  # same as m == n
                        continue
                    phi_j = X[second_counter]
                    phi_j_numpy = np.asarray(phi_j)
                    feature_input = phi_i_numpy - phi_j_numpy
                    if torch.cuda.is_available():
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    else:
                        feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    # push through net; both output classes fill the matrix symmetrically
                    preference_prob = model.forward(feature_input)
                    probability_matrix[m][n] = preference_prob[0].data.detach()[0].item()
                    probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()

            # set of twenty is complete
            column_vec = np.sum(probability_matrix, axis=1)
            # top 1
            choice = np.argmax(column_vec)
            # top 3
            _, top_three = torch.topk(torch.Tensor(column_vec), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    self.save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3,
                                  'inf_DDT' + str(self.num_schedules))
def evaluate_on_test_data(self, model, load_in_model=False):
    """
    Evaluate performance of a trained network tuned upon the alpha divergence loss.
    This is tested on 20% of the data and will be stored in a text file.
    Note this function is called after training convergence
    :return:
    """
    num_schedules = 100
    # load in new data
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        num_schedules) + 'test_dist_early_hili_pairwise.pkl'
    data = pickle.load(open(load_directory, "rb"))
    X, Y, schedule_array = create_new_data(num_schedules, data)

    prediction_accuracy = [0, 0]
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []

    # optimizer that only updates the Bayesian embedding
    # NOTE: the original referenced ``self.model`` here and when reading the
    # embedding back; we assume the passed-in ``model`` was intended throughout.
    embedding_optimizer = torch.optim.SGD([{'params': model.bayesian_embedding.parameters()}], lr=.01)
    criterion = torch.nn.BCELoss()
    sig = torch.nn.Sigmoid()

    if load_in_model:
        model.load_state_dict(torch.load(
            '/home/ghost/PycharmProjects/bayesian_prolo/saved_models/pairwise_saved_models/model_homog.tar'
        )['nn_state_dict'])

    embedding_list = [torch.ones(3) * 1 / 3 for _ in range(num_schedules)]

    for j in range(num_schedules):
        schedule_bounds = schedule_array[j]
        step = schedule_bounds[0]
        model.set_bayesian_embedding(embedding_list[j])
        while step < schedule_bounds[1]:
            probability_vector = np.zeros((1, 20))
            for m, counter in enumerate(range(step, step + 20)):
                phi_i = X[counter]
                phi_i_numpy = np.asarray(phi_i)
                feature_input = phi_i_numpy
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                # push through net; pointwise scores go through a sigmoid
                preference_prob = sig(model.forward(feature_input))
                probability_vector[0][m] = preference_prob[0].data.detach()[0].item()

            embedding_list[j] = torch.Tensor(model.get_bayesian_embedding().detach().cpu().numpy())  # very ugly
            print(probability_vector)

            # top 1 (break ties uniformly at random)
            highest_val = max(probability_vector[0])
            all_indexes_that_have_highest_val = [
                i for i, e in enumerate(list(probability_vector[0])) if e == highest_val
            ]
            choice = np.random.choice(all_indexes_that_have_highest_val)
            # top 3
            _, top_three = torch.topk(torch.Tensor(probability_vector), 3)

            truth = Y[step]
            if choice == truth:
                prediction_accuracy[0] += 1
            if truth in top_three:
                prediction_accuracy[1] += 1

            # update loop: tune the embedding on the revealed truth
            phi_i_num = truth + step  # old method: set_of_twenty[0] + truth
            for counter in range(step, step + 20):
                label = torch.ones((1, 1)) if counter == phi_i_num else torch.zeros((1, 1))
                phi_j = X[counter]
                feature_input = np.asarray(phi_j)
                if torch.cuda.is_available():
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())
                    label = Variable(label.cuda())
                else:
                    feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))
                    label = Variable(label.reshape(1, 1))

                output = sig(model.forward(feature_input))
                embedding_optimizer.zero_grad()
                loss = criterion(output, label)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
                embedding_optimizer.step()
            # print(model.EmbeddingList.state_dict())

            step += 20

        # schedule finished
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', j)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]

    self.save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3,
                                  'PDDT_pointwise' + str(self.num_schedules))
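# A hypothetical end-to-end driver for one of these trainers. The class name
# and the ``train()`` entry point are illustrative assumptions; only
# ``evaluate_on_test_data`` appears in the code above.
if __name__ == '__main__':
    trainer = PDDTTrainer(num_schedules=150)  # hypothetical class name
    trainer.train()                           # assumed training entry point
    trainer.evaluate_on_test_data(trainer.model, load_in_model=False)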