def __init__(self, num_schedules):
    """Set up the naive-task BLSTM trainer.

    Loads the pickled dataset selected by ``num_schedules``, appends the
    task indices 0..19 to every feature vector, builds a ``BLSTMSmall``
    model, and creates separate optimizers for the network body and its
    embedding layer.

    :param num_schedules: number of schedules in the dataset; also selects
        which ``*_inf_hetero_deadline_naive.pkl`` file is loaded.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_inf_hetero_deadline_naive.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_dataset(
        num_schedules=self.num_schedules, data=self.data)
    # Append task indices 0..19 to each feature vector.
    for i, each_element in enumerate(self.X):
        self.X[i] = each_element + list(range(20))

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = BLSTMSmall().to(device)

    print(self.model.state_dict())
    blstm_params = list(self.model.parameters())
    # NOTE(review): deleting parameter 12 by position is fragile — it assumes
    # a fixed parameter ordering in BLSTMSmall; presumably this excludes the
    # embedding weights so only embedding_optimizer trains them. Confirm.
    del blstm_params[12]
    self.opt = torch.optim.Adam(
        blstm_params, lr=.0001)  # TODO: try together and not together
    self.embedding_optimizer = torch.optim.SGD(
        self.model.EmbeddingList.parameters(), lr=.001)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    # One (1, 8) embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [
        torch.ones(1, 8) * 1 / 3 for _ in range(self.num_schedules)
    ]
def __init__(self, num_schedules):
    """Set up the pairwise-comparison LSTM trainer.

    Loads the pickled pairwise dataset selected by ``num_schedules``,
    precomputes the index of the first element of each set of 20, and
    builds an ``LSTMSmall`` model with a single Adam optimizer.

    :param num_schedules: number of schedules in the dataset; also selects
        which ``*_inf_hetero_deadline_pairwise.pkl`` file is loaded.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_inf_hetero_deadline_pairwise.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(
        self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = LSTMSmall().to(device)

    print(self.model.state_dict())
    params = list(self.model.parameters())
    self.opt = torch.optim.Adam(params)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
def __init__(self):
    """Set up the three-network k-means pairwise baseline.

    Loads the pairwise dataset for 150 schedules, builds three ``NNSmall``
    models (one per cluster) each with its own Adam optimizer, then loads
    the per-schedule matrices and clusters them with k-means.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_pairwise.pkl'

    # Context managers close the files; the previous pickle.load(open(...))
    # calls leaked the file descriptors.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(
        self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # One model (and optimizer) per k-means cluster.
    model1 = NNSmall().to(device)
    model2 = NNSmall().to(device)
    model3 = NNSmall().to(device)
    self.models = [model1, model2, model3]

    opt1 = torch.optim.Adam(model1.parameters())
    opt2 = torch.optim.Adam(model2.parameters())
    opt3 = torch.optim.Adam(model3.parameters())
    self.optimizers = [opt1, opt2, opt3]

    schedule_matrix_load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/' + str(
        self.num_schedules) + 'matrixes.pkl'
    with open(schedule_matrix_load_directory, "rb") as f:
        self.matrices = pickle.load(f)

    self.kmeans_model, self.label = self.cluster_matrices(
        self.matrices, self.num_schedules)
def __init__(self):
    """Set up the naive-task embedding-network trainer.

    Loads the naive dataset for 150 schedules, appends task indices 0..19
    to each feature vector, and builds an ``NNwEmbedding`` model with a
    joint optimizer (higher lr on the embedding) plus a dedicated
    embedding optimizer.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_naive.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_dataset(
        num_schedules=self.num_schedules, data=self.data)
    # Append task indices 0..19 to each feature vector.
    for i, each_element in enumerate(self.X):
        self.X[i] = each_element + list(range(20))

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = NNwEmbedding().to(device)

    print(self.model.state_dict())
    # Joint optimizer: default lr for the body, .01 for the embedding layer.
    self.opt = torch.optim.Adam(
        [{'params': list(self.model.parameters())[:-1]},
         {'params': self.model.EmbeddingList.parameters(), 'lr': .01}])
    self.embedding_optimizer = torch.optim.Adam(
        self.model.EmbeddingList.parameters(), lr=.1)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    # One length-3 embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [
        torch.ones(3) * 1 / 3 for _ in range(self.num_schedules)
    ]
def __init__(self):
    """Set up the pairwise embedding-network trainer.

    Loads the pairwise dataset for 150 schedules and builds an
    ``NNwEmbedding`` model with a joint Adam optimizer (separate lr for the
    embedding layer) plus a dedicated SGD embedding optimizer.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_pairwise.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(
        self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = NNwEmbedding().to(device)

    print(self.model.state_dict())
    # Joint optimizer: .001 for the network body, .01 for the embedding layer.
    self.opt = torch.optim.Adam(
        [{
            'params': list(self.model.parameters())[:-1]
        }, {
            'params': self.model.EmbeddingList.parameters(),
            'lr': .01
        }],
        lr=.001)
    self.embedding_optimizer = torch.optim.SGD(
        self.model.EmbeddingList.parameters(), lr=.01)

    # One length-3 embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [
        torch.ones(3) * 1 / 3 for _ in range(self.num_schedules)
    ]
def __init__(self, alpha):
    """Set up the Bayesian-embedding ProLoNet pairwise trainer.

    Loads the pairwise dataset for 150 schedules, builds a ``ProLoNet``
    with a 14-dim Bayesian embedding, and creates an RMSprop optimizer
    that trains the embedding at a separate learning rate.

    :param alpha: smoothing factor stored on the instance (the ProLoNet's
        own ``alpha`` is fixed at 1.5).
    """
    self.arguments = Logger()
    self.alpha = alpha
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/scheduling_environment/new_data_pickle/' + str(
        self.num_schedules) + 'pairwise.pkl'

    self.X = None
    self.Y = None
    self.schedule_array = None
    bayesian_embedding_dim = 14
    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    self.start_of_each_set_twenty = self.create_sets_of_20_from_x_for_pairwise_comparisions(
    )
    # One embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [
        torch.ones(bayesian_embedding_dim) * 1 / 3
        for _ in range(self.num_schedules)
    ]
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build on CPU; the use_gpu flag below decides the single move to GPU.
    # (Previously .cuda() was called unconditionally here AND again under the
    # flag, which both defeated the flag and moved the model twice.)
    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=16,
                          output_dim=2,
                          bayesian_embedding_dim=bayesian_embedding_dim,
                          alpha=1.5,
                          use_gpu=True,
                          vectorized=True,
                          is_value=False)

    use_gpu = True
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())
    params = list(self.model.parameters())
    # Exclude the embedding parameter (index 0) from the body's param group;
    # it gets its own group with lr=.001 below.
    del params[0]
    self.opt = torch.optim.RMSprop([{
        'params': params
    }, {
        'params': self.model.bayesian_embedding,
        'lr': .001
    }])

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.covergence_epsilon = .01  # NOTE(review): attribute name typo kept for compatibility
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
def __init__(self, num_schedules):
    """Set up the deepening-vs-non-deepening ProLoNet pairwise trainer.

    Loads the pairwise dataset and builds two identical ProLoNets: one
    that will be allowed to deepen and one that will not.

    :param num_schedules: number of schedules in the dataset; also selects
        which ``*_inf_hetero_deadline_pairwise.pkl`` file is loaded.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_inf_hetero_deadline_pairwise.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    self.model_will_deepen = ProLoNet(input_dim=len(self.X[0]),
                                      weights=None,
                                      comparators=None,
                                      leaves=16,
                                      output_dim=2,
                                      bayesian_embedding_dim=8,
                                      alpha=1.5,
                                      use_gpu=True,
                                      vectorized=True,
                                      is_value=False)
    self.model_will_not_deepen = ProLoNet(input_dim=len(self.X[0]),
                                          weights=None,
                                          comparators=None,
                                          leaves=16,
                                          output_dim=2,
                                          bayesian_embedding_dim=8,
                                          alpha=1.5,
                                          use_gpu=True,
                                          vectorized=True,
                                          is_value=False)

    use_gpu = True
    # FIXME(review): self.model is never assigned in this __init__ — only
    # model_will_deepen / model_will_not_deepen exist — so the lines below
    # raise AttributeError at runtime. Presumably one of the two models (or
    # both) should be moved to GPU and given an optimizer here; confirm
    # intent before changing, since it decides which parameters are trained.
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())
    params = list(self.model.parameters())
    # Exclude the embedding parameter (index 0); it gets its own group below.
    del params[0]
    self.opt = torch.optim.RMSprop(
        [{'params': params},
         {'params': self.model.bayesian_embedding, 'lr': .001}])  # TODO: make optimizers seperate

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.covergence_epsilon = .01  # NOTE(review): attribute name typo kept for compatibility
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    self.max_depth = 10
    # One length-8 embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [torch.ones(8) * 1 / 3 for _ in range(self.num_schedules)]
def __init__(self, bayesian_dim):
    """Set up the BDFIL ProLoNet pairwise trainer.

    Loads the 200-schedule BDFIL pairwise dataset and builds a ``ProLoNet``
    whose Bayesian embedding dimension is configurable; the embedding is
    trained by its own Adam optimizer, separate from the RMSprop body
    optimizer.

    :param bayesian_dim: dimensionality of the per-schedule Bayesian
        embedding (coerced to int).
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 200  # test on 40, train on 160
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_BDFIL_hetero_deadline_pairwise.pkl'

    self.bayesian_embedding_dim = int(bayesian_dim)
    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    # NOTE(review): sibling initializers pass self.X here, not
    # self.schedule_array — confirm which argument this helper expects.
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(
        self.schedule_array)
    # One embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [
        torch.ones(self.bayesian_embedding_dim) * 1 / 3
        for _ in range(self.num_schedules)
    ]
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    use_gpu = True
    # Build on CPU; the use_gpu flag below decides the single move to GPU.
    # (Previously .cuda() was called unconditionally here AND again under the
    # flag, which both defeated the flag and moved the model twice.)
    self.model = ProLoNet(
        input_dim=len(self.X[0]),
        weights=None,
        comparators=None,
        leaves=16,
        output_dim=2,
        bayesian_embedding_dim=self.bayesian_embedding_dim,
        alpha=1.5,
        use_gpu=use_gpu,
        vectorized=True,
        is_value=False)

    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())
    params = list(self.model.parameters())

    # delete embedding parameter
    del params[0]
    self.opt = torch.optim.RMSprop(params, lr=.0001)

    # optimizer for the embedding
    self.embedding_optimizer = torch.optim.Adam([{
        'params': self.model.bayesian_embedding,
        'lr': .01
    }])

    self.total_iterations = 0
    self.covergence_epsilon = .01  # NOTE(review): attribute name typo kept for compatibility
    self.when_to_save = 2000
    self.distribution_epsilon = .0001
def __init__(self):
    """Set up the StarCraft naive action-prediction trainer.

    Records the train/test encoding directories, loads the list of games
    with a win/loss label, and builds a ``Naive_NN`` with an Adam optimizer.
    """
    self.arguments = Logger()
    self.loss_baseline_action = []
    self.sc2_max_training_iterations = self.arguments.sc2_max_training_iterations
    self.home_dir = "/home/ghost/PycharmProjects/scheduling_environment"

    # Directories
    self.all_data_train_dir = '/home/ghost/PycharmProjects/scheduling_environment/training_encoding_states_starcraft'
    self.all_data_test_dir = '/home/ghost/PycharmProjects/scheduling_environment/testing_encoding_states_starcraft'
    self.mmr = '/home/ghost/PycharmProjects/scheduling_environment/games_that_have_an_win_loss.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor. This produces the list of games.
    with open(self.mmr, "rb") as f:
        self.list_of_games_mmr_train = pickle.load(f)
    self.size_of_training_set = len(self.list_of_games_mmr_train)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.nn_action = Naive_NN().to(device)
    self.optimizer = torch.optim.Adam(self.nn_action.parameters())
def __init__(self, num_schedules):
    """Set up the pairwise ``NNSmall`` SGD baseline.

    Loads the high/low heterogeneous-deadline pairwise dataset and builds
    an ``NNSmall`` model with an SGD optimizer.

    :param num_schedules: number of schedules in the dataset; also selects
        which ``*high_low_hetero_deadline_pairwise.pkl`` file is loaded.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'high_low_hetero_deadline_pairwise.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = NNSmall().to(device)

    print(self.model.state_dict())
    self.opt = torch.optim.SGD(self.model.parameters(), lr=.0001)  # TODO: tune weight decay
def __init__(self):
    """Set up the three-network k-means naive baseline.

    Loads the naive dataset for 150 schedules, appends task indices 0..19
    to each feature vector, builds three ``NNSmall`` models (one per
    cluster) with RMSprop optimizers, then loads the per-schedule matrices
    and clusters them with k-means.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    # TODO: load in new directory
    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_naive.pkl'

    # Context managers close the files; the previous pickle.load(open(...))
    # calls leaked the file descriptors.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_dataset(
        num_schedules=self.num_schedules, data=self.data)
    # Append task indices 0..19 to each feature vector.
    for i, each_element in enumerate(self.X):
        self.X[i] = each_element + list(range(20))

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # One model (and optimizer) per k-means cluster.
    model1 = NNSmall().to(device)
    model2 = NNSmall().to(device)
    model3 = NNSmall().to(device)
    self.models = [model1, model2, model3]

    opt1 = torch.optim.RMSprop(model1.parameters(), lr=.0001)
    opt2 = torch.optim.RMSprop(model2.parameters(), lr=.0001)
    opt3 = torch.optim.RMSprop(model3.parameters(), lr=.0001)
    self.optimizers = [opt1, opt2, opt3]

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001

    schedule_matrix_load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/' + str(
        self.num_schedules) + 'matrixes.pkl'
    with open(schedule_matrix_load_directory, "rb") as f:
        self.matrices = pickle.load(f)

    self.kmeans_model, self.label = self.cluster_matrices(
        self.matrices, self.num_schedules)
def __init__(self):
    """Set up the naive-task ProLoNet trainer (no Bayesian embedding).

    Loads the 150-schedule naive dataset, appends task indices 0..19 to
    each feature vector, and builds a 256-leaf non-vectorized ``ProLoNet``
    trained with RMSprop.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_inf_hetero_deadline_naive.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    # BUG FIX: the original called create_new_dataset(self.data,
    # num_schedules=...), passing data positionally into the first
    # parameter (num_schedules, per the sibling call sites) while also
    # passing num_schedules by keyword — a TypeError. Pass both by keyword.
    self.X, self.Y, self.schedule_array = create_new_dataset(
        num_schedules=self.num_schedules, data=self.data)
    # Append task indices 0..19 to each feature vector.
    for i, each_element in enumerate(self.X):
        self.X[i] = each_element + list(range(20))

    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=256,
                          output_dim=20,
                          bayesian_embedding_dim=None,
                          alpha=1.5,
                          use_gpu=True,
                          vectorized=False,
                          is_value=False)

    use_gpu = True
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())
    params = list(self.model.parameters())
    self.opt = torch.optim.RMSprop([{'params': params}])

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.covergence_epsilon = .01  # NOTE(review): attribute name typo kept for compatibility
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
def __init__(self, num_schedules):
    """Set up the naive-task ProLoNet trainer with a Bayesian embedding.

    Loads the naive dataset selected by ``num_schedules``, appends task
    indices 0..19 to each feature vector, and builds a 64-leaf vectorized
    ``ProLoNet`` whose 8-dim embedding is trained at a higher learning rate.

    :param num_schedules: number of schedules in the dataset; also selects
        which ``*dist_early_hili_naive.pkl`` file is loaded.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + 'dist_early_hili_naive.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_dataset(
        num_schedules=self.num_schedules, data=self.data)
    # Append task indices 0..19 to each feature vector.
    for i, each_element in enumerate(self.X):
        self.X[i] = each_element + list(range(20))

    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=64,
                          output_dim=20,
                          bayesian_embedding_dim=8,
                          alpha=1.5,
                          use_gpu=True,
                          vectorized=True,
                          is_value=False)

    use_gpu = True
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())
    # Default lr for the body; .01 for the Bayesian embedding (last param).
    self.opt = torch.optim.RMSprop(
        [{'params': list(self.model.parameters())[:-1]},
         {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}])

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.covergence_epsilon = .01  # NOTE(review): attribute name typo kept for compatibility
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    self.max_depth = 10
    # One length-8 embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [torch.ones(8) * 1 / 3 for _ in range(self.num_schedules)]
def __init__(self, num_schedules):
    """Set up the BDFIL pairwise ``BNNSmall`` trainer.

    Loads the BDFIL pairwise dataset selected by ``num_schedules`` and
    builds a ``BNNSmall`` model whose embedding layer is trained by its
    own Adam optimizer, separate from the body optimizer.

    :param num_schedules: number of schedules in the dataset; also selects
        which ``*_BDFIL_hetero_deadline_pairwise.pkl`` file is loaded.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = num_schedules
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_BDFIL_hetero_deadline_pairwise.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(
        self.X)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    self.model = BNNSmall().to(device)

    print(self.model.state_dict())
    bnn_params = list(self.model.parameters())
    # NOTE(review): deleting parameter 10 by position is fragile — it assumes
    # a fixed parameter ordering in BNNSmall; presumably this excludes the
    # embedding weights so only embedding_optimizer trains them. Confirm.
    del bnn_params[10]
    self.opt = torch.optim.Adam([{
        'params': bnn_params
    }])  # TODO: try together and not together
    self.embedding_optimizer = torch.optim.Adam(
        self.model.EmbeddingList.parameters(), lr=.001)
    # params = list(self.model.parameters())
    # self.opt = torch.optim.Adam(params)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.convergence_epsilon = .01
    self.when_to_save = 1000
    self.distribution_epsilon = .0001
    # One (1, 8) embedding per schedule, initialized uniformly to 1/3.
    self.embedding_list = [
        torch.ones(1, 8) * 1 / 3 for _ in range(self.num_schedules)
    ]
def __init__(self):
    """Set up the homogeneous-deadline ProLoNet pairwise trainer.

    Loads the 150-schedule homogeneous pairwise dataset and builds a
    64-leaf vectorized ``ProLoNet`` (no Bayesian embedding) trained with
    RMSprop.
    """
    self.arguments = Logger()
    self.alpha = .9
    self.num_schedules = 150
    self.home_dir = self.arguments.home_dir
    self.total_loss_array = []

    load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(
        self.num_schedules) + '_homog_deadline_pairwise.pkl'

    # Context manager closes the file; the previous pickle.load(open(...))
    # leaked the file descriptor.
    with open(load_directory, "rb") as f:
        self.data = pickle.load(f)
    self.X, self.Y, self.schedule_array = create_new_data(
        self.num_schedules, self.data)
    self.start_of_each_set_twenty = self.create_sets_of_20_from_x_for_pairwise_comparisions(
    )

    self.model = ProLoNet(input_dim=len(self.X[0]),
                          weights=None,
                          comparators=None,
                          leaves=64,
                          output_dim=2,
                          bayesian_embedding_dim=None,
                          alpha=1.5,
                          use_gpu=True,
                          vectorized=True,
                          is_value=False)

    use_gpu = True
    if use_gpu:
        self.model = self.model.cuda()
    print(self.model.state_dict())
    params = list(self.model.parameters())
    self.opt = torch.optim.RMSprop(params)

    self.num_iterations_predict_task = 0
    self.total_iterations = 0
    self.covergence_epsilon = .01  # NOTE(review): attribute name typo kept for compatibility
    self.when_to_save = 1000
    self.distribution_epsilon = .0001