def test(self, X, y, sol, solver):
    Q = self.Q
    G = self.G
    h = self.h
    A = self.A
    b = self.b
    F = self.F
    model_params_quad = self.gurobi_model
    runtime = 0  # cumulative Gurobi solve time
    n_train = 1
    n_items = self.n_items
    model = self.model
    model.eval()
    X_tensor = torch.tensor(X, dtype=torch.float)
    c_pred = model(X_tensor)
    y_pred = c_pred.detach().numpy().squeeze()
    model.train()
    n_knap = len(y) // n_items
    sol_pred = self.solution_func(y_pred, solver=solver)
    regret_list = []
    loss_list = []
    for i in range(n_knap):
        n_start = n_items * i
        n_stop = n_start + n_items
        regret = (y[n_start:n_stop] * (sol_pred[i][0] - sol[i][0])).sum()
        regret_list.append(regret)
        c = torch.mm(F, c_pred[n_start:n_stop])
        c_true = torch.mm(
            F,
            torch.tensor(y[n_start:n_stop], dtype=torch.float).unsqueeze(1))
        # qp_solver, not `solver`: the argument above is the Gurobi_ICON solver
        qp_solver = QPFunction(verbose=False,
                               solver=QPSolvers.GUROBI,
                               model_params=model_params_quad)
        # A and b are passed as empty tensors; the equality constraints are
        # already encoded in the prebuilt Gurobi model (self.gurobi_model)
        x = qp_solver(Q.expand(n_train, *Q.shape), c.squeeze(),
                      G.expand(n_train, *G.shape),
                      h.expand(n_train, *h.shape), torch.Tensor(),
                      torch.Tensor())
        loss = (x.squeeze() * c_true.squeeze()).mean().item()
        loss_list.append(loss)
        runtime += qp_solver.Runtime()
    return np.median(regret_list), mse(y, y_pred), np.median(loss_list), runtime
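# Regret, as computed in test() above, is the extra true cost incurred by
# optimizing over the predicted values instead of the true ones:
#     regret = y_true . (x(y_pred) - x(y_true)).
# Toy check (illustrative numbers, minimization): with true per-period costs
# y_true = [4, 1], true-optimal decision x(y_true) = [0, 1] (cost 1) and
# prediction-driven decision x(y_pred) = [1, 0] (cost 4),
#     regret = 4*(1 - 0) + 1*(0 - 1) = 3 >= 0.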
def test(self, X, y, relaxation=False):
    Q = self.Q
    G = self.G
    h = self.h
    n_train = 1
    n_items = self.n_items
    capacity = self.capacity
    model = self.model
    weights = self.weights
    model.eval()
    X_tensor = torch.tensor(X, dtype=torch.float)
    y_pred = model(X_tensor).detach().numpy().squeeze()
    n_knapsacks = X.shape[0] // n_items
    regret_list = []
    cf_list = []
    loss_list = []
    runtime = 0  # cumulative Gurobi solve time
    model_params_quad = make_gurobi_model(G.detach().numpy(),
                                          h.detach().numpy(), None, None,
                                          Q.detach().numpy())
    for i in range(n_knapsacks):
        n_start = n_items * i
        n_stop = n_start + n_items
        regret, cf = regret_knapsack([y[n_start:n_stop]],
                                     [y_pred[n_start:n_stop]],
                                     weights=weights,
                                     cap=[capacity],
                                     relaxation=relaxation)
        regret_list.append(regret)
        cf_list.append(cf)
        z = torch.tensor(y[n_start:n_stop], dtype=torch.float)
        X_tensor = torch.tensor(X[n_start:n_stop, :], dtype=torch.float)
        c_true = -z
        c_pred = -model(X_tensor)
        solver = QPFunction(verbose=False,
                            solver=QPSolvers.GUROBI,
                            model_params=model_params_quad)
        x = solver(Q.expand(n_train, *Q.shape), c_pred.squeeze(),
                   G.expand(n_train, *G.shape), h.expand(n_train, *h.shape),
                   torch.Tensor(), torch.Tensor())
        runtime += solver.Runtime()
        loss_list.append((x.squeeze() * c_true).mean().item())
    model.train()
    if not relaxation:
        # each cf entry is a 2x2 confusion matrix over item selections
        tn, fp, fn, tp = np.sum(np.stack(cf_list), axis=0).ravel()
        accuracy = (tn + tp) / (tn + fp + fn + tp)
    else:
        accuracy = None
    return (np.median(regret_list), mse(y, y_pred), accuracy,
            np.median(loss_list), runtime)
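# Both test() methods above call an mse(...) helper that is not defined in this
# section; presumably it is a plain mean squared error over the flattened
# predictions. A minimal sketch under that assumption (the name and signature
# are hypothetical):
import numpy as np

def mse_sketch(y_true, y_pred):
    # mean squared error between true and predicted item values
    return np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)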
def compute_matching(current_pool_list, curr_type_weights, e_weights_by_type,
                     gamma=0.000001):
    # current_pool_list should have lhs and rhs; get them both as tensors
    lhs_current_elems = torch.tensor([x[0] for x in current_pool_list.lhs])
    rhs_current_elems = torch.tensor([x[0] for x in current_pool_list.rhs])
    l_n = lhs_current_elems.shape[0]
    r_n = rhs_current_elems.shape[0]
    A, b = make_matching_matrix(l_n, r_n)
    A = torch.from_numpy(A).float()
    b = torch.from_numpy(b).float()
    # edge weights over lhs x rhs, with a small jitter to break ties
    e_weights = weight_matrix(current_pool_list.lhs, current_pool_list.rhs,
                              e_weights_by_type).view(l_n, r_n)
    jitter_e_weights = e_weights + 1e-4 * jitter_matrix(l_n, r_n)
    model_params_quad = make_gurobi_model(A.detach().numpy(),
                                          b.detach().numpy(), None, None,
                                          gamma * np.eye(A.shape[1]))
    func = QPFunction(verbose=False,
                      solver=QPSolvers.GUROBI,
                      model_params=model_params_quad)
    Q_mat = gamma * torch.eye(A.shape[1])
    curr_elem_weights = type_weight_matrix(lhs_current_elems,
                                           rhs_current_elems,
                                           curr_type_weights).view(l_n, r_n)
    modified_edge_weights = jitter_e_weights - 0.5 * curr_elem_weights
    # QPFunction minimizes, so the (maximization) edge weights are negated
    resulting_match = func(Q_mat, -modified_edge_weights.view(-1), A, b,
                           torch.Tensor(), torch.Tensor()).view(l_n, r_n)
    return resulting_match, e_weights
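# make_matching_matrix is defined elsewhere; compute_matching above assumes it
# returns an inequality system (A, b) over the flattened l_n x r_n assignment
# variables. A plausible construction under that assumption (illustrative
# sketch, not the repo's actual helper): each lhs/rhs element is matched at
# most once, and assignments are non-negative.
import numpy as np

def make_matching_matrix_sketch(l_n, r_n):
    n_vars = l_n * r_n  # x[i, j] flattened as i * r_n + j
    row_sums = np.zeros((l_n, n_vars))
    for i in range(l_n):
        row_sums[i, i * r_n:(i + 1) * r_n] = 1.0   # sum_j x[i, j] <= 1
    col_sums = np.zeros((r_n, n_vars))
    for j in range(r_n):
        col_sums[j, j::r_n] = 1.0                  # sum_i x[i, j] <= 1
    A = np.vstack([row_sums, col_sums, -np.eye(n_vars)])  # last block: x >= 0
    b = np.concatenate([np.ones(l_n + r_n), np.zeros(n_vars)])
    return A, b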
def train_fwbw(self, data, init_criterion, init_opti, sol_noispool,
               epoch=None):
    train_dl = DataLoader(data, batch_size=self.bsizeCOP, shuffle=True)
    optimizer = init_opti
    sign = -1 if self.maximize else 1  # QPFunction minimizes
    A, b, G, h = self.get_qpt_mat(**self.param)
    G_trch = torch.from_numpy(G if G is not None else np.array([])).float()
    h_trch = torch.from_numpy(h if h is not None else np.array([])).float()
    A_trch = torch.from_numpy(A if A is not None else np.array([])).float()
    b_trch = torch.from_numpy(b if b is not None else np.array([])).float()
    Q_trch = self.tau * torch.eye(G.shape[1])
    model_params_quad = make_gurobi_model(G, h, A, b,
                                          Q_trch.detach().numpy())
    for xb, yb, solb in train_dl:
        optimizer.zero_grad()
        for i in range(len(yb)):
            y_pred = self.model(xb[i]).squeeze()
            sol = QPFunction(verbose=False,
                             solver=QPSolvers.GUROBI,
                             model_params=model_params_quad)(
                                 Q_trch.expand(1, *Q_trch.shape),
                                 sign * y_pred,
                                 G_trch.expand(1, *G_trch.shape),
                                 h_trch.expand(1, *h_trch.shape),
                                 A_trch.expand(1, *A_trch.shape),
                                 b_trch.expand(1, *b_trch.shape))
            loss = -(sol * yb[i]).mean()
            loss.backward()  # gradients accumulate over the batch
        optimizer.step()
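# The sign flip in train_fwbw exists because QPFunction solves
#     argmin_x  (1/2) x^T Q x + p^T x   s.t.  G x <= h, A x = b,
# so maximizing predicted profits y_pred means passing p = -y_pred. A minimal
# self-contained sketch with stock qpth (the fork-specific GUROBI solver and
# model_params arguments used throughout this file are omitted; all numbers
# are illustrative):
import torch
from qpth.qp import QPFunction

y_hat = torch.tensor([0.5, 0.8], requires_grad=True)  # predicted item profits
Q = 1e-3 * torch.eye(2)                               # small quadratic smoothing
G = torch.tensor([[3.0, 4.0]])                        # one capacity row: 3x1 + 4x2
h = torch.tensor([5.0])                               # ... <= 5
e = torch.Tensor()                                    # no equality constraints
x = QPFunction(verbose=False)(Q, -y_hat, G, h, e, e)  # maximize => negate y_hat
loss = -(x * y_hat).mean()
loss.backward()                                       # d loss / d y_hat via the QP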
def fit(self, economic_data, properties_data):
    logging.info("QPTL")
    train_df = MyCustomDataset(economic_data, properties_data)
    for e in range(self.epochs):
        total_loss = 0
        train_dl = data_utils.DataLoader(train_df,
                                         batch_size=self.batch_size,
                                         shuffle=False)
        for x_f, x_c, x_t, y, cst in train_dl:
            self.optimizer.zero_grad()
            # fresh LSTM state per batch
            h = torch.zeros(
                (self.num_layers, x_t.shape[0], self.hidden_size),
                dtype=torch.float)
            c = torch.zeros(
                (self.num_layers, x_t.shape[0], self.hidden_size),
                dtype=torch.float)
            op, states = self.model(x_f, x_c, x_t, (h, c))
            h, c = states
            # constraint matrices get their own names so they do not clobber
            # the LSTM state h above
            G_mat, h_vec, A_mat, b_vec = make_matrix_qp(
                cst.detach().numpy(), x_t.shape[0] * self.budget)
            Q = torch.eye(x_t.shape[0]) / self.tau
            # the Gurobi model's quadratic term must match Q = I / tau passed
            # to QPFunction below
            model_params_quad = make_gurobi_model(G_mat.detach().numpy(),
                                                  h_vec.detach().numpy(),
                                                  A_mat.detach().numpy(),
                                                  b_vec.detach().numpy(),
                                                  Q.detach().numpy())
            x = QPFunction(verbose=False,
                           solver=QPSolvers.GUROBI,
                           model_params=model_params_quad)(
                               Q.expand(1, *Q.shape), -op,
                               G_mat.expand(1, *G_mat.shape),
                               h_vec.expand(1, *h_vec.shape),
                               A_mat.expand(1, *A_mat.shape),
                               b_vec.expand(1, *b_vec.shape))
            loss = -(x * y).mean()
            op.retain_grad()  # keeps op.grad available for inspection
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
        logging.info("EPOCH Ends")
def fit(self, X, y, X_validation=None, y_validation=None, X_test=None,
        y_test=None):
    # if validation/testing is wanted, the corresponding data must be provided
    tau = self.tau
    start = time.time()
    validation_time = 0
    test_time = 0
    X = X[:, 1:]
    validation = (X_validation is not None) and (y_validation is not None)
    test = (X_test is not None) and (y_test is not None)
    if self.doScale:
        self.scaler = preprocessing.StandardScaler().fit(X)
        X = self.scaler.transform(X)
    if validation:
        start_validation = time.time()
        X_validation = X_validation[:, 1:]
        if self.doScale:
            X_validation = self.scaler.transform(X_validation)
        end_validation = time.time()
        validation_time += end_validation - start_validation
    if test:
        start_test = time.time()
        X_test = X_test[:, 1:]
        if self.doScale:
            X_test = self.scaler.transform(X_test)
        end_test = time.time()
        test_time += end_test - start_test
    validation_relax = self.validation_relax
    test_relax = self.test_relax
    n_items = self.n_items
    epochs = self.epochs
    net = self.net
    capacity = self.capacity
    weights = self.weights
    hyperparams = self.hyperparams
    Q = torch.eye(n_items) / tau
    G = torch.from_numpy(weights).float()
    h = torch.tensor([capacity], dtype=torch.float)
    self.Q = Q
    self.G = G
    self.h = h
    model = net(X.shape[1], 1)
    self.model = model
    optimizer = self.optimizer(model.parameters(), **hyperparams)
    model_params_quad = make_gurobi_model(G.detach().numpy(),
                                          h.detach().numpy(), None, None,
                                          Q.detach().numpy())
    n_knapsacks = X.shape[0] // n_items
    loss_list = []
    accuracy_list = []
    regret_list = []
    subepoch_list = []
    subepoch = 0
    test_list = []
    n_train = 1
    shuffled_batches = list(range(n_knapsacks))
    for e in range(epochs):
        np.random.shuffle(shuffled_batches)
        logging.info('Epoch %d' % e)
        for i in range(n_knapsacks):
            n_start = n_items * shuffled_batches[i]
            n_stop = n_start + n_items
            z = torch.tensor(y[n_start:n_stop], dtype=torch.float)
            X_tensor = torch.tensor(X[n_start:n_stop, :], dtype=torch.float)
            c_true = -z
            c_pred = -model(X_tensor)
            solver = QPFunction(verbose=False,
                                solver=QPSolvers.GUROBI,
                                model_params=model_params_quad)
            x = solver(Q.expand(n_train, *Q.shape), c_pred.squeeze(),
                       G.expand(n_train, *G.shape),
                       h.expand(n_train, *h.shape), torch.Tensor(),
                       torch.Tensor())
            self.model_time += solver.Runtime()
            loss = (x.squeeze() * c_true).mean()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            subepoch += 1
            if i % 20 == 0:
                if self.verbose:
                    dict_validation = {}
                    logging.info('Validation starts\n')
                    if validation:
                        start_validation = time.time()
                        validation_result = self.test(
                            X_validation,
                            y_validation,
                            relaxation=validation_relax)
                        self.model_time += validation_result[4]
                        dict_validation['validation_regret'] = validation_result[0]
                        dict_validation['validation_mse'] = validation_result[1]
                        dict_validation['validation_accuracy'] = validation_result[2]
                        dict_validation['validation_loss'] = validation_result[3]
                        end_validation = time.time()
                        validation_time += end_validation - start_validation
                    if test:
                        start_test = time.time()
                        test_result = self.test(X_test,
                                                y_test,
                                                relaxation=test_relax)
                        # accumulate the test solver runtime
                        self.model_time += test_result[4]
                        dict_validation['test_regret'] = test_result[0]
                        dict_validation['test_mse'] = test_result[1]
                        dict_validation['test_accuracy'] = test_result[2]
                        dict_validation['test_loss'] = test_result[3]
                        end_test = time.time()
                        test_time += end_test - start_test
                    dict_validation['subepoch'] = subepoch
                    dict_validation['Runtime'] = self.model_time
                    dict_validation['time'] = time.time() - start
                    test_list.append(dict_validation)
                    logging.info(
                        "Epoch %d::subepoch %d Total time %d, validation time %d & test time %d"
                        % (e, i, time.time() - start, validation_time,
                           test_time))
                if self.plotting:
                    subepoch_list.append(subepoch)
                    # test() returns (regret, mse, accuracy, loss, runtime)
                    reg, _, acc, loss_val, _ = self.test(X, y)
                    loss_list.append(loss_val)
                    regret_list.append(reg)
                    accuracy_list.append(acc)
    if self.plotting:
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(6, 6))
        ax1.plot(subepoch_list, regret_list)
        ax1.set_ylabel('Regret')
        ax2.plot(subepoch_list, loss_list)
        ax2.set_yscale('log')
        ax2.set_ylabel('Loss')
        ax3.plot(subepoch_list, accuracy_list)
        ax3.set_xlabel('Sub Epochs')
        ax3.set_ylabel('Accuracy')
        plt.savefig(self.figname)
    if self.verbose:
        dd = defaultdict(list)
        for d in test_list:
            for key, value in d.items():
                dd[key].append(value)
        df = pd.DataFrame.from_dict(dd)
        return df
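# Role of tau in fit() above (Q = I / tau): the term (1/(2*tau))||x||^2 turns
# the knapsack LP into a strongly convex QP whose argmin is differentiable in
# the predicted costs; larger tau weakens the smoothing and pushes the solution
# toward an LP vertex. A quick self-contained check with stock qpth (the
# fork-specific GUROBI solver and model_params used in this file are omitted;
# all numbers are illustrative):
import torch
from qpth.qp import QPFunction

G = torch.tensor([[2.0, 3.0], [-1.0, 0.0], [0.0, -1.0], [1.0, 0.0],
                  [0.0, 1.0]])
h = torch.tensor([4.0, 0.0, 0.0, 1.0, 1.0])  # 2x1 + 3x2 <= 4, 0 <= x <= 1
c = torch.tensor([-3.0, -1.0])               # maximize item values (3, 1)
e = torch.Tensor()                           # no equality constraints
for tau in (0.1, 10.0, 1000.0):
    Q = torch.eye(2) / tau
    x = QPFunction(verbose=False)(Q, c, G, h, e, e)
    print(tau, x)  # approaches the LP vertex x = [1, 2/3] as tau grows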
def fit(self, X, y, X_validation=None, y_validation=None, X_test=None,
        y_test=None):

    def make_model_matrix(nbMachines, nbTasks, nbResources, MC, U, D, E, L,
                          P, idle, up, down, q, **kwargs):
        # nbMachines: number of machines
        # nbTasks: number of tasks
        # nbResources: number of resources
        # MC[m][r]: resource capacity of machine m for resource r
        # U[f][r]: resource use of task f for resource r
        # D[f]: duration of task f
        # E[f]: earliest start of task f
        # L[f]: latest end of task f
        # P[f]: power use of task f
        # idle[m]: idle cost of machine m
        # up[m]: startup cost of machine m
        # down[m]: shut-down cost of machine m
        # q: time resolution in minutes
        Machines = range(nbMachines)
        Tasks = range(nbTasks)
        N = 1440 // q  # number of timeslots in a day

        # G and h: resource capacity per machine and timeslot (only the first
        # resource is used); F maps timeslot prices to decision costs
        G = torch.zeros((nbMachines * N, nbTasks * nbMachines * N))
        h = torch.zeros(nbMachines * N)
        F = torch.zeros((N, nbTasks * nbMachines * N))
        for m in Machines:
            for t in range(N):
                h[m * N + t] = MC[m][0]
                for f in Tasks:
                    c_index = (f * nbMachines + m) * N
                    # starts in [t - D[f] + 1, t] occupy timeslot t
                    G[t + m * N,
                      (c_index + max(0, t - D[f] + 1)):(c_index + (t + 1))] = 1
                    F[t,
                      (c_index + max(0, t - D[f] + 1)):(c_index + (t + 1))] = P[f]

        # A and b: each task starts exactly once, and never outside [E[f], L[f]]
        A1 = torch.zeros((nbTasks, nbTasks * nbMachines * N))
        A2 = torch.zeros((nbTasks, nbTasks * nbMachines * N))
        A3 = torch.zeros((nbTasks, nbTasks * nbMachines * N))
        for f in Tasks:
            A1[f, (f * N * nbMachines):((f + 1) * N * nbMachines)] = 1
            A2[f, (f * N * nbMachines):(f * N * nbMachines + E[f])] = 1
            A3[f, (f * N * nbMachines + L[f] - D[f] + 1):((f + 1) * N *
                                                          nbMachines)] = 1
        b = torch.cat((torch.ones(nbTasks), torch.zeros(2 * nbTasks)))
        A = torch.cat((A1, A2, A3))
        return A, b, G, h, torch.transpose(F, 0, 1)

    logging.info('Model Training Starts\n')
    tau = self.tau
    start = time.time()
    validation_time = 0
    test_time = 0
    # if validation/testing is wanted, the corresponding data must be provided
    X = X[:, 1:]
    validation = (X_validation is not None) and (y_validation is not None)
    test = (X_test is not None) and (y_test is not None)
    if self.doScale:
        self.scaler = preprocessing.StandardScaler().fit(X)
        X = self.scaler.transform(X)
    if validation:
        start_validation = time.time()
        X_validation = X_validation[:, 1:]
        if self.doScale:
            X_validation = self.scaler.transform(X_validation)
        end_validation = time.time()
        validation_time += end_validation - start_validation
    if test:
        start_test = time.time()
        X_test = X_test[:, 1:]
        if self.doScale:
            X_test = self.scaler.transform(X_test)
        end_test = time.time()
        test_time += end_test - start_test
    n_items = self.n_items
    epochs = self.epochs
    net = self.net
    param = self.param
    hyperparams = self.hyperparams
    validation_relax = self.validation_relax
    test_relax = self.test_relax
    if validation:
        start_validation = time.time()
        solver_validation = Gurobi_ICON(relax=validation_relax,
                                        reset=True,
                                        presolve=True,
                                        **param)
        solver_validation.make_model()
        self.solver_validation = solver_validation
        end_validation = time.time()
        validation_time += end_validation - start_validation
    if test:
        start_test = time.time()
        solver_test = Gurobi_ICON(**param, reset=True, presolve=True)
        solver_test.make_model()
        self.solver_test = solver_test
        end_test = time.time()
        test_time += end_test - start_test
    if validation:
        sol_validation = self.solution_func(y_validation,
                                            solver=self.solver_validation)
    if test:
        sol_test = self.solution_func(y_test, solver=self.solver_test)
    A, b, G, h, F = make_model_matrix(**param)
    Q = torch.eye(F.shape[0]) / tau
    self.Q = Q
    self.G = G
    self.h = h
    self.A = A
    self.b = b
    self.F = F
    model = net(X.shape[1], 1)
    optimizer = self.optimizer(model.parameters(), **hyperparams)
    model_params_quad = make_gurobi_model(G.detach().numpy(),
                                          h.detach().numpy(),
                                          A.detach().numpy(),
                                          b.detach().numpy(),
                                          Q.detach().numpy())
    self.gurobi_model = model_params_quad
    n_knapsacks = X.shape[0] // n_items
    subepoch = 0
    test_list = []
    n_train = 1
    for e in range(epochs):
        logging.info('Epoch %d' % e)
        subepoch_list = list(range(n_knapsacks))
        random.shuffle(subepoch_list)
        for i in range(n_knapsacks):
            n_start = n_items * subepoch_list[i]
            n_stop = n_start + n_items
            c_true = torch.mm(
                F,
                torch.tensor(y[n_start:n_stop],
                             dtype=torch.float).unsqueeze(1))
            X_tensor = torch.tensor(X[n_start:n_stop, :], dtype=torch.float)
            c_pred = torch.mm(F, model(X_tensor))
            solver = QPFunction(verbose=False,
                                solver=QPSolvers.GUROBI,
                                model_params=model_params_quad)
            x = solver(Q.expand(n_train, *Q.shape), c_pred.squeeze(),
                       G.expand(n_train, *G.shape),
                       h.expand(n_train, *h.shape),
                       A.expand(n_train, *A.shape),
                       b.expand(n_train, *b.shape))
            self.model_time += solver.Runtime()
            loss = (x.squeeze() * c_true.squeeze()).mean()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            self.model = model
            subepoch += 1
            if self.model_save and i % 10 == 0:
                logging.info("Model saving:%d-%d\n" % (e, i))
                torch.save(
                    self.model.state_dict(),
                    str(self.model_name + "_Epoch" + str(e) + "_" + str(i) +
                        ".pth"))
            if i % 50 == 0 and self.verbose:
                dict_validation = {}
                logging.info('Validation starts\n')
                if validation:
                    start_validation = time.time()
                    validation_result = self.test(
                        X_validation,
                        y_validation,
                        sol_validation,
                        solver=self.solver_validation)
                    self.model_time += validation_result[3]
                    dict_validation['validation_regret'] = validation_result[0]
                    dict_validation['validation_mse'] = validation_result[1]
                    dict_validation['validation_loss'] = validation_result[2]
                    end_validation = time.time()
                    validation_time += end_validation - start_validation
                if test:
                    start_test = time.time()
                    test_result = self.test(X_test,
                                            y_test,
                                            sol_test,
                                            solver=self.solver_test)
                    dict_validation['test_regret'] = test_result[0]
                    dict_validation['test_mse'] = test_result[1]
                    dict_validation['test_loss'] = test_result[2]
                    end_test = time.time()
                    test_time += end_test - start_test
                logging.info('Validation ends\n')
                dict_validation['subepoch'] = subepoch
                dict_validation['Runtime'] = self.model_time
                dict_validation['time'] = time.time() - start
                test_list.append(dict_validation)
                logging.info(
                    "Epoch %d::subepoch %d Total time %d, validation time %d & test time %d"
                    % (e, i, time.time() - start, validation_time, test_time))
                print('Epoch[%d::%d], loss(train):%.2f at %s' %
                      (e + 1, i, loss.item(), datetime.datetime.now()))
    if self.verbose:
        dd = defaultdict(list)
        for d in test_list:
            for key, value in d.items():
                dd[key].append(value)
        df = pd.DataFrame.from_dict(dd)
        return df
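# A hedged toy check (illustrative, stock torch only) of how F in
# make_model_matrix couples per-timeslot prices to start decisions: with one
# machine, one task of duration D = 2 and power P = 3 over N = 4 timeslots,
# row s of the returned (transposed) F charges P for every timeslot that a
# start at s occupies, so c = F @ prices is the energy cost of each decision,
# matching c_pred = torch.mm(F, model(X_tensor)) above.
import torch

P, D, N = 3.0, 2, 4
F = torch.zeros(N, N)  # rows: start times, columns: timeslots
for t in range(N):
    F[max(0, t - D + 1):t + 1, t] = P
prices = torch.tensor([[1.0], [2.0], [4.0], [8.0]])
print((F @ prices).squeeze())  # tensor([ 9., 18., 36., 24.])
# e.g. starting at s = 1 occupies slots 1 and 2: 3 * (2 + 4) = 18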