def optimize(self, iterations):
    """Run AO-ADMM for NMF: M ≈ w @ h with w, h >= 0.

    Supports two losses selected by self.distance_type:
    'eu' (Euclidean) and 'kl' (KL divergence, via the extra splitting
    variable v_aux ≈ w_aux @ h_aux).

    Prints the objective each iteration and returns the final objective
    value utils.objective(M, w, h).
    """
    (n, m) = self.M.shape
    M = self.M
    k = self.k
    # Nonnegative random initialization of the two factors.
    w = np.abs(np.random.randn(n, k))
    h = np.abs(np.random.randn(k, m))
    distance_type = self.distance_type
    # ADMM splitting copies of the factors.
    w_aux = w.copy()
    h_aux = h.copy()
    # init dual variables for w, h, y
    dual_w = np.zeros_like(w)
    dual_h = np.zeros_like(h)
    v_aux = np.zeros_like(M)
    dual_v = np.zeros_like(M)
    # (lambda, type) regularizer specs consumed by prox(); lambda=0 here,
    # so 'nn' / 'l2n' presumably select nonnegativity / l2-normalized prox
    # — TODO confirm against prox() implementation.
    reg_w = (0, 'nn')
    reg_h = (0, 'l2n')
    rho = 1  # ADMM penalty parameter
    for i in range(iterations):
        print(i, utils.objective(M, w, h))
        if distance_type == 'eu':
            # Alternate least-squares-style aux updates; the w update is
            # done on the transposed problem (M.T ≈ h.T @ w.T).
            h_aux = aux_update(h, dual_h, w_aux, M, None, rho, distance_type)
            w_aux = aux_update(w.T, dual_w.T, h_aux.T, M.T, None, rho, distance_type)
            w_aux = w_aux.T
            h = prox(reg_h[1], h_aux, dual_h, rho=rho, lambda_=reg_h[0])
            w = prox(reg_w[1], w_aux.T, dual_w.T, rho=rho, lambda_=reg_w[0])
            w = w.T
        elif distance_type == 'kl':
            # KL case fits the factors against the splitting variable
            # v_aux instead of M directly.
            h_aux = aux_update(h, dual_h, w_aux, v_aux, dual_v, rho, distance_type)
            w_aux = aux_update(w.T, dual_w.T, h_aux.T, v_aux.T, dual_v.T, rho, distance_type)
            w_aux = w_aux.T
            h = prox(reg_h[1], h_aux, dual_h, rho=rho, lambda_=reg_h[0])
            w = prox(reg_w[1], w_aux.T, dual_w.T, rho=rho, lambda_=reg_w[0])
            w = w.T
            # Closed-form prox of the KL term: positive root of the
            # quadratic v^2 - (v_bar - 1) v - M = 0, elementwise.
            v_bar = w_aux @ h_aux - dual_v
            v_aux = 1 / 2 * ((v_bar - 1) + np.sqrt((v_bar - 1) ** 2 + 4 * M))
            dual_v = dual_v + v_aux - w_aux @ h_aux
        else:
            raise TypeError('Unknown loss type.')
        # Dual ascent on the consensus constraints w == w_aux, h == h_aux.
        dual_h = dual_h + h - h_aux
        dual_w = dual_w + w - w_aux
    print()
    return utils.objective(M, w, h)
def optimize(self, E, iterations):
    """Rank-one block coordinate descent for masked NMF (torch tensors).

    Factorizes M ≈ W @ H on the entries selected by mask E, updating one
    rank-one component (column of W, row of H) at a time. Columns/rows
    are norm-1 initialized; prints the objective each outer iteration and
    returns the final (W, H).
    """
    M = self.M
    W, H = utils.init_wh(M, self.k)
    # Normalize every component to unit norm before iterating.
    for j in range(self.k):
        W[:, j] = W[:, j] / torch.norm(W[:, j])
        H[j, :] = H[j, :] / torch.norm(H[j, :])
    for i in range(iterations):
        print(i, utils.objective(M, W, H, E))
        # Masked residual of the full current model.
        Dif = E * (M - W.mm(H))
        for j in range(self.k):
            uj = W[:, j]
            vj = H[j, :]
            # Remove component j from the residual so Dif approximates the
            # target for this component alone.
            # NOTE(review): the rank-1 term added back is NOT multiplied by
            # the mask E, while the residual is masked — confirm this is
            # intentional for the masked objective.
            Dif = Dif + uj.reshape(-1, 1).mm(vj.reshape(1, -1))
            # Nonnegative least-squares update of vj given uj.
            u_nom = Dif.t().mv(uj).clamp_min(0)
            if torch.norm(u_nom) > 1e-18:
                vj = u_nom / (torch.norm(uj)**2)
            else:
                # Degenerate component: zero it out rather than divide by ~0.
                vj = torch.zeros_like(vj)
            # Nonnegative least-squares update of uj given the new vj.
            v_nom = Dif.mv(vj).clamp_min(0)
            if torch.norm(v_nom) > 1e-18:
                uj = v_nom / (torch.norm(vj)**2)
            else:
                uj = torch.zeros_like(uj)
            W[:, j] = uj
            H[j, :] = vj
            # Put the refreshed component back into the residual.
            Dif = Dif - uj.reshape(-1, 1).mm(vj.reshape(1, -1))
    return W, H
def __current_score(self, li):
    """Score the item-count combination ``li``.

    Returns the total value when the combination fits the knapsack
    capacity, otherwise 0. Valid scores are logged at debug level.
    """
    is_valid, _, total = objective(self.capasity, self.values, self.costs, li)
    if not is_valid:
        return 0
    self.logger.debug('valid score: {} {}'.format(total, li))
    return total
def optimize(self, E, iterations):
    """Masked NMF via alternating ADMM sub-solves.

    Each iteration updates H then W with the project-level
    admm_H_update / admm_W_update helpers (U and V are their respective
    auxiliary/dual states). Prints the objective per iteration and
    returns the final factors (W, H).
    """
    M = self.M
    rank = self.k
    W, H = utils.init_wh(M, rank)
    V, U = utils.init_wh(M, rank)
    for it in range(iterations):
        print(it, utils.objective(M, W, H, E))
        H, _ = admm_H_update(M, W, H, U, E, rank)
        W, _ = admm_W_update(M, W, H, V, E, rank)
    return W, H
def optimize(self, E, iterations):
    """Masked NMF by projected gradient ascent on the residual.

    Alternates a gradient step on H then on W; each step is passed
    through utils.proj together with the pre-step value and the
    'sticky' flag. Prints the objective per iteration and returns
    (W, H).
    """
    M = self.M
    W, H = utils.init_wh(M, self.k)
    for it in range(iterations):
        print(it, utils.objective(M, W, H, E))
        # H step: gradient uses the current W and the old H.
        grad_h = W.T.mm(E * (M - W.mm(H)))
        prev_h = H
        H = utils.proj(prev_h + self.step * grad_h, prev_h, self.sticky)
        # W step: residual is recomputed with the freshly updated H.
        grad_w = (E * (M - W.mm(H))).mm(H.T)
        prev_w = W
        W = utils.proj(prev_w + self.step * grad_w, prev_w, self.sticky)
    return W, H
def optimize(self, E, iterations):
    """Masked NMF with multiplicative updates (Lee–Seung style, torch).

    Denominators close to zero are replaced by the module-level ``eps``
    (assumed defined elsewhere in this file — TODO confirm) to avoid
    division blow-ups. Prints the objective per iteration and returns
    (W, H).
    """
    M = self.M
    W, H = utils.init_wh(M, self.k)
    for i in range(iterations):
        print(i, utils.objective(M, W, H, E))
        # M_aux = (M - W.mm(H)) * E + W.mm(H)
        h_denom = W.t().mm(E * W.mm(H))
        # NOTE(review): the numerator W.t().mm(M) is not masked by E while
        # the denominator is — the original TODO likely refers to switching
        # to the commented M_aux form; verify before relying on this.
        H = H * torch.where(h_denom < 1e-10, eps, W.t().mm(M) / h_denom)  # TODO
        # M_aux = (M - W.mm(H)) * E + W.mm(H)
        w_denom = (E * W.mm(H)).mm(H.t())
        W = W * torch.where(w_denom < 1e-10, eps, (M).mm(H.t()) / w_denom)  # TODO
    return W, H
def weight(individual, data):
    """Fitness evaluation for a GP individual (DEAP-style).

    Compiles the individual with the module-level toolbox, evaluates it
    on the (x, y) pair in ``data``, and scores the prediction against
    objective(x, y) using fitness(). Returns a one-element tuple, as
    DEAP expects.
    """
    inputs, targets = data[0], data[1]
    compiled = toolbox.compile(expr=individual)
    predictions = compiled(inputs, targets)
    labels = objective(inputs, targets)
    return fitness(predictions, labels),
def optimize(self, E: Tensor, iterations):
    """Masked NMF by projected gradient descent with momentum.

    A look-ahead point is formed by adding the damped previous velocity,
    the gradient is taken there, and the projected step is anchored at
    the look-ahead point (utils.proj with the 'sticky' flag). Velocities
    are the total per-iteration displacements. Returns (W, H).
    """
    M = self.M
    W, H = utils.init_wh(M, self.k)
    vel_w, vel_h = 0, 0  # momentum buffers (0 until the first step)
    for it in range(iterations):
        print(it, utils.objective(M, W, H, E))
        w_before, h_before = W, H
        # Look-ahead: shift by the damped velocity before taking a gradient.
        W = W + (1 - self.momentum) * vel_w
        H = H + (1 - self.momentum) * vel_h
        grad_w, grad_h = self.grad(M, W, H, E)
        W = utils.proj(W - self.step * grad_w, W, self.sticky)
        H = utils.proj(H - self.step * grad_h, H, self.sticky)
        # New velocity = total displacement this iteration.
        vel_w = W - w_before
        vel_h = H - h_before
    return W, H
def main():
    """Benchmark several masked-NMF optimizers on one dataset.

    Runs each algorithm over several dataset instances, accumulating the
    final objective and wall-clock time per algorithm, then prints the
    aggregated names, objectives, and timings.
    """
    runs = 8
    # Alternative datasets:
    # ds = RandomDataset()
    # ds = ImageDataset()
    ds = TextDataset()
    # ds = RecommendationDataset()
    k = ds.k()
    n_algos = 5
    objs = [0] * n_algos
    times = [0] * n_algos
    names = [None] * n_algos
    for run in range(runs):
        step = ds.step(run)
        M, E = ds.generate(run)
        M = utils.to_tensor(M)
        E = utils.to_tensor(E)
        print(M.shape)
        algorithms: List[Optimizer] = [
            SPGD(M, k, step, True, 0.1),
            MultUpdate(M, k),
            BlockCoordinate(M, k, step, False),
            AO_ADMM(M, k),
            LeastSquares(M, k),
        ]
        for idx, solver in enumerate(algorithms):
            print(solver.name())
            names[idx] = solver.name()
            start_time = time.time()
            W, H = solver.optimize(E, ds.iterations())
            print()
            objs[idx] += utils.objective(M, W, H, E)
            # NOTE: elapsed includes the objective evaluation above,
            # matching the original measurement.
            elapsed = time.time() - start_time
            times[idx] += elapsed
            print("time", elapsed)
            ds.postprocess(W, H, run, solver.short_name())
    print(names)
    print(objs)
    print(times)
def __objenctive(self) -> "tuple[bool, int, int]":
    """Evaluate the current item counts against the knapsack objective.

    Returns the (is_valid, cost, total_value) triple produced by the
    module-level objective() for the instance's capacity, values, costs,
    and current number_of_items.

    NOTE(review): the name is misspelled ("objenctive") but is kept
    because callers elsewhere in this file use the same spelling; the
    original annotation ``(True, int, int)`` was not a valid type and has
    been corrected to a tuple type.
    """
    return objective(self.capasity, self.values, self.costs, self.number_of_items)
        # --- fragment: the enclosing method's definition starts before this
        # chunk; indentation below is reconstructed — TODO confirm against
        # the full file. Appears to be a simulated-annealing neighbor move:
        # add one unit of item n, then randomly remove items until the
        # knapsack combination is valid again.
        self.number_of_items[n] += 1
        is_ok = False
        while not is_ok:
            (is_ok, cost, total_value) = self.__objenctive()
            remove_n = random.randint(0, len(self.number_of_items) - 1)
            if self.number_of_items[remove_n] != 0:
                self.number_of_items[remove_n] -= 1


if __name__ == '__main__':
    # Parameters
    values = [120, 130, 80, 100, 250, 185]
    costs = [10, 12, 7, 9, 21, 16]
    number_of_items = [0 for i in range(0, len(values))]
    capasity = 100

    # for logging
    # from logging import DEBUG
    # basicConfig(level=DEBUG)
    from logging import INFO
    basicConfig(level=INFO)

    # Anneal with initial temperature 2, 10000 iterations, cooling 0.99
    # — presumably (temperature, iterations, cooling rate); verify against
    # SimulatedAnealing's constructor.
    sa = SimulatedAnealing(capasity, values, costs, number_of_items, 2, 10000, 0.99)
    best_result_of_sa = sa.solve()
    best_cost = objective(capasity, values, costs, best_result_of_sa)
    print('best conbination: ', best_result_of_sa)
    print('best cost:', best_cost)
import numpy as np
import matplotlib.pyplot as plt
import utils as uu

# Gaussian-process regression setup: noisy samples of uu.objective on
# [0, 20], plus the covariance blocks needed for the GP predictive
# distribution (uu.se_matrix presumably builds a squared-exponential
# kernel matrix — TODO confirm).

# range of values
x_lim = [0, 20]

# train inputs
n_train = 10
np.random.seed(10)  # fixed seed so the training set is reproducible
x_train = np.random.uniform(x_lim[0], x_lim[1], n_train)
f_train = uu.objective(x_train)
sigma_n = 0.5  # observation-noise standard deviation
noise = np.random.normal(0, sigma_n, n_train)
f_train += noise
np.random.seed(None)  # re-randomize for anything sampled later

# test inputs
n_test = 200
x_test = np.linspace(x_lim[0], x_lim[1], n_test)

# predictive distribution
n_samples = 3
scale = 1  # kernel length-scale
cov_test_test = uu.se_matrix(x_test, scale=scale)
# Noise variance is added on the diagonal of the train-train block only.
cov_train_train = uu.se_matrix(x_train, scale=scale) + sigma_n**2 * np.eye(n_train)
cov_test_train = uu.se_matrix(x_test, x_train, scale=scale)
cov_train_test = cov_test_train.T
cov_train_train_inv = np.linalg.inv(cov_train_train)
def main():
    """Command-line driver: train one streaming-PCA variant on synthetic data.

    Parses hyper-parameters, trains the method selected by --method
    ('l1rmsg', 'l2rmsg', 'l12rmsg', 'msg', 'incremental', 'sgd', or the
    batch 'original' SVD) for --nepochs passes over the training set,
    and periodically prints the subspace objective on the train and
    validation splits.

    Fix over the original: branches used ``method in 'incremental'`` /
    ``in 'sgd'`` / ``in 'original'`` — substring membership on a string,
    which silently matches any substring (e.g. method='g' would match
    'sgd'). Replaced with exact equality.
    """
    parser = argparse.ArgumentParser('PCA with Pytorch')
    parser.add_argument(
        '--method',
        default='l2rmsg',
        help=
        "can be among ['l1rmsg','l2rmsg','l12rmsg','msg','incremental','original','sgd']"
    )
    parser.add_argument('--subspace_ratio', type=float, default=0.5,
                        help='k/d ratio')
    parser.add_argument('--beta', type=float, default=0.5,
                        help='regularization const for l1')
    parser.add_argument('--lambda', type=float, default=1e-3,
                        help='regularization const for l2')
    parser.add_argument('--eta', type=float, default=1, help='learning rate')
    parser.add_argument(
        '--eps',
        type=float,
        default=1e-6,
        help='threshold on norm for rank-1 update of non-trivial components')
    parser.add_argument('--nepochs', type=int, default=20, help='no. of epochs')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='To train on GPU')
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='if true then progress bar gets printed')
    parser.add_argument('--log_interval', type=int, default=1,
                        help='log interval in epochs')
    args = parser.parse_args()

    device = lambda tens: device_templ(tens, args.cuda)

    # Fixed seeds for reproducibility.
    torch.manual_seed(7)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(7)
    print('-----------------------TRAINING {}--------------------'.format(
        args.method.upper()))

    X_train, X_val, X_test = get_syn_data(device=device)
    k = int(args.subspace_ratio * X_train.size(1))  # target subspace dim
    d = X_train.size(1)

    method = args.method
    MSG_FAMILY = ('l1rmsg', 'l2rmsg', 'l12rmsg', 'msg')

    epochs_iter = range(args.nepochs)
    epochs_iter = tqdm.tqdm(epochs_iter) if args.verbose else epochs_iter
    for epoch in epochs_iter:
        iterator = DataLoader(TensorDataset(X_train), shuffle=True)
        iterator = tqdm.tqdm(iterator) if args.verbose else iterator
        for x in iterator:
            x = x[0].squeeze()
            if method in MSG_FAMILY:
                if epoch == 0:
                    # Lazy state init on the first sample of the first epoch.
                    U = device(torch.zeros(d, k).float())
                    S = device(torch.zeros(k).float())
                U, S = msg(U, S, k, x, args.eta, args.eps, args.beta)
            elif method == 'incremental':
                if epoch == 0:
                    U = device(torch.zeros(d, k).float())
                    S = device(torch.zeros(k).float())
                U, S = incremental_update(U, S, x, max_rank=None)
                # Keep only the leading k components.
                U, S = U[:, :k], S[:k]
            elif method == 'sgd':
                if epoch == 0:
                    U = gram_schmidt(
                        nn.init.uniform_(device(torch.zeros(d, k))))
                U = stochastic_power_update(U, x, args.eta)
                U = gram_schmidt(U)
            elif method == 'original':
                # Batch solution: one full SVD, no streaming needed.
                _, S, V = torch.svd(X_train)
                U = V[:, :k]
                break

        # MSG variants may carry extra columns; truncate for evaluation.
        finalU = U[:, :k] if method in MSG_FAMILY else U

        if method == 'original':
            break
        if epoch % args.log_interval == 0:
            print('Objective(higher is good): TRAIN {:.4f} VALIDATION {:.4f}'.
                  format(objective(X_train, finalU), objective(X_val, finalU)))
    print('Objective(higher is good): TRAIN {:.4f} VALIDATION {:.4f}'.format(
        objective(X_train, finalU), objective(X_val, finalU)))