import argparse
import sys
import time

import numpy as np
import torch

# read_dpp, DPP, call_FrankWolfe, training, prune, study_k_effect and
# select_random_k_pair are assumed to be defined in the repo's local modules.


def main():
    tic = time.clock()

    parser = argparse.ArgumentParser(
        description='Running Frank-Wolfe on given DPP')
    parser.add_argument('torch_seed', help='Torch random seed', type=int)
    parser.add_argument('dpp_file', help='DPP file', type=str)
    parser.add_argument('dpp_id', help='Id of dpp file', type=int)
    parser.add_argument('N', help='Number of items in DPP', type=int)
    parser.add_argument('k', help='Cardinality constraint', type=int)
    parser.add_argument('num_samples_mc',
                        help='Number of samples for multilinear relaxation estimation',
                        type=int)
    parser.add_argument('num_fw_iter',
                        help='Number of iterations of Frank-Wolfe', type=int)
    args = parser.parse_args()

    N = args.N
    dpp_id = args.dpp_id
    k = args.k  # cardinality constraint
    num_samples_mc = args.num_samples_mc  # sets drawn from x to estimate the multilinear relaxation
    num_fw_iter = args.num_fw_iter
    if_herd = 0

    torch_seed = args.torch_seed
    torch.manual_seed(torch_seed)

    dpp_file = "/home/pankaj/Sampling/data/input/dpp/data/" + args.dpp_file
    (qualities, features) = read_dpp(dpp_file, 'dpp_' + str(dpp_id))
    dpp = DPP(qualities, features)

    call_FrankWolfe(dpp, args)

    print "Completed in " + str(time.clock() - tic) + 's'
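
# A minimal sketch (not the repo's implementation) of how num_samples_mc is
# typically used: estimate the multilinear relaxation F(x) = E_{S~x}[f(S)]
# by drawing Bernoulli(x) subsets and averaging the set-function values.
# Treating `dpp` as a callable f on 0/1 indicator vectors is an assumption.
def estimate_multilinear(dpp, x, num_samples_mc):
    total = 0.0
    for _ in range(num_samples_mc):
        S = torch.bernoulli(x)  # include item i independently with prob. x[i]
        total += float(dpp(S))
    return total / num_samples_mc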
def main():
    N = 100
    L = read_dpp(
        "/home/pankaj/Sampling/data/input/dpp/data/dpp_100_1.5_0.5_200_0_0.1_5.h5",
        N, '/dpp_0')
    sample = torch.rand(N) > 0.8
    dpp_obj = DPP(L)
    print dpp_obj(sample.numpy())
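
# A minimal sketch, assuming (as is standard for DPPs) that the objective
# evaluated by dpp_obj(sample) is the log-determinant f(S) = log det(L_S)
# of the principal submatrix of the kernel L indexed by the selected items.
# This class is illustrative only; the repo's DPP class may differ.
import numpy as np

class LogDetObjective(object):
    def __init__(self, L):
        self.L = np.asarray(L)

    def __call__(self, sample):
        idx = np.nonzero(np.asarray(sample, dtype=bool).ravel())[0]
        if len(idx) == 0:
            return 0.0  # det of the empty principal minor is 1, so its log is 0
        sign, logdet = np.linalg.slogdet(self.L[np.ix_(idx, idx)])
        return logdet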
    parser.add_argument('batch_size', nargs='?', help='Batch size',
                        type=int, default=1)
    parser.add_argument('minibatch_size', nargs='?', help='Minibatch size',
                        type=int, default=1)
    args = parser.parse_args()

    torch.manual_seed(args.torch_seed)

    (qualities, features) = read_dpp(
        '/home/pankaj/Sampling/data/input/dpp/data/clustered_dpp_20_20_4_3_1_4_1.h5',
        'dpp_' + str(args.dpp_id))
    dpp = DPP(qualities, features)

    # Frank-Wolfe iterates saved by an earlier run, one iterate per row
    y_mat = torch.Tensor(
        np.reshape(
            np.loadtxt(
                '/home/pankaj/Sampling/code/fw_dpp/workspace/dpp_123_0_20_10_1_100_fw_simple_iterates.txt'
            ), (100, args.N)))

    x_mat = y_mat[0:args.batch_size, :]
    # Note: this overwrites the saved Frank-Wolfe iterates with random
    # starting points; remove it to train on the iterates loaded above.
    x_mat = torch.rand(args.batch_size, args.N)

    training(x_mat, dpp, args)
    sys.exit()
    # x_val_mat = torch.rand(args.batch_size, args.N)
    num_influ_iter = 10
    nsample_mlr = int(sys.argv[2])
    num_fw_iter = 100

    params = [nNodes, p, num_influ_iter, nsample_mlr, num_fw_iter]
    param_string = '_'.join(str(t) for t in params)

    bufsize = 0  # unbuffered, so partial results survive an interrupted run
    f = open(dirw + 'log_k_study_' + param_string + '.txt', 'w', bufsize)

    for i in range(len(graph_file_list)):
        filename = graph_dir + graph_file_list[i]
        results = []
        for k in range(1, 400, 50):
            print i, k
            study_k_effect(filename, k, p, num_influ_iter, nsample_mlr,
                           num_fw_iter, results, i)
        f.write('\n'.join('%s %s %s %s' % x for x in results))
        f.write('\n')
    f.close()


if __name__ == '__main__':
    N = 100
    L = read_dpp(
        "/home/pankaj/Sampling/data/input/dpp/data/clustered_dpp_100_2_200_5_10_5.h5",
        N, '/dpp_1')
    print select_random_k_pair(L, 10, N, 50)
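
# A minimal sketch of the Frank-Wolfe (continuous greedy) loop that
# num_fw_iter controls, under a cardinality-k constraint. The gradient
# estimator grad_F is an assumption (e.g., finite differences of the
# Monte Carlo multilinear estimate); the linear maximizer over the
# cardinality polytope is the indicator of the top-k gradient entries.
def frank_wolfe_sketch(grad_F, N, k, num_fw_iter):
    x = torch.zeros(N)
    for t in range(num_fw_iter):
        g = grad_F(x)
        v = torch.zeros(N)
        _, top = torch.topk(g, k)  # best vertex of the polytope
        v[top] = 1.0
        x = x + (1.0 / num_fw_iter) * v  # step 1/T keeps x inside the polytope
    return x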
    # Since DPPs are non-monotone, we need to prune the rounded independent set
    S = prune(dpp_obj, I)
    opt_submod_val = dpp_obj(S)

    print "Items selected: " + ' '.join(
        [str(x) for x in range(N) if S[x] == 1])
    print "Rounded discrete solution with pruning = ", opt_submod_val.item()
    print "Rounded discrete solution without pruning = ", dpp_obj(I).item()

    # Save the optimum value and the continuous solution
    f = open(opt_file, 'w')
    f.write(str(opt_submod_val.item()) + '\n')
    for x_t in x_opt:
        f.write(str(x_t.item()) + '\n')
    f.close()

    return x_opt


if __name__ == '__main__':
    x = torch.Tensor([0.2] * 100)
    I = torch.bernoulli(x)
    N = 100
    L = read_dpp("/home/pankaj/Sampling/data/input/dpp/data/" + sys.argv[1],
                 N, '/dpp_0')
    dpp_obj = DPP(L)
    prune(dpp_obj, I)
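
# A minimal sketch of what prune(dpp_obj, I) is presumed to do: because the
# log-det objective is non-monotone, greedily drop any selected item whose
# removal increases the objective, repeating until no single removal helps.
def prune_sketch(dpp_obj, I):
    S = I.clone()
    best = dpp_obj(S)
    improved = True
    while improved:
        improved = False
        for i in range(len(S)):
            if S[i] == 1:
                S[i] = 0  # tentatively drop item i
                val = dpp_obj(S)
                if val > best:
                    best = val
                    improved = True
                else:
                    S[i] = 1  # removal did not help; restore the item
    return S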