def main(args, model_path):
    print(os.getcwd())
    print("start training ...")
    print(model_path)

    start = time.time()
    ent_str2id, ent_id2str, rel_str2id, rel_id2str = load_kg()
    print("making vocab is done " + str(time.time() - start))
    n_ent, n_rel = len(ent_str2id), len(rel_str2id)

    model = ConvE(args, n_ent, n_rel)
    model.init()
    if args.multi_gpu:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(torch.load(model_path))
    model.cuda()
    print('cuda : ' + str(torch.cuda.is_available()) + ' count : ' + str(torch.cuda.device_count()))

    params = [value.numel() for value in model.parameters()]
    print(params)
    print(sum(params))

    start = time.time()
    evalset = KG_EvalSet(dir + '/test_set.txt', args, n_ent)
    print("making evalset is done " + str(time.time() - start))
    evalloader = DataLoader(dataset=evalset, num_workers=args.num_worker,
                            batch_size=args.batch_size, shuffle=True)

    model.eval()
    with torch.no_grad():
        start = time.time()
        ranking_and_hits(model, args, evalloader, n_ent, ent_id2str, rel_id2str)
        end = time.time()
        print('eval time used: {} minutes'.format((end - start) / 60))
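# A minimal invocation sketch for the evaluation-only main() above. The argparse
# flags and the default checkpoint path are assumptions for illustration; the
# real script may define its arguments differently.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--num-worker', type=int, default=4)
    parser.add_argument('--multi-gpu', action='store_true')
    parser.add_argument('--model-path', default='saved_models/conve.pt')  # hypothetical default
    cli_args = parser.parse_args()
    main(cli_args, cli_args.model_path)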
def main(args, model_path):
    if args.preprocess:
        preprocess(args.data, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(args.data, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']
    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(args.data, 'train', args.batch_size, randomize=True,
                                  keys=input_keys, loader_threads=args.loader_threads)
    dev_rank_batcher = StreamBatcher(args.data, 'dev_ranking', args.test_batch_size, randomize=False,
                                     loader_threads=args.loader_threads, keys=input_keys)
    test_rank_batcher = StreamBatcher(args.data, 'test_ranking', args.test_batch_size, randomize=False,
                                      loader_threads=args.loader_threads, keys=input_keys)

    if args.model is None:
        model = ConvE(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'conve':
        model = ConvE(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'distmult':
        model = DistMult(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'complex':
        model = Complex(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'interacte':
        model = InteractE(args, vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}', args.model)
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=args.log_interval)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=args.log_interval))

    model.cuda()
    if args.resume:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()
        total_param_size = []
        params = [value.numel() for value in model.parameters()]
        print(params)
        print(np.sum(params))

        opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
        for epoch in range(args.epochs):
            model.train()
            for i, str2var in enumerate(train_batcher):
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e2_multi = str2var['e2_multi1_binary'].float()
                # label smoothing
                e2_multi = ((1.0 - args.label_smoothing) * e2_multi) + (1.0 / e2_multi.size(1))

                pred = model.forward(e1, rel)
                loss = model.loss(pred, e2_multi)
                loss.backward()
                opt.step()
                train_batcher.state.loss = loss.cpu()

            print('saving to {0}'.format(model_path))
            torch.save(model.state_dict(), model_path)

            model.eval()
            with torch.no_grad():
                if epoch % 5 == 0 and epoch > 0:
                    ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
                if epoch % 5 == 0:
                    if epoch > 0:
                        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
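# Standalone illustration of the 1-N label smoothing applied in the training
# loops above: each multi-hot target row is scaled by (1 - epsilon) and shifted
# toward a uniform prior of 1/num_entities. The entity count below is just the
# FB15k-237 size, used as an example.
import torch

num_entities = 14541
eps = 0.1                                  # plays the role of args.label_smoothing
e2_multi = torch.zeros(1, num_entities)
e2_multi[0, [3, 7]] = 1.0                  # two known tail entities for this (e1, rel)
smoothed = (1.0 - eps) * e2_multi + 1.0 / e2_multi.size(1)
print(smoothed[0, 3].item())               # positive target: ~0.9001
print(smoothed[0, 0].item())               # negative target: ~6.9e-05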
def main():
    if Config.process:
        preprocess(Config.dataset, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(Config.dataset, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']
    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(Config.dataset, 'train', Config.batch_size,
                                  randomize=True, keys=input_keys)
    dev_rank_batcher = StreamBatcher(Config.dataset, 'dev_ranking', Config.batch_size,
                                     randomize=False, loader_threads=4, keys=input_keys)
    test_rank_batcher = StreamBatcher(Config.dataset, 'test_ranking', Config.batch_size,
                                      randomize=False, loader_threads=4, keys=input_keys)

    if Config.model_name is None:
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ConvE':
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'DistMult':
        model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ComplEx':
        model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}', Config.model_name)
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=100))

    if Config.cuda:
        model.cuda()

    if load:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()
        total_param_size = []
        params = [value.numel() for value in model.parameters()]
        print(params)
        print(np.sum(params))

        max_mrr = 0
        count = 0
        max_count = 3
        opt = torch.optim.Adam(model.parameters(), lr=Config.learning_rate, weight_decay=Config.L2)
        for epoch in range(1, epochs + 1):
            model.train()
            for i, str2var in enumerate(train_batcher):
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e2_multi = str2var['e2_multi1_binary'].float()
                # label smoothing
                e2_multi = ((1.0 - Config.label_smoothing_epsilon) * e2_multi) + (1.0 / e2_multi.size(1))

                pred = model.forward(e1, rel)
                loss = model.loss(pred, e2_multi)
                loss.backward()
                opt.step()
                train_batcher.state.loss = loss.cpu()

            print('saving to {0}'.format(model_path))
            torch.save(model.state_dict(), model_path)

            model.eval()
            with torch.no_grad():
                # ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
                if epoch % 15 == 0:
                    mrr = ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
                    if mrr <= max_mrr:
                        count += 1
                        if count > max_count:
                            break
                    else:
                        count = 0
                        max_mrr = mrr

        mrr_test = ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
def main(args):
    if args.preprocess:
        print('start preprocessing', flush=True)
        preprocess(args, delete_data=True)
        print('finish preprocessing', flush=True)
    else:
        input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
        p = Pipeline(args.data, keys=input_keys)
        print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()) + ': start loading vocabs', flush=True)
        p.load_vocabs()
        print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()) + ': finish loading vocabs', flush=True)
        vocab = p.state['vocab']
        num_entities = vocab['e1'].num_token

        train_batcher = StreamBatcher(args.data, 'train', args.batch_size, randomize=True,
                                      keys=input_keys, loader_threads=args.loader_threads)

        model = ConvE(args, vocab['e1'].num_token, vocab['rel'].num_token)

        train_batcher.at_batch_prepared_observers.insert(
            1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

        # eta = ETAHook('train', print_every_x_batches=args.log_interval)
        # train_batcher.subscribe_to_events(eta)
        # train_batcher.subscribe_to_start_of_epoch_event(eta)
        # train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=args.log_interval))

        model.cuda()
        model.init()

        total_param_size = []
        params = [value.numel() for value in model.parameters()]
        print(params, flush=True)
        print(np.sum(params), flush=True)

        opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
        print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()) + f': start training with epochs = {args.epochs}', flush=True)
        for epoch in range(args.epochs):
            model.train()
            # sampled_batches = set(np.random.choice(train_batcher.num_batches, args.num_batches, replace=False))
            # print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()) + f': start epoch {epoch} with batches = {len(sampled_batches)} out of {train_batcher.num_batches}', flush=True)
            # processed_count = 0
            for i, str2var in enumerate(train_batcher):
                # if i not in sampled_batches: continue
                # if processed_count%int(args.num_batches/1000)==0: print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()) + f': start epoch {epoch} batch {i} = {processed_count}', flush=True)
                # processed_count += 1
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e2_multi = str2var['e2_multi1_binary'].float()
                e2_multi = ((1.0 - args.label_smoothing) * e2_multi) + (1.0 / e2_multi.size(1))

                pred = model.forward(e1, rel)
                loss = model.loss(pred, e2_multi)
                loss.backward()
                opt.step()
                # train_batcher.state.loss = loss.cpu()
            print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()) + f': finish training epoch {epoch}', flush=True)

        model.eval()
        output(args, vocab['e1'], model.emb_e.weight.detach().cpu().numpy())
def main(args, model_path):
    if args.preprocess:
        preprocess(args.data, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(args.data, keys=input_keys)
    p.load_vocabs()
    # The data is converted into objects and stored; the Vocab objects from spodernet are used here.
    vocab = p.state['vocab']
    num_entities = vocab['e1'].num_token  # total number of entities (senses)

    # build the three batchers
    train_batcher = StreamBatcher(args.data, 'train', args.batch_size, randomize=True,
                                  keys=input_keys, loader_threads=args.loader_threads)
    dev_rank_batcher = StreamBatcher(args.data, 'dev_ranking', args.test_batch_size, randomize=False,
                                     loader_threads=args.loader_threads, keys=input_keys)
    test_rank_batcher = StreamBatcher(args.data, 'test_ranking', args.test_batch_size, randomize=False,
                                      loader_threads=args.loader_threads, keys=input_keys)

    model = ConvE(args, vocab['e1'].num_token, vocab['rel'].num_token)

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

    # hooks that act as callbacks fired while training
    eta = ETAHook('train', print_every_x_batches=args.log_interval)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=args.log_interval))

    P = Preprocessor("../external/wordnet-mlj12")
    tokenidx_to_synset = vocab['e1'].idx2token
    encoder = DefinitionEncoder()
    encoder.cuda()
    model.cuda()

    if args.initialize:
        model_params = torch.load(args.initialize)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
        # attach the definition encoder; ConvE itself does not define an `encoder` attribute
        model.encoder = encoder
        model.encoder.init()
    elif args.resume:
        model.encoder = encoder
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation', tokenidx_to_synset, P.get_batch)
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation', tokenidx_to_synset, P.get_batch)
    else:
        model.encoder = encoder
        model.encoder.init()
        model.init()
        total_param_size = []
        params = [value.numel() for value in model.parameters()]
        print(params)
        print(np.sum(params))

        opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
        best_dev_mrr = 0
        model.eval()
        dev_mrr = ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation',
                                   tokenidx_to_synset, P.get_batch)

        # training loop
        for epoch in range(args.epochs):
            model.train()
            for i, str2var in enumerate(train_batcher):
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e1_tokens = [tokenidx_to_synset[idx] for idx in e1.detach().cpu().numpy().ravel()]
                batch, lengths = P.get_batch(e1_tokens)
                # e1_emb is the head embedding produced by the BiLSTM definition encoder
                e1_emb = model.encoder((batch.cuda(), lengths))[0]
                e2_multi = str2var['e2_multi1_binary'].float()
                # label smoothing
                e2_multi = ((1.0 - args.label_smoothing) * e2_multi) + (1.0 / e2_multi.size(1))
                # feed the encoded head entity into the model
                pred = model.forward(e1_emb, rel, e1_encoded=True)
                loss = model.loss(pred, e2_multi)
                loss.backward()
                opt.step()
                train_batcher.state.loss = loss.cpu()

            # saving on improvement in dev score
            # print('saving to {0}'.format(model_path))
            # torch.save(model.state_dict(), model_path)
            model.eval()
            with torch.no_grad():
                if epoch % 5 == 0 and epoch > 0:
                    dev_mrr = ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation',
                                               tokenidx_to_synset, P.get_batch)
                    if dev_mrr > best_dev_mrr:
                        print('saving to {} MRR {}->{}'.format(model_path, best_dev_mrr, dev_mrr))
                        best_dev_mrr = dev_mrr
                        torch.save(model.state_dict(), model_path)
                if epoch % 5 == 0:
                    if epoch > 0:
                        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation',
                                         tokenidx_to_synset, P.get_batch)

    if args.represent:
        P = Preprocessor()
        synsets = [P.idx_to_synset[idx] for idx in range(len(P.idx_to_synset))]
        embeddings = []
        embeddings_proj = []
        for i in tqdm(range(0, len(synsets), args.test_batch_size)):
            synsets_batch = synsets[i:i + args.test_batch_size]
            with torch.no_grad():
                batch, lengths = P.get_batch(synsets_batch)
                emb_proj, emb = model.encoder((batch.cuda(), lengths))
                embeddings_proj.append(emb_proj.detach().cpu())
                embeddings.append(emb.detach().cpu())
        embeddings = torch.cat(embeddings, 0).numpy()
        embeddings_proj = torch.cat(embeddings_proj, 0).numpy()
        print('embeddings', embeddings.shape, embeddings_proj.shape)

        basename, ext = os.path.splitext(args.represent)
        fname = args.represent
        np.savez_compressed(fname, embeddings=embeddings, synsets=synsets)
        fname = basename + '_projected' + ext
        np.savez_compressed(fname, embeddings=embeddings_proj, synsets=synsets)
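# Loading back the archives written by the `args.represent` branch above: the
# arrays are stored under the keyword names passed to np.savez_compressed. The
# file name used here is hypothetical; in practice it is whatever args.represent
# pointed to.
import numpy as np

data = np.load('synset_embeddings.npz')
embeddings, synsets = data['embeddings'], data['synsets']
print(embeddings.shape, len(synsets))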
def main():
    if Config.process:
        preprocess(Config.dataset, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(Config.dataset, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']
    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(Config.dataset, 'train', Config.batch_size,
                                  randomize=True, keys=input_keys)
    dev_rank_batcher = StreamBatcher(Config.dataset, 'dev_ranking', Config.batch_size,
                                     randomize=False, loader_threads=4, keys=input_keys)
    test_rank_batcher = StreamBatcher(Config.dataset, 'test_ranking', Config.batch_size,
                                      randomize=False, loader_threads=4, keys=input_keys)

    if Config.model_name is None:
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ConvE':
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'DistMult':
        model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ComplEx':
        model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'RNNDist':
        model = RNNDist(vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}', Config.model_name)
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=100))

    # test triples per day (each triple contributes two ranks in the evaluation)
    if Config.dataset == 'ICEWS18':
        lengths = [
            1618, 956, 815, 1461, 1634, 1596, 1754, 1494, 800, 979, 1588, 1779, 1831, 1762, 1566, 812, 820,
            1707, 1988, 1845, 1670, 1695, 956, 930, 1641, 1813, 1759, 1664, 1616, 1021, 998, 1668, 1589, 1720
        ]
    else:
        lengths = [
            1090, 730, 646, 939, 681, 783, 546, 526, 524, 586, 656, 741, 562, 474, 493, 487, 474, 477,
            460, 532, 348, 530, 402, 493, 503, 452, 668, 512, 406, 467, 524, 563, 524, 418, 441, 487,
            515, 475, 478, 532, 387, 479, 485, 417, 542, 496, 487, 445, 504, 350, 432, 445, 401, 570,
            554, 504, 505, 483, 587, 441, 489, 501, 487, 513, 513, 524, 655, 545, 599, 702, 734, 519,
            603, 579, 537, 635, 437, 422, 695, 575, 553, 485, 429, 663, 475, 673, 527, 559, 540, 591,
            558, 698, 422, 1145, 969, 1074, 888, 683, 677, 910, 902, 644, 777, 695, 571, 656, 797, 576,
            468, 676, 687, 549, 482, 1007, 778, 567, 813, 788, 879, 557, 724, 850, 809, 685, 714, 554,
            799, 727, 208, 946, 979, 892, 859, 1092, 1038, 999, 1477, 1126, 1096, 1145, 955, 100, 1264, 1287,
            962, 1031, 1603, 1662, 1179, 1064, 1179, 1105, 1465, 1176, 1219, 1137, 1112, 791, 829, 2347, 917, 913,
            1107, 960, 850, 1005, 1045, 871, 972, 921, 1019, 984, 1033, 848, 918, 699, 1627, 1580, 1354, 1119,
            1065, 1208, 1037, 1134, 980, 1249, 1031, 908, 787, 819, 804, 764, 959, 1057, 770, 691, 816, 620,
            788, 829, 895, 1128, 1023, 1038, 1030, 1016, 991, 866, 878, 1013, 977, 914, 976, 717, 740, 904,
            912, 1043, 1117, 930, 1116, 1028, 946, 922, 1151, 1092, 967, 1189, 1081, 1158, 943, 981, 1212, 1104,
            941, 912, 1347, 1241, 1479, 1188, 1152, 1164, 1167, 1173, 1280, 979, 142, 1458, 910, 1126, 1053, 1083,
            897, 1021, 1075, 881, 1054, 941, 927, 860, 1081, 876, 1952, 1576, 1560, 1599, 1226, 1083, 964, 1059,
            1179, 982, 1032, 933, 877, 1032, 957, 884, 909, 846, 850, 798, 843, 1183, 1108, 1185, 797, 915,
            952, 1181, 744, 86, 889, 1151, 925, 1119, 1115, 1036, 772, 1052, 837, 897, 1095, 926, 1034, 1031,
            995, 907, 969, 981, 1135, 915, 1161, 100, 1269, 1244, 1331, 1124, 1074, 1162, 1159, 1078, 1311, 1210,
            1308, 945, 1183, 1580, 1406, 1417, 1173, 1348, 1274, 1179, 893, 1107, 950, 1028, 1055, 1059, 1244, 1082,
            1179, 1011, 955, 886, 865, 857
        ]

    if Config.cuda:
        model.cuda()

    if load:  # if True:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()

        ranks = ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        # ranks = ranking_and_hits2(model, test_rank_batcher, vocab, 'test_evaluation')
        print(len(ranks))

        mrr = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            mrr.append(np.mean(1 / rr))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'mrr.txt', 'w') as f:
            for i, mr in enumerate(mrr):
                print("MRR (filtered) @ {}th day: {:.6f}".format(i, mr))
                f.write(str(mr) + '\n')

        h10 = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            h10.append(np.mean(rr <= 10))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'h10.txt', 'w') as f:
            for i, mr in enumerate(h10):
                print("h10 (filtered) @ {}th day: {:.6f}".format(i, mr))
                f.write(str(mr) + '\n')

        h3 = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            h3.append(np.mean(rr <= 3))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'h3.txt', 'w') as f:
            for i, mr in enumerate(h3):
                print("h3 (filtered) @ {}th day: {:.6f}".format(i, mr))
                f.write(str(mr) + '\n')

        h1 = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            h1.append(np.mean(rr <= 1))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'h1.txt', 'w') as f:
            for i, mr in enumerate(h1):
                print("h1 (filtered) @ {}th day: {:.6f}".format(i, mr))
                f.write(str(mr) + '\n')

        print("length", len(ranks))
        print("length_2", 2 * sum(lengths))
        # ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()
        total_param_size = []
        params = [value.numel() for value in model.parameters()]
        print(params)
        print(np.sum(params))

        opt = torch.optim.Adam(model.parameters(), lr=Config.learning_rate, weight_decay=Config.L2)
        for epoch in range(epochs):
            # break
            model.train()
            for i, str2var in enumerate(train_batcher):
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e2_multi = str2var['e2_multi1_binary'].float()
                # label smoothing
                # e2_multi = ((1.0-Config.label_smoothing_epsilon)*e2_multi) + (1.0/e2_multi.size(1))
                # print("this", Config.label_smoothing_epsilon, e2_multi.size(1))

                pred = model.forward(e1, rel)
                # loss = model.loss(pred, e2_multi)

                # sampled loss: score each row's first positive tail plus an equal
                # number of random negatives (assumes Config.batch_size == 128)
                loss = torch.zeros(1).cuda()
                for j in range(128):
                    position = torch.nonzero(e2_multi[j])[0].cuda()
                    label = torch.cat([torch.ones(len(position)), torch.zeros(len(position))]).cuda()
                    neg_position = torch.randint(e2_multi.shape[1], (len(position),)).long().cuda()
                    position = torch.cat([position, neg_position])
                    loss += model.loss(pred[j, position], label)

                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # clip gradients
                opt.step()
                train_batcher.state.loss = loss.cpu()

            print('saving to {0}'.format(model_path))
            torch.save(model.state_dict(), model_path)

            model.eval()
            with torch.no_grad():
                # ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
                if epoch == 50:
                    ranks = ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
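# Standalone sketch of the sampled loss used in the training loops above: for
# each row, the first positive tail index is paired with an equal number of
# random negative indices and scored with binary cross-entropy, instead of
# computing the loss over all entities. Sigmoid outputs and BCELoss stand in
# here for model.forward / model.loss, which is an assumption about those methods.
import torch

def sampled_bce(pred_row, target_row, loss_fn=torch.nn.BCELoss()):
    position = torch.nonzero(target_row)[0]              # first positive tail index (shape [1])
    label = torch.cat([torch.ones(len(position)), torch.zeros(len(position))])
    neg_position = torch.randint(target_row.size(0), (len(position),))
    position = torch.cat([position, neg_position])        # positive followed by sampled negatives
    return loss_fn(pred_row[position], label)

pred = torch.sigmoid(torch.randn(8, 100))                 # fake scores for 8 queries over 100 entities
target = torch.zeros(8, 100)
target[torch.arange(8), torch.randint(100, (8,))] = 1.0   # one correct tail per query
loss = sum(sampled_bce(pred[j], target[j]) for j in range(pred.size(0)))
print(loss.item())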
def main():
    if Config.process:
        preprocess(Config.dataset, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(Config.dataset, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']
    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(Config.dataset, 'train', Config.batch_size,
                                  randomize=True, keys=input_keys)
    dev_rank_batcher = StreamBatcher(Config.dataset, 'dev_ranking', Config.batch_size,
                                     randomize=False, loader_threads=4, keys=input_keys)
    test_rank_batcher = StreamBatcher(Config.dataset, 'test_ranking', Config.batch_size,
                                      randomize=False, loader_threads=4, keys=input_keys)

    if Config.model_name is None:
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ConvE':
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'DistMult':
        model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ComplEx':
        model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}', Config.model_name)
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=100))

    if Config.cuda:
        model.cuda()

    if load:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation', epochs, True)
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation', epochs, False)
    else:
        model.init()
        total_param_size = []
        params = [value.numel() for value in model.parameters()]
        print(params)
        print(np.sum(params))

        opt = torch.optim.Adam(model.parameters(), lr=Config.learning_rate, weight_decay=Config.L2)
        for epoch in range(epochs):
            model.train()
            for i, str2var in tqdm(enumerate(train_batcher)):
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e2_multi = str2var['e2_multi1_binary'].float()
                # label smoothing
                pred = model.forward(e1, rel)

                # sampled loss over the first positive tail and an equal number of
                # random negatives per row (assumes Config.batch_size == 128)
                loss = torch.zeros(1).cuda()
                for j in range(128):
                    position = torch.nonzero(e2_multi[j])[0].cuda()
                    label = torch.cat([torch.ones(len(position)), torch.zeros(len(position))]).cuda()
                    neg_position = torch.randint(e2_multi.shape[1], (len(position),)).long().cuda()
                    position = torch.cat([position, neg_position])
                    loss += model.loss(pred[j, position], label)

                loss.backward()
                opt.step()
                train_batcher.state.loss = loss.cpu()

            print('saving to {0}'.format(model_path))
            torch.save(model.state_dict(), model_path)

            model.eval()
            with torch.no_grad():
                if epoch % 100 == 0:
                    if epoch > 0:
                        ranking_and_hits(model, test_rank_batcher, vocab,
                                         Config.dataset + "-" + Config.model_name, epoch, False)
                if epoch + 1 == epochs:
                    ranking_and_hits(model, test_rank_batcher, vocab, Config.dataset, epoch, True)
def main():
    if do_process:
        preprocess(dataset_name, delete_data=True)
    input_keys = ['e1', 'rel', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(dataset_name, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']
    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(dataset_name, 'train', Config.batch_size,
                                  randomize=True, keys=input_keys)
    dev_rank_batcher = StreamBatcher(dataset_name, 'dev_ranking', Config.batch_size,
                                     randomize=False, loader_threads=4, keys=input_keys,
                                     is_volatile=True)
    test_rank_batcher = StreamBatcher(dataset_name, 'test_ranking', Config.batch_size,
                                      randomize=False, loader_threads=4, keys=input_keys,
                                      is_volatile=True)

    # model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    # model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=100))

    if Config.cuda:
        model.cuda()

    if load:
        model_params = torch.load(model_path)
        print(model)
        print([(key, value.size()) for key, value in model_params.items()])
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()
        opt = torch.optim.Adam(model.parameters(), lr=Config.learning_rate, weight_decay=Config.L2)
        for epoch in range(epochs):
            model.train()
            for i, str2var in enumerate(train_batcher):
                opt.zero_grad()
                e1 = str2var['e1']
                rel = str2var['rel']
                e2_multi = str2var['e2_multi1_binary'].float()
                # label smoothing
                e2_multi = ((1.0 - Config.label_smoothing_epsilon) * e2_multi) + (1.0 / e2_multi.size(1))

                pred = model.forward(e1, rel)
                loss = model.loss(pred, e2_multi)
                loss.backward()
                opt.step()
                train_batcher.state.loss = loss

            print('saving to {0}'.format(model_path))
            torch.save(model.state_dict(), model_path)

            model.eval()
            ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
            if epoch % 3 == 0:
                if epoch > 0:
                    ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
def main(args, model_path):
    print(os.getcwd())
    print("start training ...")

    start = time.time()
    ent_str2id, ent_id2str, rel_str2id, rel_id2str = load_kg()
    print("making vocab is done " + str(time.time() - start))
    n_ent, n_rel = len(ent_str2id), len(rel_str2id)

    model = ConvE(args, n_ent, n_rel)
    model.init()
    if args.multi_gpu:
        model = torch.nn.DataParallel(model)
    bce = torch.nn.BCELoss().cuda()
    model.cuda()
    print('cuda : ' + str(torch.cuda.is_available()) + ' count : ' + str(torch.cuda.device_count()))

    params = [value.numel() for value in model.parameters()]
    print(params)
    print(sum(params))

    opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)

    start = time.time()
    dataset = KG_DataSet(dir + '/train_set.txt', args, n_ent)
    print("making train dataset is done " + str(time.time() - start))
    start = time.time()
    evalset = KG_EvalSet(dir + '/test_set.txt', args, n_ent)
    print("making evalset is done " + str(time.time() - start))

    prev_loss = 1000
    patience = 0
    early_stop = False
    best_loss = 1000
    for epoch in range(args.epochs):
        print(epoch)
        epoch_loss = 0
        epoch_start = time.time()
        model.train()
        tot = 0.0
        dataloader = DataLoader(dataset=dataset, num_workers=args.num_worker,
                                batch_size=args.batch_size, shuffle=True)
        evalloader = DataLoader(dataset=evalset, num_workers=args.num_worker,
                                batch_size=args.batch_size, shuffle=True)
        n_train = dataset.__len__()

        for i, data in enumerate(dataloader):
            opt.zero_grad()
            start = time.time()
            head, rel, tail = data
            head = torch.LongTensor(head)
            rel = torch.LongTensor(rel)
            head = head.cuda()
            rel = rel.cuda()
            batch_size = head.size(0)
            e2_multi = tail.cuda()
            print("e2_multi " + str(time.time() - start) + "\n")

            start = time.time()
            pred = model.forward(head, rel)
            loss = bce(pred, e2_multi)
            loss.backward()
            opt.step()
            batch_loss = torch.sum(loss)
            print("step " + str(time.time() - start) + "\n")
            epoch_loss += batch_loss
            tot += head.size(0)
            print('\r{:>10} epoch {} progress {} loss: {}\n'.format('', epoch, tot / n_train, batch_loss), end='')

        epoch_loss /= batch_size
        print('')
        end = time.time()
        time_used = end - epoch_start
        print('one epoch time: {} minutes'.format(time_used / 60))
        print('{} epochs'.format(epoch))
        print('epoch {} loss: {}'.format(epoch + 1, epoch_loss))

        # TODO: calculate valid loss and develop early stopping
        model.eval()
        with torch.no_grad():
            valid_loss = 0.0
            for i, data in enumerate(evalloader):
                # head, rel, tail, head2, rel_rev, tail2 = data
                head, rel, tail, tail_idx = data
                head = torch.LongTensor(head)
                rel = torch.LongTensor(rel)
                # head2 = torch.LongTensor(head2)
                # rel_rev = torch.LongTensor(rel_rev)
                head = head.cuda()
                rel = rel.cuda()
                # head2 = head2.cuda()
                # rel_rev = rel_rev.cuda()
                batch_size = head.size(0)
                e2_multi1 = tail.cuda()
                # e2_multi2 = tail2.cuda()
                pred1 = model.forward(head, rel)
                # pred2 = model.forward(head2, rel_rev)
                loss1 = bce(pred1, e2_multi1)
                # loss2 = bce(pred2, e2_multi2)
                sum_loss = torch.sum(loss1).item()
                # sum_loss = (torch.sum(loss1).item() + torch.sum(loss2).item()) / 2
                sum_loss /= batch_size
                valid_loss += sum_loss

        print("valid loss : " + str(valid_loss))
        with open(os.getcwd() + '/log_file/log.txt', 'a') as f:
            f.write(str(epoch) + " epochs valid loss : " + str(valid_loss) + "\n")

        if valid_loss > prev_loss:
            patience += 1
            if patience > 2:
                early_stop = True
        else:
            patience = 0
        prev_loss = valid_loss

        if early_stop:
            print("{0} epochs Early stopping ...".format(epoch))
            break

        if valid_loss < best_loss:
            best_loss = valid_loss
            print('saving to {0}'.format(model_path))
            torch.save(model.state_dict(), model_path)

        model.eval()
        with torch.no_grad():
            start = time.time()
            ranking_and_hits(model, args, evalloader, n_ent, epoch)
            end = time.time()
            print('eval time used: {} minutes'.format((end - start) / 60))
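# Minimal sketch of the link-prediction metrics that the ranking_and_hits calls
# above report, computed here from a toy array of ranks of the true entity.
import numpy as np

ranks = np.array([1, 3, 2, 10, 57, 4])
print('MRR    :', np.mean(1.0 / ranks))
print('Hits@1 :', np.mean(ranks <= 1))
print('Hits@3 :', np.mean(ranks <= 3))
print('Hits@10:', np.mean(ranks <= 10))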