Example #1
def main(args, model_path):
    if args.preprocess: preprocess(args.data, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(args.data, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']

    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(args.data, 'train', args.batch_size, randomize=True, keys=input_keys, loader_threads=args.loader_threads)
    dev_rank_batcher = StreamBatcher(args.data, 'dev_ranking', args.test_batch_size, randomize=False, loader_threads=args.loader_threads, keys=input_keys)
    test_rank_batcher = StreamBatcher(args.data, 'test_ranking', args.test_batch_size, randomize=False, loader_threads=args.loader_threads, keys=input_keys)


    if args.model is None or args.model == 'conve':
        model = ConvE(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'distmult':
        model = DistMult(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'complex':
        model = Complex(args, vocab['e1'].num_token, vocab['rel'].num_token)
    elif args.model == 'interacte':
        model = InteractE(args, vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}'.format(args.model))
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))


    eta = ETAHook('train', print_every_x_batches=args.log_interval)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=args.log_interval))

    model.cuda()
    if args.resume:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()

    total_param_size = []
    params = [value.numel() for value in model.parameters()]
    print(params)
    print(np.sum(params))

    opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
    for epoch in range(args.epochs):
        model.train()
        for i, str2var in enumerate(train_batcher):
            opt.zero_grad()
            e1 = str2var['e1']
            rel = str2var['rel']
            e2_multi = str2var['e2_multi1_binary'].float()
            # label smoothing
            e2_multi = ((1.0-args.label_smoothing)*e2_multi) + (1.0/e2_multi.size(1))

            pred = model(e1, rel)
            loss = model.loss(pred, e2_multi)
            loss.backward()
            opt.step()

            train_batcher.state.loss = loss.cpu()


        print('saving to {0}'.format(model_path))
        torch.save(model.state_dict(), model_path)

        model.eval()
        with torch.no_grad():
            if epoch % 5 == 0 and epoch > 0:
                ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
                ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
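A note on the training targets used above: TargetIdx2MultiTarget expands the list of gold tail entities in e2_multi1 into one multi-hot row per example (e2_multi1_binary), and the label-smoothing line then mixes each row with a uniform distribution over all entities. A minimal self-contained sketch of both steps; the sizes and target lists below are toy values invented purely for illustration:

import torch

# toy sizes, for illustration only
batch_size, num_entities = 2, 5
targets = [[1, 3], [0, 2, 4]]  # gold tail entities per (e1, rel) pair

# what TargetIdx2MultiTarget produces: one multi-hot target row per example
e2_multi = torch.zeros(batch_size, num_entities)
for row, idx in enumerate(targets):
    e2_multi[row, idx] = 1.0

# the label-smoothing line from the training loop above; with realistic
# entity counts the 1/num_entities term is tiny
label_smoothing = 0.1
smoothed = (1.0 - label_smoothing) * e2_multi + 1.0 / num_entities
print(smoothed)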
Example #2
def main():
    if Config.process: preprocess(Config.dataset, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(Config.dataset, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']

    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(Config.dataset,
                                  'train',
                                  Config.batch_size,
                                  randomize=True,
                                  keys=input_keys)
    dev_rank_batcher = StreamBatcher(Config.dataset,
                                     'dev_ranking',
                                     Config.batch_size,
                                     randomize=False,
                                     loader_threads=4,
                                     keys=input_keys)
    test_rank_batcher = StreamBatcher(Config.dataset,
                                      'test_ranking',
                                      Config.batch_size,
                                      randomize=False,
                                      loader_threads=4,
                                      keys=input_keys)

    if Config.model_name is None or Config.model_name == 'ConvE':
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'DistMult':
        model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ComplEx':
        model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'RNNDist':
        model = RNNDist(vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}'.format(Config.model_name))
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1',
                                 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(
        LossHook('train', print_every_x_batches=100))
    # per-day test-set sizes: each entry is the number of test triples on one day
    if Config.dataset == 'ICEWS18':
        lengths = [
            1618, 956, 815, 1461, 1634, 1596, 1754, 1494, 800, 979, 1588, 1779,
            1831, 1762, 1566, 812, 820, 1707, 1988, 1845, 1670, 1695, 956, 930,
            1641, 1813, 1759, 1664, 1616, 1021, 998, 1668, 1589, 1720
        ]
    else:
        lengths = [
            1090, 730, 646, 939, 681, 783, 546, 526, 524, 586, 656, 741, 562,
            474, 493, 487, 474, 477, 460, 532, 348, 530, 402, 493, 503, 452,
            668, 512, 406, 467, 524, 563, 524, 418, 441, 487, 515, 475, 478,
            532, 387, 479, 485, 417, 542, 496, 487, 445, 504, 350, 432, 445,
            401, 570, 554, 504, 505, 483, 587, 441, 489, 501, 487, 513, 513,
            524, 655, 545, 599, 702, 734, 519, 603, 579, 537, 635, 437, 422,
            695, 575, 553, 485, 429, 663, 475, 673, 527, 559, 540, 591, 558,
            698, 422, 1145, 969, 1074, 888, 683, 677, 910, 902, 644, 777, 695,
            571, 656, 797, 576, 468, 676, 687, 549, 482, 1007, 778, 567, 813,
            788, 879, 557, 724, 850, 809, 685, 714, 554, 799, 727, 208, 946,
            979, 892, 859, 1092, 1038, 999, 1477, 1126, 1096, 1145, 955, 100,
            1264, 1287, 962, 1031, 1603, 1662, 1179, 1064, 1179, 1105, 1465,
            1176, 1219, 1137, 1112, 791, 829, 2347, 917, 913, 1107, 960, 850,
            1005, 1045, 871, 972, 921, 1019, 984, 1033, 848, 918, 699, 1627,
            1580, 1354, 1119, 1065, 1208, 1037, 1134, 980, 1249, 1031, 908,
            787, 819, 804, 764, 959, 1057, 770, 691, 816, 620, 788, 829, 895,
            1128, 1023, 1038, 1030, 1016, 991, 866, 878, 1013, 977, 914, 976,
            717, 740, 904, 912, 1043, 1117, 930, 1116, 1028, 946, 922, 1151,
            1092, 967, 1189, 1081, 1158, 943, 981, 1212, 1104, 941, 912, 1347,
            1241, 1479, 1188, 1152, 1164, 1167, 1173, 1280, 979, 142, 1458,
            910, 1126, 1053, 1083, 897, 1021, 1075, 881, 1054, 941, 927, 860,
            1081, 876, 1952, 1576, 1560, 1599, 1226, 1083, 964, 1059, 1179,
            982, 1032, 933, 877, 1032, 957, 884, 909, 846, 850, 798, 843, 1183,
            1108, 1185, 797, 915, 952, 1181, 744, 86, 889, 1151, 925, 1119,
            1115, 1036, 772, 1052, 837, 897, 1095, 926, 1034, 1031, 995, 907,
            969, 981, 1135, 915, 1161, 100, 1269, 1244, 1331, 1124, 1074, 1162,
            1159, 1078, 1311, 1210, 1308, 945, 1183, 1580, 1406, 1417, 1173,
            1348, 1274, 1179, 893, 1107, 950, 1028, 1055, 1059, 1244, 1082,
            1179, 1011, 955, 886, 865, 857
        ]
    if Config.cuda:
        model.cuda()
    if load:  # 'load' and 'model_path' are module-level settings in this project
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel())
                  for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranks = ranking_and_hits(model, test_rank_batcher, vocab,
                                 'test_evaluation')
        print(len(ranks))

        mrr = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            mrr.append(np.mean(1 / rr))

            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'mrr.txt', 'w') as f:
            for i, mr in enumerate(mrr):
                print("MRR (filtered) @ {}th day: {:.6f}".format(i, mr))
                f.write(str(mr) + '\n')
        h10 = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            h10.append(np.mean(rr <= 10))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'h10.txt', 'w') as f:
            for i, h in enumerate(h10):
                print("Hits@10 (filtered) @ day {}: {:.6f}".format(i, h))
                f.write(str(h) + '\n')
        h3 = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            h3.append(np.mean(rr <= 3))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'h3.txt', 'w') as f:
            for i, h in enumerate(h3):
                print("Hits@3 (filtered) @ day {}: {:.6f}".format(i, h))
                f.write(str(h) + '\n')

        h1 = []
        curr_step = 0
        for i in range(len(lengths)):
            rr = np.array(ranks[curr_step:curr_step + 2 * lengths[i]])
            h1.append(np.mean(rr <= 1))
            curr_step += 2 * lengths[i]
        with open(Config.dataset + 'h1.txt', 'w') as f:
            for i, h in enumerate(h1):
                print("Hits@1 (filtered) @ day {}: {:.6f}".format(i, h))
                f.write(str(h) + '\n')
        print("length", len(ranks))
        print("length_2", 2 * sum(lengths))

        # ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()

    total_param_size = []
    params = [value.numel() for value in model.parameters()]
    print(params)
    print(np.sum(params))

    opt = torch.optim.Adam(model.parameters(),
                           lr=Config.learning_rate,
                           weight_decay=Config.L2)
    for epoch in range(epochs):
        model.train()
        for i, str2var in enumerate(train_batcher):
            opt.zero_grad()
            e1 = str2var['e1']
            rel = str2var['rel']
            e2_multi = str2var['e2_multi1_binary'].float()

            # label smoothing (disabled in this variant):
            # e2_multi = ((1.0-Config.label_smoothing_epsilon)*e2_multi) + (1.0/e2_multi.size(1))

            pred = model(e1, rel)
            # negative sampling loss instead of the full multi-label loss:
            # score each example's positive targets plus an equal number of
            # uniformly sampled negatives
            loss = torch.zeros(1).cuda()
            for j in range(e2_multi.size(0)):
                position = torch.nonzero(e2_multi[j]).view(-1).cuda()
                label = torch.cat(
                    [torch.ones(len(position)),
                     torch.zeros(len(position))]).cuda()
                neg_position = torch.randint(e2_multi.shape[1],
                                             (len(position), )).long().cuda()
                position = torch.cat([position, neg_position])
                loss += model.loss(pred[j, position], label)

            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           1.0)  # clip gradients
            opt.step()

            train_batcher.state.loss = loss.cpu()

        print('saving to {0}'.format(model_path))
        torch.save(model.state_dict(), model_path)

        model.eval()
        with torch.no_grad():
            # ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
            if epoch == 50:
                ranks = ranking_and_hits(model, test_rank_batcher, vocab,
                                         'test_evaluation')
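The per-day MRR, Hits@10, Hits@3 and Hits@1 blocks in this example all walk the same windows of the flat rank list. A consolidated sketch that computes all of them in one pass; the helper name and signature below are my own, not from the project:

import numpy as np

def per_day_metrics(ranks, lengths, ks=(1, 3, 10)):
    # ranks: flat list of filtered ranks, two entries per triple (head and
    # tail direction), concatenated day by day; lengths[i] = triples on day i
    out = {'mrr': []}
    out.update({'h{}'.format(k): [] for k in ks})
    step = 0
    for n in lengths:
        rr = np.asarray(ranks[step:step + 2 * n], dtype=float)
        out['mrr'].append(np.mean(1.0 / rr))
        for k in ks:
            out['h{}'.format(k)].append(np.mean(rr <= k))
        step += 2 * n
    return out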
Example #3
def main():
    if Config.process: preprocess(Config.dataset, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(Config.dataset, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']

    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(Config.dataset,
                                  'train',
                                  Config.batch_size,
                                  randomize=True,
                                  keys=input_keys)
    dev_rank_batcher = StreamBatcher(Config.dataset,
                                     'dev_ranking',
                                     Config.batch_size,
                                     randomize=False,
                                     loader_threads=4,
                                     keys=input_keys)
    test_rank_batcher = StreamBatcher(Config.dataset,
                                      'test_ranking',
                                      Config.batch_size,
                                      randomize=False,
                                      loader_threads=4,
                                      keys=input_keys)

    if Config.model_name is None or Config.model_name == 'ConvE':
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'DistMult':
        model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ComplEx':
        model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}'.format(Config.model_name))
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(
        1, TargetIdx2MultiTarget(num_entities, 'e2_multi1',
                                 'e2_multi1_binary'))

    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(
        LossHook('train', print_every_x_batches=100))

    if Config.cuda:
        model.cuda()
    if load:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel())
                  for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
    else:
        model.init()

    total_param_size = []
    params = [value.numel() for value in model.parameters()]
    print(params)
    print(np.sum(params))

    max_mrr = 0
    count = 0
    max_count = 3
    opt = torch.optim.Adam(model.parameters(),
                           lr=Config.learning_rate,
                           weight_decay=Config.L2)
    for epoch in range(1, epochs + 1):
        model.train()
        for i, str2var in enumerate(train_batcher):
            opt.zero_grad()
            e1 = str2var['e1']
            rel = str2var['rel']
            e2_multi = str2var['e2_multi1_binary'].float()
            # label smoothing
            e2_multi = ((1.0 - Config.label_smoothing_epsilon) *
                        e2_multi) + (1.0 / e2_multi.size(1))

            pred = model(e1, rel)
            loss = model.loss(pred, e2_multi)
            loss.backward()
            opt.step()

            train_batcher.state.loss = loss.cpu()

        print('saving to {0}'.format(model_path))
        torch.save(model.state_dict(), model_path)

        model.eval()
        with torch.no_grad():
            # ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
            if epoch % 15 == 0:
                mrr = ranking_and_hits(model, dev_rank_batcher, vocab,
                                       'dev_evaluation')
                if mrr <= max_mrr:
                    count += 1
                    if count > max_count:
                        break
                else:
                    count = 0
                    max_mrr = mrr
    mrr_test = ranking_and_hits(model, test_rank_batcher, vocab,
                                'test_evaluation')
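Example #3 adds patience-based early stopping: the dev set is scored every 15 epochs, and training stops once dev MRR has failed to improve more than max_count evaluations in a row. The same bookkeeping as a small standalone helper; this is a sketch with hypothetical names, not code from the project:

class EarlyStopping:
    """Stop once the score fails to improve `patience` times in a row."""
    def __init__(self, patience=3):
        self.patience = patience
        self.best = float('-inf')
        self.bad_evals = 0

    def should_stop(self, score):
        # mirrors the mrr <= max_mrr bookkeeping in the loop above:
        # an equal score also counts as "no improvement"
        if score > self.best:
            self.best = score
            self.bad_evals = 0
        else:
            self.bad_evals += 1
        return self.bad_evals > self.patience

# usage, mirroring the loop above:
# stopper = EarlyStopping(patience=max_count)
# if epoch % 15 == 0 and stopper.should_stop(mrr):
#     break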
Example #4
File: main.py Project: zzw-x/CPL
def main():
    if Config.process: preprocess(Config.dataset, delete_data=True)
    input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
    p = Pipeline(Config.dataset, keys=input_keys)
    p.load_vocabs()
    vocab = p.state['vocab']

    num_entities = vocab['e1'].num_token

    train_batcher = StreamBatcher(Config.dataset, 'train', Config.batch_size, randomize=True, keys=input_keys)
    dev_rank_batcher = StreamBatcher(Config.dataset, 'dev_ranking', Config.batch_size, randomize=False, loader_threads=4, keys=input_keys)
    test_rank_batcher = StreamBatcher(Config.dataset, 'test_ranking', Config.batch_size, randomize=False, loader_threads=4, keys=input_keys)


    if Config.model_name is None or Config.model_name == 'ConvE':
        model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'DistMult':
        model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
    elif Config.model_name == 'ComplEx':
        model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
    else:
        log.info('Unknown model: {0}'.format(Config.model_name))
        raise Exception("Unknown model!")

    train_batcher.at_batch_prepared_observers.insert(1, TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))


    eta = ETAHook('train', print_every_x_batches=100)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=100))

    if Config.cuda:
        model.cuda()
    if load:
        model_params = torch.load(model_path)
        print(model)
        total_param_size = []
        params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
        for key, size, count in params:
            total_param_size.append(count)
            print(key, size, count)
        print(np.array(total_param_size).sum())
        model.load_state_dict(model_params)
        model.eval()
        ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation', epochs, True)
        ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation', epochs, False)
    else:
        model.init()

    total_param_size = []
    params = [value.numel() for value in model.parameters()]
    print(params)
    print(np.sum(params))

    opt = torch.optim.Adam(model.parameters(), lr=Config.learning_rate, weight_decay=Config.L2)
    for epoch in range(epochs):
        model.train()
        for i, str2var in tqdm(enumerate(train_batcher)):
            opt.zero_grad()
            e1 = str2var['e1']
            rel = str2var['rel']
            e2_multi = str2var['e2_multi1_binary'].float()
            pred = model(e1, rel)
            # negative sampling loss: score each example's positive targets
            # plus an equal number of uniformly sampled negatives
            loss = torch.zeros(1).cuda()
            for j in range(e2_multi.size(0)):
                position = torch.nonzero(e2_multi[j]).view(-1).cuda()
                label = torch.cat([torch.ones(len(position)), torch.zeros(len(position))]).cuda()
                neg_position = torch.randint(e2_multi.shape[1], (len(position),)).long().cuda()
                position = torch.cat([position, neg_position])
                loss += model.loss(pred[j, position], label)
            loss.backward()
            opt.step()

            train_batcher.state.loss = loss.cpu()


        print('saving to {0}'.format(model_path))
        torch.save(model.state_dict(), model_path)

        model.eval()
        with torch.no_grad():
            if epoch % 100 == 0 and epoch > 0:
                ranking_and_hits(model, test_rank_batcher, vocab, Config.dataset + "-" + Config.model_name, epoch, False)
            if epoch + 1 == epochs:
                ranking_and_hits(model, test_rank_batcher, vocab, Config.dataset, epoch, True)
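Examples #2 and #4 implement their negative-sampling loss with a Python loop over the batch, which is a common bottleneck. A vectorized sketch under the assumption that model.loss is binary cross-entropy on sigmoid scores, as in the ConvE reference code; the helper name and signature are mine, not from any of these projects:

import torch

def negative_sampling_loss(pred, e2_multi, loss_fn):
    # pred:     (batch, num_entities) scores in [0, 1]
    # e2_multi: (batch, num_entities) multi-hot targets
    pos = e2_multi.nonzero(as_tuple=False)  # (P, 2) rows of (example, entity)
    rows, cols = pos[:, 0], pos[:, 1]
    # one uniform negative per positive (may occasionally hit a true target)
    neg_cols = torch.randint(e2_multi.size(1), (len(cols),), device=pred.device)
    scores = torch.cat([pred[rows, cols], pred[rows, neg_cols]])
    labels = torch.cat([torch.ones(len(cols)), torch.zeros(len(cols))]).to(pred.device)
    return loss_fn(scores, labels)

# usage inside the training loop (sketch):
# loss = negative_sampling_loss(pred, e2_multi, torch.nn.BCELoss())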