Example #1
def main(mode):
    with open(mode + '_label_list.json') as f:
        label_list_wd = json.load(f)
    print('load file : ' + mode + '_label_list.json' + ' [OK]')
    with open(mode + '_pred_list.json') as f:
        pred_list_wd = json.load(f)
    print('load file : ' + mode + '_pred_list.json' + ' [OK]')
    if len(label_list_wd) == 1200:
        print('Test on 500 seen: ')
        test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(
            label_list_wd[:500], pred_list_wd[:500])
        print('test_accu(1/10/100) / median / variance: %.2f %.2f %.2f %.2f %.2f' %
              (test_accu_1, test_accu_10, test_accu_100, median, variance))
        print('Test on 500 unseen: ')
        test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(
            label_list_wd[500:1000], pred_list_wd[500:1000])
        print('test_accu(1/10/100) / median / variance: %.2f %.2f %.2f %.2f %.2f' %
              (test_accu_1, test_accu_10, test_accu_100, median, variance))
        print('Test on 200: ')
        test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(
            label_list_wd[1000:], pred_list_wd[1000:])
        print('test_accu(1/10/100) / median / variance: %.2f %.2f %.2f %.2f %.2f' %
              (test_accu_1, test_accu_10, test_accu_100, median, variance))
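This evaluation entry point can be driven from the command line with a small launcher such as the sketch below. The script name and argument handling are assumptions; `mode` must match the prefix used when the prediction files were dumped (e.g. the 'S_...' / 'U_...' prefixes built in Example #2), and `json` plus `evaluate_test` must be importable in the same module.

import sys

if __name__ == '__main__':
    # Hypothetical launcher; assumes main() above is defined in this script and
    # that '<mode>_label_list.json' / '<mode>_pred_list.json' already exist.
    main(sys.argv[1])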
Example #2
def main(epoch_num, batch_size, verbose, UNSEEN, SEEN, MODE):
    [
        hownet_file, sememe_file, word_index_file, word_vector_file,
        dictionary_file, word_cilinClass_file
    ] = [
        'hownet.json', 'sememe.json', 'word_index.json', 'word_vector.npy',
        'dictionary_sense.json', 'word_cilinClass.json'
    ]
    word2index, index2word, word2vec, sememe_num, label_size, label_size_chara, word_defi_idx_all = load_data(
        hownet_file, sememe_file, word_index_file, word_vector_file,
        dictionary_file, word_cilinClass_file)
    (word_defi_idx_TrainDev, word_defi_idx_seen, word_defi_idx_test2000,
     word_defi_idx_test200, word_defi_idx_test272) = word_defi_idx_all
    index2word = np.array(index2word)
    length = len(word_defi_idx_TrainDev)
    valid_dataset = MyDataset(word_defi_idx_TrainDev[int(0.9 * length):])
    test_dataset = MyDataset(word_defi_idx_test2000 + word_defi_idx_test200 +
                             word_defi_idx_test272)
    if SEEN:
        mode = 'S_' + MODE
        print('*METHOD: Seen defi.')
        print('*TRAIN: [Train + allSeen(2000+200+272)]')
        print('*TEST: [2000rand1 + 200desc + 272desc]')
        train_dataset = MyDataset(word_defi_idx_TrainDev[:int(0.9 * length)] +
                                  word_defi_idx_seen)
    elif UNSEEN:
        mode = 'U_' + MODE
        print('*METHOD: Unseen All words and defi.')
        print('*TRAIN: [Train]')
        print('*TEST: [2000rand1 + 200desc + 272desc]')
        train_dataset = MyDataset(word_defi_idx_TrainDev[:int(0.9 * length)])
    else:
        raise ValueError('Either SEEN or UNSEEN must be set.')
    print('*MODE: [%s]' % mode)

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   collate_fn=my_collate_fn)
    valid_dataloader = torch.utils.data.DataLoader(valid_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   collate_fn=my_collate_fn)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=my_collate_fn_test)

    print('Train dataset: ', len(train_dataset))
    print('Valid dataset: ', len(valid_dataset))
    print('Test dataset: ', len(test_dataset))
    word_defi_idx = word_defi_idx_TrainDev + word_defi_idx_seen

    wd2sem = word2sememe(word_defi_idx, len(word2index), sememe_num)
    wd_sems = label_multihot(wd2sem, sememe_num)
    wd_sems = torch.from_numpy(np.array(wd_sems[:label_size])).to(device)
    wd_POSs = label_multihot(word2POS(word_defi_idx, len(word2index), 13), 13)
    wd_POSs = torch.from_numpy(np.array(wd_POSs[:label_size])).to(device)
    wd_charas = label_multihot(
        word2chara(word_defi_idx, len(word2index), label_size_chara),
        label_size_chara)
    wd_charas = torch.from_numpy(np.array(wd_charas[:label_size])).to(device)
    wd2Cilin1 = word2Cn(word_defi_idx, len(word2index), 'C1', 13)
    wd_C1 = label_multihot(wd2Cilin1, 13)  # Cilin class counts per level C1-C4: 13, 96, 1426, 4098
    wd_C1 = torch.from_numpy(np.array(wd_C1[:label_size])).to(device)
    wd_C2 = label_multihot(word2Cn(word_defi_idx, len(word2index), 'C2', 96),
                           96)
    wd_C2 = torch.from_numpy(np.array(wd_C2[:label_size])).to(device)
    wd_C3 = label_multihot(word2Cn(word_defi_idx, len(word2index), 'C3', 1426),
                           1426)
    wd_C3 = torch.from_numpy(np.array(wd_C3[:label_size])).to(device)
    wd_C4 = label_multihot(word2Cn(word_defi_idx, len(word2index), 'C4', 4098),
                           4098)
    wd_C4 = torch.from_numpy(np.array(wd_C4[:label_size])).to(device)
    '''wd2Cilin = word2Cn(word_defi_idx, len(word2index), 'C', 5633)
    wd_C0 = label_multihot(wd2Cilin, 5633) 
    wd_C0 = torch.from_numpy(np.array(wd_C0[:label_size])).to(device)
    wd_C = [wd_C1, wd_C2, wd_C3, wd_C4, wd_C0]
    '''
    wd_C = [wd_C1, wd_C2, wd_C3, wd_C4]
    #----------mask of no sememes
    print('calculating mask of no sememes...')
    mask_s = torch.zeros(label_size, dtype=torch.float32, device=device)
    for i in range(label_size):
        sems = set(wd2sem[i].detach().cpu().numpy().tolist()) - set(
            [sememe_num])
        if len(sems) == 0:
            mask_s[i] = 1

    mask_c = torch.zeros(label_size, dtype=torch.float32, device=device)
    for i in range(label_size):
        cc = set(wd2Cilin1[i].detach().cpu().numpy().tolist()) - set([13])
        if len(cc) == 0:
            mask_c[i] = 1

    model = Encoder(vocab_size=len(word2index),
                    embed_dim=word2vec.shape[1],
                    hidden_dim=200,
                    layers=1,
                    class_num=label_size,
                    sememe_num=sememe_num,
                    chara_num=label_size_chara)
    model.embedding.weight.data = torch.from_numpy(word2vec)
    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # Adam
    best_valid_accu = 0
    DEF_UPDATE = True
    for epoch in range(epoch_num):
        print('epoch: ', epoch)
        model.train()
        train_loss = 0
        label_list = list()
        pred_list = list()
        for words_t, sememes_t, definition_words_t, POS_t, sememes, POSs, charas_t, C, C_t in tqdm(
                train_dataloader, disable=verbose):
            optimizer.zero_grad()
            loss, _, indices = model('train',
                                     x=definition_words_t,
                                     w=words_t,
                                     ws=wd_sems,
                                     wP=wd_POSs,
                                     wc=wd_charas,
                                     wC=wd_C,
                                     msk_s=mask_s,
                                     msk_c=mask_c,
                                     mode=MODE)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            predicted = indices[:, :100].detach().cpu().numpy().tolist()
            train_loss += loss.item()
            label_list.extend(words_t.detach().cpu().numpy())
            pred_list.extend(predicted)
        train_accu_1, train_accu_10, train_accu_100 = evaluate(
            label_list, pred_list)
        del label_list
        del pred_list
        gc.collect()
        print('train_loss: ', train_loss / len(train_dataset))
        print('train_accu(1/10/100): %.2f %.2f %.2f' %
              (train_accu_1, train_accu_10, train_accu_100))
        model.eval()
        with torch.no_grad():
            valid_loss = 0
            label_list = []
            pred_list = []
            for words_t, sememes_t, definition_words_t, POS_t, sememes, POSs, charas_t, C, C_t in tqdm(
                    valid_dataloader, disable=verbose):
                loss, _, indices = model('train',
                                         x=definition_words_t,
                                         w=words_t,
                                         ws=wd_sems,
                                         wP=wd_POSs,
                                         wc=wd_charas,
                                         wC=wd_C,
                                         msk_s=mask_s,
                                         msk_c=mask_c,
                                         mode=MODE)
                predicted = indices[:, :100].detach().cpu().numpy().tolist()
                valid_loss += loss.item()
                label_list.extend(words_t.detach().cpu().numpy())
                pred_list.extend(predicted)
            valid_accu_1, valid_accu_10, valid_accu_100 = evaluate(
                label_list, pred_list)
            print('valid_loss: ', valid_loss / len(valid_dataset))
            print('valid_accu(1/10/100): %.2f %.2f %.2f' %
                  (valid_accu_1, valid_accu_10, valid_accu_100))
            del label_list
            del pred_list
            gc.collect()

            if valid_accu_10 > best_valid_accu:
                best_valid_accu = valid_accu_10
                print('-----best_valid_accu-----')
                #torch.save(model, 'saved.model')
                label_list = []
                pred_list = []
                for words_t, definition_words_t in tqdm(test_dataloader,
                                                        disable=verbose):
                    indices = model('test',
                                    x=definition_words_t,
                                    w=words_t,
                                    ws=wd_sems,
                                    wP=wd_POSs,
                                    wc=wd_charas,
                                    wC=wd_C,
                                    msk_s=mask_s,
                                    msk_c=mask_c,
                                    mode=MODE)
                    predicted = indices[:, :1000].detach().cpu().numpy().tolist()
                    label_list.extend(words_t.detach().cpu().numpy())
                    pred_list.extend(predicted)
                test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(
                    label_list, pred_list)
                print('test_accu(1/10/100) / median / variance: %.2f %.2f %.2f %.1f %.2f' %
                      (test_accu_1, test_accu_10, test_accu_100, median,
                       variance))
                if epoch > 10:
                    json.dump((index2word[label_list]).tolist(),
                              open(mode + '_label_list.json', 'w'))
                    json.dump((index2word[np.array(pred_list)]).tolist(),
                              open(mode + '_pred_list.json', 'w'))
                del label_list
                del pred_list
                gc.collect()
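The two mask loops above (words with no sememes and words with no Cilin class) share the same pattern, which Example #3 below wraps in a mask_noFeature helper. A sketch of such a helper, inferred from the inline loops here, follows; the repository's actual implementation may differ.

def mask_noFeature(label_size, wd2fea, feature_num):
    # Sketch inferred from the inline loops above: wd2fea rows are padded with
    # the value feature_num, so a word lacks the feature when its row contains
    # only padding. Words with no such feature are marked with 1 in the mask.
    mask = torch.zeros(label_size, dtype=torch.float32, device=device)
    for i in range(label_size):
        feas = set(wd2fea[i].detach().cpu().numpy().tolist()) - {feature_num}
        if len(feas) == 0:
            mask[i] = 1
    return mask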
Example #3
def main(frequency, batch_size, epoch_num, verbose, MODE):
    mode = MODE
    word2index, index2word, word2vec, index2each, label_size_each, data_idx_each = load_data(
        frequency)
    (label_size, label_lexname_size, label_rootaffix_size,
     label_sememe_size) = label_size_each
    (data_train_idx, data_dev_idx, data_test_500_seen_idx,
     data_test_500_unseen_idx, data_defi_c_idx,
     data_desc_c_idx) = data_idx_each
    (index2sememe, index2lexname, index2rootaffix) = index2each
    index2word = np.array(index2word)
    test_dataset = MyDataset(data_test_500_seen_idx +
                             data_test_500_unseen_idx + data_desc_c_idx)
    valid_dataset = MyDataset(data_dev_idx)
    train_dataset = MyDataset(data_train_idx + data_defi_c_idx)

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   collate_fn=my_collate_fn)
    valid_dataloader = torch.utils.data.DataLoader(valid_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   collate_fn=my_collate_fn)
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  collate_fn=my_collate_fn)

    print('DataLoader prepared. Batch_size [%d]' % batch_size)
    print('Train dataset: ', len(train_dataset))
    print('Valid dataset: ', len(valid_dataset))
    print('Test dataset: ', len(test_dataset))
    data_all_idx = data_train_idx + data_dev_idx + data_test_500_seen_idx + data_test_500_unseen_idx + data_defi_c_idx

    sememe_num = len(index2sememe)
    # label_size rows, not len(word2index): only the target words' features are used
    wd2sem = word2feature(data_all_idx, label_size, sememe_num, 'sememes')
    wd_sems = label_multihot(wd2sem, sememe_num)
    wd_sems = torch.from_numpy(np.array(wd_sems)).to(device)
    # previously: torch.from_numpy(np.array(wd_sems[:label_size])).to(device)
    lexname_num = len(index2lexname)
    wd2lex = word2feature(data_all_idx, label_size, lexname_num, 'lexnames')
    wd_lex = label_multihot(wd2lex, lexname_num)
    wd_lex = torch.from_numpy(np.array(wd_lex)).to(device)
    rootaffix_num = len(index2rootaffix)
    wd2ra = word2feature(data_all_idx, label_size, rootaffix_num, 'root_affix')
    wd_ra = label_multihot(wd2ra, rootaffix_num)
    wd_ra = torch.from_numpy(np.array(wd_ra)).to(device)
    mask_s = mask_noFeature(label_size, wd2sem, sememe_num)
    mask_l = mask_noFeature(label_size, wd2lex, lexname_num)
    mask_r = mask_noFeature(label_size, wd2ra, rootaffix_num)

    model = Encoder(vocab_size=len(word2index),
                    embed_dim=word2vec.shape[1],
                    hidden_dim=300,
                    layers=1,
                    class_num=label_size,
                    sememe_num=sememe_num,
                    lexname_num=lexname_num,
                    rootaffix_num=rootaffix_num)
    model.embedding.weight.data = torch.from_numpy(word2vec)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # Adam
    best_valid_accu = 0
    DEF_UPDATE = True
    for epoch in range(epoch_num):
        print('epoch: ', epoch)
        model.train()
        train_loss = 0
        label_list = list()
        pred_list = list()
        for words_t, definition_words_t in tqdm(train_dataloader,
                                                disable=verbose):
            optimizer.zero_grad()
            loss, _, indices = model('train',
                                     x=definition_words_t,
                                     w=words_t,
                                     ws=wd_sems,
                                     wl=wd_lex,
                                     wr=wd_ra,
                                     msk_s=mask_s,
                                     msk_l=mask_l,
                                     msk_r=mask_r,
                                     mode=MODE)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            predicted = indices[:, :100].detach().cpu().numpy().tolist()
            train_loss += loss.item()
            label_list.extend(words_t.detach().cpu().numpy())
            pred_list.extend(predicted)
        train_accu_1, train_accu_10, train_accu_100 = evaluate(
            label_list, pred_list)
        del label_list
        del pred_list
        gc.collect()
        print('train_loss: ', train_loss / len(train_dataset))
        print('train_accu(1/10/100): %.2f %.2f %.2f' %
              (train_accu_1, train_accu_10, train_accu_100))
        model.eval()
        with torch.no_grad():
            valid_loss = 0
            label_list = []
            pred_list = []
            for words_t, definition_words_t in tqdm(valid_dataloader,
                                                    disable=verbose):
                loss, _, indices = model('train',
                                         x=definition_words_t,
                                         w=words_t,
                                         ws=wd_sems,
                                         wl=wd_lex,
                                         wr=wd_ra,
                                         msk_s=mask_s,
                                         msk_l=mask_l,
                                         msk_r=mask_r,
                                         mode=MODE)
                predicted = indices[:, :100].detach().cpu().numpy().tolist()
                valid_loss += loss.item()
                label_list.extend(words_t.detach().cpu().numpy())
                pred_list.extend(predicted)
            valid_accu_1, valid_accu_10, valid_accu_100 = evaluate(
                label_list, pred_list)
            print('valid_loss: ', valid_loss / len(valid_dataset))
            print('valid_accu(1/10/100): %.2f %.2f %.2f' %
                  (valid_accu_1, valid_accu_10, valid_accu_100))
            del label_list
            del pred_list
            gc.collect()

            if valid_accu_10 > best_valid_accu:
                best_valid_accu = valid_accu_10
                print('-----best_valid_accu-----')
                #torch.save(model, 'saved.model')
                test_loss = 0
                label_list = []
                pred_list = []
                for words_t, definition_words_t in tqdm(test_dataloader,
                                                        disable=verbose):
                    indices = model('test',
                                    x=definition_words_t,
                                    w=words_t,
                                    ws=wd_sems,
                                    wl=wd_lex,
                                    wr=wd_ra,
                                    msk_s=mask_s,
                                    msk_l=mask_l,
                                    msk_r=mask_r,
                                    mode=MODE)
                    predicted = indices[:, :1000].detach().cpu().numpy().tolist()
                    label_list.extend(words_t.detach().cpu().numpy())
                    pred_list.extend(predicted)
                test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(
                    label_list, pred_list)
                print('test_accu(1/10/100) / median / variance: %.2f %.2f %.2f %.2f %.2f' %
                      (test_accu_1, test_accu_10, test_accu_100, median,
                       variance))
                if epoch > 5:
                    json.dump((index2word[label_list]).tolist(),
                              open(mode + '_label_list.json', 'w'))
                    json.dump((index2word[np.array(pred_list)]).tolist(),
                              open(mode + '_pred_list.json', 'w'))
                del label_list
                del pred_list
                gc.collect()
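label_multihot appears throughout Examples #2 and #3 to turn padded rows of feature indices into multi-hot label vectors. The sketch below shows a plausible implementation inferred from its call sites (rows padded with the feature count itself, as the mask logic in Example #2 suggests); the actual helper in the repository may differ.

import numpy as np

def label_multihot(features, feature_num):
    # Plausible sketch: `features` holds one row of feature indices per word,
    # padded with the value feature_num; each row becomes a feature_num-dim
    # multi-hot vector in which padding indices are ignored.
    multihot = []
    for row in features:
        vec = np.zeros(feature_num, dtype=np.float32)
        for idx in row:
            idx = int(idx)
            if idx < feature_num:  # skip padding
                vec[idx] = 1.0
        multihot.append(vec)
    return multihot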
Example #4
def main():
    """
    Wrapper to run the classification task
    """
    # Parse command-line arguments
    parser = build_parser()
    options = parser.parse_args()

    if options.mode == "gen_data":
        # Split the data into train/dev/test sets
        split_data()

        # Load the data and reshape for training and evaluation
        X, y_media, y_emotion = load_data(update=options.update,
                                          remove_broken=options.remove_broken)

        for set_type in ["train", "dev", "test"]:
            total_media = np.sum(y_media[set_type], axis=0)
            total_emotion = np.sum(y_emotion[set_type], axis=0)

            print(f"Total images for each media category in {set_type} set:")
            for v, k in enumerate(MEDIA_LABELS):
                print(f"\t{k}: {total_media[v]}")
            print(f"Total images for each emotion category in {set_type} set:")
            for v, k in enumerate(EMOTION_LABELS):
                print(f"\t{k}: {total_emotion[v]}")

    elif options.mode == "train":
        # Create directory to save the results
        results_dir = "results"
        if not os.path.exists("./" + results_dir):
            os.makedirs("./" + results_dir)
        # Check if the given log folder already exists
        results_subdirs = os.listdir("./" + results_dir)
        if not options.log_folder:
            raise Exception(
                'Please specify log_folder argument to store results.')
        elif options.log_folder in results_subdirs:
            raise Exception('The given log folder already exists.')
        else:
            # Create a folder for each training run
            log_folder = os.path.join(results_dir, options.log_folder)
            os.makedirs(log_folder)

        # Load the data and organize into three tuples (train, val/dev, test)
        # Each tuple consists of input arrays, media labels, and emotion labels
        train_data, val_data, test_data = load_data(DATA_DIR, INPUT_FILE,
                                                    MEDIA_LABEL_FILE,
                                                    EMOTION_LABEL_FILE)

        # Preprocess the data
        train_dset, val_dset, test_dset = preprocess(
            train_data,
            val_data,
            test_data,
            augment=options.augment,
            train_stats_dir=TRAIN_STATS_DIR)

        # Specify the device:
        if options.device == "cpu":
            device = "/cpu:0"
        elif options.device == "gpu":
            device = "/device:GPU:0"

        # Train the model
        train(train_dset,
              val_dset,
              log_folder=log_folder,
              device=device,
              batch_size=64,
              num_epochs=100,
              model_type=options.model_type)

    elif options.mode == "test":
        # Load the data and organize into three tuples (train, val/dev, test)
        # Each tuple consists of input arrays, media labels, and emotion labels
        train_data, val_data, test_data = load_data(DATA_DIR, INPUT_FILE,
                                                    MEDIA_LABEL_FILE,
                                                    EMOTION_LABEL_FILE)
        # Preprocess the data
        if os.path.isfile(os.path.join(TRAIN_STATS_DIR, "train_stats.npz")):
            print(
                "Preprocess test data using saved statistics from train data..."
            )
            train_stats_file = os.path.join(TRAIN_STATS_DIR, "train_stats.npz")
            test_dset = preprocess_from_file(train_stats_file,
                                             test_data,
                                             augment=options.augment)
        else:
            print("Preprocess test data using train data...")
            train_dset, val_dset, test_dset = preprocess(
                train_data,
                val_data,
                test_data,
                augment=options.augment,
                train_stats_dir=TRAIN_STATS_DIR)

        # Specify the device:
        if options.device == "cpu":
            device = "/cpu:0"
        elif options.device == "gpu":
            device = "/device:GPU:0"

        # Load the model
        model_path = os.path.join("test_models", options.model_name)
        evaluate_test(model_path,
                      options.model_type,
                      test_dset,
                      batch_size=64,
                      confusion_mat=options.confusion_mat)

    elif options.mode == "ensemble":
        # Load the data and organize into three tuples (train, val/dev, test)
        # Each tuple consists of input arrays, media labels, and emotion labels
        train_data, val_data, test_data = load_data(DATA_DIR, INPUT_FILE,
                                                    MEDIA_LABEL_FILE,
                                                    EMOTION_LABEL_FILE)
        # Preprocess the data
        if os.path.isfile(os.path.join(TRAIN_STATS_DIR, "train_stats.npz")):
            print(
                "Preprocess test data using saved statistics from train data..."
            )
            train_stats_file = os.path.join(TRAIN_STATS_DIR, "train_stats.npz")
            test_dset = preprocess_from_file(train_stats_file,
                                             test_data,
                                             augment=options.augment)
        else:
            print("Preprocess test data using train data...")
            train_dset, val_dset, test_dset = preprocess(
                train_data,
                val_data,
                test_data,
                augment=options.augment,
                train_stats_dir=TRAIN_STATS_DIR)
        # Specify the device:
        if options.device == "cpu":
            device = "/cpu:0"
        elif options.device == "gpu":
            device = "/device:GPU:0"

        if not options.ensemble_folder:
            raise Exception(
                'Please specify ensemble_folder argument to find ensemble folders.'
            )
        elif len(os.listdir(options.ensemble_folder)) == 0:
            raise Exception('Ensemble folder is empty.')

        # Evaluate the ensemble
        evaluate_ensemble(options.ensemble_folder,
                          test_dset,
                          batch_size=64,
                          confusion_mat=options.confusion_mat)

    elif options.mode == "test_single":
        x_test = load_image(
            os.path.join('stylized_images_configs', options.image))
        train_stats_file = os.path.join(TRAIN_STATS_DIR, "train_stats.npz")
        x_test = preprocess_image(train_stats_file,
                                  x_test,
                                  augment=options.augment)

        model_path = os.path.join("test_models", options.model_name)
        predict_image(x_test, model_path)
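Example #4 depends on a build_parser helper that is not shown. The sketch below reconstructs the minimum set of options from the attributes main() reads above; the argument choices, defaults, and help behaviour are assumptions.

import argparse

def build_parser():
    # Sketch of build_parser, reconstructed from the option attributes that
    # main() above actually reads; choices and defaults are assumptions.
    parser = argparse.ArgumentParser(description="Media/emotion classification tasks")
    parser.add_argument("--mode", choices=["gen_data", "train", "test", "ensemble", "test_single"])
    parser.add_argument("--update", action="store_true")
    parser.add_argument("--remove_broken", action="store_true")
    parser.add_argument("--log_folder", default=None)
    parser.add_argument("--augment", action="store_true")
    parser.add_argument("--device", choices=["cpu", "gpu"], default="cpu")
    parser.add_argument("--model_type", default=None)
    parser.add_argument("--model_name", default=None)
    parser.add_argument("--confusion_mat", action="store_true")
    parser.add_argument("--ensemble_folder", default=None)
    parser.add_argument("--image", default=None)
    return parser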