Example #1
import os
import pickle
import sys

# `args`, load_attr_data, load_w2v, parse_json, kfold_split, and
# AttributeClassifier come from the surrounding project.
def ensemble():
    f_train = "../data/train.txt"
    # f_test = "data/test_attr2.txt"
    if args.w2v == "merge":
        f_w2v = "../embedding/embedding_all_merge_300.txt"
    elif args.w2v == "fasttext2":
        f_w2v = "../embedding/embedding_all_fasttext2_300.txt"
    elif args.w2v == "tencent":
        f_w2v = "../embedding/embedding_all_tencent_200.txt"
    else:
        print("error, no embedding")
        exit(-1)
    f_dict = "../dataset/attribute.json"
    print(f_train)
    print(f_w2v)
    if not os.path.exists(args.check_dir):
        os.mkdir(args.check_dir)
    raw_texts, raw_labels = load_attr_data(filename=f_train)
    W, word2index2 = load_w2v(f_w2v)
    word2index = pickle.load(open("../data/vocabulary.pkl", 'rb'))
    assert word2index == word2index2
    attr_list, attr_dict = parse_json(f_dict)
    kf = 0
    for train_index, test_index in kfold_split(len(raw_texts), args.folds):
        kf += 1
        print("FOLD:", kf)
        print("TRAIN:", str(len(train_index)), '\n', "TEST:",
              str(len(test_index)))
        # train_index, test_index = train_index.tolist(), test_index.tolist()
        test_texts = [raw_texts[i] for i in test_index]
        test_labels = [raw_labels[i] for i in test_index]
        train_texts = [raw_texts[i] for i in train_index]
        train_labels = [raw_labels[i] for i in train_index]
        print(len(train_texts))
        print(len(test_labels))
        model = AttributeClassifier()
        print(attr_list)
        print(attr_dict)
        model.train_from_data((train_texts, train_labels),
                              (test_texts, test_labels), W, word2index,
                              attr_dict, args, kf)
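
`kfold_split` is project code that is not shown here, but `ensemble()` iterates over it and `dev()` (Example #3) indexes into its result, so it must produce an indexable sequence of (train_index, test_index) pairs over range(n). A minimal compatible sketch built on scikit-learn's KFold; shuffling and the seed value are assumptions:

from sklearn.model_selection import KFold

def kfold_split(n, folds, seed=42):
    # Return a list of (train_index, test_index) ndarray pairs over range(n),
    # matching how ensemble() iterates and dev() indexes into the result.
    kf = KFold(n_splits=folds, shuffle=True, random_state=seed)
    return list(kf.split(range(n)))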
Example #2
import os
import pickle
import sys

# `args`, load_attr_data, split_dev, load_w2v, parse_json, and
# AttributeClassifier come from the surrounding project.
def main():
    f_train = "../data/train.txt"
    # f_test = "data/test_attr2.txt"
    if args.w2v == "merge":
        f_w2v = "../embedding/embedding_all_merge_300.txt"
    elif args.w2v == "fasttext":
        f_w2v = "../embedding/embedding_all_fasttext_300.txt"
    elif args.w2v == "fasttext2":
        f_w2v = "../embedding/embedding_all_fasttext2_300.txt"
    elif args.w2v == "tencent":
        f_w2v = "../embedding/embedding_all_tencent_200.txt"
    else:
        print("error, no embedding")
        exit(-1)
    f_dict = "../dataset/attribute.json"
    print(f_w2v)
    train_texts, train_labels = load_attr_data(filename=f_train)
    train_texts, train_labels, test_texts, test_labels = split_dev(
        train_texts, train_labels)
    print(len(train_texts))
    print(len(test_labels))
    # train_texts2, train_labels2, test_texts, test_labels = split_dev(train_texts, train_labels)
    if not os.path.exists(args.check_dir):
        os.mkdir(args.check_dir)
    # test_texts, test_labels = load_attr_data(filename=f_test)
    W, word2index2 = load_w2v(f_w2v)
    word2index = pickle.load(open("../data/vocabulary.pkl", 'rb'))
    assert word2index == word2index2
    attr_list, attr_dict = parse_json(f_dict)
    print(list(attr_dict.keys()))
    model = AttributeClassifier()
    print(attr_list)
    print(attr_dict)
    model.train_from_data((train_texts, train_labels),
                          (test_texts, test_labels), W, word2index, attr_dict,
                          args)
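
`split_dev` is also project code. Judging from the call site, it takes the full training texts/labels and returns (train_texts, train_labels, dev_texts, dev_labels). A hypothetical sketch; the 90/10 split ratio and the seed are assumptions:

import random

def split_dev(texts, labels, dev_ratio=0.1, seed=42):
    # Shuffle indices and hold out the last dev_ratio of the data as the dev set.
    idx = list(range(len(texts)))
    random.Random(seed).shuffle(idx)
    cut = int(len(idx) * (1 - dev_ratio))
    train_idx, dev_idx = idx[:cut], idx[cut:]
    return ([texts[i] for i in train_idx], [labels[i] for i in train_idx],
            [texts[i] for i in dev_idx], [labels[i] for i in dev_idx])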
Example #3
# AttributeClassifier, load_attr_data, load_w2v, parse_json, kfold_split,
# Data, train, score, and `args` come from the surrounding project.
def dev():
    model = AttributeClassifier()
    check_point = "checkpoints5/checkpoint_AttA3_0.8666.pt"
    model.load_model(check_point)

    f_train = "data/attribute_data.txt"
    # f_test = "data/test_attr2.txt"
    f_w2v = "../embedding/embedding_all_merge_300.txt"
    f_dict = "../dataset/attribute.json"
    print(f_w2v)
    raw_texts, raw_labels = load_attr_data(filename=f_train)
    W, word2index = load_w2v(f_w2v)
    attr_list, attr_dict = parse_json(f_dict)

    # take the third fold's held-out split
    _, test_index = list(kfold_split(len(raw_texts), args.folds))[2]
    test_texts = [raw_texts[i] for i in test_index]
    test_labels = [raw_labels[i] for i in test_index]
    test_data = Data((test_texts, test_labels), word2index, attr_dict, args)

    test_predict = train.predict(model.classifier, test_data, args)
    pred_acc_t = score(test_predict, test_data.labels)
    print(pred_acc_t)
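
`score` compares the model's predictions against the gold labels, and the call site treats the result as an accuracy. A hypothetical sketch, assuming both arguments are (num_samples, num_attributes) binary arrays; the project's actual metric may differ (e.g. micro-F1):

import numpy as np

def score(predict, labels):
    # Exact-match accuracy over multi-label attribute vectors.
    predict = np.asarray(predict)
    labels = np.asarray(labels)
    return float((predict == labels).all(axis=1).mean())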
Example #4
import codecs
import os
import pickle
import shutil
import time

import numpy as np
from sklearn.linear_model import LogisticRegression

# Remaining helpers (load_test_data, load_attr_data, parse_json, Data,
# load_elmo, load_oof, get_oof) and `args` come from the surrounding project.
def stacking():
    saved = args.saved != 0
    f_train = "../data/train.txt"
    test_file = "../data/test.txt"
    test_texts = load_test_data(test_file)
    raw_texts, raw_labels = load_attr_data(filename=f_train)
    word2index = pickle.load(open("../data/vocabulary.pkl", 'rb'))

    f_dict = "../dataset/attribute.json"
    attr_list, attr_dict = parse_json(f_dict)

    # multiple checkpoint directories may be passed, separated by '#'
    paths = args.test_dir.split('#')
    models_files = []
    for path in paths:
        models_files.append([
            os.path.join(path, f) for f in os.listdir(path)
            if os.path.isfile(os.path.join(path, f))
        ])

    test_data = Data((test_texts, None), word2index)
    if args.use_elmo != 0:
        test_elmo = load_elmo(test_texts)
        test_data.add_feature(test_elmo)

    x_train = []
    y_train = []  # TODO replace
    x_test = []
    for path, checkpoints_per_model in zip(paths, models_files):
        print(path, checkpoints_per_model)
        if saved and os.path.isfile(os.path.join(path, 'npy', "oof_train.npy")):
            oof_train, oof_train_y, oof_test = load_oof(path)
        else:
            NFOLDS = len(checkpoints_per_model)
            print(NFOLDS)
            assert NFOLDS == args.folds
            clfs = [None] * NFOLDS
            for cp in checkpoints_per_model:
                # the fold index is encoded just before the extension,
                # e.g. a name ending in "_3.pt" yields fold 3
                fold = int(cp.replace('_', '.').split('.')[-2])
                print(fold)
                clfs[fold - 1] = cp
            oof_train, oof_train_y, oof_test = get_oof(clfs, raw_texts,
                                                       raw_labels, test_data,
                                                       word2index, attr_dict)
        x_train.append(oof_train)
        if len(y_train) == 0:
            y_train = oof_train_y
        else:
            assert (y_train == oof_train_y).all()
        x_test.append(oof_test)
    # shape: (num_samples, num_attributes, num_models)
    x_train = np.stack(x_train, axis=2)
    x_test = np.stack(x_test, axis=2)

    print(x_train.shape)
    num_train = x_train.shape[0]
    num_test = x_test.shape[0]
    test_predict = []
    # fit one logistic-regression meta-classifier per attribute
    # on the out-of-fold predictions of the base models
    for c in range(x_train.shape[1]):
        x_train_c = x_train[:, c, :].reshape(num_train, -1)
        x_test_c = x_test[:, c, :].reshape(num_test, -1)
        meta_clf_c = LogisticRegression()
        y_train_c = y_train[:, c]
        meta_clf_c.fit(x_train_c, y_train_c)
        test_predict_c = meta_clf_c.predict_proba(x_test_c)[:, 1]
        test_predict.append(test_predict_c)

    test_predict = np.stack(test_predict, axis=1)
    print(test_predict.shape)
    fw = codecs.open("../data/test_predict_aspect_ensemble.txt",
                     'w',
                     encoding='utf-8')

    for prob in test_predict:
        attributes = []
        voted = [0] * len(attr_list)

        for i, p in enumerate(prob):
            if p > args.threshold:
                voted[i] = 1
        # guarantee at least one predicted attribute per sample
        if sum(voted) == 0:
            voted[prob.argmax()] = 1
        for i, l in enumerate(voted):
            if l != 0:
                attributes.append(attr_list[i])
        fw.write('|'.join(attributes) + '\n')
    time_stamp = '_'.join(time.asctime().replace(':', '_').split())
    fw.close()
    shutil.copy2(
        "../data/test_predict_aspect_ensemble.txt",
        "../data/backup/test_predict_aspect_ensemble_%s.txt" % time_stamp)