Example #1
import os

# The snippet starts inside a data-holder constructor; the class and parameter
# names below are assumed for illustration, only the attribute assignments come
# from the original example.
class Instance(object):

    def __init__(self, words, starts, ends, labels):
        self.words = words
        self.starts = starts
        self.ends = ends
        self.labels = labels
        self.predict_labels = []  # filled in later with the model's predictions


if __name__ == '__main__':
    # parameters
    config_file = 'default.ini'
    config = Configurable(config_file)

    # model
    model = CompanyPredict()
    # load data
    train_data = read_pkl(config.train_pkl)
    dev_data = None
    if config.para_dev_file:
        dev_data = read_pkl(config.dev_pkl)
    test_data = read_pkl(config.test_pkl)

    # build source/target vocabularies from the pickled word and label lists
    word_list = read_pkl(config.load_feature_voc)
    p_label_list, s_label_list = read_pkl(config.load_label_voc)
    word_voc = VocabSrc(word_list)
    p_label_voc = VocabTgt(p_label_list)
    s_label_voc = VocabTgt(s_label_list)

    # load pre-trained embeddings only if a pickled embedding table exists
    embedding = None
    if os.path.isfile(config.embedding_pkl):
        embedding = read_pkl(config.embedding_pkl)
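
Configurable, read_pkl, VocabSrc, VocabTgt and CompanyPredict come from the project's own modules, so the example only shows how they are wired together. As a rough sketch of what read_pkl presumably does (an assumption, not the project's actual implementation), it just unpickles a file:

import pickle

def read_pkl(path):
    # Assumed helper: return whatever Python object was pickled at `path`.
    with open(path, 'rb') as f:
        return pickle.load(f)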
Example #2
import argparse

import torch

# Configurable, read_pkl, VocabSrc, VocabTgt, the PAD index and the three model
# classes (Vanilla, Contextualized, ContextualizedGates) come from the project's
# own modules; the `if __name__` guard is assumed from the indentation.
if __name__ == '__main__':
    # command-line options; unrecognized options are forwarded to Configurable
    parse = argparse.ArgumentParser('Attention Target Classifier')
    parse.add_argument('--config_file', type=str, default='default.ini')
    parse.add_argument('--thread', type=int, default=1)
    parse.add_argument('--use_cuda', action='store_true', default=False)
    parse.add_argument('--model', type=str, default='model.742')
    args, extra_args = parse.parse_known_args()

    config = Configurable(args.config_file, extra_args)
    torch.set_num_threads(args.thread)
    # use the GPU only when one is available and --use_cuda was requested
    gpu = torch.cuda.is_available()
    config.use_cuda = False
    if gpu and args.use_cuda:
        config.use_cuda = True
    print("\nGPU using status: ", config.use_cuda)

    # load the vocabularies built at training time
    feature_list = read_pkl(config.load_feature_voc)
    label_list = read_pkl(config.load_label_voc)
    feature_vec = VocabSrc(feature_list)
    label_vec = VocabTgt(label_list)

    # model
    if config.which_model == 'Vanilla':
        model = Vanilla(config, feature_vec.size, config.embed_dim, PAD,
                        label_vec.size)
    elif config.which_model == 'Contextualized':
        model = Contextualized(config, feature_vec.size, config.embed_dim, PAD,
                               label_vec.size)
    elif config.which_model == 'ContextualizedGates':
        model = ContextualizedGates(config, feature_vec.size, config.embed_dim,
                                    PAD, label_vec.size)
    else:
        # the original example is truncated here; an unrecognized model name
        # would most likely be reported as an error, e.g.:
        raise ValueError('unknown model: {}'.format(config.which_model))
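
The --model flag (default 'model.742') names a saved checkpoint, but the visible part of the example never loads it. A minimal, hypothetical continuation, assuming the checkpoint was written with torch.save(model.state_dict(), path), could look like this:

    # hypothetical continuation, not part of the original example
    state_dict = torch.load(args.model,
                            map_location='cuda' if config.use_cuda else 'cpu')
    model.load_state_dict(state_dict)
    model.eval()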