Example #1
    def __init__(self, vocab, config):
        word2id = vocab.word2idx
        super(Model, self).__init__()
        vocab_num = len(word2id)
        self.word2id = word2id
        self.config = config
        self.char_dict = preprocess.get_char_dict('data/char_vocab.english.txt')
        self.genres = {g: i for i, g in enumerate(["bc", "bn", "mz", "nw", "pt", "tc", "wb"])}
        self.device = torch.device("cuda:" + config.cuda)

        self.emb = nn.Embedding(vocab_num, 350)

        emb1 = EmbedLoader().load_with_vocab(config.glove, vocab, normalize=False)
        emb2 = EmbedLoader().load_with_vocab(config.turian, vocab, normalize=False)
        pre_emb = np.concatenate((emb1, emb2), axis=1)
        pre_emb /= (np.linalg.norm(pre_emb, axis=1, keepdims=True) + 1e-12)

        if pre_emb is not None:
            self.emb.weight = nn.Parameter(torch.from_numpy(pre_emb).float())
            for param in self.emb.parameters():
                param.requires_grad = False
        self.emb_dropout = nn.Dropout(inplace=True)


        if config.use_elmo:
            self.elmo = ElmoEmbedder(options_file='data/elmo/elmo_2x4096_512_2048cnn_2xhighway_options.json',
                                     weight_file='data/elmo/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5',
                                     cuda_device=int(config.cuda))
            print("elmo load over.")
            # register the 3 ELMo layer-mixing weights as a module parameter so they are
            # trained and moved to the right device together with the rest of the model
            self.elmo_args = nn.Parameter(torch.randn(3))

        self.char_emb = nn.Embedding(len(self.char_dict), config.char_emb_size)
        self.conv1 = nn.Conv1d(config.char_emb_size, 50, 3)
        self.conv2 = nn.Conv1d(config.char_emb_size, 50, 4)
        self.conv3 = nn.Conv1d(config.char_emb_size, 50, 5)

        self.feature_emb = nn.Embedding(config.span_width, config.feature_size)
        self.feature_emb_dropout = nn.Dropout(p=0.2, inplace=True)

        self.mention_distance_emb = nn.Embedding(10, config.feature_size)
        self.distance_drop = nn.Dropout(p=0.2, inplace=True)

        self.genre_emb = nn.Embedding(7, config.feature_size)
        self.speaker_emb = nn.Embedding(2, config.feature_size)

        self.bilstm = VarLSTM(input_size=350 + 150 * config.use_CNN + config.use_elmo * 1024,
                              hidden_size=200,
                              bidirectional=True,
                              batch_first=True,
                              hidden_dropout=0.2)
        # self.bilstm = nn.LSTM(input_size=500, hidden_size=200, bidirectional=True, batch_first=True)
        self.h0 = nn.init.orthogonal_(torch.empty(2, 1, 200)).to(self.device)
        self.c0 = nn.init.orthogonal_(torch.empty(2, 1, 200)).to(self.device)
        self.bilstm_drop = nn.Dropout(p=0.2, inplace=True)

        self.atten = ffnn(input_size=400, hidden_size=config.atten_hidden_size, output_size=1)
        self.mention_score = ffnn(input_size=1320, hidden_size=config.mention_hidden_size, output_size=1)
        self.sa = ffnn(input_size=3980+40*config.use_metadata, hidden_size=config.sa_hidden_size, output_size=1)
        self.mention_start_np = None
        self.mention_end_np = None
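
A minimal standalone sketch of the embedding setup used above: two pretrained matrices are loaded against the same vocabulary with EmbedLoader().load_with_vocab, concatenated, row-normalized, and frozen inside an nn.Embedding. The import path and the placeholder file names are assumptions for illustration; the example's own paths come from its config object.

import numpy as np
import torch
import torch.nn as nn
from fastNLP.io import EmbedLoader  # assumed import location of EmbedLoader

def build_frozen_embedding(vocab, glove_path='glove.840B.300d.txt', turian_path='turian.50d.txt'):
    # load both embedding files against the same vocabulary (paths are placeholders)
    emb1 = EmbedLoader().load_with_vocab(glove_path, vocab, normalize=False)
    emb2 = EmbedLoader().load_with_vocab(turian_path, vocab, normalize=False)
    # concatenate along the feature axis and L2-normalize every row
    pre_emb = np.concatenate((emb1, emb2), axis=1)
    pre_emb /= (np.linalg.norm(pre_emb, axis=1, keepdims=True) + 1e-12)
    # keep the pretrained vectors frozen, as the model above does
    return nn.Embedding.from_pretrained(torch.from_numpy(pre_emb).float(), freeze=True)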
Example #2
def load_conll_with_glove(
        data_dir,
        data_path='train.pos',
        glove_path="",
        # glove_path='/remote-home/ygxu/dataset/glove.empty.txt',
        load_glove=True,
        vocabs=None):
    path = os.path.join(data_dir, data_path)
    print(f"start load dataset from {path}.")

    from dataset import MyConllLoader
    ds = MyConllLoader().load(path)
    print(ds)
    ds.rename_field('word_seq', 'sentence')
    ds.rename_field('label_seq', 'label')
    #ds = DataSet.read_pos(path, headers=('sentence', 'label'), sep='\t')

    #ds.apply(lambda x: x['sentence'].lower(), new_field_name='sentence')
    #ds.apply(lambda x: x['sentence'].strip().split(), new_field_name='sentence')
    ds.apply(lambda x: len(x['sentence']) * [1.],
             new_field_name='word_seq_origin_len',
             is_input=True)

    if vocabs is None:
        vocab = Vocabulary(max_size=30000,
                           min_freq=2,
                           unknown='<unk>',
                           padding='<pad>')
        ds.apply(lambda x: [vocab.add(word) for word in x['sentence']])
        vocab.build_vocab()
        vocab_label = Vocabulary(max_size=200, unknown=None, padding='<pad>')
        ds.apply(lambda x: [vocab_label.add(label) for label in x['label']])
        vocab_label.build_vocab()
    else:
        vocab, vocab_label = vocabs

    ds.apply(lambda x: [vocab.to_index(w) for w in x['sentence']],
             new_field_name='word_seq',
             is_input=True)
    ds.apply(lambda x: [vocab_label.to_index(w) for w in x['label']],
             new_field_name='truth',
             is_input=True,
             is_target=True)

    if not load_glove:
        print(f"successful load dataset from {path}")
        return ds

    embedding, _ = EmbedLoader().load_embedding(300, glove_path, 'glove',
                                                vocab)

    print(f"successful load dataset and embedding from {path}")

    return ds, embedding, (vocab, vocab_label)
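
A possible call pattern for the loader above (directory and file names are placeholders): the vocabularies built on the training file are passed back in so a second split is indexed with the same word and label ids.

train_ds, glove_embedding, vocabs = load_conll_with_glove(
    'data', data_path='train.pos', glove_path='glove.6B.300d.txt', load_glove=True)
# reuse (vocab, vocab_label) so dev indices stay consistent with training
dev_ds = load_conll_with_glove('data', data_path='dev.pos', load_glove=False, vocabs=vocabs)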
Example #3
def load_dataset_with_glove(data_dir,
                            data_path='mr.task.train',
                            glove_path="",
                            load_glove=True,
                            vocabs=None):
    path = os.path.join(data_dir, data_path)
    print(f"start load dataset from {path}.")

    ds = DataSet.read_csv(path, headers=('label', 'sentence'), sep='\t')

    ds.apply(lambda x: x['sentence'].lower(), new_field_name='sentence')
    ds.apply(lambda x: x['sentence'].strip().split(),
             new_field_name='sentence')
    ds.apply(lambda x: len(x['sentence']) * [1.],
             new_field_name='mask',
             is_input=True)
    ds.apply(lambda x: int(x['label']), new_field_name='label', is_target=True)

    if vocabs is None:
        vocab = Vocabulary(max_size=30000,
                           min_freq=2,
                           unknown='<unk>',
                           padding='<pad>')
        ds.apply(lambda x: [vocab.add(word) for word in x['sentence']])
        vocab.build_vocab()
    else:
        vocab = vocabs

    ds.apply(lambda x: [vocab.to_index(w) for w in x['sentence']],
             new_field_name='data',
             is_input=True)

    if not load_glove:
        print(f"successful load dataset from {path}")
        return ds

    embedding, _ = EmbedLoader().load_embedding(300, glove_path, 'glove',
                                                vocab)

    print(f"successful load dataset and embedding from {path}")

    return ds, embedding, vocab
Example #4
def main():
    parser = argparse.ArgumentParser(description='Summarization Model')

    # Where to find data
    parser.add_argument(
        '--data_path',
        type=str,
        default='/remote-home/dqwang/Datasets/CNNDM/train.label.jsonl',
        help='Path expression to pickle datafiles.')
    parser.add_argument(
        '--valid_path',
        type=str,
        default='/remote-home/dqwang/Datasets/CNNDM/val.label.jsonl',
        help='Path expression to pickle valid datafiles.')
    parser.add_argument('--vocab_path',
                        type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/vocab',
                        help='Path expression to text vocabulary file.')

    # Important settings
    parser.add_argument('--mode',
                        choices=['train', 'test'],
                        default='train',
                        help='must be one of train/test')
    parser.add_argument('--embedding',
                        type=str,
                        default='glove',
                        choices=['word2vec', 'glove', 'elmo', 'bert'],
                        help='must be one of word2vec/glove/elmo/bert')
    parser.add_argument('--sentence_encoder',
                        type=str,
                        default='transformer',
                        choices=['bilstm', 'deeplstm', 'transformer'],
                        help='must be one of LSTM/Transformer')
    parser.add_argument('--sentence_decoder',
                        type=str,
                        default='SeqLab',
                        choices=['PN', 'SeqLab'],
                        help='must be one of PN/SeqLab')
    parser.add_argument(
        '--restore_model',
        type=str,
        default='None',
        help=
        'Restore model for further training. [bestmodel/bestFmodel/earlystop/None]'
    )

    # Where to save output
    parser.add_argument('--save_root',
                        type=str,
                        default='save/',
                        help='Root directory for all model.')
    parser.add_argument('--log_root',
                        type=str,
                        default='log/',
                        help='Root directory for all logging.')

    # Hyperparameters
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='GPU ID to use. Set -1 for CPU [default: 0]')
    parser.add_argument('--cuda',
                        action='store_true',
                        default=False,
                        help='use cuda')
    parser.add_argument(
        '--vocab_size',
        type=int,
        default=100000,
        help=
        'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.'
    )
    parser.add_argument('--n_epochs',
                        type=int,
                        default=20,
                        help='Number of epochs [default: 20]')
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help='Mini batch size [default: 32]')

    parser.add_argument('--word_embedding',
                        action='store_true',
                        default=True,
                        help='whether to use Word embedding')
    parser.add_argument('--embedding_path',
                        type=str,
                        default='/remote-home/dqwang/Glove/glove.42B.300d.txt',
                        help='Path expression to external word embedding.')
    parser.add_argument('--word_emb_dim',
                        type=int,
                        default=300,
                        help='Word embedding size [default: 300]')
    parser.add_argument(
        '--embed_train',
        action='store_true',
        default=False,
        help='whether to train Word embedding [default: False]')
    parser.add_argument('--min_kernel_size',
                        type=int,
                        default=1,
                        help='kernel min length for CNN [default:1]')
    parser.add_argument('--max_kernel_size',
                        type=int,
                        default=7,
                        help='kernel max length for CNN [default:7]')
    parser.add_argument('--output_channel',
                        type=int,
                        default=50,
                        help='output channel: repeated times for one kernel')
    parser.add_argument('--use_orthnormal_init',
                        action='store_true',
                        default=True,
                        help='use orthonormal init for lstm [default: True]')
    parser.add_argument(
        '--sent_max_len',
        type=int,
        default=100,
        help='max length of sentences (max source text sentence tokens)')
    parser.add_argument(
        '--doc_max_timesteps',
        type=int,
        default=50,
        help='max length of documents (max timesteps of documents)')
    parser.add_argument('--save_label',
                        action='store_true',
                        default=False,
                        help='whether to save the predicted labels during testing')

    # Training
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        help='learning rate')
    parser.add_argument('--lr_descent',
                        action='store_true',
                        default=False,
                        help='learning rate descent')
    parser.add_argument('--grad_clip',
                        action='store_true',
                        default=False,
                        help='for gradient clipping')
    parser.add_argument(
        '--max_grad_norm',
        type=float,
        default=10,
        help='for gradient clipping max gradient normalization')

    # test
    parser.add_argument('-m',
                        type=int,
                        default=3,
                        help='decode summary length')
    parser.add_argument(
        '--test_model',
        type=str,
        default='evalbestmodel',
        help=
        'choose different model to test [evalbestmodel/evalbestFmodel/trainbestmodel/trainbestFmodel/earlystop]'
    )
    parser.add_argument('--use_pyrouge',
                        action='store_true',
                        default=False,
                        help='use_pyrouge')

    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.set_printoptions(threshold=50000)

    # File paths
    DATA_FILE = args.data_path
    VALID_FILE = args.valid_path
    VOCAB_FILE = args.vocab_path
    LOG_PATH = args.log_root

    # # train_log setting
    if not os.path.exists(LOG_PATH):
        if args.mode == "train":
            os.makedirs(LOG_PATH)
        else:
            raise Exception(
                "[Error] Logdir %s doesn't exist. Run in train mode to create it."
                % (LOG_PATH))
    nowTime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    log_path = os.path.join(LOG_PATH, args.mode + "_" + nowTime)
    # logger = _init_logger(path=log_path)
    # file_handler = logging.FileHandler(log_path)
    # file_handler.setFormatter(formatter)
    # logger.addHandler(file_handler)

    logger.info("Pytorch %s", torch.__version__)

    # dataset
    hps = args
    dbPipe = ExtCNNDMPipe(vocab_size=hps.vocab_size,
                          vocab_path=VOCAB_FILE,
                          sent_max_len=hps.sent_max_len,
                          doc_max_timesteps=hps.doc_max_timesteps)
    if hps.mode == 'test':
        hps.recurrent_dropout_prob = 0.0
        hps.atten_dropout_prob = 0.0
        hps.ffn_dropout_prob = 0.0
        logger.info(hps)
        paths = {"test": DATA_FILE}
        db = dbPipe.process_from_file(paths)
    else:
        paths = {"train": DATA_FILE, "valid": VALID_FILE}
        db = dbPipe.process_from_file(paths)

    # embedding
    if args.embedding == "glove":
        vocab = db.get_vocab("vocab")
        embed = torch.nn.Embedding(len(vocab), hps.word_emb_dim)
        if hps.word_embedding:
            embed_loader = EmbedLoader()
            pretrained_weight = embed_loader.load_with_vocab(
                hps.embedding_path, vocab)  # words missing from the file are randomly initialized
            embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
            embed.weight.requires_grad = hps.embed_train
    else:
        logger.error("[ERROR] embedding To Be Continued!")
        sys.exit(1)

    # model
    if args.sentence_encoder == "transformer" and args.sentence_decoder == "SeqLab":
        model_param = json.load(open("config/transformer.config", "rb"))
        hps.__dict__.update(model_param)
        model = TransformerModel(hps, embed)
    elif args.sentence_encoder == "deeplstm" and args.sentence_decoder == "SeqLab":
        model_param = json.load(open("config/deeplstm.config", "rb"))
        hps.__dict__.update(model_param)
        model = SummarizationModel(hps, embed)
    else:
        logger.error("[ERROR] Model To Be Continued!")
        sys.exit(1)
    if hps.cuda:
        model = model.cuda()
        logger.info("[INFO] Use cuda")

    logger.info(hps)

    if hps.mode == 'train':
        db.get_dataset("valid").set_target("text", "summary")
        setup_training(model, db.get_dataset("train"), db.get_dataset("valid"),
                       hps)
    elif hps.mode == 'test':
        logger.info("[INFO] Decoding...")
        db.get_dataset("test").set_target("text", "summary")
        run_test(model, db.get_dataset("test"), hps, limited=hps.limited)
    else:
        logger.error("The 'mode' flag must be one of train/eval/test")
        raise ValueError("The 'mode' flag must be one of train/eval/test")
Example #5
# In[5]:

# 1. get dataset
dataset = load_data('data/train.tsv', 1)
train_dataset, val_dataset = dataset.split(0.1)
test_dataset = load_data('data/test.tsv', 0)
print("train_dataset size: ", train_dataset.get_length())
print("val_dataset size: ", val_dataset.get_length())
print("test_dataset size: ", test_dataset.get_length())

# In[6]:

# 2. get vocabulary
if use_pretrain:
    loader = EmbedLoader()
    pre_embed, vocab = loader.load_without_vocab(embed_path, normalize=False)
    embedding_size = pre_embed.shape[1]
else:
    vocab = Vocabulary(min_freq=2).from_dataset(dataset, field_name='words')
print("vocabulary size: ", len(vocab))

# In[7]:

# 3. word to index
vocab.index_dataset(train_dataset, field_name='words', new_field_name='words')
vocab.index_dataset(val_dataset, field_name='words', new_field_name='words')
vocab.index_dataset(test_dataset, field_name='words', new_field_name='words')

# ### 3. Build CNN model
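
A small follow-on sketch for the step hinted at by the last comment: the matrix returned by load_without_vocab can initialize the model's embedding layer before the CNN is built. The 300-dimension fallback is an assumption, since embedding_size is only set in the pretrained branch above.

import torch
import torch.nn as nn

if use_pretrain:
    # start from the pretrained matrix and keep fine-tuning it
    embed = nn.Embedding.from_pretrained(torch.from_numpy(pre_embed).float(), freeze=False)
else:
    embed = nn.Embedding(len(vocab), 300)  # assumed embedding dimension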
Example #6
    def test_case(self):
        vocab = Vocabulary()
        vocab.update(["the", "in", "I", "to", "of", "hahaha"])
        embedding = EmbedLoader().fast_load_embedding(
            50, "test/data_for_tests/glove.6B.50d_test.txt", vocab)
        self.assertEqual(tuple(embedding.shape), (len(vocab), 50))