Example #1
        train_features, train_labels, f_map, l_map = utils.generate_corpus(
            lines, if_shrink_feature=True, thresholds=0)

        f_set = {v for v in f_map}
        f_map = utils.shrink_features(f_map, train_features, args.mini_count)

        dt_f_set = functools.reduce(lambda x, y: x | y,
                                    map(lambda t: set(t), dev_features), f_set)

        if not args.rand_embedding:
            print("feature size: '{}'".format(len(f_map)))
            print('loading embedding')
            if args.fine_tune:  # note: this resets f_map, so the pretrained embeddings are not fine-tuned
                f_map = {'<eof>': 0}
            f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(
                args.emb_file, ' ', f_map, dt_f_set, args.caseless, args.unk,
                args.embedding_dim)
            print("embedding size: '{}'".format(len(f_map)))

        l_set = functools.reduce(lambda x, y: x | y,
                                 map(lambda t: set(t), dev_labels))
        l_set = functools.reduce(lambda x, y: x | y,
                                 map(lambda t: set(t), test_labels), l_set)
        for label in l_set:
            if label not in l_map:
                l_map[label] = len(l_map)

    # construct dataset
    dataset = utils.construct_bucket_mean_vb(train_features, train_labels,
                                             f_map, l_map, args.caseless)
    dev_dataset = utils.construct_bucket_mean_vb(dev_features, dev_labels,
                                                 f_map, l_map, args.caseless)
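
The functools.reduce(lambda x, y: x | y, map(lambda t: set(t), ...)) idiom used above (and in the examples below) unions the per-sentence token sets into one vocabulary set. A minimal self-contained sketch with made-up sentence data:

import functools

# toy corpus: each sentence is a list of tokens (illustrative data only)
dev_features = [['EU', 'rejects', 'German', 'call'], ['Peter', 'Blackburn']]
seed_set = {'<unk>'}

# union the token set of every sentence, starting from seed_set
dt_f_set = functools.reduce(lambda x, y: x | y,
                            map(lambda t: set(t), dev_features), seed_set)
print(dt_f_set)  # seed_set plus every distinct token from dev_features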
Example #2
        l_set = functools.reduce(lambda x, y: x | y,
                                 map(lambda t: set(t), dev_labels[i]), l_set)
        l_set = functools.reduce(lambda x, y: x | y,
                                 map(lambda t: set(t), test_labels[i]), l_set)

    if not args.rand_embedding:
        print("feature size: '{}'".format(len(f_map)))
        print('loading embedding')
        if args.fine_tune:  # note: this resets f_map, so the pretrained embeddings are not fine-tuned
            f_map = {'<eof>': 0}
        f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(
            args.emb_file,
            ' ',
            f_map,
            dt_f_set,
            args.caseless,
            args.unk,
            args.word_dim,
            shrink_to_corpus=args.shrink_embedding)
        print("embedding size: '{}'".format(len(f_map)))

    for label in l_set:
        if label not in l_map:
            l_map[label] = len(l_map)

    print('constructing dataset')
    for i in range(file_num):
        # construct dataset
        dataset, forw_corp, back_corp = utils.construct_bucket_mean_vb_wc(
            train_features[i], train_labels[i], l_map, char_map, f_map,
            args.caseless)
Example #3
        train_features, train_labels, f_map, l_map, c_map = utils.generate_corpus_char(
            lines, if_shrink_c_feature=True, c_thresholds=args.mini_count,
            if_shrink_w_feature=False)
        
        f_set = {v for v in f_map}
        f_map = utils.shrink_features(f_map, train_features, args.mini_count)

        if args.rand_embedding:
            print("embedding size: '{}'".format(len(f_map)))
            in_doc_words = len(f_map)
        else:
            dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_features), f_set)
            dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_features), dt_f_set)
            print("feature size: '{}'".format(len(f_map)))
            print('loading embedding')
            if args.fine_tune:  # note: this resets f_map, so the pretrained embeddings are not fine-tuned
                f_map = {'<eof>': 0}
            f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(
                args.emb_file, ' ', f_map, dt_f_set, args.caseless, args.unk,
                args.word_dim, shrink_to_corpus=args.shrink_embedding)
            print("embedding size: '{}'".format(len(f_map)))

        l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_labels))
        l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_labels), l_set)
        for label in l_set:
            if label not in l_map:
                l_map[label] = len(l_map)
    
    print('constructing dataset')
    # construct dataset
    dataset, forw_corp, back_corp = utils.construct_bucket_mean_vb_wc(
        train_features, train_labels, l_map, c_map, f_map, args.caseless)
    dev_dataset, forw_dev, back_dev = utils.construct_bucket_mean_vb_wc(
        dev_features, dev_labels, l_map, c_map, f_map, args.caseless)
    test_dataset, forw_test, back_test = utils.construct_bucket_mean_vb_wc(
        test_features, test_labels, l_map, c_map, f_map, args.caseless)

    dataset_loader = [
        torch.utils.data.DataLoader(tup, args.batch_size, shuffle=True,
                                    drop_last=False) for tup in dataset
    ]
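
In this example, shrink_features (called above with args.mini_count) prunes the word feature map before the pretrained embeddings are loaded. The sketch below only illustrates what such frequency pruning amounts to; shrink_by_count is a hypothetical stand-in, not the repository's utils.shrink_features:

from collections import Counter

def shrink_by_count(feature_map, sentences, min_count):
    # count every token occurrence across the training sentences
    counts = Counter(tok for sent in sentences for tok in sent)
    # keep only features seen at least min_count times, re-indexed densely
    kept = [f for f in feature_map if counts[f] >= min_count]
    return {f: i for i, f in enumerate(kept)}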
Example #4
    '''
    build model
    '''
    # print 'building model'
    if args.use_crf:
        print('building model with CRF')
        ner_model = LSTM_CRF(
            len(l_map) - 1, len(c_map), args.char_dim, args.char_hidden,
            args.word_dim, args.word_hidden, args.win_size, len(w_map),
            args.drop_out, segtgt_size=len(seg_l_map) - 1,
            enttgt_size=len(ent_l_map) - 1, if_highway=args.high_way,
            ex_embedding_dim=args.ex_word_dim, segment_loss=args.seg_loss,
            entity_loss=args.ent_loss)
    else:
        print('building model w/o CRF')
        ner_model = LSTM_TH(
            len(l_map) - 1, len(c_map), args.char_dim, args.char_hidden,
            args.word_dim, args.word_hidden, args.win_size, len(w_map),
            args.drop_out, segtgt_size=len(seg_l_map) - 1,
            enttgt_size=len(ent_l_map) - 1, if_highway=args.high_way,
            ex_embedding_dim=args.ex_word_dim, segment_loss=args.seg_loss,
            entity_loss=args.ent_loss)

    '''
    load pretrained embedding
    '''
    if not args.rand_embedding:
        print('loading embeddings')
        embedding_tensor = utils.load_embedding_wlm(args.emb_file, w_map, args.word_dim)
        if args.ex_emb_file:
            print('loading extra embeddings')
            embedding_tensor2 = utils.load_embedding_wlm(args.ex_emb_file, w_map, args.ex_word_dim)
    else:
        embedding_tensor = torch.FloatTensor(len(w_map), args.word_dim)
        init_embedding(embedding_tensor)

    print("word embedding size: '{}, {}'".format(len(w_map), args.word_dim))
    if torch.cuda.is_available():
        embedding_tensor = embedding_tensor.cuda()
        if args.ex_emb_file:
            embedding_tensor2 = embedding_tensor2.cuda()
    ner_model.load_word_embedding(embedding_tensor, args.no_fine_tune)
    if args.ex_emb_file:
        ner_model.load_word_embedding(embedding_tensor2, args.no_fine_tune, extra=True)
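
init_embedding is called above, but its body is not part of the snippet. A common implementation, assumed here rather than taken from this example's source, fills the tensor uniformly with a bound derived from the embedding dimension:

import math
import torch
import torch.nn as nn

def init_embedding(embedding_tensor):
    # uniform init in [-b, b] with b = sqrt(3 / dim); a typical choice,
    # assumed here because the helper's body is not shown in the example
    bias = math.sqrt(3.0 / embedding_tensor.size(1))
    nn.init.uniform_(embedding_tensor, -bias, bias)

# usage matching the snippet above
embedding_tensor = torch.FloatTensor(1000, 100)  # (vocabulary size, word_dim)
init_embedding(embedding_tensor)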
Example #5
                if '<pad>' in l_map.keys():
                    l_map.pop('<pad>')
                l_map[label] = len(l_map)
                l_map['<start>'] = len(l_map)
                l_map['<pad>'] = len(l_map)

        if not args.rand_char_embedding:
            print("feature size: '{}'".format(len(f_map)))
            print('loading embedding')
            if args.fine_tune:  # note: this resets f_map, so the pretrained embeddings are not fine-tuned
                f_map = {'<eof>': 0}
            f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(
                args.emb_file,
                ' ',
                f_map,
                dt_f_set,
                args.caseless,
                args.unk,
                args.embedding_dim,
                shrink_to_corpus=args.shrink_embedding)
            print("embedding size: '{}'".format(len(f_map)))

        if not args.rand_word_embedding:
            print("feature size: '{}'".format(len(lexicon_f_map)))
            print('loading embedding')
            if args.fine_tune:  # note: this resets lexicon_f_map, so the pretrained embeddings are not fine-tuned
                lexicon_f_map = {'<eof>': 0}
            lexicon_f_map, word_embedding_tensor, word_in_doc_words = utils.load_embedding_wlm(
                args.emb_file,
                ' ',
                lexicon_f_map,
            print("no checkpoint found at: '{}'".format(args.load_check_point))
    else:
        print('constructing coding table')

        if not args.rand_embedding:
            print("char size: '{}'".format(len(alphabet.char2idx)))
            print("bichar size: '{}'".format(len(alphabet.bichar2idx)))
            print("word size: '{}'".format(len(alphabet.word2idx)))
            print("pos size: '{}'".format(len(alphabet.pos2idx)))
            print("action size: '{}'".format(len(alphabet.label2idx)))

            print('loading embedding')

            pretrain_char_embedding_tensor, idx2char, char2idx = utils.load_embedding_wlm(
                args.char_emb_file,
                ' ',
                static_alphabet.char2idx,
                args.char_embedding_dim,
                shrink_to_corpus=args.shrink_embedding)
            static_alphabet.idx2char = idx2char
            static_alphabet.char2idx = char2idx

            pretrain_bichar_embedding_tensor, idx2bichar, bichar2idx = utils.load_embedding_wlm(
                args.bichar_emb_file,
                ' ',
                static_alphabet.bichar2idx,
                args.char_embedding_dim,
                shrink_to_corpus=args.shrink_embedding)
            static_alphabet.idx2bichar = idx2bichar
            static_alphabet.bichar2idx = bichar2idx

            print("char embedding size: '{}'".format(
Example #7
    def read_dataset(self, file_dict, dataset_name, *args, **kwargs):
        print('loading corpus')
        self.file_num = len(self.args.train_file)
        for i in range(self.file_num):
            with codecs.open(self.args.train_file[i], 'r', 'utf-8') as f:
                lines0 = f.readlines()
                lines0 = lines0[0:2000]  # cap each file at its first 2000 lines
                # print (len(lines0))
            self.lines.append(lines0)
        for i in range(self.file_num):
            with codecs.open(self.args.dev_file[i], 'r', 'utf-8') as f:
                dev_lines0 = f.readlines()
                dev_lines0 = dev_lines0[0:2000]
            self.dev_lines.append(dev_lines0)
        for i in range(self.file_num):
            with codecs.open(self.args.test_file[i], 'r', 'utf-8') as f:
                test_lines0 = f.readlines()
                test_lines0 = test_lines0[0:2000]
            self.test_lines.append(test_lines0)

        for i in range(self.file_num):
            dev_features0, dev_labels0 = utils.read_corpus(self.dev_lines[i])
            test_features0, test_labels0 = utils.read_corpus(
                self.test_lines[i])

            self.dev_features.append(dev_features0)
            self.test_features.append(test_features0)
            self.dev_labels.append(dev_labels0)
            self.test_labels.append(test_labels0)

            if self.args.output_annotation:  # NEW
                test_word0, test_word_tag0 = utils.read_features(
                    self.test_lines[i])
                self.test_word.append(test_word0)
                self.test_word_tag.append(test_word_tag0)
            #print (len(self.test_word), len(self.test_labels))
            if self.args.load_check_point:
                if os.path.isfile(self.args.load_check_point):
                    print("loading checkpoint: '{}'".format(
                        self.args.load_check_point))
                    self.checkpoint_file = torch.load(
                        self.args.load_check_point)
                    self.args.start_epoch = self.checkpoint_file['epoch']
                    self.f_map = self.checkpoint_file['f_map']
                    self.l_map = self.checkpoint_file['l_map']
                    c_map = self.checkpoint_file['c_map']
                    self.in_doc_words = self.checkpoint_file['in_doc_words']
                    train_features0, train_labels0 = utils.read_corpus(
                        self.lines[i])
                else:
                    print("no checkpoint found at: '{}'".format(
                        self.args.load_check_point))
            else:
                print('constructing coding table')
                train_features0, train_labels0, self.f_map, self.l_map, self.char_count = utils.generate_corpus_char(
                    self.lines[i],
                    self.f_map,
                    self.l_map,
                    self.char_count,
                    c_thresholds=self.args.mini_count,
                    if_shrink_w_feature=False)
            self.train_features.append(train_features0)
            self.train_labels.append(train_labels0)

            self.train_features_tot += train_features0

        shrink_char_count = [
            k for (k, v) in iter(self.char_count.items())
            if v >= self.args.mini_count
        ]
        self.char_map = {
            shrink_char_count[ind]: ind
            for ind in range(0, len(shrink_char_count))
        }

        self.char_map['<u>'] = len(self.char_map)  # unk for char
        self.char_map[' '] = len(self.char_map)  # concat for char
        self.char_map['\n'] = len(self.char_map)  # eof for char

        f_set = {v for v in self.f_map}
        dt_f_set = f_set
        self.f_map = utils.shrink_features(self.f_map, self.train_features_tot,
                                           self.args.mini_count)
        l_set = set()

        for i in range(self.file_num):
            dt_f_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t),
                                        self.dev_features[i]), dt_f_set)
            dt_f_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t),
                                        self.test_features[i]), dt_f_set)

            l_set = functools.reduce(lambda x, y: x | y,
                                     map(lambda t: set(t), self.dev_labels[i]),
                                     l_set)
            l_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t), self.test_labels[i]),
                l_set)

        if not self.args.rand_embedding:
            print("feature size: '{}'".format(len(self.f_map)))
            print('loading embedding')
            if self.args.fine_tune:  # note: this resets f_map, so the pretrained embeddings are not fine-tuned
                self.f_map = {'<eof>': 0}
            self.f_map, self.embedding_tensor, self.in_doc_words = utils.load_embedding_wlm(
                self.args.emb_file,
                ' ',
                self.f_map,
                dt_f_set,
                self.args.caseless,
                self.args.unk,
                self.args.word_dim,
                shrink_to_corpus=self.args.shrink_embedding)
            print("embedding size: '{}'".format(len(self.f_map)))

        for label in l_set:
            if label not in self.l_map:
                self.l_map[label] = len(self.l_map)

        print('constructing dataset')
        for i in range(self.file_num):
            # construct dataset
            dataset, forw_corp, back_corp = utils.construct_bucket_mean_vb_wc(
                self.train_features[i], self.train_labels[i], self.l_map,
                self.char_map, self.f_map, self.args.caseless)
            dev_dataset, forw_dev, back_dev = utils.construct_bucket_mean_vb_wc(
                self.dev_features[i], self.dev_labels[i], self.l_map,
                self.char_map, self.f_map, self.args.caseless)
            test_dataset, forw_test, back_test = utils.construct_bucket_mean_vb_wc(
                self.test_features[i], self.test_labels[i], self.l_map,
                self.char_map, self.f_map, self.args.caseless)
            self.dataset_loader.append([
                torch.utils.data.DataLoader(tup,
                                            self.args.batch_size,
                                            shuffle=True,
                                            drop_last=False) for tup in dataset
            ])
            self.dev_dataset_loader.append([
                torch.utils.data.DataLoader(tup,
                                            50,
                                            shuffle=False,
                                            drop_last=False)
                for tup in dev_dataset
            ])
            self.test_dataset_loader.append([
                torch.utils.data.DataLoader(tup,
                                            50,
                                            shuffle=False,
                                            drop_last=False)
                for tup in test_dataset
            ])
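
After read_dataset finishes, self.dataset_loader holds one entry per training file, and each entry is itself a list of DataLoaders, one per length bucket. A hedged sketch of consuming that nested structure for a single epoch; what each batch actually contains is an assumption here, since it depends on construct_bucket_mean_vb_wc:

def iterate_epoch(dataset_loader):
    # dataset_loader: list (one per training file) of lists of DataLoaders
    # (one per length bucket); yields every mini-batch exactly once
    for bucket_loaders in dataset_loader:
        for loader in bucket_loaders:
            for batch in loader:
                yield batch

# usage sketch from inside the class above:
#     for batch in iterate_epoch(self.dataset_loader):
#         ...  # unpack according to construct_bucket_mean_vb_wc's layout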