Example #1
            in_doc_words = checkpoint_file['in_doc_words']
            train_features, train_labels = utils.read_corpus(lines)
        else:
            print("no checkpoint found at: '{}'".format(args.load_check_point))
            sys.exit()
    else:
        print('constructing coding table')

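        # build the word map (f_map) and character map (c_map) from the raw lines;
        # the character feature set is shrunk using the mini_count threshold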
        train_features, train_labels, f_map, _, c_map = \
            utils.generate_corpus_char(lines, if_shrink_c_feature=True,
                                       c_thresholds=args.mini_count,
                                       if_shrink_w_feature=False)

        f_set = {v for v in f_map}

        f_map = utils.shrink_features(f_map, train_features, args.mini_count)
        dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_features), f_set)
        dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_features), dt_f_set)

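        # load pretrained word embeddings, keeping entries for the words collected from train/dev/test (dt_f_set)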
        f_map, embedding_tensor, in_doc_words = utils.load_embedding(
            args.emb_file, ' ', f_map, dt_f_set, args.unk,
            args.word_embedding_dim, shrink_to_corpus=args.shrink_embedding)

        l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_labels))
        l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_labels), l_set)

    print('constructing dataset')
    dataset, dataset_onlycrf = utils.construct_bucket_mean_vb_wc(
        train_features, train_labels, CRF_l_map, SCRF_l_map, c_map, f_map,
        SCRF_stop_tag=SCRF_l_map['<STOP>'], ALLOW_SPANLEN=args.allowspan,
        train_set=True)
    dev_dataset = utils.construct_bucket_mean_vb_wc(
        dev_features, dev_labels, CRF_l_map, SCRF_l_map, c_map, f_map,
        SCRF_stop_tag=SCRF_l_map['<STOP>'], train_set=False)
    test_dataset = utils.construct_bucket_mean_vb_wc(
        test_features, test_labels, CRF_l_map, SCRF_l_map, c_map, f_map,
        SCRF_stop_tag=SCRF_l_map['<STOP>'], train_set=False)

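    # wrap each bucket in a DataLoader; CRF-only buckets (if any) use a fixed batch size of 3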
    dataset_loader = [torch.utils.data.DataLoader(tup, args.batch_size, shuffle=True, drop_last=False) for tup in dataset]
    dataset_loader_crf = [torch.utils.data.DataLoader(tup, 3, shuffle=True, drop_last=False) for tup in dataset_onlycrf] if dataset_onlycrf else None
Example #2
        print("rebuild_maps")
        if args.combine:
            train_features, train_labels, token2idx, tag2idx, chr2idx = read_combine_data(
                args.train_file, args.dev_file, rebuild_maps, args.mini_count)
        else:
            train_features, train_labels, token2idx, tag2idx, chr2idx = read_data(
                args.train_file, rebuild_maps, args.mini_count)
        dev_features, dev_labels = read_data(args.dev_file)
        test_features, test_labels = read_data(args.test_file)

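        # collect the training vocabulary and shrink away tokens rarer than mini_count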
        token_set = {v for v in token2idx}
        dt_token_set = token_set

        train_features_tot = functools.reduce(lambda x, y: x + y,
                                              train_features)
        token2idx = utils.shrink_features(token2idx, train_features_tot,
                                          args.mini_count)

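        # fold tokens from every dev/test corpus into dt_token_set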
        for i in range(num_corpus):
            dt_token_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t), dev_features[i]),
                dt_token_set)
            dt_token_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t), test_features[i]),
                dt_token_set)

        if not args.rand_embedding:
            print("feature size: '{}'".format(len(token2idx)))
            print('loading embedding')
            if args.fine_tune:  # note: in this code, the flag means fine-tuning is NOT performed
                token2idx = {'<eof>': 0}
            try:
Example #3
    def read_dataset(self, file_dict, dataset_name, *args, **kwargs):
        print('loading corpus')
        self.file_num = len(self.args.train_file)
        for i in range(self.file_num):
            with codecs.open(self.args.train_file[i], 'r', 'utf-8') as f:
                lines0 = f.readlines()
                lines0 = lines0[0:2000]
                # print (len(lines0))
            self.lines.append(lines0)
        for i in range(self.file_num):
            with codecs.open(self.args.dev_file[i], 'r', 'utf-8') as f:
                dev_lines0 = f.readlines()
                dev_lines0 = dev_lines0[0:2000]
            self.dev_lines.append(dev_lines0)
        for i in range(self.file_num):
            with codecs.open(self.args.test_file[i], 'r', 'utf-8') as f:
                test_lines0 = f.readlines()
                test_lines0 = test_lines0[0:2000]
            self.test_lines.append(test_lines0)

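        # for each corpus: read dev/test features and labels, then either restore
        # the maps from a checkpoint or build them from the training lines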
        for i in range(self.file_num):
            dev_features0, dev_labels0 = utils.read_corpus(self.dev_lines[i])
            test_features0, test_labels0 = utils.read_corpus(
                self.test_lines[i])

            self.dev_features.append(dev_features0)
            self.test_features.append(test_features0)
            self.dev_labels.append(dev_labels0)
            self.test_labels.append(test_labels0)

            if self.args.output_annotation:  # NEW
                test_word0, test_word_tag0 = utils.read_features(
                    self.test_lines[i])
                self.test_word.append(test_word0)
                self.test_word_tag.append(test_word_tag0)
            #print (len(self.test_word), len(self.test_labels))
            if self.args.load_check_point:
                if os.path.isfile(self.args.load_check_point):
                    print("loading checkpoint: '{}'".format(
                        self.args.load_check_point))
                    self.checkpoint_file = torch.load(
                        self.args.load_check_point)
                    self.args.start_epoch = self.checkpoint_file['epoch']
                    self.f_map = self.checkpoint_file['f_map']
                    self.l_map = self.checkpoint_file['l_map']
                    c_map = self.checkpoint_file['c_map']
                    self.in_doc_words = self.checkpoint_file['in_doc_words']
                    self.train_features, self.train_labels = utils.read_corpus(
                        self.lines[i])
                else:
                    print("no checkpoint found at: '{}'".format(
                        self.args.load_check_point))
            else:
                print('constructing coding table')
                train_features0, train_labels0, self.f_map, self.l_map, self.char_count = utils.generate_corpus_char(
                    self.lines[i],
                    self.f_map,
                    self.l_map,
                    self.char_count,
                    c_thresholds=self.args.mini_count,
                    if_shrink_w_feature=False)
            self.train_features.append(train_features0)
            self.train_labels.append(train_labels0)

            self.train_features_tot += train_features0

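        # keep only characters that occur at least mini_count times and give each an index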
        shrink_char_count = [
            k for (k, v) in iter(self.char_count.items())
            if v >= self.args.mini_count
        ]
        self.char_map = {
            shrink_char_count[ind]: ind
            for ind in range(0, len(shrink_char_count))
        }

        self.char_map['<u>'] = len(self.char_map)  # unk for char
        self.char_map[' '] = len(self.char_map)  # concat for char
        self.char_map['\n'] = len(self.char_map)  # eof for char

        f_set = {v for v in self.f_map}
        dt_f_set = f_set
        self.f_map = utils.shrink_features(self.f_map, self.train_features_tot,
                                           self.args.mini_count)
        l_set = set()

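        # merge dev/test word features into dt_f_set and dev/test labels into l_set across all corpora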
        for i in range(self.file_num):
            dt_f_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t),
                                        self.dev_features[i]), dt_f_set)
            dt_f_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t),
                                        self.test_features[i]), dt_f_set)

            l_set = functools.reduce(lambda x, y: x | y,
                                     map(lambda t: set(t), self.dev_labels[i]),
                                     l_set)
            l_set = functools.reduce(
                lambda x, y: x | y, map(lambda t: set(t), self.test_labels[i]),
                l_set)

        if not self.args.rand_embedding:
            print("feature size: '{}'".format(len(self.f_map)))
            print('loading embedding')
            if self.args.fine_tune:  # note: in this code, the flag means fine-tuning is NOT performed
                self.f_map = {'<eof>': 0}
            self.f_map, self.embedding_tensor, self.in_doc_words = utils.load_embedding_wlm(
                self.args.emb_file,
                ' ',
                self.f_map,
                dt_f_set,
                self.args.caseless,
                self.args.unk,
                self.args.word_dim,
                shrink_to_corpus=self.args.shrink_embedding)
            print("embedding size: '{}'".format(len(self.f_map)))

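        # add labels seen only in dev/test to the label map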
        for label in l_set:

            if label not in self.l_map:
                self.l_map[label] = len(self.l_map)

        print('constructing dataset')
        for i in range(self.file_num):
            # construct dataset
            dataset, forw_corp, back_corp = utils.construct_bucket_mean_vb_wc(
                self.train_features[i], self.train_labels[i], self.l_map,
                self.char_map, self.f_map, self.args.caseless)
            dev_dataset, forw_dev, back_dev = utils.construct_bucket_mean_vb_wc(
                self.dev_features[i], self.dev_labels[i], self.l_map,
                self.char_map, self.f_map, self.args.caseless)
            test_dataset, forw_test, back_test = utils.construct_bucket_mean_vb_wc(
                self.test_features[i], self.test_labels[i], self.l_map,
                self.char_map, self.f_map, self.args.caseless)
            self.dataset_loader.append([
                torch.utils.data.DataLoader(tup,
                                            self.args.batch_size,
                                            shuffle=True,
                                            drop_last=False) for tup in dataset
            ])
            self.dev_dataset_loader.append([
                torch.utils.data.DataLoader(tup,
                                            50,
                                            shuffle=False,
                                            drop_last=False)
                for tup in dev_dataset
            ])
            self.test_dataset_loader.append([
                torch.utils.data.DataLoader(tup,
                                            50,
                                            shuffle=False,
                                            drop_last=False)
                for tup in test_dataset
            ])
Example #4
            f_map = checkpoint_file['f_map']
            l_map = checkpoint_file['l_map']
            c_map = checkpoint_file['c_map']
            in_doc_words = checkpoint_file['in_doc_words']
            train_features, train_labels = utils.read_corpus(train_lines)
        else:
            print("no checkpoint found at: '{}'".format(args.load_check_point))
    else:
        print('constructing coding table')

        # converting format
        all_features, all_labels, f_map, l_map, c_map = utils.generate_corpus_char(
            train_lines + dev_lines + test_lines + cotrain_lines,
            if_shrink_c_feature=True, c_thresholds=args.mini_count,
            if_shrink_w_feature=False)
        with open('c_map.json', 'w') as f:
            json.dump(c_map, f)
        f_set = {v for v in f_map}
        f_map = utils.shrink_features(f_map, all_features, args.mini_count)
        if args.rand_embedding:
            print("embedding size: '{}'".format(len(f_map)))
            in_doc_words = len(f_map)
        else:
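            # merge dev/test vocabulary before loading the pretrained embeddings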
            dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_features), f_set)
            dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_features), dt_f_set)
            print("feature size: '{}'".format(len(f_map)))
            print('loading embedding')
            if args.fine_tune:  # note: in this code, the flag means fine-tuning is NOT performed
                f_map = {'<eof>': 0}
            
            f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(
                args.emb_file, ' ', f_map, dt_f_set, args.caseless, args.unk,
                args.word_dim, shrink_to_corpus=args.shrink_embedding)
            print("embedding size: '{}'".format(len(f_map)))
            
Example #5
            checkpoint_file = torch.load(args.load_check_point)
            args.start_epoch = checkpoint_file['epoch']
            f_map = checkpoint_file['f_map']
            l_map = checkpoint_file['l_map']
            train_features, train_labels = utils.read_corpus(lines)
        else:
            print("no checkpoint found at: '{}'".format(args.load_check_point))
    else:
        print('constructing coding table')

        # converting format
        train_features, train_labels, f_map, l_map = utils.generate_corpus(
            lines, if_shrink_feature=True, thresholds=0)
        f_set = {v for v in f_map}  # f_set holds just the keys of f_map, i.e. every distinct word in the training data
        f_map = utils.shrink_features(
            f_map, train_features,
            args.mini_count)  # args.mini_count defaults to 5; words appearing at most 5 times are mapped to unk

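        # merge the test-set words into the set used when loading pretrained embeddings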
        dt_f_set = functools.reduce(lambda x, y: x | y,
                                    map(lambda t: set(t), test_features),
                                    f_set)

        if not args.rand_embedding:
            print("feature size: '{}'".format(
                len(f_map)))  #得到的feature是将稀少的字用unk代替了的
            print('loading embedding')
            if args.fine_tune:  # note: in this code, the flag means fine-tuning is NOT performed
                f_map = {'<eof>': 0}
            f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(
                args.emb_file,
                ' ',