def test_demo(self):
    """Regression test for https://github.com/fastnlp/fastNLP/issues/324#issue-705081091.

    Runs CWSPipe over a single pre-tokenized sentence and checks that the
    resulting character vocabulary does not contain the '<' character
    (which would indicate special-token text leaking into 'chars').
    """
    from fastNLP import DataSet, Instance
    from fastNLP.io import DataBundle

    data_bundle = DataBundle()
    ds = DataSet()
    ds.append(Instance(raw_words="截流 进入 最后 冲刺 ( 附 图片 1 张 )"))
    data_bundle.set_dataset(ds, name='train')
    data_bundle = CWSPipe().process(data_bundle)
    # assertNotIn is semantically identical to assertFalse('<' in vocab)
    # (both go through Vocabulary.__contains__) but reports the offending
    # container on failure instead of just "True is not false".
    self.assertNotIn('<', data_bundle.get_vocab('chars'))
# Build the word vocabulary from the training split; the test split is
# passed as no_create_entry so its out-of-train words map to <unk> rather
# than receiving trainable entries.
vocab = Vocabulary()
vocab.from_dataset(train_dataset, field_name='words',
                   no_create_entry_dataset=[test_dataset])
vocab.index_dataset(train_dataset, test_dataset, field_name='words')

# Label vocabulary: classification targets need no padding/unknown symbols.
target_vocab = Vocabulary(padding=None, unknown=None)
target_vocab.from_dataset(train_dataset, field_name='target',
                          no_create_entry_dataset=[test_dataset])
target_vocab.index_dataset(train_dataset, test_dataset, field_name='target')

'''build bundle'''
# Package the indexed datasets and both vocabularies into one bundle.
data_dict = {"train": train_dataset, "test": test_dataset}
vocab_dict = {"words": vocab, "target": target_vocab}
data_bundle = DataBundle(vocab_dict, data_dict)
print(data_bundle)

'''build model'''
# BERT encoder with [CLS]/[SEP] kept, feeding a sequence classifier whose
# output size equals the number of target labels.
embed = BertEmbedding(data_bundle.get_vocab('words'),
                      model_dir_or_name='en-base-uncased',
                      include_cls_sep=True)
model = BertForSequenceClassification(embed, len(data_bundle.get_vocab('target')))
# model = BertForSequenceClassification(embed, 2)

# Prefer GPU 0 when CUDA is present, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = 'cpu'

# NOTE: no dev split here, so the train set doubles as dev_data for
# per-epoch evaluation.
trainer = Trainer(data_bundle.get_dataset('train'), model,
                  optimizer=Adam(model_params=model.parameters(), lr=2e-5),
                  loss=CrossEntropyLoss(), device=device,
                  batch_size=8, dev_data=data_bundle.get_dataset('train'),
                  metrics=AccuracyMetric(), n_epochs=10, print_every=1)
trainer.train()

# Final held-out evaluation on the test split.
tester = Tester(data_bundle.get_dataset('test'), model,
                batch_size=128, metrics=AccuracyMetric())
tester.test()
target_vocab = Vocabulary(padding=None, unknown=None) target_vocab.from_dataset(train_dataset, field_name='target', no_create_entry_dataset=[dev_dataset, test_dataset]) target_vocab.index_dataset(train_dataset, dev_dataset, test_dataset, field_name='target') '''build bundle''' data_dict = {"train": train_dataset, "dev": dev_dataset, "test": test_dataset} vocab_dict = {"words": vocab, "target": target_vocab} data_bundle = DataBundle(vocab_dict, data_dict) print(data_bundle) '''build model''' embed = BertEmbedding(data_bundle.get_vocab('words'), model_dir_or_name='en-base-uncased', include_cls_sep=True) model = BertForSequenceClassification(embed, len(data_bundle.get_vocab('target'))) # model = BertForSequenceClassification(embed, 2) device = 0 if torch.cuda.is_available() else 'cpu' trainer = Trainer(data_bundle.get_dataset('train'), model, optimizer=Adam(model_params=model.parameters(), lr=2e-5), loss=CrossEntropyLoss(target='target'), device=device, batch_size=8, dev_data=data_bundle.get_dataset('dev'), metrics=AccuracyMetric(target='target'),