Пример #1
0
 def test_lr_scheduler(self):
     """Smoke-test that training runs with a StepLR schedule attached via the LRScheduler callback."""
     data_set, model = prepare_env()
     optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
     # Decay the learning rate by 10x every 10 optimizer epochs.
     step_lr = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
     trainer = Trainer(
         data_set, model,
         optimizer=optimizer,
         loss=BCELoss(pred="predict", target="y"),
         batch_size=32,
         n_epochs=5,
         print_every=50,
         dev_data=data_set,
         metrics=AccuracyMetric(pred="predict", target="y"),
         use_tqdm=False,
         callbacks=[LRScheduler(step_lr)],
         check_code_level=2,
     )
     trainer.train()
Пример #2
0
# Word- and character-level embeddings stacked into a single lookup table.
embed = StackEmbedding([word_embed, char_embed])

# BiLSTM-CRF sequence tagger built on top of the stacked embeddings.
model = CNNBiLSTMCRF(
    embed,
    hidden_size=1200,
    num_layers=1,
    tag_vocab=data.vocabs[Const.TARGET],
    encoding_type=encoding_type,
    dropout=dropout,
)

optimizer = SGD(model.parameters(), lr=lr, momentum=0.9)

# Inverse-time LR decay: lr(epoch) = base_lr / (1 + 0.05 * epoch).
inv_time_decay = lambda epoch: 1 / (1 + 0.05 * epoch)

# Clip gradients element-wise at 5, evaluate on the test split during
# training, and drive the LambdaLR schedule from a callback.
callbacks = [
    GradientClipCallback(clip_value=5, clip_type='value'),
    EvaluateCallback(data.datasets['test']),
    LRScheduler(LambdaLR(optimizer, lr_lambda=inv_time_decay)),
]
trainer = Trainer(train_data=data.get_dataset('train'),
                  model=model,
                  optimizer=optimizer,
                  sampler=BucketSampler(num_buckets=100),
                  device=0,
                  dev_data=data.get_dataset('dev'),
                  batch_size=batch_size,
                  metrics=SpanFPreRecMetric(
                      tag_vocab=data.vocabs[Const.TARGET],
                      encoding_type=encoding_type),
                  callbacks=callbacks,
                  num_workers=1,
                  n_epochs=100,
Пример #3
0
                             requires_grad=False,
                             normalize=False),
    ElmoLayer=None,
    args_of_imm={
        "input_size": 300,
        "hidden_size": arg.hidden_size,
        "dropout": arg.dropout,
        "use_allennlp": False,
    },
)

# Adadelta with a StepLR schedule that halves the learning rate every
# 10 epochs, driven through the LRScheduler callback.
optimizer = Adadelta(lr=arg.lr, params=model.parameters())
scheduler = StepLR(optimizer, step_size=10, gamma=0.5)
callbacks = [LRScheduler(scheduler)]

# Extra evaluation through fitlog: SNLI tracks a single test split,
# MNLI tracks both matched and mismatched dev splits.
if arg.task in ['snli']:
    extra_eval = data_info.datasets[arg.testset_name]
    callbacks.append(FitlogCallback(extra_eval, verbose=1))
elif arg.task == 'mnli':
    extra_eval = {
        'dev_matched': data_info.datasets['dev_matched'],
        'dev_mismatched': data_info.datasets['dev_mismatched'],
    }
    callbacks.append(FitlogCallback(extra_eval, verbose=1))

trainer = Trainer(train_data=data_info.datasets['train'],