Example #1
def test_control_C():
    # Tests ControlC: during each of the two training runs, exit with Control+C; the test passes if "Test failed!" is never printed at the end.
    from fastNLP import ControlC, Callback
    import time
    
    line1 = "\n\n\n\n\n*************************"
    line2 = "*************************\n\n\n\n\n"
    
    class Wait(Callback):
        def on_epoch_end(self):
            time.sleep(5)
    
    data_set, model = prepare_env()
    
    print(line1 + "Test starts!" + line2)
    trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                      batch_size=32, n_epochs=20, dev_data=data_set,
                      metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                      callbacks=[Wait(), ControlC(False)], check_code_level=2)
    trainer.train()
    
    print(line1 + "Program goes on ..." + line2)
    
    trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                      batch_size=32, n_epochs=20, dev_data=data_set,
                      metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                      callbacks=[Wait(), ControlC(True)], check_code_level=2)
    trainer.train()
    
    print(line1 + "Test failed!" + line2)
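Most of these snippets call prepare_env() and prepare_fake_dataset() without showing them. A minimal sketch, assuming a fake two-feature binary-classification task consistent with how the helpers are used here (the data generation is an assumption; NaiveClassifier mirrors the NaiveClassifier2 shown in Example #23):

import numpy as np
import torch
from fastNLP import DataSet, Instance
from fastNLP.models.base_model import BaseModel
from fastNLP.modules import MLP

def prepare_fake_dataset():
    # Two Gaussian blobs labelled 0.0 / 1.0 -- an easy, separable task.
    mean, cov, num = np.array([-3.0, -3.0]), np.eye(2), 1000
    class_a = np.random.multivariate_normal(mean, cov, size=num)
    class_b = np.random.multivariate_normal(-mean, cov, size=num)
    return DataSet([Instance(x=[float(i), float(j)], y=[0.0]) for i, j in class_a] +
                   [Instance(x=[float(i), float(j)], y=[1.0]) for i, j in class_b])

class NaiveClassifier(BaseModel):
    def __init__(self, in_feature_dim, out_feature_dim):
        super().__init__()
        self.mlp = MLP([in_feature_dim, in_feature_dim, out_feature_dim])

    def forward(self, x):
        return {"predict": torch.sigmoid(self.mlp(x))}

    def predict(self, x):
        return {"predict": torch.sigmoid(self.mlp(x)) > 0.5}

def prepare_env():
    data_set = prepare_fake_dataset()
    data_set.set_input("x", flag=True)
    data_set.set_target("y", flag=True)
    model = NaiveClassifier(2, 1)
    return data_set, model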
Example #2
    def test_save_path(self):
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)

        train_set, dev_set = data_set.split(0.3)

        model = NaiveClassifier(2, 1)

        save_path = 'test_save_models'

        trainer = Trainer(train_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCELoss(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=10,
                          print_every=50,
                          dev_data=dev_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          validate_every=-1,
                          save_path=save_path,
                          use_tqdm=True,
                          check_code_level=2)
        trainer.train()
        import os
        import shutil
        self.assertTrue(os.path.exists(save_path))
        if os.path.exists(save_path):
            shutil.rmtree(save_path)

        # Training without dev_data
        trainer = Trainer(train_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCELoss(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=10,
                          print_every=50,
                          dev_data=None,
                          metrics=None,
                          validate_every=-1,
                          save_path=save_path,
                          use_tqdm=True,
                          check_code_level=2)
        trainer.train()
        self.assertTrue(os.path.exists(save_path))
        if os.path.exists(save_path):
            shutil.rmtree(save_path)
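Both halves assert that save_path exists afterwards: with dev_data the Trainer writes the best model seen on the dev set there, and the second run, with dev_data=None, evidently still produces a saved model, which the final assertTrue depends on.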
Example #3
 def test_gradient_clip(self):
     data_set, model = prepare_env()
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=20, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=False,
                       callbacks=[GradientClipCallback(model.parameters(), clip_value=2)], check_code_level=2)
     trainer.train()
Example #4
    def run1(self):
        # test distributed training
        print('local rank', get_local_rank())
        set_rng_seed(100)
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)

        model = NaiveClassifier(2, 2)

        trainer = DistTrainer(
            model=model,
            train_data=data_set,
            optimizer=SGD(lr=0.1),
            loss=CrossEntropyLoss(pred="predict", target="y"),
            batch_size_per_gpu=8,
            n_epochs=3,
            print_every=50,
            save_path=self.save_path,
        )
        trainer.train()
        """
        # 应该正确运行
        """
        if trainer.is_master and os.path.exists(self.save_path):
            shutil.rmtree(self.save_path)
Example #5
    def test_readonly_property(self):
        from fastNLP.core.callback import Callback
        passed_epochs = []
        total_epochs = 5

        class MyCallback(Callback):
            def __init__(self):
                super(MyCallback, self).__init__()

            def on_epoch_begin(self):
                passed_epochs.append(self.epoch)
                print(self.n_epochs, self.n_steps, self.batch_size)
                print(self.model)
                print(self.optimizer)

        data_set, model = prepare_env()
        trainer = Trainer(data_set,
                          model,
                          loss=BCELoss(pred="predict", target="y"),
                          n_epochs=total_epochs,
                          batch_size=32,
                          print_every=50,
                          optimizer=SGD(lr=0.1),
                          check_code_level=2,
                          use_tqdm=False,
                          dev_data=data_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          callbacks=[MyCallback()])
        trainer.train()
        assert passed_epochs == list(range(1, total_epochs + 1))
Example #6
 def test_early_stop(self):
     data_set, model = prepare_env()
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.01), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=20, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=False,
                       callbacks=[EarlyStopCallback(5)], check_code_level=2)
     trainer.train()
Example #7
    def run4(self):
        set_rng_seed(100)
        data_set, model = prepare_env()

        train_set, dev_set = data_set.split(0.3)

        model = NaiveClassifier(2, 1)

        trainer = DistTrainer(
            train_set,
            model,
            optimizer=SGD(lr=0.1),
            loss=BCELoss(pred="predict", target="y"),
            batch_size_per_gpu=32,
            n_epochs=3,
            print_every=50,
            dev_data=dev_set,
            metrics=AccuracyMetric(pred="predict", target="y"),
            validate_every=-1,
            save_path=self.save_path,
        )
        trainer.train()
        """
        # 应该正确运行
        """
        if trainer.is_master and os.path.exists(self.save_path):
            shutil.rmtree(self.save_path)
Example #8
 def test_warmup_callback(self):
     data_set, model = prepare_env()
     warmup_callback = WarmupCallback()
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                       callbacks=warmup_callback, check_code_level=2)
     trainer.train()
Example #9
    def test_CheckPointCallback(self):

        from fastNLP import CheckPointCallback, Callback
        from fastNLP import Tester

        class RaiseCallback(Callback):
            def __init__(self, stop_step=10):
                super().__init__()
                self.stop_step = stop_step

            def on_backward_begin(self, loss):
                if self.step > self.stop_step:
                    raise RuntimeError()

        data_set, model = prepare_env()
        tester = Tester(data=data_set, model=model, metrics=AccuracyMetric(pred="predict", target="y"))
        import fitlog

        fitlog.set_log_dir(self.tempdir, new_log=True)
        tempfile_path = os.path.join(self.tempdir, 'chkt.pt')
        callbacks = [CheckPointCallback(tempfile_path)]

        fitlog_callback = FitlogCallback(data_set, tester)
        callbacks.append(fitlog_callback)

        callbacks.append(RaiseCallback(100))
        try:
            trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                              batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                              metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                              callbacks=callbacks, check_code_level=2)
            trainer.train()
        except RuntimeError:  # raised by RaiseCallback once step > stop_step
            pass
        # The code below simulates re-running the program.
        data_set, model = prepare_env()
        callbacks = [CheckPointCallback(tempfile_path)]
        tester = Tester(data=data_set, model=model, metrics=AccuracyMetric(pred="predict", target="y"))
        fitlog_callback = FitlogCallback(data_set, tester)
        callbacks.append(fitlog_callback)

        trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                          batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                          metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                          callbacks=callbacks, check_code_level=2)
        trainer.train()
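The sequence being exercised: RaiseCallback aborts the first run once self.step exceeds 100, by which point CheckPointCallback has been saving state to tempfile_path; the second, identically configured Trainer gets a fresh CheckPointCallback pointing at the same file and presumably resumes from that checkpoint instead of starting over.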
Example #10
    def test_control_C_callback(self):
        class Raise(Callback):
            def on_epoch_end(self):
                raise KeyboardInterrupt

        flags = [False]

        def set_flag():
            flags[0] = not flags[0]

        data_set, model = prepare_env()

        trainer = Trainer(data_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCELoss(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=20,
                          dev_data=data_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          use_tqdm=True,
                          callbacks=[Raise(),
                                     ControlC(False, set_flag)],
                          check_code_level=2)
        trainer.train()

        self.assertEqual(flags[0], False)

        trainer = Trainer(data_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCELoss(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=20,
                          dev_data=data_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          use_tqdm=True,
                          callbacks=[Raise(),
                                     ControlC(True, set_flag)],
                          check_code_level=2)
        trainer.train()

        self.assertEqual(flags[0], True)
Example #11
    def test_run_fp16(self):
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)

        train_set, dev_set = data_set.split(0.3)

        model = NaiveClassifier2(2, 1)
        trainer = Trainer(train_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCEWithLogits(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=10,
                          print_every=50,
                          dev_data=dev_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          validate_every=-1,
                          save_path=None,
                          use_tqdm=True,
                          check_code_level=2,
                          fp16=True,
                          device=0)
        trainer.train(load_best_model=False)

        model = NaiveClassifier2(2, 1)
        trainer = Trainer(train_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCEWithLogits(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=10,
                          print_every=50,
                          dev_data=dev_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          validate_every=-1,
                          save_path=None,
                          use_tqdm=True,
                          check_code_level=2,
                          fp16=True,
                          device=0,
                          test_use_fp16=False)
        trainer.train(load_best_model=False)
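Both runs above hard-code device=0 and therefore assume a CUDA machine; Example #24 below shows that fp16=True combined with a CPU device raises RuntimeError. A defensive sketch for machines that may lack a GPU:

import torch

# fp16 requires a real GPU device in this Trainer; fall back to fp32 on CPU.
use_cuda = torch.cuda.is_available()
trainer_kwargs = dict(device=0 if use_cuda else "cpu", fp16=use_cuda)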
Example #12
 def test_early_stop_callback(self):
     """
     Watch the run to confirm that early stopping actually triggers.
     """
     data_set, model = prepare_env()
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=2, n_epochs=10, print_every=5, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                       callbacks=EarlyStopCallback(1), check_code_level=2)
     trainer.train()
Example #13
 def test_evaluate_callback(self):
     data_set, model = prepare_env()
     from fastNLP import Tester
     tester = Tester(data=data_set, model=model, metrics=AccuracyMetric(pred="predict", target="y"))
     evaluate_callback = EvaluateCallback(data_set, tester)
     
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=False,
                       callbacks=evaluate_callback, check_code_level=2)
     trainer.train()
Example #14
 def test_TensorboardCallback(self):
     data_set, model = prepare_env()
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=False,
                       callbacks=[TensorboardCallback("loss", "metric")], check_code_level=2)
     trainer.train()
     import os
     import shutil
     path = os.path.join("./", 'tensorboard_logs_{}'.format(trainer.start_time))
     if os.path.exists(path):
         shutil.rmtree(path)
Example #15
 def test_save_model_callback(self):
     data_set, model = prepare_env()
     top = 3
     save_model_callback = SaveModelCallback(self.tempdir, top=top)
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                       callbacks=save_model_callback, check_code_level=2)
     trainer.train()
     
     timestamp = os.listdir(self.tempdir)[0]
     self.assertEqual(len(os.listdir(os.path.join(self.tempdir, timestamp))), top)
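The assertions pin down the on-disk layout: SaveModelCallback creates one timestamped subdirectory per run under self.tempdir and, judging by the length check, retains only the top best checkpoints inside it.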
Example #16
 def test_KeyBoardInterrupt(self):
     data_set, model = prepare_env()
     trainer = Trainer(data_set,
                       model,
                       loss=BCELoss(pred="predict", target="y"),
                       n_epochs=5,
                       batch_size=32,
                       print_every=50,
                       optimizer=SGD(lr=0.1),
                       check_code_level=2,
                       use_tqdm=False,
                       callbacks=[ControlC(False)])
     trainer.train()
Example #17
 def test_LRFinder(self):
     data_set, model = prepare_env()
     trainer = Trainer(data_set,
                       model,
                       loss=BCELoss(pred="predict", target="y"),
                       n_epochs=5,
                       batch_size=32,
                       print_every=50,
                       optimizer=SGD(lr=0.1),
                       check_code_level=2,
                       use_tqdm=False,
                       callbacks=[LRFinder(len(data_set) // 32)])
     trainer.train()
Example #18
 def test_fitlog_callback(self):
     import fitlog
     fitlog.set_log_dir(self.tempdir, new_log=True)
     data_set, model = prepare_env()
     from fastNLP import Tester
     tester = Tester(data=data_set, model=model, metrics=AccuracyMetric(pred="predict", target="y"))
     fitlog_callback = FitlogCallback(data_set, tester)
     
     trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"),
                       batch_size=32, n_epochs=5, print_every=50, dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True,
                       callbacks=fitlog_callback, check_code_level=2)
     trainer.train()
Example #19
 def test_TensorboardCallback(self):
     data_set, model = prepare_env()
     trainer = Trainer(data_set,
                       model,
                       loss=BCELoss(pred="predict", target="y"),
                       n_epochs=5,
                       batch_size=32,
                       print_every=50,
                       optimizer=SGD(lr=0.1),
                       check_code_level=2,
                       use_tqdm=False,
                       dev_data=data_set,
                       metrics=AccuracyMetric(pred="predict", target="y"),
                       callbacks=[TensorboardCallback("loss", "metric")])
     trainer.train()
Example #20
def train():
    config = Config()

    train_data, dev_data, vocabulary = get_dataset(config.data_path)

    poetry_model = PoetryModel(vocabulary_size=len(vocabulary),
                               embedding_size=config.embedding_size,
                               hidden_size=config.hidden_size)
    loss = Loss(pred='output', target='target')
    perplexity = Perplexity(pred='output', target='target')

    print("optimizer:", config.optimizer)
    print("momentum:", config.momentum)
    if config.optimizer == 'adam':
        optimizer = Adam(lr=config.lr, weight_decay=config.weight_decay)
    elif config.optimizer == 'sgd':
        optimizer = SGD(lr=config.lr, momentum=config.momentum)
    elif config.optimizer == 'adagrad':
        optimizer = Adagrad(lr=config.lr, weight_decay=config.weight_decay)
    elif config.optimizer == 'adadelta':
        optimizer = Adadelta(lr=config.lr,
                             rho=config.rho,
                             eps=config.eps,
                             weight_decay=config.weight_decay)
    else:
        raise ValueError("unsupported optimizer: {}".format(config.optimizer))

    timing = TimingCallback()
    early_stop = EarlyStopCallback(config.patience)

    trainer = Trainer(train_data=train_data,
                      model=poetry_model,
                      loss=loss,
                      metrics=perplexity,
                      n_epochs=config.epoch,
                      batch_size=config.batch_size,
                      print_every=config.print_every,
                      validate_every=config.validate_every,
                      dev_data=dev_data,
                      save_path=config.save_path,
                      optimizer=optimizer,
                      check_code_level=config.check_code_level,
                      metric_key="-PPL",
                      sampler=RandomSampler(),
                      prefetch=False,
                      use_tqdm=True,
                      device=config.device,
                      callbacks=[timing, early_stop])
    trainer.train()
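train() reads a Config object that is not shown here (get_dataset, PoetryModel, and TimingCallback are likewise defined elsewhere). A minimal sketch covering just the fields used above; every value is an illustrative placeholder, not the original setting:

class Config:
    data_path = "./data"
    embedding_size = 128
    hidden_size = 256
    optimizer = "adam"       # one of: adam, sgd, adagrad, adadelta
    lr = 1e-3
    momentum = 0.9
    weight_decay = 0.0
    rho = 0.9                # adadelta only
    eps = 1e-6               # adadelta only
    patience = 5             # early stopping
    epoch = 10
    batch_size = 64
    print_every = 100
    validate_every = -1
    save_path = "./models"
    check_code_level = 0
    device = "cuda:0"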
Example #21
 def test_case(self):
     data_set = prepare_fake_dataset()
     data_set.set_input("x", flag=True)
     data_set.set_target("y", flag=True)
     
     train_set, dev_set = data_set.split(0.3)
     
     model = NaiveClassifier(2, 1)
     
     trainer = Trainer(train_set, model,
                       loss=BCELoss(pred="predict", target="y"),
                       metrics=AccuracyMetric(pred="predict", target="y"),
                       n_epochs=10,
                       batch_size=32,
                       print_every=50,
                       validate_every=-1,
                       dev_data=dev_set,
                       optimizer=SGD(lr=0.1),
                       check_code_level=2,
                       use_tqdm=True,
                       save_path=None)
     trainer.train()
     """
def train(config):
    train_data = pickle.load(
        open(os.path.join(config.data_path, config.train_name), "rb"))
    # debug
    train_data = train_data[0:100]
    dev_data = pickle.load(
        open(os.path.join(config.data_path, config.dev_name), "rb"))
    print(len(train_data), len(dev_data))
    # test_data = pickle.load(open(os.path.join(config.data_path, config.test_name), "rb"))
    # load w2v data
    # weight = pickle.load(open(os.path.join(config.data_path, config.weight_name), "rb"))

    word_vocab = pickle.load(
        open(os.path.join(config.data_path, config.word_vocab_name), "rb"))
    char_vocab = pickle.load(
        open(os.path.join(config.data_path, config.char_vocab_name), "rb"))
    pos_vocab = pickle.load(
        open(os.path.join(config.data_path, config.pos_vocab_name), "rb"))
    spo_vocab = pickle.load(
        open(os.path.join(config.data_path, config.spo_vocab_name), "rb"))
    tag_vocab = pickle.load(
        open(os.path.join(config.data_path, config.tag_vocab_name), "rb"))
    print('word vocab', len(word_vocab))
    print('char vocab', len(char_vocab))
    print('pos vocab', len(pos_vocab))
    print('spo vocab', len(spo_vocab))
    print('tag vocab', len(tag_vocab))

    model = BiLSTM_CRF(config.batch_size,
                       len(word_vocab),
                       len(char_vocab),
                       len(pos_vocab),
                       len(spo_vocab),
                       config.embed_dim,
                       config.hidden_dim,
                       tag_vocab.idx2word,
                       dropout=0.5)

    optimizer = SGD(lr=config.lr, momentum=config.momentum)
    timing = TimingCallback()
    early_stop = EarlyStopCallback(config.patience)
    loss = NLLLoss()
    metrics = SpanFPreRecMetric(tag_vocab)
    # accuracy = AccuracyMetric(pred='output', target='target')

    trainer = Trainer(train_data=train_data,
                      model=model,
                      loss=loss,
                      metrics=metrics,
                      batch_size=config.batch_size,
                      n_epochs=config.epoch,
                      dev_data=dev_data,
                      save_path=config.save_path,
                      check_code_level=-1,
                      print_every=100,
                      validate_every=0,
                      optimizer=optimizer,
                      use_tqdm=False,
                      device=config.device,
                      callbacks=[timing, early_stop])
    trainer.train()
Example #23
    def test_run_data_parallel(self):
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)

        train_set, dev_set = data_set.split(0.3)

        class NaiveClassifier2(BaseModel):
            r"""
            A simple classifier, usable in all sorts of tests.
            """
            def __init__(self, in_feature_dim, out_feature_dim):
                super(NaiveClassifier2, self).__init__()
                self.mlp = MLP(
                    [in_feature_dim, in_feature_dim, out_feature_dim])

            def forward(self, x):
                return {"predict": self.mlp(x)}

            def predict(self, x):
                return {"predict": torch.sigmoid(self.mlp(x)) > 0.5}

        model = NaiveClassifier2(2, 1)
        with self.assertRaises(RuntimeError):
            trainer = Trainer(train_set,
                              model,
                              optimizer=SGD(lr=0.1),
                              loss=BCEWithLogits(pred="predict", target="y"),
                              batch_size=32,
                              n_epochs=10,
                              print_every=50,
                              dev_data=dev_set,
                              metrics=AccuracyMetric(pred="predict",
                                                     target="y"),
                              validate_every=-1,
                              save_path=None,
                              use_tqdm=True,
                              check_code_level=2,
                              fp16=True,
                              device=[0, 1])

        with self.assertRaises(RuntimeError):

            class NaiveClassifier3(BaseModel):
                r"""
                A simple classifier, usable in all sorts of tests.
                """
                def __init__(self, in_feature_dim, out_feature_dim):
                    super(NaiveClassifier3, self).__init__()
                    self.mlp = MLP(
                        [in_feature_dim, in_feature_dim, out_feature_dim])

                @torch.cuda.amp.autocast()
                def forward(self, x):
                    return {"predict": self.mlp(x)}

                @torch.cuda.amp.autocast()
                def predict(self, x):
                    return {"predict": torch.sigmoid(self.mlp(x)) > 0.5}

            model = NaiveClassifier3(2, 1)
            trainer = Trainer(train_set,
                              model,
                              optimizer=SGD(lr=0.1),
                              loss=BCEWithLogits(pred="predict", target="y"),
                              batch_size=32,
                              n_epochs=10,
                              print_every=50,
                              dev_data=dev_set,
                              metrics=AccuracyMetric(pred="predict",
                                                     target="y"),
                              validate_every=-1,
                              save_path=None,
                              use_tqdm=True,
                              check_code_level=2,
                              fp16=True,
                              device=[0, 1],
                              test_use_fp16=True)

        class NaiveClassifier4(BaseModel):
            r"""
            A simple classifier, usable in all sorts of tests.
            """
            def __init__(self, in_feature_dim, out_feature_dim):
                super(NaiveClassifier4, self).__init__()
                self.mlp = MLP(
                    [in_feature_dim, in_feature_dim, out_feature_dim])

            def forward(self, x):
                with torch.cuda.amp.autocast():
                    return {"predict": self.mlp(x)}

            def predict(self, x):
                with torch.cuda.amp.autocast():
                    return {"predict": torch.sigmoid(self.mlp(x)) > 0.5}

        model = NaiveClassifier4(2, 1)
        trainer = Trainer(train_set,
                          model,
                          optimizer=SGD(lr=0.1),
                          loss=BCEWithLogits(pred="predict", target="y"),
                          batch_size=32,
                          n_epochs=10,
                          print_every=50,
                          dev_data=dev_set,
                          metrics=AccuracyMetric(pred="predict", target="y"),
                          validate_every=-1,
                          save_path=None,
                          use_tqdm=True,
                          check_code_level=2,
                          fp16=True,
                          device=[0, 1],
                          test_use_fp16=True)
        trainer.train(load_best_model=False)
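A plausible reading of the three variants, based on PyTorch's AMP documentation: torch.nn.DataParallel runs each replica in a side thread, and autocast state is thread-local, so Trainer-level fp16=True alone cannot cover the replicas; applying autocast inside forward itself, as NaiveClassifier4 does with the context manager, is the pattern that trains successfully here.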
Example #24
    def test_raise_error(self):
        data_set = prepare_fake_dataset()
        data_set.set_input("x", flag=True)
        data_set.set_target("y", flag=True)

        train_set, dev_set = data_set.split(0.3)

        model = NaiveClassifier2(2, 1)

        with self.assertRaises(RuntimeError):
            trainer = Trainer(train_set,
                              model,
                              optimizer=SGD(lr=0.1),
                              loss=BCEWithLogits(pred="predict", target="y"),
                              batch_size=32,
                              n_epochs=10,
                              print_every=50,
                              dev_data=dev_set,
                              metrics=AccuracyMetric(pred="predict",
                                                     target="y"),
                              validate_every=-1,
                              save_path=None,
                              use_tqdm=True,
                              check_code_level=2,
                              fp16=True)

        with self.assertRaises(RuntimeError):
            trainer = Trainer(train_set,
                              model,
                              optimizer=SGD(lr=0.1),
                              loss=BCEWithLogits(pred="predict", target="y"),
                              batch_size=32,
                              n_epochs=10,
                              print_every=50,
                              dev_data=dev_set,
                              metrics=AccuracyMetric(pred="predict",
                                                     target="y"),
                              validate_every=-1,
                              save_path=None,
                              use_tqdm=True,
                              check_code_level=2,
                              fp16=True,
                              device='cpu')

        with self.assertRaises(RuntimeError):
            trainer = Trainer(train_set,
                              model,
                              optimizer=SGD(lr=0.1),
                              loss=BCEWithLogits(pred="predict", target="y"),
                              batch_size=32,
                              n_epochs=10,
                              print_every=50,
                              dev_data=dev_set,
                              metrics=AccuracyMetric(pred="predict",
                                                     target="y"),
                              validate_every=-1,
                              save_path=None,
                              use_tqdm=True,
                              check_code_level=2,
                              fp16=True,
                              device=torch.device('cpu'))
Example #25
    def test_SGD(self):
        optim = SGD(model_params=torch.nn.Linear(10, 3).parameters())
        self.assertTrue("lr" in optim.__dict__["settings"])
        self.assertTrue("momentum" in optim.__dict__["settings"])
        res = optim.construct_from_pytorch(torch.nn.Linear(10, 3).parameters())
        self.assertTrue(isinstance(res, torch.optim.SGD))

        optim = SGD(lr=0.001)
        self.assertEqual(optim.__dict__["settings"]["lr"], 0.001)
        res = optim.construct_from_pytorch(torch.nn.Linear(10, 3).parameters())
        self.assertTrue(isinstance(res, torch.optim.SGD))

        optim = SGD(lr=0.002, momentum=0.989)
        self.assertEqual(optim.__dict__["settings"]["lr"], 0.002)
        self.assertEqual(optim.__dict__["settings"]["momentum"], 0.989)

        optim = SGD(0.001)
        self.assertEqual(optim.__dict__["settings"]["lr"], 0.001)
        res = optim.construct_from_pytorch(torch.nn.Linear(10, 3).parameters())
        self.assertTrue(isinstance(res, torch.optim.SGD))

        with self.assertRaises(TypeError):
            _ = SGD("???")
        with self.assertRaises(TypeError):
            _ = SGD(0.001, lr=0.002)
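As the assertions show, fastNLP's SGD is a thin wrapper: keyword arguments land in a settings dict, and the real torch.optim.SGD is only built once parameters are supplied. A short usage sketch:

import torch
from fastNLP import SGD

model = torch.nn.Linear(10, 3)

# Configure the wrapper first; the torch optimizer is constructed lazily.
optim = SGD(lr=0.01, momentum=0.9)
torch_optim = optim.construct_from_pytorch(model.parameters())
assert isinstance(torch_optim, torch.optim.SGD)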