def test_default():
    """Smoke test: a Runner with metrics and no callbacks fits for 2 epochs."""
    # NOTE(review): another `test_default` exists later in this module; pytest
    # only collects the last definition — confirm whether both are intended.
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=None,
    )
    runner.fit(TestLoader, epochs=2)
def test_default():
    """Smoke test: default Runner configuration trains without errors."""
    # NOTE(review): this module defines `test_default` twice; pytest collects
    # only the last one — confirm the duplicate is intentional.
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=None,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_accumulate_steps():
    """Training with gradient accumulation (accumulate_steps=10) runs cleanly."""
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        accumulate_steps=10,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_segm_callback(callback):
    """Segmentation model trains with each parametrized callback."""
    # NOTE(review): "TEST_SEGM_OPTIMZER" looks like a misspelling of
    # "OPTIMIZER" — confirm it matches the constant's actual definition.
    runner = Runner(
        model=TEST_SEGM_MODEL,
        optimizer=TEST_SEGM_OPTIMZER,
        criterion=TEST_CRITERION,
        callbacks=callback,
    )
    runner.fit(TEST_SEGM_LOADER, epochs=2)
def test_callback(callback):
    """Each parametrized callback runs alongside a BatchMetrics callback."""
    # NOTE(review): `test_callback` is defined twice in this module; pytest
    # keeps only the last definition — confirm this is intentional.
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=[callback, pt_clb.BatchMetrics(TEST_METRIC)],
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_ModelEma_callback():
    """ModelEma callback (exponential moving average of weights) trains cleanly."""
    ema_callback = pt_clb.ModelEma(TEST_MODEL)
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=ema_callback,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_fp16_training():
    """Mixed-precision training (use_fp16=True) completes without errors."""
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        use_fp16=True,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_grad_clip_loader():
    """Training with built-in gradient clipping (gradient_clip_val=1.0)."""
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        gradient_clip_val=1.0,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_accumulate_steps_with_clip_grad():
    """Gradient accumulation combined with a GradientClipping callback."""
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=[pt_clb.GradientClipping(1)],
        accumulate_steps=4,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_val_loader():
    """Fit with an explicit validation loader and step limits for both phases."""
    # NOTE(review): `test_val_loader` is defined twice in this module; pytest
    # keeps only the last definition — confirm this is intentional.
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
    )
    runner.fit(
        TEST_LOADER,
        epochs=2,
        steps_per_epoch=100,
        val_loader=TEST_LOADER,
        val_steps=200,
    )
def test_Timer_callback():
    """Timer callback runs without disturbing training."""
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=pt_clb.Timer(),
    )
    runner.fit(TestLoader, epochs=2)
def test_Mixup():
    """Mixup augmentation callback (alpha=0.2) trains without errors."""
    mixup = pt_clb.Mixup(0.2, NUM_CLASSES)
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=mixup,
    )
    runner.fit(TestLoader, epochs=2)
def test_FileLogger_callback():
    """FileLogger callback writes logs to the temp path during training."""
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=pt_clb.FileLogger(TMP_PATH),
    )
    runner.fit(TestLoader, epochs=2)
def test_TensorBoard():
    """TensorBoard logging callback runs against a temp log directory."""
    tb_logger = pt_clb.TensorBoard(log_dir=TMP_PATH)
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=tb_logger,
    )
    runner.fit(TestLoader, epochs=2)
def test_CheckpointSaver_callback():
    """CheckpointSaver callback saves model checkpoints during training."""
    saver = pt_clb.CheckpointSaver(TMP_PATH, save_name="model.chpn")
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=saver,
    )
    runner.fit(TestLoader, epochs=2)
def test_ReduceLROnPlateau_callback():
    """ReduceLROnPlateau scheduler callback runs without errors."""
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
        callbacks=pt_clb.ReduceLROnPlateau(),
    )
    runner.fit(TestLoader, epochs=2)
def test_callback(callback):
    """Each parametrized callback trains cleanly with metrics enabled."""
    # NOTE(review): `test_callback` is defined twice in this module; pytest
    # keeps only the last definition — confirm this is intentional.
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        metrics=TEST_METRIC,
        callbacks=callback,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_tensorboar_CM():
    """TensorBoardCM (confusion matrix) works together with TensorBoard logging."""
    # NOTE(review): name looks like a typo for "tensorboard"; renaming would
    # change the collected test id, so it is kept as-is.
    callbacks = [
        pt_clb.TensorBoardCM(),
        pt_clb.TensorBoard(log_dir=TMP_PATH),
    ]
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=callbacks,
    )
    runner.fit(TEST_LOADER, epochs=2)
def test_val_loader():
    """Fit with a validation loader, limiting both train and val steps."""
    # NOTE(review): `test_val_loader` is defined twice in this module; pytest
    # keeps only the last definition — confirm this is intentional.
    runner = Runner(
        model=TestModel,
        optimizer=TestOptimizer,
        criterion=TestCriterion,
        metrics=TestMetric,
    )
    runner.fit(
        TestLoader,
        epochs=2,
        steps_per_epoch=100,
        val_loader=TestLoader,
        val_steps=200,
    )
def test_loader_metric():
    """LoaderMetrics must detach results and move them to CPU.

    Stored targets/outputs carrying grad_fn or living on GPU would keep the
    autograd graph (and device memory) alive — a memory leak.
    """
    metric_clb = pt_clb.LoaderMetrics(TEST_METRIC)
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=metric_clb,
    )
    runner.fit(TEST_LOADER, epochs=2)
    cpu = torch.device("cpu")
    # Detached from the autograd graph ...
    assert metric_clb.target[0].grad_fn is None
    assert metric_clb.output[0].grad_fn is None
    # ... and moved off the accelerator.
    assert metric_clb.target[0].device == cpu
    assert metric_clb.output[0].device == cpu
def test_invalid_phases_scheduler_mode():
    """An unknown `mode` in PhasesScheduler raises ValueError during fit."""
    bad_phase = {"ep": [0, 1], "lr": [0, 1], "mode": "new_mode"}
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=pt_clb.PhasesScheduler([bad_phase]),
    )
    # The invalid mode is only validated when the scheduler is actually used.
    with pytest.raises(ValueError):
        runner.fit(TEST_LOADER, epochs=2)
def test_state_batch_size():
    """Runner.state picks up the loader's batch_size, defaulting to 1."""
    runner = Runner(
        model=TEST_MODEL,
        optimizer=TEST_OPTIMIZER,
        criterion=TEST_CRITERION,
        callbacks=None,
    )
    runner.fit(TEST_LOADER, epochs=1)
    # batch_size is copied from the loader into runner state
    assert runner.state.batch_size == BS

    # Without a batch_size attribute on the loader, state falls back to 1.
    bare_loader = deepcopy(TEST_LOADER)
    delattr(bare_loader, "batch_size")
    runner.fit(bare_loader, epochs=1)
    assert runner.state.batch_size == 1