def test_tensor_converter_2():
    """input_proc target handling: 1-D y is auto-reshaped to a column,
    tensor y passes through unchanged."""

    class _Trainer(BaseRunner):
        # Minimal runner stub: only `non_blocking` and a `predict` hook
        # are read by the converter in these tests.
        def __init__(self):
            super().__init__()
            self.non_blocking = False

        def predict(self, x_, y_):
            return x_, y_

    stub = _Trainer()
    converter = TensorConverter()
    src = np.asarray([[1, 2, 3], [4, 5, 6]])
    frame = pd.DataFrame(src)
    ref = torch.Tensor(src)  # noqa

    # ndarray target: the 1-D row becomes a (3, 1) column tensor.
    x, y = converter.input_proc(src, src[0], trainer=stub)  # noqa
    assert isinstance(y, torch.Tensor)
    assert y.shape == (3, 1)
    assert torch.equal(y, ref[0].unsqueeze(-1))

    # DataFrame row target: same reshape behaviour.
    x, y = converter.input_proc(frame, frame.iloc[0], trainer=stub)  # noqa
    assert isinstance(y, torch.Tensor)
    assert y.shape == (3, 1)
    assert torch.equal(y, ref[0].unsqueeze(-1))

    # Tensor target: passed through untouched, stays 1-D.
    x, y = converter.input_proc(ref, ref[0], trainer=stub)  # noqa
    assert isinstance(y, torch.Tensor)
    assert y.shape == (3,)
    assert torch.equal(y, ref[0])
def test_trainer_prediction_1(data):
    """Prediction paths: x-only, (x, y) pair, DataLoader dataset, and the
    <x_in>/<dataset> mutual-exclusion guard.

    NOTE(review): `data` is a fixture — data[0] the model, data[1] an
    (x, y) tensor pair; inferred from usage, confirm against conftest.
    """
    net = deepcopy(data[0])
    trainer = Trainer(model=net,
                      optimizer=Adam(lr=0.1),
                      loss_func=MSELoss(),
                      epochs=200)
    trainer.extend(TensorConverter())
    trainer.fit(*data[1], *data[1])

    # Fresh inference-only trainer reusing the fitted model.
    trainer = Trainer(model=net).extend(TensorConverter())

    # x-only: close to the targets but not exactly equal.
    pred = trainer.predict(data[1][0])
    truth = data[1][1].numpy()
    assert np.any(np.not_equal(pred, truth))
    assert np.allclose(pred, truth, rtol=0, atol=0.2)

    # (x, y): returns predictions alongside targets.
    pred, truth = trainer.predict(*data[1])
    assert np.any(np.not_equal(pred, truth))
    assert np.allclose(pred, truth, rtol=0, atol=0.2)

    # DataLoader-backed dataset prediction.
    loader = DataLoader(TensorDataset(*data[1]), batch_size=50)
    pred, truth = trainer.predict(dataset=loader)
    assert np.any(np.not_equal(pred, truth))
    assert np.allclose(pred, truth, rtol=0, atol=0.2)

    # Supplying both x_in and dataset must be rejected.
    with pytest.raises(
            RuntimeError,
            match='parameters <x_in> and <dataset> are mutually exclusive'):
        trainer.predict(*data[1], dataset='not none')
def test_trainer_prediction_2():
    """Classification predictions with argmax on, then raw logits with
    auto_reshape off."""
    net = _Net(n_feature=2, n_hidden=10, n_output=2)

    # Two Gaussian blobs around (2, 2) and (-2, -2), shuffled together.
    base = np.ones((100, 2))
    x0 = np.random.normal(2 * base, 1)
    y0 = np.zeros(100)
    x1 = np.random.normal(-2 * base, 1)
    y1 = np.ones(100)
    x = np.vstack((x0, x1))
    y = np.concatenate((y0, y1))
    order = np.arange(x.shape[0])
    np.random.shuffle(order)
    x, y = x[order], y[order]

    trainer = Trainer(model=net,
                      optimizer=Adam(lr=0.1),
                      loss_func=CrossEntropyLoss(),
                      epochs=200)
    trainer.extend(
        TensorConverter(x_dtype=torch.float32, y_dtype=torch.long, argmax=True))
    trainer.fit(x, y)

    # argmax=True: predictions come back as class indices.
    pred, truth = trainer.predict(x, y)
    assert pred.shape == (200,)
    assert np.all(pred == truth)

    # trainer.reset()
    # auto_reshape=False: raw (200, 2) logits are returned instead.
    loader = DataLoader(ArrayDataset(x, y, dtypes=(torch.float, torch.long)),
                        batch_size=20)
    trainer.extend(
        TensorConverter(x_dtype=torch.float32, y_dtype=torch.long,
                        auto_reshape=False))
    pred, truth = trainer.predict(dataset=loader)
    assert pred.shape == (200, 2)
    pred = np.argmax(pred, 1)
    assert np.all(pred == truth)
def test_validator_1(data):
    """Validator checkpointing: 'mae' is recorded when warming_up permits,
    nothing is recorded when warming_up exceeds the epoch budget."""
    net = deepcopy(data[0])
    trainer = Trainer(model=net, optimizer=Adam(lr=0.1),
                      loss_func=MSELoss(), epochs=20)
    trainer.extend(
        TensorConverter(),
        Validator('regress', early_stop=30, trace_order=1, warming_up=0, mae=0))
    trainer.fit(*data[1], *data[1])
    assert trainer.get_checkpoint() == ['mae']

    # warming_up=50 > epochs=20: no checkpoint can ever be taken.
    net = deepcopy(data[0])
    trainer = Trainer(model=net, optimizer=Adam(lr=0.1),
                      loss_func=MSELoss(), epochs=20)
    trainer.extend(
        TensorConverter(),
        Validator('regress', early_stop=30, trace_order=5, warming_up=50, mae=0))
    trainer.fit(*data[1], *data[1])
    assert trainer.get_checkpoint() == []
def test_tensor_converter_3():
    """output_proc: tensors pass through while training, convert to numpy
    arrays otherwise.

    NOTE(review): this definition is shadowed by a later
    `test_tensor_converter_3` in this module (F811) and never runs under
    pytest; it also uses `training=` where the later copy uses
    `is_training=` — confirm which keyword the current API expects.
    """
    converter = TensorConverter()
    src = np.asarray([[1, 2, 3], [4, 5, 6]])
    ref = torch.from_numpy(src)

    # Training mode, no target: y passes through, y_ stays None.
    y, y_ = converter.output_proc(ref, None, training=True)
    assert y_ is None
    assert isinstance(y, torch.Tensor)
    assert y.shape == (2, 3)
    assert torch.equal(y, ref)

    # Training mode with target: both returned as tensors.
    y, y_ = converter.output_proc(ref, ref, training=True)
    assert isinstance(y, torch.Tensor)
    assert isinstance(y_, torch.Tensor)
    assert y.equal(y_)
    assert y.shape == (2, 3)
    assert torch.equal(y, ref)

    # Tuple outputs keep their tuple wrapper.
    y, _ = converter.output_proc((ref, ), None, training=True)
    assert isinstance(y, tuple)
    assert isinstance(y[0], torch.Tensor)
    assert torch.equal(y[0], ref)

    # Eval mode: tensors converted to numpy arrays.
    y, y_ = converter.output_proc(ref, ref, training=False)
    assert isinstance(y, np.ndarray)
    assert isinstance(y_, np.ndarray)
    assert np.all(y == y_)
    assert y.shape == (2, 3)
    assert np.all(y == ref.numpy())

    y, _ = converter.output_proc((ref, ), None, training=False)
    assert isinstance(y, tuple)
    assert isinstance(y[0], np.ndarray)
    assert np.all(y[0] == ref.numpy())
def test_tensor_converter_1():
    """input_proc: ndarray / DataFrame / tensor inputs all convert to the
    same tensor; y=None is preserved.

    NOTE(review): shadowed by a later, extended `test_tensor_converter_1`
    in this module (F811) — this copy never runs under pytest.
    """

    class _Trainer(BaseRunner):
        # Minimal runner stub for the converter's trainer hook.
        def __init__(self):
            super().__init__()
            self.non_blocking = False

        def predict(self, x_, y_):
            return x_, y_

    stub = _Trainer()
    converter = TensorConverter()
    src = np.asarray([[1, 2, 3], [4, 5, 6]])
    frame = pd.DataFrame(src)
    ref = torch.Tensor(src)

    # y=None inputs: x converted, y left as None.
    for x_in in (src, frame, ref):
        x, y = converter.input_proc(x_in, None, trainer=stub)
        assert isinstance(x, torch.Tensor)
        assert x.shape == (2, 3)
        assert torch.equal(x, ref)
        assert y is None

    # Matching y inputs: both converted to equal tensors.
    for x_in, y_in in ((src, src), (frame, frame), (ref, ref)):
        x, y = converter.input_proc(x_in, y_in, trainer=stub)
        assert isinstance(x, torch.Tensor)
        assert x.shape == (2, 3)
        assert torch.equal(x, ref)
        assert torch.equal(y, ref)
def test_persist_1(data):
    """Persist extension: checkpoint files are written, and Trainer.load
    restores a clean trainer with or without fresh training components."""
    net = deepcopy(data[0])
    trainer = Trainer(model=net, optimizer=Adam(lr=0.1),
                      loss_func=MSELoss(), epochs=200)
    trainer.extend(TensorConverter(), Persist('model_dir'))
    trainer.fit(*data[1], *data[1])

    persist = trainer['persist']
    checker = persist._checker
    assert isinstance(persist, Persist)
    assert isinstance(checker.model, torch.nn.Module)
    assert isinstance(checker.describe, dict)
    assert isinstance(checker.files, list)
    expected_files = {
        'model', 'init_state', 'model_structure', 'describe',
        'training_info', 'final_state'
    }
    assert set(checker.files) == expected_files

    # Reload without optimizer/loss: training state comes back empty.
    trainer = Trainer.load(checker)
    assert isinstance(trainer.training_info, pd.DataFrame)
    assert isinstance(trainer.model, torch.nn.Module)
    assert isinstance(trainer._training_info, list)
    assert trainer.optimizer is None
    assert trainer.lr_scheduler is None
    assert trainer.x_val is None
    assert trainer.y_val is None
    assert trainer.validate_dataset is None
    assert trainer._optimizer_state is None
    assert trainer.total_epochs == 0
    assert trainer.total_iterations == 0
    assert trainer.loss_type is None
    assert trainer.loss_func is None

    # Reload from path with fresh training components attached.
    trainer = Trainer.load(from_=checker.path,
                           optimizer=Adam(),
                           loss_func=MSELoss(),
                           lr_scheduler=ExponentialLR(gamma=0.99),
                           clip_grad=ClipValue(clip_value=0.1))
    assert isinstance(trainer._scheduler, ExponentialLR)
    assert isinstance(trainer._optim, Adam)
    assert isinstance(trainer.clip_grad, ClipValue)
    assert isinstance(trainer.loss_func, MSELoss)
def test_tensor_converter_1():
    """input_proc: auto_reshape handling of 1-D inputs (#189), basic
    ndarray/DataFrame/tensor conversion, and x/y dtype overrides."""

    class _Trainer(BaseRunner):
        # Minimal runner stub for the converter's trainer hook.
        def __init__(self):
            super().__init__()
            self.non_blocking = False

        def predict(self, x_, y_):  # noqa
            return x_, y_

    stub = _Trainer()
    vec = [1, 2, 3]
    np_vec = np.asarray(vec)
    series = pd.Series(vec)
    frame_vec = pd.DataFrame(vec)
    src = np.asarray([vec, vec])
    frame = pd.DataFrame(src)
    ref = torch.Tensor(src)

    # --- auto_reshape=False (#189): 1-D arrays/series stay 1-D ---
    converter = TensorConverter(auto_reshape=False)
    x, y = converter.input_proc(np_vec, None, trainer=stub)  # noqa
    assert isinstance(x, torch.Tensor)
    assert x.shape == (3,)
    x, y = converter.input_proc(series, None, trainer=stub)  # noqa
    assert isinstance(x, torch.Tensor)
    assert x.shape == (3,)
    # A one-column DataFrame is already 2-D, so it keeps (3, 1).
    x, y = converter.input_proc(frame_vec, None, trainer=stub)  # noqa
    assert isinstance(x, torch.Tensor)
    assert x.shape == (3, 1)

    # --- default: 1-D inputs are reshaped to a column ---
    converter = TensorConverter()
    for one_d in (np_vec, series, frame_vec):
        x, y = converter.input_proc(one_d, None, trainer=stub)  # noqa
        assert isinstance(x, torch.Tensor)
        assert x.shape == (3, 1)

    # --- normal conversions, y=None preserved ---
    for x_in in (src, frame, ref):
        x, y = converter.input_proc(x_in, None, trainer=stub)  # noqa
        assert isinstance(x, torch.Tensor)
        assert x.shape == (2, 3)
        assert torch.equal(x, ref)
        assert y is None

    # --- normal conversions with matching y ---
    for x_in, y_in in ((src, src), (frame, frame), (ref, ref)):
        x, y = converter.input_proc(x_in, y_in, trainer=stub)  # noqa
        assert isinstance(x, torch.Tensor)
        assert x.shape == (2, 3)
        assert torch.equal(x, ref)
        assert torch.equal(y, ref)

    # --- single x_dtype applies to every member of a tuple input ---
    converter = TensorConverter(x_dtype=torch.long)
    x, y = converter.input_proc((src, src), src, trainer=stub)  # noqa
    assert isinstance(x, tuple)
    assert len(x) == 2
    assert x[0].dtype == torch.long
    assert x[1].dtype == torch.long

    # --- per-member x_dtype tuple plus y_dtype ---
    converter = TensorConverter(x_dtype=(torch.long, torch.float32),
                                y_dtype=torch.long)
    x, y = converter.input_proc((src, src), src, trainer=stub)  # noqa
    assert isinstance(x, tuple)
    assert len(x) == 2
    assert x[0].dtype == torch.long
    assert x[1].dtype == torch.float32
    assert y.dtype == torch.long

    # --- same per-member dtypes, DataFrame inputs ---
    converter = TensorConverter(x_dtype=(torch.long, torch.float32))
    x, y = converter.input_proc((frame, frame), frame, trainer=stub)  # noqa
    assert isinstance(x, tuple)
    assert len(x) == 2
    assert x[0].dtype == torch.long
    assert x[1].dtype == torch.float32

    # for tensor input, dtype change will never be executed
    converter = TensorConverter(x_dtype=(torch.long, torch.long))
    x, y = converter.input_proc((ref, ref), ref, trainer=stub)  # noqa
    assert isinstance(x, tuple)
    assert len(x) == 2
    assert x[0].dtype == torch.float32
    assert x[1].dtype == torch.float32
def test_tensor_converter_3():
    """output_proc: pass-through while training, numpy conversion at
    inference, plus argmax / probability post-processing."""
    src = np.asarray([[1, 2, 3], [4, 5, 6]])
    ref = torch.from_numpy(src)

    converter = TensorConverter()

    # Training mode, no target: y passes through, y_ stays None.
    y, y_ = converter.output_proc(ref, None, is_training=True)
    assert y_ is None
    assert isinstance(y, torch.Tensor)
    assert y.shape == (2, 3)
    assert torch.equal(y, ref)

    # Training mode with target: both returned as tensors.
    y, y_ = converter.output_proc(ref, ref, is_training=True)
    assert isinstance(y, torch.Tensor)
    assert isinstance(y_, torch.Tensor)
    assert y.equal(y_)
    assert y.shape == (2, 3)
    assert torch.equal(y, ref)

    # Tuple outputs keep their tuple wrapper.
    y, _ = converter.output_proc((ref, ), None, is_training=True)
    assert isinstance(y, tuple)
    assert isinstance(y[0], torch.Tensor)
    assert torch.equal(y[0], ref)

    # Inference mode: tensors become numpy arrays.
    y, y_ = converter.output_proc(ref, ref, is_training=False)
    assert isinstance(y, np.ndarray)
    assert isinstance(y_, np.ndarray)
    assert np.all(y == y_)
    assert y.shape == (2, 3)
    assert np.all(y == ref.numpy())

    y, _ = converter.output_proc((ref, ), None, is_training=False)
    assert isinstance(y, tuple)
    assert isinstance(y[0], np.ndarray)
    assert np.all(y[0] == ref.numpy())

    # argmax=True: predictions collapse to class indices; targets do not.
    converter = TensorConverter(argmax=True)
    y, y_ = converter.output_proc(ref, ref, is_training=False)
    assert isinstance(y, np.ndarray)
    assert isinstance(y_, np.ndarray)
    assert y.shape == (2, )
    assert y_.shape == (2, 3)
    assert np.all(y == np.argmax(src, 1))

    y, y_ = converter.output_proc((ref, ref), None, is_training=False)
    assert isinstance(y, tuple)
    assert y_ is None
    assert y[0].shape == (2, )
    assert y[0].shape == y[1].shape
    assert np.all(y[0] == np.argmax(src, 1))

    # probability=True: predictions become row-wise softmax.
    converter = TensorConverter(probability=True)
    y, y_ = converter.output_proc(ref, ref, is_training=False)
    assert isinstance(y, np.ndarray)
    assert isinstance(y_, np.ndarray)
    assert y.shape == (2, 3)
    assert y_.shape == (2, 3)
    assert np.all(y == softmax(src, 1))

    y, y_ = converter.output_proc((ref, ref), None, is_training=False)
    assert isinstance(y, tuple)
    assert y_ is None
    assert y[0].shape == (2, 3)
    assert y[0].shape == y[1].shape
    assert np.all(y[0] == softmax(src, 1))