def test_AETrainer():
    """Test the AETrainer class
    """
    net = MultiDAE_net([1, 2], [2, 1], .1)
    model = AETrainer(net)
    assert hasattr(model, "network"), "model should have the attribute network"
    assert hasattr(model, "device"), "model should have the attribute device"
    assert hasattr(model, "learning_rate"), "model should have the attribute learning_rate"
    assert hasattr(model, "optimizer"), "model should have the attribute optimizer"
    assert model.learning_rate == 1e-3, "the learning rate should be 1e-3"
    assert model.network == net, "the network should be the same as the parameter"
    assert model.device == torch.device("cpu"), "the device should be cpu"
    assert isinstance(model.optimizer, torch.optim.Adam), "optimizer should be of Adam type"
    assert str(model) == repr(model), "repr and str should have the same effect"

    # Only one of the four entries differs (by 1), so the expected (mean squared error)
    # loss is 1 / 4 = .25
    gt = torch.FloatTensor([[1, 1], [2, 1]])
    pred = torch.FloatTensor([[1, 1], [1, 1]])
    assert model.loss_function(pred, gt) == torch.FloatTensor([.25]), "the loss should be .25"

    # Tiny 2x2 sparse training matrix and its sampler
    values = np.array([1., 1., 1.])
    rows = np.array([0, 0, 1])
    cols = np.array([0, 1, 1])
    train = csr_matrix((values, (rows, cols)))
    sampler = DataSampler(train, batch_size=1, shuffle=False)

    # Training must change the predictions (same seed, different outputs)
    x = torch.FloatTensor([[1, 1], [2, 2]])
    model.predict(x, True)
    torch.manual_seed(12345)
    out_1 = model.predict(x, False)[0]
    model.train(sampler, num_epochs=10, verbose=4)
    torch.manual_seed(12345)
    out_2 = model.predict(x, False)[0]
    assert not torch.all(out_1.eq(out_2)), "the outputs should be different"

    # Saving and reloading the model must reproduce the same predictions
    tmp = tempfile.NamedTemporaryFile()
    model.save_model(tmp.name, 1)
    net = MultiDAE_net([1, 2], [2, 1], .1)
    model2 = AETrainer(net)
    model2.load_model(tmp.name)
    torch.manual_seed(12345)
    out_1 = model.predict(x, False)[0]
    torch.manual_seed(12345)
    out_2 = model2.predict(x, False)[0]
    assert torch.all(out_1.eq(out_2)), "the outputs should be the same"

    # Validation returns a numpy array with one metric value per user (2 users here)
    sampler = DataSampler(train, train, batch_size=1, shuffle=False)
    res = model.validate(sampler, "ndcg@1")
    assert isinstance(res, np.ndarray), "results should be a numpy array"
    assert len(res) == 2, "results should be of length 2"
def test_TorchNNTrainer():
    """Test the TorchNNTrainer class
    """
    net = MultiDAE_net([1, 2], [2, 1], .1)
    model = TorchNNTrainer(net)
    assert hasattr(model, "network"), "model should have the attribute network"
    assert hasattr(model, "device"), "model should have the attribute device"
    assert hasattr(model, "learning_rate"), "model should have the attribute learning_rate"
    assert hasattr(model, "optimizer"), "model should have the attribute optimizer"
    assert model.learning_rate == 1e-3, "the learning rate should be 1e-3"
    assert model.network == net, "the network should be the same as the parameter"
    assert model.device == torch.device("cpu"), "the device should be cpu"
    assert model.optimizer is None, "optimizer should be None"
    assert str(model) == repr(model)

    # TorchNNTrainer is the abstract base trainer: each of these methods must raise
    # NotImplementedError. Each call gets its own pytest.raises block so that every
    # method is actually exercised (a single block would stop at the first raise).
    x = torch.FloatTensor([[1, 1], [2, 2]])
    with pytest.raises(NotImplementedError):
        model.loss_function(None, None)
    with pytest.raises(NotImplementedError):
        model.train(None, None)
    with pytest.raises(NotImplementedError):
        model.train_epoch(0, None)
    with pytest.raises(NotImplementedError):
        model.train_batch(0, None, None)
    with pytest.raises(NotImplementedError):
        model.predict(x)
def test_MultiDAE_net():
    """Test the MultiDAE_net class
    """
    net = MultiDAE_net([1, 2], [2, 1], .1)
    x = torch.FloatTensor([[1, 1], [2, 2]])
    y = net(x)
    assert hasattr(net, "enc_dims"), "Missing enc_dims attribute"
    assert hasattr(net, "dec_dims"), "Missing dec_dims attribute"
    assert hasattr(net, "dropout"), "Missing dropout attribute"
    assert hasattr(net, "dec_layers"), "Missing dec_layers attribute"
    assert hasattr(net, "enc_layers"), "Missing enc_layers attribute"
    assert isinstance(net.dropout, torch.nn.Dropout), "dropout must be a torch.nn.Dropout"
    assert net.dropout.p == .1, "dropout probability must be equal to .1"
    assert isinstance(y, torch.FloatTensor), "y should be a torch.FloatTensor"
    assert y.shape == x.shape, "The shape of x and y should be the same"
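
# Note: the three tests above all build the same toy MultiDAE_net([1, 2], [2, 1], .1).
# A pytest fixture such as the sketch below could factor out that setup; this is only a
# suggestion, not part of the original test suite, and the name "toy_net" is hypothetical.
@pytest.fixture
def toy_net():
    """Return the small MultiDAE_net instance shared by these tests."""
    return MultiDAE_net([1, 2], [2, 1], .1)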