Example 1
def test_predict(self):
    model = SingleInputModel()
    trainer = Trainer(model,
                      nn.CrossEntropyLoss(),
                      Adam(model.parameters()),
                      metrics=[Accuracy()])
    trainer.predict(train_x)
Example 2
def test_train_on_generator(self, generator, validation_data):
    t = Trainer(multi_input_model, nn.CrossEntropyLoss(),
                _get_optim(multi_input_model))
    t.train_on_generator(generator,
                         steps_per_epoch=31,
                         validation_data=validation_data,
                         validation_steps=31)
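For reference, train_on_generator consumes a batch generator instead of in-memory tensors, drawing steps_per_epoch batches per epoch and validation_steps batches from validation_data. A minimal sketch of such a generator, assuming each step yields an (inputs, targets) pair; the two-input structure mirrors multi_input_model, and all names and shapes here are illustrative only:

import torch

def batch_generator(batch_size=32):
    # Hypothetical generator (not from the library): yields
    # ([input_a, input_b], targets) batches forever; steps_per_epoch
    # tells the trainer where one epoch ends.
    while True:
        input_a = torch.randn(batch_size, 10)
        input_b = torch.randn(batch_size, 10)
        targets = torch.randint(0, 2, (batch_size,))
        yield [input_a, input_b], targets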
Example 3
def test_evaluate(self):
    model = SingleInputModel()
    trainer = Trainer(model,
                      nn.CrossEntropyLoss(),
                      Adam(model.parameters()),
                      metrics=[Accuracy()])
    trainer.evaluate(train_x, train_y)
Example 4
def test_train(self):
    model = SingleInputModel()
    trainer = Trainer(model,
                      nn.CrossEntropyLoss(),
                      Adam(model.parameters()),
                      metrics=[Accuracy()])
    history = trainer.train(train_x, train_y, epochs=2)
    train_logs = history.train_logs
    assert train_logs['Loss'][0] > train_logs['Loss'][1]
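The assertion above implies that history.train_logs behaves like a mapping from metric name to per-epoch values, so the recorded loss curve can be inspected directly. A small sketch under that assumption:

# Assumes train_logs maps metric names to per-epoch sequences,
# as the assertion above implies.
for epoch, loss in enumerate(history.train_logs['Loss']):
    print(f"epoch {epoch}: loss {loss:.4f}")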
Example 5
def test_train_with_validation_split(self):
    model = SingleInputModel()
    trainer = Trainer(model,
                      nn.CrossEntropyLoss(),
                      Adam(model.parameters()),
                      metrics=[Accuracy()])
    history = trainer.train(train_x,
                            train_y,
                            epochs=2,
                            validation_split=0.2)
    train_logs, test_logs = history.train_logs, history.test_logs

    assert train_logs['Loss'][0] > train_logs['Loss'][1]
    assert test_logs['Loss'][0] > test_logs['Loss'][1]
Example 6
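A fuller end-to-end example: a small convolutional network is trained on MNIST with 20% of the data held out for validation. Note that the model ends in log_softmax, which pairs with the nn.NLLLoss criterion passed to the Trainer.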
import torch
import torch.nn as nn
import torch.nn.functional as F

# train_data / test_data are assumed to be torchvision MNIST datasets, whose
# legacy .train_data/.train_labels and .test_data/.test_labels attributes
# expose the raw tensors.
train_X, train_Y = train_data.train_data, train_data.train_labels
test_X, test_Y = test_data.test_data, test_data.test_labels

# Conv layers require 4D inputs (N, C, H, W), so add a channel dimension
train_X = torch.unsqueeze(train_X, 1).float()
test_X = torch.unsqueeze(test_X, 1).float()


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Net()

t = Trainer(model, nn.NLLLoss(), torch.optim.Adam(model.parameters()))
t.train(train_X, train_Y, validation_split=0.2, batch_size=128)
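The test tensors prepared above go unused in this snippet; scoring them would follow the evaluate pattern from Examples 3 and 12, e.g.:

# Sketch: score the held-out test set with the evaluate API
# shown in Examples 3 and 12.
t.evaluate(test_X, test_Y, batch_size=128)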
Example 7
def test_train_generator(self):
    model = SingleInputModel()
    trainer = Trainer(model,
                      nn.CrossEntropyLoss(),
                      Adam(model.parameters()),
                      metrics=[Accuracy()])
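The snippet ends after constructing the trainer; presumably a generator-based training call follows. A hypothetical continuation modeled on Example 2 (generator and the step count are placeholders):

# Hypothetical continuation (not in the source), mirroring Example 2's API:
trainer.train_on_generator(generator, steps_per_epoch=31)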
Example 8
def test_train_validation_split(self, model, data, validation_split):
    t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model))
    t.train(data,
            np_target,
            validation_split=validation_split,
            batch_size=128)
Example 9
def test_unequal_samples(self, input_data, target_data):
    with pytest.raises(ValueError):
        t = Trainer(multi_input_model, nn.CrossEntropyLoss(),
                    _get_optim(multi_input_model))
        t.train(input_data, target_data)
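For context, "unequal samples" means the per-input arrays disagree on their number of rows, which the trainer is expected to reject with a ValueError. An illustrative fixture (shapes are made up):

import numpy as np

# Hypothetical fixture: two inputs with mismatched sample counts (100 vs. 90)
input_data = [np.random.rand(100, 10).astype('float32'),
              np.random.rand(90, 10).astype('float32')]
target_data = np.random.randint(0, 2, 100)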
Example 10
def test_predict_on_generator(self, generator):
    t = Trainer(multi_input_model, nn.CrossEntropyLoss(),
                _get_optim(multi_input_model))
    t.predict_on_generator(generator, steps_per_epoch=31)
Example 11
def test_predict(self, model, data, classes):
    t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model))
    t.predict(data, classes=classes, batch_size=128)
Example 12
def test_evaluate(self, model, data):
    t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model))
    t.evaluate(data, np_target, batch_size=128)
Example 13
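The same log_softmax/NLL setup, this time loading MNIST from .npz archives: the images gain a channel axis, are cast to float32 and scaled to [0, 1], and the labels are flattened to 1-D before training.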
        return F.log_softmax(x, dim=1)  # tail of the (elided) Net.forward definition


train_data = np.load(os.path.abspath("./data/mnist_train.npz"))
train_x, train_y = train_data['X_train'], train_data['Y_train']

train_x = np.expand_dims(train_x, 1).astype('float32')
train_y = train_y.reshape(-1)

test_data = np.load(os.path.abspath("./data/mnist_test.npz"))
test_x, test_y = test_data['X_test'], test_data['Y_test']

test_x = np.expand_dims(test_x, 1).astype('float32')
test_y = test_y.reshape(-1)

train_x /= 255.0
test_x /= 255.0

model = Net()
trainer = Trainer(model,
                  F.nll_loss,
                  Adam(model.parameters()),
                  metrics=[Accuracy()])
history = trainer.train(train_x,
                        train_y,
                        batch_size=64,
                        epochs=2,
                        validation_split=0.2)
print(history.train_logs)
print(history.test_logs)
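The test split loaded above can be scored the same way once training finishes; a sketch assuming evaluate accepts NumPy arrays just as train does:

# Score the held-out MNIST test set (evaluate as in Examples 3 and 12).
trainer.evaluate(test_x, test_y, batch_size=64)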