Code example #1
def test_relu_output_size():
    x = torch.randn(2, 2)
    l1 = Dense(3, input_dim=2)
    l2 = Activation('relu')

    y = l1.forward(x)
    y = l2.forward(y)

    assert y.size() == (2, 3)
    assert (y.data >= 0).sum() == 6
Code example #2
def test_model_fit_unknown_loss():
    x = torch.rand(20, 4)
    y = torch.rand(20, 10)

    model = Model(Dense(10, input_dim=x.size()[-1]), Activation('relu'),
                  Dense(5), Activation('relu'), Dense(y.size()[-1]))

    assert len(model.params) > 0

    with pytest.raises(Exception) as e:
        model.fit(x, y, loss='UNKNOWN_TEST', batch_size=10, epochs=5)
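The fit call above is expected to fail because 'UNKNOWN_TEST' is not a recognised loss name. For illustration only (the dispatcher and registry below are assumptions, not aorun's actual internals), a string-to-callable loss lookup that raises on unknown names might look like this:

import torch

def mse(y_true, y_pred):
    return torch.mean((y_true - y_pred) ** 2)

# Hypothetical registry; callables are passed through untouched.
LOSSES = {'mse': mse, 'mean_squared_error': mse}

def get_loss(name_or_fn):
    if callable(name_or_fn):
        return name_or_fn
    if name_or_fn not in LOSSES:
        raise ValueError('Unknown loss: {}'.format(name_or_fn))
    return LOSSES[name_or_fn]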
Code example #3
def test_model_adam_optimizer():
    X = np.random.normal(size=[10, 10]).astype('float32')
    y = np.random.normal(size=[10, 1]).astype('float32')

    model = Model(Dense(10, input_dim=X.shape[-1]), Activation('relu'),
                  Dense(5), Activation('relu'), Dense(y.shape[-1]))
    history = model.fit(X, y=y, loss='mse', optimizer='adam', epochs=10)

    y_pred = model.predict(X)
    assert type(y_pred) is np.ndarray

    assert len(history['loss']) == 10
    assert all(type(v) is float for v in history['loss'])
    assert history['loss'] == sorted(history['loss'], reverse=True)
Code example #4
def test_model_simple_fit():
    x = torch.rand(20, 4)
    y = torch.rand(20, 10)

    model = Model(Dense(10, input_dim=x.size()[-1]), Activation('relu'),
                  Dense(5), Activation('relu'), Dense(y.size()[-1]))

    opt = SGD(lr=0.01, momentum=0.9)
    loss = mean_squared_error  # callable loss, presumably imported from aorun.losses
    history = model.fit(x, y, loss=loss, optimizer=opt, epochs=10, verbose=1)

    assert len(history['loss']) == 10
    assert all(type(v) is float for v in history['loss'])
    assert history['loss'] == sorted(history['loss'], reverse=True)
Code example #5
def test_model_validation_data():
    X = np.random.normal(size=[10, 10]).astype('float32')
    y = np.random.normal(size=[10, 1]).astype('float32')

    model = Model(Dense(10, input_dim=X.shape[-1]), Activation('relu'),
                  Dense(5), Activation('relu'), Dense(y.shape[-1]))
    history = model.fit(X, y=y, loss='mse', val_data=(X, y))

    y_pred = model.predict(X)
    assert type(y_pred) is np.ndarray

    assert 'loss' in history
    assert 'val_loss' in history
    assert all(type(v) is float for v in history['loss'])
    assert all(type(v) is float for v in history['val_loss'])
    assert history['loss'] == sorted(history['loss'], reverse=True)
Code example #6
def test_model_custom_loss():
    x = torch.rand(20, 4)
    y = torch.rand(20, 10)

    model = Model(Dense(10, input_dim=x.size()[-1]), Activation('relu'),
                  Dense(5), Activation('relu'), Dense(y.size()[-1]))

    opt = SGD(lr=0.01, momentum=0.9)

    def mae(y_true, y_pred):
        return torch.mean(torch.abs(y_true - y_pred))

    history = model.fit(x, y, loss=mae, optimizer=opt, epochs=10)
    assert len(history['loss']) == 10
    assert all(type(v) is float for v in history['loss'])
    assert history['loss'] == sorted(history['loss'], reverse=True)
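Any callable with the (y_true, y_pred) -> scalar tensor signature can be passed as loss in the same way. A second illustrative loss written in that style (the helper below is an assumption for demonstration, not part of aorun):

import torch

def smooth_l1(y_true, y_pred, beta=1.0):
    # Quadratic for small residuals, linear for large ones.
    diff = torch.abs(y_true - y_pred)
    quadratic = torch.clamp(diff, max=beta)
    linear = diff - quadratic
    return torch.mean(0.5 * quadratic ** 2 / beta + linear)

# history = model.fit(x, y, loss=smooth_l1, optimizer=opt, epochs=10)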
Code example #7
def test_model_conv2d_dropout():
    X = np.random.normal(size=[10, 3, 10, 10]).astype('float32')
    y = np.random.normal(size=[10, 1]).astype('float32')

    model = Model(Conv2D(4, kernel_size=(3, 3), input_dim=X.shape[1:]),
                  Flatten(), Dense(5), Activation('relu'), Dense(5),
                  Dropout(0.5), Activation('relu'), Dense(y.shape[-1]))
    history = model.fit(X, y=y, loss='mse', epochs=10, val_data=(X, y))

    y_pred = model.predict(X)
    assert type(y_pred) is np.ndarray

    assert 'loss' in history
    assert 'val_loss' in history
    assert all(type(v) is float for v in history['loss'])
    assert all(type(v) is float for v in history['val_loss'])
    assert history['val_loss'] == sorted(history['val_loss'], reverse=True)
Code example #8
def test_model_add_layers():
    model = Model()
    model.add(Dense(10))
    model.add(Activation('relu'))
    model.add(Dense(1))

    assert len(model.layers) == 3
    assert type(model.layers[0]) == Dense
    assert type(model.layers[1]) == Activation
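Either construction style (add() here, or passing layers straight to the constructor as in code example #13) produces the same layer list, and the resulting model is trained like the earlier examples. A minimal end-to-end sketch with arbitrary shapes:

import numpy as np
from aorun.models import Model
from aorun.layers import Dense
from aorun.layers import Activation

X = np.random.normal(size=[20, 4]).astype('float32')
y = np.random.normal(size=[20, 1]).astype('float32')

model = Model()
model.add(Dense(10, input_dim=X.shape[-1]))
model.add(Activation('relu'))
model.add(Dense(y.shape[-1]))
history = model.fit(X, y, loss='mse')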
Code example #9
def test_model_recurrent_time_distributed():
    X = np.random.normal(size=[2, 3, 4]).astype('float32')
    y = np.random.normal(size=[2, 3, 10]).astype('float32')

    model = Model(
        Recurrent(units=2, length=3, input_dim=4),
        Activation('relu'),
        TimeDistributed(Dense(units=10)),
    )
    history = model.fit(X, y, loss='mse')
    y_pred = model.predict(X)
    assert history['loss'] == sorted(history['loss'], reverse=True)
Code example #10
def test_model_recurrent():
    X = np.random.normal(size=[2, 3, 4]).astype('float32')
    y = np.random.normal(size=[2, 3, 2]).astype('float32')

    model = Model(Recurrent(units=2, length=3, input_dim=4),
                  Activation('relu'))
    history = model.fit(X, y, loss='mse')

    y_pred = model.predict(X)
    assert type(y_pred) is np.ndarray

    assert 'loss' in history
    assert history['loss'] == sorted(history['loss'], reverse=True)
Code example #11
File: mnist.py Project: hedgefair/aorun
# Imports for this excerpt; the datasets helper and the exact layer module
# paths are assumed from the rest of the aorun examples.
import numpy as np
from sklearn import metrics
from aorun import datasets
from aorun.models import Model
from aorun.layers import Conv2D, Flatten, Dense, Dropout, Activation

(X, y), (X_test, y_test) = datasets.load_mnist()
X = X / 127.0
X_test = X_test / 127.0
y = np.eye(y.max() + 1)[y]
y_test = np.eye(y_test.max() + 1)[y_test]
print(X.shape, X_test.shape)

X = X.astype('float32')
X_test = X_test.astype('float32')
y = y.astype('float32')
y_test = y_test.astype('float32')

model = Model(
    Conv2D(8, kernel_size=(3, 3), input_dim=X.shape[1:]),
    Flatten(),
    Activation('relu'),
    Dropout(0.5),
    Dense(100),
    Activation('relu'),
    Dropout(0.5),
    Dense(y_test.shape[-1]),
    Activation('softmax')
)

loss = 'categorical_crossentropy'
history = model.fit(X, y, loss=loss, val_data=(X_test, y_test))

y_pred = model.predict(X_test)
acc = metrics.accuracy_score(y_test.argmax(axis=1), y_pred.argmax(axis=1))
print('Classes:', y.shape[1])
print('Accuracy:', acc)
Code example #12
def test_layer_get_params():
    l = Dense(3, input_dim=3)
    assert len(l.params) == 2

    l = Activation('relu')
    assert len(l.params) == 0
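The two entries for Dense are its weight and bias tensors; activation layers carry no parameters. A small counting sketch (assuming each entry in params exposes size() like the tensors used elsewhere in these tests):

import numpy as np
from aorun.layers import Dense

l = Dense(3, input_dim=3)
# 3x3 weights plus 3 biases = 12 scalars, whichever way the weight matrix is oriented.
n_scalars = sum(int(np.prod(tuple(p.size()))) for p in l.params)
assert n_scalars == 12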
Code example #13
def test_model_constructor_layers():
    model = Model(Dense(10), Activation('relu'), Dense(1))

    assert len(model.layers) == 3
    assert type(model.layers[0]) == Dense
    assert type(model.layers[1]) == Activation
Code example #14
File: classification.py Project: hedgefair/aorun
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

import numpy as np
from aorun.models import Model
from aorun.layers import Dense
from aorun.layers import Activation

X, y = datasets.load_digits(return_X_y=True)
X = X.astype('float32')
y = np.eye(y.max() + 1)[y].astype('float32')

X = StandardScaler().fit_transform(X)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(X_train.shape, y_train.shape)

model = Model(Dense(100, input_dim=X_train.shape[-1]), Activation('relu'),
              Dense(100), Activation('relu'), Dense(y_test.shape[-1]),
              Activation('softmax'))

loss = 'categorical_crossentropy'
history = model.fit(X_train, y_train, loss=loss, val_split=0.1)

y_pred = model.predict(X_test)
acc = metrics.accuracy_score(y_test.argmax(axis=1), y_pred.argmax(axis=1))
print('Classes:', y.shape[1])
print('Accuracy:', acc)
Code example #15
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

import numpy as np
from aorun.models import Model
from aorun.layers import ProbabilisticDense
from aorun.layers import Activation
from aorun.optimizers import SGD
from aorun.losses import variational_loss

X, y = datasets.load_digits(return_X_y=True)
X = X.astype('float32')
y = np.eye(y.max() + 1)[y].astype('float32')
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(X_train.shape, y_train.shape)

model = Model(ProbabilisticDense(200, input_dim=X_train.shape[-1]),
              Activation('relu'), ProbabilisticDense(200), Activation('relu'),
              ProbabilisticDense(y_test.shape[-1]), Activation('softmax'))

opt = SGD(lr=0.1, momentum=0.9)
loss = variational_loss(model, 'categorical_crossentropy')
history = model.fit(X_train, y_train, epochs=20, loss=loss, optimizer=opt)

y_pred = model.predict(X_test)
acc = metrics.accuracy_score(y_test.argmax(axis=1), y_pred.argmax(axis=1))
print('test samples:', len(y_test))
print('classes:', len(y_test[0]))
print('Accuracy:', acc)
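variational_loss wraps the base loss for models built from ProbabilisticDense layers. Conceptually (a generic sketch of the usual variational objective, not aorun's actual implementation), the quantity minimized is the data loss plus a KL penalty between the weight posterior and its prior:

def variational_objective(data_loss, kl_terms, n_batches=1):
    # Generic ELBO-style objective: task loss plus the KL divergence of the
    # weight posterior from the prior, scaled per mini-batch.
    return data_loss + sum(kl_terms) / n_batches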
Code example #16
File: regression.py Project: hedgefair/aorun
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

from aorun.models import Model
from aorun.layers import Dense
from aorun.layers import Activation
from aorun.optimizers import SGD

X, y = datasets.load_boston(return_X_y=True)
X = StandardScaler().fit_transform(X).astype('float32')
y = StandardScaler().fit_transform(y.reshape(-1, 1)).astype('float32')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

model = Model(Dense(100, input_dim=X_train.shape[-1]), Activation('relu'),
              Dense(100), Activation('relu'), Dense(1))

sgd = SGD(lr=0.1)
history = model.fit(X_train, y_train, loss='mse', optimizer=sgd, epochs=100)
y_pred = model.predict(X_test)
print('r2_score:', metrics.r2_score(y_test, y_pred))
print('mean_absolute_error:', metrics.mean_absolute_error(y_test, y_pred))
print('mean_squared_error:', metrics.mean_squared_error(y_test, y_pred))
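Because y was standardized above, the three error figures are in standardized units. A standalone sketch of mapping such predictions back to the original scale with a kept target scaler (this step is not part of the script above):

import numpy as np
from sklearn.preprocessing import StandardScaler

y_raw = np.array([[10.0], [20.0], [30.0]], dtype='float32')
scaler = StandardScaler().fit(y_raw)
y_std = scaler.transform(y_raw)            # what the model would be trained on
y_back = scaler.inverse_transform(y_std)   # back to the original units
assert np.allclose(y_back, y_raw, atol=1e-4)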
Code example #17
def test_layer_relu():
    x = Variable(torch.randn(10, 10))
    l = Activation('relu')
    assert np.any(x.data.numpy() < 0.0)
    assert np.all(l.forward(x).data.numpy() >= 0.0)
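The same property cross-checked against a plain NumPy definition of ReLU (elementwise maximum with zero); this is only a semantic check, not aorun code:

import numpy as np
import torch
from torch.autograd import Variable
from aorun.layers import Activation

x = Variable(torch.randn(10, 10))
expected = np.maximum(x.data.numpy(), 0.0)
actual = Activation('relu').forward(x).data.numpy()
assert np.allclose(actual, expected)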
Code example #18
def test_layer_softmax():
    x = Variable(torch.randn(10, 10))
    l = Activation('softmax')
    sum_softmax_x = torch.sum(l.forward(x), dim=1).data.numpy()
    assert np.all(np.abs(sum_softmax_x - 1) <= 1e-6)
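The same normalization checked against a manual NumPy softmax (subtracting the row maximum for numerical stability); illustrative only:

import numpy as np

def np_softmax(a):
    e = np.exp(a - a.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

a = np.random.randn(10, 10).astype('float32')
assert np.allclose(np_softmax(a).sum(axis=1), 1.0, atol=1e-6)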
Code example #19
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

import torch
from aorun.models import Model
from aorun.layers import ProbabilisticDense
from aorun.layers import Activation
from aorun.optimizers import SGD
from aorun.losses import variational_loss

X, y = datasets.load_boston(return_X_y=True)
X = X.astype('float32')
y = y.astype('float32')
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

model = Model()
model.add(ProbabilisticDense(100, input_dim=X_train.shape[-1]))
model.add(Activation('relu'))
model.add(ProbabilisticDense(100))
model.add(Activation('relu'))
model.add(ProbabilisticDense(1))

sgd = SGD(lr=0.1)
loss = variational_loss(model, 'mean_squared_error')
history = model.fit(X_train, y_train, loss=loss, optimizer=sgd, epochs=100)
y_pred = model.predict(X_test)
print('r2_score:', metrics.r2_score(y_test, y_pred))
print('mean_absolute_error:', metrics.mean_absolute_error(y_test, y_pred))
print('mean_squared_error:', metrics.mean_squared_error(y_test, y_pred))