Example #1
def get_categorical_model(input_neurons, output_neurons, layers=None):
    """
    Create a fully connected network with a categorical cross-entropy loss.
    :param input_neurons: number of input neurons
    :param output_neurons: number of output neurons
    :param layers: list of hidden layer sizes; defaults to three hidden layers of 25 neurons each
    :return: Sequential network with a categorical cross-entropy loss attached
    """
    if layers is None:
        layers = [25, 25, 25]

    default_act = 'relu'
    model = Sequential()

    # Chain the hidden layers without mutating the caller's `layers` list.
    sizes = [input_neurons] + layers
    for prev_size, size in zip(sizes[:-1], sizes[1:]):
        model.add(Linear(out=size, input_size=prev_size, activation=default_act))

    # model.add(Dropout(prob=0.2))
    model.add(Linear(out=output_neurons, input_size=layers[-1], activation='softmax'))

    # Attach the categorical cross-entropy loss to the Sequential model
    ce = LossCrossEntropy()
    model.loss = ce
    return model
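# Usage sketch (hypothetical sizes): a classifier taking 4 input features and
# producing 3 class probabilities, with the default [25, 25, 25] hidden layers.
clf = get_categorical_model(input_neurons=4, output_neurons=3)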
Example #2
    def test_Sequential(self):
        np.random.seed(42)
        torch.manual_seed(42)

        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            alpha = 0.9
            torch_layer = torch.nn.BatchNorm1d(n_in,
                                               eps=BatchNormalization.EPS,
                                               momentum=1. - alpha,
                                               affine=True)
            torch_layer.bias.data = torch.from_numpy(
                np.random.random(n_in).astype(np.float32))
            custom_layer = Sequential()
            bn_layer = BatchNormalization(alpha)
            bn_layer.moving_mean = torch_layer.running_mean.numpy().copy()
            bn_layer.moving_variance = torch_layer.running_var.numpy().copy()
            custom_layer.add(bn_layer)
            scaling_layer = ChannelwiseScaling(n_in)
            scaling_layer.gamma = torch_layer.weight.data.numpy()
            scaling_layer.beta = torch_layer.bias.data.numpy()
            custom_layer.add(scaling_layer)
            custom_layer.train()

            layer_input = np.random.uniform(-5, 5, (batch_size, n_in)).astype(
                np.float32)
            next_layer_grad = np.random.uniform(
                -5, 5, (batch_size, n_in)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input),
                                       requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(
                np.allclose(torch_layer_output_var.data.numpy(),
                            custom_layer_output,
                            atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.backward(layer_input,
                                                      next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(
                np.allclose(torch_layer_grad_var.data.numpy(),
                            custom_layer_grad,
                            atol=1e-5))

            # 3. check layer parameters grad
            weight_grad, bias_grad = custom_layer.getGradParameters()[1]
            torch_weight_grad = torch_layer.weight.grad.data.numpy()
            torch_bias_grad = torch_layer.bias.grad.data.numpy()
            self.assertTrue(
                np.allclose(torch_weight_grad, weight_grad, atol=1e-6))
            self.assertTrue(np.allclose(torch_bias_grad, bias_grad, atol=1e-6))
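# The test above relies on the fact that an affine BatchNorm1d is plain batch
# normalization followed by a per-channel affine map (gamma * x_hat + beta).
# A minimal NumPy sketch of that forward pass in training mode, written only
# for illustration (not the library's BatchNormalization implementation):
def batchnorm_affine_forward(x, gamma, beta, eps=1e-5):
    # x: (batch, channels); statistics are taken over the batch dimension
    mean = x.mean(axis=0)
    var = x.var(axis=0)
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta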
Example #3
    def test_load_model(self):
        """

        :return:
        """
        model = Sequential()
        model.add(Linear(input_size=2, out=24, activation='tanh'))
        model.add(Linear(input_size=24, out=2, activation='tanh'))

        file_name = "model.h5py"
Example #4
    def test_save_model(self):
        """

        :return:
        """
        model = Sequential()
        model.add(Linear(input_size=2, out=24, activation='tanh'))
        model.add(Linear(input_size=24, out=2, activation='tanh'))

        pass
Example #5
# Reuse the cached MNIST arrays when the .npz file already exists; otherwise
# download the dataset and cache it (assumes `import os` earlier in the script).
if os.path.exists('mnist.npz'):
    with np.load('mnist.npz', 'r', allow_pickle=True) as data:
        X = data['X']
        y = data['y']
else:
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
    # X, y = mnist.data / 255.0, mnist.target
    np.savez('mnist.npz', X=X, y=y)

print("data shape:", X.shape, y.shape)

X, Y = X / 255, one_hot(y)
train_x, test_x, train_y, test_y = X[:60000], X[60000:], Y[:60000], Y[60000:]
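# one_hot above is assumed to turn the label vector into a (n_samples, 10)
# indicator matrix; a minimal NumPy version for reference (the helper actually
# used by this script may differ):
def one_hot_sketch(labels, n_classes=10):
    labels = np.asarray(labels, dtype=int)
    encoded = np.zeros((labels.shape[0], n_classes), dtype=np.float32)
    encoded[np.arange(labels.shape[0]), labels] = 1.0
    return encoded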

#### build model
net = Sequential()
net.add(Dense(784, 400))
net.add(ReLU())
# net.add(Sigmoid())
# net.add(SoftPlus())
# net.add(Dropout())
net.add(Dense(400, 128))
net.add(ReLU())
# net.add(BatchMeanSubtraction())
# net.add(ReLU())
# net.add(Dropout())
net.add(Dense(128, 10))
net.add(SoftMax())

# criterion = MultiLabelCriterion()  # loss function
criterion = CrossEntropyCriterion()
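# CrossEntropyCriterion pairs with the SoftMax output above: for one-hot
# targets the loss is the mean negative log-likelihood. A hedged NumPy sketch
# (the course's criterion may differ in reduction or clipping details):
def cross_entropy_sketch(probs, targets, eps=1e-12):
    # probs, targets: (batch, n_classes); targets are one-hot rows
    return -np.mean(np.sum(targets * np.log(probs + eps), axis=1))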
###############################
# Adam-style optimizer hyperparameters
optimizer_config = {
    'learning_rate': 1e-2,
    'beta1': 0.9,
    'beta2': 0.999,
    'epsilon': 1e-7
}
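# The beta1/beta2/epsilon entries are Adam hyperparameters. A hedged sketch of
# one Adam update over flat lists of parameter and gradient arrays (the actual
# optimizer used with this config may lay out its state differently):
def adam_step_sketch(params, grads, config, state):
    state.setdefault('m', [np.zeros_like(p) for p in params])
    state.setdefault('v', [np.zeros_like(p) for p in params])
    state['t'] = state.get('t', 0) + 1
    lr, b1, b2, eps = (config['learning_rate'], config['beta1'],
                       config['beta2'], config['epsilon'])
    for p, g, m, v in zip(params, grads, state['m'], state['v']):
        m[...] = b1 * m + (1 - b1) * g          # first-moment estimate
        v[...] = b2 * v + (1 - b2) * g ** 2     # second-moment estimate
        m_hat = m / (1 - b1 ** state['t'])      # bias correction
        v_hat = v / (1 - b2 ** state['t'])
        p -= lr * m_hat / (np.sqrt(v_hat) + eps)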

optimizer_state = {}

# Looping params
n_epoch = 20
batch_size = 1024

for activation_name, activation in activations.items():
    nn1 = Sequential()
    nn1.add(Dense(784, 100))
    nn1.add(activation())
    nn1.add(Dense(100, 50))
    nn1.add(activation())
    nn1.add(Dense(50, 10))
    nn1.add(SoftMax())

    print("****************************************************")
    print(f"Training NN with {activation_name} without Batch Normalization")
    print("****************************************************")

    loss_history1 = fit(X_train, y_train, X_val, y_val, nn1, n_epoch,
                        batch_size, criterion, optimizer, optimizer_config,
                        optimizer_state)

    nn2 = Sequential()
Example #7
# Use this example to debug your code, start with logistic regression and then
# test other layers. You do not need to change anything here. This code is
# provided for you to test the layers. Next you will use similar code in the MNIST task.
###############################

###############################
#### generate_data
X, Y = generate_two_classes(500)
print("Data dimenstions: ", X.shape, Y.shape)
# plt.scatter(X[:,0], X[:,1], c=Y.argmax(axis=-1))
# plt.show()

###############################
#### build model
net = Sequential()
net.add(Dense(2, 4))
net.add(ReLU())
net.add(Dense(4, 2))
net.add(SoftMax())

criterion = MSECriterion()  # loss function

# Optimizer params
optimizer_config = {'learning_rate': 1e-2, 'momentum': 0.9}
optimizer_state = {}

# Looping params
n_epoch = 20
batch_size = 128
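# The training loop itself is not shown in this snippet; a hedged sketch of
# how minibatches of `batch_size` samples would typically be drawn each epoch:
def iterate_minibatches_sketch(X, Y, batch_size):
    order = np.random.permutation(X.shape[0])
    for start in range(0, X.shape[0], batch_size):
        chunk = order[start:start + batch_size]
        yield X[chunk], Y[chunk]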

##############################
Example #8
# Use this example to debug your code, start with logistic regression and then
# test other layers. You do not need to change anything here. This code is
# provided for you to test the layers. Next you will use similar code in the MNIST task.
###############################

###############################
#### generate_data
X, Y = generate_spirale(500, 3)
print("Data dimenstions: ", X.shape, Y.shape)
plt.scatter(X[:, 0], X[:, 1], c=Y.argmax(axis=-1))
plt.show()
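# generate_spirale is assumed to return n points per class laid out as
# interleaved spirals with one-hot labels; a hedged NumPy sketch of such a
# generator (the course's helper may use different noise and scaling):
def generate_spirals_sketch(n_per_class, n_classes):
    X = np.zeros((n_per_class * n_classes, 2), dtype=np.float32)
    Y = np.zeros((n_per_class * n_classes, n_classes), dtype=np.float32)
    for c in range(n_classes):
        ix = range(n_per_class * c, n_per_class * (c + 1))
        radius = np.linspace(0.0, 1.0, n_per_class)
        angle = np.linspace(c * 4.0, (c + 1) * 4.0, n_per_class) \
            + np.random.randn(n_per_class) * 0.2
        X[ix] = np.c_[radius * np.sin(angle), radius * np.cos(angle)]
        Y[ix, c] = 1.0
    return X, Y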

###############################
#### build model
net = Sequential()
net.add(Dense(2, 40))
# net.add(Dropout())
# net.add(BatchMeanSubtraction())
net.add(ReLU())
net.add(Dense(40, 40))
# net.add(Tanh())
net.add(ReLU())
# net.add(Dropout())
net.add(Dense(40, 3))
net.add(SoftMax())

# criterion = MultiLabelCriterion()  # loss function
criterion = CrossEntropyCriterion()
###############################
#### optimizer config
# Optimizer params