    def __init__(self, kernel_size=5, nb_classes=10):
        super(Net, self).__init__()
        self.best_epoch = 0
        self.sumloss = []
        self.train_error = []
        self.test_error = []

        self.c1 = ff.Sequential(
            ff.Conv2d(1, 3, kernel_size=kernel_size, padding=2),
            ff.BatchNorm2d(3), ff.ReLU(), ff.MaxPool2d(2), ff.Dropout(p=0.1))
        self.fc = ff.Sequential(ff.Linear(588, 84), ff.ReLU(),
                                ff.Linear(84, nb_classes))
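# Shape check for the 588 in ff.Linear(588, 84), assuming 28x28 single-channel
# (MNIST-like) inputs (an assumption; the input size is not stated in this snippet):
# kernel 5 with padding 2 keeps 28x28, MaxPool2d(2) halves it to 14x14,
# and 3 output channels give 3 * 14 * 14 = 588 flattened features.
assert 3 * 14 * 14 == 588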
Example #2
    def __init__(self, nb_nodes, act_fct=ff.ReLU()):
        super(Net, self).__init__()
        self.best_epoch = 0
        self.sumloss = []
        self.train_error = []
        self.test_error = []

        self.linear_layers = ff.Sequential(ff.Linear(2, nb_nodes), act_fct,
                                           ff.Linear(nb_nodes, nb_nodes), act_fct,
                                           ff.Linear(nb_nodes, nb_nodes), act_fct,
                                           ff.Linear(nb_nodes, 2))
Example #3
def build_model(
        act: ty.Type[ty.Union[framework.Tanh, framework.ReLU]]) -> framework.Sequential:
    """
    Build the sequential model defined in the project prompt.

    :param act: activation module
    """
    return framework.Sequential(framework.Linear(2, 25), act(),
                                framework.Linear(25, 25), act(),
                                framework.Linear(25, 25), act(),
                                framework.Linear(25, 25), act(),
                                framework.Linear(25, 1))
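# A usage sketch (not part of the original snippet): pass the activation
# class itself, since build_model instantiates it once per layer.
model = build_model(framework.Tanh)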
Example #4
def define_model(H, hidden_nb, d_in, d_out):
    """
    Defines a fully connected ReLU network.
    Arguments:
        - H: int, number of units in each hidden fully connected layer
        - hidden_nb: int, number of H-to-H hidden layers stacked after the
          input projection
        - d_in: int, input dimension
        - d_out: int, output dimension
    Returns:
        - Fully connected network with H-unit hidden layers
    """
    modules_array = []
    # Input layer
    modules_array.append(framework.Linear(d_in, H, 'relu'))
    # Hidden layers
    for _ in range(hidden_nb):
        modules_array.append(framework.ReLU())
        modules_array.append(framework.Linear(H, H, 'relu'))
    # Output layer (no activation after the final linear layer)
    modules_array.append(framework.ReLU())
    modules_array.append(framework.Linear(H, d_out, 'none'))
    model = framework.Sequential(modules_array)
    return model
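# A usage sketch (argument values are illustrative, not from the original
# snippet): a network for 2-D inputs and a scalar output, with three extra
# hidden layers of 25 units each.
net = define_model(H=25, hidden_nb=3, d_in=2, d_out=1)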
Example #5
train_validation_set = MNIST("train")  # this line was cut from the snippet; split name assumed by analogy with "test"
test_set = MNIST("test")

n_train = int(0.7 * len(train_validation_set))
print("MNIST:")
print("   Train set size:", n_train)
print("   Validation set size:", len(train_validation_set) - n_train)
print("   Test set size:", len(test_set))

np.random.seed(0xDEADBEEF)
batch_size = 64

loss = SoftmaxCrossEntropyLoss()
learning_rate = 0.03

model = lib.Sequential(
    [lib.Linear(28 * 28, 20),
     lib.Tanh(), lib.Linear(20, 10)])

#######################################################################################################################
# Nothing to do BEFORE this line.
#######################################################################################################################

indices = np.random.permutation(len(train_validation_set))

## Implement
## Hint: split the indices into two parts, a training and a validation one. Later, when loading a batch of
## data, iterate over those indices "batch_size" at a time, and load data from the dataset via
## train_validation_set.images[your_indices[i: i+batch_size]] and
## train_validation_set.labels[your_indices[i: i+batch_size]]

train_indices = indices[:n_train]
validation_indices = indices[n_train:]
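# A minimal batch-loading sketch following the hint above (the loop body and
# variable names are illustrative, not from the original snippet):
for i in range(0, len(train_indices), batch_size):
    batch_images = train_validation_set.images[train_indices[i: i + batch_size]]
    batch_labels = train_validation_set.labels[train_indices[i: i + batch_size]]
    # forward pass, loss, and parameter updates would go here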
Example #6
# reorganize the data into minibatches, then define and create the model

minibatch_size = 100  # batch size for SGD
nb_epochs = 500  # number of epochs for training
batch_mod = X.size(0) // minibatch_size  # number of full batches
X_mini = torch.Tensor(
    minibatch_size, batch_mod,
    X.size(1))  # reorganize dataset: [batch_size x nb_batches x nb_features]
T_mini = torch.Tensor(
    minibatch_size, batch_mod)  # reorganize targets: [batch_size x nb_batches]
for i in range(batch_mod):  # fill the reorganized tensors with data
    X_mini[:, i, :] = X[i * minibatch_size:(i + 1) * minibatch_size, :]
    T_mini[:, i] = T[i * minibatch_size:(i + 1) * minibatch_size]
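# For reference, an equivalent vectorized reorganization (a sketch with
# illustrative names, not from the original; like the loop above, it drops
# any remainder rows beyond the last full batch):
n_used = batch_mod * minibatch_size
X_mini_alt = X[:n_used].view(batch_mod, minibatch_size, X.size(1)).transpose(0, 1)
T_mini_alt = T[:n_used].view(batch_mod, minibatch_size).transpose(0, 1)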

Model = framework.Sequential(3)  # creates the model with 3 layers
layer_0 = Model.add_layer(
    'Tanh', 2,
    25)  # creates Linear() layer with Tanh activation (2 inputs, 25 outputs)
layer_1 = Model.add_layer(
    'ReLU', 25, 25
)  # creates Linear() layer with ReLU activation (25 inputs, 25 outputs)
layer_2 = Model.add_layer(
    'Sigmoid', 25,
    2)  # creates Linear() layer with Sigmoid activation (25 inputs, 2 outputs)

#***************************************************************************#

# training the network
Yaxis = np.empty(shape=(nb_epochs, 2))
for j in range(nb_epochs):  # loop over epochs
Example #7
import numpy as np
import framework as fw
import matplotlib.pyplot as plt

model = fw.Sequential(fw.Linear(2, 1))
criterion = fw.MSE()
epochs = 100
learning_rate = 1e-3
batch_size = 1
X = np.array([[5, 4]])
Y = np.array([[21]])
print(X.shape)
print(Y.shape)

history = []

for i in range(epochs):
    for x, y_true in fw.loader(X, Y, batch_size):
        # forward pass: compute all values up to the loss function
        y_pred = model.forward(x)
        loss = criterion.forward(y_pred, y_true)
        print(f'loss: {loss}')
        history.append(loss)  # record the loss in the history list initialized above
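        # The snippet ends after computing the loss; a typical continuation is
        # a backward pass and a gradient step. The calls below are assumptions
        # about this custom framework's API, not confirmed by the snippet:
        # grad = criterion.backward()      # gradient of the loss w.r.t. y_pred
        # model.backward(grad)             # backpropagate through the layers
        # model.step(learning_rate)        # hypothetical SGD parameter update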