Example No. 1
 def __init__(self):
     super(LinearNet, self).__init__([
         lrp_module.Reshape(28, 28, 1),
         lrp_module.Linear(28 * 28, 1296),
         lrp_module.ReLU(),
         lrp_module.Linear(1296, 1296),
         lrp_module.ReLU(),
         lrp_module.Linear(1296, 1296),
         lrp_module.ReLU(),
         lrp_module.Linear(1296, 10)
     ])
     self.outputLayers = [0, 3, 5, 7, 8]
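
The four Linear layers fix the model's size; a quick plain-Python check of the parameter count (assuming standard fully connected layers with one bias per output unit):

dims = [(28 * 28, 1296), (1296, 1296), (1296, 1296), (1296, 10)]
n_params = sum(n_in * n_out + n_out for n_in, n_out in dims)
print(n_params)  # 4392154 weights and biases across the four Linear layers
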
Example No. 2
 def __init__(self):
     super(ConvNet, self).__init__([
         lrp_module.Conv2d(1, 6, 5),
         lrp_module.ReLU(),
         lrp_module.MaxPool2d(2, 2),
         lrp_module.Conv2d(6, 16, 5),
         lrp_module.ReLU(),
         lrp_module.MaxPool2d(2, 2),
         lrp_module.Reshape(4, 4, 16),
         lrp_module.Linear(4 * 4 * 16, 120),
         lrp_module.ReLU(),
         lrp_module.Linear(120, 100),
         lrp_module.ReLU(),
         lrp_module.Linear(100, 10)
     ])
     self.outputLayers = [0, 2, 3, 5, 6, 9, 11, 12]
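
The 4 * 4 * 16 in the Reshape and first Linear layers follows from the shape arithmetic of the stack above, assuming a 28x28 input image (consistent with Example No. 1). A quick plain-Python check of that arithmetic:

size = 28
for kernel in (5, 5):
    size = size - kernel + 1  # valid 5x5 convolution: 28 -> 24, then 12 -> 8
    size = size // 2          # 2x2 max-pooling:       24 -> 12, then  8 -> 4

assert size == 4  # matches Reshape(4, 4, 16) and Linear(4 * 4 * 16, 120)
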
Example No. 3
    def test_forwardWrongInputListDimension(self):
        m = M.ReLU()

        # forward must reject more than one input tensor with a ValueError
        try:
            m.forward(FloatTensor([[2]]), FloatTensor([[2]]))
        except ValueError:
            return 0  # expected error was raised: test passes

        return 1  # no error raised: test fails
Example No. 4
    def test_backwardBeforeForward(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])

        m = M.ReLU()

        # backward before any forward pass must fail, since no activation
        # has been stored yet
        try:
            m.backward(input)
        except Exception:
            return 0  # expected error was raised: test passes

        return 1  # no error raised: test fails
Example No. 5
    def test_forwardCorrectOutput(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])
        expected_output = FloatTensor([[1, 0, 1], [0, 0, 2]])  # negatives clamped to 0

        m = M.ReLU()
        output = m.forward(input)

        if not areEqual(output, expected_output):
            return 1

        return 0
Example No. 6
    def test_backwardCorrectOutput(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])
        dsigma = FloatTensor([[1, 0, 1], [1, 0, 1]])  # ReLU derivative mask: 1 where input >= 0

        m = M.ReLU()
        m.forward(input)
        grad = FloatTensor([[1, 2, 3], [3, 2, 1]])

        output = m.backward(grad)
        output_expected = dsigma * grad

        if not areEqual(output, output_expected):
            return 1

        return 0
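
Taken together, Examples No. 3-6 pin down the contract of M.ReLU. Below is a minimal sketch of a module satisfying that contract, assuming torch's FloatTensor as in the tests; it illustrates the tested behaviour and is not the project's actual implementation:

from torch import FloatTensor

class ReLU:
    def __init__(self):
        self.input = None  # set by forward, required by backward

    def forward(self, *inputs):
        # exactly one input tensor is accepted (Example No. 3)
        if len(inputs) != 1:
            raise ValueError("forward expects a single input tensor")
        self.input = inputs[0]
        return self.input.clamp(min=0)  # negatives clamped to 0 (Example No. 5)

    def backward(self, grad):
        # backward before forward must fail (Example No. 4)
        if self.input is None:
            raise RuntimeError("backward called before forward")
        # derivative mask is 1 where the stored input was >= 0 (Example No. 6)
        dsigma = (self.input >= 0).float()
        return dsigma * grad

m = ReLU()
print(m.forward(FloatTensor([[1, -3, 1], [0, -1, 2]])))  # [[1, 0, 1], [0, 0, 2]]
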
Example No. 7
# Normalize the test set in place
test_input.sub_(mean).div_(std)

# Define models
model1 = modules.Sequential(modules.Linear(2, 25),
                            modules.TanH(),
                            modules.Linear(25, 25),
                            modules.TanH(),
                            modules.Linear(25, 25),
                            modules.TanH(),
                            modules.Linear(25, 25),
                            modules.TanH(),
                            modules.Linear(25, 2),
                            modules.MSELoss()
                           )
model2 = modules.Sequential(modules.Linear(2, 25),
                            modules.ReLU(),
                            modules.Linear(25, 25),
                            modules.ReLU(),
                            modules.Linear(25, 25),
                            modules.ReLU(),
                            modules.Linear(25, 25),
                            modules.ReLU(),
                            modules.Linear(25, 2),
                            modules.MSELoss()
                           )

# Define training parameters
nb_epochs = 1000
lr = 1e-5
mini_batch_size = 100
optimizer1 = SGD(model1.param(), lr=lr)
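
The snippet stops right after constructing the first optimizer. A minimal sketch of what such an SGD class might look like, assuming model1.param() returns a list of (parameter, gradient) tensor pairs; that return convention is an assumption, not shown in the snippet:

class SGD:
    def __init__(self, params, lr):
        self.params = params  # assumed: list of (parameter, gradient) tensor pairs
        self.lr = lr

    def step(self):
        # vanilla gradient descent: p <- p - lr * grad, applied in place
        for p, grad in self.params:
            p.sub_(self.lr * grad)

    def zero_grad(self):
        # reset accumulated gradients before the next mini-batch
        for _, grad in self.params:
            grad.zero_()
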
Example No. 8
# Split into 70% train, 10% validation, 20% test
train_data, train_targets, \
  validation_data, validation_targets, test_data, test_targets = h.split_data(inputs, targets, 0.7, 0.1, 0.2)

# Data normalization
mean, std = inputs.mean(), inputs.std()

train_data.sub_(mean).div_(std)
validation_data.sub_(mean).div_(std)
test_data.sub_(mean).div_(std)

# Instantiate the model

Input_Units = 2
Output_Units = 2
Hidden_Units = 25

model = m.Sequential(m.Linear(Input_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.Tanh(),
                     m.Linear(Hidden_Units, Output_Units), m.Tanh())

# Instantiate the optimizer
lr = 0.00095
sgd = m.SGD(params=model.param(), lr=lr)

# Train the model
EPOCHS = 150

model, train_error, validation_error = h.train_model(train_data, train_targets,\
                                        validation_data, validation_targets, model, sgd, nb_epochs = EPOCHS)
# Plot both train and validation errors wrt the number of epochs
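
The example ends at the plotting comment. A minimal sketch of such a plot with matplotlib, assuming train_error and validation_error are per-epoch lists as returned by h.train_model above:

import matplotlib.pyplot as plt

epochs = range(1, EPOCHS + 1)
plt.plot(epochs, train_error, label='train error')
plt.plot(epochs, validation_error, label='validation error')
plt.xlabel('epoch')
plt.ylabel('error')
plt.legend()
plt.show()
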
Example No. 9
# Normalize inplace the data
loadData.normalize_data(train_input)
loadData.normalize_data(test_input)


########## modules and model #########

# define optimizers and losses as lists so they are easy to swap between runs
optimizers = [optim.SGDOptimizer, optim.SGDmomOptimizer, optim.AdamOptimizer, optim.BFGSOptimizer]
losses = [modules.LossMSE]

# define layers and activations
Lin1 = modules.Linear(2, 25)
Lin2 = modules.Linear(25, 25)
Lin3 = modules.Linear(25, 2)
act1 = modules.ReLU()
act2 = modules.ReLU()
act3 = modules.Tanh()
#act4 = modules.Sigmoid()

# combine the layers together
layers = [
    Lin1,
    act1,
    Lin2,
    act2,
    Lin3,
    act3]

# set parameters for the run
lr = 0.005  # learning rate; for BFGS, multiply by 10 to 100
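
The comment above implies a different step size for BFGS. One way to encode that, using only the optimizer classes already listed (a sketch; the optimizer constructor signatures are not shown in the snippet):

# hypothetical per-optimizer learning rates; BFGS gets a 50x larger step,
# inside the suggested 10x to 100x range
lr_by_optimizer = {
    optim.SGDOptimizer:    lr,
    optim.SGDmomOptimizer: lr,
    optim.AdamOptimizer:   lr,
    optim.BFGSOptimizer:   lr * 50,
}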