# Example #1 (score: 0) — extraction artifact from example-aggregation site, commented out
Max-Planck-Institute for Medical Research, Heidelberg, Germany
Authors: Marius Killinger, Gregor Urban
"""

import matplotlib.pyplot as plt

from elektronn.training.traindata import MNISTData
from elektronn.net.convnet import MixedConvNN
from elektronn.net.introspection import embedMatricesInGray

# --- Load MNIST as flat vectors (28*28 = 784 features per sample) ---
# convert2image=False keeps each digit as a 1-D vector, as needed for an
# MLP autoencoder; shift_augment=False disables translation augmentation.
data = MNISTData(path=None, convert2image=False, shift_augment=False)

# --- Build a tied-weight denoising autoencoder ---
batch_size = 100
# NOTE(review): (28**2) is the scalar 784, not a 1-tuple — presumably
# MixedConvNN accepts a plain int for flat input; confirm against the API.
cnn = MixedConvNN((28**2), input_depth=None)
cnn.addPerceptronLayer(n_outputs=300, activation_func="tanh")
cnn.addPerceptronLayer(n_outputs=200, activation_func="tanh")
cnn.addPerceptronLayer(n_outputs=50, activation_func="tanh")
# Mirror every encoder layer with tied (transposed) weights; input_noise
# corrupts the input, making this a *denoising* autoencoder.
cnn.addTiedAutoencoderChain(n_layers=None,
                            activation_func="tanh",
                            input_noise=0.3)
cnn.compileOutputFunctions(
    target="regression")  # compiles the cnn.get_error function as well
cnn.setOptimizerParams(SGD={'LR': 5e-1, 'momentum': 0.9}, weight_decay=0)

print("training...")  # print() call syntax works on both Python 2 and 3
for i in range(40000):
    # Labels are unused: the reconstruction target is the input itself.
    d, _label = data.getbatch(batch_size)
    loss, loss_instance, time_per_step = cnn.trainingStep(d, d, mode="SGD")
# Example #2 (score: 0) — extraction artifact from example-aggregation site, commented out
# -*- coding: utf-8 -*-
"""
ELEKTRONN - Neural Network Toolkit
Copyright (c) 2015 Gregor Urban, Marius Killinger
"""

from elektronn.training.traindata import MNISTData
from elektronn.net.convnet import MixedConvNN

# MNIST digits as 28x28 images (convert2image=True), no shift augmentation.
data = MNISTData(path=None, convert2image=True, shift_augment=False)

# CNN with dropout enabled globally; individual layers opt out below.
batch_size = 100
cnn = MixedConvNN((28, 28), input_depth=1, enable_dropout=True)
# Two convolutional layers — positional args are (n_filters, filter_size);
# dropout is explicitly disabled on both.
cnn.addConvLayer(10, 5, pool_shape=2, activation_func="tanh", force_no_dropout=True)
cnn.addConvLayer(8, 5, pool_shape=2, activation_func="tanh", force_no_dropout=True)
# Fully connected classifier head (dropout active here).
cnn.addPerceptronLayer(200, activation_func="tanh")
cnn.addPerceptronLayer(150, activation_func="tanh")
# Output layer: one unit per MNIST class (10 classes), no dropout.
cnn.addPerceptronLayer(10, activation_func="tanh", force_no_dropout=True)
cnn.compileOutputFunctions()
"""
ELEKTRONN - Neural Network Toolkit
Copyright (c) 2015 Gregor Urban, Marius Killinger
"""

from elektronn.training.traindata import MNISTData
from elektronn.net.convnet import MixedConvNN

# Load Data #
data = MNISTData(path=None,
                 convert2image=True,
                 shift_augment=False)

# Create CNN #
batch_size = 100
cnn = MixedConvNN((28, 28), input_depth=1)
cnn.addConvLayer(10,
                 5,
                 pool_shape=2,
                 activation_func="abs")  # (nof, filtersize)
cnn.addConvLayer(8, 5, pool_shape=2, activation_func="abs")
cnn.addPerceptronLayer(100, activation_func="abs")
cnn.addPerceptronLayer(80, activation_func="abs")
cnn.addPerceptronLayer(
    10,
    activation_func="abs"
)  # need 10 outputs as there are 10 classes in the data set
cnn.compileOutputFunctions()
cnn.setOptimizerParams(SGD={'LR': 1e-2, 'momentum': 0.9}, weight_decay=0)

print "training..."
"""

import matplotlib.pyplot as plt

from elektronn.training.traindata import MNISTData
from elektronn.net.convnet import MixedConvNN
from elektronn.net.introspection import embedMatricesInGray

# --- Load MNIST as flat 784-dimensional vectors ---
# convert2image=False keeps each digit as a 1-D vector for the MLP
# autoencoder; shift_augment=False disables translation augmentation.
data = MNISTData(path=None,
                 convert2image=False,
                 shift_augment=False)

# --- Build a tied-weight denoising autoencoder ---
batch_size = 100
# NOTE(review): (28**2) is the scalar 784, not a 1-tuple — presumably the
# API accepts a plain int for flat input; confirm against MixedConvNN.
cnn = MixedConvNN((28**2), input_depth=None)
cnn.addPerceptronLayer(n_outputs=300, activation_func="tanh")
cnn.addPerceptronLayer(n_outputs=200, activation_func="tanh")
cnn.addPerceptronLayer(n_outputs=50, activation_func="tanh")
# Decoder mirrors all encoder layers with tied weights; input_noise makes
# this a denoising autoencoder.
cnn.addTiedAutoencoderChain(n_layers=None,
                            activation_func="tanh",
                            input_noise=0.3)
cnn.compileOutputFunctions(target="regression")  # compiles the cnn.get_error function as well
cnn.setOptimizerParams(SGD={'LR': 5e-1, 'momentum': 0.9}, weight_decay=0)

# Fixed: Python-2-only `print "..."` statement is a SyntaxError on Python 3.
print("training...")
for i in range(40000):
    d, l = data.getbatch(batch_size)
    loss, loss_instance, time_per_step = cnn.trainingStep(d, d, mode="SGD")

    if i % 100 == 0: