Example #1
model = NeuralNetwork(
    n_inputs=32 * 32 * 3,
    input_shape=(64, 3, 32, 32),
    layers=[
        (ConvolutionLayer, {
            'image_shape': (3, 32, 32),
            'filter_shape': (3, 5, 5),
            'n_kernels': 20,
            'non_linearity': activations.rectify
        }),
        (ConvolutionLayer, {
            # Change this to just (3, 3); the first element here is the
            # number of kernels in the previous layer.
            'filter_shape': (20, 3, 3),
            'n_kernels': 40,
            'non_linearity': activations.rectify
        }),
        (FlattenLayer, {}),
        (HiddenLayer, {
            'n_units': 1024,
            'non_linearity': activations.rectify
        }),
        (DropoutLayer, {
            'probability': 0.5
        }),
        (HiddenLayer, {
            'n_units': 1024,
            'non_linearity': activations.rectify
        }),
        (DropoutLayer, {
            'probability': 0.5
        }),
        (HiddenLayer, {
            'n_units': 10,
            'non_linearity': activations.softmax
        })
    ],
    trainer=(
        SGDTrainer,
        {
            'batch_size': 64,
            'learning_rate': 0.1,
            'n_epochs': 400,
            #'global_L2_regularization': 0.0001,
            'dynamic_learning_rate': (ExponentialDecay, {
                'decay': 0.99
            }),
        }))
Example #2
model = NeuralNetwork(
	n_inputs=28*28,
	layers = [
		(HiddenLayer,
		{
			'n_units': 150, 
			'non_linearity': activations.rectify
		}),
		(HiddenLayer,
		{
			'n_units': 80, 
			'non_linearity': activations.rectify
		}),
		(HiddenLayer,
		{
			'n_units': 10, 
			'non_linearity': activations.softmax
		})
	],
	trainer=(SGDTrainer,
		{
			'batch_size': 20,
			'learning_rate': 0.1,
			'n_epochs': 400,
			'global_L2_regularization': 0.0001,
			'dynamic_learning_rate': (ExponentialDecay, {'decay': 0.99}),
		}
	)
)
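The trainer above pairs a base learning rate of 0.1 with ExponentialDecay; a minimal sketch of the implied schedule, assuming the decay factor is applied once per epoch (the exact rule is not shown in these examples):

# Hedged sketch of the learning-rate schedule, assuming ExponentialDecay
# multiplies the learning rate by `decay` once per epoch.
base_lr, decay, n_epochs = 0.1, 0.99, 400
schedule = [base_lr * decay ** epoch for epoch in range(n_epochs)]
# schedule[0] == 0.1, schedule[99] ~= 0.037, schedule[399] ~= 0.0018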
Example #3
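The load_data helper called below is not defined in this snippet; a minimal sketch, assuming mnist.pkl.gz holds the standard pickled (train, valid, test) tuple of (x, y) arrays (the original helper may additionally wrap these in Theano shared variables):

import gzip
import cPickle

# Hedged sketch of load_data: unpack the pickled (train, valid, test)
# tuple from mnist.pkl.gz; each element is an (x, y) pair of numpy arrays.
def load_data(path):
    with gzip.open(path, 'rb') as f:
        train_set, valid_set, test_set = cPickle.load(f)
    return [train_set, valid_set, test_set]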

# Load MNIST
datasets = load_data("mnist.pkl.gz")

model = NeuralNetwork(
	n_inputs=28*28,
	batch_size=20,
	layers = [
		(ConvolutionLayer,
		{
			'image_shape': (1, 28, 28),
			'filter_shape': (1, 5, 5),
			'n_kernels': 40,
			'non_linearity': activations.rectify
		}),
		(FlattenLayer, {}),
		(HiddenLayer,
		{
			'n_units': 80, 
			'non_linearity': activations.rectify
		}),
		(HiddenLayer,
		{
			'n_units': 10,
			'non_linearity': activations.softmax
		})
	]
)

model.train(datasets[0], datasets[1])
Example #4
model = NeuralNetwork(
	n_inputs=28*28,
	layers = [
		(DropoutLayer, {'probability': 0.2}),
		(HiddenLayer,
		{
			'n_units': 800, 
			'non_linearity': activations.rectify
		}),
		(DropoutLayer, {'probability': 0.5}),
		(HiddenLayer,
		{
			'n_units': 800, 
			'non_linearity': activations.rectify
		}),
		(DropoutLayer, {'probability': 0.5}),
		(HiddenLayer,
		{
			'n_units': 10, 
			'non_linearity': activations.softmax
		})
	],
	trainer=(SGDTrainer,
		{
			'batch_size': 100,
			'learning_rate': 0.1,
			'n_epochs': 400,
			#'global_L2_regularization': 0.0001,
			'dynamic_learning_rate': (ExponentialDecay, {'decay': 0.99}),
		}
	)
)
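A minimal sketch of what a DropoutLayer configured with 'probability': 0.5 is presumed to do during training; the actual neuralmind implementation is not shown here, and whether it rescales the surviving activations is left open in this sketch.

import numpy as np

# Hedged dropout sketch: zero each unit independently with the given
# drop probability (rescaling of surviving units is omitted here).
def dropout(x, probability, rng=np.random):
    mask = rng.binomial(n=1, p=1.0 - probability, size=x.shape)
    return x * mask

example_activations = np.ones((4, 8))
dropped = dropout(example_activations, 0.5)  # roughly half the entries become 0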
Example #5
import sys
sys.path.append("../")
sys.path.append("../neuralmind")

import gzip
import cPickle
import numpy as np

import theano
import theano.tensor as T

from neuralmind import NeuralNetwork
from layers import HiddenLayer
import activations

import datasets

# Load MNIST
datasets = datasets.load_mnist("mnist.pkl.gz")

model = NeuralNetwork(n_inputs=28 * 28,
                      layers=[(HiddenLayer, {
                          'n_units': 10,
                          'non_linearity': activations.softmax
                      })])

model.train(datasets[0], datasets[1])
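Example #5 passes no trainer argument; combining it with the SGDTrainer settings from Example #2 gives the sketch below. The `trainers` import path is an assumption, since none of the examples show where SGDTrainer is imported from.

from trainers import SGDTrainer  # import path is an assumption

model = NeuralNetwork(
    n_inputs=28 * 28,
    layers=[(HiddenLayer, {
        'n_units': 10,
        'non_linearity': activations.softmax
    })],
    trainer=(SGDTrainer, {
        'batch_size': 20,
        'learning_rate': 0.1,
        'n_epochs': 400
    })
)

model.train(datasets[0], datasets[1])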