# ===== Example 1 =====
def basicConv2Layer():
    """Build a small two-stage CNN for 28x28 single-channel input.

    Each stage is 3x3 conv (pad 1) -> ReLU -> 2x2 average pool; the
    flattened 4 x 7 x 7 feature map feeds one linear classifier.

    Returns:
        (model, loss): the Network and a softmax cross-entropy loss.
    """
    net = Network()
    net.add(Conv2D('conv1', in_channel=1, out_channel=4, kernel_size=3,
                   pad=1, init_std=1))
    net.add(Relu('relu1'))
    # 28x28 -> 14x14 after 2x2 average pooling; map is N x 4 x 14 x 14
    net.add(AvgPool2D('pool1', kernel_size=2, pad=0))
    net.add(Conv2D('conv2', in_channel=4, out_channel=4, kernel_size=3,
                   pad=1, init_std=1))
    net.add(Relu('relu2'))
    # 14x14 -> 7x7; map is N x 4 x 7 x 7, i.e. 196 features per sample
    net.add(AvgPool2D('pool2', kernel_size=2, pad=0))
    net.add(Reshape('flatten', (-1, 196)))
    net.add(Linear('fc3', in_num=196, out_num=10, init_std=0.1))

    return net, SoftmaxCrossEntropyLoss(name='loss')
# ===== Example 2 =====
def LeNet():
    """Build a LeNet-5-style network for 28x28 single-channel input.

    Pipeline: conv(5x5, pad 2) -> ReLU -> avg-pool -> conv(5x5, pad 0)
    -> ReLU -> avg-pool, then fully-connected layers 400 -> 120 -> 84 -> 10.

    Returns:
        (model, loss): the Network and a softmax cross-entropy loss.
    """
    net = Network()
    net.add(Conv2D('conv1', in_channel=1, out_channel=6, kernel_size=5,
                   pad=2, init_std=1))
    net.add(Relu('relu1'))
    # 28x28 -> 14x14 after 2x2 average pooling; map is N x 6 x 14 x 14
    net.add(AvgPool2D('pool1', kernel_size=2, pad=0))
    net.add(Conv2D('conv2', in_channel=6, out_channel=16, kernel_size=5,
                   pad=0, init_std=1))
    net.add(Relu('relu2'))
    # 10x10 -> 5x5; map is N x 16 x 5 x 5, i.e. 400 features per sample
    net.add(AvgPool2D('pool2', kernel_size=2, pad=0))
    net.add(Reshape('flatten', (-1, 400)))
    net.add(Linear('fc1', in_num=400, out_num=120, init_std=0.1))
    net.add(Relu('relu3'))
    net.add(Linear('fc2', in_num=120, out_num=84, init_std=0.1))
    net.add(Relu('relu4'))
    net.add(Linear('fc3', in_num=84, out_num=10, init_std=0.1))

    return net, SoftmaxCrossEntropyLoss(name='loss')
# ===== Example 3 =====
from network import Network
from layers import Relu, Linear, Conv2D, AvgPool2D, Reshape
from utils import LOG_INFO
from loss import EuclideanLoss, SoftmaxCrossEntropyLoss
from solve_net import train_net, test_net
from load_data import load_mnist_4d
from plot import show
from solve_net import show4category
# Load the MNIST split as 4-D tensors (presumably N x 1 x 28 x 28,
# matching the conv comments below) from the 'data' directory.
train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Model definition: two conv/ReLU/avg-pool stages followed by a single
# linear classifier over the flattened feature map.
model = Network()
model.add(Conv2D('conv1', in_channel=1, out_channel=4, kernel_size=3,
                 pad=1, init_std=0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', kernel_size=2, pad=0))  # N x 4 x 14 x 14
model.add(Conv2D('conv2', in_channel=4, out_channel=8, kernel_size=3,
                 pad=1, init_std=0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', kernel_size=2, pad=0))  # N x 8 x 7 x 7
model.add(Reshape('flatten', (-1, 392)))  # 8 * 7 * 7 = 392 features
model.add(Linear('fc3', in_num=392, out_num=10, init_std=0.01))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means model forward-backwards one batch of samples.
#       one epoch means model has gone through all the training samples.
#       'disp_freq' is the interval, in iterations, at which training
#       information is displayed within an epoch.

config = {
# ===== Example 4 =====
# Your model definition here
# You should explore different model architectures
# Batch = N x 28 x 28

# Original baseline CNN: two 3x3 conv/ReLU/avg-pool stages, then a
# linear classifier over the flattened 4 x 7 x 7 feature map.
# Conv2D positional args: name, in_channel, out_channel, kernel_size,
# pad, init_std; AvgPool2D: name, kernel_size, pad;
# Linear: name, in_num, out_num, init_std.
model = Network()
model.add(Conv2D('conv1', 1, 4, 3, 1, 0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
model.add(Conv2D('conv2', 4, 4, 3, 1, 0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 4 x 7 x 7
model.add(Reshape('flatten', (-1, 196)))  # 4 * 7 * 7 = 196 features
model.add(Linear('fc3', 196, 10, 0.1))
'''
# LeNet
model = Network()
# ===== Example 5 =====
import json
import sys
from scipy import misc, ndimage

# Load the MNIST split from the 'data' directory as 4-D tensors
# (presumably N x 1 x 28 x 28, matching the conv comments below).
train_data, test_data, train_label, test_label = load_mnist_4d('data')
# Optional Gaussian input-noise augmentation, currently disabled:
# train_data = train_data + np.random.randn(*train_data.shape) * 0.01

# Your model definition here
# You should explore different model architecture
model = Network()

# Conv2D positional args appear to be: name, in_channel, out_channel,
# kernel_size, pad, init_std (matches kwarg usage elsewhere in this
# project) — so conv1 here uses pad=0, init_std=1.
conv1 = Conv2D('conv1', 1, 4, 5, 0, 1)  # output shape: N x 4 x 24 x 24
model.add(conv1)
relu1 = Relu('relu1')
model.add(relu1)
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 12 x 12
model.add(Conv2D('conv2', 4, 16, 5, 0, 1))  # output shape: N x 16 x 8 x 8
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 16 x 4 x 4
model.add(Reshape('flatten', (-1, 256)))  # 16 * 4 * 4 = 256 features
model.add(Linear('fc3', 256, 10, 0.1))
'''
# input: N x 1 x 28 x 28
model.add(Conv2D('conv1', 1, 4, 5, 1, 0.01)) # output shape: N x 4 x 26 x 26
model.add(Relu('relu1'))

model.add(Conv2D('conv1', 4, 1, 5, 0, 0.01)) # output shape: N x 1 x 22 x 22 
model.add(Relu('relu1'))

model.add(Reshape('flatten', (-1, 484)))