Example #1
def GCN_check(name, adj, weights, layer_config):
    num_layer = len(layer_config)

    model = Network()
    for i in range(num_layer - 2):
        model.add(Aggregate('A{}'.format(i), adj))
        model.add(
            Linear('W{}'.format(i), layer_config[i], layer_config[i + 1],
                   'xavier').set_W(weights[i]))
        model.add(Tanh('Tanh{}'.format(i)))

    model.add(Aggregate('A{}'.format(num_layer - 2), adj))
    model.add(
        Linear('W{}'.format(num_layer - 2), layer_config[-2], layer_config[-1],
               'xavier').set_W(weights[-1]))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    # loss = EuclideanLoss(name='loss')

    print("Model " + name)
    for layer in model.layer_list:
        print(":\t" + repr(layer))
    print(':\t' + repr(loss))

    print('Forward Computation: ', model.str_forward('X'))
    print('Backward Computation:', model.str_backward('Z-Y'))
    print()
    model.str_update()
    print()

    return model, loss
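For context, each block that GCN_check stacks computes one graph-convolution step, Tanh(A · X · W), assuming Aggregate left-multiplies the node features by the adjacency it is constructed with. A self-contained numpy sketch of that arithmetic:

import numpy as np

# Minimal sketch of the Aggregate -> Linear -> Tanh chain above, i.e. one
# propagation step Tanh(A @ X @ W). Shapes are illustrative.
A = np.eye(4) + np.eye(4, k=1) + np.eye(4, k=-1)  # toy adjacency with self-loops
X = np.random.randn(4, 8)                         # 4 nodes, 8 input features
W = np.random.randn(8, 16)                        # 8 -> 16 feature transform
H = np.tanh(A @ X @ W)
print(H.shape)                                    # (4, 16)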
Example #2
def build_model(config):
    model = Network()
    layer_num = 0
    for layer in config['use_layer']:
        if layer['type'] == "Linear":
            in_num = layer['in_num']
            out_num = layer['out_num']
            if "init_std" in layer.keys():
                model.add(
                    Linear(layer['type'] + str(layer_num),
                           in_num,
                           out_num,
                           init_std=layer['init_std']))
            else:
                model.add(
                    Linear(layer['type'] + str(layer_num), in_num, out_num))
            layer_num += 1
        else:
            raise ValueError("Unsupported layer type: " + layer['type'])
    loss_name = config['use_loss']
    if loss_name == 'EuclideanLoss':
        loss = EuclideanLoss(loss_name)
    elif loss_name == 'SoftmaxCrossEntropyLoss':
        loss = SoftmaxCrossEntropyLoss(loss_name)
    else:
        raise ValueError("Unsupported loss: " + loss_name)
    return model, loss
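The config schema can be read directly off the parser above; a minimal dict that build_model accepts:

config = {
    'use_layer': [
        {'type': 'Linear', 'in_num': 784, 'out_num': 256, 'init_std': 0.01},
        {'type': 'Linear', 'in_num': 256, 'out_num': 10},  # no init_std: default init
    ],
    'use_loss': 'SoftmaxCrossEntropyLoss',
}
model, loss = build_model(config)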
Example #3
def two_layer_mlp(loss='euclidean', activation='relu'):
    model = Network()
    model.add(Linear('fc1', 784, 512, 0.01))
    model.add(activation_map[activation]('activation1'))
    model.add(Linear('fc2', 512, 256, 0.01))
    model.add(activation_map[activation]('activation2'))
    model.add(Linear('fc3', 256, 10, 0.01))
    loss = loss_map[loss](name=loss)
    return model, loss
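two_layer_mlp resolves its string arguments through activation_map and loss_map, which are not shown in this listing; given the classes used elsewhere in these examples, they presumably look something like the following (hypothetical reconstruction, key names guessed from the defaults 'euclidean' and 'relu'):

from layers import Relu, Sigmoid, Gelu
from loss import EuclideanLoss, SoftmaxCrossEntropyLoss, HingeLoss

# Hypothetical: these dicts are not part of the listing.
activation_map = {'relu': Relu, 'sigmoid': Sigmoid, 'gelu': Gelu}
loss_map = {'euclidean': EuclideanLoss,
            'softmaxce': SoftmaxCrossEntropyLoss,
            'hinge': HingeLoss}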
def Model_Linear_Gelu_2_HingeLoss():
    name = '2_Gelu_HingeLoss'
    model = Network()
    model.add(Linear('fc1', 784, 441, 0.01))
    model.add(Gelu('a1'))
    model.add(Linear('fc2', 441, 196, 0.01))
    model.add(Gelu('a2'))
    model.add(Linear('fc3', 196, 10, 0.01))
    loss = HingeLoss(name='loss')
    return name, model, loss
def Model_Linear_Sigmoid_2_SoftmaxCrossEntropyLoss():
    name = '2_Sigmoid_SoftmaxCrossEntropyLoss'
    model = Network()
    model.add(Linear('fc1', 784, 441, 0.01))
    model.add(Sigmoid('a1'))
    model.add(Linear('fc2', 441, 196, 0.01))
    model.add(Sigmoid('a2'))
    model.add(Linear('fc3', 196, 10, 0.01))
    loss = SoftmaxCrossEntropyLoss(name='loss')
    return name, model, loss
def Model_Linear_Relu_2_EuclideanLoss():
    name = '2_Relu_EuclideanLoss'
    model = Network()
    model.add(Linear('fc1', 784, 441, 0.01))
    model.add(Relu('a1'))
    model.add(Linear('fc2', 441, 196, 0.01))
    model.add(Relu('a2'))
    model.add(Linear('fc3', 196, 10, 0.01))
    loss = EuclideanLoss(name='loss')
    return name, model, loss
def one_layer_net():
    model = Network()
    model.add(Linear('fc1', 784, 10, 0.001))
    config = {
        'learning_rate': 0.00001,
        'weight_decay': 0.005,
        'momentum': 0.9,
        'batch_size': 50,
        'max_epoch': 10,
        'disp_freq': 50,
        'test_epoch': 5
    }
    return model, config
Example #8
def network_setup(model_file_path=None):
    freq_count = 4000
    count_bins = 88 * 20
    dataset = MapsDB('../db',
                     freq_count=freq_count,
                     count_bins=count_bins,
                     batch_size=128,
                     start_time=0.5,
                     duration=0.5)
    model = Network()
    model.add(Linear('fc1', dataset.get_vec_input_width(), 2048, 0.001))
    model.add(Sigmoid('sigmoid1'))
    model.add(Linear('fc2', 2048, dataset.get_label_width(), 0.001))
    model.add(Softmax('softmax2'))

    loss = CrossEntropyLoss(name='xent')
    # loss = EuclideanLoss(name='r2')

    optim = SGDOptimizer(learning_rate=0.00001, weight_decay=0.005, momentum=0.9)
    # optim = AdagradOptimizer(learning_rate=0.001, eps=1e-6)

    input_placeholder = T.fmatrix('input')
    label_placeholder = T.fmatrix('label')
    label_active_size_placeholder = T.ivector('label_active_size')

    if model_file_path:
        model.loads(model_file_path)
    else:
        dataset.load_cache()

    model.compile(input_placeholder, label_placeholder, label_active_size_placeholder, loss, optim)
    return model, dataset, freq_count, count_bins
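A side note on the placeholders: Theano's tensor constructors encode dtype and rank in the name, so fmatrix is a symbolic float32 matrix and ivector an int32 vector:

import theano.tensor as T

x = T.fmatrix('input')                   # symbolic float32 matrix
n = T.ivector('label_active_size')       # symbolic int32 vector
print(x.dtype, x.ndim, n.dtype, n.ndim)  # float32 2 int32 1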
Example #9
def zero_layer_relu():
    model = Network()
    model.add(Linear('fc1', 784, 10, 0.01))
    model.add(Relu('rl1'))
    config = {
        'learning_rate': 0.001,
        'weight_decay': 0.001,
        'momentum': 0.9,
        'batch_size': 50,
        'max_epoch': 20,
        'disp_freq': 50,
        'test_epoch': 5
    }
    return model, config
Example #10
def getNetwork():
	'''
	Yield (name, model, config, loss) tuples parsed from the structure file.
	'''
	file_name = "models/structure.json"
	if len(sys.argv)>1:
		file_name = sys.argv[1]
	with open(file_name, "r") as f:
		s = f.read()

	networks = json.loads(s)
	for network in networks:
		config = network['config']
		dis_model = network['model']
		model = Network()
		for layer in dis_model:
			if layer['type'] == 'Linear':
				model.add(Linear(layer['name'], layer['in_num'], layer['out_num'], layer['std']))
			if layer['type'] == 'Relu':
				model.add(Relu(layer['name']))
			if layer['type'] == 'Sigmoid':
				model.add(Sigmoid(layer['name']))
			if layer['type'] == 'Softmax':
				model.add(Softmax(layer['name']))
		loss = EuclideanLoss('loss')
		if 'loss' in config:
			if config['loss'] == 'CrossEntropyLoss':
				loss = CrossEntropyLoss('loss')
		yield network['name'], model, config, loss
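The structure file's schema follows from the loop above: a JSON array of networks, each with 'name', 'config', and a 'model' list of layer dicts. A minimal models/structure.json consistent with this parser (config values other than 'loss' are illustrative):

[
  {
    "name": "mlp_relu",
    "config": {"learning_rate": 0.01, "loss": "CrossEntropyLoss"},
    "model": [
      {"type": "Linear", "name": "fc1", "in_num": 784, "out_num": 256, "std": 0.01},
      {"type": "Relu", "name": "rl1"},
      {"type": "Linear", "name": "fc2", "in_num": 256, "out_num": 10, "std": 0.01}
    ]
  }
]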
Example #11
def make_net_from_system(system, image_layer=0, image_channels=3.0):
    """
    :return: A neural network architecture with the same nodes and connections as the given
        neurophysiological system architecture, and otherwise random hyperparameters
        (to be optimized separately)
    """
    net = Network()

    for i in range(len(system.populations)):
        pop = system.populations[i]
        units = pop.n

        if i == image_layer:
            channels = image_channels
            pixels = round(np.sqrt(units / image_channels))
        else:
            ratio_channels_over_pixels = np.exp(-1.5 + 3 * np.random.rand())
            pixels = round(np.cbrt(units / ratio_channels_over_pixels))
            channels = max(1, round(ratio_channels_over_pixels * pixels))
        net.add(pop.name, channels, pixels)

    # try to set strides to reasonable values
    for i in range(len(system.populations)):
        pres = system.find_pre(system.populations[i].name)
        units = net.layers[i].m * net.layers[i].width**2

        if len(pres) > 0:
            pre_ind = system.find_population_index(pres[0].name)
            net.layers[i].width = net.layers[pre_ind].width / 1.5
            net.layers[i].m = units / net.layers[i].width**2

    for projection in system.projections:
        pre = net.find_layer(projection.origin.name)
        post = net.find_layer(projection.termination.name)

        c = 0.1 + 0.2 * np.random.rand()
        s = 1.0 + 9.0 * np.random.rand()
        rf_ratio = projection.termination.w / projection.origin.w
        w = (rf_ratio - 1.0) / (0.5 + np.random.rand())
        w = np.maximum(0.1, w)  # ensure the kernel width is positive
        sigma = 0.1 + 0.1 * np.random.rand()
        net.connect(pre, post, c, s, w, sigma)

    return net
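The channel/pixel split above follows from units = channels × pixels²; writing r = channels/pixels gives units = r · pixels³, hence the np.cbrt. A quick numeric check (values illustrative):

import numpy as np

units = 100000.0
r = np.exp(-1.5 + 3 * 0.5)           # a mid-range draw of the channels/pixels ratio
pixels = round(np.cbrt(units / r))   # same formula as in make_net_from_system
channels = max(1, round(r * pixels))
print(pixels, channels, channels * pixels**2)  # approximately reproduces `units`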
Example #12
def MLP(name, weights, layer_config):
    num_layer = len(layer_config)

    model = Network()
    for i in range(num_layer - 2):
        model.add(Linear('W{}'.format(i),
                         layer_config[i], layer_config[i + 1], 'kaiming'))
        model.add(Relu('Relu{}'.format(i)))

    model.add(Linear('W{}'.format(num_layer - 2),
                     layer_config[-2], layer_config[-1], 'kaiming'))

    loss = SoftmaxCrossEntropyLoss(name='loss')

    print("Model "+name)
    for layer in model.layer_list:
        print(":\t" + repr(layer))
    print(':\t' + repr(loss))
    print()

    print('Forward Computation: ', model.str_forward('X'))
    print('Backward Computation:', model.str_backward('Z-Y'))
    print()
    model.str_update()
    print()

    return model, loss
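Here Linear takes the scheme name 'kaiming' (Example #1 uses 'xavier') where other examples pass a numeric init_std, so the constructor presumably branches on that argument. A hypothetical sketch of such a dispatch, not the listing's actual implementation:

import numpy as np

# Hypothetical init dispatch consistent with both call styles seen in these examples.
def init_weight(in_num, out_num, init):
    if init == 'xavier':
        std = np.sqrt(2.0 / (in_num + out_num))
    elif init == 'kaiming':
        std = np.sqrt(2.0 / in_num)
    else:                      # a plain float such as 0.01
        std = float(init)
    return np.random.randn(in_num, out_num) * std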
def Model_Linear_Gelu_1_HingeLoss():
    name = '1_Gelu_HingeLoss'
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(Gelu('a1'))
    model.add(Linear('fc2', 256, 10, 0.01))
    loss = HingeLoss(name='loss')
    return name, model, loss
def Model_Linear_Relu_1_EuclideanLoss():
    name = '1_Relu_EuclideanLoss'
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(Relu('a1'))
    model.add(Linear('fc2', 256, 10, 0.01))
    loss = EuclideanLoss(name='loss')
    return name, model, loss
def Model_Linear_Gelu_1_SoftmaxCrossEntropyLoss():
    name = '1_Gelu_SoftmaxCrossEntropyLoss'
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(Gelu('a1'))
    model.add(Linear('fc2', 256, 10, 0.01))
    loss = SoftmaxCrossEntropyLoss(name='loss')
    return name, model, loss
Example #16
def test_lr(network, x, lrate, momentum=0.9, iterations=100):
    """
    Arguments:
    - network must be an object of class Network
    - data must be data given by function: data_read_classification
    - lrate must be np array 
    """
    errors = np.zeros(len(lrate))
    for i in range(len(lrate)):
        brain = Network(learning_rate=lrate[i],
                        momentum_rate=momentum,
                        iterations=iterations)
        for j in range(len(network.layers)):
            brain.add(
                Layer(network.layers[j].inputs_neurons,
                      network.layers[j].output_neurons,
                      network.layers[j].activation_func_name))
        all_errors = brain.train_and_evaluate(x[0], x[1], x[2], x[3])
        errors[i] = all_errors[0][iterations - 1]
    order = np.argsort(lrate)  # keep rates and errors paired when plotting
    plt.plot(lrate[order], errors[order])
    plt.xlabel('Learning Rate')
    plt.ylabel('Error of network after ' + str(iterations) + ' iterations')
    plt.show()
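A typical call, with names hypothetical and x being whatever data_read_classification returns (unpacked inside as x[0]..x[3]):

rates = np.array([0.001, 0.005, 0.01, 0.05, 0.1])  # learning rates to sweep
test_lr(network, x, rates, momentum=0.9, iterations=100)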
def two_layer_sigmoid():
    model = Network()
    model.add(Linear('fc1', 784, 256, 0.001))
    model.add(Sigmoid('sg1'))
    model.add(Linear('fc2', 256, 10, 0.001))
    model.add(Sigmoid('sg2'))
    config = {
        'learning_rate': 0.01,
        'weight_decay': 0.005,
        'momentum': 0.9,
        'batch_size': 100,
        'max_epoch': 20,
        'disp_freq': 50,
        'test_epoch': 5
    }
    return model, config
Example #18
def build_model_from_string(def_str):
    model = Network()
    def_str = def_str.strip()
    layer_strs = def_str.split(';')
    for layer_str in layer_strs:
        tokens = layer_str.split(',')
        if len(tokens) <= 1:
            raise Exception(
                "Invalid token: {} in layer definition".format(layer_str))
        layer_type = tokens[0].strip()
        name = tokens[1].strip()
        if layer_type == "linear":
            model.add(
                Linear(name, int(tokens[2]), int(tokens[3]), float(tokens[4])))
        elif layer_type == "sigmoid":
            model.add(Sigmoid(name))
        elif layer_type == "relu":
            model.add(Relu(name))
        else:
            raise NotImplementedError(
                "Unsupported layer type {}".format(layer_type))
    print("=" * 50 + "\nModel Summary:\n{}\n".format(model) + "=" * 50 + "\n")
    return model
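The grammar is read off the parser: semicolon-separated layers, each a comma-separated token list starting with type and name; linear layers add in_num, out_num, init_std. For example:

model = build_model_from_string(
    "linear, fc1, 784, 256, 0.01;"
    " relu, act1;"
    " linear, fc2, 256, 10, 0.01")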
x_train = x_train.reshape(x_train.shape[0], 1, 28 * 28)
x_train = x_train.astype('float32')
x_train /= 255
# encode output which is a number in range [0,9] into a vector of size 10
# e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
y_train = np_utils.to_categorical(y_train)

# same for test data : 10000 samples
x_test = x_test.reshape(x_test.shape[0], 1, 28 * 28)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)

# Network
net = Network()
net.add(FCLayer(28 * 28,
                100))  # input_shape=(1, 28*28)    ;   output_shape=(1, 100)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(100, 50))  # input_shape=(1, 100)      ;   output_shape=(1, 50)
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(50, 10))  # input_shape=(1, 50)       ;   output_shape=(1, 10)
net.add(ActivationLayer(tanh, tanh_prime))

# train on 1000 samples
# since we didn't implement mini-batch GD, training would be quite slow if we updated on all 60000 samples at each iteration...
net.use(mse, mse_prime)
net.fit(x_train[0:1000], y_train[0:1000], epochs=50, learning_rate=0.01)

# test on 3 samples
out = net.predict(x_test[0:3])
print("\n")
print("predicted values : ")
Example #20
from network import Network
from utils import LOG_INFO
from layers import Relu, Sigmoid, Linear, Gelu
from loss import EuclideanLoss, SoftmaxCrossEntropyLoss, HingeLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d
import time

NAME = "2layersGelu.Cross"
train_data, test_data, train_label, test_label = load_mnist_2d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Linear('fc0', 784, 500, 0.01, 'gelu', 'act0'))
model.add(Linear('fc1', 500, 256, 0.01, 'gelu', 'act1'))
model.add(Linear('fc2', 256, 10, 0.01, None, 'act2'))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward and backward over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between displays of training information.

config = {
    'learning_rate': 0.1,
    'weight_decay': 0.0005,
    'momentum': 0.9,
    'batch_size': 500,
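To make the NOTE concrete: MNIST's 60000 training samples at batch_size 500 give 120 iterations per epoch, so disp_freq counts within that budget.

iters_per_epoch = 60000 // 500   # MNIST train size / batch_size -> 120 iterations per epoch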
Example #21
                  avSP500, stdSP500, y])
rData = np.rot90(rData, 3)
rData = rData.reshape((length,1,9))

# Shuffle the data into random order
np.random.shuffle(rData)

# Split into train and test sets (80/20)
rDataTrain = rData[:int(0.8*len(rData))]
rDataTest = rData[int(0.8*len(rData)):]


# Build the network
network = Network()
# Add the layers
network.add(FCLayer(8, 4))
network.add(ActivationLayer(tanh, d_tanh))
network.add(FCLayer(4,2))
network.add(ActivationLayer(sigmoid, d_sigmoid))
network.add(FCLayer(2,1))
network.add(ActivationLayer(sigmoid, d_sigmoid))

# Set the loss function
network.use(mse, d_mse)

# Train the network
network.fit(rDataTrain[:,:,1:], rDataTrain[:,:,0], 3500, 0.3)

# Predict on the training split
out = network.predict(rDataTrain[:,:,1:])
res = np.empty(len(out))
import numpy as np

from network import Network
from conv_layer import ConvLayer
from activation_layer import ActivationLayer
from activations import tanh, tanh_prime
from losses import mse, mse_prime

# training data
x_train = [np.random.rand(10,10,1)]
y_train = [np.random.rand(4,4,2)]

# network
net = Network()
net.add(ConvLayer((10,10,1), (3,3), 1))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(ConvLayer((8,8,1), (3,3), 1))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(ConvLayer((6,6,1), (3,3), 2))
net.add(ActivationLayer(tanh, tanh_prime))

# train
net.use(mse, mse_prime)
net.fit(x_train, y_train, epochs=1000, learning_rate=0.3)

# test
out = net.predict(x_train)
print("predicted = ", out)
print("expected = ", y_train)
Example #23
from network import Network
from utils import LOG_INFO, lc_plot
from layers import Relu, Sigmoid, Linear, Gelu
from loss import EuclideanLoss, SoftmaxCrossEntropyLoss, HingeLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d
import time

NAME = "1layersSigmoid.Hinge"
train_data, test_data, train_label, test_label = load_mnist_2d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Linear('fc1', 784, 256, 0.01, 'sigmoid', 'act1'))
model.add(Linear('fc2', 256, 10, 0.01, None, 'act2'))

loss = HingeLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward and backward over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between displays of training information.

config = {
    'learning_rate': 0.1,
    'weight_decay': 0.0005,
    'momentum': 0.9,
    'batch_size': 500,
    'max_epoch': 50,
Example #24
# reshape and normalize input data
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_train = x_train.astype('float32')
x_train /= 255

y_train = y_train.reshape(y_train.shape[0], 1)

x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_test = x_test.astype('float32')
x_test /= 255
# y_test = np_utils.to_categorical(y_test)
y_test = y_test.reshape(y_test.shape[0], 1)

# Network
net = Network(batch_size=16)
net.add(Conv2Layer(1, 10, padding='same')) # input_shape=(batch, 28, 28, 1)
net.add(ActivationLayer(relu, relu_prime))
net.add(PoolLayer(kernel_shape=2, stride=2))
net.add(Conv2Layer(10, 20, padding='same'))
net.add(ActivationLayer(relu, relu_prime))
net.add(PoolLayer(kernel_shape=2, stride=2))
net.add(FlattenLayer())
net.add(FCLayer(7*7*20, 200))
net.add(ActivationLayer(relu, relu_prime))
net.add(FCLayer(200, 50))                   # input_shape=(batch, 200)      ;   output_shape=(batch, 50)
net.add(ActivationLayer(relu, relu_prime))
net.add(FCLayer(50, 10))                    # input_shape=(batch, 50)       ;   output_shape=(batch, 10)

net.use(softmax_cross_entropy_with_logits, softmax_cross_entropy_with_logits_prime)

net.fit(x_train, y_train, epochs=10, learning_rate=0.01, evaluation=(x_test, y_test), gamma=0.9)
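The 7*7*20 feeding the first FCLayer follows from the 'same' convolutions preserving spatial size while each 2×2/stride-2 pool halves it (28 → 14 → 7), with 20 channels after conv2:

h = 28
for _ in range(2):    # two 2x2/stride-2 pools; the 'same' convs keep h unchanged
    h //= 2
print(h, h * h * 20)  # 7, 980 == 7*7*20, the FCLayer input width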
Example #25
class NetLineParser:

    def __init__(self, file_name):
        self.file_name = file_name
        self.network = Network()

    def parse(self):
        self.__parse_legs__()
        self.__parse_crew_flow__()
        self.network.sort_routes()

    # PLEG|20150506|G3 1567|IGU|CGH|20150506|1630|1805|20150506|1630|1805|B738|73M|G3|G3
    # POPT|-82678574|
    def __parse_legs__(self):
        with open(self.file_name, 'r') as f:
            line = f.readline()
            while line:
                fields = line.split('|')
                if fields[0] == 'PLEG':
                    flight_number = fields[2]
                    fr = fields[3]
                    to = fields[4]
                    sdt = datetime.strptime('%s %s' % (fields[5], fields[6]), '%Y%m%d %H%M')
                    sat = datetime.strptime('%s %s' % (fields[5], fields[7]), '%Y%m%d %H%M')
                    if sat <= sdt:
                        sat = sat + timedelta(days=1)
                    main = fields[11]
                    sub = fields[12]
                    popt = f.readline()
                    route_number = popt.split('|')[1].strip()
                    self.network.add(flight_number, fr, to, sdt, sat, main, sub, route_number)
                line = f.readline()

    # PPRG|1|1|0|0|0|1|0|0|1|0|0|0|A|NL3574381||Y||19700101||||
    # PLEG|20150423|G31726|BSB|PMW|20150423|1912|2030|20150423|1912|2030|B738|738|G3|G3
    # POPT|-82678625|
    # PLEG|20150423|G31729|PMW|BSB|20150423|2100|2218|20150423|2100|2218|B738|738|G3|G3
    # POPT|-82678625|
    # POPT|4:45|28:56
    # PLEG|20150425|G31239|THE|BSB|20150425|0826|1037|20150425|0826|1037|B738|738|G3|G3
    # POPT|41|
    # POPT|2:11|0:00
    # PFTR|20150424|G31313|CNF|CGH|20150424|1728|1840|20150424|1728|1840
    def __parse_crew_flow__(self):
        with open(self.file_name, 'r') as f:
            line = f.readline()
            while line:
                fields = line.split('|')
                if fields[0] == 'PPRG':
                    crew_complement = list(map(int, fields[1:13]))  # list() so it survives reuse across legs
                    from_leg = None
                elif fields[0] == 'PLEG':
                    current_leg = self.__get_leg__(fields)
                    if current_leg is not None:
                        current_leg.add_crew_from(crew_complement, from_leg)
                    from_leg = current_leg
                elif fields[0] == 'PFTR':
                    from_leg = self.__get_leg__(fields)
                elif fields[0] == 'POPT' and ':' in fields[1]:
                    from_leg = None
                line = f.readline()

    def __get_leg__(self, fields):
        flight_number = fields[2]
        sdt = datetime.strptime('%s %s' % (fields[5], fields[6]), '%Y%m%d %H%M')
        key = (flight_number, sdt)
        return self.network.legs[key] if key in self.network.legs else None
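Typical use, given a netline file in the PLEG/POPT format sketched in the comments (file name hypothetical; legs is assumed to be a dict keyed by (flight_number, sdt), which is how __get_leg__ indexes it):

parser = NetLineParser('netline_20150506.txt')  # hypothetical file name
parser.parse()
for key, leg in parser.network.legs.items():
    print(key, leg)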
Example #26
import numpy as np

from network import Network
from fc_layer import FCLayer
from activation_layer import ActivationLayer
#from activations import tanh, tanh_prime
from losses import mse, mse_prime
from activations import sigmoid, sigmoid_prime

# training data
x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
y_train = np.array([[[0]], [[1]], [[1]], [[0]]])

# network
net = Network()
net.add(FCLayer(2, 3))
net.add(ActivationLayer(sigmoid, sigmoid_prime))
net.add(FCLayer(3, 1))
net.add(ActivationLayer(sigmoid, sigmoid_prime))

# train
net.use(mse, mse_prime)
cost_, myerr = net.fit(x_train, y_train, epochs=10000, learning_rate=0.2)

# test
out = net.predict(x_train)
print(out)

import matplotlib.pyplot as plt
plt.plot(cost_)
plt.show()
Example #27
import numpy as np
from network import Network
from base import *

# training data
xtrain = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
ytrain = np.array([[0], [1], [1], [0]])

# network
net = Network()
net.add(Layer(in_neurons=2, out_neurons=3))
net.add(ActivationLayer(gauss, gauss_prime))
net.add(Layer(in_neurons=3, out_neurons=1))
net.add(ActivationLayer(tanh, tanh_prime))

# train
net.loss_parameter(mse, mse_prime)
net.train(xtrain, ytrain, epochs=375, learning_rate=0.1, graphic=True)

# test
net.predict(xtrain)
Example #28
from network import Network
from layers import Relu, Linear, Conv2D, AvgPool2D, Reshape
from utils import LOG_INFO
from loss import EuclideanLoss, SoftmaxCrossEntropyLoss
from solve_net import train_net, test_net
from load_data import load_mnist_4d
from plot import show
from solve_net import show4category
train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 4, 3, 1, 0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
model.add(Conv2D('conv2', 4, 8, 3, 1, 0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 8 x 7 x 7
model.add(Reshape('flatten', (-1, 392)))
model.add(Linear('fc3', 392, 10, 0.01))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward and backward over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between displays of training information.

config = {
Example #29
from network import Network
from layers import Relu, Linear, Conv2D, AvgPool2D, Reshape
from utils import LOG_INFO
from loss import EuclideanLoss, SoftmaxCrossEntropyLoss
from solve_net import train_net, test_net, get_feature_map
from load_data import load_mnist_4d
import matplotlib.pyplot as plt

train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 8, 3, 1, 0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 8 x 14 x 14
model.add(Conv2D('conv2', 8, 16, 3, 1, 0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 16 x 7 x 7
model.add(Reshape('flatten', (-1, 784)))
model.add(Linear('fc3', 784, 256, 0.01))
model.add(Relu('relu3'))
model.add(Linear('fc4', 256, 10, 0.01))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward and backward over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes the number of iterations between displays of training information.
Example #30
from loss import CrossEntropyLoss
from optimizer import SGDOptimizer
from network import Network
from data_preparation import load_data
from solve_rnn import solve_rnn
from layers import RNN, Linear, Softmax  # assumed location, matching the other examples

import theano.tensor as T

X_train, y_train, X_test, y_test = load_data()

HIDDEN_DIM = 32
INPUT_DIM = 20
OUTPUT_DIM = 10

model = Network()
model.add(RNN('rnn1', HIDDEN_DIM, INPUT_DIM, 0.1))      # output shape: 4 x HIDDEN_DIM
model.add(Linear('fc', HIDDEN_DIM, OUTPUT_DIM, 0.1))    # output shape: 4 x OUTPUT_DIM
model.add(Softmax('softmax'))

loss = CrossEntropyLoss('xent')

optim = SGDOptimizer(0.01, 0.0001, 0.9)
input_placeholder = T.fmatrix('input')
label_placeholder = T.fmatrix('label')

model.compile(input_placeholder, label_placeholder, loss, optim)

MAX_EPOCH = 6
DISP_FREQ = 1000
TEST_FREQ = 10000
Example #31
        default="Euclidean",
    )
    parser.add_argument("--activation",
                        choices=ACTIVATIONS.keys(),
                        default="Relu")
    return parser.parse_args()


# Your model definition here
# You should explore different model architectures
args = parser_args()
model = Network()
activation = ACTIVATIONS[args.activation]
loss = LOSSES[args.loss]("loss")
if args.layers == 1:
    model.add(Linear('fc1', 784, 256, 0.01))
    model.add(activation('act1'))
    model.add(Linear('fc2', 256, 10, 0.01))
elif args.layers == 2:
    model.add(Linear('fc1', 784, 500, 0.01))
    model.add(activation('act1'))
    model.add(Linear('fc2', 500, 256, 0.01))
    model.add(activation('act2'))
    model.add(Linear('fc3', 256, 10, 0.01))
else:
    raise Exception("layers error")

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward and backward over one batch of samples.
#       one epoch means the model has gone through all the training samples.