Example #1
    def __init__(self, insize=56, z_dim=10):
        super().__init__()
        self.insize = insize
        # four /2 poolings shrink the spatial side to insize // 16
        self.linear_size = 16 * (insize // 16) ** 2
        # first conv pair
        self.net = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # second conv pair
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # third conv
            nn.Conv2d(32, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # fourth conv
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # flatten
            Reshape(-1, self.linear_size),
        )

        self.loc = nn.Linear(self.linear_size, z_dim)
        self.scale = nn.Linear(self.linear_size, z_dim)
        self.relu = nn.ReLU()
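
Note: Reshape is not part of torch.nn, so the PyTorch examples on this page assume a small helper module. A minimal sketch of what that helper likely looks like (an assumption, not code from any of these projects):

import torch.nn as nn

class Reshape(nn.Module):
    """Wraps Tensor.reshape so it can sit inside an nn.Sequential."""
    def __init__(self, *shape):
        super().__init__()
        self.shape = shape  # e.g. (-1, 1024) flattens each batch

    def forward(self, x):
        return x.reshape(*self.shape)

Examples #3 and #7 instead pass a single tuple and keep the batch dimension, so their helper presumably reshapes to (x.shape[0], *shape).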
Example #2
 def __init__(self):
     super().__init__()
     self.net = nn.Sequential(
         # first conv pair
         nn.Conv2d(1, 32, kernel_size=3, padding=1),
         nn.ReLU(),
         nn.Conv2d(32, 32, kernel_size=3, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(2),
         # second conv pair
         nn.Conv2d(32, 32, kernel_size=3, padding=1),
         nn.ReLU(),
         nn.Conv2d(32, 32, kernel_size=3, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(2),
         # third conv
         nn.Conv2d(32, 16, kernel_size=3, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(2),
         # fourth conv
         nn.Conv2d(16, 16, kernel_size=3, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(2),
         # flatten
         Reshape(-1, 1024),
         nn.Linear(1024, 128),
         nn.ReLU(),
         # linear
         nn.Linear(128, 3),
         nn.Softmax(dim=1))
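
The hard-coded 1024 follows from the pooling arithmetic, assuming 128x128 inputs (an assumption; the snippet does not show the input size): four MaxPool2d(2) stages shrink 128 to 8, and the last conv emits 16 channels. A quick sanity check:

# Assumed 128x128 input: four /2 poolings leave a 16-channel 8x8 map.
side = 128
for _ in range(4):
    side //= 2
print(16 * side * side)  # 1024, the size Reshape flattens to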
Example #3
 def __init__(self, config):
     super(MLP_VAE, self).__init__()
     self.__dict__.update(config)
     self.flat = Flatten()
     self.dense1 = nn.Linear(self.timesteps * self.input_dim, 64)
     self.densemu = nn.Linear(64, self.latent_dim)
     self.denselogvar = nn.Linear(64, self.latent_dim)
     self.dense2 = nn.Linear(self.latent_dim, 64)
     self.dense3 = nn.Linear(64, self.timesteps * self.input_dim)
     self.reshape = Reshape((self.timesteps, self.input_dim))
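
The source shows only __init__; below is a hedged sketch of how these layers are typically wired in forward (the ReLU placement and the reparameterization step are assumptions, not code from this project):

import torch

def forward(self, x):
    h = torch.relu(self.dense1(self.flat(x)))
    mu, logvar = self.densemu(h), self.denselogvar(h)
    z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # reparameterization trick
    out = self.reshape(self.dense3(torch.relu(self.dense2(z))))
    return out, mu, logvar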
Example #4
    def _make_hnet(self):
        # by default, predict a map over states only, rather than state-action pairs
        modules = []
        modules.append(nn.Linear(self.obs_dim, self.layers_dbn[0]))
        modules.append(self.act_fun())

        for idx in range(len(self.layers_dbn)-1):
            modules.append(nn.Linear(self.layers_dbn[idx], self.layers_dbn[idx+1]))
            modules.append(self.act_fun())

        # Final layer
        if isinstance(self.n_obs_dbn, tuple):
            modules.append(nn.Linear(self.layers_dbn[-1], np.prod(self.n_obs_dbn)*self.n_actions**2))
            modules.append(Reshape(-1, self.n_actions, np.prod(self.n_obs_dbn), self.n_actions))
        else:
            modules.append(nn.Linear(self.layers_dbn[-1], self.n_obs_dbn*self.n_actions**2))
            modules.append(Reshape(-1, self.n_actions, self.n_obs_dbn, self.n_actions))

        hnet = nn.Sequential(*modules).to(self.device)
        return hnet
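
For reference, the Linear-plus-Reshape head built above can be reproduced with PyTorch's built-in nn.Unflatten; a standalone sketch with placeholder sizes (not values from the source):

import torch
import torch.nn as nn

n_obs, n_actions, hidden = 6, 4, 32
head = nn.Sequential(
    nn.Linear(hidden, n_obs * n_actions ** 2),
    nn.Unflatten(1, (n_actions, n_obs, n_actions)),  # built-in stand-in for Reshape
)
print(head(torch.randn(5, hidden)).shape)  # torch.Size([5, 4, 6, 4])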
Example #5
 def __init__(self, in_dim=1024, hidden_dim=128):
     super().__init__()
     self.net = nn.Sequential(
         # dropout + single conv (halves the channel count: 16 -> 8)
         nn.Dropout(p=0.2),
         nn.Conv2d(16, 8, kernel_size=3, padding=1),
         nn.ReLU(),
         Reshape(-1, int(in_dim / 2)),
         nn.Dropout(p=0.2),
         nn.Linear(int(in_dim / 2), 3),
         nn.Softmax(dim=1),
     )
Example #6
def basicConv2Layer():
    model = Network()
    model.add(Conv2D('conv1', 1, 4, 3, 1, 1))
    model.add(Relu('relu1'))
    model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
    model.add(Conv2D('conv2', 4, 4, 3, 1, 1))
    model.add(Relu('relu2'))
    model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 4 x 7 x 7
    model.add(Reshape('flatten', (-1, 196)))
    model.add(Linear('fc3', 196, 10, 0.1))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    return model, loss
Example #7
 def __init__(self, config):
     super(MLP_AE, self).__init__()
     self.__dict__.update(config)
     self.encoder = nn.Sequential(
         Flatten(),
         nn.Linear(self.timesteps * self.input_dim, self.units_enc),
         nn.Linear(self.units_enc, self.latent_dim),
         )
     self.decoder = nn.Sequential(
         nn.Linear(self.latent_dim, self.units_dec),
         nn.Linear(self.units_dec, self.timesteps * self.input_dim),
         Reshape((self.timesteps, self.input_dim))
         )
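
A hedged usage sketch (config values are placeholders, and the custom Flatten and Reshape helpers are assumed to be in scope, with Reshape keeping the batch dimension): round-trip a batch through the autoencoder.

import torch

cfg = dict(timesteps=20, input_dim=3, units_enc=64, units_dec=64, latent_dim=8)
ae = MLP_AE(cfg)
x = torch.randn(5, cfg['timesteps'], cfg['input_dim'])
recon = ae.decoder(ae.encoder(x))
print(recon.shape)  # expected: torch.Size([5, 20, 3])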
Example #8
    def addReshapeLayer(self, **kwargs):
        """
        Add a reshape layer to change the dimensions of the previous layer's output
        """

        input_layer = self.input_layer if not self.all_layers \
            else self.all_layers[-1]

        self.n_reshape_layers += 1
        name = "reshape%i" % self.n_reshape_layers

        new_layer = Reshape(input_layer, name=name, **kwargs)

        self.all_layers += (new_layer, )
Example #9
def inner_model(trainable, x):
    layers_list = [
        Reshape([-1, 28, 28, 1]),
        Conv(32),
        BatchNormalization(),
        Relu(),
        MaxPool(),
        Conv(64),
        BatchNormalization(),
        Relu(),
        MaxPool(),
        Reshape([-1, 7 * 7 * 64]),
        FullyConnected(1024),
        Relu(),
        FullyConnected(10)
    ]
    variable_saver = VariableSaver()
    signal = x
    print('shape', signal.get_shape())
    for idx, layer in enumerate(layers_list):
        signal = layer.contribute(signal, idx, trainable,
                                  variable_saver.save_variable)
        print('shape', signal.get_shape())
    return signal, variable_saver.var_list
Example #10
def LeNet():
    model = Network()
    model.add(Conv2D('conv1', 1, 6, 5, 2, 1))
    model.add(Relu('relu1'))
    model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 6 x 14 x 14
    model.add(Conv2D('conv2', 6, 16, 5, 0, 1))
    model.add(Relu('relu2'))
    model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 16 x 5 x 5
    model.add(Reshape('flatten', (-1, 400)))
    model.add(Linear('fc1', 400, 120, 0.1))
    model.add(Relu('relu3'))
    model.add(Linear('fc2', 120, 84, 0.1))
    model.add(Relu('relu4'))
    model.add(Linear('fc3', 84, 10, 0.1))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    return model, loss
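
A sketch of the shape arithmetic behind LeNet's Reshape('flatten', (-1, 400)), assuming 28x28 MNIST inputs and the apparent Conv2D signature (name, in_channels, out_channels, kernel_size, pad, init_std):

side = 28
side = side + 2 * 2 - 5 + 1   # conv1: kernel 5, pad 2 -> 28
side //= 2                    # pool1 -> 14
side = side + 2 * 0 - 5 + 1   # conv2: kernel 5, pad 0 -> 10
side //= 2                    # pool2 -> 5
print(16 * side * side)       # 400 features entering fc1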
Example #11
def main():
    c = color_codes()
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    try:
        net = load_model('/home/mariano/Desktop/test.tf')
    except IOError:
        x = Input([784])
        x_image = Reshape([28, 28, 1])(x)
        x_conv1 = Conv(filters=32,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(x_image)
        h_pool1 = MaxPool((2, 2), padding='same')(x_conv1)
        h_conv2 = Conv(filters=64,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(h_pool1)
        h_pool2 = MaxPool((2, 2), padding='same')(h_conv2)
        h_fc1 = Dense(1024, activation='relu')(h_pool2)
        h_drop = Dropout(0.5)(h_fc1)
        y_conv = Dense(10)(h_drop)

        net = Model(x,
                    y_conv,
                    optimizer='adam',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + c['b'] +
          'Original (MNIST)' + c['nc'] + c['g'] + ' net ' + c['nc'] + c['b'] +
          '(%d parameters)' % net.count_trainable_parameters() + c['nc'])

    net.fit(mnist.train.images,
            mnist.train.labels,
            val_data=mnist.test.images,
            val_labels=mnist.test.labels,
            patience=10,
            epochs=200,
            batch_size=1024)

    save_model(net, '/home/mariano/Desktop/test.tf')
Example #12
    def build_model(self):
        self.input = Input(shape=tuple([self.n_time_slice] +
                                       list(self.input_shape)),
                           dtype='float32')
        inp = Reshape([-1] + list(self.input_shape))(self.input)
        pre_conv = Dense(3, activation=None, name='pre_conv')(inp)

        backbone = segmentation_models.backbones.get_backbone(
            'resnet34',
            input_shape=list(self.input_shape[:2]) + [3],
            weights='imagenet',
            include_top=False)

        if self.freeze_encoder:
            for layer in backbone.layers:
                if not isinstance(layer, BatchNormalization):
                    layer.trainable = False
        skip_connection_layers = segmentation_models.backbones.get_feature_layers(
            'resnet34', n=4)
        self.unet = segmentation_models.unet.builder.build_unet(
            backbone,
            self.unet_feat,
            skip_connection_layers,
            decoder_filters=(256, 128, 64, 32, 16),
            block_type='upsampling',
            activation='linear',
            n_upsample_blocks=5,
            upsample_rates=(2, 2, 2, 2, 2),
            use_batchnorm=True)

        output = self.unet(pre_conv)
        output = MergeOnZ(self.n_time_slice, self.unet_feat)(output)
        output = Dense(self.unet_feat, activation='relu')(output)
        output = Dense(self.n_classes, activation=None)(output)
        self.model = Model(self.input, output)
        self.model.compile(optimizer='Adam', loss=self.loss_func, metrics=[])
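
The Reshape([-1] + list(self.input_shape)) / MergeOnZ pair appears to bracket the 2D U-Net so each time slice is processed like an independent sample. A minimal NumPy sketch of that folding idea (shapes are placeholders, not values from the source):

import numpy as np

x = np.zeros((2, 5, 64, 64, 4))           # (batch, n_time_slice, H, W, C)
folded = x.reshape((-1,) + x.shape[2:])   # (10, 64, 64, 4): slices become samples
print(folded.shape)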
Example #13
def convert(keras_model, class_map, description="Neural Network Model"):
	"""
	Convert a keras model to PMML
	@model. The keras model object
	@class_map. A map in the form {class_id: class_name}
	@description. A short description of the model
	Returns a DeepNeuralNetwork object which can be exported to PMML
	"""
	pmml = DeepNetwork(description=description, class_map=class_map)
	pmml.keras_model = keras_model
	pmml.model_name = keras_model.name
	config = keras_model.get_config()

	for layer in config['layers']:
		layer_class = layer['class_name']
		layer_config = layer['config']
		layer_inbound_nodes = layer['inbound_nodes']
		# Input
		if layer_class == "InputLayer":
			pmml._append_layer(InputLayer(
				name=layer_config['name'],
				input_size=layer_config['batch_input_shape'][1:]
			))
		# Conv2D
		elif layer_class == "Conv2D":
			pmml._append_layer(Conv2D(
				name=layer_config['name'],
				channels=layer_config['filters'],
				kernel_size=layer_config['kernel_size'],
				dilation_rate=layer_config['dilation_rate'],
				use_bias=layer_config['use_bias'],
				activation=layer_config['activation'],
				strides=layer_config['strides'],
				padding=layer_config['padding'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# DepthwiseConv2D
		elif layer_class == "DepthwiseConv2D":
			pmml._append_layer(DepthwiseConv2D(
				name=layer_config['name'],
				kernel_size=layer_config['kernel_size'],
				depth_multiplier=layer_config['depth_multiplier'],
				use_bias=layer_config['use_bias'],
				activation=layer_config['activation'],
				strides=layer_config['strides'],
				padding=layer_config['padding'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# MaxPooling
		elif layer_class == "MaxPooling2D":
			pmml._append_layer(MaxPooling2D(
				name=layer_config['name'],
				pool_size=layer_config['pool_size'],
				strides=layer_config['strides'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "AveragePooling2D":
			pmml._append_layer(AveragePooling2D(
				name=layer_config['name'],
				pool_size=layer_config['pool_size'],
				strides=layer_config['strides'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "GlobalAveragePooling2D":
			pmml._append_layer(GlobalAveragePooling2D(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Flatten
		elif layer_class == "Flatten":
			pmml._append_layer(Flatten(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Dense
		elif layer_class == "Dense":
			pmml._append_layer(Dense(
				name=layer_config['name'],
				channels=layer_config['units'],
				use_bias=layer_config['use_bias'],
				activation=layer_config['activation'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Zero padding layer
		elif layer_class == "ZeroPadding2D":
			pmml._append_layer(ZeroPadding2D(
				name=layer_config['name'],
				padding=layer_config['padding'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Reshape layer
		elif layer_class == "Reshape":
			pmml._append_layer(Reshape(
				name=layer_config['name'],
				target_shape=layer_config['target_shape'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "Dropout":
			pmml._append_layer(Dropout(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Batch Normalization
		elif layer_class == "BatchNormalization":
			pmml._append_layer(BatchNormalization(
				name=layer_config['name'],
				axis=layer_config['axis'],
				momentum=layer_config['momentum'],
				epsilon=layer_config['epsilon'],
				center=layer_config['center'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "Add":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Subtract":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				operator='subtract',
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Dot":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				operator='dot',
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Concatenate":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				axis=layer_config['axis'],
				operator='concatenate',
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Activation":
			pmml._append_layer(Activation(
				name=layer_config['name'],
				activation=layer_config['activation'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "ReLU":
			pmml._append_layer(Activation(
				name=layer_config['name'],
				activation='relu',
				threshold=layer_config['threshold'],
				max_value=layer_config['max_value'],
				negative_slope=layer_config['negative_slope'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Unknown layer
		else:
			raise ValueError("Unknown layer type:", layer_class)
	return pmml
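
A hedged usage sketch for convert (the model below is a placeholder; a functional-API model is assumed because the converter reads each layer's 'inbound_nodes'):

from tensorflow import keras

inp = keras.layers.Input((28, 28, 1))
h = keras.layers.Conv2D(8, 3, padding='same', activation='relu')(inp)
h = keras.layers.Flatten()(h)
out = keras.layers.Dense(10, activation='softmax')(h)
model = keras.Model(inp, out)

pmml = convert(model, class_map={i: str(i) for i in range(10)},
               description="MNIST demo")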
Example #14
    yt = yt.ravel()

    n = 50000
    nt = 10000
    x = x[:n]
    y = y[:n]
    xt = xt[:nt]
    yt = yt[:nt]

    # Model
    net = Net()
    net.push(Conv2d(5, 5, 3, 20))  # 3x32x32 -> 20x28x28
    net.push(Relu())
    net.push(BatchNorm())
    net.push(Maxpooling(4, 4))  # 20x28x28 -> 20x7x7
    net.push(Reshape((980,)))  # 20 * 7 * 7 = 980
    net.push(Linear(980, 200))
    net.push(Relu())
    net.push(BatchNorm())
    net.push(Softmax(200, 10))

    # Data
    data = DataProvider()
    data.train_input(x, y)
    data.test_input(xt, yt)
    data.batch_size(32)
    data.batch_size_test(1000)

    lr = 1e-3
    gamma = 1
    beta_1 = 0.9
Example #15
from solve_net import train_net, test_net, get_feature_map
from load_data import load_mnist_4d
import matplotlib.pyplot as plt

train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 8, 3, 1, 0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 8 x 14 x 14
model.add(Conv2D('conv2', 8, 16, 3, 1, 0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 16 x 7 x 7
model.add(Reshape('flatten', (-1, 784)))
model.add(Linear('fc3', 784, 256, 0.01))
model.add(Relu('relu3'))
model.add(Linear('fc4', 256, 10, 0.01))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes how many iterations pass between information displays.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0.0001,
Example #16
                input_depth=64,
                n_filters=128,
                filter_dim=(3, 3),
                stride=(1, 1),
                padding=((1, 1), (1, 1))), ReLU(), BatchNorm(),
    Convolution(input_shape=(8, 8),
                input_depth=128,
                n_filters=128,
                filter_dim=(3, 3),
                stride=(1, 1),
                padding=((1, 1), (1, 1))), ReLU(), BatchNorm(),
    MaxPooling(input_shape=(8, 8),
               input_depth=128,
               filter_dim=(2, 2),
               stride=(2, 2)), Dropout(rate=0.4),
    Reshape(input_shape=(128, 4, 4), output_shape=(2048, 1)),
    Dense(size=10, input_len=2048), Softmax())

optimizer = Adam(network.trainables,
                 learning_rate=lambda n: 0.0001,
                 beta_1=0.9,
                 beta_2=0.999)

avg = IncrementalAverage()
for epoch in range(STARTING_EPOCH, STARTING_EPOCH + EPOCHS):
    batch = 1
    for x, y in make_batch(training_data, training_labels, BATCH_SIZE):
        out = network(x)
        avg.add(np.sum(VectorCrossEntropy.error(out, y)))
        network.backward(VectorCrossEntropy.gradient(out, y), update=True)
        if batch % LOG_FREQ == 0:
Example #17
def get_brats_nets(input_shape, filters_list, kernel_size_list, dense_size,
                   nlabels):
    inputs = Input(shape=input_shape)
    conv = inputs
    for filters, kernel_size in zip(filters_list, kernel_size_list):
        conv = Conv(filters,
                    kernel_size=(kernel_size, ) * 3,
                    activation='relu',
                    data_format='channels_first')(conv)

    full = Conv(dense_size,
                kernel_size=(1, 1, 1),
                data_format='channels_first',
                name='fc_dense',
                activation='relu')(conv)
    full_roi = Conv(nlabels[0],
                    kernel_size=(1, 1, 1),
                    data_format='channels_first',
                    name='fc_roi')(full)
    full_sub = Conv(nlabels[1],
                    kernel_size=(1, 1, 1),
                    data_format='channels_first',
                    name='fc_sub')(full)

    rf_roi = Concatenate(axis=1)([conv, full_roi])
    rf_sub = Concatenate(axis=1)([conv, full_sub])

    rf_num = 1
    while np.prod(rf_roi.shape[2:]) > 1:
        rf_roi = Conv(dense_size,
                      kernel_size=(3, 3, 3),
                      data_format='channels_first',
                      name='rf_roi%d' % rf_num)(rf_roi)
        rf_sub = Conv(dense_size,
                      kernel_size=(3, 3, 3),
                      data_format='channels_first',
                      name='rf_sub%d' % rf_num)(rf_sub)
        rf_num += 1

    full_roi = Reshape((nlabels[0], -1))(full_roi)
    full_sub = Reshape((nlabels[1], -1))(full_sub)
    full_roi = Permute((2, 1))(full_roi)
    full_sub = Permute((2, 1))(full_sub)
    full_roi_out = Activation('softmax', name='fc_roi_out')(full_roi)
    full_sub_out = Activation('softmax', name='fc_sub_out')(full_sub)

    combo_roi = Concatenate(axis=1)([Flatten()(conv), Flatten()(rf_roi)])
    combo_sub = Concatenate(axis=1)([Flatten()(conv), Flatten()(rf_sub)])

    tumor_roi = Dense(nlabels[0], activation='softmax',
                      name='tumor_roi')(combo_roi)
    tumor_sub = Dense(nlabels[1], activation='softmax',
                      name='tumor_sub')(combo_sub)

    outputs_roi = [tumor_roi, full_roi_out]

    net_roi = Model(inputs=inputs,
                    outputs=outputs_roi,
                    optimizer='adadelta',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    outputs_sub = [tumor_sub, full_sub_out]

    net_sub = Model(inputs=inputs,
                    outputs=outputs_sub,
                    optimizer='adadelta',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    return net_roi, net_sub
Example #18
train_data, test_data, train_label, test_label = load_mnist_4d('data')
img_record_num = 4
save_parameters = True
use_parameters = False
logpath = 'output.csv'

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 4, 3, 1, 0.1))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
model.add(Conv2D('conv2', 4, 4, 3, 1, 0.1))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 4 x 7 x 7
model.add(Reshape('flatten', (-1, 196)))
model.add(Linear('fc3', 196, 10, 0.1))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes how many iterations pass between information displays.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0.0,
    'momentum': 0.9,
    'batch_size': 100,
    # plt.imshow(data)
    # plt.axis('off')

Example #19


train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 12, 3, 1, 1))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 12 x 14 x 14
model.add(Conv2D('conv2', 12, 10, 3, 1, 1))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 10 x 7 x 7
model.add(Reshape('flatten', (-1, 49 * 10)))
model.add(Linear('fc3', 49 * 10, 10, 0.1))

# loss = EuclideanLoss(name='loss')
loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes how many iterations pass between information displays.

# np.random.seed(1626)
config = {
    'learning_rate': 0.01,
    'weight_decay': 0.000,

Example #20
    def __init__(self, name, numpy_rng, theano_rng, batchsize=128):
        # CALL PARENT CONSTRUCTOR TO SETUP CONVENIENCE FUNCTIONS
        # (SAVE/LOAD, ...)
        super(EmotionConvNet, self).__init__(name=name)

        self.numpy_rng = numpy_rng
        self.batchsize = batchsize
        self.theano_rng = theano_rng
        self.mode = theano.shared(np.int8(0), name='mode')

        self.inputs = T.ftensor4('inputs')
        self.inputs.tag.test_value = numpy_rng.randn(self.batchsize, 1, 48,
                                                     48).astype(np.float32)

        self.targets = T.ivector('targets')
        self.targets.tag.test_value = numpy_rng.randint(
            7, size=self.batchsize).astype(np.int32)

        self.layers = OrderedDict()

        self.layers['randcropandflip'] = RandCropAndFlip(
            inputs=self.inputs,
            image_shape=(self.batchsize, 1, 48, 48),
            patch_size=(44, 44),
            name='randcropandflip',
            theano_rng=self.theano_rng,
            mode_var=self.mode)

        self.layers['conv0'] = ConvLayer(
            rng=self.numpy_rng,
            inputs=self.layers['randcropandflip'],
            filter_shape=(32, 1, 9, 9),
            #image_shape=(self.batchsize, 1, 48, 48),
            name='conv0',
            pad=4)

        self.layers['maxpool0'] = MaxPoolLayer(inputs=self.layers['conv0'],
                                               pool_size=(2, 2),
                                               stride=(2, 2),
                                               name='maxpool0')

        self.layers['bias0'] = ConvBiasLayer(inputs=self.layers['maxpool0'],
                                             name='bias0')

        self.layers['relu0'] = Relu(inputs=self.layers['bias0'], name='relu0')

        self.layers['dropout0'] = Dropout(inputs=self.layers['relu0'],
                                          dropout_rate=.25,
                                          name='dropout0',
                                          theano_rng=self.theano_rng,
                                          mode_var=self.mode)

        self.layers['conv1'] = ConvLayer(rng=self.numpy_rng,
                                         inputs=self.layers['dropout0'],
                                         filter_shape=(32, 32, 5, 5),
                                         name='conv1',
                                         pad=2)

        self.layers['maxpool1'] = MaxPoolLayer(inputs=self.layers['conv1'],
                                               pool_size=(2, 2),
                                               stride=(2, 2),
                                               name='maxpool1')

        self.layers['bias1'] = ConvBiasLayer(inputs=self.layers['maxpool1'],
                                             name='bias1')

        self.layers['relu1'] = Relu(inputs=self.layers['bias1'], name='relu1')

        self.layers['dropout1'] = Dropout(inputs=self.layers['relu1'],
                                          dropout_rate=.25,
                                          name='dropout1',
                                          theano_rng=self.theano_rng,
                                          mode_var=self.mode)

        self.layers['conv2'] = ConvLayer(rng=self.numpy_rng,
                                         inputs=self.layers['dropout1'],
                                         filter_shape=(64, 32, 5, 5),
                                         name='conv2',
                                         pad=2)

        self.layers['maxpool2'] = MaxPoolLayer(inputs=self.layers['conv2'],
                                               pool_size=(2, 2),
                                               stride=(2, 2),
                                               name='maxpool2')

        self.layers['bias2'] = ConvBiasLayer(inputs=self.layers['maxpool2'],
                                             name='bias2')

        self.layers['relu2'] = Relu(inputs=self.layers['bias2'], name='relu2')

        self.layers['dropout2'] = Dropout(inputs=self.layers['relu2'],
                                          dropout_rate=.25,
                                          name='dropout2',
                                          theano_rng=self.theano_rng,
                                          mode_var=self.mode)

        self.layers['reshape2'] = Reshape(
            inputs=self.layers['dropout2'],
            shape=(self.layers['dropout2'].outputs_shape[0],
                   np.prod(self.layers['dropout2'].outputs_shape[1:])),
            name='reshape2')

        self.layers['fc3'] = AffineLayer(rng=self.numpy_rng,
                                         inputs=self.layers['reshape2'],
                                         nouts=7,
                                         name='fc3')

        self.layers['softmax3'] = Softmax(inputs=self.layers['fc3'],
                                          name='softmax3')

        self.probabilities = self.layers['softmax3'].outputs
        self.probabilities = T.clip(self.probabilities, 1e-6, 1 - 1e-6)

        self._cost = T.nnet.categorical_crossentropy(self.probabilities,
                                                     self.targets).mean()

        self.classification = T.argmax(self.probabilities, axis=1)

        self.params = []
        for l in self.layers.values():
            self.params.extend(l.params)

        self._grads = T.grad(self._cost, self.params)

        self.classify = theano.function(
            [self.inputs],
            self.classification,
            #givens={self.mode: np.int8(1)})
        )
Example #21
from solve_net import train_net, test_net
from load_data import load_mnist_4d
from plot import show
from solve_net import show4category
train_data, test_data, train_label, test_label = load_mnist_4d('data')

# Your model definition here
# You should explore different model architectures
model = Network()
model.add(Conv2D('conv1', 1, 4, 3, 1, 0.01))
model.add(Relu('relu1'))
model.add(AvgPool2D('pool1', 2, 0))  # output shape: N x 4 x 14 x 14
model.add(Conv2D('conv2', 4, 8, 3, 1, 0.01))
model.add(Relu('relu2'))
model.add(AvgPool2D('pool2', 2, 0))  # output shape: N x 8 x 7 x 7
model.add(Reshape('flatten', (-1, 392)))
model.add(Linear('fc3', 392, 10, 0.01))

loss = SoftmaxCrossEntropyLoss(name='loss')

# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means the model runs forward-backward on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' denotes how many iterations pass between information displays.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0,
    'momentum': 0.7,
    'batch_size': 100,
Example #22
        return x

    x = preprocessing(x)
    xt = preprocessing(xt)
    #x = np.random.random((n, 1, 28, 28))
    #y = np.random.randint(2, size=(n))

    # Model
    net = Net()
    net.push(Conv2d(5, 5, 1, 6))  # 1x28x28 -> 6x24x24
    net.push(Relu())
    net.push(Maxpooling(2, 2))  # 6x24x24 -> 6x12x12
    net.push(Conv2d(5, 5, 6, 16))  # 6x12x12 -> 16x8x8
    net.push(Relu())
    net.push(Maxpooling(2, 2))  # 16x8x8 -> 16x4x4
    net.push(Reshape((256,)))  # 16 * 4 * 4 = 256
    net.push(Linear(256, 84))
    net.push(Relu())
    net.push(Softmax(84, 10))

    # Data
    data = DataProvider()
    n = 10000
    data.train_input(x[:n], y[:n])
    data.test_input(xt, yt)
    data.batch_size(16)

    lr = 0.0009
    gamma = 0.9
    for epoch in range(50):
        print('Epoch:', epoch)