Code Example #1
def build_network(net_in_views, net_in_actions):
    net_views_out = FlattenLayer(net_in_views)
    net_actions_out = FlattenLayer(net_in_actions)
    net_concat = ConcatLayer([net_views_out, net_actions_out])
    net_hid = DenseLayer(net_concat, num_units=16, nonlinearity=rectify)
    net_out = DenseLayer(net_hid, num_units=4, nonlinearity=linear)
    return net_out
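
A minimal usage sketch (not part of the original snippet; it assumes Theano/Lasagne are importable, that `build_network` and its layer imports are in scope, and that the input shapes below are purely illustrative):

import theano
import theano.tensor as T
from lasagne.layers import InputLayer, get_output

views_var = T.tensor4('views')     # e.g. (batch, channels, height, width)
actions_var = T.matrix('actions')  # e.g. (batch, n_actions)

net_in_views = InputLayer((None, 1, 8, 8), input_var=views_var)
net_in_actions = InputLayer((None, 4), input_var=actions_var)

net_out = build_network(net_in_views, net_in_actions)
outputs = get_output(net_out)
predict_fn = theano.function([views_var, actions_var], outputs)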
Code Example #2
File: eeg_cnn_lib.py  Project: zht96pi/EEGLearn
def build_convpool_mix(input_vars,
                       nb_classes,
                       grad_clip=110,
                       imsize=32,
                       n_colors=3,
                       n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i],
                                        imsize=imsize,
                                        n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i],
                                   w_init=w_init,
                                   imsize=imsize,
                                   n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool,
                            ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool,
                     num_units=128,
                     grad_clipping=grad_clip,
                     nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)
    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
                          num_units=512,
                          nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the nb_classes-unit softmax output layer:
    convpool = DenseLayer(convpool,
                          num_units=nb_classes,
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
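
A usage sketch for this EEGLearn-style builder (assumptions: `build_cnn` and the Lasagne/Theano imports used above are available in the same module; the class count and one-tensor-per-window input format are illustrative):

import theano
import theano.tensor as T
import lasagne

n_timewin = 7
# one 4D image tensor (batch, n_colors, imsize, imsize) per time window
input_vars = [T.tensor4('eeg_win_%d' % i) for i in range(n_timewin)]
network = build_convpool_mix(input_vars, nb_classes=4, n_timewin=n_timewin)
prediction = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function(input_vars, prediction)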
Code Example #3
def build_convpool_mix(input_vars, input_shape=None):
    """
  Builds the complete network with LSTM and 1D-conv layers combined
  to integrate time from sequences of EEG images.
  :param input_vars: list of EEG images (one image per time window)
  :return: a pointer to the output of last layer
  """
    convnets = []
    W_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(input_shape[0]):
        if i == 0:
            convnet, W_init = build_cnn(input_vars[i], input_shape)
        else:
            convnet, _ = build_cnn(input_vars[i], input_shape, W_init)

        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    # convpool = ReshapeLayer(convpool, ([0], -1, numTimeWin))
    convpool = ReshapeLayer(
        convpool, ([0], input_shape[0], get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))

    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool,
                     num_units=128,
                     grad_clipping=grad_clip,  # `grad_clip` is assumed to be a module-level constant
                     nonlinearity=lasagne.nonlinearities.tanh)
    # After the LSTM layer you either need to reshape or slice it (depending on whether you
    # want to keep all predictions or just the last prediction).
    # http://lasagne.readthedocs.org/en/latest/modules/layers/recurrent.html
    # https://github.com/Lasagne/Recipes/blob/master/examples/lstm_text_generation.py
    lstm_out = SliceLayer(lstm, -1, 1)

    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
                          num_units=512,
                          nonlinearity=lasagne.nonlinearities.rectify)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.

    # And, finally, the num_classes-unit softmax output layer:
    convpool = DenseLayer(convpool,
                          num_units=num_classes,  # `num_classes` is assumed to be a module-level constant
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Code Example #4
        def q_network(state):
            input_state = InputLayer(input_var=state,
                                     shape=(None, self.state_dimension[0],
                                            self.state_dimension[1],
                                            self.state_dimension[2]))

            input_state = DimshuffleLayer(input_state, pattern=(0, 3, 1, 2))

            conv = Conv2DLayer(input_state,
                               num_filters=32,
                               filter_size=(8, 8),
                               stride=(4, 4),
                               nonlinearity=rectify)

            conv = Conv2DLayer(conv,
                               num_filters=64,
                               filter_size=(4, 4),
                               stride=(2, 2),
                               nonlinearity=rectify)

            conv = Conv2DLayer(conv,
                               num_filters=64,
                               filter_size=(3, 3),
                               stride=(1, 1),
                               nonlinearity=rectify)

            flatten = FlattenLayer(conv)

            dense = DenseLayer(flatten, num_units=512, nonlinearity=rectify)

            q_values = DenseLayer(dense,
                                  num_units=self.action_dimension,
                                  nonlinearity=linear)

            return q_values
Code Example #5
def network_custom_mlp(input_var,
                       nb_classes,
                       channel_size=20,
                       n_colors=3,
                       width=[256, 256],
                       drop_input=.2,
                       drop_hidden=.5):
    """
    Builds the mlp layer

    :param input_var: list of EEG features
    :param nb_classes: number of classes
    :return: a pointer to the output of last layer
    """

    # Input layer and dropout (with shortcut `dropout` for `DropoutLayer`):
    network = lasagne.layers.InputLayer(shape=(None, n_colors, channel_size),
                                        input_var=input_var)
    network = FlattenLayer(network)
    if drop_input:
        network = lasagne.layers.dropout(network, p=drop_input)
    # Hidden layers and dropout:
    for i in range(len(width)):
        network = lasagne.layers.DenseLayer(
            network, width[i], nonlinearity=lasagne.nonlinearities.rectify)
        if drop_hidden:
            network = lasagne.layers.dropout(network, p=drop_hidden)
    # Output layer:
    network = lasagne.layers.DenseLayer(
        network, nb_classes, nonlinearity=lasagne.nonlinearities.sigmoid)
    return network
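
A training-function sketch for this MLP (assumptions: the Lasagne imports used above are available; since the output nonlinearity is a sigmoid, a binary cross-entropy loss is paired with it here as an illustrative choice):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor3('features')   # (batch, n_colors, channel_size)
target_var = T.matrix('targets')    # per-class targets in [0, 1]

network = network_custom_mlp(input_var, nb_classes=6)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([input_var, target_var], loss, updates=updates)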
Code Example #6
def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Code Example #7
def createDiscriminator2(input_var=None):

	_ = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = MaxPool2DDNNLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = MaxPool2DDNNLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = FlattenLayer(_)
	_ = DenseLayer(_, num_units=1000, nonlinearity=lasagne.nonlinearities.rectify)
	l_discriminator = DenseLayer(_, num_units=1, nonlinearity=lasagne.nonlinearities.sigmoid)

	print('--------------------')
	print('Discriminator architecture: \n')

	#get all layers
	allLayers=lasagne.layers.get_all_layers(l_discriminator)
	#for each layer print its shape information
	for l in allLayers:
		print(lasagne.layers.get_output_shape(l))

	print ("Discriminator output:", l_discriminator.output_shape)
	return l_discriminator
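
A discriminator-training sketch (an illustration under stated assumptions: a cuDNN-enabled setup, since the builder uses Conv2DDNNLayer, and real/fake labels supplied by the caller; hyperparameters are arbitrary):

import theano
import theano.tensor as T
import lasagne

images = T.tensor4('images')   # (batch, 3, 64, 64)
labels = T.matrix('labels')    # 1 for real samples, 0 for generated ones

l_disc = createDiscriminator2(images)
probs = lasagne.layers.get_output(l_disc)
loss = lasagne.objectives.binary_crossentropy(probs, labels).mean()
params = lasagne.layers.get_all_params(l_disc, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=2e-4, beta1=0.5)
train_discriminator = theano.function([images, labels], loss, updates=updates)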
Code Example #8
def network_convpool_cnn3d(input_vars,
                           nb_classes,
                           imsize=[32, 32],
                           n_colors=3,
                           n_timewin=5,
                           n_layers=(4, 2),
                           n_filters_first=32,
                           dense_num_unit=[512, 512],
                           batch_norm_dense=False,
                           pool_size=[(2, 1, 1), (2, 2, 2), (2, 2, 2)],
                           dropout_dense=True,
                           batch_norm_conv=False,
                           filter_factor=2,
                           filter_size=[(3, 1, 1), (3, 3, 3), (3, 3, 3)]):
    """
    Builds the complete network with maxpooling layer in time.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image given as [height, width]
    :param n_colors: number of color channels in the image
    :return: a pointer to the output of last layer
    """

    convnet, _ = build_cnn3d(input_vars,
                             imsize=imsize,
                             n_colors=n_colors,
                             n_timewin=n_timewin,
                             n_filters_first=n_filters_first,
                             n_layers=n_layers,
                             padding='same',
                             isMaxpool=True,
                             pool_size=pool_size,
                             batch_norm_conv=batch_norm_conv,
                             factor=filter_factor,
                             filter_size=filter_size)

    convnet = FlattenLayer(convnet)
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    for i in range(len(dense_num_unit)):
        if dropout_dense:
            convnet = lasagne.layers.dropout(convnet, p=.5)
        convnet = DenseLayer(convnet,
                             num_units=dense_num_unit[i],
                             nonlinearity=lasagne.nonlinearities.rectify)
        if batch_norm_dense:
            convnet = batch_norm(convnet)
    # And, finally, the 2-unit output layer with 50% dropout on its inputs:
    if nb_classes == 1:
        nonlinearity = lasagne.nonlinearities.sigmoid
    else:
        nonlinearity = lasagne.nonlinearities.softmax

    if dropout_dense:
        convnet = lasagne.layers.dropout(convnet, p=.5)
    convnet = DenseLayer(convnet,
                         num_units=nb_classes,
                         nonlinearity=nonlinearity)
    return convnet
Code Example #9
def build_convpool_lstm(input_vars,
                        nb_classes,
                        GRAD_CLIP=100,
                        imSize=32,
                        n_colors=3,
                        n_timewin=3):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param GRAD_CLIP:  the gradient messages are clipped to the given value during
                        the backward pass.
    :return: a pointer to the output of last layer
    """
    convnets = []
    W_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, W_init = build_cnn(input_vars[i],
                                        imSize=imSize,
                                        n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i],
                                   W_init=W_init,
                                   imSize=imSize,
                                   n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    # convpool = ReshapeLayer(convpool, ([0], -1, numTimeWin))

    convpool = ReshapeLayer(convpool,
                            ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool,
                         num_units=128,
                         grad_clipping=GRAD_CLIP,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # After the LSTM layer you either need to reshape or slice it (depending on whether you
    # want to keep all predictions or just the last prediction).
    # http://lasagne.readthedocs.org/en/latest/modules/layers/recurrent.html
    # https://github.com/Lasagne/Recipes/blob/master/examples/lstm_text_generation.py
    convpool = SliceLayer(convpool, -1, 1)  # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=256,
                          nonlinearity=lasagne.nonlinearities.rectify)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.

    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes,
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Code Example #10
File: eeg_cnn_lib.py  Project: zht96pi/EEGLearn
def build_convpool_lstm(input_vars,
                        nb_classes,
                        grad_clip=110,
                        imsize=32,
                        n_colors=3,
                        n_timewin=7):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i],
                                        imsize=imsize,
                                        n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i],
                                   w_init=w_init,
                                   imsize=imsize,
                                   n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool,
                            ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool,
                         num_units=128,
                         grad_clipping=grad_clip,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.
    convpool = SliceLayer(convpool, -1, 1)  # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=256,
                          nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes,
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Code Example #11
def __input_var_TO_embedding_layer__(input_var, imgPatch_hw_size):
    net = {}
    # the mean was subtracted in the preprocessing stage.
    net['input'] = InputLayer((None, 3, imgPatch_hw_size[0], imgPatch_hw_size[1]), \
       input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_3'],
                             2)  # keep the output layer in lower dim
    # focus more on the center of the embedding maps
    net['concat'] = ConcatLayer([
        FlattenLayer(net['pool5'], 2),
        CropFeatureMapCenterLayer(net['pool1'], cropCenter_r=1),
        CropFeatureMapCenterLayer(net['pool2'], cropCenter_r=1),
        CropFeatureMapCenterLayer(net['pool3'], cropCenter_r=1),
        CropFeatureMapCenterLayer(net['pool4'], cropCenter_r=1)
    ],
                                axis=1)
    net['flat1'] = FlattenLayer(net['concat'], 2)
    net['L2_norm'] = L2NormLayer(net['flat1'])
    net['embedding'] = DenseLayer(net['L2_norm'],
                                  num_units=params.__D_imgPatchEmbedding,
                                  nonlinearity=None)
    return net
Code Example #12
def build_standard_cnn(input_var):
    from lasagne.layers import InputLayer, ReshapeLayer, Conv2DLayer, DenseLayer, FlattenLayer
    network = InputLayer(shape=(None, 784), input_var=input_var)
    network = ReshapeLayer(network, (-1, 1, 28, 28))
    network = Conv2DLayer(network, 32, 5)
    network = Conv2DLayer(network, 32, 5)
    network = Conv2DLayer(network, 32, 5)
    network = FlattenLayer(network)
    network = DenseLayer(network, 256)
    network = DenseLayer(network,
                         10,
                         nonlinearity=lasagne.nonlinearities.softmax)
    return network
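
A compile-and-train sketch for this MNIST-style network (assumes flattened 784-dimensional inputs and integer class targets; the update rule and hyperparameters are illustrative):

import theano
import theano.tensor as T
import lasagne

input_var = T.matrix('inputs')      # (batch, 784)
target_var = T.ivector('targets')   # integer class labels

network = build_standard_cnn(input_var)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)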
Code Example #13
def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer_input     = InputLayer(shape=(None, 30, 64, 64), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer_0         = DimshuffleLayer(layer_input, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution + batch norm + activation layers, followed by max-pooling and dropout
    layer_1         = batch_norm(Conv3DDNNLayer(incoming=layer_0, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_2         = batch_norm(Conv3DDNNLayer(incoming=layer_1, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_3         = MaxPool3DDNNLayer(layer_2, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_4         = DropoutLayer(layer_3, p=0.25)

    # Convolution + batch norm + activation layers, followed by max-pooling and dropout
    layer_5         = batch_norm(Conv3DDNNLayer(incoming=layer_4, num_filters=32, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_6         = batch_norm(Conv3DDNNLayer(incoming=layer_5, num_filters=32, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_7         = MaxPool3DDNNLayer(layer_6, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_8         = DropoutLayer(layer_7, p=0.25)
    
    # Convolution + batch norm + activation layers, followed by max-pooling and dropout
    layer_5         = batch_norm(Conv3DDNNLayer(incoming=layer_8, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_6         = batch_norm(Conv3DDNNLayer(incoming=layer_5, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_7         = batch_norm(Conv3DDNNLayer(incoming=layer_6, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_8         = MaxPool3DDNNLayer(layer_7, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_9         = DropoutLayer(layer_8, p=0.25)

    layer_flatten = FlattenLayer(layer_9)

    # Fully-connected hidden layer and output layer
    layer_hidden         = DenseLayer(layer_flatten, 500, nonlinearity=linear)
    layer_prediction     = DenseLayer(layer_hidden, 2, nonlinearity=linear)

    # Loss
    prediction           = get_output(layer_prediction) / multiply_var
    loss                 = squared_error(prediction, target_var)
    loss                 = loss.mean()

    # Trainable parameters (the update rule, e.g. SGD with Nesterov momentum, is built by the caller)
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True) / multiply_var
    test_loss            = squared_error(test_prediction, target_var)
    test_loss            = test_loss.mean()

    # crps estimate
    crps                 = T.abs_(test_prediction - target_var).mean()/600

    return test_prediction, crps, loss, params
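
get_model returns the loss and parameters but does not build an update rule; a caller-side sketch (the variable types below are assumptions consistent with the shapes used in the function):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')    # (batch, 30, 64, 64)
target_var = T.matrix('targets')
multiply_var = T.matrix('scale')   # assumed symbolic, matching the division above

test_prediction, crps, loss, params = get_model(input_var, target_var, multiply_var)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=1e-3, momentum=0.9)
train_fn = theano.function([input_var, target_var, multiply_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var, multiply_var], [test_prediction, crps])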
Code Example #14
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,
                         num_filters=kernel1,
                         filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3,
                         num_filters=kernel1,
                         filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4,
                         num_filters=kernel1,
                         filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
Code Example #15
def build_model(input_var=None):
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32), input_var=input_var)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=192,
                             filter_size=5,
                             pad=2)
    net['cccp1'] = ConvLayer(net['conv1'], num_filters=160, filter_size=1)
    net['cccp2'] = ConvLayer(net['cccp1'], num_filters=96, filter_size=1)
    net['pool1'] = PoolLayer(net['cccp2'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'],
                             num_filters=192,
                             filter_size=5,
                             pad=2)
    net['cccp3'] = ConvLayer(net['conv2'], num_filters=192, filter_size=1)
    net['cccp4'] = ConvLayer(net['cccp3'], num_filters=192, filter_size=1)
    net['pool2'] = PoolLayer(net['cccp4'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'],
                             num_filters=192,
                             filter_size=3,
                             pad=1)
    net['cccp5'] = ConvLayer(net['conv3'], num_filters=192, filter_size=1)
    net['cccp6'] = ConvLayer(net['cccp5'], num_filters=10, filter_size=1)
    net['pool3'] = PoolLayer(net['cccp6'],
                             pool_size=8,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])
    
    print 'Loading network pretrained with CIFAR-10'
    with open('save/cifar10-nin.pkl', 'rb') as f:
        params = cPickle.load(f)    
    lasagne.layers.set_all_param_values(net['output'], params)  
    
    # Modify model output
    #net['fc1'] = DenseLayer(net['pool3'], num_units=2, nonlinearity=None)
    #net['output'] = NonlinearityLayer(net['fc1'], softmax)
    

    return net
Code Example #16
def buildmodel(x):
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32), input_var=x)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp1'] = ConvLayer(
        net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
    net['cccp2'] = ConvLayer(
        net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp3'] = ConvLayer(
        net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp4'] = ConvLayer(
        net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'],
                             num_filters=192,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['cccp5'] = ConvLayer(
        net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp6'] = ConvLayer(
        net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'],
                             pool_size=8,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['out'] = NonlinearityLayer(FlattenLayer(net['pool3']), nonlinearity=nonlinearities.softmax)
    net['dense'] = layers.DenseLayer(net['cccp6'], 10, b=None, nonlinearity=nonlinearities.softmax)
    return net
Code Example #17
def build_maxout_cnn(input_var):
    from lasagne.layers import InputLayer
    from layers import Lipshitz_Layer, LipConvLayer, ReshapeLayer, FlattenLayer
    network = InputLayer(shape=(None, 784), input_var=input_var)
    network = ReshapeLayer(network, (-1, 1, 28, 28))
    network = LipConvLayer(network, 16, (5, 5), init=1)
    network = LipConvLayer(network, 32, (5, 5), init=1)
    network = LipConvLayer(network, 64, (5, 5), init=1)
    network = LipConvLayer(network, 128, (5, 5), init=1)
    network = FlattenLayer(network)
    network = Lipshitz_Layer(network, 256, init=1)
    network = Lipshitz_Layer(network,
                             10,
                             init=1,
                             nonlinearity=lasagne.nonlinearities.softmax)
    return network
Code Example #18
def build_cnn(input_var,
              input_shape=(3, 32, 32),
              ccp_num_filters=[64, 128],
              ccp_filter_size=3,
              fc_num_units=[128, 128],
              num_classes=10,
              **junk):
    # input layer
    network = lasagne.layers.InputLayer(shape=(None, ) + input_shape,
                                        input_var=input_var)
    # conv-relu-conv-relu-pool layers
    for num_filters in ccp_num_filters:
        network = lasagne.layers.Conv2DLayer(
            network,
            num_filters=num_filters,
            filter_size=(ccp_filter_size, ccp_filter_size),
            pad='same',
            #nonlinearity=lasagne.nonlinearities.rectify,
            nonlinearity=lasagne.nonlinearities.elu,
            #W=lasagne.init.GlorotUniform(gain='relu')
            W=lasagne.init.HeUniform(gain='relu'))
        network = lasagne.layers.Conv2DLayer(
            network,
            num_filters=num_filters,
            filter_size=(ccp_filter_size, ccp_filter_size),
            pad='same',
            #nonlinearity=lasagne.nonlinearities.rectify,
            nonlinearity=lasagne.nonlinearities.elu,
            #W=lasagne.init.GlorotUniform(gain='relu')
            W=lasagne.init.HeUniform())
        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # fc-relu
    for num_units in fc_num_units:
        network = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(network, p=.5),
            num_units=num_units,
            #nonlinearity=lasagne.nonlinearities.rectify,
            nonlinearity=lasagne.nonlinearities.elu,
            #W=lasagne.init.GlorotUniform(gain='relu')
            W=lasagne.init.HeUniform(gain='relu'))
    feanet = FlattenLayer(network)
    # output layer
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=num_classes,
        nonlinearity=lasagne.nonlinearities.softmax)
    return network, feanet
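
A usage sketch showing how the two returned layers can be consumed: the softmax head for class probabilities and the flattened penultimate activations as a feature extractor (illustrative; assumes the Lasagne imports used above):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')   # (batch, 3, 32, 32)
network, feanet = build_cnn(input_var)

class_probs = lasagne.layers.get_output(network, deterministic=True)
features = lasagne.layers.get_output(feanet, deterministic=True)
predict_fn = theano.function([input_var], class_probs)
feature_fn = theano.function([input_var], features)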
Code Example #19
def D_mnist_mode_recovery(
    num_channels    = 1,
    resolution      = 32,
    fmap_base       = 64,
    fmap_decay      = 1.0,
    fmap_max        = 256,
    mbstat_func     = 'Tstdeps',
    mbstat_avg      = None,         #'all',
    label_size      = 0,
    use_wscale      = False,
    use_gdrop       = False,
    use_layernorm   = False,
    use_batchnorm   = True,
    X               = 2,
    progressive     = False,
    **kwargs):

    R = int(np.log2(resolution))
    assert resolution == 2**R and resolution >= 4
    cur_lod = theano.shared(np.float32(0.0))
    gdrop_strength = theano.shared(np.float32(0.0))
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))) // X, fmap_max)
    def GD(layer): return GDropLayer(layer, name=layer.name+'gd', mode='prop', strength=gdrop_strength) if use_gdrop else layer
    def LN(layer): return LayerNormLayer(layer, name=layer.name+'ln') if use_layernorm else layer
    def WS(layer): return WScaleLayer(layer, name=layer.name+'ws') if use_wscale else layer
    def BN(layer): return lasagne.layers.batch_norm(layer) if use_batchnorm else layer

    net = input_layer = InputLayer(name='Dimages', shape=[None, num_channels, 2**R, 2**R])
    for I in xrange(R-1, 1, -1): # I = R-1, R-2, ..., 2     (i.e. 4,3,2)
        net = BN(LN(WS(Conv2DLayer     (GD(net),     name='D%da'   % I, num_filters=nf(I-1), filter_size=3, pad=1, nonlinearity=lrelu, W=ilrelu))))
        net =       Downscale2DLayer(net,         name='D%ddn'  % I, scale_factor=2)
        if progressive:
            lod =       Downscale2DLayer(input_layer, name='D%dxs'  % (I-1), scale_factor=2**(R-I))
            lod =    WS(NINLayer        (lod,         name='D%dx'   % (I-1), num_units=nf(I-1), nonlinearity=lrelu, W=ilrelu))
            net =       LODSelectLayer  (             name='D%dlod' % (I-1), incomings=[net, lod], cur_lod=cur_lod, first_incoming_lod=R-I-1)

    if mbstat_avg is not None:
        net = MinibatchStatConcatLayer(net, name='Dstat', func=globals()[mbstat_func], averaging=mbstat_avg)

    net = FlattenLayer(GD(net), name='Dflatten')
    output_layers = [WS(DenseLayer(net, name='Dscores', num_units=1, nonlinearity=linear, W=ilinear))]

    if label_size:
        output_layers += [WS(DenseLayer(net, name='Dlabels', num_units=label_size, nonlinearity=linear, W=ilinear))]
    return dict(input_layers=[input_layer], output_layers=output_layers, cur_lod=cur_lod, gdrop_strength=gdrop_strength)
Code Example #20
def build_representation(img_size=[64, 64],
                         nchannels=3,
                         ndf=64,
                         vis_filter_size=5,
                         filters_size=5,
                         global_pool=True,
                         strides=[2, 2, 2, 2]):
    print 'cnn'
    #if img_size[0] % 32 is not 0 or img_size[1]!=img_size[0]:
    #    # the image must be square and a multiple of 32
    #    raise 1

    depth = len(strides)
    w_sizes = [filters_size] * depth
    w_sizes[0] = vis_filter_size

    X = InputLayer((None, nchannels, img_size[0], img_size[1]))
    ishape = lasagne.layers.get_output_shape(X)
    # print ishape

    wf = 1
    h = X
    for i, s in enumerate(strides):
        wf *= s
        filter_size = w_sizes[i]
        x1 = Conv2DLayer(h,
                         num_filters=wf * ndf,
                         filter_size=filter_size,
                         stride=s,
                         pad='same',
                         b=None,
                         nonlinearity=None,
                         name='cnn_l%d_Conv' % i)
        x2 = BatchNormLayer(x1, name='cnn_l%d_BN' % i)
        h = NonlinearityLayer(x2, nonlinearity=lrelu)
        ishape = lasagne.layers.get_output_shape(x1)
        # print ishape

    if global_pool:
        h = GlobalPoolLayer(h, pool_function=T.max, name='cnn_last_code')
    else:
        h = FlattenLayer(h, name='cnn_last_code')

    return h
Code Example #21
def build_sc_cnn(input_var=None):
    network = lasagne.layers.InputLayer(shape=(None, 1, 23, 4),
                                        input_var=input_var)
    network1 = lasagne.layers.Conv2DLayer(
        network,
        num_filters=10,
        filter_size=(1, 4),
        nonlinearity=lasagne.nonlinearities.rectify)
    network1 = lasagne.layers.PadLayer(network1, width=[[0, 1], [0, 0]])
    network2 = lasagne.layers.Conv2DLayer(
        network,
        num_filters=10,
        filter_size=(2, 4),
        nonlinearity=lasagne.nonlinearities.rectify)
    network2 = lasagne.layers.PadLayer(network2, width=[[0, 2], [0, 0]])
    network3 = lasagne.layers.Conv2DLayer(
        network,
        num_filters=10,
        filter_size=(3, 4),
        nonlinearity=lasagne.nonlinearities.rectify)
    network3 = lasagne.layers.PadLayer(network3, width=[[0, 3], [0, 0]])
    network4 = lasagne.layers.Conv2DLayer(
        network,
        num_filters=10,
        filter_size=(4, 4),
        nonlinearity=lasagne.nonlinearities.rectify)
    network4 = lasagne.layers.PadLayer(network4, width=[[0, 4], [0, 0]])
    network = lasagne.layers.ConcatLayer(
        [network1, network2, network3, network4])
    network = lasagne.layers.BatchNormLayer(network)
    network = FlattenLayer(network)
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.15),
        num_units=200,
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.15),
        num_units=23,
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.15),
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)
    return network
Code Example #22
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=192,
                             filter_size=5,
                             pad=2)
    net['cccp1'] = ConvLayer(net['conv1'], num_filters=160, filter_size=1)
    net['cccp2'] = ConvLayer(net['cccp1'], num_filters=96, filter_size=1)
    net['pool1'] = PoolLayer(net['cccp2'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'],
                             num_filters=192,
                             filter_size=5,
                             pad=2)
    net['cccp3'] = ConvLayer(net['conv2'], num_filters=192, filter_size=1)
    net['cccp4'] = ConvLayer(net['cccp3'], num_filters=192, filter_size=1)
    net['pool2'] = PoolLayer(net['cccp4'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'],
                             num_filters=192,
                             filter_size=3,
                             pad=1)
    net['cccp5'] = ConvLayer(net['conv3'], num_filters=192, filter_size=1)
    net['cccp6'] = ConvLayer(net['cccp5'], num_filters=10, filter_size=1)
    net['pool3'] = PoolLayer(net['cccp6'],
                             pool_size=8,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])

    return net
Code Example #23
def model_train(X_train, y_train, learning_rate=1e-4, epochs=10):
    l = 1000
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=64, filter_size=(4, 7))
    layer4 = Conv2DLayer(layer3, num_filters=64, filter_size=(1, 7))
    layer5 = Conv2DLayer(layer4, num_filters=64, filter_size=(1, 7))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, 6))
    layer7 = Conv2DLayer(layer6, num_filters=64, filter_size=(1, 7))
    layer8 = Conv2DLayer(layer7, num_filters=64, filter_size=(1, 7))
    layer9 = Conv2DLayer(layer8, num_filters=64, filter_size=(1, 7))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, 6))
    layer11 = Conv2DLayer(layer10, num_filters=64, filter_size=(1, 7))
    layer12 = Conv2DLayer(layer11, num_filters=64, filter_size=(1, 7))
    layer13 = Conv2DLayer(layer12, num_filters=64, filter_size=(1, 7))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, 6))
    layer14_d = DenseLayer(layer14, num_units=64)
    layer3_2 = DenseLayer(layer2_f, num_units=64)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    #layer15 = ConcatLayer([layer10_d,])
    layer16 = DropoutLayer(layer15)
    layer17 = DenseLayer(layer16, num_units=32)
    network = DenseLayer(layer17, num_units=2, nonlinearity=None)
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        regression=True,
        train_split=TrainSplit(eval_size=0.1),
        objective_loss_function=squared_error,
        #on_epoch_finished=[AdjustVariable(lr, target=1e-8, half_life=20)],
        verbose=4)
    net.fit(X_train, y_train)
    return net
Code Example #24
def build_convpool_lstm(input_vars):
    convnets = []
    W_init = None
    # numTimeWin and GRAD_CLIP are assumed to be module-level constants here
    for i in range(numTimeWin):
        if i == 0:
            convnet, W_init = build_cnn(input_vars[i])
        else:
            convnet, _ = build_cnn(input_vars[i], W_init)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    # convpool = ReshapeLayer(convpool, ([0], -1, numTimeWin))

    convpool = ReshapeLayer(
        convpool, ([0], numTimeWin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool,
                         num_units=128,
                         grad_clipping=GRAD_CLIP,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # After the LSTM layer you either need to reshape or slice it (depending on whether you
    # want to keep all predictions or just the last prediction).
    # http://lasagne.readthedocs.org/en/latest/modules/layers/recurrent.html
    # https://github.com/Lasagne/Recipes/blob/master/examples/lstm_text_generation.py
    convpool = SliceLayer(convpool, -1, 1)  # Selecting the last prediction
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512,
                          nonlinearity=lasagne.nonlinearities.rectify)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.

    # And, finally, the 4-unit output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=4,
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Code Example #25
    def buildLayers(self, bTestWholeImage=False, verbose=True):
        """
        This is a function for creating layer args to be used in instantiation

        For MNIST, we use the basic LeNet-5 example from the Theano tutorial

        """
        if verbose:
            print(' --------------------------------------------------- ')
            print(' Build Layers ')
            print(' --------------------------------------------------- ')

        for idxSiam in six.moves.xrange(self.config.num_siamese):
            self.layers[idxSiam] = OrderedDict()

            # 2D raw input
            self.layers[idxSiam]['input_raw_2d'] \
                = InputLayer(
                    (self.config.batch_size, 1, self.config.patch_height,
                     self.config.patch_width),
                    input_var=self.x[idxSiam],
                    name='input_raw_2d')

            # Build Layers for Kp
            self.buildLayersKp(idxSiam,
                               resize=(not bTestWholeImage),
                               verbose=verbose)

            # Build Layers for orientation
            self.buildLayersOri(idxSiam, verbose)

            # Build Layers for descriptor
            self.buildLayersDesc(idxSiam, verbose)

            # Pass the desc-output layer results through to the output
            self.layers[idxSiam]['output'] = FlattenLayer(
                self.layers[idxSiam]['desc-output'], 2)
Code Example #26
File: sb_resnet.py  Project: xoltar/sb_resnet
    def __init__(self,
                 layer,
                 stick,
                 kumar_parameters=lasagne.init.Normal(0.0001),
                 **kwargs):
        flatten_layer = FlattenLayer(layer, outdim=2)
        kumar_parameters = DenseLayer(flatten_layer,
                                      2,
                                      W=kumar_parameters,
                                      b=None,
                                      nonlinearity=softplus)
        self.kumar_a = SliceLayer(kumar_parameters,
                                  indices=slice(0, 1),
                                  axis=1)  # Equivalent to [:, [0]]
        self.kumar_b = SliceLayer(kumar_parameters,
                                  indices=slice(1, 2),
                                  axis=1)  # Equivalent to [:, [1]]

        # Bound Kumaraswamy's parameters: 1e-6 <= a, b <= 30.
        self.kumar_a = bound(self.kumar_a, min_value=1e-6, max_value=30)
        self.kumar_b = bound(self.kumar_b, min_value=1e-6, max_value=30)
        super(RemainingStickLengthLayer,
              self).__init__([layer, stick, self.kumar_a, self.kumar_b],
                             **kwargs)
Code Example #27
File: models.py  Project: htyao89/drcn
    def create_architecture(self,
                            input_shape,
                            dense_dim=1024,
                            input_var_=None,
                            output_var_=None,
                            convnet_=None,
                            is_enc_fixed=False):

        print('[ConvAE: create_architecture]')
        if input_var_ is not None:
            self.X_ = input_var_

        if output_var_ is not None:
            self.Y_ = output_var_

        (c, d1, d2) = input_shape

        self.lin = InputLayer((None, c, d1, d2), self.X_)
        if convnet_ is not None:
            self.lconv1 = Conv2DLayerFast(self.lin,
                                          100, (5, 5),
                                          pad=(2, 2),
                                          W=convnet_.lconv1.W,
                                          nonlinearity=rectify)
        else:
            self.lconv1 = Conv2DLayerFast(self.lin,
                                          100, (5, 5),
                                          pad=(2, 2),
                                          W=GlorotUniform(),
                                          nonlinearity=rectify)

        self.lpool1 = MaxPool2DLayerFast(self.lconv1, (2, 2))

        if convnet_ is not None:
            self.lconv2 = Conv2DLayerFast(self.lpool1,
                                          150, (5, 5),
                                          pad=(2, 2),
                                          W=convnet_.lconv2.W,
                                          nonlinearity=rectify)
        else:
            self.lconv2 = Conv2DLayerFast(self.lpool1,
                                          150, (5, 5),
                                          pad=(2, 2),
                                          W=GlorotUniform(),
                                          nonlinearity=rectify)

        self.lpool2 = MaxPool2DLayerFast(self.lconv2, (2, 2))

        if convnet_ is not None:
            self.lconv3 = Conv2DLayerFast(self.lpool2,
                                          200, (3, 3),
                                          W=convnet_.lconv3.W,
                                          nonlinearity=rectify)
        else:
            self.lconv3 = Conv2DLayerFast(self.lpool2,
                                          200, (3, 3),
                                          W=GlorotUniform(),
                                          nonlinearity=rectify)
        [nd, nf, dc1, dc2] = get_output_shape(self.lconv3)

        self.lconv3_flat = FlattenLayer(self.lconv3)
        [_, dflat] = get_output_shape(self.lconv3_flat)

        if convnet_ is not None:
            self.ldense1 = DenseLayer(self.lconv3_flat,
                                      dense_dim,
                                      W=convnet_.ldense1.W,
                                      nonlinearity=rectify)
        else:
            self.ldense1 = DenseLayer(self.lconv3_flat,
                                      dense_dim,
                                      W=GlorotUniform(),
                                      nonlinearity=rectify)

        if convnet_ is not None:
            self.ldense2 = DenseLayer(self.ldense1,
                                      dense_dim,
                                      W=convnet_.ldense2.W,
                                      nonlinearity=rectify)
        else:
            self.ldense2 = DenseLayer(self.ldense1,
                                      dense_dim,
                                      W=GlorotUniform(),
                                      nonlinearity=rectify)

        self.ldense3 = DenseLayer(self.ldense2,
                                  dflat,
                                  W=GlorotUniform(),
                                  nonlinearity=rectify)
        self.ldense3_reshape = ReshapeLayer(self.ldense3,
                                            ([0], nf, dc1, -1))  # lae_conv3

        self.ldeconv1 = Conv2DLayerFast(self.ldense3_reshape,
                                        150, (3, 3),
                                        pad=(2, 2),
                                        W=GlorotUniform(),
                                        nonlinearity=rectify)
        self.lunpool1 = Upscale2DLayer(self.ldeconv1, (2, 2))

        self.ldeconv2 = Conv2DLayerFast(self.lunpool1,
                                        100, (5, 5),
                                        pad=(2, 2),
                                        W=GlorotUniform(),
                                        nonlinearity=rectify)
        self.lunpool2 = Upscale2DLayer(self.ldeconv2, (2, 2))

        self.model_ = Conv2DLayerFast(self.lunpool2,
                                      1, (5, 5),
                                      pad=(2, 2),
                                      W=GlorotUniform(),
                                      nonlinearity=linear)

        self.is_enc_fixed = is_enc_fixed
Code Example #28
File: models.py  Project: htyao89/drcn
    def create_architecture(self,
                            input_shape,
                            dense_dim=1024,
                            dout=10,
                            dropout=0.5,
                            input_var_=None,
                            output_var_=None,
                            enc_weights=None):

        print('[ConvNet: create_architecture] dense_dim:', dense_dim)

        if input_var_ is not None:
            self.X_ = input_var_

        if output_var_ is not None:
            self.Y_ = output_var_

        self.dropout = dropout
        (c, d1, d2) = input_shape

        self.lin = InputLayer((None, c, d1, d2), self.X_)
        self.lconv1 = Conv2DLayerFast(self.lin,
                                      100, (5, 5),
                                      pad=(2, 2),
                                      W=GlorotUniform(),
                                      nonlinearity=rectify)
        self.lpool1 = MaxPool2DLayerFast(self.lconv1, (2, 2))

        self.lconv2 = Conv2DLayerFast(self.lpool1,
                                      150, (5, 5),
                                      pad=(2, 2),
                                      W=GlorotUniform(),
                                      nonlinearity=rectify)
        self.lpool2 = MaxPool2DLayerFast(self.lconv2, (2, 2))

        self.lconv3 = Conv2DLayerFast(self.lpool2,
                                      200, (3, 3),
                                      W=GlorotUniform(),
                                      nonlinearity=rectify)
        self.lconv3_flat = FlattenLayer(self.lconv3)

        self.ldense1 = DenseLayer(self.lconv3_flat,
                                  dense_dim,
                                  W=GlorotUniform(),
                                  nonlinearity=rectify)
        self.ldense1_drop = self.ldense1
        if dropout > 0:
            self.ldense1_drop = DropoutLayer(self.ldense1, p=dropout)

        self.ldense2 = DenseLayer(self.ldense1_drop,
                                  dense_dim,
                                  W=GlorotUniform(),
                                  nonlinearity=rectify)
        self.ldense2_drop = self.ldense2
        if dropout > 0:
            self.ldense2_drop = DropoutLayer(self.ldense2_drop, p=dropout)

        self.model_ = DenseLayer(self.ldense2_drop,
                                 dout,
                                 W=GlorotUniform(),
                                 nonlinearity=softmax)

        self.enc_weights = enc_weights
        if enc_weights is not None:
            lasagne.layers.set_all_param_values(self.model_, enc_weights)
Code Example #29
File: vae.py  Project: kundan2510/convolutional_vae
def Encoder(input_var, use_batch_norm=False):
    input_var = input_var.dimshuffle(0, 'x', 1, 2)
    net = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(2, 2),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(3, 3),
                      stride=(2, 2),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(3, 3),
                      nonlinearity=lasagne.nonlinearities.elu)

    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(3, 3),
                      stride=(2, 2),
                      nonlinearity=lasagne.nonlinearities.elu)

    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(2, 2),
                      nonlinearity=lasagne.nonlinearities.elu)

    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(1, 1),
                      nonlinearity=lasagne.nonlinearities.elu)

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=(1, 1),
                      nonlinearity=lasagne.nonlinearities.elu)
    net = FlattenLayer(net, outdim=2)

    net = DenseLayer(net,
                     num_units=128,
                     nonlinearity=lasagne.nonlinearities.rectify)

    net = DenseLayer(net, num_units=40, nonlinearity=None)

    return net
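
One common way to consume such an encoder in a VAE is to split the 40-unit output into a 20-dimensional mean and log-variance and reparameterise; this is an assumption about the intended use, not taken from the project, and is shown only as a sketch:

import theano.tensor as T
import lasagne
from theano.sandbox.rng_mrg import MRG_RandomStreams

x = T.tensor3('x')                        # (batch, 28, 28); dimshuffled inside Encoder
enc = Encoder(x)
enc_out = lasagne.layers.get_output(enc)  # (batch, 40)
mu, log_sigma = enc_out[:, :20], enc_out[:, 20:]

srng = MRG_RandomStreams(seed=42)
z = mu + T.exp(log_sigma) * srng.normal(mu.shape)   # reparameterisation trick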
Code Example #30
                             W=GlorotUniform(),
                             nonlinearity=rectify)
lnet_pool1 = MaxPool2DLayerFast(lnet_conv1, (2, 2))

lnet_conv2 = Conv2DLayerFast(lnet_pool1,
                             150, (5, 5),
                             pad=(2, 2),
                             W=GlorotUniform(),
                             nonlinearity=rectify)
lnet_pool2 = MaxPool2DLayerFast(lnet_conv2, (2, 2))

lnet_conv3 = Conv2DLayerFast(lnet_pool2,
                             200, (3, 3),
                             W=GlorotUniform(),
                             nonlinearity=rectify)
lnet_conv3_flat = FlattenLayer(lnet_conv3)

lnet_dense4 = DenseLayer(lnet_conv3_flat,
                         300,
                         W=GlorotUniform(),
                         nonlinearity=rectify)
lnet_dense4_drop = DropoutLayer(lnet_dense4, p=confnet['dropout_rate'])

convnet = DenseLayer(lnet_dense4_drop, 10, nonlinearity=softmax)

print('[ConvNet] define loss, optimizer, and compile')
Ynet_train_pred_ = get_output(convnet)
loss_ = categorical_crossentropy(Ynet_train_pred_, Ynet_)
loss_ = loss_.mean()
acc_ = T.mean(T.eq(T.argmax(Ynet_train_pred_, axis=1), Ynet_),
              dtype=theano.config.floatX)