Example no. 1
    def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1',
                        batch_norm(self.network['conv1_2']),
                        1 * c,
                        filter_size=(5, 5),
                        stride=(2, 2),
                        pad=(2, 2))
        self.make_layer('disc1.2',
                        self.last_layer(),
                        1 * c,
                        filter_size=(5, 5),
                        stride=(2, 2),
                        pad=(2, 2))
        self.make_layer('disc2',
                        batch_norm(self.network['conv2_2']),
                        2 * c,
                        filter_size=(5, 5),
                        stride=(2, 2),
                        pad=(2, 2))
        self.make_layer('disc3',
                        batch_norm(self.network['conv3_2']),
                        3 * c,
                        filter_size=(3, 3),
                        stride=(1, 1),
                        pad=(1, 1))
        hypercolumn = ConcatLayer([
            self.network['disc1.2>'], self.network['disc2>'],
            self.network['disc3>']
        ])
        self.make_layer('disc4',
                        hypercolumn,
                        4 * c,
                        filter_size=(1, 1),
                        stride=(1, 1),
                        pad=(0, 0))
        self.make_layer('disc5',
                        self.last_layer(),
                        3 * c,
                        filter_size=(3, 3),
                        stride=(2, 2))
        self.make_layer('disc6',
                        self.last_layer(),
                        2 * c,
                        filter_size=(1, 1),
                        stride=(1, 1),
                        pad=(0, 0))
        self.network['disc'] = batch_norm(
            ConvLayer(self.last_layer(),
                      1,
                      filter_size=(1, 1),
                      nonlinearity=lasagne.nonlinearities.linear))
Example no. 2
def _inception(inp, o1s, o2s1, o2s2, o3s1, o3s2, o4s):
    conv1 = Conv2DLayer(inp, o1s, 1)

    conv3_ = Conv2DLayer(inp, o2s1, 1)
    conv3 = Conv2DLayer(conv3_, o2s2, 3, pad='same')

    conv5_ = Conv2DLayer(inp, o3s1, 1)
    conv5 = Conv2DLayer(conv5_, o3s2, 5, pad='same')

    pool_ = MaxPool2DLayer(inp, 3, stride=1, pad=1)
    pool = Conv2DLayer(pool_, o4s, 1)

    return ConcatLayer([conv1, conv3, conv5, pool])
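A minimal usage sketch for the _inception helper above (not from the original source): the input layer is hypothetical and the filter counts roughly follow GoogLeNet's 3a block.

from lasagne.layers import InputLayer

images = InputLayer((None, 3, 224, 224))
block = _inception(images, o1s=64, o2s1=96, o2s2=128, o3s1=16, o3s2=32, o4s=32)
# ConcatLayer joins the four branches on the channel axis: 64 + 128 + 32 + 32 = 256
print(block.output_shape)  # (None, 256, 224, 224)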
Example no. 3
def inceptionD(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
Example no. 4
    def nn_fn(self):

        l_in_z_p = InputLayer((None, self.z_dim))
        l_in_z_h = InputLayer((None, self.z_dim))

        l_h = ConcatLayer([l_in_z_p, l_in_z_h], axis=-1)

        for h in range(self.nn_depth - 1):
            l_h = DenseLayer(l_h, num_units=self.nn_hid_units, b=None)

        l_out = DenseLayer(l_h, num_units=self.num_outputs, b=None, nonlinearity=softmax)

        return (l_in_z_p, l_in_z_h), l_out
Example no. 5
def dense_block(network, num_layers, growth_rate, dropout, name_prefix):
    # concatenated 3x3 convolutions
    concat_layers = network
    for n in range(num_layers):
        conv = bn_relu_conv(network, channels=growth_rate,
                            filter_size=3, stride=1, dropout=dropout,
                            name_prefix=name_prefix + '_l%02d' % (n + 1))

        concat_layers = ConcatLayer([concat_layers, conv], axis=1,
                              name=name_prefix + '_l%02d_join' % (n + 1))
        network = conv
    # the block output is the concatenation of the input and every intermediate convolution;
    # note that each convolution here only receives the previous convolution as its input
    network = concat_layers
    return network
Example no. 6
    def setup_model(self):
        """Use lasagne to create a network of convolution layers, first using VGG19 as the framework
        and then adding augmentations for Semantic Style Transfer.
        """

        net = {}

        # First network for the main image. These are convolution only, and stop at layer 4_2 (rest unused).
        net['img'] = InputLayer((1, 3, None, None))
        net['conv1_1'] = ConvLayer(net['img'], 64, 3, pad=1)
        net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
        net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
        net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
        net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
        net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
        net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
        net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
        net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
        net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
        net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
        net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
        net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
        net['main'] = net['conv4_2']

        # Second network for the semantic layers.  This dynamically downsamples the map and concatenates it.
        net['map'] = InputLayer((1, 3, None, None))
        net['map_2'] = PoolLayer(net['map'], 2, mode='average_exc_pad')
        net['map_3'] = PoolLayer(net['map'], 4, mode='average_exc_pad')
        net['map_4'] = PoolLayer(net['map'], 8, mode='average_exc_pad')

        net['sem2_1'] = ConcatLayer([net['conv2_1'], net['map_2']])
        net['sem3_1'] = ConcatLayer([net['conv3_1'], net['map_3']])
        net['sem4_1'] = ConcatLayer([net['conv4_1'], net['map_4']])

        # Third network for the nearest neighbors; it's a default size for now, updated once we know more.
        net['nn3_1'] = ConvLayer(net['sem3_1'], 1, 3, b=None, pad=0)
        net['nn4_1'] = ConvLayer(net['sem4_1'], 1, 3, b=None, pad=0)

        self.network = net
Example no. 7
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs (one per time window) with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))

    #print(convnet.output_shape) #None, 128, 4, 4
    #print('0.:', convnets[0].output_shape) #None, 2048... 128*4*4
    
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, numTimeWin, features] for the LSTM input
    convpool = ConcatLayer(convnets)
    #print('1.concat:', convpool.output_shape) #None, 6144
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features) 
    #print('2.Reshape:', convpool.output_shape) #None, 3, 2048
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.
    #print('3.LSTM:', convpool.output_shape) #None, 3, 128
    convpool = SliceLayer(convpool, -1, 1)      # Selecting the last prediction
    
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    #print('4.slice:', convpool.output_shape) #None, 128
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=256, nonlinearity=lasagne.nonlinearities.rectify)

    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
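A hypothetical driver for build_convpool_lstm, assuming the project's build_cnn helper is importable; as the docstring says, it takes one symbolic image tensor per time window, and the class count below is only illustrative.

import theano.tensor as T

n_timewin = 3
input_vars = [T.tensor4('eeg_win_%d' % i) for i in range(n_timewin)]
network = build_convpool_lstm(input_vars, nb_classes=4,
                              imsize=32, n_colors=3, n_timewin=n_timewin)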
Example no. 8
def build_inception_module(name, input_layer, nfilters, batch_norm):
    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    net = {}
    net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(net['pool'],
                                 nfilters[0],
                                 1,
                                 flip_filters=False)
    if batch_norm:
        net['pool_proj'] = normalization.batch_norm(net['pool_proj'])

    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
    if batch_norm:
        net['1x1'] = normalization.batch_norm(net['1x1'])

    net['3x3_reduce'] = ConvLayer(input_layer,
                                  nfilters[2],
                                  1,
                                  flip_filters=False)
    if batch_norm:
        net['3x3_reduce'] = normalization.batch_norm(net['3x3_reduce'])
    net['3x3'] = ConvLayer(net['3x3_reduce'],
                           nfilters[3],
                           3,
                           pad=1,
                           flip_filters=False)
    if batch_norm:
        net['3x3'] = normalization.batch_norm(net['3x3'])

    net['5x5_reduce'] = ConvLayer(input_layer,
                                  nfilters[4],
                                  1,
                                  flip_filters=False)
    if batch_norm:
        net['5x5_reduce'] = normalization.batch_norm(net['5x5_reduce'])
    net['5x5'] = ConvLayer(net['5x5_reduce'],
                           nfilters[5],
                           5,
                           pad=2,
                           flip_filters=False)
    if batch_norm:
        net['5x5'] = normalization.batch_norm(net['5x5'])

    net['output'] = ConcatLayer([
        net['1x1'],
        net['3x3'],
        net['5x5'],
        net['pool_proj'],
    ])

    return {'{}/{}'.format(name, k): v for k, v in net.items()}
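A usage sketch for build_inception_module; the module name, input shape, and filter counts are illustrative, with nfilters ordered as in the comment above.

from lasagne.layers import InputLayer

net = {'input': InputLayer((None, 3, 224, 224))}
net.update(build_inception_module('inception_3a', net['input'],
                                  [32, 64, 96, 128, 16, 32], batch_norm=False))
# the concatenated output of the module is exposed as net['inception_3a/output']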
Example no. 9
def createXYZTCropLayer(input_layer_4d,
                        xyz_layer,
                        theta_layer,
                        max_scale,
                        out_width,
                        name=None):

    input_layer_shape = get_output_shape(input_layer_4d)
    batch_size = input_layer_shape[0]

    new_width = out_width
    new_height = out_width

    # ratio to reduce to patch size from original
    reduc_ratio = (np.cast[floatX](out_width) /
                   np.cast[floatX](input_layer_shape[3]))

    # merge xyz and t layers together to form xyzt
    xyzt_layer = ConcatLayer([xyz_layer, theta_layer])

    # create a param layer from xyz layer
    def xyzt_2_param(xyzt):
        # get individual xyz
        dx = xyzt[:, 0]  # x and y are already between -1 and 1
        dy = xyzt[:, 1]  # x and y are already between -1 and 1
        z = xyzt[:, 2]
        t = xyzt[:, 3]
        # compute the resize from the largest scale image
        dr = (np.cast[floatX](reduc_ratio) * np.cast[floatX](2.0)**z /
              np.cast[floatX](max_scale))

        # dimshuffle before concatenate
        params = [
            dr * T.cos(t), -dr * T.sin(t), dx, dr * T.sin(t), dr * T.cos(t), dy
        ]
        params = [_p.flatten().dimshuffle(0, 'x') for _p in params]

        # concatenate to have (1 0 0 0 1 0) when identity transform
        return T.concatenate(params, axis=1)

    param_layer = ExpressionLayer(xyzt_layer,
                                  xyzt_2_param,
                                  output_shape=(batch_size, 6))

    resize_layer = TransformerLayer(input_layer_4d,
                                    param_layer,
                                    new_height,
                                    new_width,
                                    name=name)

    return resize_layer
Example no. 10
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,
                         num_filters=kernel1,
                         filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3,
                         num_filters=kernel1,
                         filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4,
                         num_filters=kernel1,
                         filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
Example no. 11
    def nn_fn(self):

        l_in_z = InputLayer((None, self.z_dim))
        l_in_x = InputLayer((None, self.max_length, self.emb_dim))

        l_in_z_reshape = ReshapeLayer(l_in_z, (
            [0],
            [1],
            1,
        ))
        l_in_z_rep = TileLayer(l_in_z_reshape, (1, 1, self.max_length))

        l_x_pre_pad = SliceLayer(PadLayer(l_in_x, [(1, 0), (0, 0)],
                                          batch_ndim=1),
                                 indices=slice(0, -1),
                                 axis=1)
        l_x_pre_pad = DimshuffleLayer(l_x_pre_pad, (0, 2, 1))
        l_x_pre_pad_drop = DropoutLayer(l_x_pre_pad,
                                        self.nn_word_drop,
                                        shared_axes=(1, ))

        l_concat = ConcatLayer((l_in_z_rep, l_x_pre_pad_drop), axis=1)

        l_in_d = Conv1DLayer(l_concat,
                             num_filters=self.nn_channels_external,
                             pad='same',
                             filter_size=1,
                             nonlinearity=None)

        for d in self.nn_dilations:
            l_cnn1 = Conv1DLayer(l_in_d,
                                 filter_size=1,
                                 num_filters=self.nn_channels_internal)
            l_dcnn = DilatedConv1DLayer(l_cnn1,
                                        filter_size=self.nn_filter_size,
                                        num_filters=self.nn_channels_internal,
                                        dilation=d)
            l_cnn2 = Conv1DLayer(l_dcnn,
                                 filter_size=1,
                                 num_filters=self.nn_channels_external)

            l_in_d = ElemwiseSumLayer([l_in_d, l_cnn2])

        l_final = Conv1DLayer(l_in_d,
                              filter_size=1,
                              num_filters=self.emb_dim,
                              nonlinearity=None)

        l_out = DimshuffleLayer(l_final, (0, 2, 1))

        return (l_in_z, l_in_x), l_out
Example no. 12
def gooey_gadget(network_in, conv_add, stride):
    network_c = Conv2DLayer(network_in,
                            conv_add // 2, (1, 1),  # integer division keeps num_filters an int on Python 3
                            W=HeUniform('relu'))
    network_c = prelu(network_c)
    network_c = BatchNormLayer(network_c)
    network_c = Conv2DLayer(network_c,
                            conv_add, (3, 3),
                            stride=stride,
                            W=HeUniform('relu'))
    network_c = prelu(network_c)
    network_c = BatchNormLayer(network_c)
    network_p = MaxPool2DLayer(network_in, (3, 3), stride=stride)
    return ConcatLayer((network_c, network_p))
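A small usage sketch for gooey_gadget with a hypothetical input; the ConcatLayer only works because the strided 3x3 convolution and the 3x3 max-pool produce the same spatial size.

from lasagne.layers import InputLayer

feats = InputLayer((None, 32, 64, 64))
down = gooey_gadget(feats, conv_add=32, stride=2)
# 32 pooled input channels + 32 new convolutional channels = 64
print(down.output_shape)  # (None, 64, 31, 31)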
Example no. 13
    def test_print_layer_info_with_empty_shape(self, print_info, NeuralNet):
        # construct a net with both conv layer (to trigger
        # get_conv_infos) and a layer with shape (None,).
        l_img = InputLayer(shape=(None, 1, 28, 28))
        l_conv = Conv2DLayer(l_img, num_filters=3, filter_size=3)
        l0 = DenseLayer(l_conv, num_units=10)
        l_inp = InputLayer(shape=(None,))  # e.g. vector input
        l1 = DenseLayer(l_inp, num_units=10)
        l_merge = ConcatLayer([l0, l1])

        nn = NeuralNet(l_merge, update_learning_rate=0.1, verbose=2)
        nn.initialize()
        # used to raise TypeError
        print_info(nn)
Example no. 14
    def inceptionB(self, input_layer, nfilt):
        # Corresponds to a modified version of figure 10 in the paper
        l1 = self.bn_conv(input_layer,
                          num_filters=nfilt[0][0],
                          filter_size=3,
                          stride=2)

        l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
        l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
        l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

        l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

        return ConcatLayer([l1, l2, l3])
Example no. 15
    def setup_model(self, input=None):
        """Use lasagne to create a network of convolution layers, first using VGG19 as the framework
        and then adding augmentations for Semantic Style Transfer.
        """
        net, self.channels = {}, {}

        # Primary network for the main image. These are convolution only, and stop at layer 4_2 (rest unused).
        net['img']     = input or InputLayer((None, 3, None, None))
        net['conv1_1'] = ConvLayer(net['img'],     64, 3, pad=1)
        net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
        net['pool1']   = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
        net['conv2_1'] = ConvLayer(net['pool1'],   128, 3, pad=1)
        net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
        net['pool2']   = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
        net['conv3_1'] = ConvLayer(net['pool2'],   256, 3, pad=1)
        net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
        net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
        net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
        net['pool3']   = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
        net['conv4_1'] = ConvLayer(net['pool3'],   512, 3, pad=1)
        net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
        net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
        net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
        net['pool4']   = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
        net['conv5_1'] = ConvLayer(net['pool4'],   512, 3, pad=1)
        net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
        net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
        net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
        net['main']    = net['conv5_4']

        # Auxiliary network for the semantic layers, and the nearest neighbors calculations.
        net['map'] = InputLayer((1, 1, None, None))
        for j, i in itertools.product(range(5), range(4)):
            if j < 2 and i > 1: continue
            suffix = '%i_%i' % (j+1, i+1)

            if i == 0:
                net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad')
            self.channels[suffix] = net['conv'+suffix].num_filters

            if args.semantic_weight > 0.0:
                net['sem'+suffix] = ConcatLayer([net['conv'+suffix], net['map%i'%(j+1)]])
            else:
                net['sem'+suffix] = net['conv'+suffix]

            net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape)
            net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False)

        self.network = net
Example no. 16
def buildSpatialTransformerNet():

    #st_prefix = 'st/'
    inc1_prefix = 'inc1/'
    inc2_prefix = 'inc2/'
    global_prefix = 'global/'

    net_stn_1, net_stn_2 = buildSTN()

    net_inc1_Dict = build_vgg_model(inputLayer=net_stn_1,
                                    prefix=inc1_prefix,
                                    dropout_ratio=0.5,
                                    stnFlag=False,
                                    classificationFlag=True)
    net_inc2_Dict = build_vgg_model(inputLayer=net_stn_2,
                                    prefix=inc2_prefix,
                                    dropout_ratio=0.5,
                                    stnFlag=False,
                                    classificationFlag=True)
    # net_input is not defined in this function; it is assumed to exist at module scope
    net_global_Dict = build_vgg_model(inputLayer=net_input,
                                      prefix=global_prefix,
                                      dropout_ratio=0.5,
                                      stnFlag=False,
                                      classificationFlag=True)

    net_inc1_output = net_inc1_Dict[inc1_prefix + 'fc7_dropout']
    net_inc2_output = net_inc2_Dict[inc2_prefix + 'fc7_dropout']
    net_global_output = net_global_Dict[global_prefix + 'fc7_dropout']

    #net_final_concat = ElemwiseSumLayer([net_global_output,net_inc1_output,net_inc2_output], [0.6,0.2,0.2],name='final/concat/output')

    net_final_concat = ConcatLayer(
        [net_global_output, net_inc1_output, net_inc2_output],
        name='final/concat/output')

    net_final_fc = DenseLayer(net_final_concat,
                              num_units=67,
                              nonlinearity=None,
                              name='final/fc')

    net_final_prob = NonlinearityLayer(net_final_fc,
                                       nonlinearity=softmax,
                                       name='final/prob')

    net_global_prob = net_global_Dict[global_prefix + 'prob']
    net_inc1_prob = net_inc1_Dict[inc1_prefix + 'prob']
    net_inc2_prob = net_inc2_Dict[inc2_prefix + 'prob']

    return net_final_prob, net_global_prob, net_inc1_prob, net_inc2_prob
Example no. 17
    def _add_decoder(self):
        """
        Decoder returns the batch of sequences of thought vectors, each corresponds to a decoded token
        reshapes this 3d tensor to 2d matrix so that the next Dense layer can convert each thought vector to
        a probability distribution vector
        """

        self._net['hid_states_decoder'] = InputLayer(
            shape=(None, self._decoder_depth, None),
            input_var=T.tensor3('hid_inits_decoder'),
            name='hid_states_decoder')

        # repeat along the sequence axis output_seq_len times, where output_seq_len is inferred from input tensor
        self._net['enc_repeated'] = RepeatLayer(
            incoming=self._net[
                'enc_result'],  # input shape = (batch_size, encoder_output_dimension)
            n=self._output_seq_len,
            name='repeat_layer')

        self._net['emb_condition_id_repeated'] = RepeatLayer(
            incoming=self._net['emb_condition_id'],
            n=self._output_seq_len,
            name='embedding_condition_id_repeated')

        self._net['dec_concated_input'] = ConcatLayer(
            incomings=[
                self._net['emb_y'], self._net['enc_repeated'],
                self._net['emb_condition_id_repeated']
            ],
            axis=2,
            name='decoder_concated_input')
        # shape = (batch_size, input_seq_len, encoder_output_dimension)

        self._net['dec_0'] = self._net['dec_concated_input']

        for dec_layer_id in xrange(1, self._decoder_depth + 1):
            # input shape = (batch_size, input_seq_len, embedding_dimension + hidden_dimension)
            self._net['dec_' + str(dec_layer_id)] = GRULayer(
                incoming=self._net['dec_' + str(dec_layer_id - 1)],
                num_units=self._hidden_layer_dim,
                grad_clipping=self._grad_clip,
                only_return_final=False,
                name='decoder_' + str(dec_layer_id),
                mask_input=self._net['input_y_mask'],
                hid_init=SliceLayer(self._net['hid_states_decoder'],
                                    dec_layer_id - 1,
                                    axis=1))

        self._net['dec'] = self._net['dec_' + str(self._decoder_depth)]
Example no. 18
    def get_network(self):
        network = lasagne.layers.InputLayer(shape=(None, self.num_features), input_var=self.input_var)
        for i in xrange(0, self.num_layers):
            network = DenseLayer(network, nonlinearity=rectify, num_units=self.num_nodes)
            if i != 0:
                network = FeaturePoolLayer(incoming=network, pool_size=2, axis=1, pool_function=theano.tensor.mean)
            for _ in xrange(0, 1):
                network = DenseLayer(network, nonlinearity=rectify, num_units=self.num_nodes)
                layers = [network]
                for _ in xrange(0, 4):
                    network = batch_norm(self.add_dense_maxout_block(network, self.num_nodes, self.dropout))
                    layers.append(network)
                    network = ConcatLayer(layers, axis=1)
        # note: this maxout layer is built but never used; the output is taken from `network`
        maxout = FeaturePoolLayer(incoming=network, pool_size=2, axis=1, pool_function=theano.tensor.mean)
        return lasagne.layers.DenseLayer(network, num_units=2, nonlinearity=lasagne.nonlinearities.softmax)
Example no. 19
def build_sb_resnet_phase(prev_layer, n_out, count, stride):

    remaining_sticks = []
    # Initial stick length is 1.
    stick = ExpressionLayer(prev_layer,
                            function=lambda X: T.ones((X.shape[0], 1)),
                            output_shape=(None, 1))
    layer, remaining_stick = build_bottleneck_sb_residual_layer(
        prev_layer, n_out, stride, stick)
    remaining_sticks.append(remaining_stick)
    for _ in range(count - 1):
        layer, remaining_stick = build_bottleneck_sb_residual_layer(
            layer, n_out, stride=(1, 1), remaining_stick=remaining_stick)
        remaining_sticks.append(remaining_stick)

    # Compute posteriors
    posterior_a = ConcatLayer(
        [_remaining_stick.kumar_a for _remaining_stick in remaining_sticks],
        axis=1)
    posterior_b = ConcatLayer(
        [_remaining_stick.kumar_b for _remaining_stick in remaining_sticks],
        axis=1)
    stick_lengths = ConcatLayer(remaining_sticks, axis=1)
    return layer, (posterior_a, posterior_b, stick_lengths)
Example no. 20
    def dense_layer(self):
        """Add a dense layer (incl. bottleneck layer)."""
        model = self.batchnorm_pt2(self.current)
        model = self.nonlinearity(model)

        if self.bottleneck:
            model = self.convolution(model, self.neck_size, filter_size=(1, 1))
            model = self.dropout(model)
            model = BatchNormLayer(model)
            model = self.nonlinearity(model)

        model = self.convolution(model, self.growth)
        model = self.dropout(model)
        model = self.batchnorm_pt1(model)
        self.current = ConcatLayer([self.current, model], axis=1)
Example no. 21
    def dense_block(self, network, num_layers, growth_rate, dropout,
                    name_prefix):
        # concatenated 3x3 convolutions
        for n in range(num_layers):
            nam = name_prefix + '_l' + str(n + 1)
            conv = self.bn_relu_conv(network,
                                     channels=growth_rate,
                                     filter_size=3,
                                     dropout=dropout,
                                     name_prefix=nam)
            nam = name_prefix + '_l' + str(n + 1) + '_join'
            network = ConcatLayer([network, conv], axis=1, name=nam)
            self.layers.append(layer_info('concat'))

        return network
Example no. 22
    def init_nn_structure(self, seq_length, pred_len):
        """
        Inits network structure

        :param seq_length: number of features
        :type seq_length: int
        :param pred_len: number of predicted values (target dimensionality)
        :type pred_len: int
        :return: None
        """
        self.iteration = 0
        
        theano_input = T.tensor3()
        theano_output = T.matrix()
        
        from lasagne.layers import InputLayer, LSTMLayer, DenseLayer, ExpressionLayer, ConcatLayer
        from lasagne.nonlinearities import tanh
        
        model = {}
        model['input_layer'] = InputLayer((None, seq_length, 1), input_var=theano_input)
        
        lst_concat = []
        for i, key in enumerate(self.feature_dict.keys()):
            if self.feature_dict[key] is None or len(self.feature_dict[key]) == 0:
                continue
            # bind `key` per iteration; a plain lambda would capture only the last loop value
            model['input_slice_' + str(i)] = ExpressionLayer(model['input_layer'], lambda X, key=key: X[:, self.feature_dict[key], :])
            num_units = self.num_lstm_units_large if len(self.feature_dict[key]) > 10 else self.num_lstm_units_small
            model['hidden_layer_' + str(i) + '_1'] = LSTMLayer(model['input_slice_' + str(i)], 
                               num_units, grad_clipping=self.grad_clip, nonlinearity=tanh)
            model['hidden_layer_' + str(i) + '_2'] = LSTMLayer(model['hidden_layer_' + str(i) + '_1'], 
                               num_units, grad_clipping=self.grad_clip, nonlinearity=tanh, only_return_final=True)
            lst_concat.append(model['hidden_layer_' + str(i) + '_2'])
        model['concatenate_hidden'] = ConcatLayer(lst_concat, axis=1)
        model['output_layer'] = DenseLayer(model['concatenate_hidden'], pred_len, nonlinearity=None)
        
        model_output = lasagne.layers.get_output(model['output_layer'])
        params = lasagne.layers.get_all_params(model['output_layer'], trainable=True)

        self.loss = lasagne.objectives.squared_error(model_output, theano_output).mean()
        self.lr = theano.shared(np.array(self.learning_rate, dtype='float32'))
        self.updates = lasagne.updates.adam(self.loss, params, learning_rate=self.lr)

        self.l_out = model['output_layer']
        self.trainT = theano.function([theano_input, theano_output], self.loss, updates=self.updates)
        self.compute_cost = theano.function([theano_input, theano_output], self.loss)
        self.forecast = theano.function([theano_input], model_output)
        
Example no. 23
def build_convpool_conv1d(input_vars,
                          nb_classes,
                          imSize=32,
                          n_colors=3,
                          n_timewin=3):
    """
    Builds the complete network with 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :return: a pointer to the output of last layer
    """
    convnets = []
    W_init = None
    # Build n_timewin parallel CNNs (one per time window) with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, W_init = build_cnn(input_vars[i],
                                        imSize=imSize,
                                        n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i],
                                   W_init=W_init,
                                   imSize=imSize,
                                   n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)

    convpool = ReshapeLayer(convpool,
                            ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # convpool = ReshapeLayer(convpool, (-1, numTimeWin))

    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)

    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512,
                          nonlinearity=lasagne.nonlinearities.rectify)

    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes,
                          nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
Example no. 24
def inceptionB(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bc(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)

    l2 = bc(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bc(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
    l2 = bc(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

    l3 = Pool3DLayer(input_layer, pool_size=3, stride=2, pad=1)

    print 'inceptionB'
    print l1.output_shape
    print l2.output_shape
    print l3.output_shape

    return ConcatLayer([l1, l2, l3])
Example no. 25
def dense_block(network, num_layers, growth_rate, dropout, name_prefix):
    # concatenated 3x3 convolutions
    for n in range(num_layers):
        conv = affine_relu_conv(network,
                                channels=growth_rate,
                                filter_size=3,
                                dropout=dropout,
                                name_prefix=name_prefix + '_l%02d' % (n + 1))
        conv = BatchNormLayer(conv,
                              name=name_prefix + '_l%02dbn' % (n + 1),
                              beta=None,
                              gamma=None)
        network = ConcatLayer([network, conv],
                              axis=1,
                              name=name_prefix + '_l%02d_join' % (n + 1))
    return network
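A sketch of how this dense_block might be attached to a small stem; affine_relu_conv is assumed to come from the same project, and the stem layer below is hypothetical.

from lasagne.layers import InputLayer, Conv2DLayer

stem = Conv2DLayer(InputLayer((None, 3, 32, 32)), 16, 3, pad=1, name='pre_conv')
block = dense_block(stem, num_layers=4, growth_rate=12,
                    dropout=0.0, name_prefix='block1')
# each iteration concatenates growth_rate new maps: 16 + 4 * 12 = 64 channels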
Example no. 26
def inceptionA(input_layer, nfilt):
    # Corresponds to a modified version of figure 5 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=5, pad=2)

    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][2], filter_size=3, pad=1)

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4])
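A usage sketch for inceptionA, assuming bn_conv is the project's batch-normalised convolution helper; the input shape and filter counts below follow the usual Inception-v3 mixed-block sizing and are only illustrative.

from lasagne.layers import InputLayer

feats = InputLayer((None, 192, 35, 35))
mixed = inceptionA(feats, nfilt=((64,), (48, 64), (64, 96, 96), (32,)))
# branch outputs concatenate to 64 + 64 + 96 + 32 = 256 channels at the same 35x35 resolution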
Example no. 27
def build_actor_critic(state_size, num_act, actor_layers, critic_layers,
                       layer_norm):
    # input layers
    l_states = InputLayer([None, state_size])
    l_actions = InputLayer([None, num_act])
    l_input_critic = ConcatLayer([l_states, l_actions])
    # actor layer
    l_actor = build_actor(l_states,
                          num_act,
                          hid_sizes=actor_layers,
                          layer_norm=layer_norm)
    # critic layer
    l_critic = build_critic(l_input_critic,
                            hid_sizes=critic_layers,
                            layer_norm=layer_norm)
    return l_states, l_actions, l_actor, l_critic
Example no. 28
    def expansion(depth, deepest):
        n_filters = filter_for_depth(depth)

        incoming = net['conv{}_2'.format(depth + 1)] if deepest else net[
            '_conv{}_2'.format(depth + 1)]

        upscaling = Upscale2DLayer(incoming, 4)
        net['upconv{}'.format(depth)] = Conv2DLayer(upscaling,
                                                    num_filters=n_filters,
                                                    filter_size=2,
                                                    stride=2,
                                                    W=HeNormal(gain='relu'),
                                                    nonlinearity=nonlinearity)

        if P.SPATIAL_DROPOUT > 0:
            bridge_from = DropoutLayer(net['conv{}_2'.format(depth)],
                                       P.SPATIAL_DROPOUT)
        else:
            bridge_from = net['conv{}_2'.format(depth)]

        net['bridge{}'.format(depth)] = ConcatLayer(
            [net['upconv{}'.format(depth)], bridge_from],
            axis=1,
            cropping=[None, None, 'center', 'center'])

        net['_conv{}_1'.format(depth)] = Conv2DLayer(
            net['bridge{}'.format(depth)],
            num_filters=n_filters,
            filter_size=3,
            pad='valid',
            W=HeNormal(gain='relu'),
            nonlinearity=nonlinearity)

        #if P.BATCH_NORMALIZATION:
        #    net['_conv{}_1'.format(depth)] = batch_norm(net['_conv{}_1'.format(depth)])

        if P.DROPOUT > 0:
            net['_conv{}_1'.format(depth)] = DropoutLayer(
                net['_conv{}_1'.format(depth)], P.DROPOUT)

        net['_conv{}_2'.format(depth)] = Conv2DLayer(
            net['_conv{}_1'.format(depth)],
            num_filters=n_filters,
            filter_size=3,
            pad='valid',
            W=HeNormal(gain='relu'),
            nonlinearity=nonlinearity)
Example no. 29
def build_inception_module(name, input_layer, nfilters):
    net = {}
    net['pool'] = PoolLayer(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(net['pool'],
                                 nfilters[0],
                                 1,
                                 nonlinearity=elu,
                                 flip_filters=False)

    net['1x1'] = ConvLayer(input_layer,
                           nfilters[1],
                           1,
                           nonlinearity=elu,
                           flip_filters=False)

    net['3x3_reduce'] = ConvLayer(input_layer,
                                  nfilters[2],
                                  1,
                                  flip_filters=False)
    net['3x3'] = ConvLayer(net['3x3_reduce'],
                           nfilters[3],
                           3,
                           pad=1,
                           nonlinearity=elu,
                           flip_filters=False)

    net['5x5_reduce'] = ConvLayer(input_layer,
                                  nfilters[4],
                                  1,
                                  flip_filters=False)
    net['5x5'] = DropoutLayer(ConvLayer(net['5x5_reduce'],
                                        nfilters[5],
                                        5,
                                        pad=2,
                                        nonlinearity=elu,
                                        flip_filters=False),
                              p=0.4)

    net['output'] = ConcatLayer([
        net['1x1'],
        net['3x3'],
        net['5x5'],
        net['pool_proj'],
    ])

    return {'{}/{}'.format(name, k): v for k, v in net.items()}
Example no. 30
def build_generator(input_noise=None, input_text=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, batch_norm, ConcatLayer
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, noise_dim), input_var=input_noise)
    layer2 = InputLayer(shape=(None, 1, 300), input_var=input_text)
    layer2 = ReshapeLayer(layer2, ([0], 1 * 300))

    layer = ConcatLayer([layer, layer2], axis=1)
    # fully-connected layer
    for i in range(len(layer_list)):
        layer = batch_norm(DenseLayer(layer, layer_list[i]))

    layer = batch_norm(DenseLayer(layer, 1 * 28 * 28, nonlinearity=sigmoid))
    layer = ReshapeLayer(layer, ([0], 1, 28, 28))
    print("Generator output:", layer.output_shape)
    return layer
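A hypothetical driver for build_generator; noise_dim and layer_list are module-level globals in the original code, so the values below are only illustrative and would have to live in the same module as build_generator.

import theano.tensor as T

noise_dim, layer_list = 100, [512, 512]  # assumed global configuration
input_noise = T.matrix('noise')
input_text = T.tensor3('text')
generator = build_generator(input_noise, input_text)  # prints "Generator output: (None, 1, 28, 28)"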