def InceptionLayer(incoming, param_dict, block_name):
    """Build a 3D Inception-style block and concatenate its branches.

    incoming   : input lasagne layer.
    param_dict : list of per-branch dicts holding parallel lists under
                 'style', 'num_filters', 'filter_size', 'strides',
                 'nonlinearity', 'bnorm', with padding given under 'pad'
                 or (legacy) 'border_mode', and 'mode' for pooling
                 sub-layers.
    block_name : name prefix for every created sub-layer.

    Returns a ConcatLayer joining the last layer of each branch.
    """
    branches = [None] * len(param_dict)
    # Build each branch in turn.
    for i, bdict in enumerate(param_dict):
        # Walk up the branch, chaining sub-layers.
        for j, style in enumerate(bdict['style']):
            # The first sub-layer reads the block input; later ones chain.
            source = incoming if j == 0 else branches[i]
            sub_name = block_name + '_' + str(i) + '_' + str(j)
            if style == 'convolutional':
                # Padding may live under 'pad' or the older 'border_mode'.
                if 'pad' in bdict:
                    padding = bdict['pad'][j]
                elif 'border_mode' in bdict:
                    padding = bdict['border_mode'][j]
                else:
                    padding = None
                branches[i] = lasagne.layers.dnn.Conv3DDNNLayer(
                    incoming=source,
                    num_filters=bdict['num_filters'][j],
                    filter_size=bdict['filter_size'][j],
                    pad=padding,
                    stride=bdict['strides'][j],
                    W=initmethod('relu'),
                    nonlinearity=bdict['nonlinearity'][j],
                    name=sub_name)
            else:
                # Any non-convolutional style is treated as pooling,
                # followed by the requested nonlinearity.
                pooled = lasagne.layers.dnn.Pool3DDNNLayer(
                    incoming=source,
                    pool_size=bdict['filter_size'][j],
                    mode=bdict['mode'][j],
                    stride=bdict['strides'][j],
                    pad=bdict['pad'][j],
                    name=sub_name)
                branches[i] = lasagne.layers.NonlinearityLayer(
                    pooled,
                    nonlinearity=bdict['nonlinearity'][j])
            # Optionally follow the sub-layer with batch normalization.
            if bdict['bnorm'][j]:
                branches[i] = lasagne.layers.batch_norm(
                    branches[i],
                    name=block_name + '_bnorm_' + str(i) + '_' + str(j))
    # Concatenate all branch outputs along the channel axis.
    return lasagne.layers.ConcatLayer(incomings=branches, name=block_name)
def InceptionLayer(incoming,param_dict,block_name):
    """Build a 3D Inception-style block and concatenate its branches.

    NOTE(review): this is a byte-identical redefinition of the
    InceptionLayer defined immediately above; at import time this
    second definition shadows the first.

    incoming   : input lasagne layer.
    param_dict : list of per-branch dicts holding parallel lists under
                 'style', 'num_filters', 'filter_size', 'strides',
                 'nonlinearity', 'bnorm', with padding under 'pad' or
                 (legacy) 'border_mode', and 'mode' for pooling
                 sub-layers.
    block_name : name prefix for every created sub-layer.

    Returns a ConcatLayer joining the last layer of each branch.
    """
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):  # NB: `dict` shadows the builtin
        for j,style in enumerate(dict['style']): # Loop up branch
            # First sub-layer consumes `incoming`; later sub-layers chain
            # onto the branch built so far. Non-'convolutional' styles
            # fall through to pooling + nonlinearity.
            branch[i] = lasagne.layers.dnn.Conv3DDNNLayer(
                incoming = incoming if j == 0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad = dict['pad'][j] if 'pad' in dict else dict['border_mode'][j] if 'border_mode' in dict else None,
                stride = dict['strides'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional' else lasagne.layers.NonlinearityLayer(lasagne.layers.dnn.Pool3DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['strides'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j])
                # Apply Batchnorm    
            branch[i] = lasagne.layers.batch_norm(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers        
            
    return lasagne.layers.ConcatLayer(incomings=branch,name=block_name)  
# Example #3
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1),
              crop=0,
              untie_biases=False,
              W=initmethod(),
              b=lasagne.init.Constant(0.),
              nonlinearity=lasagne.nonlinearities.rectify,
              flip_filters=False,
              **kwargs):
     """Initialize the layer.

     Forwards all arguments to the parent constructor (passing n=2 —
     presumably the spatial dimensionality; confirm against the base
     class), then exposes the parent's `pad` attribute under the name
     `crop`.
     """
     super(DeconvLayer, self).__init__(incoming,
                                       num_filters,
                                       filter_size,
                                       stride,
                                       crop,
                                       untie_biases,
                                       W,
                                       b,
                                       nonlinearity,
                                       flip_filters,
                                       n=2,
                                       **kwargs)
     # Rename the inherited attribute: copy self.pad into self.crop and
     # delete self.pad. (The original comment said "rename self.crop to
     # self.pad", which has the direction reversed.)
     self.crop = self.pad
     del self.pad
# Example #4
def InceptionUpscaleLayer(incoming, param_dict, block_name):
    """Build an upscaling Inception-style block and concatenate its branches.

    incoming   : input lasagne layer.
    param_dict : list of per-branch dicts holding parallel lists under
                 'style', 'num_filters', 'filter_size', 'stride',
                 'nonlinearity', 'bnorm', plus 'pad' and (for pooling
                 sub-layers) 'mode'.
    block_name : name prefix for every created sub-layer.

    Returns a ConcatLayer (CL) joining the last layer of each branch.
    """
    branches = [None] * len(param_dict)
    # Build every branch.
    for i, bdict in enumerate(param_dict):
        # Walk up the branch, chaining sub-layers.
        for j, style in enumerate(bdict['style']):
            # The first sub-layer reads the block input; later ones chain.
            source = incoming if j == 0 else branches[i]
            sub_name = block_name + '_' + str(i) + '_' + str(j)
            if style == 'convolutional':
                # Transposed convolution upsamples via its stride.
                branches[i] = TC2D(
                    incoming=source,
                    num_filters=bdict['num_filters'][j],
                    filter_size=bdict['filter_size'][j],
                    crop=bdict['pad'][j] if 'pad' in bdict else None,
                    stride=bdict['stride'][j],
                    W=initmethod('relu'),
                    nonlinearity=bdict['nonlinearity'][j],
                    name=sub_name)
            else:
                # Pooling path: upscale first, then pool with unit stride,
                # then apply the requested nonlinearity.
                upscaled = lasagne.layers.Upscale2DLayer(
                    incoming=source,
                    scale_factor=bdict['stride'][j])
                pooled = lasagne.layers.dnn.Pool2DDNNLayer(
                    incoming=upscaled,
                    pool_size=bdict['filter_size'][j],
                    stride=[1, 1],
                    mode=bdict['mode'][j],
                    pad=bdict['pad'][j],
                    name=sub_name)
                branches[i] = NL(incoming=pooled,
                                 nonlinearity=bdict['nonlinearity'][j])
            # Optionally follow the sub-layer with batch normalization.
            if bdict['bnorm'][j]:
                branches[i] = BN(
                    branches[i],
                    name=block_name + '_bnorm_' + str(i) + '_' + str(j))
    # Concatenate all branch outputs along the channel axis.
    return CL(incomings=branches, name=block_name)
# Example #5
def InceptionLayer(incoming, param_dict, block_name):
    """Build a 2D Inception-style block and concatenate its branches.

    NOTE(review): third definition of InceptionLayer in this file; it
    shadows the 3D versions defined above.

    incoming   : input lasagne layer.
    param_dict : list of per-branch dicts holding parallel lists under
                 'style', 'num_filters', 'filter_size', 'stride',
                 'nonlinearity', 'bnorm', plus 'pad', 'mode' (pooling)
                 and 'dilation' (dilated convs) where applicable.
    block_name : name prefix for every created sub-layer.

    Returns a ConcatLayer (CL) joining the last layer of each branch.
    """
    branch = [0] * len(param_dict)
    # Loop across branches
    for i, dict in enumerate(param_dict):  # NB: `dict` shadows the builtin
        for j, style in enumerate(dict['style']):  # Loop up branch
            # Four-way dispatch on `style`: 'convolutional' -> C2D,
            # 'pool' -> Pool2DDNNLayer wrapped in a nonlinearity,
            # 'dilation' -> DilatedConv2DLayer (with explicit PadLayer,
            # since DilatedConv2DLayer takes no pad argument), and any
            # other value falls through to a dense layer (DL).
            # First sub-layer consumes `incoming`; later ones chain.
            branch[i] = C2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad =  dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['stride'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j]) if style=='pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming = lasagne.layers.PadLayer(incoming = incoming if j==0 else branch[i],width = dict['pad'][j]) if 'pad' in dict else incoming if j==0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                dilation = dict['dilation'][j],
                # pad = dict['pad'][j] if 'pad' in dict else None,
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j))  if style== 'dilation'\
            else DL(
                    incoming = incoming if j==0 else branch[i],
                    num_units = dict['num_filters'][j],
                    W = initmethod('relu'),
                    b = None,
                    nonlinearity = dict['nonlinearity'][j],
                    name = block_name+'_'+str(i)+'_'+str(j))
            # Apply Batchnorm
            branch[i] = BN(branch[i],
                           name=block_name + '_bnorm_' + str(i) + '_' +
                           str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers

    return CL(incomings=branch, name=block_name)
# Example #6
def MDCL(incoming, num_filters, scales, name):
    """Multiscale Dilated Convolutional Layer.

    Builds one base 3x3 convolution plus one extra path per entry of
    `scales`, all of which reuse (rescaled views of) a single shared
    weight tensor W, and sums the paths elementwise.

    incoming    : input lasagne layer.
    num_filters : number of output channels for every path.
    scales      : iterable of dilation factors; a scale of 0 instead adds
                  a 1x1 path whose kernel is the spatial mean of W.
    name        : prefix used for parameter and layer names.

    Returns an elementwise-sum layer (ESL) over all paths.
    """
    winit = initmethod(0.02)
    # Each path's per-filter coefficient starts at 1/(1+len(scales)), so
    # the summed paths initially contribute an approximate average.
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))
    # Number of incoming channels.
    ni = lasagne.layers.get_output_shape(incoming)[1]
    # Single shared 3x3 weight bank; every path below reuses a rescaled
    # (or spatially reduced) view of it.
    W = theano.shared(lasagne.utils.floatX(
        winit.sample((num_filters, ni, 3, 3))),
                      name=name + 'W')
    # Base path: plain 3x3 convolution with per-filter coefficient.
    n = C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[1, 1],
            pad=(1, 1),
            W=W *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_base').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + 'base')
    nd = []
    for i, scale in enumerate(scales):
        if scale == 0:
            # Scale 0: 1x1 convolution whose kernel is the spatial mean
            # of the shared weight bank.
            nd.append(
                C2D(incoming=incoming,
                    num_filters=num_filters,
                    filter_size=[1, 1],
                    stride=[1, 1],
                    pad=(0, 0),
                    W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x') *
                    theano.shared(
                        lasagne.utils.floatX(sinit.sample(num_filters)),
                        name + '_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
                    b=None,
                    nonlinearity=None,
                    name=name + str(scale)))
        else:
            # Dilated path. DilatedConv2DLayer stores W with the input
            # and output channel axes swapped relative to C2D, hence the
            # dimshuffle(1, 0, 2, 3); the per-filter coefficient is
            # therefore broadcast along axis 1 (the filter axis).
            nd.append(
                lasagne.layers.DilatedConv2DLayer(
                    incoming=lasagne.layers.PadLayer(incoming=incoming,
                                                     width=(scale, scale)),
                    num_filters=num_filters,
                    filter_size=[3, 3],
                    dilation=(scale, scale),
                    W=W.dimshuffle(1, 0, 2, 3) * theano.shared(
                        lasagne.utils.floatX(sinit.sample(num_filters)),
                        name + '_coeff_' + str(scale)).dimshuffle(
                            'x', 0, 'x', 'x'),
                    b=None,
                    nonlinearity=None,
                    name=name + str(scale)))
    return ESL(nd + [n])
def get_model():
    """Build a 3D voxel-classification network and return its endpoints.

    Reads `dims`, `n_channels` and `n_classes` from the module-level
    `cfg` dict, seeds lasagne's RNG for reproducibility, and stacks an
    initial 3D convolution, pre-activation Inception blocks with
    stochastic depth (ResDrop, with survival probability `p` decreasing
    with depth), strided Inception downsampling blocks (conv4/8/12/16),
    a wide residual conv (conv17), global pooling and two dense layers.

    Helpers used here (InceptionLayer, ResDrop, ResDropNoPre, NL, BN,
    elu, initmethod, cfg) are defined elsewhere in this file.

    Returns {'l_in': input layer, 'l_out': final dense layer}.
    """
    lasagne.random.set_rng(np.random.RandomState(1234))
    dims, n_channels, n_classes = tuple(cfg['dims']), cfg['n_channels'], cfg['n_classes']
    shape = (None, n_channels)+dims
    l_in = lasagne.layers.InputLayer(shape=shape)
    l_conv0 = lasagne.layers.dnn.Conv3DDNNLayer(
        incoming = l_in,
        num_filters = 32,
        filter_size = (3,3,3),
        stride = (1,1,1),
        pad = 'same',
        W = initmethod(),
        nonlinearity = None,
        name = 'l_conv0')        
    l_conv1 = ResDrop(incoming = l_conv0, 
        IB = InceptionLayer(incoming = NL(BN(l_conv0,name='bn_conv0'),elu), 
            param_dict = [{'num_filters':[8,8,16],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[8,16],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv1'),p=0.95)   
    l_conv2 = ResDrop(incoming = l_conv1, 
        IB = InceptionLayer(incoming = NL(BN(l_conv1,name='bn_conv1'),elu), 
            param_dict = [{'num_filters':[8,8,16],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[8,16],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv2'),p=0.9)
    l_conv3 = ResDrop(incoming = l_conv2, 
        IB = InceptionLayer(incoming = NL(BN(l_conv2,name='bn_conv2'),elu), 
            param_dict = [{'num_filters':[8,8,16],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[8,16],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv3'),p=0.8)             
    # Downsampling block: every branch has stride (2,2,2) at its end.
    l_conv4 = InceptionLayer(incoming = NL(BN(l_conv3,name='bn_conv3'),elu), 
            param_dict = [{'num_filters':[16],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[16],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[16,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]},
                {'num_filters':[16,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]}],
            block_name = 'conv4')       
    l_conv5 = ResDrop(incoming = l_conv4, 
        IB = InceptionLayer(incoming = NL(BN(l_conv4,name='bn_conv4'),elu), 
            param_dict = [{'num_filters':[16,16,32],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[16,32],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv5'),p=0.7)
    l_conv6 = ResDrop(incoming = l_conv5, 
        IB = InceptionLayer(incoming = NL(BN(l_conv5,name='bn_conv5'),elu), 
            param_dict = [{'num_filters':[16,16,32],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[16,32],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv6'),p=0.6)
    l_conv7 = ResDrop(incoming = l_conv6, 
        IB = InceptionLayer(incoming = NL(BN(l_conv6,name='bn_conv6'),elu), 
            param_dict = [{'num_filters':[16,16,32],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[16,32],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv7'),p=0.5)
    # Second downsampling block.
    l_conv8 = InceptionLayer(incoming = NL(BN(l_conv7,name='bn_conv7'),elu), 
            param_dict = [{'num_filters':[32],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[32],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[32,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]},
                {'num_filters':[32,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]}],
            block_name = 'conv8')        
    l_conv9 = ResDrop(incoming = l_conv8, 
        IB = InceptionLayer(incoming = NL(BN(l_conv8,name='bn_conv8'),elu), 
            param_dict = [{'num_filters':[32,32,64],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[32,64],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv9'),p=0.5)
    l_conv10 = ResDrop(incoming = l_conv9, 
        IB = InceptionLayer(incoming = NL(BN(l_conv9,name='bn_conv9'),elu), 
            param_dict = [{'num_filters':[32,32,64],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[32,64],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv10'),p=0.45)
    # NOTE(review): the residual input here is l_conv8, breaking the
    # conv_{k-1} -> conv_k pattern used everywhere else (the inner BN
    # correctly uses l_conv10). The shapes are compatible either way, so
    # this could be an intentional long skip connection or a typo for
    # l_conv10 — confirm against the reference implementation.
    l_conv11 = ResDrop(incoming = l_conv8, 
        IB = InceptionLayer(incoming = NL(BN(l_conv10,name='bn_conv10'),elu), 
            param_dict = [{'num_filters':[32,32,64],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[32,64],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv11'),p=0.40)        
    # Third downsampling block.
    l_conv12 = InceptionLayer(incoming = NL(BN(l_conv11,name='bn_conv11'),elu), 
            param_dict = [{'num_filters':[64],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[64],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[64,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]},
                {'num_filters':[64,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]}],
            block_name = 'conv12')
    l_conv13 = ResDrop(incoming = l_conv12, 
        IB = InceptionLayer(incoming = NL(BN(l_conv12,name='bn_conv12'),elu), 
            param_dict = [{'num_filters':[64,64,128],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[64,128],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv13'),p=0.35)
    l_conv14 = ResDrop(incoming = l_conv13, 
        IB = InceptionLayer(incoming = NL(BN(l_conv13,name='bn_conv13'),elu), 
            param_dict = [{'num_filters':[64,64,128],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[64,128],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv14'),p=0.30)
    l_conv15 = ResDrop(incoming = l_conv14, 
        IB = InceptionLayer(incoming = NL(BN(l_conv14,name='bn_conv14'),elu), 
            param_dict = [{'num_filters':[64,64,128],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]},
                {'num_filters':[64,128],
                'filter_size':[(3,3,3)]*2,
                'border_mode':['same']*2,
                'strides':[(1,1,1)]*2,
                'nonlinearity': [elu,None],
                'style': ['convolutional']*2,
                'bnorm':[1,0]}],
            block_name = 'conv15'),p=0.25)        
    # Final downsampling block (with nonlinearities, unlike conv4/8/12).
    l_conv16 = InceptionLayer(incoming = NL(BN(l_conv15,name='bn_conv15'),elu), 
            param_dict = [{'num_filters':[128],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [elu],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[128],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [elu],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[128,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,elu],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]},
                {'num_filters':[128,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,elu],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]}],
            block_name = 'conv16')
    # Wide residual conv with stochastic depth (p=0.5), no pre-activation.
    l_conv17 = ResDropNoPre(l_conv16,BN(lasagne.layers.dnn.Conv3DDNNLayer(
        incoming = l_conv16,
        num_filters = 512,
        filter_size = (3,3,3),
        pad = 'same',
        W = initmethod('relu'),
        nonlinearity = None,
        name = 'l_conv17'),name='bn_conv17'),0.5)
    # Global pooling + classifier head.
    l_pool = BN(lasagne.layers.GlobalPoolLayer(l_conv17),name='l_pool')
    l_fc1 = BN(lasagne.layers.DenseLayer(
        incoming = l_pool,
        num_units = 512,
        W = initmethod('relu'),
        nonlinearity = elu,
        name =  'fc1'
        ),name = 'bnorm_fc1') 
    # Final logits layer: no nonlinearity (softmax applied by the caller,
    # presumably — confirm at the loss computation).
    l_fc2 = lasagne.layers.DenseLayer(
        incoming = l_fc1,
        num_units = n_classes,
        W = initmethod(),
        nonlinearity = None,
        name = 'fc2'
        )
    return {'l_in':l_in, 'l_out':l_fc2}
def get_model():
    """Build a 3D Inception-style residual classifier with stochastic depth.

    Structure: a strided 7x7x7 stem conv, then four stages of bottleneck
    residual Inception blocks (ResDrop, with p decaying the deeper the
    block), each stage separated by a four-branch downsampling Inception
    block (strided 3x3x3 conv / strided 1x1x1 conv / conv+max-pool /
    conv+avg-pool), a final 512-filter conv, global pooling, and two
    dense layers.

    Relies on helpers defined elsewhere in this file: InceptionLayer,
    ResDrop, ResDropNoPre, BN, NL, initmethod, elu, and the cfg dict.

    Returns:
        dict: {'l_in': input layer,
               'l_out': dense layer emitting n_classes unnormalized
                        scores (no softmax is applied here)}.
    """
    # Fix the RNG so weight initialization is reproducible.
    lasagne.random.set_rng(np.random.RandomState(1234))
    dims, n_channels, n_classes = tuple(cfg['dims']), cfg['n_channels'], cfg['n_classes']
    # Input shape: (batch, channels) + dims -- dims presumably holds the
    # three spatial extents of the voxel grid (TODO confirm against cfg).
    shape = (None, n_channels)+dims
    l_in = lasagne.layers.InputLayer(shape=shape)
    # Stem: strided conv, linear output; BN + elu are applied as
    # pre-activation by the first residual block below.
    l_conv0 = lasagne.layers.dnn.Conv3DDNNLayer(
        incoming = l_in,
        num_filters = 32,
        filter_size = (7,7,7),
        stride = (2,2,2),
        pad = 'same',
        W = initmethod(),
        nonlinearity = None,
        name = 'l_conv0')
    # Stage 1: three 16-16-32 bottleneck residual blocks. p is presumably
    # the block survival probability (decays 0.95 -> 0.8 with depth) --
    # confirm against the ResDrop definition.
    l_conv1 = ResDrop(incoming = l_conv0, 
        IB = InceptionLayer(incoming = NL(BN(l_conv0,name='bn_conv0'),elu), 
            param_dict = [{'num_filters':[16,16,32],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv1'),p=0.95)
    l_conv2 = ResDrop(incoming = l_conv1, 
        IB = InceptionLayer(incoming = NL(BN(l_conv1,name='bn_conv1'),elu), 
            param_dict = [{'num_filters':[16,16,32],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv2'),p=0.9)
    l_conv3 = ResDrop(incoming = l_conv2, 
        IB = InceptionLayer(incoming = NL(BN(l_conv2,name='bn_conv2'),elu), 
            param_dict = [{'num_filters':[16,16,32],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv3'),p=0.8)
    # Downsample #1: four parallel stride-2 branches (strided conv,
    # strided 1x1 conv, conv + max-pool, conv + avg-pool), concatenated.
    # All branches are linear here; BN + elu come from the next block.
    l_conv4 = InceptionLayer(incoming = NL(BN(l_conv3,name='bn_conv3'),elu), 
            param_dict = [{'num_filters':[16],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[16],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[16,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]},
                {'num_filters':[16,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]}],
            block_name = 'conv4')
    # Stage 2: three 32-32-64 bottleneck residual blocks (p 0.7 -> 0.6).
    l_conv5 = ResDrop(incoming = l_conv4, 
        IB = InceptionLayer(incoming = NL(BN(l_conv4,name='bn_conv4'),elu), 
            param_dict = [{'num_filters':[32,32,64],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv5'),p=0.7)
    l_conv6 = ResDrop(incoming = l_conv5, 
        IB = InceptionLayer(incoming = NL(BN(l_conv5,name='bn_conv5'),elu), 
            param_dict = [{'num_filters':[32,32,64],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv6'),p=0.6)
    l_conv7 = ResDrop(incoming = l_conv6, 
        IB = InceptionLayer(incoming = NL(BN(l_conv6,name='bn_conv6'),elu), 
            param_dict = [{'num_filters':[32,32,64],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv7'),p=0.6)
    # Downsample #2: same four-branch pattern at 32 filters.
    l_conv8 = InceptionLayer(incoming = NL(BN(l_conv7,name='bn_conv7'),elu), 
            param_dict = [{'num_filters':[32],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[32],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[32,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]},
                {'num_filters':[32,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]}],
            block_name = 'conv8')
    # Stage 3: three 64-64-128 bottleneck residual blocks (p = 0.5).
    l_conv9 = ResDrop(incoming = l_conv8, 
        IB = InceptionLayer(incoming = NL(BN(l_conv8,name='bn_conv8'),elu), 
            param_dict = [{'num_filters':[64,64,128],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv9'),p=0.5)
    l_conv10 = ResDrop(incoming = l_conv9, 
        IB = InceptionLayer(incoming = NL(BN(l_conv9,name='bn_conv9'),elu), 
            param_dict = [{'num_filters':[64,64,128],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv10'),p=0.5)
    # NOTE(review): every other residual block passes the immediately
    # preceding layer as `incoming`, but this one skips l_conv9/l_conv10
    # and shortcuts from l_conv8 while the IB branch reads l_conv10.
    # Possibly a typo for l_conv10 -- confirm before changing.
    l_conv11 = ResDrop(incoming = l_conv8, 
        IB = InceptionLayer(incoming = NL(BN(l_conv10,name='bn_conv10'),elu), 
            param_dict = [{'num_filters':[64,64,128],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv11'),p=0.5)
    # Downsample #3: same four-branch pattern at 64 filters.
    l_conv12 = InceptionLayer(incoming = NL(BN(l_conv11,name='bn_conv11'),elu), 
            param_dict = [{'num_filters':[64],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[64],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [None],
                'style': ['convolutional'],
                'bnorm':[0]},
                {'num_filters':[64,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]},
                {'num_filters':[64,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,None],
                'style': ['convolutional','pool'],
                'bnorm':[0,0]}],
            block_name = 'conv12')
    # Stage 4: three 128-128-256 bottleneck residual blocks (p 0.5 -> 0.3).
    l_conv13 = ResDrop(incoming = l_conv12, 
        IB = InceptionLayer(incoming = NL(BN(l_conv12,name='bn_conv12'),elu), 
            param_dict = [{'num_filters':[128,128,256],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv13'),p=0.5)
    l_conv14 = ResDrop(incoming = l_conv13, 
        IB = InceptionLayer(incoming = NL(BN(l_conv13,name='bn_conv13'),elu), 
            param_dict = [{'num_filters':[128,128,256],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv14'),p=0.4)
    l_conv15 = ResDrop(incoming = l_conv14, 
        IB = InceptionLayer(incoming = NL(BN(l_conv14,name='bn_conv14'),elu), 
            param_dict = [{'num_filters':[128,128,256],
                'filter_size':[(1,1,1),(3,3,3),(1,1,1)],
                'border_mode':['same']*3,
                'strides':[(1,1,1)]*3,
                'nonlinearity': [elu,elu,None],
                'style': ['convolutional']*3,
                'bnorm':[1,1,0]}],
            block_name = 'conv15'),p=0.3)
    # Downsample #4: unlike the earlier downsampling blocks, the branches
    # here apply elu and batch norm internally.
    l_conv16 = InceptionLayer(incoming = NL(BN(l_conv15,name='bn_conv15'),elu), 
            param_dict = [{'num_filters':[128],
                'filter_size':[(3,3,3)],
                'border_mode':['same'],
                'strides':[(2,2,2)],
                'nonlinearity': [elu],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[128],
                'filter_size':[(1,1,1)],
                'pad':[(0,0,0)],
                'strides':[(2,2,2)],
                'nonlinearity': [elu],
                'style': ['convolutional'],
                'bnorm':[1]},
                {'num_filters':[128,1],
                'mode': [0,'max'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,elu],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]},
                {'num_filters':[128,1],
                'mode': [0,'average_inc_pad'],
                'filter_size':[(3,3,3),(3,3,3)],
                'pad':[(1,1,1),(1,1,1)],
                'strides':[(1,1,1),(2,2,2)],
                'nonlinearity': [None,elu],
                'style': ['convolutional','pool'],
                'bnorm':[0,1]}],
            block_name = 'conv16')
    # Final 512-filter conv wrapped in a no-pre-activation stochastic-depth
    # block; the trailing 0.2 is presumably its p parameter -- confirm
    # against ResDropNoPre.
    l_conv17 = ResDropNoPre(l_conv16,BN(lasagne.layers.dnn.Conv3DDNNLayer(
        incoming = l_conv16,
        num_filters = 512,
        filter_size = (3,3,3),
        pad = 'same',
        W = initmethod('relu'),
        nonlinearity = None,
        name = 'l_conv17'),name='bn_conv17'),0.2)
    # Head: batch-normalized global pooling, a 512-unit dense layer, and a
    # linear dense layer producing unnormalized class scores.
    l_pool = BN(lasagne.layers.GlobalPoolLayer(l_conv17),name='l_pool')
    l_fc1 = BN(lasagne.layers.DenseLayer(
        incoming = l_pool,
        num_units = 512,
        W = initmethod('relu'),
        nonlinearity = elu,
        name =  'fc1'
        ),name = 'bnorm_fc1') 
    l_fc2 = lasagne.layers.DenseLayer(
        incoming = l_fc1,
        num_units = n_classes,
        W = initmethod(),
        nonlinearity = None,
        name = 'fc2'
        )
    return {'l_in':l_in, 'l_out':l_fc2}
# Exemple #9
# 0
def get_model(interp=False):
    """Build a 2D convolutional VAE/GAN with an IAF-refined posterior.

    Encoder: four strided 5x5 convolutions (128 -> 1024 filters) plus a
    dense layer feed two batch-normalized dense heads producing the latent
    mean and log-sigma. A Gaussian sample is refined by one Inverse
    Autoregressive Flow step whose mu/log-sigma come from MADE networks.
    Decoder: dense + reshape to 512x4x4, then alternating transposed convs
    and MDBLOCKs back to full resolution. The output head predicts two
    maps per colour channel, chained autoregressively across channels
    (G conditions on R, B on R and G), which parameterize per-pixel Beta
    distributions. A minibatch-discrimination discriminator taps the last
    encoder conv.

    Args:
        interp: accepted but unused in this body -- presumably kept for
            interface parity with the other get_model variants.

    Returns:
        dict: input/output layers, latent mean/log-sigma, the IAF layers,
        the sampled latents, the encoder convs (for introspection), and
        the 3-way softmax discriminator head.
    """
    dims, n_channels = tuple(cfg['dims']), cfg['n_channels']
    shape = (None, n_channels) + dims
    l_in = lasagne.layers.InputLayer(shape=shape)
    # Encoder: strided 5x5 convs halve the resolution at each step; all but
    # the first are batch-normalized.
    l_enc_conv1 = C2D(incoming=l_in,
                      num_filters=128,
                      filter_size=[5, 5],
                      stride=[2, 2],
                      pad=(2, 2),
                      W=initmethod(0.02),
                      nonlinearity=lrelu(0.2),
                      name='enc_conv1')
    l_enc_conv2 = BN(C2D(incoming=l_enc_conv1,
                         num_filters=256,
                         filter_size=[5, 5],
                         stride=[2, 2],
                         pad=(2, 2),
                         W=initmethod(0.02),
                         nonlinearity=lrelu(0.2),
                         name='enc_conv2'),
                     name='bnorm2')
    l_enc_conv3 = BN(C2D(incoming=l_enc_conv2,
                         num_filters=512,
                         filter_size=[5, 5],
                         stride=[2, 2],
                         pad=(2, 2),
                         W=initmethod(0.02),
                         nonlinearity=lrelu(0.2),
                         name='enc_conv3'),
                     name='bnorm3')
    l_enc_conv4 = BN(C2D(incoming=l_enc_conv3,
                         num_filters=1024,
                         filter_size=[5, 5],
                         stride=[2, 2],
                         pad=(2, 2),
                         W=initmethod(0.02),
                         nonlinearity=lrelu(0.2),
                         name='enc_conv4'),
                     name='bnorm4')

    # NOTE(review): leftover debug print of the encoder output shape.
    print(lasagne.layers.get_output_shape(l_enc_conv4, (196, 3, 64, 64)))
    l_enc_fc1 = BN(DL(incoming=l_enc_conv4,
                      num_units=1000,
                      W=initmethod(0.02),
                      nonlinearity=relu,
                      name='enc_fc1'),
                   name='bnorm_enc_fc1')

    # Define latent values
    l_enc_mu, l_enc_logsigma = [
        BN(DL(incoming=l_enc_fc1,
              num_units=cfg['num_latents'],
              nonlinearity=None,
              name='enc_mu'),
           name='mu_bnorm'),
        BN(DL(incoming=l_enc_fc1,
              num_units=cfg['num_latents'],
              nonlinearity=None,
              name='enc_logsigma'),
           name='ls_bnorm')
    ]
    # Reparameterized Gaussian sample, then one IAF step driven by two
    # MADE networks (one for mu, one for log-sigma).
    l_Z_IAF = GaussianSampleLayer(l_enc_mu, l_enc_logsigma, name='l_Z_IAF')
    l_IAF_mu, l_IAF_logsigma = [
        MADE(l_Z_IAF, [cfg['num_latents']], 'l_IAF_mu'),
        MADE(l_Z_IAF, [cfg['num_latents']], 'l_IAF_ls')
    ]
    l_Z = IAFLayer(l_Z_IAF, l_IAF_mu, l_IAF_logsigma, name='l_Z')
    # Decoder: project latents to a 512x4x4 feature map, then upsample with
    # transposed convs interleaved with MDBLOCK refinement stages.
    l_dec_fc2 = DL(incoming=l_Z,
                   num_units=512 * 16,
                   nonlinearity=lrelu(0.2),
                   W=initmethod(0.02),
                   name='l_dec_fc2')
    l_unflatten = lasagne.layers.ReshapeLayer(
        incoming=l_dec_fc2,
        shape=([0], 512, 4, 4),
    )
    l_dec_conv1 = DeconvLayer(incoming=l_unflatten,
                              num_filters=512,
                              filter_size=[5, 5],
                              stride=[2, 2],
                              crop=(2, 2),
                              W=initmethod(0.02),
                              nonlinearity=None,
                              name='dec_conv1')
    l_dec_conv2a = MDBLOCK(incoming=l_dec_conv1,
                           num_filters=512,
                           scales=[0, 2],
                           name='dec_conv2a',
                           nonlinearity=lrelu(0.2))
    l_dec_conv2 = DeconvLayer(incoming=l_dec_conv2a,
                              num_filters=256,
                              filter_size=[5, 5],
                              stride=[2, 2],
                              crop=(2, 2),
                              W=initmethod(0.02),
                              nonlinearity=None,
                              name='dec_conv2')
    l_dec_conv3a = MDBLOCK(incoming=l_dec_conv2,
                           num_filters=256,
                           scales=[0, 2, 3],
                           name='dec_conv3a',
                           nonlinearity=lrelu(0.2))
    l_dec_conv3 = DeconvLayer(incoming=l_dec_conv3a,
                              num_filters=128,
                              filter_size=[5, 5],
                              stride=[2, 2],
                              crop=(2, 2),
                              W=initmethod(0.02),
                              nonlinearity=None,
                              name='dec_conv3')
    l_dec_conv4a = MDBLOCK(incoming=l_dec_conv3,
                           num_filters=128,
                           scales=[0, 2, 3],
                           name='dec_conv4a',
                           nonlinearity=lrelu(0.2))
    l_dec_conv4 = BN(DeconvLayer(incoming=l_dec_conv4a,
                                 num_filters=128,
                                 filter_size=[5, 5],
                                 stride=[2, 2],
                                 crop=(2, 2),
                                 W=initmethod(0.02),
                                 nonlinearity=lrelu(0.2),
                                 name='dec_conv4'),
                     name='bnorm_dc4')

    # Output head: each colour channel gets two sigmoid maps; G also sees
    # R, and B sees R and G (autoregressive channel chaining).
    R = NL(MDCL(l_dec_conv4, num_filters=2, scales=[2, 3, 4], name='R'),
           sigmoid)
    G = NL(
        ESL([
            MDCL(l_dec_conv4, num_filters=2, scales=[2, 3, 4], name='G_a'),
            MDCL(R, num_filters=2, scales=[2, 3, 4], name='G_b')
        ]), sigmoid)
    B = NL(
        ESL([
            MDCL(l_dec_conv4, num_filters=2, scales=[2, 3, 4], name='B_a'),
            MDCL(CL([R, G]), num_filters=2, scales=[2, 3, 4], name='B_b')
        ]), sigmoid)
    # The two maps per channel parameterize a per-pixel Beta distribution.
    l_out = CL([
        beta_layer(SL(R, slice(0, 1), 1), SL(R, slice(1, 2), 1)),
        beta_layer(SL(G, slice(0, 1), 1), SL(G, slice(1, 2), 1)),
        beta_layer(SL(B, slice(0, 1), 1), SL(B, slice(1, 2), 1))
    ])

    # Discriminator head on the last encoder conv, with minibatch
    # discrimination; 3-way softmax output.
    minibatch_discrim = MinibatchLayer(
        lasagne.layers.GlobalPoolLayer(l_enc_conv4),
        num_kernels=500,
        name='minibatch_discrim')
    l_discrim = DL(incoming=minibatch_discrim,
                   num_units=3,
                   nonlinearity=lasagne.nonlinearities.softmax,
                   b=None,
                   W=initmethod(0.02),
                   name='discrimi')

    return {
        'l_in': l_in,
        'l_out': l_out,
        'l_mu': l_enc_mu,
        'l_ls': l_enc_logsigma,
        'l_Z': l_Z,
        'l_IAF_mu': l_IAF_mu,
        'l_IAF_ls': l_IAF_logsigma,
        'l_Z_IAF': l_Z_IAF,
        'l_introspect': [l_enc_conv1, l_enc_conv2, l_enc_conv3, l_enc_conv4],
        'l_discrim': l_discrim
    }
# Exemple #10
# 0
def MDCL(incoming, num_filters, scales, name, dnn=True):
    """Multiscale Dilated Convolutional Layer.

    Builds parallel 3x3 convolution branches that all share a single
    weight tensor W: a standard undilated conv, an optional 1x1 conv built
    from the spatial mean of W (requested with scale == 0), and one
    dilated conv per remaining scale. Each branch is modulated by its own
    learned per-filter coefficient vector and the branch outputs are
    summed elementwise.

    Parameters
    ----------
    incoming : Layer
        Input layer; its output shape supplies the input channel count.
    num_filters : int
        Number of output filters shared across all branches.
    scales : list of int
        Dilation factors; 0 is a keyword selecting the 1x1 mean-of-W conv.
    name : str
        Base name for the shared weight and per-branch coefficients.
    dnn : bool, optional
        When True, shadow C2D locally with the cuDNN Conv2DDNNLayer.

    Returns
    -------
    Layer
        Elementwise sum (ESL) of all branches.
    """
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D
    # W initialization method--this should also work as Orthogonal('relu'),
    # but I have yet to validate that as thoroughly.
    winit = initmethod(0.02)

    # Initialize each branch coefficient at 1/(1 + len(scales)) so the
    # summed output starts near unit scale.
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))

    # Weight parameter--the primary parameter for this block, shared by
    # every branch below.
    W = theano.shared(lasagne.utils.floatX(
        winit.sample((num_filters,
                      lasagne.layers.get_output_shape(incoming)[1], 3, 3))),
                      name=name + 'W')

    # Primary Convolution Layer--No Dilation
    n = C2D(
        incoming=incoming,
        num_filters=num_filters,
        filter_size=[3, 3],
        stride=[1, 1],
        pad=(1, 1),
        W=W * theano.shared(lasagne.utils.floatX(
            sinit.sample(num_filters)), name + '_coeff_base').dimshuffle(
                0, 'x', 'x', 'x'
            ),  # Note the broadcasting dimshuffle for the num_filter scalars.
        b=None,
        nonlinearity=None,
        name=name + 'base')
    # Remaining branches, one per requested scale.
    nd = []
    for i, scale in enumerate(scales):

        # 0 dilation is not technically defined (or it's just the regular
        # filter); it is used here as a convenient keyword to grab the 1x1
        # mean conv.
        if scale == 0:
            nd.append(
                C2D(incoming=incoming,
                    num_filters=num_filters,
                    filter_size=[1, 1],
                    stride=[1, 1],
                    pad=(0, 0),
                    W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x') *
                    theano.shared(
                        lasagne.utils.floatX(sinit.sample(num_filters)),
                        name + '_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
                    b=None,
                    nonlinearity=None,
                    name=name + str(scale)))
        # Note the dimshuffles in this layer--these are critical as the
        # current DilatedConv2D implementation uses a backward pass, so its
        # W is laid out (in_channels, out_channels, h, w).
        else:
            nd.append(
                lasagne.layers.DilatedConv2DLayer(
                    incoming=lasagne.layers.PadLayer(incoming=incoming,
                                                     width=(scale, scale)),
                    num_filters=num_filters,
                    filter_size=[3, 3],
                    dilation=(scale, scale),
                    W=W.dimshuffle(1, 0, 2, 3) * theano.shared(
                        lasagne.utils.floatX(sinit.sample(num_filters)), name +
                        '_coeff_' + str(scale)).dimshuffle('x', 0, 'x', 'x'),
                    b=None,
                    nonlinearity=None,
                    name=name + str(scale)))
    return ESL(nd + [n])
# Exemple #11
# 0
def DSL(incoming, num_filters, scales, name, dnn=True):
    """Multiscale Downsample Layer.

    Downsamples by a factor of 2 through parallel branches sharing one
    3x3 weight tensor W: a stride-2 3x3 conv, one average-pool-then-conv
    branch per entry of `scales` (the pool size), and a stride-2 1x1 conv
    built from the spatial mean of W. Each branch is modulated by its own
    learned per-filter coefficient vector and the outputs are summed
    elementwise.

    Parameters
    ----------
    incoming : Layer
        Input layer; its output shape supplies the input channel count.
    num_filters : int
        Number of output filters shared across all branches.
    scales : list of int
        Pool sizes for the pooled branches.
    name : str
        Base name for the shared weight and per-branch coefficients.
    dnn : bool, optional
        When True, shadow C2D locally with the cuDNN Conv2DDNNLayer.

    Returns
    -------
    Layer
        Elementwise sum (ESL) of all branches.
    """
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D
    # W initialization method--this should also work as Orthogonal('relu'),
    # but I have yet to validate that as thoroughly.
    winit = initmethod(0.02)

    # Initialize each branch coefficient at 1/(1 + len(scales)).
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))

    # Weight parameter--the primary parameter for this block, shared by
    # every branch below.
    W = theano.shared(lasagne.utils.floatX(
        winit.sample((num_filters,
                      lasagne.layers.get_output_shape(incoming)[1], 3, 3))),
                      name=name + 'W')

    # Main layer--3x3 conv with stride 2
    n = C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[2, 2],
            pad=(1, 1),
            W=W *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_base').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + 'base')

    nd = []
    for i, scale in enumerate(scales):

        # Average-pool at this scale first (no padding for the first
        # scale), then a shared-W 3x3 conv at stride 1.
        p = P2D(
            incoming=incoming,
            pool_size=scale,
            stride=2,
            pad=(1, 1) if i else (0, 0),
            mode='average_exc_pad',
        )

        nd.append(
            C2D(
                incoming=p,
                num_filters=num_filters,
                filter_size=[3, 3],
                stride=(1, 1),
                pad=(1, 1),
                W=W *
                theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                              name + '_coeff_' + str(scale)).dimshuffle(
                                  0, 'x', 'x', 'x'),  #.dimshuffle('x',0),
                b=None,
                nonlinearity=None,
                name=name + str(scale)))

    # Mean-filter branch: stride-2 1x1 conv using the spatial mean of W.
    nd.append(
        C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[1, 1],
            stride=[2, 2],
            pad=(0, 0),
            W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x') *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + '1x1'))

    return ESL(nd + [n])
# Exemple #12
# 0
def USL(incoming, num_filters, scales, name, dnn=True):
    """Multiscale Upsample Layer.

    Upsamples by a factor of 2 through parallel branches sharing one 3x3
    weight tensor W: a 3x3 conv on a 2x-upscaled input, per-scale branches
    on the upscaled input (scale == 0 selects a 1x1 mean-of-W conv,
    scale > 0 a dilated 3x3 conv), and a stride-2 transposed conv straight
    from the original resolution. Each branch is modulated by its own
    learned per-filter coefficient vector and the outputs are summed
    elementwise.

    Parameters
    ----------
    incoming : Layer
        Input layer; its output shape supplies the input channel count.
    num_filters : int
        Number of output filters shared across all branches.
    scales : list of int
        Dilation factors; 0 selects the 1x1 mean-of-W branch.
    name : str
        Base name for the shared weight and per-branch coefficients.
    dnn : bool, optional
        When True, shadow C2D locally with the cuDNN Conv2DDNNLayer.

    Returns
    -------
    Layer
        Elementwise sum (ESL) of all branches.
    """
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D

    # W initialization method--this should also work as Orthogonal('relu'),
    # but I have yet to validate that as thoroughly.
    winit = initmethod(0.02)

    # Initialize each branch coefficient at 1/(1 + len(scales)).
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))

    # Weight parameter--the primary parameter for this block, shared by
    # every branch below.
    W = theano.shared(lasagne.utils.floatX(
        winit.sample((num_filters,
                      lasagne.layers.get_output_shape(incoming)[1], 3, 3))),
                      name=name + 'W')

    # Primary Convolution Layer--No Dilation
    n = C2D(incoming=Upscale2DLayer(incoming, 2),
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[1, 1],
            pad=(1, 1),
            W=W *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_base').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + 'base')
    # Remaining layers
    nd = []
    for i, scale in enumerate(scales):
        if scale == 0:
            # 1x1 conv built from the spatial mean of W.
            nd.append(
                C2D(incoming=Upscale2DLayer(incoming, 2),
                    num_filters=num_filters,
                    filter_size=[1, 1],
                    stride=[1, 1],
                    pad=(0, 0),
                    W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x') *
                    theano.shared(
                        lasagne.utils.floatX(sinit.sample(num_filters)),
                        name + '_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
                    b=None,
                    nonlinearity=None,
                    name=name + '1x1'))
        else:
            # Dilated conv; the dimshuffle adapts W to DilatedConv2DLayer's
            # (in_channels, out_channels, h, w) weight layout.
            nd.append(
                lasagne.layers.DilatedConv2DLayer(
                    incoming=lasagne.layers.PadLayer(incoming=Upscale2DLayer(
                        incoming, 2),
                                                     width=(scale, scale)),
                    num_filters=num_filters,
                    filter_size=[3, 3],
                    dilation=(scale, scale),
                    W=W.dimshuffle(1, 0, 2, 3) * theano.shared(
                        lasagne.utils.floatX(sinit.sample(num_filters)), name +
                        '_coeff_' + str(scale)).dimshuffle('x', 0, 'x', 'x'),
                    b=None,
                    nonlinearity=None,
                    name=name + str(scale)))

    # A single deconv layer is also concatenated here. Like I said, it's a
    # prototype!
    nd.append(
        DeconvLayer(
            incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[2, 2],
            crop=(1, 1),
            W=W.dimshuffle(1, 0, 2, 3) *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_deconv').dimshuffle('x', 0, 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + 'deconv'))

    return ESL(nd + [n])
def get_model(interp=False):
    """Build a plain 2D convolutional VAE/GAN.

    Encoder: four strided 5x5 convolutions (128 -> 1024 filters) and a
    dense layer feed two batch-normalized dense heads producing the latent
    mean and log-sigma; the latent code is a reparameterized Gaussian
    sample. Decoder: dense + reshape to 1024x4x4, then four stride-2
    transposed convolutions ending in a tanh image output. A
    minibatch-discrimination discriminator with a single sigmoid unit taps
    the last encoder conv.

    Args:
        interp: accepted but unused in this body -- presumably kept for
            interface parity with the other get_model variants.

    Returns:
        dict: input/output layers, latent mean/log-sigma, the sampled
        latents, the encoder convs (for introspection), and the
        discriminator head.
    """
    dims, n_channels, n_classes = tuple(
        cfg['dims']), cfg['n_channels'], cfg['n_classes']
    shape = (None, n_channels) + dims
    l_in = lasagne.layers.InputLayer(shape=shape)
    # Encoder: strided 5x5 convs halve the resolution at each step; all but
    # the first are batch-normalized.
    l_enc_conv1 = C2D(incoming=l_in,
                      num_filters=128,
                      filter_size=[5, 5],
                      stride=[2, 2],
                      pad=(2, 2),
                      W=initmethod(0.02),
                      nonlinearity=lrelu(0.2),
                      name='enc_conv1')
    l_enc_conv2 = BN(C2D(incoming=l_enc_conv1,
                         num_filters=256,
                         filter_size=[5, 5],
                         stride=[2, 2],
                         pad=(2, 2),
                         W=initmethod(0.02),
                         nonlinearity=lrelu(0.2),
                         name='enc_conv2'),
                     name='bnorm2')
    l_enc_conv3 = BN(C2D(incoming=l_enc_conv2,
                         num_filters=512,
                         filter_size=[5, 5],
                         stride=[2, 2],
                         pad=(2, 2),
                         W=initmethod(0.02),
                         nonlinearity=lrelu(0.2),
                         name='enc_conv3'),
                     name='bnorm3')
    l_enc_conv4 = BN(C2D(incoming=l_enc_conv3,
                         num_filters=1024,
                         filter_size=[5, 5],
                         stride=[2, 2],
                         pad=(2, 2),
                         W=initmethod(0.02),
                         nonlinearity=lrelu(0.2),
                         name='enc_conv4'),
                     name='bnorm4')
    l_enc_fc1 = BN(DL(incoming=l_enc_conv4,
                      num_units=1000,
                      W=initmethod(0.02),
                      nonlinearity=elu,
                      name='enc_fc1'),
                   name='bnorm_enc_fc1')
    # Latent heads: mean and log-sigma of the approximate posterior.
    l_enc_mu, l_enc_logsigma = [
        BN(DL(incoming=l_enc_fc1,
              num_units=cfg['num_latents'],
              nonlinearity=None,
              name='enc_mu'),
           name='mu_bnorm'),
        BN(DL(incoming=l_enc_fc1,
              num_units=cfg['num_latents'],
              nonlinearity=None,
              name='enc_logsigma'),
           name='ls_bnorm')
    ]

    # Reparameterized Gaussian sample of the latents.
    l_Z = GaussianSampleLayer(l_enc_mu, l_enc_logsigma, name='l_Z')
    # Decoder: project latents to a 1024x4x4 feature map, then upsample
    # with stride-2 transposed convolutions.
    l_dec_fc2 = BN(DL(incoming=l_Z,
                      num_units=1024 * 16,
                      nonlinearity=relu,
                      W=initmethod(0.02),
                      name='l_dec_fc2'),
                   name='bnorm_dec_fc2')
    l_unflatten = lasagne.layers.ReshapeLayer(
        incoming=l_dec_fc2,
        shape=([0], 1024, 4, 4),
    )
    l_dec_conv1 = BN(DeconvLayer(incoming=l_unflatten,
                                 num_filters=512,
                                 filter_size=[5, 5],
                                 stride=[2, 2],
                                 crop=(2, 2),
                                 W=initmethod(0.02),
                                 nonlinearity=relu,
                                 name='dec_conv1'),
                     name='bnorm_dc1')
    l_dec_conv2 = BN(DeconvLayer(incoming=l_dec_conv1,
                                 num_filters=256,
                                 filter_size=[5, 5],
                                 stride=[2, 2],
                                 crop=(2, 2),
                                 W=initmethod(0.02),
                                 nonlinearity=relu,
                                 name='dec_conv2'),
                     name='bnorm_dc2')
    l_dec_conv3 = BN(DeconvLayer(incoming=l_dec_conv2,
                                 num_filters=128,
                                 filter_size=[5, 5],
                                 stride=[2, 2],
                                 crop=(2, 2),
                                 W=initmethod(0.02),
                                 nonlinearity=relu,
                                 name='dec_conv3'),
                     name='bnorm_dc3')
    # Output image: 3-channel tanh (values in [-1, 1]).
    l_out = DeconvLayer(incoming=l_dec_conv3,
                        num_filters=3,
                        filter_size=[5, 5],
                        stride=[2, 2],
                        crop=(2, 2),
                        W=initmethod(0.02),
                        b=None,
                        nonlinearity=lasagne.nonlinearities.tanh,
                        name='dec_out')

    # Discriminator head on the last encoder conv, with minibatch
    # discrimination; single sigmoid output (real/fake).
    minibatch_discrim = MinibatchLayer(
        lasagne.layers.GlobalPoolLayer(l_enc_conv4),
        num_kernels=500,
        name='minibatch_discrim')
    l_discrim = DL(incoming=minibatch_discrim,
                   num_units=1,
                   nonlinearity=lasagne.nonlinearities.sigmoid,
                   b=None,
                   W=initmethod(),
                   name='discrimi')

    return {
        'l_in': l_in,
        'l_out': l_out,
        'l_mu': l_enc_mu,
        'l_ls': l_enc_logsigma,
        'l_latents': l_Z,
        'l_introspect': [l_enc_conv1, l_enc_conv2, l_enc_conv3, l_enc_conv4],
        'l_discrim': l_discrim
    }
# Exemple #14
# 0
def DSL(incoming, num_filters, scales, name):
    """Multiscale downsampling summation layer.

    Builds parallel 2x-downsampling branches that all share one 3x3 weight
    tensor ``W`` (each branch rescales it with its own learnable per-filter
    coefficient) and returns their elementwise sum:

      * a strided 3x3 convolution (the "base" branch),
      * one average-pool + 3x3 convolution per entry in ``scales``,
      * a strided 1x1 convolution using the spatial mean of ``W``.

    Parameters
    ----------
    incoming : lasagne layer
        Input layer; its second output-shape entry is the channel count.
    num_filters : int
        Number of output filters for every branch.
    scales : sequence of int
        Pool sizes for the pooled branches.
    name : str
        Prefix for parameter and layer names.

    Returns
    -------
    ESL layer summing all branches.
    """
    winit = initmethod(0.02)
    # Each branch coefficient starts at 1/(1+len(scales)) so the initial
    # summed output is roughly an average over the branches.
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))
    # Number of incoming channels
    ni = lasagne.layers.get_output_shape(incoming)[1]
    # Single shared 3x3 weight tensor; every branch reuses it (rescaled).
    W = theano.shared(lasagne.utils.floatX(
        winit.sample((num_filters, ni, 3, 3))),
        name=name + 'W')
    # Base branch: strided 3x3 convolution.
    n = C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[2, 2],
            pad=(1, 1),
            W=W *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_base').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + 'base')

    nd = []
    for i, scale in enumerate(scales):
        # Pooled branch: average-pool at this scale, then a shared-weight
        # 3x3 convolution. Only the first scale uses pad (0, 0);
        # NOTE(review): presumably this keeps output shapes aligned across
        # branches -- confirm against the caller's input sizes.
        p = P2D(
            incoming=incoming,
            pool_size=scale,
            stride=2,
            pad=(1, 1) if i else (0, 0),
            mode='average_exc_pad',
        )

        nd.append(
            C2D(
                incoming=p,
                num_filters=num_filters,
                filter_size=[3, 3],
                stride=(1, 1),
                pad=(1, 1),
                W=W *
                theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                              name + '_coeff_' + str(scale)).dimshuffle(
                                  0, 'x', 'x', 'x'),
                b=None,
                nonlinearity=None,
                name=name + str(scale)))

    # 1x1 branch: uses the spatial mean of W so it stays tied to the
    # shared weight tensor.
    nd.append(
        C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[1, 1],
            stride=[2, 2],
            pad=(0, 0),
            W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x') *
            theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                          name + '_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
            b=None,
            nonlinearity=None,
            name=name + '1x1'))

    return ESL(nd + [n])
def get_model(dnn=True):
    """Build the VAE/GAN model graph and return its named layers.

    Constructs a convolutional encoder (four stride-2 convs -> FC ->
    mu/logsigma heads), a Gaussian sampling layer over the latents, a
    decoder back to a 3-channel tanh image, and a minibatch-discrimination
    discriminator head fed from the deepest encoder features.

    Parameters
    ----------
    dnn : bool
        If True, use cuDNN-backed layers (Conv2DDNNLayer and the project's
        DeconvLayer); otherwise use plain lasagne Conv2DLayer and a
        TransposedConv + slice decoder.

    Returns
    -------
    dict
        Named layers: 'l_in', 'l_out', 'l_mu', 'l_ls', 'l_Z',
        'l_introspect' (encoder convs), 'l_discrim'.
    """
    if dnn:
        import lasagne.layers.dnn
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D
        # NOTE(review): theano.sandbox.cuda was removed in later Theano
        # releases; this branch only works on the old CUDA backend.
        from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
                                           host_from_gpu,
                                           gpu_contiguous, HostFromGpu,
                                           gpu_alloc_empty)
        from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConv, GpuDnnConvGradI, dnn_conv, dnn_pool
        from layers import DeconvLayer
    else:
        import lasagne.layers
        from lasagne.layers import Conv2DLayer as C2D
    
    # Input shape is driven by the module-level cfg dict; n_classes is
    # unpacked but not used below.
    dims, n_channels, n_classes = tuple(cfg['dims']), cfg['n_channels'], cfg['n_classes']
    shape = (None, n_channels)+dims
    l_in = lasagne.layers.InputLayer(shape=shape)
    # Encoder: four stride-2 5x5 convolutions (128 -> 256 -> 512 -> 1024
    # filters) with leaky-ReLU; batch norm on all but the first.
    l_enc_conv1 = C2D(
        incoming = l_in,
        num_filters = 128,
        filter_size = [5,5],
        stride = [2,2],
        pad = (2,2),
        W = initmethod(0.02),
        nonlinearity = lrelu(0.2),
        flip_filters=False,
        name =  'enc_conv1'
        )
    l_enc_conv2 = BN(C2D(
        incoming = l_enc_conv1,
        num_filters = 256,
        filter_size = [5,5],
        stride = [2,2],
        pad = (2,2),
        W = initmethod(0.02),
        nonlinearity = lrelu(0.2),
        flip_filters=False,
        name =  'enc_conv2'
        ),name = 'bnorm2')
    l_enc_conv3 = BN(C2D(
        incoming = l_enc_conv2,
        num_filters = 512,
        filter_size = [5,5],
        stride = [2,2],
        pad = (2,2),
        W = initmethod(0.02),
        nonlinearity = lrelu(0.2),
        flip_filters=False,
        name =  'enc_conv3'
        ),name = 'bnorm3')
    l_enc_conv4 = BN(C2D(
        incoming = l_enc_conv3,
        num_filters = 1024,
        filter_size = [5,5],
        stride = [2,2],
        pad = (2,2),
        W = initmethod(0.02),
        nonlinearity = lrelu(0.2),
        flip_filters=False,
        name =  'enc_conv4'
        ),name = 'bnorm4')
    # Fully-connected bottleneck, then separate mu / logsigma heads of
    # cfg['num_latents'] units each (linear, batch-normed).
    l_enc_fc1 = BN(DL(
        incoming = l_enc_conv4,
        num_units = 1000,
        W = initmethod(0.02),
        nonlinearity = elu,
        name =  'enc_fc1'
        ),
        name = 'bnorm_enc_fc1')
    l_enc_mu,l_enc_logsigma = [BN(DL(incoming = l_enc_fc1,num_units=cfg['num_latents'],nonlinearity = None,name='enc_mu'),name='mu_bnorm'),
                               BN(DL(incoming = l_enc_fc1,num_units=cfg['num_latents'],nonlinearity = None,name='enc_logsigma'),name='ls_bnorm')]

    # Sample latents from (mu, logsigma) — presumably the VAE
    # reparameterization trick; see GaussianSampleLayer's definition.
    l_Z = GaussianSampleLayer(l_enc_mu, l_enc_logsigma, name='l_Z')
    # Decoder: project latents to 1024*16 units, reshape to a
    # (batch, 1024, 4, 4) feature map, then upsample back to the image.
    l_dec_fc2 = BN(DL(
        incoming = l_Z,
        num_units = 1024*16,
        nonlinearity = relu,
        W=initmethod(0.02),
        name='l_dec_fc2'),
        name = 'bnorm_dec_fc2') 
    l_unflatten = lasagne.layers.ReshapeLayer(
        incoming = l_dec_fc2,
        shape = ([0],1024,4,4),
        )
    if dnn:
        # cuDNN path: project DeconvLayer, stride-2 5x5, crop (2,2).
        l_dec_conv1 = BN(DeconvLayer(
            incoming = l_unflatten,
            num_filters = 512,
            filter_size = [5,5],
            stride = [2,2],
            crop = (2,2),
            W = initmethod(0.02),
            nonlinearity = relu,
            name =  'dec_conv1'
            ),name = 'bnorm_dc1')
        l_dec_conv2 = BN(DeconvLayer(
            incoming = l_dec_conv1,
            num_filters = 256,
            filter_size = [5,5],
            stride = [2,2],
            crop = (2,2),
            W = initmethod(0.02),
            nonlinearity = relu,
            name =  'dec_conv2'
            ),name = 'bnorm_dc2')
        l_dec_conv3 = BN(DeconvLayer(
            incoming = l_dec_conv2,
            num_filters = 128,
            filter_size = [5,5],
            stride = [2,2],
            crop = (2,2),
            W = initmethod(0.02),
            nonlinearity = relu,
            name =  'dec_conv3'
            ),name = 'bnorm_dc3')
        # Final layer: no bias, tanh output (3-channel image in [-1, 1]).
        l_out = DeconvLayer(
            incoming = l_dec_conv3,
            num_filters = 3,
            filter_size = [5,5],
            stride = [2,2],
            crop = (2,2),
            W = initmethod(0.02),
            b = None,
            nonlinearity = lasagne.nonlinearities.tanh,
            name =  'dec_out'
            )
    else:    
        # CPU path: transposed conv with crop (1,1), then SL slices
        # (dropping the first row/column on axes 2 and 3) —
        # NOTE(review): presumably to match the cuDNN branch's output
        # shapes; confirm on the actual cfg['dims'].
        l_dec_conv1 = SL(SL(BN(TC2D(
            incoming = l_unflatten,
            num_filters = 512,
            filter_size = [5,5],
            stride = [2,2],
            crop = (1,1),
            W = initmethod(0.02),
            nonlinearity = relu,
            name =  'dec_conv1'
            ),name = 'bnorm_dc1'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
        l_dec_conv2 = SL(SL(BN(TC2D(
            incoming = l_dec_conv1,
            num_filters = 256,
            filter_size = [5,5],
            stride = [2,2],
            crop = (1,1),
            W = initmethod(0.02),
            nonlinearity = relu,
            name =  'dec_conv2'
            ),name = 'bnorm_dc2'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
        l_dec_conv3 = SL(SL(BN(TC2D(
            incoming = l_dec_conv2,
            num_filters = 128,
            filter_size = [5,5],
            stride = [2,2],
            crop = (1,1),
            W = initmethod(0.02),
            nonlinearity = relu,
            name =  'dec_conv3'
            ),name = 'bnorm_dc3'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
        l_out = SL(SL(TC2D(
            incoming = l_dec_conv3,
            num_filters = 3,
            filter_size = [5,5],
            stride = [2,2],
            crop = (1,1),
            W = initmethod(0.02),
            b = None,
            nonlinearity = lasagne.nonlinearities.tanh,
            name =  'dec_out'
            ),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
# l_in,num_filters=1,filter_size=[5,5],stride=[2,2],crop=[1,1],W=dc.W,b=None,nonlinearity=None)
    # Discriminator head: minibatch discrimination over globally pooled
    # deepest encoder features, then a single sigmoid unit (no bias).
    minibatch_discrim =  MinibatchLayer(lasagne.layers.GlobalPoolLayer(l_enc_conv4), num_kernels=500,name='minibatch_discrim')    
    l_discrim = DL(incoming = minibatch_discrim, 
        num_units = 1,
        nonlinearity = lasagne.nonlinearities.sigmoid,
        b = None,
        W=initmethod(),
        name = 'discrimi')
        

        
    # NOTE(review): a sibling builder in this file exposes the latents
    # under 'l_latents'; here the key is 'l_Z' -- confirm callers expect it.
    return {'l_in':l_in, 
            'l_out':l_out,
            'l_mu':l_enc_mu,
            'l_ls':l_enc_logsigma,            
            'l_Z':l_Z,
            'l_introspect':[l_enc_conv1, l_enc_conv2,l_enc_conv3,l_enc_conv4],
            'l_discrim' : l_discrim}