Example #1
import lasagne
from lasagne import layers

import our_layers  # project-local module providing GreyscaleLayer and Unpool2DLayer


def define_model(input_var, **kwargs):
    """Defines the model and returns (network, validation network output).

    - Return layers.get_output(final_layer_name) if the validation and
      training network outputs are the same.
    - For example, return layers.get_output(final_layer_name, deterministic=True)
      if there is a dropout layer.
    - Use **kwargs to pass model-specific parameters.
    """

    image_size = 32
    conv_filter_count = 100
    conv_filter_size = 5
    pool_size = 2
    n_dense_units = 3000

    input = layers.InputLayer(shape=(None, 3, image_size, image_size),
                              input_var=input_var)

    greyscale_input = our_layers.GreyscaleLayer(
        incoming=input,
        random_greyscale=True,
    )

    conv1 = layers.Conv2DLayer(
        incoming=greyscale_input,
        num_filters=conv_filter_count,
        filter_size=conv_filter_size,
        stride=1,
        nonlinearity=lasagne.nonlinearities.sigmoid,
    )

    pool1 = layers.MaxPool2DLayer(
        incoming=conv1,
        pool_size=pool_size,
        stride=pool_size,
    )

    dense1 = layers.DenseLayer(
        incoming=pool1,
        num_units=n_dense_units,
        nonlinearity=lasagne.nonlinearities.rectify,
    )

    # The final 'valid' deconvolution needs inputs of spatial size
    # image_size + conv_filter_size - 1 to map back to image_size, and the
    # unpooling step doubles the spatial size, so the dense layer below
    # produces feature maps at half that size (integer division keeps
    # num_units and the reshape dimensions ints under Python 3).
    pre_unpool1 = layers.DenseLayer(
        incoming=dense1,
        num_units=conv_filter_count * (image_size + conv_filter_size - 1)**2 //
        (pool_size * pool_size),
        nonlinearity=lasagne.nonlinearities.linear,
    )

    pre_unpool1 = layers.ReshapeLayer(
        incoming=pre_unpool1,
        shape=(input_var.shape[0], conv_filter_count) +
        ((image_size + conv_filter_size - 1) // 2,
         (image_size + conv_filter_size - 1) // 2),
    )

    unpool1 = our_layers.Unpool2DLayer(
        incoming=pre_unpool1,
        kernel_size=2,
    )

    deconv1 = layers.Conv2DLayer(
        incoming=unpool1,
        num_filters=3,
        filter_size=conv_filter_size,
        stride=1,
        nonlinearity=lasagne.nonlinearities.sigmoid,
    )

    output = layers.ReshapeLayer(incoming=deconv1, shape=input_var.shape)

    return (output, layers.get_output(output))
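
Since this network has no dropout, the training and validation outputs coincide, and the returned pair can be fed straight into a standard Theano training setup. The following is only an illustrative sketch, assuming the model is trained as a colourization-style autoencoder with a squared-error reconstruction loss against the original colour images; none of these variable names come from the original project:

import theano
import theano.tensor as T
import lasagne
from lasagne import layers

input_var = T.tensor4('inputs')
network, val_output = define_model(input_var)

# Training-time output; identical to val_output here because no dropout is used.
train_output = layers.get_output(network)

# Squared-error reconstruction loss against the (colour) input images.
loss = lasagne.objectives.squared_error(train_output, input_var).mean()
val_loss = lasagne.objectives.squared_error(val_output, input_var).mean()

params = layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)

train_fn = theano.function([input_var], loss, updates=updates)
val_fn = theano.function([input_var], val_loss)
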
Example #2
import lasagne
from lasagne import layers

import our_layers  # project-local module providing GreyscaleLayer and Unpool2DLayer


def define_model(input_var, **kwargs):
    """Defines the model and returns (network, validation network output).

    - Return layers.get_output(final_layer_name) if the validation and
      training network outputs are the same.
    - For example, return layers.get_output(final_layer_name, deterministic=True)
      if there is a dropout layer.
    - Use **kwargs to pass model-specific parameters.
    """
    
    conv1_filter_count = 100
    conv1_filter_size = 5
    pool1_size = 2
    
    conv2_filter_count = 100
    conv2_filter_size = 5
    pool2_size = 2
    
    n_dense_units = 3000
    
    batch_size = input_var.shape[0]
    image_size = 32
    # note: ignore_border is True by default, so sizes are rounded down
    after_pool1 = image_size // pool1_size
    after_pool2 = after_pool1 // pool2_size
    
    input = layers.InputLayer(
        shape = (None, 3, image_size, image_size),
        input_var = input_var
    )
    
    greyscale_input = our_layers.GreyscaleLayer(
        incoming = input,
        random_greyscale = True,
    )
    
    conv1 = layers.Conv2DLayer(
        incoming = greyscale_input,
        num_filters = conv1_filter_count,
        filter_size = conv1_filter_size,
        stride = 1,
        pad = 'same',
        nonlinearity = lasagne.nonlinearities.tanh,
    )
    
    pool1 = layers.MaxPool2DLayer(
        incoming = conv1,
        pool_size = pool1_size,
        stride = pool1_size,
    )
    
    conv2 = layers.Conv2DLayer(
        incoming = pool1,
        num_filters = conv2_filter_count,
        filter_size = conv2_filter_size,
        stride = 1,
        pad = 'same',
        nonlinearity = lasagne.nonlinearities.tanh,
    )
    
    pool2 = layers.MaxPool2DLayer(
        incoming = conv2,
        pool_size = pool2_size,
        stride = pool2_size,
    )
    
    dense1 = layers.DenseLayer(
        incoming = pool2,
        num_units = n_dense_units, 
        nonlinearity = lasagne.nonlinearities.tanh,
    )
    
    pre_unpool2 = layers.DenseLayer(
        incoming = dense1,
        num_units = conv2_filter_count * (after_pool2 ** 2),
        nonlinearity = lasagne.nonlinearities.tanh,
    )

    pre_unpool2 = layers.ReshapeLayer(
        incoming = pre_unpool2, 
        shape = (batch_size, conv2_filter_count) + (after_pool2, after_pool2),
    )
    
    unpool2 = our_layers.Unpool2DLayer(
        incoming = pre_unpool2,
        kernel_size = pool2_size,
    )

    deconv2 = layers.Conv2DLayer(
        incoming = unpool2,
        num_filters = conv1_filter_count,
        filter_size = conv2_filter_size,
        stride = 1,
        pad = 'same',
        nonlinearity = lasagne.nonlinearities.tanh,
    )
    
    unpool1 = our_layers.Unpool2DLayer(
        incoming = deconv2,
        kernel_size = pool1_size,
    )

    deconv1 = layers.Conv2DLayer(
        incoming = unpool1,
        num_filters = 3,
        filter_size = conv1_filter_size,
        stride = 1,
        pad = 'same',
        nonlinearity = lasagne.nonlinearities.tanh,
    )
  
    output = layers.ReshapeLayer(
        incoming = deconv1,
        shape = input_var.shape
    )
    
    return (output, layers.get_output(output))
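
Both examples depend on the project-local our_layers module, which is not shown in this listing. As an illustration only, an Unpool2DLayer that upscales by repeating each value kernel_size times along both spatial axes (the behaviour suggested by how it is paired with the max-pooling layers above) could look roughly like the sketch below; the actual implementation in our_layers, and the GreyscaleLayer that presumably converts inputs to greyscale at random, may well differ:

import theano.tensor as T
from lasagne import layers

class Unpool2DLayer(layers.Layer):
    """Illustrative sketch only: upscale a 4D tensor by repeating each value
    kernel_size times along the two trailing (spatial) axes."""

    def __init__(self, incoming, kernel_size, **kwargs):
        super(Unpool2DLayer, self).__init__(incoming, **kwargs)
        self.kernel_size = kernel_size

    def get_output_shape_for(self, input_shape):
        batch, channels, rows, cols = input_shape
        k = self.kernel_size
        return (batch, channels,
                rows * k if rows is not None else None,
                cols * k if cols is not None else None)

    def get_output_for(self, input, **kwargs):
        k = self.kernel_size
        # T.repeat mirrors numpy.repeat and works on symbolic tensors.
        output = T.repeat(input, k, axis=2)
        return T.repeat(output, k, axis=3)

Lasagne's Upscale2DLayer provides similar repeat-based upscaling, which is why this is a plausible stand-in, but the real layer may do something more elaborate (for example, reusing the pooling switches from the encoder).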