# Example #1
def conv2D(x,
           name,
           kernel_size=(3, 3),
           num_filters=32,
           strides=(1, 1),
           activation=STANDARD_NONLINEARITY,
           normalisation=tf.identity,
           normalise_post_activation=False,
           dropout_p=None,
           padding="SAME",
           weight_init='he_normal',
           add_bias=True,
           **kwargs):
    '''
    Standard nets2D convolutional layer.

    Args:
        x: Input tensor; its last dimension is taken as the channel count.
        name: Variable scope name for the layer's variables.
        kernel_size: Spatial extent (height, width) of the filters.
        num_filters: Number of output feature maps.
        strides: Spatial strides (height, width) of the convolution.
        activation: Non-linearity applied to the (normalised) output.
        normalisation: Normalisation op (e.g. batch norm); identity by default.
        normalise_post_activation: If True, normalise after the activation
            instead of before it.
        dropout_p: Keep probability for dropout; None disables dropout.
        padding: Convolution padding mode ('SAME' or 'VALID').
        weight_init: Weight initialisation scheme passed to utils.
        add_bias: Whether to add a bias term; automatically disabled when
            batch norm is used, because the norm's shift makes it redundant.
        kwargs: Forwarded to normalisation and dropout (can have training,
            and potentially other normalisation parameters).

    Returns:
        The output tensor of the layer.
    '''

    bottom_num_filters = x.get_shape().as_list()[-1]

    weight_shape = [
        kernel_size[0], kernel_size[1], bottom_num_filters, num_filters
    ]
    bias_shape = [num_filters]

    # NHWC layout: batch and channel dimensions are never strided.
    strides_augm = [1, strides[0], strides[1], 1]

    with tf.variable_scope(name):

        weights = utils.get_weight_variable(weight_shape,
                                            name='W',
                                            type=weight_init,
                                            regularize=True)
        op = tf.nn.conv2d(x,
                          filter=weights,
                          strides=strides_augm,
                          padding=padding)

        biases = None  # so there is always something for summary
        if add_bias and normalisation is tfnorm.batch_norm:
            # Batch norm's beta parameter subsumes a conv bias.
            logging.info('Turning off bias because using batch norm.')
            add_bias = False

        if add_bias:
            biases = utils.get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)

        if not normalise_post_activation:
            op = activation(normalisation(op, **kwargs))
        else:
            op = normalisation(activation(op), **kwargs)

        if dropout_p is not None:
            op = dropout(op, keep_prob=dropout_p, **kwargs)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op
def dilated_conv2D(bottom,
                   name,
                   kernel_size=(3, 3),
                   num_filters=32,
                   rate=2,
                   activation=STANDARD_NONLINEARITY,
                   normalisation=tf.identity,
                   normalise_post_activation=False,
                   dropout_p=None,
                   padding="SAME",
                   weight_init='he_normal',
                   add_bias=True,
                   **kwargs):
    '''
    nets2D dilated convolution layer. This layer can be used to increase the
    receptive field of a network. It is described in detail in this paper:
    Yu et al, Multi-Scale Context Aggregation by Dilated Convolutions, 2015
    (https://arxiv.org/pdf/1511.07122.pdf)

    Args:
        bottom: Input tensor; dimension 3 is taken as the channel count.
        name: Variable scope name for the layer's variables.
        kernel_size: Spatial extent (height, width) of the filters.
        num_filters: Number of output feature maps.
        rate: Dilation (atrous) rate of the convolution.
        activation: Non-linearity applied to the (normalised) output.
        normalisation: Normalisation op (e.g. batch norm); identity by default.
        normalise_post_activation: If True, normalise after the activation
            instead of before it.
        dropout_p: Keep probability for dropout; None disables dropout.
        padding: Convolution padding mode ('SAME' or 'VALID').
        weight_init: Weight initialisation scheme passed to utils.
        add_bias: Whether to add a bias term; automatically disabled when
            batch norm is used, because the norm's shift makes it redundant.
        kwargs: Forwarded to normalisation and dropout (e.g. training).

    Returns:
        The output tensor of the layer.
    '''

    bottom_num_filters = bottom.get_shape().as_list()[3]

    weight_shape = [
        kernel_size[0], kernel_size[1], bottom_num_filters, num_filters
    ]
    bias_shape = [num_filters]

    with tf.variable_scope(name):

        weights = utils.get_weight_variable(weight_shape,
                                            name='W',
                                            type=weight_init,
                                            regularize=True)

        op = tf.nn.atrous_conv2d(bottom,
                                 filters=weights,
                                 rate=rate,
                                 padding=padding)

        biases = None  # so there is always something for summary
        # Consistent with conv2D: batch norm's beta subsumes a conv bias.
        if add_bias and normalisation is tfnorm.batch_norm:
            logging.info('Turning off bias because using batch norm.')
            add_bias = False

        if add_bias:
            biases = utils.get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)

        if not normalise_post_activation:
            op = activation(normalisation(op, **kwargs))
        else:
            op = normalisation(activation(op), **kwargs)

        if dropout_p is not None:
            op = dropout(op, keep_prob=dropout_p, **kwargs)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op
def conv3D(x,
           name,
           kernel_size=(3, 3, 3),
           num_filters=32,
           strides=(1, 1, 1),
           activation=STANDARD_NONLINEARITY,
           normalisation=tf.identity,
           normalise_post_activation=False,
           dropout_p=None,
           padding="SAME",
           weight_init='he_normal',
           add_bias=True,
           **kwargs):
    '''
    Standard nets3D convolutional layer.

    Args:
        x: Input tensor; its last dimension is taken as the channel count.
        name: Variable scope name for the layer's variables.
        kernel_size: Spatial extent (depth, height, width) of the filters.
        num_filters: Number of output feature maps.
        strides: Spatial strides (depth, height, width) of the convolution.
        activation: Non-linearity applied to the (normalised) output.
        normalisation: Normalisation op (e.g. batch norm); identity by default.
        normalise_post_activation: If True, normalise after the activation
            instead of before it.
        dropout_p: Keep probability for dropout; None disables dropout.
        padding: Convolution padding mode ('SAME' or 'VALID').
        weight_init: Weight initialisation scheme passed to utils.
        add_bias: Whether to add a bias term; automatically disabled when
            batch norm is used, because the norm's shift makes it redundant.
        kwargs: Forwarded to normalisation and dropout (e.g. training).

    Returns:
        The output tensor of the layer.
    '''

    bottom_num_filters = x.get_shape().as_list()[-1]

    weight_shape = [
        kernel_size[0], kernel_size[1], kernel_size[2], bottom_num_filters,
        num_filters
    ]
    bias_shape = [num_filters]

    # NDHWC layout: batch and channel dimensions are never strided.
    strides_augm = [1, strides[0], strides[1], strides[2], 1]

    with tf.variable_scope(name):

        weights = utils.get_weight_variable(weight_shape,
                                            name='W',
                                            type=weight_init,
                                            regularize=True)
        op = tf.nn.conv3d(x,
                          filter=weights,
                          strides=strides_augm,
                          padding=padding)

        biases = None  # so there is always something for summary
        # Consistent with conv2D: batch norm's beta subsumes a conv bias.
        if add_bias and normalisation is tfnorm.batch_norm:
            logging.info('Turning off bias because using batch norm.')
            add_bias = False

        if add_bias:
            biases = utils.get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)

        if not normalise_post_activation:
            op = activation(normalisation(op, **kwargs))
        else:
            op = normalisation(activation(op), **kwargs)

        if dropout_p is not None:
            op = dropout(op, keep_prob=dropout_p, **kwargs)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op
def dense_layer(bottom,
                name,
                hidden_units=512,
                activation=STANDARD_NONLINEARITY,
                normalisation=tfnorm.batch_norm,
                normalise_post_activation=False,
                dropout_p=None,
                weight_init='he_normal',
                add_bias=True,
                **kwargs):
    '''
    Dense a.k.a. fully connected layer. The input is flattened first.

    Args:
        bottom: Input tensor of any rank; it is flattened to 2D.
        name: Variable scope name for the layer's variables.
        hidden_units: Number of output units.
        activation: Non-linearity applied to the (normalised) output.
        normalisation: Normalisation op; batch norm by default here.
        normalise_post_activation: If True, normalise after the activation
            instead of before it.
        dropout_p: Keep probability for dropout; None disables dropout.
        weight_init: Weight initialisation scheme passed to utils.
        add_bias: Whether to add a bias term; automatically disabled when
            batch norm is used, because the norm's shift makes it redundant.
        kwargs: Forwarded to normalisation and dropout (e.g. training).

    Returns:
        The output tensor of the layer.
    '''

    bottom_flat = utils.flatten(bottom)
    bottom_rhs_dim = utils.get_rhs_dim(bottom_flat)

    weight_shape = [bottom_rhs_dim, hidden_units]
    bias_shape = [hidden_units]

    with tf.variable_scope(name):

        weights = utils.get_weight_variable(weight_shape,
                                            name='W',
                                            type=weight_init,
                                            regularize=True)

        op = tf.matmul(bottom_flat, weights)

        biases = None  # so there is always something for summary
        # Consistent with conv2D: batch norm's beta subsumes the bias. This
        # matters here because batch norm is this layer's default.
        if add_bias and normalisation is tfnorm.batch_norm:
            logging.info('Turning off bias because using batch norm.')
            add_bias = False

        if add_bias:
            biases = utils.get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)

        if not normalise_post_activation:
            op = activation(normalisation(op, **kwargs))
        else:
            op = normalisation(activation(op), **kwargs)

        if dropout_p is not None:
            op = dropout(op, keep_prob=dropout_p, **kwargs)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op
def transposed_conv3D(bottom,
                      name,
                      kernel_size=(4, 4, 4),
                      num_filters=32,
                      strides=(2, 2, 2),
                      output_shape=None,
                      activation=STANDARD_NONLINEARITY,
                      normalisation=tf.identity,
                      normalise_post_activation=False,
                      dropout_p=None,
                      padding="SAME",
                      weight_init='he_normal',
                      add_bias=True,
                      **kwargs):
    '''
    Standard nets3D transposed (also known as deconvolution) layer. Default
    behaviour upsamples the input by a factor of 2 in each spatial dimension.

    Args:
        bottom: Input tensor of shape (batch, depth, height, width, channels).
        name: Variable scope name for the layer's variables.
        kernel_size: Spatial extent (depth, height, width) of the filters.
        num_filters: Number of output feature maps.
        strides: Spatial upsampling factors (depth, height, width).
        output_shape: Explicit output shape; if None it is derived from the
            input shape and strides.
        activation: Non-linearity applied to the (normalised) output.
        normalisation: Normalisation op (e.g. batch norm); identity by default.
        normalise_post_activation: If True, normalise after the activation
            instead of before it.
        dropout_p: Keep probability for dropout; None disables dropout.
        padding: Convolution padding mode ('SAME' or 'VALID').
        weight_init: Weight initialisation scheme passed to utils.
        add_bias: Whether to add a bias term; automatically disabled when
            batch norm is used, because the norm's shift makes it redundant.
        kwargs: Forwarded to normalisation and dropout (e.g. training).

    Returns:
        The output tensor of the layer.
    '''

    bottom_shape = bottom.get_shape().as_list()

    if output_shape is None:
        # Batch size is dynamic, so build the output shape as a tensor.
        batch_size = tf.shape(bottom)[0]
        output_shape = tf.stack([
            batch_size, bottom_shape[1] * strides[0],
            bottom_shape[2] * strides[1], bottom_shape[3] * strides[2],
            num_filters
        ])

    bottom_num_filters = bottom_shape[4]

    # Transposed convolutions swap the filter's in/out channel order.
    weight_shape = [
        kernel_size[0], kernel_size[1], kernel_size[2], num_filters,
        bottom_num_filters
    ]

    bias_shape = [num_filters]

    strides_augm = [1, strides[0], strides[1], strides[2], 1]

    with tf.variable_scope(name):

        weights = utils.get_weight_variable(weight_shape,
                                            name='W',
                                            type=weight_init,
                                            regularize=True)

        op = tf.nn.conv3d_transpose(bottom,
                                    filter=weights,
                                    output_shape=output_shape,
                                    strides=strides_augm,
                                    padding=padding)

        biases = None  # so there is always something for summary
        # Consistent with conv2D: batch norm's beta subsumes a conv bias.
        if add_bias and normalisation is tfnorm.batch_norm:
            logging.info('Turning off bias because using batch norm.')
            add_bias = False

        if add_bias:
            biases = utils.get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)

        if not normalise_post_activation:
            op = activation(normalisation(op, **kwargs))
        else:
            op = normalisation(activation(op), **kwargs)

        if dropout_p is not None:
            op = dropout(op, keep_prob=dropout_p, **kwargs)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op