Code example #1
def dense_layer(bottom,
                name,
                hidden_units=512,
                activation=tf.nn.relu,
                weight_init='he_normal',
                add_bias=True):
    '''
    Dense a.k.a. fully connected layer
    '''

    bottom_flat = utils.flatten(bottom)
    bottom_rhs_dim = utils.get_rhs_dim(bottom_flat)

    weight_shape = [bottom_rhs_dim, hidden_units]
    bias_shape = [hidden_units]

    with tf.variable_scope(name):

        weights = get_weight_variable(weight_shape,
                                      name='W',
                                      type=weight_init,
                                      regularize=True)

        op = tf.matmul(bottom_flat, weights)

        biases = None
        if add_bias:
            biases = get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)
        op = activation(op)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op
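A minimal usage sketch for the layer above, assuming a TF 1.x graph and that this dense_layer and the project's utils helpers are in scope; the input shape and layer names are illustrative:

import tensorflow as tf

# Hypothetical usage (TF 1.x): the layer flattens any higher-rank input first.
x = tf.placeholder(tf.float32, [None, 7, 7, 64])       # e.g. a conv feature map
fc1 = dense_layer(x, name='fc1', hidden_units=256)     # -> [None, 256], ReLU
logits = dense_layer(fc1, name='logits', hidden_units=10,
                     activation=tf.identity)           # linear output layer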
Code example #2
File: layers.py  Project: hm-tsai/USZ_ML_workshop
def deconv2D_layer(bottom,
                   name,
                   kernel_size=(4, 4),
                   num_filters=32,
                   strides=(2, 2),
                   output_shape=None,
                   activation=tf.nn.relu,
                   padding="SAME",
                   weight_init='he_normal'):
    '''
    Standard 2D transposed convolution (also known as deconvolution) layer.
    The default settings upsample the input by a factor of 2.
    '''

    bottom_shape = bottom.get_shape().as_list()
    if output_shape is None:
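        # Default: upsample each spatial dimension by its stride. This uses the
        # static input shape, so the batch dimension must be known at graph time.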
        output_shape = tf.stack([
            bottom_shape[0], bottom_shape[1] * strides[0],
            bottom_shape[2] * strides[1], num_filters
        ])

    bottom_num_filters = bottom_shape[3]

    weight_shape = [
        kernel_size[0], kernel_size[1], num_filters, bottom_num_filters
    ]
    bias_shape = [num_filters]
    strides_augm = [1, strides[0], strides[1], 1]

    with tf.name_scope(name):

        if weight_init == 'he_normal':
            N = utils.get_rhs_dim(bottom)
            weights = _weight_variable_he_normal(weight_shape,
                                                 N,
                                                 name=name + '_w')
        elif weight_init == 'simple':
            weights = _weight_variable_simple(weight_shape, name=name + '_w')
        elif weight_init == 'bilinear':
            weights = _weight_variable_bilinear(weight_shape, name=name + '_w')
        else:
            raise ValueError('Unknown weight initialisation method %s' %
                             weight_init)

        biases = _bias_variable(bias_shape, name=name + '_b')

        op = tf.nn.conv2d_transpose(bottom,
                                    filter=weights,
                                    output_shape=output_shape,
                                    strides=strides_augm,
                                    padding=padding)
        op = tf.nn.bias_add(op, biases)
        op = activation(op)

        # Tensorboard variables
        tf.summary.histogram(weights.name, weights)
        tf.summary.histogram(biases.name, biases)
        tf.summary.histogram(op.op.name + '/activations', op)

        return op
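A hypothetical usage sketch: because the default output_shape is built from the static input shape, the batch size must be fixed here (otherwise pass output_shape explicitly):

import tensorflow as tf

# Hypothetical usage (TF 1.x): upsample a 16x16 feature map to 32x32.
x = tf.placeholder(tf.float32, [8, 16, 16, 64])    # static batch size of 8
up = deconv2D_layer(x, name='up1', num_filters=32, strides=(2, 2))
# up has shape [8, 32, 32, 32]: with 'SAME' padding, out_size = in_size * stride.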
Code example #3
File: layers.py  Project: hm-tsai/USZ_ML_workshop
def conv3D_layer(bottom,
                 name,
                 kernel_size=(3, 3, 3),
                 num_filters=32,
                 strides=(1, 1, 1),
                 activation=tf.nn.relu,
                 padding="SAME",
                 weight_init='he_normal'):
    '''
    Standard 3D convolutional layer
    '''

    bottom_num_filters = bottom.get_shape().as_list()[-1]

    weight_shape = [
        kernel_size[0], kernel_size[1], kernel_size[2], bottom_num_filters,
        num_filters
    ]
    bias_shape = [num_filters]

    strides_augm = [1, strides[0], strides[1], strides[2], 1]

    with tf.name_scope(name):

        if weight_init == 'he_normal':
            N = utils.get_rhs_dim(bottom)
            weights = _weight_variable_he_normal(weight_shape,
                                                 N,
                                                 name=name + '_w')
        elif weight_init == 'simple':
            weights = _weight_variable_simple(weight_shape, name=name + '_w')
        else:
            raise ValueError('Unknown weight initialisation method %s' %
                             weight_init)

        biases = _bias_variable(bias_shape, name=name + '_b')

        op = tf.nn.conv3d(bottom,
                          filter=weights,
                          strides=strides_augm,
                          padding=padding)
        op = tf.nn.bias_add(op, biases)
        op = activation(op)

        # Tensorboard variables
        tf.summary.histogram(weights.name, weights)
        tf.summary.histogram(biases.name, biases)
        tf.summary.histogram(op.op.name + '/activations', op)

        return op
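A usage sketch for a volumetric input; the 5D layout [batch, depth, height, width, channels] and the shapes below are assumptions for illustration:

import tensorflow as tf

# Hypothetical usage (TF 1.x) on a single-channel 3D volume.
x = tf.placeholder(tf.float32, [None, 32, 128, 128, 1])
c1 = conv3D_layer(x, name='conv3d_1', num_filters=16)                       # same spatial size
c2 = conv3D_layer(c1, name='conv3d_2', num_filters=32, strides=(2, 2, 2))   # halves D, H, W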
Code example #4
File: layers.py  Project: hm-tsai/USZ_ML_workshop
def conv2D_dilated_layer(bottom,
                         name,
                         kernel_size=(3, 3),
                         num_filters=32,
                         rate=1,
                         activation=tf.nn.relu,
                         padding="SAME",
                         weight_init='he_normal'):
    '''
    2D dilated convolution layer, which can be used to increase the receptive field of a network.
    It is described in detail in: Yu et al., Multi-Scale Context Aggregation by Dilated
    Convolutions, 2015 (https://arxiv.org/pdf/1511.07122.pdf).
    '''

    bottom_num_filters = bottom.get_shape().as_list()[3]

    weight_shape = [
        kernel_size[0], kernel_size[1], bottom_num_filters, num_filters
    ]
    bias_shape = [num_filters]

    with tf.variable_scope(name):

        if weight_init == 'he_normal':
            N = utils.get_rhs_dim(bottom)
            weights = _weight_variable_he_normal(weight_shape,
                                                 N,
                                                 name=name + '_w')
        elif weight_init == 'simple':
            weights = _weight_variable_simple(weight_shape, name=name + '_w')
        else:
            raise ValueError('Unknown weight initialisation method %s' %
                             weight_init)

        biases = _bias_variable(bias_shape, name=name + '_b')

        op = tf.nn.atrous_conv2d(bottom,
                                 filters=weights,
                                 rate=rate,
                                 padding=padding)
        op = tf.nn.bias_add(op, biases)
        op = activation(op)

        # Tensorboard variables
        tf.summary.histogram(weights.name, weights)
        tf.summary.histogram(biases.name, biases)
        tf.summary.histogram(op.op.name + '/activations', op)

        return op
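A usage sketch illustrating the receptive-field effect: a 3x3 kernel dilated with rate r covers the same span as a dense (2r+1)x(2r+1) kernel, so stacking layers with growing rates enlarges the receptive field quickly without downsampling. Shapes and names below are hypothetical:

import tensorflow as tf

# Hypothetical usage (TF 1.x): spatial resolution stays 64x64 throughout.
x = tf.placeholder(tf.float32, [None, 64, 64, 32])
d1 = conv2D_dilated_layer(x,  name='dil1', rate=1)   # effective span 3x3
d2 = conv2D_dilated_layer(d1, name='dil2', rate=2)   # effective span 5x5
d3 = conv2D_dilated_layer(d2, name='dil3', rate=4)   # effective span 9x9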
Code example #5
def dense_layer(bottom,
                name,
                hidden_units=512,
                activation=STANDARD_NONLINEARITY,
                normalisation=tfnorm.batch_norm,
                normalise_post_activation=False,
                dropout_p=None,
                weight_init='he_normal',
                add_bias=True,
                **kwargs):
    '''
    Dense a.k.a. fully connected layer
    '''

    bottom_flat = utils.flatten(bottom)
    bottom_rhs_dim = utils.get_rhs_dim(bottom_flat)

    weight_shape = [bottom_rhs_dim, hidden_units]
    bias_shape = [hidden_units]

    with tf.variable_scope(name):

        weights = utils.get_weight_variable(weight_shape,
                                            name='W',
                                            type=weight_init,
                                            regularize=True)

        op = tf.matmul(bottom_flat, weights)

        biases = None
        if add_bias:
            biases = utils.get_bias_variable(bias_shape, name='b')
            op = tf.nn.bias_add(op, biases)

        if not normalise_post_activation:
            op = activation(normalisation(op, **kwargs))
        else:
            op = normalisation(activation(op), **kwargs)

        if dropout_p is not None:
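            # Note: dropout_p is forwarded as keep_prob, i.e. the probability of
            # keeping a unit, not of dropping it.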
            op = dropout(op, keep_prob=dropout_p, **kwargs)

        # Add Tensorboard summaries
        _add_summaries(op, weights, biases)

        return op
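A usage sketch. The extra **kwargs are forwarded to both the normalisation and dropout helpers, which is presumably how a batch-norm training flag would be threaded through; the call below is a minimal hypothetical invocation:

import tensorflow as tf

# Hypothetical usage (TF 1.x): batch norm before the nonlinearity, then dropout.
x = tf.placeholder(tf.float32, [None, 1024])
h = dense_layer(x, name='fc1', hidden_units=512,
                dropout_p=0.8)   # forwarded as keep_prob: each unit kept with p=0.8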
Code example #6
File: layers.py  Project: hm-tsai/USZ_ML_workshop
def dense_layer(bottom,
                name,
                hidden_units=512,
                activation=tf.nn.relu,
                weight_init='he_normal'):
    '''
    Dense a.k.a. fully connected layer
    '''

    bottom_flat = utils.flatten(bottom)
    bottom_rhs_dim = utils.get_rhs_dim(bottom_flat)

    weight_shape = [bottom_rhs_dim, hidden_units]
    bias_shape = [hidden_units]

    with tf.name_scope(name):

        if weight_init == 'he_normal':
            N = bottom_rhs_dim
            weights = _weight_variable_he_normal(weight_shape,
                                                 N,
                                                 name=name + '_w')
        elif weight_init == 'simple':
            weights = _weight_variable_simple(weight_shape, name=name + '_w')
        else:
            raise ValueError('Unknown weight initialisation method %s' %
                             weight_init)

        biases = _bias_variable(bias_shape, name=name + '_b')

        op = tf.matmul(bottom_flat, weights)
        op = tf.nn.bias_add(op, biases)
        op = activation(op)

        # Tensorboard variables
        tf.summary.histogram(weights.name, weights)
        tf.summary.histogram(biases.name, biases)
        tf.summary.histogram(op.op.name + '/activations', op)

        return op
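This variant is functionally the same dense layer as example #1 but wraps everything in tf.name_scope rather than tf.variable_scope. In TF 1.x the distinction matters: name_scope only prefixes op names and is ignored by tf.get_variable, so variable sharing (reuse) only works under a variable_scope; that is presumably why the name_scope variants here bake the layer name into the variable names by hand (name + '_w'). A minimal sketch of the difference:

import tensorflow as tf

with tf.name_scope('ns'):
    a = tf.get_variable('w', shape=[1])        # variable name is 'w' (prefix ignored)
with tf.variable_scope('vs'):
    b = tf.get_variable('w', shape=[1])        # variable name is 'vs/w'
with tf.variable_scope('vs', reuse=True):
    b_again = tf.get_variable('w', shape=[1])  # returns the exact same variable as b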