# Example no. 1
def dense(
    inputs,
    units,
    use_bias=True,
    trainable=True,
    kernel_initializer=tf.variance_scaling_initializer(),
    bias_initializer=tf.zeros_initializer()
):
    """Fully connected layer with no activation.

    Thin wrapper around ``tf.layers.dense`` that also records the layer's
    hyper-parameters through ``_log_hparams``.
    """
    net = tf.layers.dense(inputs,
                          units=units,
                          activation=None,
                          use_bias=use_bias,
                          kernel_initializer=kernel_initializer,
                          bias_initializer=bias_initializer,
                          trainable=trainable)

    _log_hparams(classname='Dense',
                 layername=net.name,
                 units=units,
                 use_bias=use_bias,
                 trainable=trainable,
                 out_shape=str(net.get_shape()),
                 out_dtype=net.dtype)

    return net
def prelu(inputs, channel_shared=False, trainable=True, name='prelu'):
    """Parametric ReLU activation: ``max(x, alpha * x)``.

    The learnable slope ``alpha`` is squashed into (0, 1) with a sigmoid
    before use.

    Args:
        inputs: Input tensor.
        channel_shared: If True a single alpha is shared by all channels;
            otherwise one alpha per channel (last axis) is learned.
        trainable: Whether the alpha variable is trainable.
        name: Variable-scope name for the alpha variable.

    Returns:
        Tensor with the activation applied.
    """

    def parametric_relu(_x):

        if channel_shared:
            w_shape = (1, )

        else:
            # Fix: wrap the per-channel size in a tuple so that both
            # branches hand tf.get_variable a proper shape sequence
            # (the original passed a bare int here).
            w_shape = (int(_x.get_shape()[-1]), )

        alphas = tf.get_variable('alpha',
                                 w_shape,
                                 trainable=trainable,
                                 initializer=tf.initializers.truncated_normal(
                                     mean=-1.0, stddev=0.2))

        # Constrain alpha to (0, 1); the mean=-1.0 initializer above biases
        # the constrained value toward sigmoid(-1) ~= 0.27.
        alphas = tf.nn.sigmoid(alphas, name="constraining_alpha_var_in_0_1")

        return tf.maximum(_x, _x * alphas)

    with tf.variable_scope(name):
        net = parametric_relu(inputs)

    _log_hparams(classname='PReLU',
                 layername=net.name,
                 channel_shared=channel_shared,
                 trainable=trainable,
                 out_shape=str(net.get_shape()),
                 out_dtype=net.dtype)

    return net
def sigmoid(x, name='sigmoid'):
    """Element-wise sigmoid activation with hyper-parameter logging."""
    out = tf.math.sigmoid(x, name=name)

    _log_hparams(
        classname='Sigmoid',
        layername=out.name,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
def tanh(inputs, name='tanh'):
    """Element-wise tanh activation with hyper-parameter logging."""
    out = tf.math.tanh(inputs, name=name)

    _log_hparams(
        classname='TanH',
        layername=out.name,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
def elu(features, name='elu'):
    """Element-wise ELU activation with hyper-parameter logging."""
    out = tf.nn.elu(features, name=name)

    _log_hparams(
        classname='ELU',
        layername=out.name,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 6
def flatten(inputs, name='flatten'):
    """Flatten ``inputs`` (keeping the batch axis) and log the result."""
    out = tf.layers.flatten(inputs, name=name)

    _log_hparams(
        classname='Flatten',
        layername=out.name,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
def relu6(inputs, name='relu6'):
    """Element-wise ReLU6 activation with hyper-parameter logging."""
    out = tf.nn.relu6(inputs, name=name)

    _log_hparams(
        classname='ReLU6',
        layername=out.name,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 8
def squeeze(tensor, axis, name='squeeze'):
    """Drop the given size-1 dimension(s) from ``tensor`` and log it."""
    out = tf.squeeze(tensor, axis=axis, name=name)

    _log_hparams(
        classname='Squeeze',
        layername=out.name,
        axis=axis,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 9
def reshape(tensor, shape, name='reshape'):
    """Reshape ``tensor`` to ``shape`` and log the resulting layer."""
    out = tf.reshape(tensor, shape=shape, name=name)

    _log_hparams(
        classname='Reshape',
        layername=out.name,
        shape=shape,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 10
def concat(values, axis, name='concat'):
    """Concatenate ``values`` along ``axis`` and log the resulting layer."""
    out = tf.concat(values=values, axis=axis, name=name)

    _log_hparams(
        classname='Concat',
        layername=out.name,
        axis=axis,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
def leaky_relu(features, alpha=0.2, name='leaky_relu'):
    """Leaky ReLU activation (negative slope ``alpha``) with logging."""
    out = tf.nn.leaky_relu(features, alpha=alpha, name=name)

    _log_hparams(
        classname='LeakyReLU',
        layername=out.name,
        alpha=alpha,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
def crelu(features, name='crelu', axis=-1):
    """Concatenated ReLU activation along ``axis`` with logging."""
    out = tf.nn.crelu(features, name=name, axis=axis)

    _log_hparams(
        classname='CReLU',
        layername=out.name,
        axis=axis,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 13
def conv2d(inputs,
           n_channels=8,
           kernel_size=(3, 3),
           strides=(1, 1),
           padding='VALID',
           data_format='NHWC',
           dilation_rate=(1, 1),
           use_bias=True,
           kernel_initializer=tf.variance_scaling_initializer(),
           bias_initializer=tf.zeros_initializer(),
           trainable=True):
    """2-D convolution (no activation) with hyper-parameter logging.

    Validates ``data_format`` and ``padding`` before delegating to
    ``tf.layers.conv2d``.

    Raises:
        ValueError: On an unrecognized ``data_format`` or ``padding``.
    """
    if data_format not in ('NHWC', 'NCHW'):
        raise ValueError(
            "Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" %
            data_format)

    if padding.upper() not in ('SAME', 'VALID'):
        raise ValueError(
            "Unknown padding: `%s` (accepted: ['SAME', 'VALID'])" %
            padding.upper())

    # tf.layers expects 'channels_*' naming rather than NHWC/NCHW.
    channels_format = ('channels_last'
                       if data_format == 'NHWC' else 'channels_first')

    net = tf.layers.conv2d(inputs,
                           filters=n_channels,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding=padding,
                           dilation_rate=dilation_rate,
                           data_format=channels_format,
                           use_bias=use_bias,
                           kernel_initializer=kernel_initializer,
                           bias_initializer=bias_initializer,
                           trainable=trainable,
                           activation=None)

    _log_hparams(classname='Conv2D',
                 layername=net.name,
                 n_channels=n_channels,
                 kernel_size=kernel_size,
                 strides=strides,
                 padding=padding,
                 data_format=data_format,
                 dilation_rate=dilation_rate,
                 use_bias=use_bias,
                 trainable=trainable,
                 out_shape=str(net.get_shape()),
                 out_dtype=net.dtype)

    return net
def softmax(inputs, axis=None, name="softmax"):
    """Softmax along ``axis`` with hyper-parameter logging."""
    out = tf.nn.softmax(inputs, axis=axis, name=name)

    _log_hparams(
        classname='Softmax',
        layername=out.name,
        axis=axis,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 15
def max_pooling2d(inputs,
                  pool_size=(2, 2),
                  strides=None,
                  padding='valid',
                  data_format=None,
                  name="max_pooling2d"):
    """2-D max pooling with hyper-parameter logging.

    Args:
        inputs: 4-D input tensor.
        pool_size: Pooling window, (height, width).
        strides: Pool strides; passed straight through to tf.layers.
        padding: 'same' or 'valid' (case-insensitive).
        data_format: 'NHWC' or 'NCHW'. NOTE(review): there is no working
            default — ``None`` is rejected by the validation below, so
            callers must always pass this explicitly.
        name: Layer name.

    Returns:
        The pooled tensor.

    Raises:
        ValueError: On an unrecognized ``data_format`` or ``padding``.
    """
    if data_format not in ['NHWC', 'NCHW']:
        raise ValueError(
            "Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" %
            data_format)

    if padding.lower() not in ['same', 'valid']:
        raise ValueError(
            "Unknown padding: `%s` (accepted: ['same', 'valid'])" % padding)

    # (A commented-out tf.keras.layers.MaxPool2D equivalent was removed here.)
    net = tf.layers.max_pooling2d(inputs,
                                  pool_size=pool_size,
                                  strides=strides,
                                  padding=padding,
                                  data_format='channels_first' if data_format
                                  == 'NCHW' else 'channels_last',
                                  name=name)

    _log_hparams(classname='MaxPooling2D',
                 layername=net.name,
                 pool_size=pool_size,
                 strides=strides,
                 padding=padding,
                 data_format=data_format,
                 out_shape=str(net.get_shape()),
                 out_dtype=net.dtype)

    return net
# Example no. 16
def pad(inputs, paddings, mode='CONSTANT', name='padding', constant_values=0):
    """Pad ``inputs`` with tf.pad and log the padding configuration.

    Raises:
        ValueError: If ``mode`` is not CONSTANT, REFLECT or SYMMETRIC
            (case-insensitive).
    """
    if mode.upper() not in ('CONSTANT', 'REFLECT', 'SYMMETRIC'):
        raise ValueError(
            "Unknown padding mode: `%s` (accepted: ['CONSTANT', 'REFLECT', 'SYMMETRIC'])"
            % mode)

    out = tf.pad(inputs,
                 paddings=paddings,
                 mode=mode,
                 name=name,
                 constant_values=constant_values)

    _log_hparams(
        classname='Padding',
        layername=out.name,
        paddings=paddings,
        mode=mode,
        constant_values=constant_values,
        out_shape=str(out.get_shape()),
        out_dtype=out.dtype,
    )

    return out
# Example no. 17
def reduce_mean(inputs,
                keepdims=None,
                data_format='NHWC',
                name='spatial_mean'):
    """Mean over the spatial (H, W) axes of a 4-D tensor.

    Args:
        inputs: 4-D input tensor.
        keepdims: If True, keep the reduced axes with size 1.
        data_format: 'NHWC' or 'NCHW'. Bug fix: the previous default,
            'channels_last', always failed the validation below, so every
            call relying on the default raised ValueError; the default is
            now 'NHWC'.
        name: Op name.

    Returns:
        The reduced tensor.

    Raises:
        ValueError: If ``data_format`` is not 'NHWC' or 'NCHW'.
    """
    if data_format not in ['NHWC', 'NCHW']:
        raise ValueError(
            "Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" %
            data_format)

    # Spatial axes are (1, 2) for NHWC and (2, 3) for NCHW.
    axes = [1, 2] if data_format == 'NHWC' else [2, 3]

    net = tf.math.reduce_mean(inputs, axis=axes, keepdims=keepdims, name=name)

    _log_hparams(classname='ReduceMean',
                 layername=net.name,
                 axis=axes,
                 keepdims=keepdims,
                 out_shape=str(net.get_shape()),
                 out_dtype=net.dtype)

    return net
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
    """Dropout wrapper (tf.keras.layers.Dropout) with hyper-parameter logging.

    Dropout is only active when ``training`` is True.
    """
    dropout_layer = tf.keras.layers.Dropout(rate,
                                            noise_shape=noise_shape,
                                            seed=seed,
                                            name=name)
    net = dropout_layer.apply(inputs, training=training)

    _log_hparams(
        classname='Dropout',
        layername=net.name,
        noise_shape=noise_shape,
        training=training,
        seed=seed,
        out_shape=str(net.get_shape()),
        out_dtype=net.dtype,
    )

    return net
# Example no. 19
def deconv2d(inputs,
             n_channels=8,
             kernel_size=(3, 3),
             padding='VALID',
             data_format='NHWC',
             use_bias=True,
             kernel_initializer=tf.variance_scaling_initializer(),
             bias_initializer=tf.zeros_initializer(),
             trainable=True,
             use_upscale_conv=True):
    """2x spatial upsampling layer.

    Uses either nearest-neighbour upscaling followed by a stride-1
    convolution (``use_upscale_conv=True``) or a stride-2 transposed
    convolution.

    Raises:
        ValueError: On an unrecognized ``data_format`` or ``padding``.
    """
    # Enforce capital letters for the padding mode.
    padding = padding.upper()

    if data_format not in ('NHWC', 'NCHW'):
        raise ValueError(
            "Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" %
            data_format)

    if padding not in ('SAME', 'VALID'):
        raise ValueError(
            "Unknown padding: `%s` (accepted: ['SAME', 'VALID'])" % padding)

    with tf.variable_scope("deconv2d"):

        if use_upscale_conv:
            # Upscale-then-convolve path; hyper-parameter logging happens
            # inside the layers.* helpers themselves.
            upscaled = layers.upscale_2d(
                inputs,
                size=(2, 2),
                # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA]
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                align_corners=True,
                is_scale=True,
                data_format=data_format)

            layer = layers.conv2d(upscaled,
                                  n_channels=n_channels,
                                  kernel_size=kernel_size,
                                  strides=(1, 1),
                                  padding=padding,
                                  data_format=data_format,
                                  use_bias=use_bias,
                                  trainable=trainable,
                                  kernel_initializer=kernel_initializer,
                                  bias_initializer=bias_initializer)

        else:
            input_shape = inputs.get_shape()

            layer = tf.layers.conv2d_transpose(
                inputs=inputs,
                filters=n_channels,
                kernel_size=kernel_size,
                strides=(2, 2),
                padding=padding,
                data_format='channels_first'
                if data_format == "NCHW" else "channels_last",
                use_bias=use_bias,
                trainable=trainable,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)

            _log_hparams(classname='Conv2DTranspose',
                         layername=layer.name,
                         n_channels=n_channels,
                         kernel_size=kernel_size,
                         strides=(2, 2),
                         padding=padding,
                         data_format=data_format,
                         use_bias=use_bias,
                         trainable=trainable,
                         input_shape=str(input_shape),
                         out_shape=str(layer.get_shape()),
                         out_dtype=layer.dtype)

    return layer
# Example no. 20
def upscale_2d(inputs,
               size,
               is_scale=True,
               method=0,
               align_corners=True,
               data_format='NHWC',
               name='upsample2d_layer'):
    """Resize (upsample) a 3-D or 4-D image tensor via tf.image.resize_images.

    Args:
        inputs: 3-D (H, W, C) or 4-D (batched) image tensor.
        size: Pair (h, w). Interpreted as multiplicative scale factors when
            ``is_scale`` is True, otherwise as the absolute output size.
        is_scale: See ``size``.
        method: tf.image.ResizeMethod value (0 == BILINEAR).
        align_corners: Forwarded to the resize op.
        data_format: 'NHWC' or 'NCHW' (relevant for 4-D inputs only).
        name: Variable-scope name for the resize op.

    Returns:
        The resized tensor, returned in the caller's data format.

    Raises:
        AssertionError: If ``size`` is not a 2-element list/tuple.
        ValueError: On an unknown ``data_format``.
        Exception: If ``inputs`` is neither rank 3 nor rank 4.
    """
    # Bug fix: the original check,
    #   `not isinstance(size, (list, tuple)) and len(size) == 2`,
    # only rejected non-sequences whose len() happened to be 2 and let a
    # wrong-length list/tuple straight through. Require a 2-element
    # list/tuple, as intended.
    if not (isinstance(size, (list, tuple)) and len(size) == 2):
        raise AssertionError()

    if data_format not in ['NHWC', 'NCHW']:
        raise ValueError(
            "Unknown data format received: `%s` (allowed: `NHWC`, `NCHW`)" %
            data_format)

    input_shape = inputs.get_shape()

    if len(inputs.get_shape()) == 3:
        if is_scale:
            size_h = size[0] * int(inputs.get_shape()[0])
            size_w = size[1] * int(inputs.get_shape()[1])
            _size = [size_h, size_w]
        else:
            _size = size

    elif len(inputs.get_shape()) == 4:
        if data_format == 'NCHW':
            inputs = tf.transpose(inputs, [0, 2, 3, 1])  # NCHW => NHWC

        if is_scale:
            size_h = size[0] * int(inputs.get_shape()[1])
            size_w = size[1] * int(inputs.get_shape()[2])
            _size = [size_h, size_w]
        else:
            _size = size

    else:
        raise Exception("Do not support shape %s" % str(inputs.get_shape()))

    with tf.variable_scope(name):
        net = tf.image.resize_images(inputs,
                                     size=_size,
                                     method=method,
                                     align_corners=align_corners)

    # Transpose back so the caller receives its original layout.
    if data_format == 'NCHW' and len(inputs.get_shape()) == 4:
        net = tf.transpose(net, [0, 3, 1, 2])  # NHWC => NCHW

    _log_hparams(classname='Upscale2D',
                 layername=net.name,
                 size=size,
                 is_scale=is_scale,
                 method=method,
                 align_corners=align_corners,
                 data_format=data_format,
                 input_shape=str(input_shape),
                 out_shape=str(net.get_shape()),
                 out_dtype=net.dtype)

    return net