Example #1
def dwconv(x,
           kernel,
           multiplier=1,
           stride=1,
           pad=0,
           padding='SAME',
           initializer=tf.he_uniform,
           bias=False,
           **kwargs):

    if pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')

    kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)
    stride = _stride_shape(2, stride)

    W = tf.get_weight('W',
                      shape=kernel,
                      initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.depthwise_conv2d(x, W, stride, padding)
    if bias:
        outdim = kernel[2] * multiplier  # in_channels * channel_multiplier
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    return out
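
For reference, a minimal sketch of the same depthwise convolution in plain TensorFlow 1.x, without the wrapper's tf.get_weight / x.dims extensions (shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 16])  # NHWC input
W = tf.get_variable('W_dw', shape=[3, 3, 16, 2])    # (kh, kw, in_channels, multiplier)
y = tf.nn.depthwise_conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# y has 16 * 2 = 32 channels: each input channel is filtered independently by
# `multiplier` kernels, matching dwconv's outdim = kernel[2] * multiplier.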
Example #2
def subpixel(x,
             kernel,
             factor=2,
             stride=1,
             pad=0,
             padding='SAME',
             initializer=tf.he_uniform,
             bias=False,
             **kwargs):
    from .ireshape import channel_to_space

    assert x.ndim == 4  # implemented for 4D tensor

    indim = x.dims[-1]
    outdim = indim * factor * factor

    kernel = _kernel_shape(2, kernel, indim, outdim)
    stride = _stride_shape(2, stride)

    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))
    out = tf.nn.conv2d(x, W, stride, padding=padding)
    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)

    # periodic shuffle
    out = channel_to_space(out, factor)

    return out
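
The periodic shuffle is what TensorFlow ships as tf.nn.depth_to_space; assuming channel_to_space behaves the same way, a minimal plain-TF 1.x sketch of the subpixel upscaling path looks like this:

import tensorflow as tf

factor = 2
x = tf.placeholder(tf.float32, [None, 8, 8, 16])
W = tf.get_variable('W_sp', shape=[3, 3, 16, 16 * factor * factor])  # outdim = indim * factor**2
y = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')  # (B, 8, 8, 64)
y = tf.nn.depth_to_space(y, block_size=factor)                # (B, 16, 16, 16)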
Example #3
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
    """
    out = dense( shape=shape, init=None, paramset=None)
    :param x: tensor
    :param bias:
    :param outdim: output_size
    :param initializer:
    :param name:
    :return: layer | output | (output, params)
    """
    if x.ndim == 4:
        x = x.flat2d()

    assert x.ndim == 2

    # a non-int outdim is interpreted as an output shape to reshape to
    outshape = not isinstance(outdim, int)
    if outshape:
        dim = [-1] + list(outdim)
        outdim = np.prod(outdim)

    shape = [x.dims[-1], outdim]
    W = tf.get_weight('W', shape=shape, initializer=initializer(shape))
    out = x.dot(W)
    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)

    if outshape:
        # reshape to the requested output shape
        out = out.reshape(dim)

    return tf.identity(out, name=name)
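
The outshape branch lets a tuple stand in for outdim: the layer produces prod(outdim) units, then reshapes. A minimal sketch of that logic in plain TensorFlow 1.x (names are illustrative):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
outdim = (7, 7, 4)            # non-int outdim triggers the reshape branch
total = int(np.prod(outdim))  # 196 output units
W = tf.get_variable('W_fc', shape=[128, total])
y = tf.reshape(tf.matmul(x, W), [-1] + list(outdim))  # (B, 7, 7, 4)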
Example #4
def pleaky(x):
    """
    parametric leakyrelu
    :param x:
    :return:
    """
    alpha = tf.get_bias('alpha',
                        shape=(),
                        initializer=tf.constant_initializer(0.01))
    return tf.maximum(x, x * alpha)
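
Note that tf.maximum(x, x * alpha) matches a leaky ReLU only while the learned alpha stays in [0, 1]. The relu-based form below is a standard PReLU formulation (not what the source uses) that stays correct for any alpha; a minimal plain-TF sketch:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64])
alpha = tf.get_variable('alpha', shape=(), initializer=tf.constant_initializer(0.01))
y = tf.nn.relu(x) - alpha * tf.nn.relu(-x)  # x for x >= 0, alpha * x for x < 0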
Example #5
def deconv(x,
           outdim,
           kernel,
           stride=1,
           padding='SAME',
           initializer=tf.he_uniform,
           bias=False,
           extra=None,
           **kwargs):
    nd = x.ndim - 2
    out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride, padding,
                                 extra)
    oshape = tf.TensorShape(out_shape)  # keep the static shape for set_shape below
    if out_shape[0] is None:
        # unknown batch size: fill it in dynamically
        out_shape[0] = tf.shape(x)[0]
        out_shape = tf.stack(out_shape)

    kernel_shape = _kernel_shape(nd, kernel, outdim,
                                 x.dims[-1])  # in and out channels are swapped
    stride = _stride_shape(nd, stride)  # full strides vector

    W = tf.get_weight('W',
                      shape=kernel_shape,
                      initializer=initializer(kernel_shape))

    if nd == 2:
        out = tf.nn.conv2d_transpose(x,
                                     W,
                                     out_shape,
                                     strides=stride,
                                     padding=padding)
    elif nd == 3:
        out = tf.nn.conv3d_transpose(x,
                                     W,
                                     out_shape,
                                     strides=stride,
                                     padding=padding)
    else:
        raise NotImplementedError('not implemented for ndim [{0}]'.format(nd))

    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    out.set_shape(oshape)

    return out
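
The dynamic-batch handling is the standard conv2d_transpose pattern: build output_shape with tf.shape for the unknown batch dimension, then restore the static shape afterwards. A minimal plain-TF 1.x sketch, without the wrapper's _deconv_outshape helper (shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 8, 32])
W = tf.get_variable('W_dc', shape=[4, 4, 16, 32])   # (kh, kw, out_ch, in_ch): in/out swapped
out_shape = tf.stack([tf.shape(x)[0], 16, 16, 16])  # dynamic batch dimension
y = tf.nn.conv2d_transpose(x, W, out_shape, strides=[1, 2, 2, 1], padding='SAME')
y.set_shape([None, 16, 16, 16])  # re-attach the static shape lost through tf.stack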
Example #6
def inorm(x,
          beta=0.0,
          gamma=1.0,
          stddev=0.002,
          epsilon=1e-5,
          axis=None,
          trainable=True,
          **kwargs):
    """
    instance normalization normalization for (W,H)
    same output not regard to trainmode
    # https://arxiv.org/pdf/1607.08022.pdf for instance normalization
    # z = gamma * (x-m)/s + beta
    # note gamma, beta
    :param x: [BHWC] is common case
    :param gamma:
    :param beta:
    :param epsilon:
    :return:
    """
    # axes = [1, 2] for BHWC: every axis except batch and channel
    axes = list(range(1, x.ndim - 1))
    m, v = tf.nn.moments(x, axes=axes, keep_dims=True)

    shapelast = x.dims[-1:]
    if trainable:
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
        init_beta = tf.constant_initializer(beta)
        gamma_t = tf.get_weight(name='gamma',
                                shape=shapelast,
                                initializer=init_gamma)
        beta_t = tf.get_bias(name='beta',
                             shape=shapelast,
                             initializer=init_beta)
    else:
        gamma_t = gamma
        beta_t = beta

    out = tf.nn.batch_normalization(x,
                                    m,
                                    v,
                                    offset=beta_t,
                                    scale=gamma_t,
                                    variance_epsilon=epsilon)

    return out
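
Stripped of the wrapper, instance normalization is just per-sample, per-channel moments over the spatial axes fed into batch_normalization. A minimal plain-TF 1.x sketch:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
m, v = tf.nn.moments(x, axes=[1, 2], keep_dims=True)  # stats per sample and channel
gamma = tf.get_variable('gamma', shape=[3], initializer=tf.ones_initializer())
beta = tf.get_variable('beta', shape=[3], initializer=tf.zeros_initializer())
y = tf.nn.batch_normalization(x, m, v, offset=beta, scale=gamma, variance_epsilon=1e-5)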
Example #7
def conv3d(x,
           outdim,
           kernel,
           stride=1,
           pad=0,
           padding='SAME',
           mode='CONSTANT',
           initializer=tf.he_uniform,
           bias=False,
           **kwargs):

    kernel = _kernel_shape(3, kernel, x.dims[-1], outdim)
    stride = _stride_shape(3, stride)  # stride 5-dim

    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # pad manually
        half = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2,
                (kernel[2] - 1) // 2)
        pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]),
                (pad + half[1], pad + kernel[1] - 1 - half[1]),
                (pad + half[2], pad + kernel[2] - 1 - half[2]), (0, 0)]
        padding = 'VALID'  # switch to VALID since padding was applied manually
    elif pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)

    W = tf.get_weight('W',
                      shape=kernel,
                      initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.conv3d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    return out
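
The manual-padding branch reproduces SAME geometry with a non-constant border mode: pad (k - 1) // 2 in front and the remainder behind on each spatial axis, then convolve with VALID. A minimal plain-TF sketch of that trick (shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 16, 16, 4])
k = (3, 3, 3)
half = [(d - 1) // 2 for d in k]
pads = [(0, 0)] + [(h, d - 1 - h) for h, d in zip(half, k)] + [(0, 0)]
x_pad = tf.pad(x, pads, mode='REFLECT')  # SAME-equivalent amounts, reflected borders
W = tf.get_variable('W_3d', shape=list(k) + [4, 8])
y = tf.nn.conv3d(x_pad, W, strides=[1, 1, 1, 1, 1], padding='VALID')
# y keeps the input's spatial shape, as SAME would, but with reflect padding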
Example #8
def atrous(x,
           outdim,
           kernel,
           rate,
           pad=0,
           padding='SAME',
           initializer=tf.he_uniform,
           bias=None,
           **kwargs):
    # todo rate per axis?

    assert isinstance(pad, int)
    nd = x.ndim - 2
    if pad:
        pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')

    kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
    W = tf.get_weight('W',
                      shape=kernel,
                      initializer=initializer(kernel),
                      **kwargs)

    if nd == 1:
        out = _atrous1d(x, W, rate, padding=padding)
    elif nd == 2:
        out = tf.nn.atrous_conv2d(x, W, rate, padding)
    else:
        raise NotImplementedError('not implemented for ndim [{0}]'.format(nd))

    if bias is not None:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    return out
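
For the 2D case the body reduces to TensorFlow's built-in dilated convolution (the 1D _atrous1d helper is the wrapper's own); a minimal plain-TF sketch:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 8])
W = tf.get_variable('W_at', shape=[3, 3, 8, 16])
y = tf.nn.atrous_conv2d(x, W, rate=2, padding='SAME')  # 3x3 kernel sampled at rate 2 covers a 5x5 field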
Example #9
def bias(x, initializer=tf.zeros_initializer, name=None):
    outdim = x.dims[-1]
    b = tf.get_bias('b', shape=(outdim, ), initializer=initializer())
    return tf.nn.bias_add(x, b, name=name)