Example 1
def dwconv(x,
           kernel,
           multiplier=1,
           stride=1,
           pad=0,
           padding='SAME',
           initializer=tf.he_uniform,
           bias=False,
           **kwargs):

    if pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')

    kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)  # (kh, kw, in_ch, multiplier)
    stride = _stride_shape(2, stride)

    W = tf.get_weight('W',
                      shape=kernel,
                      initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.depthwise_conv2d(x, W, stride, padding)
    if bias:
        outdim = kernel[2] * multiplier
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    return out
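These snippets come from a thin wrapper around TensorFlow 1.x: tf.get_weight, tf.get_bias, tf.he_uniform, and attributes like x.dims are wrapper extensions, not stock TF. For reference, the core op of dwconv in plain TF 1.x looks roughly like the sketch below (variable names and shapes are illustrative assumptions):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 8])            # NHWC input
# depthwise filter shape: (kh, kw, in_channels, channel_multiplier)
W = tf.get_variable('W_dw', shape=[3, 3, 8, 2],
                    initializer=tf.variance_scaling_initializer(
                        scale=2.0, distribution='uniform'))  # He uniform
y = tf.nn.depthwise_conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# y: (None, 32, 32, 16) -- each input channel gets 2 filters of its own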
Example 2
def subpixel(x,
             kernel,
             factor=2,
             stride=1,
             pad=0,
             padding='SAME',
             initializer=tf.he_uniform,
             bias=False,
             **kwargs):
    from .ireshape import channel_to_space

    assert x.ndim == 4  # only implemented for 4D tensors

    indim = x.dims[-1]
    outdim = indim * factor * factor

    kernel = _kernel_shape(2, kernel, indim, outdim)
    stride = _stride_shape(2, stride)

    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))
    out = tf.nn.conv2d(x, W, stride, padding=padding)
    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)

    # periodic shuffle: rearrange factor*factor channel groups into spatial blocks
    out = channel_to_space(out, factor)

    return out
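channel_to_space implements the periodic shuffle of sub-pixel convolution (Shi et al., 2016): the conv expands channels by factor**2, and the shuffle folds those channels into a factor-times-larger spatial grid. In stock TF 1.x the shuffle is tf.depth_to_space; a minimal sketch with assumed shapes:

import tensorflow as tf

factor = 2
x = tf.placeholder(tf.float32, [None, 16, 16, 8])
W = tf.get_variable('W_sp', shape=[3, 3, 8, 8 * factor * factor])
y = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
y = tf.depth_to_space(y, factor)   # the periodic shuffle
# y: (None, 32, 32, 8) -- 2x spatial upscaling, channel count restored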
Example 3
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
    """
    out = dense( shape=shape, init=None, paramset=None)
    :param x: tensor
    :param bias:
    :param outdim: output_size
    :param initializer:
    :param name:
    :return: layer | output | (output, params)
    """
    if x.ndim == 4:
        x = x.flat2d()

    assert x.ndim == 2

    outshape = not isinstance(outdim, int)  # outdim given as a shape, not an int
    if outshape:
        dim = [-1] + list(outdim)
        outdim = np.prod(outdim)

    shape = [x.dims[-1], outdim]
    W = tf.get_weight('W', shape=shape, initializer=initializer(shape))
    out = x.dot(W)
    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)

    if outshape:
        # reshape the flat output to the requested shape
        out = out.reshape(dim)

    return tf.identity(out, name=name)
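When outdim is a tuple rather than an int, dense projects to prod(outdim) units and then reshapes. A plain-TF sketch of that path (dimensions illustrative):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 256])
outdim = (4, 4, 8)                            # dense(x, (4, 4, 8)) behaves like:
W = tf.get_variable('W_fc', shape=[256, int(np.prod(outdim))])
y = tf.matmul(x, W)                           # (None, 256) -> (None, 128)
y = tf.reshape(y, [-1, 4, 4, 8])              # -> (None, 4, 4, 8)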
Example 4
def deconv(x,
           outdim,
           kernel,
           stride=1,
           padding='SAME',
           initializer=tf.he_uniform,
           bias=False,
           extra=None,
           **kwargs):
    nd = x.ndim - 2
    out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride, padding,
                                 extra)
    oshape = tf.TensorShape(out_shape)  # static shape; batch may be None
    if out_shape[0] is None:
        # dynamic batch size: pass output_shape as a tensor
        out_shape[0] = tf.shape(x)[0]
        out_shape = tf.stack(out_shape)

    kernel_shape = _kernel_shape(nd, kernel, outdim,
                                 x.dims[-1])  # swap in and out channel
    stride = _stride_shape(nd, stride)  # stride

    W = tf.get_weight('W',
                      shape=kernel_shape,
                      initializer=initializer(kernel_shape))

    if nd == 2:
        out = tf.nn.conv2d_transpose(x,
                                     W,
                                     out_shape,
                                     strides=stride,
                                     padding=padding)
    elif nd == 3:
        out = tf.nn.conv3d_transpose(x,
                                     W,
                                     out_shape,
                                     strides=stride,
                                     padding=padding)
    else:
        raise NotImplementedError('not implemented for ndim [{0}]'.format(nd))

    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    out.set_shape(oshape)  # restore static shape info lost by the transpose op

    return out
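With SAME padding, each spatial dimension of a transposed convolution simply grows by the stride, which is presumably what _deconv_outshape computes; the dynamic-batch handling above exists because conv2d_transpose needs a concrete output_shape. A hedged plain-TF sketch of the 2-D case (shapes assumed):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 16, 32])
# note the swapped channel order: (kh, kw, out_channels, in_channels)
W = tf.get_variable('W_up', shape=[4, 4, 16, 32])
batch = tf.shape(x)[0]                        # dynamic batch size
out_shape = tf.stack([batch, 32, 32, 16])     # SAME: spatial size * stride
y = tf.nn.conv2d_transpose(x, W, out_shape, strides=[1, 2, 2, 1],
                           padding='SAME')
y.set_shape([None, 32, 32, 16])               # restore static shape info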
Example 5
def conv3d(x,
           outdim,
           kernel,
           stride=1,
           pad=0,
           padding='SAME',
           mode='CONSTANT',
           initializer=tf.he_uniform,
           bias=False,
           **kwargs):

    kernel = _kernel_shape(3, kernel, x.dims[-1], outdim)
    stride = _stride_shape(3, stride)  # stride 5-dim

    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # conv3d's built-in SAME padding is zero-only; emulate it manually
        half = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2,
                (kernel[2] - 1) // 2)
        pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]),
                (pad + half[1], pad + kernel[1] - 1 - half[1]),
                (pad + half[2], pad + kernel[2] - 1 - half[2]), (0, 0)]
        padding = 'VALID'  # switch to VALID since the input was padded manually
    elif pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)

    W = tf.get_weight('W',
                      shape=kernel,
                      initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.conv3d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    return out
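The mode != 'CONSTANT' branch exists because tf.nn.conv3d only zero-pads; to get SAME-sized output with reflect or symmetric padding, the input is padded by hand and the conv runs with VALID. A sketch of the same trick in plain TF (shapes assumed):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 32, 32, 4])     # NDHWC
k, half = 3, 1                                           # half = (k - 1) // 2
pads = [(0, 0)] + [(half, k - 1 - half)] * 3 + [(0, 0)]
x_pad = tf.pad(x, pads, mode='REFLECT')                  # reflect, not zeros
W = tf.get_variable('W_3d', shape=[k, k, k, 4, 16])
y = tf.nn.conv3d(x_pad, W, strides=[1, 1, 1, 1, 1], padding='VALID')
# y: (None, 8, 32, 32, 16) -- spatially the same size as the input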
Example 6
def atrous(x,
           outdim,
           kernel,
           rate,
           pad=0,
           padding='SAME',
           initializer=tf.he_uniform,
           bias=False,
           **kwargs):
    # TODO: support a per-axis dilation rate?

    assert isinstance(pad, int)
    nd = x.ndim - 2
    if pad:
        pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')

    kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
    W = tf.get_weight('W',
                      shape=kernel,
                      initializer=initializer(kernel),
                      **kwargs)

    if nd == 1:
        out = _atrous1d(x, W, rate, padding=padding)
    elif nd == 2:
        out = tf.nn.atrous_conv2d(x, W, rate, padding)
    else:
        raise NotImplementedError('not implemented for ndim [{0}]'.format(nd))

    if bias:
        b = tf.get_bias('b',
                        shape=(outdim, ),
                        initializer=tf.zeros_initializer(),
                        **kwargs)
        out = tf.nn.bias_add(out, b)

    return out
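tf.nn.atrous_conv2d is a dilated convolution: a rate of r puts r - 1 zeros between filter taps, widening the receptive field without adding weights. A minimal sketch (shapes assumed):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64, 64, 8])
W = tf.get_variable('W_at', shape=[3, 3, 8, 16])
y = tf.nn.atrous_conv2d(x, W, rate=2, padding='SAME')
# effective kernel span: 3 + (3 - 1) * (2 - 1) = 5, still only 3*3*8*16 weights
# y: (None, 64, 64, 16)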
Example 7
def bn(x,
       stddev=0.002,
       beta=0.0,
       gamma=1.0,
       epsilon=1e-5,
       momentum=0.99,
       axis=-1,
       training=None,
       **kwargs):
    if kwargs.pop('scale', True):
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    else:
        init_gamma = None
    if kwargs.pop('center', True):
        init_beta = tf.constant_initializer(beta)
    else:
        init_beta = None

    # Infer the training flag: variables reused from an existing scope
    # default to inference mode; otherwise fall back to the graph's flag.
    reuse = tf.get_variable_scope().reuse
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        training = x.graph.is_training
    out = tf.layers.batch_normalization(
        x,
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        beta_initializer=init_beta,
        gamma_initializer=init_gamma,
        moving_mean_initializer=tf.zeros_initializer(),
        moving_variance_initializer=tf.ones_initializer(),
        training=training,
        **kwargs)
    return out
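tf.layers.batch_normalization updates its moving mean and variance through ops it registers in tf.GraphKeys.UPDATE_OPS; the optimizer does not run them automatically. Any train op built on top of bn therefore needs the standard dependency pattern, sketched below with a stand-in loss:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32])
is_training = tf.placeholder(tf.bool)
h = tf.layers.batch_normalization(x, training=is_training)
loss = tf.reduce_mean(tf.square(h))           # stand-in loss
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):     # run the moving-stat updates
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)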