Example #1
def sg_conv1d(tensor, opt):
    r"""Applies a 1-D convolution.
    
    Args:
      tensor: A `Tensor`.
      size: An `integer` representing the kernel width.
        If not specified, 2 is set implicitly.
      stride: An `integer`. The number of entries by which
        the filter is moved right at each step.
      in_dim: An `integer`. The size of input dimension.
      dim: An `integer`. The size of output dimension.
      pad: Either `SAME` (Default) or `VALID`.
      bias: Boolean. Whether to add biases to the filters.
      
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # default options
    opt += tf.sg_opt(size=2, stride=1, pad='SAME')

    # parameter initialize
    w = init.he_uniform('W', (opt.size, opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply convolution
    out = tf.nn.conv1d(tensor, w, stride=opt.stride,
                       padding=opt.pad) + (b if opt.bias else 0)

    return out
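
A hypothetical usage sketch (not from the source), assuming `import sugartensor as tf` (which provides `tf.sg_opt` and `tf.sg_floatx`) and that `sg_initializer` is available as `init` next to the function above; the shapes and option values are illustrative.

import sugartensor as tf

# a batch of 8 sequences of length 100 with 64 input channels
x = tf.placeholder(tf.sg_floatx, shape=(8, 100, 64))

# direct call with the (tensor, opt) signature shown above
y = sg_conv1d(x, tf.sg_opt(in_dim=64, dim=128, size=3, bias=True))
# with the default pad='SAME' and stride=1, y has shape (8, 100, 128)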
Example #2
def sg_emb(**kwargs):
    r"""Returns an embedding layer or a look-up table.
    
    Args:
      name: A name for the layer (required).
      emb: A 2-D array with shape `[vocabulary size - 1, embedding dimension size]`.
        Note that the first row of the resulting table is filled with 0's because it corresponds to padding.
      in_dim: A positive `integer`. The size of input dimension.
      dim: A positive `integer`. The size of output dimension.
      voca_size: A positive int32. The size of the vocabulary.
      
    Returns:
      A 2-D tensor.
    """
    opt = tf.sg_opt(kwargs)
    assert opt.name is not None, 'name is mandatory.'

    import sg_initializer as init

    if opt.emb is None:
        # initialize embedding matrix
        assert opt.voca_size is not None, 'voca_size is mandatory.'
        assert opt.dim is not None, 'dim is mandatory.'
        w = init.he_uniform(opt.name, (opt.voca_size - 1, opt.dim))
    else:
        # use given embedding matrix
        w = init.external(opt.name, value=opt.emb)

    # The first row should be zeros and should not be updated by backprop because it corresponds to zero padding.
    emb = tf.concat(0, [tf.zeros((1, opt.dim), dtype=tf.sg_floatx), w])

    return emb
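
A hypothetical usage sketch (not from the source): building the look-up table and gathering rows with tf.nn.embedding_lookup, assuming `import sugartensor as tf`; the vocabulary size and dimensions are illustrative.

import sugartensor as tf

# embedding table for a 100-word vocabulary with 50-dimensional vectors
emb = sg_emb(name='emb', voca_size=100, dim=50)  # shape (100, 50); row 0 is all zeros

# integer ids (0 is reserved for padding) looked up against the table
ids = tf.placeholder(tf.int32, shape=(8, 20))
vec = tf.nn.embedding_lookup(emb, ids)           # shape (8, 20, 50)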
Example #3
def sg_aconv(tensor, opt):
    r"""Applies a 2-D atrous (or dilated) convolution.
    
    Args:
      tensor: A 4-D `Tensor`.
      size: A tuple or list of integers of length 2 representing `[kernel height, kernel width]`.
        Can be an int if both values are the same.
        If not specified, (3, 3) is set automatically.
      rate: A positive int32. The stride with which we sample input values across
        the `height` and `width` dimensions. Default is 2.
      in_dim: An `integer`. The size of input dimension.
      dim: An `integer`. The size of output dimension.
      pad: Either `SAME` (Default) or `VALID`.
      bias: Boolean. Whether to add biases to the filters.
            
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # default options
    opt += tf.sg_opt(size=(3, 3), rate=2, pad='SAME')
    opt.size = opt.size if isinstance(opt.size,
                                      (tuple, list)) else [opt.size, opt.size]

    # parameter initialize
    w = init.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply convolution
    out = tf.nn.atrous_conv2d(tensor, w, rate=opt.rate,
                              padding=opt.pad) + (b if opt.bias else 0)

    return out
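
A hypothetical usage sketch (not from the source) for the dilated 2-D convolution above, assuming `import sugartensor as tf` and `sg_initializer` available as `init`; shapes and option values are illustrative.

import sugartensor as tf

# a batch of 16 feature maps of size 32x32 with 64 channels
x = tf.placeholder(tf.sg_floatx, shape=(16, 32, 32, 64))

# 3x3 kernel sampled with dilation rate 2; pad='SAME' keeps the spatial size
y = sg_aconv(x, tf.sg_opt(in_dim=64, dim=128, rate=2, bias=True))
# y has shape (16, 32, 32, 128)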
Example #4
def sg_aconv1d(tensor, opt):

    # default options
    opt += tf.sg_opt(size=(2 if opt.causal else 3), rate=1, pad='SAME')

    # parameter initialize
    w = init.he_uniform('W', (1, opt.size, opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    if opt.causal:
        # pre-padding for causality
        if opt.pad == 'SAME':
            pad_len = (opt.size - 1) * opt.rate  # padding size
            x = tf.pad(tensor,
                       [[0, 0], [pad_len, 0], [0, 0]]).sg_expand_dims(dim=1)
        else:
            x = tensor.sg_expand_dims(dim=1)
        # apply 2d convolution
        out = tf.nn.atrous_conv2d(x, w, rate=opt.rate,
                                  padding='VALID') + (b if opt.bias else 0)
    else:
        # apply 2d convolution
        out = tf.nn.atrous_conv2d(
            tensor.sg_expand_dims(dim=1), w, rate=opt.rate,
            padding=opt.pad) + (b if opt.bias else 0)
    # reduce dimension
    out = out.sg_squeeze(dim=1)

    return out
Example #5
def sg_upconv(tensor, opt):
    # default options
    opt += tf.sg_opt(size=(3, 3), stride=(1, 2, 2, 1), pad='SAME')
    opt.size = opt.size if isinstance(opt.size,
                                      (tuple, list)) else [opt.size, opt.size]
    opt.stride = opt.stride if isinstance(
        opt.stride, (tuple, list)) else [1, opt.stride, opt.stride, 1]
    opt.stride = [1, opt.stride[0], opt.stride[1], 1] if len(
        opt.stride) == 2 else opt.stride

    # parameter initialize
    w = init.he_uniform('W', (opt.size[0], opt.size[1], opt.dim, opt.in_dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # tedious shape handling for conv2d_transpose
    shape = tensor.get_shape().as_list()
    out_shape = [
        tf.shape(tensor)[0], shape[1] * opt.stride[1],
        shape[2] * opt.stride[2], opt.dim
    ]

    # apply convolution
    out = tf.nn.conv2d_transpose(tensor,
                                 w,
                                 output_shape=tf.pack(out_shape),
                                 strides=opt.stride,
                                 padding=opt.pad) + (b if opt.bias else 0)
    # resetting the shape is needed because conv2d_transpose() erases all static shape information.
    out.set_shape([None, out_shape[1], out_shape[2], opt.dim])

    return out
Example #6
def sg_dense(tensor, opt):
    # parameter initialize
    w = init.he_uniform('W', (opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply transform
    out = tf.matmul(tensor, w) + (b if opt.bias else 0)

    return out
Example #7
def sg_upconv(tensor, opt):
    r"""Applies a upconvolution (or convolution transpose).
    
    Args:
      tensor: A 4-D `Tensor`.
      size: A tuple or list of integers of length 2 representing `[kernel height, kernel width]`.
        Can be an int if both values are the same.
        If not specified, (3, 3) is set implicitly.
      stride: A tuple or list of integers of length 2 or 4 representing stride dimensions.
        If the length is 2, i.e., (a, b), the stride is `[1, a, b, 1]`.
        If the length is 4, i.e., (a, b, c, d), the stride is `[a, b, c, d]`.
        Can be an int, i.e., a, in which case the stride is `[1, a, a, 1]`.
        The default value is [1, 2, 2, 1].
      in_dim: A positive `integer`. The size of input dimension.
      dim: A positive `integer`. The size of output dimension.
      pad: Either `SAME` (Default) or `VALID`. 
      bias: Boolean. If True, biases are added.
            
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # default options
    opt += tf.sg_opt(size=(3, 3), stride=(1, 2, 2, 1), pad='SAME')
    opt.size = opt.size if isinstance(opt.size,
                                      (tuple, list)) else [opt.size, opt.size]
    opt.stride = opt.stride if isinstance(
        opt.stride, (tuple, list)) else [1, opt.stride, opt.stride, 1]
    opt.stride = [1, opt.stride[0], opt.stride[1], 1] if len(
        opt.stride) == 2 else opt.stride

    # parameter initialize
    w = init.he_uniform('W', (opt.size[0], opt.size[1], opt.dim, opt.in_dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # tedious shape handling for conv2d_transpose
    shape = tensor.get_shape().as_list()
    out_shape = [
        tf.shape(tensor)[0], shape[1] * opt.stride[1],
        shape[2] * opt.stride[2], opt.dim
    ]

    # apply convolution
    out = tf.nn.conv2d_transpose(tensor,
                                 w,
                                 output_shape=tf.pack(out_shape),
                                 strides=opt.stride,
                                 padding=opt.pad) + (b if opt.bias else 0)
    # resetting the shape is needed because conv2d_transpose() erases all static shape information.
    out.set_shape([None, out_shape[1], out_shape[2], opt.dim])

    return out
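
A hypothetical usage sketch (not from the source), assuming `import sugartensor as tf` and `sg_initializer` available as `init`; shapes and option values are illustrative.

import sugartensor as tf

# a batch of 16 feature maps of size 8x8 with 128 channels
x = tf.placeholder(tf.sg_floatx, shape=(16, 8, 8, 128))

# the default stride (1, 2, 2, 1) doubles height and width
y = sg_upconv(x, tf.sg_opt(in_dim=128, dim=64, size=3, bias=True))
# y has shape (16, 16, 16, 64)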
Example #8
def sg_aconv(tensor, opt):
    # default options
    opt += tf.sg_opt(size=(3, 3), rate=2, pad='VALID')
    opt.size = opt.size if isinstance(opt.size, (tuple, list)) else [opt.size, opt.size]

    # parameter initialize
    w = init.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply convolution
    out = tf.nn.atrous_conv2d(tensor, w, rate=opt.rate, padding=opt.pad) + (b if opt.bias else 0)

    return out
Example #9
def sg_conv1d(tensor, opt):
    # default options
    opt += tf.sg_opt(size=2, stride=1, pad='SAME')

    # parameter initialize
    w = init.he_uniform('W', (opt.size, opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply convolution
    out = tf.nn.conv1d(tensor, w, stride=opt.stride,
                       padding=opt.pad) + (b if opt.bias else 0)

    return out
Example #10
def sg_conv(tensor, opt):
    # default options
    opt += tf.sg_opt(size=(3, 3), stride=(1, 1, 1, 1), pad='SAME')
    opt.size = opt.size if isinstance(opt.size, (tuple, list)) else [opt.size, opt.size]
    opt.stride = opt.stride if isinstance(opt.stride, (tuple, list)) else [1, opt.stride, opt.stride, 1]
    opt.stride = [1, opt.stride[0], opt.stride[1], 1] if len(opt.stride) == 2 else opt.stride

    # parameter initialize
    w = init.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply convolution
    out = tf.nn.conv2d(tensor, w, strides=opt.stride, padding=opt.pad) + (b if opt.bias else 0)

    return out
Example #11
def sg_aconv1d(tensor, opt):
    r"""Applies 1-D atrous (or dilated) convolution.
    
    Args:
      tensor: A 3-D `Tensor`.
      causal: Boolean. If True, zeros are padded before the time axis so that
        each activation unit does not see inputs beyond its own time step.
      size: An `integer` representing the kernel width. Defaults to 2
        if `causal` is True, 3 otherwise.
      rate: A positive int32. The dilation rate, i.e., the stride with which input
        values are sampled along the time axis. Default is 1.
      in_dim: An `integer`. The size of input dimension.
      dim: An `integer`. The size of output dimension.
      pad: Either `SAME` (Default) or `VALID`.
      bias: Boolean. Whether to add biases to the filters.
            
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # default options
    opt += tf.sg_opt(size=(2 if opt.causal else 3), rate=1, pad='SAME')

    # parameter initialize
    w = init.he_uniform('W', (1, opt.size, opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    if opt.causal:
        # pre-padding for causality
        if opt.pad == 'SAME':
            pad_len = (opt.size - 1) * opt.rate  # padding size
            x = tf.pad(tensor,
                       [[0, 0], [pad_len, 0], [0, 0]]).sg_expand_dims(dim=1)
        else:
            x = tensor.sg_expand_dims(dim=1)
        # apply 2d convolution
        out = tf.nn.atrous_conv2d(x, w, rate=opt.rate,
                                  padding='VALID') + (b if opt.bias else 0)
    else:
        # apply 2d convolution
        out = tf.nn.atrous_conv2d(
            tensor.sg_expand_dims(dim=1), w, rate=opt.rate,
            padding=opt.pad) + (b if opt.bias else 0)
    # reduce dimension
    out = out.sg_squeeze(dim=1)

    return out
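
A hypothetical usage sketch (not from the source) for the causal case: with the default size of 2 and rate=4, (size - 1) * rate = 4 zero frames are prepended so that each output step depends only on current and past inputs. It assumes `import sugartensor as tf` and `sg_initializer` available as `init`; shapes are illustrative.

import sugartensor as tf

# a batch of 8 sequences of length 100 with 64 channels
x = tf.placeholder(tf.sg_floatx, shape=(8, 100, 64))

# causal dilated convolution with dilation rate 4
y = sg_aconv1d(x, tf.sg_opt(in_dim=64, dim=64, causal=True, rate=4, bias=True))
# y has shape (8, 100, 64)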
Example #12
def sg_emb(**kwargs):
    opt = tf.sg_opt(kwargs)
    assert opt.name is not None, 'name is mandatory.'

    import sg_initializer as init

    if opt.emb is None:
        # initialize embedding matrix
        assert opt.voca_size is not None, 'voca_size is mandatory.'
        assert opt.dim is not None, 'dim is mandatory.'
        w = init.he_uniform(opt.name, (opt.voca_size - 1, opt.dim))
    else:
        # use given embedding matrix
        w = init.external(opt.name, value=opt.emb)

    # The first row should be zeros and should not be updated by backprop because it corresponds to zero padding.
    emb = tf.concat(0, [tf.zeros((1, opt.dim), dtype=tf.sg_floatx), w])

    return emb
Example #13
def sg_dense(tensor, opt):
    r"""Applies a full connection.
    
    Args:
      tensor: A 2-D `Tensor`.
      in_dim: An `integer`. The size of input dimension.
      dim: An `integer`. The size of output dimension.
      bias: Boolean. If True, biases are added. 
      
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # parameter initialize
    w = init.he_uniform('W', (opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply transform
    out = tf.matmul(tensor, w) + (b if opt.bias else 0)

    return out
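
A hypothetical usage sketch (not from the source), assuming `import sugartensor as tf` and `sg_initializer` available as `init`; shapes and option values are illustrative.

import sugartensor as tf

# a batch of 32 feature vectors of size 256
x = tf.placeholder(tf.sg_floatx, shape=(32, 256))

# affine transform from 256 to 10 units
y = sg_dense(x, tf.sg_opt(in_dim=256, dim=10, bias=True))
# y has shape (32, 10)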
Example #14
def sg_conv(tensor, opt):
    r"""Applies a 2-D convolution.
    
    Args:
      tensor: A 4-D `Tensor`.
      size: A tuple or list of integers of length 2 representing `[kernel height, kernel width]`.
        Can be an int if both values are the same.
        If not specified, (3, 3) is set implicitly.
      stride: A tuple or list of integers of length 2 or 4 representing stride dimensions.
        If the length is 2, i.e., (a, b), the stride is `[1, a, b, 1]`.
        If the length is 4, i.e., (a, b, c, d), the stride is `[a, b, c, d]`.
        Can be an int, i.e., a, in which case the stride is `[1, a, a, 1]`.
        The default value is [1, 1, 1, 1].
      in_dim: An `integer`. The size of input dimension.
      dim: An `integer`. The size of output dimension.
      pad: Either `SAME` (Default) or `VALID`. 
      bias: Boolean. If True, biases are added.

    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # default options
    opt += tf.sg_opt(size=(3, 3), stride=(1, 1, 1, 1), pad='SAME')
    opt.size = opt.size if isinstance(opt.size,
                                      (tuple, list)) else [opt.size, opt.size]
    opt.stride = opt.stride if isinstance(
        opt.stride, (tuple, list)) else [1, opt.stride, opt.stride, 1]
    opt.stride = [1, opt.stride[0], opt.stride[1], 1] if len(
        opt.stride) == 2 else opt.stride

    # parameter initialize
    w = init.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply convolution
    out = tf.nn.conv2d(tensor, w, strides=opt.stride,
                       padding=opt.pad) + (b if opt.bias else 0)

    return out
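
A hypothetical usage sketch (not from the source): an int stride is expanded to `[1, a, a, 1]` as described above. It assumes `import sugartensor as tf` and `sg_initializer` available as `init`; shapes and option values are illustrative.

import sugartensor as tf

# a batch of 16 RGB images of size 28x28
x = tf.placeholder(tf.sg_floatx, shape=(16, 28, 28, 3))

# 3x3 kernel with stride 2 in both spatial dimensions
y = sg_conv(x, tf.sg_opt(in_dim=3, dim=32, size=3, stride=2, bias=True))
# with pad='SAME', y has shape (16, 14, 14, 32)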