        out = input_.sg_aconv1d_gpus(size=opt.size,
                                     rate=opt.rate,
                                     causal=opt.causal,
                                     act='leaky_relu',
                                     ln=opt.causal,
                                     name="aconv_" + opt.name)

        # dimension recover and residual connection
        out = out.sg_conv1d_gpus(size=1, dim=in_dim,
                                 name="convo_" + opt.name) + tensor

    return out


# inject residual multiplicative block
tf.sg_inject_func(sg_res_block)
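
# tf.sg_inject_func registers the function above as a method on tf.Tensor, so
# the residual block can then be chained in sugartensor style, e.g.
# `some_tensor.sg_res_block(size=..., rate=..., causal=..., name=...)`
# (illustrative call; the exact opt fields are the ones this variant reads).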


@tf.sg_layer_func_gpus
def sg_quasi_conv1d(tensor, opt):

    opt += tf.sg_opt(is_enc=False, causal=True)

    # Split into H and H_zfo
    H = tensor[:Hp.batch_size]
    H_z = tensor[Hp.batch_size:2 * Hp.batch_size]
    H_f = tensor[2 * Hp.batch_size:3 * Hp.batch_size]
    H_o = tensor[3 * Hp.batch_size:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0
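
    # Note (assumption, not shown in this fragment): the slicing above expects the
    # caller to have stacked the four parts along the batch axis beforehand, e.g.
    #   stacked = tf.concat(axis=0, values=[H, H_z, H_f, H_o])  # (4*batch, time, dim)
    # mirroring the `ZFO = tf.concat(axis=0, values=[Z, F, O])` stack this function
    # builds at its end; in encoder mode the gate parts are simply zeroed out.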

#
# Example 2
#

    Args:
      tensor: A `Tensor` (automatically passed by decorator).
      opt:
        bn: Boolean. If True, batch normalization is applied.
        ln: Boolean. If True, layer normalization is applied.
        dout: A float of range [0, 100). A dropout rate. Default is 0.
        act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
    Returns:
      The same tensor as `tensor`.
    """
    return tensor


# inject the custom identity function
tf.sg_inject_func(identity)
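
# Minimal usage sketch (assumption, not from the source): once injected,
# `identity` becomes callable as a tensor method, with the options documented
# above (act, bn, ln, dout) applied around the pass-through by the decorator.
#
#   x = tf.ones((16, 100, 400))          # hypothetical (batch, time, dim) input
#   y = x.identity(act='tanh', ln=True)  # pass-through plus layer norm and tanh (assumed)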


# residual block
@tf.sg_sugar_func
def sg_res_block(tensor, opt):
    # default rate
    opt += tf.sg_opt(size=3, rate=1, causal=False, is_first=False, dout=0)

    # input dimension
    in_dim = tensor.get_shape().as_list()[-1]

    with tf.sg_context(name='block_%d_%d' % (opt.block, opt.rate)):
        # reduce dimension
        input_ = (tensor.sg_bypass(act='relu',
                                   ln=(not opt.is_first),

#
# Example 3
#

    O = O.sg_bypass(act="sigmoid")  # (16, 300, 320)

    # Masking
    M = tf.sign(tf.abs(H))[:, :, :1]  # (16, 300, 1) float32. 0 or 1
    Z *= M  # broadcasting
    F *= M  # broadcasting
    O *= M  # broadcasting

    # Concat
    ZFO = tf.concat(axis=0, values=[Z, F, O])

    return ZFO  # (16*3, 150, 320)


# injection
tf.sg_inject_func(sg_quasi_conv1d)
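
# Worked toy example of the masking step in sg_quasi_conv1d above (values are
# illustrative, not from the source): a time step whose first feature is zero
# (e.g. an all-zero padding embedding) gets mask 0, every other step gets 1,
# and the (batch, time, 1) mask broadcasts over the feature axis.
#
#   H_toy = tf.constant([[[0., 0.], [3., -2.]]])   # (1, 2, 2): one pad step, one real step
#   M_toy = tf.sign(tf.abs(H_toy))[:, :, :1]       # (1, 2, 1) -> [[[0.], [1.]]]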


@tf.sg_rnn_layer_func
def sg_quasi_rnn(tensor, opt):
    # Split
    if opt.att:
        H, Z, F, O = tf.split(axis=0, num_or_size_splits=4,
                              value=tensor)  # (16, 150, 320) for all
    else:
        Z, F, O = tf.split(axis=0, num_or_size_splits=3,
                           value=tensor)  # (16, 150, 320) for all
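
    # The 3-way split matches the (3*batch, time, dim) ZFO stack returned by
    # sg_quasi_conv1d above; with attention (opt.att) the incoming stack is
    # assumed to also carry H as a fourth leading block, recovered first.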

    # step func
    def step(z, f, o, c):
        '''

#
# Example 4
#

        # reduce dimension
        input_ = (tensor
                  .sg_bypass(act='relu', ln=(not opt.is_first), name='bypass')  # no layer norm for the first block
                  .sg_conv1d(size=1, dim=in_dim // 2, act='relu', ln=True, name='conv_in'))

        # 1xk conv dilated
        out = (input_
               .sg_aconv1d(size=opt.size, rate=opt.rate, causal=opt.causal, act='relu', ln=True, name='aconv'))

        # dimension recover and residual connection
        out = out.sg_conv1d(size=1, dim=in_dim, name='conv_out') + tensor

    return out

# inject residual multiplicative block
tf.sg_inject_func(sg_res_block)
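
# Shape sketch for one pass through sg_res_block above, assuming an input of
# shape (batch, time, in_dim):
#   (B, T, in_dim) --conv_in (1x1)-->        (B, T, in_dim // 2)
#                  --aconv (1xk, dilated)--> (B, T, in_dim // 2)
#                  --conv_out (1x1)-->       (B, T, in_dim), then added to the input,
# so the residual connection is shape-compatible by construction.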


#
# encode graph ( atrous convolution )
#
def encode(x):

    with tf.sg_context(name='encoder'):
        res = x
        # loop dilated conv block
        for i in range(num_blocks):
            res = (res
                   .sg_res_block(size=5, block=i, rate=1, is_first=True)
                   .sg_res_block(size=5, block=i, rate=2)
                   .sg_res_block(size=5, block=i, rate=4))
            # assumption: the full encoder may continue with further doubled
            # dilation rates (e.g. 8, 16) before moving to the next block.

    return res
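
# Minimal usage sketch for the encoder (names, shapes and the embedding step
# are assumptions, not from the source):
#
#   x_emb = x_tokens.sg_lookup(emb=emb_char)   # hypothetical (batch, time, dim) embedding
#   enc_out = encode(x_emb)                    # same (batch, time, dim) shape; every
#                                              # res block preserves the channel count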


# residual block
@tf.sg_sugar_func
def sg_res_block(tensor, opt):
    # default rate
    opt += tf.sg_opt(size=3, rate=1, causal=False, is_first=False, dout=0)

    # input dimension
    in_dim = tensor.get_shape().as_list()[-1]

    with tf.sg_context(name='block_%d_%d' % (opt.block, opt.rate)):
        # reduce dimension
        input_ = (tensor
                  .sg_bypass(act='relu', ln=(not opt.is_first), name='bypass')