Example #1
def discriminator(tensor):

    # reuse flag
    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0

    with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu', reuse=reuse):
        # shared part
        shared = (tensor
                  .sg_conv(dim=64, name='conv1')
                  .sg_conv(dim=128, name='conv2')
                  .sg_flatten()
                  .sg_dense(dim=1024, name='fc1'))

        # discriminator end
        disc = shared.sg_dense(dim=1, act='linear', name='disc').sg_squeeze()

        # shared recognizer part
        recog_shared = shared.sg_dense(dim=128, name='recog')

        # categorical auxiliary classifier end
        cat = recog_shared.sg_dense(dim=cat_dim, act='linear', name='cat')

        # continuous auxiliary classifier end
        con = recog_shared.sg_dense(dim=con_dim, act='sigmoid', name='con')

        return disc, cat, con
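
A quick usage sketch for the reuse pattern above (the input shapes and the cat_dim/con_dim globals are assumptions): the first call creates the 'discriminator' variables, so the reuse flag is False; by the second call they already exist, reuse becomes True, and both calls share weights.

import sugartensor as tf  # sugartensor monkey-patches TensorFlow

real = tf.placeholder(tf.float32, [32, 28, 28, 1])   # real image batch
fake = tf.placeholder(tf.float32, [32, 28, 28, 1])   # generator output

disc_real, cat_real, con_real = discriminator(real)  # creates the variables
disc_fake, cat_fake, con_fake = discriminator(fake)  # reuses them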
Example #2
    def res_block(tensor, size, rate, block, dim=num_dim):

        with tf.sg_context(name='block_%d_%d' % (block, rate)):

            # filter convolution
            conv_filter = tensor.sg_aconv1d(size=size,
                                            rate=rate,
                                            act='tanh',
                                            bn=True,
                                            name='conv_filter')

            # gate convolution
            conv_gate = tensor.sg_aconv1d(size=size,
                                          rate=rate,
                                          act='sigmoid',
                                          bn=True,
                                          name='conv_gate')

            # output by gate multiplying
            out = conv_filter * conv_gate

            # final output
            out = out.sg_conv1d(size=1,
                                dim=dim,
                                act='tanh',
                                bn=True,
                                name='conv_out')

            # residual and skip output
            return out + tensor, out
Example #3
def sg_res_block(tensor, opt):
    # default rate
    opt += tf.sg_opt(size=3, rate=1, causal=False, is_first=False, dout=0)

    # input dimension
    in_dim = tensor.get_shape().as_list()[-1]

    with tf.sg_context(name='block_%d_%d' % (opt.block, opt.rate)):
        # reduce dimension
        input_ = (tensor
                  .sg_bypass(act='relu', bn=(not opt.is_first),
                             name='bypass')  # do not batch-normalize the first block
                  .sg_conv1d(size=1,
                             dim=in_dim // 2,
                             act='relu',
                             bn=True,
                             regularizer=reg_type,
                             name='conv_in'))

        # 1xk conv dilated
        out = (input_.sg_aconv1d(size=opt.size,
                                 rate=opt.rate,
                                 dout=opt.dout,
                                 causal=opt.causal,
                                 act='relu',
                                 bn=True,
                                 regularizer=reg_type,
                                 name='aconv'))

        # dimension recover and residual connection
        out = out.sg_conv1d(
            size=1, dim=in_dim, regularizer=reg_type, name='conv_out') + tensor

    return out
Example #4
def sg_quasi_conv1d(tensor, opt):
    opt += tf.sg_opt(is_enc=False)
    # Split into H and H_zfo
    H = tensor[:Hp.bs]
    H_z = tensor[Hp.bs:2 * Hp.bs]
    H_f = tensor[2 * Hp.bs:3 * Hp.bs]
    H_o = tensor[3 * Hp.bs:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0

    # Convolution and merging
    with tf.sg_context(act="linear",
                       causal=(not opt.is_enc),
                       bn=opt.is_enc,
                       ln=(not opt.is_enc)):
        Z = H.sg_aconv1d() + H_z  # (16, 300, 320)
        F = H.sg_aconv1d() + H_f  # (16, 300, 320)
        O = H.sg_aconv1d() + H_o  # (16, 300, 320)

    # Activation
    Z = Z.sg_bypass(act="tanh")  # (16, 300, 320)
    F = F.sg_bypass(act="sigmoid")  # (16, 300, 320)
    O = O.sg_bypass(act="sigmoid")  # (16, 300, 320)

    # Masking
    M = tf.sign(tf.abs(H))[:, :, :1]  # (16, 300, 1) float32. 0 or 1
    Z *= M  # broadcasting
    F *= M  # broadcasting
    O *= M  # broadcasting

    # Concat
    ZFO = tf.concat([Z, F, O], 0)

    return ZFO  # (16*3, 300, 320)
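
The masking step above works because tf.sign(tf.abs(H)) maps any nonzero activation to 1 and exact zeros to 0, and slicing [:, :, :1] keeps a single channel so the mask broadcasts over the hidden dimension. A minimal sketch of the same idea with toy values (not from the original code):

import numpy as np

H = np.array([[[0.5, -2.0],    # a real time step
               [0.0,  0.0]]])  # a padded time step
M = np.sign(np.abs(H))[:, :, :1]  # shape (1, 2, 1): [[[1.], [0.]]]
print(H * M)                      # the padded step stays zero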
Example #5
    def __call__(self, x_t, state, size, scope=None, reuse_vars=False):

        (prev_c, prev_h) = state
        scope = scope or tf.get_variable_scope()
        print("____reuse_______", reuse_vars)
        with tf.variable_scope(scope, reuse=True):
            w_ic = tf.get_variable("w_ic")
            w_fc = tf.get_variable("w_fc")
            w_oc = tf.get_variable("w_oc")

        with tf.sg_context(dev=self._dev, reuse=reuse_vars):
            i = (x_t.sg_conv1d_gpus(name="ix_", size=size) +
                 prev_h.sg_conv1d_gpus(name="ih_", size=size) +
                 prev_c * w_ic)

            f = (x_t.sg_aconv1d_gpus(name="fx_", size=size) +
                 prev_h.sg_aconv1d_gpus(name="fh_", size=size) +
                 prev_c * w_fc)

            c = (x_t.sg_conv1d_gpus(name="cx_", size=size) +
                 prev_h.sg_conv1d_gpus(name="ch_", size=size))

            o = (x_t.sg_conv1d_gpus(name="ox_", size=size) +
                 prev_h.sg_conv1d_gpus(name="oh_", size=size) +
                 prev_c * w_oc)

        new_c = prev_c * tf.sigmoid(f) + tf.sigmoid(i) * self._activation(c)
        new_h = self._activation(new_c) * tf.sigmoid(o)

        return (new_c, new_h)
Example #6
def decode(x, voca_size):

    with tf.sg_context(name='decoder'):
        res = x
        # loop dilated causal conv block
        for i in range(num_blocks):
            res = (res
                   .sg_res_block(size=3, block=i, rate=1, causal=True, is_first=True)
                   .sg_res_block(size=3, block=i, rate=2, causal=True)
                   .sg_res_block(size=3, block=i, rate=4, causal=True)
                   .sg_res_block(size=3, block=i, rate=8, causal=True)
                   .sg_res_block(size=3, block=i, rate=16, causal=True))

        # final fully convolution layer for softmax
        res = res.sg_conv1d(size=1, dim=voca_size, name='conv_final')

    return res
Example #7
	def forward(self, inputs):
		# reuse = len([t for t in tf.global_variables() if t.name.startswith(self.scope)]) > 0
		with tf.sg_context(scope=self.scope, act='sigmoid', bn=False):
			self.network['predict'] = (inputs.sg_flatten()
											.sg_dense(dim=20, name='fc1')
											.sg_dense(dim=10, act='linear', name='predict'))

			return self.network['predict']
Example #8
def acnn_classify(x, num_classes, test=False, causal=False):
    with tf.sg_context(name='acnn_classify'):
        dropout = 0 if test else default_dout
        res = x.sg_conv1d(size=1,
                          dim=latent_dim,
                          ln=True,
                          regularizer=reg_type,
                          name='conv_input_formatter')

        # loop dilated causal conv block
        for i in range(num_blocks):
            res = (res
                   .sg_res_block(size=3, block=i, rate=1, causal=causal, dout=dropout, is_first=True)
                   .sg_res_block(size=3, block=i, rate=2, causal=causal, dout=dropout)
                   .sg_res_block(size=3, block=i, rate=4, causal=causal, dout=dropout)
                   .sg_res_block(size=3, block=i, rate=8, causal=causal, dout=dropout)
                   .sg_res_block(size=3, block=i, rate=16, causal=causal, dout=dropout))

        in_dim = res.get_shape().as_list()[-1]

        res = res.sg_conv1d(size=1,
                            dim=in_dim,
                            dout=dropout,
                            act='relu',
                            ln=True,
                            regularizer=reg_type,
                            name='conv_comress')

        # fully convolution layer
        res = res.sg_conv1d(size=1,
                            dim=num_classes,
                            dout=dropout,
                            act='relu',
                            ln=True,
                            regularizer=reg_type,
                            name='conv_final').sg_softmax()

        return res
Example #9
def Net(aa, yt, x):
    s = aa.shape[1]
    with tf.sg_context(name='NNReg',
                       stride=1,
                       act='leaky_relu',
                       bn=True,
                       reuse=tf.AUTO_REUSE):
        yt = tf.expand_dims(yt, 2)

        v1 = tf.expand_dims(x, 2).sg_conv(dim=16,
                                          size=(1, 1),
                                          name='gen9',
                                          pad="SAME",
                                          bn=True)
        v2 = v1.sg_conv(dim=64, size=(1, 1), name='gen1', pad="SAME", bn=True)
        v3 = v2.sg_conv(dim=128, size=(1, 1), name='gen2', pad="SAME", bn=True)
        v4 = v3.sg_conv(dim=256, size=(1, 1), name='gen3', pad="SAME", bn=True)
        v5 = v4.sg_conv(dim=512, size=(1, 1), name='gen4', pad="SAME", bn=True)
        v5 = tf.tile(tf.expand_dims(tf.reduce_max(v5, axis=1), axis=1),
                     [1, s, 1, 1])
        vv5 = v5

        v1 = yt.sg_conv(dim=16, size=(1, 1), name='gen99', pad="SAME", bn=True)
        v2 = v1.sg_conv(dim=64, size=(1, 1), name='gen11', pad="SAME", bn=True)
        v3 = v2.sg_conv(dim=128,
                        size=(1, 1),
                        name='gen22',
                        pad="SAME",
                        bn=True)
        v4 = v3.sg_conv(dim=256,
                        size=(1, 1),
                        name='gen33',
                        pad="SAME",
                        bn=True)
        v5 = v4.sg_conv(dim=512,
                        size=(1, 1),
                        name='gen44',
                        pad="SAME",
                        bn=True)
        v5 = tf.tile(tf.expand_dims(tf.reduce_max(v5, axis=1), axis=1),
                     [1, s, 1, 1])

        ff = tf.concat([tf.expand_dims(aa, 2), v5], axis=-1)
        ff = tf.concat([ff, vv5], axis=-1)
        f1 = ff.sg_conv(dim=256, size=(1, 1), name='f1', pad="SAME", bn=True)
        f2 = f1.sg_conv(dim=128, size=(1, 1), name='f2', pad="SAME", bn=True)

        f3 = f2.sg_conv(dim=3,
                        size=(1, 1),
                        name='f3',
                        pad="SAME",
                        bn=False,
                        act="linear")
        f3 = tf.squeeze(f3, axis=2)

    return f3
Example #10
def sg_densenet_layer(x, opt):
    r"""Applies basic architecture of densenet layer.

    Note that the fc layers in the original architecture
      will be replaced with fully convolutional layers.
      For convenience, We still call them fc layers, though.

    Args:
      x: A `Tensor`.
      opt:
          dim: An integer. Dimension for this densenet layer.
          num: Number of times to repeat.
          act: String. The name of the activation function. Default is 'relu'.
          trans: Boolean. If True (default), a transition layer will be applied.
          reuse: Boolean (optional). If True, all variables will be loaded from the previous network.
          name: String (optional). Used as the convolution layer prefix.

    Returns:
      A `Tensor`.
    """
    assert opt.dim is not None, 'dim is mandatory.'
    assert opt.num is not None, 'num is mandatory.'

    # default stride
    opt += tf.sg_opt(stride=1, act='relu', trans=True)

    # format convolutional layer name
    def cname(index):
        return opt.name if opt.name is None else opt.name + '_%d' % index

    # dense layer
    with tf.sg_context(bias=False, reuse=opt.reuse):
        out = x
        for i in range(opt.num):
            # dense block
            out_new = (out
                       .sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 1))
                       .sg_conv(dim=opt.dim // 4, size=1, act=opt.act, bn=True, name=cname(3 * i + 2))
                       .sg_conv(dim=opt.dim, size=3, name=cname(3 * i + 3)))
            out = tf.concat([out_new, out], 3)

        # transition layer
        if opt.trans:
            out = (out
                   .sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 4))
                   .sg_conv(size=1, name=cname(3 * i + 5))
                   .sg_pool(avg=True))

    return out
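
A direct-call sketch against the (x, opt) signature above (shapes are illustrative; tf.sg_opt is the option container used throughout these examples):

import sugartensor as tf

x = tf.placeholder(tf.float32, [16, 32, 32, 64])
y = sg_densenet_layer(x, tf.sg_opt(dim=32, num=4, name='dense1'))

Each of the num iterations concatenates dim new channels onto its input (64 + 4 * 32 = 192 channels here), and the transition layer's average pooling then halves the spatial resolution.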
Example #11
def discriminator(x):

    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu', bn=True, reuse=reuse):
        res = (x.sg_conv(dim=64, name='conv_1')
                .sg_conv(dim=128, name='conv_2')
                .sg_upconv(dim=64, name='conv_3')
                .sg_upconv(dim=1, act='linear', name='conv_4'))

    return res
Example #12
def get_logit(x, voca_size):

    # residual block
    def res_block(tensor, size, rate, block, dim=num_dim):

        with tf.sg_context(name='block_%d_%d' % (block, rate)):

            # filter convolution
            conv_filter = tensor.sg_aconv1d(size=size, rate=rate, act='tanh', bn=True, name='conv_filter')

            # gate convolution
            conv_gate = tensor.sg_aconv1d(size=size, rate=rate,  act='sigmoid', bn=True, name='conv_gate')

            # output by gate multiplying
            out = conv_filter * conv_gate

            # final output
            out = out.sg_conv1d(size=1, dim=dim, act='tanh', bn=True, name='conv_out')

            # residual and skip output
            return out + tensor, out

    # expand dimension
    with tf.sg_context(name='front'):
        z = x.sg_conv1d(size=1, dim=num_dim, act='tanh', bn=True, name='conv_in')
        print "le premier z",z
    # dilated conv block loop
    skip = 0  # skip connections
    for i in range(num_blocks):
        for r in [1, 2, 4, 8, 16]:
            z, s = res_block(z, size=7, rate=r, block=i)
            print "z,s",z,s
            skip += s

    # final logit layers
    with tf.sg_context(name='logit'):
        logit = (skip
                 .sg_conv1d(size=1, act='tanh', bn=True, name='conv_1')
                 .sg_conv1d(size=1, dim=voca_size, name='conv_2'))
        print "logit",logit

    return logit
Example #13
def classifier(x, num_classes, voca_size, test=False):
    with tf.sg_context(name='classifier'):
        dropout = 0 if test else default_dout
        res = x.sg_conv1d(size=1,
                          dim=latent_dim,
                          bn=True,
                          regularizer=reg_type,
                          name='decompressor')

        # loop dilated causal conv block
        for i in range(num_blocks):
            res = (res
                   .sg_res_block(size=8, block=i, rate=1, causal=False, is_first=True)
                   .sg_res_block(size=8, block=i, rate=2, causal=False)
                   .sg_res_block(size=8, block=i, rate=4, causal=False)
                   .sg_res_block(size=5, block=i, rate=8, causal=False)
                   .sg_res_block(size=5, block=i, rate=16, causal=False))

        in_dim = res.get_shape().as_list()[-1]
        res = res.sg_conv1d(size=1,
                            dim=in_dim,
                            dout=dropout,
                            bn=True,
                            regularizer=reg_type,
                            name='conv_dout_final')

        # final fully convolution layer for softmax
        res = res.sg_conv1d(size=1,
                            dim=in_dim // 2,
                            dout=dropout,
                            act='relu',
                            bn=True,
                            regularizer=reg_type,
                            name='conv_relu_final')

        # perform max over time pooling
        res = res.sg_max(axis=[1])

        res = res.sg_dense(dim=num_classes, name='fc_layer')

    return res
Example #14
def get_loss(opt):

    # conv layers
    with tf.sg_context(name='convs', act='relu', bn=True):
        conv = (opt.input[opt.gpu_index]
                .sg_conv(dim=16, name='conv1').sg_pool()
                .sg_conv(dim=32, name='conv2').sg_pool()
                .sg_conv(dim=32, name='conv3').sg_pool())

    # fc layers
    with tf.sg_context(name='fcs', act='relu', bn=True):
        logit = (conv.sg_flatten()
                 .sg_dense(dim=256, name='fc1')
                 .sg_dense(dim=10, act='linear', bn=False, name='fc2'))

        # cross entropy loss with logit
        return logit.sg_ce(target=opt.target[opt.gpu_index])
Example #15
def get_logit(x, voca_size):
    def res_block(tensor, size, rate, block, dim=num_dim):

        with tf.sg_context(name='block_%d_%d' % (block, rate)):
            conv_filter = tensor.sg_aconv1d(size=size,
                                            rate=rate,
                                            act='tanh',
                                            bn=True,
                                            name='conv_filter')

            conv_gate = tensor.sg_aconv1d(size=size,
                                          rate=rate,
                                          act='sigmoid',
                                          bn=True,
                                          name='conv_gate')
            out = conv_filter * conv_gate
            out = out.sg_conv1d(size=1,
                                dim=dim,
                                act='tanh',
                                bn=True,
                                name='conv_out')
            return out + tensor, out

    with tf.sg_context(name='front'):
        z = x.sg_conv1d(size=1,
                        dim=num_dim,
                        act='tanh',
                        bn=True,
                        name='conv_in')
    skip = 0
    for i in range(num_blocks):
        for r in [1, 2, 4, 8, 16]:
            z, s = res_block(z, size=7, rate=r, block=i)
            skip += s
    with tf.sg_context(name='logit'):
        logit = (skip
                 .sg_conv1d(size=1, act='tanh', bn=True, name='conv_1')
                 .sg_conv1d(size=1, dim=voca_size, name='conv_2'))

    return logit
Example #16
def generator(x):

    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0
    with tf.sg_context(name='generator', size=4, stride=2, act='leaky_relu', bn=True, reuse=reuse):

        # generator network
        res = (x.sg_dense(dim=1024, name='fc_1')
               .sg_dense(dim=7*7*128, name='fc_2')
               .sg_reshape(shape=(-1, 7, 7, 128))
               .sg_upconv(dim=64, name='conv_1')
               .sg_upconv(dim=1, act='sigmoid', bn=False, name='conv_2'))
    return res
Example #17
def discriminator(tensor):
    # reuse flag
    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu', reuse=reuse):
        res = (tensor
               .sg_conv(dim=64, name='conv1')
               .sg_conv(dim=128, name='conv2')
               .sg_flatten()
               .sg_dense(dim=1024, name='fc1')
               .sg_dense(dim=1, act='linear', name='fc2')
               .sg_squeeze())
        return res
Example #18
def sg_quasi_conv1d(tensor, opt):
    '''
    Args:
      tensor: A 3-D tensor of either [batch size, time steps, embedding size] for the
          original X, or [batch size * 4, time steps, embedding size] for the others.
    '''
    opt += tf.sg_opt(is_enc=False)

    # Split into H and H_zfo
    H = tensor[:Hp.batch_size]
    H_z = tensor[Hp.batch_size:2 * Hp.batch_size]
    H_f = tensor[2 * Hp.batch_size:3 * Hp.batch_size]
    H_o = tensor[3 * Hp.batch_size:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0

    # Convolution and merging
    with tf.sg_context(size=opt.size, act="linear", causal=(not opt.is_enc)):
        Z = H.sg_aconv1d() + H_z  # (16, 150, 320)
        F = H.sg_aconv1d() + H_f  # (16, 150, 320)
        O = H.sg_aconv1d() + H_o  # (16, 150, 320)

    # Activation
    with tf.sg_context(ln=True):
        Z = Z.sg_bypass(act="tanh")  # (16, 150, 320)
        F = F.sg_bypass(act="sigmoid")  # (16, 150, 320)
        O = O.sg_bypass(act="sigmoid")  # (16, 150, 320)

    # Masking
    M = tf.sign(tf.abs(tf.reduce_sum(
        H, axis=-1, keep_dims=True)))  # (16, 150, 1) float32. 0 or 1
    Z *= M  # broadcasting
    F *= M  # broadcasting
    O *= M  # broadcasting

    # Concat
    ZFO = tf.concat([Z, F, O], 0)

    return ZFO  # (16*3, 150, 320)
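
The slicing at the top of this function implies the caller stacks four same-shaped tensors along the batch axis. A minimal construction sketch (assuming Hp.batch_size is set; the zero tensors stand in for real activations):

batch, steps, dim = Hp.batch_size, 150, 320
H   = tf.zeros([batch, steps, dim])  # original input X
H_z = tf.zeros([batch, steps, dim])  # update pre-activations
H_f = tf.zeros([batch, steps, dim])  # forget pre-activations
H_o = tf.zeros([batch, steps, dim])  # output pre-activations

stacked = tf.concat([H, H_z, H_f, H_o], 0)         # (batch * 4, steps, dim)
zfo = sg_quasi_conv1d(stacked, tf.sg_opt(size=3))  # (batch * 3, steps, dim)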
Example #19
def sg_vgg_16(x, opt):

    # conv1
    with tf.sg_context(name='conv1', act='relu'):
        y = x.sg_conv(dim=64).sg_pool().sg_conv().sg_pool()

    # conv2
    with tf.sg_context(name='conv2', act='relu'):
        y = y.sg_conv(dim=128).sg_pool().sg_conv().sg_pool()

    # conv3
    with tf.sg_context(name='conv3', act='relu'):
        y = y.sg_conv(dim=256).sg_pool().sg_conv().sg_pool().sg_conv().sg_pool()

    # conv4
    with tf.sg_context(name='conv4', act='relu'):
        y = y.sg_conv(dim=512).sg_pool().sg_conv().sg_pool().sg_conv().sg_pool()

    # conv5
    with tf.sg_context(name='conv5', act='relu'):
        y = y.sg_conv(dim=512).sg_pool().sg_conv().sg_pool().sg_conv().sg_pool()

    # fc6~7
    with tf.sg_context(act='relu', dout=opt.dout):
        y = y.sg_flatten().sg_dense(dim=4096, name='fc6')
        y = y.sg_dense(dim=4096, name='fc7')

    # final fc8
    y = y.sg_dense(dim=10, name='fc8')

    return y
Example #20
def get_logit(x, voca_size):

    # residual block
    def res_block(tensor, size, rate, block, dim=num_dim):

        with tf.sg_context(name='block_%d_%d' % (block, rate)):

            # filter convolution
            conv_filter = tensor.sg_aconv1d(size=size, rate=rate, act='tanh', bn=True, name='conv_filter')

            # gate convolution
            conv_gate = tensor.sg_aconv1d(size=size, rate=rate,  act='sigmoid', bn=True, name='conv_gate')

            # output by gate multiplying
            out = conv_filter * conv_gate

            # final output
            out = out.sg_conv1d(size=1, dim=dim, act='tanh', bn=True, name='conv_out')

            # residual and skip output
            return out + tensor, out

    # expand dimension
    with tf.sg_context(name='front'):
        z = x.sg_conv1d(size=1, dim=num_dim, act='tanh', bn=True, name='conv_in')

    # dilated conv block loop
    skip = 0  # skip connections
    for i in range(num_blocks):
        for r in [1, 2, 4, 8, 16]:
            z, s = res_block(z, size=7, rate=r, block=i)
            skip += s

    # final logit layers
    with tf.sg_context(name='logit'):
        logit = (skip
                 .sg_conv1d(size=1, act='tanh', bn=True, name='conv_1')
                 .sg_conv1d(size=1, dim=voca_size, name='conv_2'))

    return logit
Example #21
    def __init__(self, mode="train"):
        '''
        Args:
          mode: A string. Either "train" or "test"
        '''
        self.char2idx, self.idx2char = load_char_vocab()
        self.word2idx, self.idx2word = load_word_vocab()

        if mode == "train":
            self.x, self.y, self.num_batch = get_batch_data()
        else:
            self.x = tf.placeholder(tf.int32, [None, Hyperparams.seqlen])

        self.emb_x = tf.sg_emb(name='emb_x',
                               voca_size=len(self.char2idx),
                               dim=Hyperparams.embed_dim)
        self.enc = self.x.sg_lookup(emb=self.emb_x)

        with tf.sg_context(size=5, act='relu', bn=True):
            for _ in range(20):
                dim = self.enc.get_shape().as_list()[-1]
                self.enc += self.enc.sg_conv1d(dim=dim)  # (64, 50, 300) float32

        self.enc = self.enc.sg_conv1d(size=1,
                                      dim=len(self.word2idx),
                                      act='linear',
                                      bn=False)  # (64, 50, 21293) float32

        #         self.logits = self.enc.sg_mean(dims=[1], keep_dims=False) # (64, 21293) float32

        # Weighted Sum. Updated on Feb. 15, 2017.
        def make_weights(size):
            weights = tf.range(1, size + 1, dtype=tf.float32)
            weights *= 1. / ((1 + size) * size // 2)
            weights = tf.expand_dims(weights, 0)
            weights = tf.expand_dims(weights, -1)
            return weights

        self.weights = make_weights(Hyperparams.seqlen)  # (1, 50, 1)
        self.enc *= self.weights  # Broadcasting
        self.logits = self.enc.sg_sum(axis=[1], keep_dims=False)  # (64, 21293)

        if mode == "train":
            self.ce = self.logits.sg_ce(target=self.y,
                                        mask=False,
                                        one_hot=False)
            self.istarget = tf.not_equal(self.y, tf.ones_like(
                self.y)).sg_float()  # 1: Unknown
            self.reduced_loss = ((self.ce * self.istarget).sg_sum()) / (
                self.istarget.sg_sum() + 1e-5)
            tf.sg_summary_loss(self.reduced_loss, "reduced_loss")
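
The make_weights helper above builds linearly increasing position weights that sum to 1: the raw weights 1..size are divided by the triangular number (1 + size) * size / 2, so later characters contribute more to the weighted sum. A quick check with a toy size of 4:

weights = [i / ((1 + 4) * 4 // 2) for i in range(1, 5)]
print(weights)       # [0.1, 0.2, 0.3, 0.4]
print(sum(weights))  # 1.0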
Example #22
def generator(tensor):

    # reuse flag
    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0

    with tf.sg_context(name='generator', size=4, stride=2, act='relu', bn=True, reuse=reuse):
        res = (tensor
               .sg_dense(dim=1024, name='fc1')
               .sg_dense(dim=7*7*128, name='fc2')
               .sg_reshape(shape=(-1, 7, 7, 128))
               .sg_upconv(dim=64, name='conv1')
               .sg_upconv(dim=1, act='sigmoid', bn=False, name='conv2'))
    return res
Example #23
def sg_vgg_19(tensor, opt):
    r"""Applies vgg 19 model.
    Note that the fc layers in the original architecture 
      will be replaced with fully convolutional layers.
      For convenience, We still call them fc layers, though.
    
    Args:
      tensor: A `Tensor`.
      num_class: number of class.
      conv_only: Boolean. If True, fc layers are not applied.
      squeeze: Boolean. If True, the dimensions with size 1 in the final outputs will be removed.
    
    Returns:
      A `Tesnor`. 
    """
    opt += tf.sg_opt(num_class=1000, conv_only=False, squeeze=True)

    # convolution layers
    with tf.sg_context(name='conv', act='relu'):
        conv = (tensor
                .sg_conv(dim=64).sg_conv().sg_pool()
                .sg_conv(dim=128).sg_conv().sg_pool()
                .sg_conv(dim=256).sg_conv().sg_conv().sg_conv().sg_pool()
                .sg_conv(dim=512).sg_conv().sg_conv().sg_conv().sg_pool()
                .sg_conv(dim=512).sg_conv().sg_conv().sg_conv().sg_pool())

    # fully convolution layers
    with tf.sg_context(name='fc', act='relu', pad='VALID'):
        fc = (conv
              .sg_conv(dim=4096, size=7, dout=opt.dout)
              .sg_conv(dim=4096, size=1, dout=opt.dout)
              .sg_conv(dim=opt.num_class, size=1, act='linear'))

    if opt.conv_only:
        return conv
    else:
        if opt.squeeze:
            return fc.sg_squeeze(dim=(1, 2))
        else:
            return fc
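
A direct-call sketch against the (tensor, opt) signature above (assuming 224x224 RGB inputs, the resolution VGG-19 was designed for):

import sugartensor as tf

images = tf.placeholder(tf.float32, [8, 224, 224, 3])
logit = sg_vgg_19(images, tf.sg_opt(num_class=1000, dout=0.5))
# five pools reduce 224 -> 7, the size-7 VALID conv reduces that to 1x1,
# and sg_squeeze(dim=(1, 2)) leaves a (8, 1000) logit tensor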
Example #24
def encode(x):

    with tf.sg_context(name='encoder'):
        res = x
        # loop dilated conv block
        for i in range(num_blocks):
            res = (res
                   .sg_res_block(size=5, block=i, rate=1, is_first=True)
                   .sg_res_block(size=5, block=i, rate=2)
                   .sg_res_block(size=5, block=i, rate=4)
                   .sg_res_block(size=5, block=i, rate=8)
                   .sg_res_block(size=5, block=i, rate=16))

    return res
Example #25
def sg_quasi_conv1d(tensor, opt):

    opt += tf.sg_opt(is_enc=False, causal=True)

    # Split into H and H_zfo
    H = tensor[:Hp.batch_size]
    H_z = tensor[Hp.batch_size:2 * Hp.batch_size]
    H_f = tensor[2 * Hp.batch_size:3 * Hp.batch_size]
    H_o = tensor[3 * Hp.batch_size:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0

    # Convolution and merging
    with tf.sg_context(size=opt.size,
                       act="linear",
                       causal=opt.causal and (not opt.is_enc),
                       dev=opt.dev,
                       reuse=opt.reuse_vars):
        Z = H.sg_aconv1d_gpus(name="aconvz_" + opt.name) + H_z  # (b, seqlen, hd)
        F = H.sg_aconv1d_gpus(name="aconvf_" + opt.name) + H_f  # (b, seqlen, hd)
        O = H.sg_aconv1d_gpus(name="aconvo_" + opt.name) + H_o  # (b, seqlen, hd)

    # Activation
    with tf.sg_context(dev=opt.dev, reuse=opt.reuse_vars):
        Z = Z.sg_bypass_gpus(act="tanh", name="tanhz_" + opt.name)  # (b, seqlen, hd)
        F = F.sg_bypass_gpus(act="sigmoid", name="sigmf_" + opt.name)  # (b, seqlen, hd)
        O = O.sg_bypass_gpus(act="sigmoid", name="sigmo_" + opt.name)  # (b, seqlen, hd)

    ZFO = tf.concat([Z, F, O], 0)

    return ZFO  # (batch*3, seqlen, hiddim)
Example #26
def rnn_classify(x, num_classes, is_test=False):
    with tf.sg_context(name='rnn_classify'):
        fw_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(is_test) for _ in range(num_blocks)], state_is_tuple=True)
        bw_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(is_test) for _ in range(num_blocks)], state_is_tuple=True)

        words_used_in_sent = tf.sign(tf.reduce_max(tf.abs(x), reduction_indices=2))
        length = tf.cast(tf.reduce_sum(words_used_in_sent, reduction_indices=1), tf.int32)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, x, dtype=tf.float32, sequence_length=length)
        output = tf.concat(outputs, 2).sg_reshape(shape=[-1, 2 * latent_dim])

        prediction = output.sg_dense(dim=num_classes, name='dense')
        res = tf.reshape(prediction, [x.get_shape().as_list()[0], -1, num_classes])

    return res
Example #27
def penalize_loss(gamma, lambd, tensor, tensor_n):

    # gamma * sum((tensor - tensor_n)**2) - lambd * sum(cosine_similarity(tensor, tensor_n))

    with tf.sg_context(name='penalize'):

        square = tf.reduce_sum(tf.reduce_sum(tf.square(tensor - tensor_n), 2),
                               1)

        cosine = tf.reduce_sum(
            tf.reduce_sum(
                tf.multiply(tf.nn.l2_normalize(tensor, 2),
                            tf.nn.l2_normalize(tensor_n, 2)), 2), 1)

        return gamma * square - lambd * cosine
Example #28
def rnn_classify(x, num_classes, is_test=False):
    with tf.sg_context(name='rnn_classify'):
        fw_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(is_test) for _ in range(num_blocks)], state_is_tuple=True)
        bw_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(is_test) for _ in range(num_blocks)], state_is_tuple=True)

        words_used_in_sent = tf.sign(tf.reduce_max(tf.abs(x), reduction_indices=2))
        length = tf.cast(tf.reduce_sum(words_used_in_sent, reduction_indices=1), tf.int32)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, x, dtype=tf.float32, sequence_length=length)
        output = tf.concat(outputs, 2).sg_reshape(shape=[-1, 2 * latent_dim])

        prediction = output.sg_dense(dim=num_classes, name='dense')
        res = tf.reshape(prediction, [x.get_shape().as_list()[0], -1, num_classes])

    return res
Example #29
def sg_resnet_layer(x, opt):
    r"""Applies basic architecture of residual net.
    
    Note that the fc layers in the original architecture 
      will be replaced with fully convolutional layers.
      For convenience, We still call them fc layers, though.
    
    Args:
      x: A `Tensor`.
      opt:
          dim: An integer. Dimension for this resnet layer.
          num: Number of times to repeat.
          act: String. The name of the activation function. Default is 'relu'.
          reuse: Boolean (optional). If True, all variables will be loaded from the previous network.
          name: String (optional). Used as the convolution layer prefix.

    Returns:
      A `Tensor`. 
    """
    assert opt.dim is not None, 'dim is mandatory.'
    assert opt.num is not None, 'num is mandatory.'

    # default stride
    opt += tf.sg_opt(stride=1, act='relu')

    # format convolutional layer name
    def cname(index):
        return opt.name if opt.name is None else opt.name + '_%d' % index

    with tf.sg_context(bias=False, reuse=opt.reuse):
        # 1st block
        out = (x
               .sg_bypass(act=opt.act, bn=True, name=cname(0))
               .sg_conv(dim=opt.dim, size=1, act=opt.act, stride=opt.stride, bn=True, name=cname(1))
               .sg_conv(dim=opt.dim, size=3, act=opt.act, bn=True, name=cname(2))
               .sg_conv(dim=opt.dim*4, size=1, name=cname(3)))
        out += x.sg_conv(dim=opt.dim * 4, size=1, stride=opt.stride, name=cname(4))

        # middle blocks
        for i in range(1, opt.num):
            out_new = (out
                       .sg_bypass(act=opt.act, name=cname(4 * i + 1))
                       .sg_conv(dim=opt.dim, size=1, act=opt.act, bn=True, name=cname(4 * i + 2))
                       .sg_conv(dim=opt.dim, size=3, act=opt.act, bn=True, name=cname(4 * i + 3))
                       .sg_conv(dim=opt.dim*4, size=1, name=cname(4 * i + 4)))
            out += out_new

    return out
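
As with the densenet layer, a direct-call sketch for the (x, opt) signature above (shapes are illustrative):

import sugartensor as tf

x = tf.placeholder(tf.float32, [16, 56, 56, 64])
y = sg_resnet_layer(x, tf.sg_opt(dim=64, num=3, name='res2'))
# the bottleneck's final 1x1 conv gives opt.dim * 4 = 256 output channels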
Example #30
def discriminator(tensor):
    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    with tf.sg_context(name='discriminator',
                       size=4,
                       stride=2,
                       act='leaky_relu',
                       bn=True,
                       reuse=reuse):
        res = (tensor
               .sg_dense(dim=4096, name='fc1')
               .sg_dense(dim=512, name='fc2')
               .sg_dense(dim=1, act='sigmoid', bn=False, name='fc3')
               .sg_squeeze())
        return res
Example #31
def generator(tensor):
    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0
    with tf.sg_context(name='generator',
                       size=4,
                       stride=2,
                       act='leaky_relu',
                       bn=True,
                       reuse=reuse):
        res = (tensor
               .sg_dense(dim=1024, name='fc1')
               .sg_dense(dim=4096, act='relu', bn=False, name='fc3'))

        return res
Example #32
    def res_block(tensor, size, rate, block, dim=num_dim):

        with tf.sg_context(name='block_%d_%d' % (block, rate)):

            # filter convolution
            conv_filter = tensor.sg_aconv1d(size=size, rate=rate, act='tanh', bn=True, name='conv_filter')

            # gate convolution
            conv_gate = tensor.sg_aconv1d(size=size, rate=rate,  act='sigmoid', bn=True, name='conv_gate')

            # output by gate multiplying
            out = conv_filter * conv_gate

            # final output
            out = out.sg_conv1d(size=1, dim=dim, act='tanh', bn=True, name='conv_out')

            # residual and skip output
            return out + tensor, out
Example #33
    def __init__(self, is_train=True):
        # inputs
        if is_train:
            self.X, self.Y, self.num_batch = get_batch_data()  # (16, 9, 9, 1), (16, 9, 9)
            self.X_val, self.Y_val, _ = get_batch_data(is_train=False)
        else:
            self.X = tf.placeholder(tf.float32, [None, 9, 9, 1])

        with tf.sg_context(size=3, act='relu', bn=True):
            self.logits = self.X.sg_identity()
            for _ in range(5):
                self.logits = (self.logits.sg_conv(dim=512))
            self.logits = self.logits.sg_conv(dim=10, size=1, act='linear',
                                              bn=False)  # (16, 9, 9, 10) float32

        if is_train:
            self.ce = self.logits.sg_ce(target=self.Y,
                                        mask=False)  # (16, 9, 9) dtype=float32
            self.istarget = tf.equal(
                self.X.sg_squeeze(), tf.zeros_like(self.X.sg_squeeze())
            ).sg_float()  # zeros: 1, non-zeros: 0 (16, 9, 9) dtype=float32
            self.loss = self.ce * self.istarget  # (16, 9, 9) dtype=float32
            self.reduced_loss = self.loss.sg_sum() / self.istarget.sg_sum()
            tf.sg_summary_loss(self.reduced_loss, "reduced_loss")

            # accuracy evaluation ( for train set )
            self.preds = (self.logits.sg_argmax()).sg_int()
            self.hits = tf.equal(self.preds, self.Y).sg_float()
            self.acc_train = (self.hits *
                              self.istarget).sg_sum() / self.istarget.sg_sum()

            # accuracy evaluation ( for validation set )
            self.preds_ = (self.logits.sg_reuse(
                input=self.X_val).sg_argmax()).sg_int()
            self.hits_ = tf.equal(self.preds_, self.Y_val).sg_float()
            self.istarget_ = tf.equal(self.X_val.sg_squeeze(),
                                      tf.zeros_like(
                                          self.X_val.sg_squeeze())).sg_float()
            self.acc_val = (self.hits_ *
                            self.istarget_).sg_sum() / self.istarget_.sg_sum()
Example #34
def acnn_classify(x, num_classes, test=False, causal=False):
    with tf.sg_context(name='acnn_classify'):
        dropout = 0 if test else default_dout
        res = x.sg_conv1d(size=1, dim=latent_dim, ln=True, regularizer=reg_type, name='conv_input_formatter')

        # loop dilated causal conv block
        for i in range(num_blocks):
            res = (res
                   .sg_res_block(size=3, block=i, rate=1, causal=causal, dout=dropout, is_first=True)
                   .sg_res_block(size=3, block=i, rate=2, causal=causal, dout=dropout)
                   .sg_res_block(size=3, block=i, rate=4, causal=causal, dout=dropout)
                   .sg_res_block(size=3, block=i, rate=8, causal=causal, dout=dropout)
                   .sg_res_block(size=3, block=i, rate=16, causal=causal, dout=dropout))

        in_dim = res.get_shape().as_list()[-1]

        res = res.sg_conv1d(size=1, dim=in_dim, dout=dropout, act='relu', ln=True, regularizer=reg_type,
                            name='conv_comress')

        # fully convolution layer
        res = res.sg_conv1d(size=1, dim=num_classes, dout=dropout, act='relu', ln=True, regularizer=reg_type,
                            name='conv_final').sg_softmax()

        return res
Example #35
def sg_res_block(tensor, opt):
    # default rate
    opt += tf.sg_opt(size=3, rate=1, causal=False, is_first=False, dout=0)

    # input dimension
    in_dim = tensor.get_shape().as_list()[-1]

    with tf.sg_context(name='block_%d_%d' % (opt.block, opt.rate)):
        # reduce dimension
        input_ = (tensor
                  .sg_bypass(act='relu', ln=(not opt.is_first), name='bypass')
                  .sg_conv1d(size=1, dim=in_dim // 2, act='relu', ln=True, regularizer=reg_type, name='conv_in'))

        # 1xk conv dilated
        out = (input_
               .sg_aconv1d(size=opt.size, rate=opt.rate, causal=opt.causal, act='relu', ln=True,
                           regularizer=reg_type, name='aconv'))

        # dimension recover and residual connection
        out = out.sg_conv1d(size=1, dim=in_dim, regularizer=reg_type, name='conv_out') + tensor

        # layer normalization on the residual output (sg_bypass is the identity layer)
        out = out.sg_bypass(ln=True, name='layer_norm')

    return out
Example #36
#
# create generator
#

# random class number
z_cat = tf.multinomial(tf.ones((batch_size, num_category), dtype=tf.sg_floatx) / num_category, 1).sg_squeeze()

# random seed = random categorical variable + random uniform
z = z_cat.sg_one_hot(depth=num_category).sg_concat(target=tf.random_uniform((batch_size, num_dim-num_category)))

# random continuous variable
z_cont = z[:, num_category:num_category+num_cont]

# generator network
with tf.sg_context(name='generator', size=(4, 1), stride=(2, 1), act='relu', bn=True):
    gen = (z.sg_dense(dim=1024)
           .sg_dense(dim=48*1*128)
           .sg_reshape(shape=(-1, 48, 1, 128))
           .sg_upconv(dim=64)
           .sg_upconv(dim=32)
           .sg_upconv(dim=2, act='sigmoid', bn=False))

#
# create discriminator & recognizer
#

# create real + fake image input
xx = tf.concat([x, gen], 0)

with tf.sg_context(name='discriminator', size=(4, 1), stride=(2, 1), act='leaky_relu'):
Example #37
# category variables
z = (tf.ones(batch_size, dtype=tf.sg_intx) * target_num).sg_one_hot(depth=num_category)

# continuous variables
z = z.sg_concat(target=[target_cval_1.sg_expand_dims(), target_cval_2.sg_expand_dims()])

# random seed = categorical variable + continuous variable + random uniform
z = z.sg_concat(target=tf.random_uniform((batch_size, num_dim-num_cont-num_category)))


#
# create generator
#

# generator network
with tf.sg_context(name='generator', size=(4, 1), stride=(2, 1), act='relu', bn=True):
    gen = (z.sg_dense(dim=1024)
           .sg_dense(dim=48*1*128)
           .sg_reshape(shape=(-1, 48, 1, 128))
           .sg_upconv(dim=64)
           .sg_upconv(dim=32)
           .sg_upconv(dim=2, act='sigmoid', bn=False).sg_squeeze())


#
# run generator
#
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:
        tf.sg_init(sess)
        # restore parameters
Example #38
x = data.train.image

# corrupted image
x_small = tf.image.resize_bicubic(x, (14, 14))
x_bicubic = tf.image.resize_bicubic(x_small, (28, 28)).sg_squeeze()
x_nearest = tf.image.resize_images(x_small, (28, 28), tf.image.ResizeMethod.NEAREST_NEIGHBOR).sg_squeeze()

#
# create generator
#
# I've used ESPCN scheme
# http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf
#

# generator network
with tf.sg_context(name='generator', act='relu', bn=True):
    gen = (x_small
           .sg_conv(dim=32)
           .sg_conv()
           .sg_conv(dim=4, act='sigmoid', bn=False)
           .sg_periodic_shuffle(factor=2)
           .sg_squeeze())
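
sg_periodic_shuffle(factor=2) is the ESPCN sub-pixel step: it rearranges factor**2 channels into a factor-times-larger spatial grid, so the (N, 14, 14, 4) conv output becomes (N, 28, 28, 1) before the squeeze. A sketch of the same rearrangement in plain TensorFlow (tf.depth_to_space implements it, up to channel-ordering conventions):

feat = tf.placeholder(tf.float32, [None, 14, 14, 4])
up = tf.depth_to_space(feat, block_size=2)  # (None, 28, 28, 1)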

#
# run generator
#

fig_name = 'asset/train/sample.png'
with tf.Session() as sess:
    with tf.sg_queue_context(sess):