Example #1
def dynamic_L(x):
    '''Attention mechanism to combine the information over the time axis,
       from https://arxiv.org/pdf/1612.01627.pdf.

    Args:
        x: a tensor with shape [batch, time, dimension]

    Returns:
        a tensor with shape [batch, dimension]

    Raises:
    '''
    key_0 = tf.get_variable(
        name='key',
        shape=[x.shape[-1]],
        dtype=tf.float32,
        initializer=tf.random_uniform_initializer(
            -tf.sqrt(6./tf.cast(x.shape[-1], tf.float32)),
            tf.sqrt(6./tf.cast(x.shape[-1], tf.float32))))

    key = op.dense(x, add_bias=False) #[batch, time, dimension]
    weight = tf.reduce_sum(tf.multiply(key, key_0), axis=-1)  #[batch, time]
    weight = tf.expand_dims(tf.nn.softmax(weight), -1)  #[batch, time, 1]

    L = tf.reduce_sum(tf.multiply(x, weight), axis=1) #[batch, dimension]
    return L 
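
A minimal, self-contained sketch of the same attentive pooling, assuming plain TensorFlow 1.x ops stand in for the project-specific op.dense helper (a bias-free affine map); the name attention_pool and the placeholder shape are illustrative.

import tensorflow as tf

def attention_pool(x):
    # Collapse [batch, time, dim] to [batch, dim] with a learned query vector.
    dim = x.get_shape().as_list()[-1]
    key = tf.layers.dense(x, dim, use_bias=False)          # [batch, time, dim]
    query = tf.get_variable('query', shape=[dim], dtype=tf.float32,
                            initializer=tf.glorot_uniform_initializer())
    scores = tf.reduce_sum(key * query, axis=-1)           # [batch, time]
    weights = tf.expand_dims(tf.nn.softmax(scores), -1)    # [batch, time, 1]
    return tf.reduce_sum(x * weights, axis=1)              # [batch, dim]

x = tf.placeholder(tf.float32, [None, 10, 64])
pooled = attention_pool(x)                                 # [None, 64]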
Example #2
def FFN(x, out_dimension_0=None, out_dimension_1=None):
    '''Apply two densely connected layers, max(0, x*W0 + b0)*W1 + b1.

    Args:
        x: a tensor with shape [batch, time, dimension]
        out_dimension_0: output dimension of the first dense layer
        out_dimension_1: output dimension of the second dense layer

    Returns:
        a tensor with shape [batch, time, out_dimension]

    Raises:
    '''
    with tf.variable_scope('FFN_1'):
        y = op.dense(x, out_dimension_0) #x*W0+b0
        y = tf.nn.relu(y)
    with tf.variable_scope('FFN_2'):    # max(0, x*W0 + b0)*W1 + b1
        z = op.dense(y, out_dimension_1)
    return z
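
For comparison, a self-contained sketch of the same position-wise feed-forward block, assuming op.dense behaves like TF 1.x tf.layers.dense; the hidden and output widths in the usage line are illustrative.

import tensorflow as tf

def ffn(x, out_dimension_0, out_dimension_1):
    # max(0, x*W0 + b0)*W1 + b1, applied to the last axis of [batch, time, dim].
    with tf.variable_scope('FFN_1'):
        y = tf.nn.relu(tf.layers.dense(x, out_dimension_0))
    with tf.variable_scope('FFN_2'):
        return tf.layers.dense(y, out_dimension_1)

x = tf.placeholder(tf.float32, [None, 10, 64])
z = ffn(x, out_dimension_0=256, out_dimension_1=64)        # [None, 10, 64]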
Example #3
def __call__(self, z, reuse=False):
    with tf.variable_scope(self.name) as vs:
        if reuse:
            vs.reuse_variables()
        # Project the latent vector and reshape it into the first feature map.
        ch = self.fmd[0] * self.fmd[0] * self.channela[0]
        hf = dense(z, ch, use_wscale=False)
        hc = tf.nn.relu(tf.reshape(hf, [-1, self.fmd[0], self.fmd[0], self.channela[0]]))  # 4x4
        # Upsample through a stack of transposed convolutions.
        fmd1 = tf.nn.relu(deconv2d(hc, self.channela[1], k=self.kersize[0], use_wscale=False, name='deconv1'))   # 8x8
        fmd2 = tf.nn.relu(deconv2d(fmd1, self.channela[2], k=self.kersize[1], use_wscale=False, name='deconv2'))  # 16x16
        fmd3 = tf.nn.relu(deconv2d(fmd2, self.channela[3], k=self.kersize[2], use_wscale=False, name='deconv3'))  # 32x32
        gx = tf.nn.sigmoid(deconv2d(fmd3, 3, k=self.kersize[3], use_wscale=False, name='deconv4'))                # 64x64
        return gx
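
A self-contained sketch of the same 4x4 -> 64x64 generator and its reuse pattern, assuming dense and deconv2d roughly correspond to TF 1.x tf.layers.dense and tf.layers.conv2d_transpose with stride 2; the channel counts, kernel size, and latent dimension are illustrative, not the values stored in self.channela / self.kersize.

import tensorflow as tf

def generator(z, reuse=False):
    with tf.variable_scope('generator', reuse=reuse):
        h = tf.layers.dense(z, 4 * 4 * 256)
        h = tf.nn.relu(tf.reshape(h, [-1, 4, 4, 256]))                        # 4x4
        h = tf.nn.relu(tf.layers.conv2d_transpose(h, 128, 5, 2, 'same'))      # 8x8
        h = tf.nn.relu(tf.layers.conv2d_transpose(h, 64, 5, 2, 'same'))       # 16x16
        h = tf.nn.relu(tf.layers.conv2d_transpose(h, 32, 5, 2, 'same'))       # 32x32
        return tf.nn.sigmoid(tf.layers.conv2d_transpose(h, 3, 5, 2, 'same'))  # 64x64

z = tf.placeholder(tf.float32, [None, 128])
x_fake = generator(z)                # [None, 64, 64, 3], creates the variables
x_fake2 = generator(z, reuse=True)   # same weights reused for a second pass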
Example #4
def __call__(self, z, reuse=False):
    with tf.variable_scope(self.name) as vs:
        if reuse:
            vs.reuse_variables()
        # Project the latent vector and reshape it into the first feature map.
        ch = self.fmd[0] * self.fmd[0] * self.channelg[0]
        hf = dense(z, ch, use_wscale=False)
        hc = tf.nn.relu(tf.reshape(hf, [-1, self.fmd[0], self.fmd[0], self.channelg[0]]))  # 4x4
        # Upsample through a stack of transposed convolutions.
        fmd1 = tf.nn.relu(deconv2d(hc, self.channelg[1], k=self.kersize[0], use_wscale=False, name='deconv1'))   # 8x8
        fmd2 = tf.nn.relu(deconv2d(fmd1, self.channelg[2], k=self.kersize[1], use_wscale=False, name='deconv2'))  # 16x16
        fmd3 = tf.nn.relu(deconv2d(fmd2, self.channelg[3], k=self.kersize[2], use_wscale=False, name='deconv3'))  # 32x32
        # Two-channel tanh output, flattened to [batch, 2, H*W].
        gdfq = tf.nn.tanh(deconv2d(fmd3, 2, k=self.kersize[3], use_wscale=False, name='deconv4'))                 # 64x64
        gdf = tf.reshape(gdfq, (-1, self.imgsize * self.imgsize, 2))
        gdf = tf.transpose(gdf, [0, 2, 1])
        return gdf
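
Example #4 differs from Example #3 only in its output head: a 2-channel tanh map flattened to [batch, 2, H*W]. A short sketch of just that reshaping step, with an illustrative image size and a placeholder standing in for the last deconv2d output:

import tensorflow as tf

imgsize = 64                                               # illustrative
pre = tf.placeholder(tf.float32, [None, imgsize, imgsize, 2])
gdfq = tf.nn.tanh(pre)                                     # values in [-1, 1]
gdf = tf.reshape(gdfq, (-1, imgsize * imgsize, 2))         # [batch, H*W, 2]
gdf = tf.transpose(gdf, [0, 2, 1])                         # [batch, 2, H*W]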