Code example #1
  def cl_loss_from_embedding(self, embedded, return_intermediate=False):
    # Flatten the input embeddings and append a zero row so padding indices
    # can be looked up without affecting the sums below.
    self.reshape_input = tf.concat(
        [tf.reshape(self.input_data, [-1, self.args.word_dim]),
         tf.constant(np.zeros((1, self.args.word_dim), dtype=np.float32))], 0)

    # Mention feature: sum of mention-token embeddings, L2-normalized.
    input_f1 = tf.nn.l2_normalize(
        tf.reduce_sum(tf.nn.embedding_lookup(self.reshape_input, self.entMentIndex), 1), 1)
    print('input_f1:', input_f1)

    # Left- and right-context features: BiLSTM outputs, summed and normalized.
    input_f2, _, _ = self.layers['BiLSTM'](
        tf.nn.embedding_lookup(self.reshape_input, self.entCtxLeftIndex))
    input_f2 = tf.nn.l2_normalize(tf.reduce_sum(input_f2, 1), 1)
    print('input_f2:', input_f2)

    input_f3, _, _ = self.layers['BiLSTM'](
        tf.nn.embedding_lookup(self.reshape_input, self.entCtxRightIndex))
    input_f3 = tf.nn.l2_normalize(tf.reduce_sum(input_f3, 1), 1)
    print('input_f3:', input_f3)

    self.input_total = tf.nn.sigmoid(tf.concat([input_f1, input_f2, input_f3], 1))

    if self.args.dropout:
      self.input_total = tf.nn.dropout(self.input_total, self.keep_prob)

    prediction = self.layers['fullyConnect'](self.input_total, tf.nn.sigmoid)
    loss = tf.reduce_mean(
        layers_lib.classification_loss('figer', self.dense_outputdata, prediction))
    return prediction, loss
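
The tf.concat with a zero row above is a common padding trick: appending an all-zero row to the flattened embeddings lets a shared "padding" index be looked up without contributing to the sum. A minimal standalone sketch of the same idea, with hypothetical shapes and indices:

import tensorflow as tf

word_dim = 4
flat = tf.reshape(tf.random.normal([2, 3, word_dim]), [-1, word_dim])   # 6 real rows
padded = tf.concat([flat, tf.zeros([1, word_dim])], 0)                  # row 6 is all zeros
mention_idx = tf.constant([[0, 1, 6], [3, 6, 6]])                       # 6 = padding row
summed = tf.reduce_sum(tf.nn.embedding_lookup(padded, mention_idx), 1)  # padding adds nothing
normalized = tf.nn.l2_normalize(summed, 1)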
Code example #2
  def cl_loss_from_embedding(self,
                             embedded,
                             inputs=None,
                             return_intermediates=False):
    """Compute classification loss from embedding.

    Args:
      embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
      inputs: VatxtInput, defaults to self.cl_inputs.
      return_intermediates: bool, whether to return intermediate tensors or only
        the final loss.

    Returns:
      If return_intermediates is True:
        lstm_out, next_state, logits, loss
      Else:
        loss
    """
    if inputs is None:
      inputs = self.cl_inputs

    lstm_out, next_state = self.layers['lstm'](embedded, inputs.state,
                                               inputs.length)
    logits = self.layers['cl_logits'](lstm_out)
    loss = layers_lib.classification_loss(logits, inputs.labels, inputs.weights)

    if return_intermediates:
      return lstm_out, next_state, logits, loss
    else:
      return loss
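
The actual loss lives in layers_lib (this project's layers module); its internals are not shown here. As an assumed rough equivalent, a weighted per-timestep classification loss matching the (logits, labels, weights) call might look like:

import tensorflow as tf

def classification_loss_sketch(logits, labels, weights):
  # Assumption, not the real layers_lib code.
  # logits: [batch, timesteps, num_classes]; labels, weights: [batch, timesteps].
  ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
  # Weighted mean over all timesteps; guard against an all-zero weight mask.
  return tf.reduce_sum(ce * weights) / tf.maximum(tf.reduce_sum(weights), 1.0)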
Code example #3
  def cl_loss_from_embedding(self, embedded, return_intermediate=False):
    with tf.device('/gpu:0'):
      # Flatten the inputs and append a zero row for padding lookups.
      output = tf.concat(
          [tf.reshape(self.input_data, [-1, self.args.word_dim]),
           tf.constant(np.zeros((1, self.args.word_dim), dtype=np.float32))], 0)

    # Gather mention, left-context, and right-context embeddings.
    self.input_f1 = tf.nn.embedding_lookup(output, self.entMentIndex)
    self.input_f2 = tf.nn.embedding_lookup(output, self.entCtxLeftIndex)
    self.input_f3 = tf.nn.embedding_lookup(output, self.entCtxRightIndex)

    # Append position features to each view.
    self.input_f1 = tf.concat([self.input_f1, self.pos_f1], -1)
    print('input_f1:', self.input_f1)

    self.input_f2 = tf.concat([self.input_f2, self.pos_f2], -1)
    print('input_f2:', self.input_f2)

    self.input_f3 = tf.concat([self.input_f3, self.pos_f3], -1)

    # CNN over each view; expand_dims adds the channels dimension.
    self.h_pool_f1 = self.layers['CNN'](tf.expand_dims(self.input_f1, -1), 5)
    print('h_pool_f1:', self.h_pool_f1)

    self.h_pool_f2 = self.layers['CNN'](tf.expand_dims(self.input_f2, -1), 10)
    print('h_pool_f2:', self.h_pool_f2)

    self.h_pool_f3 = self.layers['CNN'](tf.expand_dims(self.input_f3, -1), 10)
    print('h_pool_f3:', self.h_pool_f3)

    if self.args.dropout:
      self.h_pool_f1 = tf.nn.dropout(self.h_pool_f1, self.keep_prob)
      self.h_pool_f2 = tf.nn.dropout(self.h_pool_f2, self.keep_prob)
      self.h_pool_f3 = tf.nn.dropout(self.h_pool_f3, self.keep_prob)

    # Final results: combine context and mention predictions.
    self.ctx_input = tf.concat([self.h_pool_f2, self.h_pool_f3], 1)

    prediction_ctx = self.layers['fullyConnect_ctx'](
        self.ctx_input, activation_fn=tf.nn.relu)  # ReLU activation
    prediction_ment = self.layers['fullyConnect_ment'](
        tf.nn.l2_normalize(self.h_pool_f1, 1), activation_fn=tf.nn.relu)

    prediction = prediction_ctx + prediction_ment
    loss = tf.reduce_mean(
        layers_lib.classification_loss('figer', self.dense_outputdata, prediction))  # rerank the entities
    return prediction, loss
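
The tf.expand_dims(..., -1) calls exist because 2-D convolutions expect a 4-D [batch, height, width, channels] input. A small sketch with made-up sizes and a stock Keras layer standing in for the project's CNN class:

import tensorflow as tf

feats = tf.random.normal([8, 5, 300])                  # [batch, window, word_dim + pos_dim]
feats4d = tf.expand_dims(feats, -1)                    # [8, 5, 300, 1]: one input channel
conv = tf.keras.layers.Conv2D(64, (3, 300))(feats4d)   # full-width filter -> [8, 3, 1, 64]
pooled = tf.reduce_max(conv, axis=1)                   # max over window positions: [8, 1, 64]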
Code example #4
File: graphs.py  Project: 812864539/models
  def cl_loss_from_embedding(self,
                             embedded,
                             inputs=None,
                             return_intermediates=False):
    """Compute classification loss from embedding.

    Args:
      embedded: Length 2 tuple of 3-D float Tensor
        [batch_size, num_timesteps, embedding_dim].
      inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs.
      return_intermediates: bool, whether to return intermediate tensors or only
        the final loss.

    Returns:
      If return_intermediates is True:
        lstm_out, next_states, logits, loss
      Else:
        loss
    """
    if inputs is None:
      inputs = self.cl_inputs

    out = []
    for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded,
                                      inputs):
      out.append(self.layers[layer_name](emb, inp.state, inp.length))
    lstm_outs, next_states = zip(*out)

    # Concatenate output of forward and reverse LSTMs
    lstm_out = tf.concat(lstm_outs, 1)

    logits = self.layers['cl_logits'](lstm_out)
    f_inputs, _ = inputs  # pylint: disable=unpacking-non-sequence
    loss = layers_lib.classification_loss(logits, f_inputs.labels,
                                          f_inputs.weights)

    if return_intermediates:
      return lstm_out, next_states, logits, loss
    else:
      return loss
Code example #5
    def cl_loss_from_embedding(self,
                               embedded,
                               inputs=None,
                               return_intermediates=False):
        """Compute classification loss from embedding.

        Args:
          embedded: Length 2 tuple of 3-D float Tensor
            [batch_size, num_timesteps, embedding_dim].
          inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs.
          return_intermediates: bool, whether to return intermediate tensors or
            only the final loss.

        Returns:
          If return_intermediates is True:
            lstm_out, next_states, logits, loss
          Else:
            loss
        """
        if inputs is None:
            inputs = self.cl_inputs

        out = []
        for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded,
                                          inputs):
            out.append(self.layers[layer_name](emb, inp.state, inp.length))
        lstm_outs, next_states = zip(*out)

        # Concatenate output of forward and reverse LSTMs
        lstm_out = tf.concat(lstm_outs, 1)

        logits = self.layers['cl_logits'](lstm_out)
        f_inputs, _ = inputs  # pylint: disable=unpacking-non-sequence
        loss = layers_lib.classification_loss(logits, f_inputs.labels,
                                              f_inputs.weights)

        if return_intermediates:
            return lstm_out, next_states, logits, loss
        else:
            return loss
Code example #6
File: graphs.py  Project: void-yu/NSE
    def cl_loss_from_embedding(self,
                               embedded,
                               inputs=None,
                               return_intermediates=False):
        """Compute classification loss from embedding.

        Args:
          embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
          inputs: VatxtInput, defaults to self.cl_inputs.
          return_intermediates: bool, whether to return intermediate tensors or
            only the final loss.

        Returns:
          If return_intermediates is True:
            lstm_out, next_state, logits, loss
          Else:
            loss
        """
        if inputs is None:
            inputs = self.cl_inputs

        lstm_out, next_state = self.layers['naive_lstm'](embedded,
                                                         inputs.state,
                                                         inputs.length)
        if FLAGS.single_label:
            indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1],
                               1)
            lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1)
            labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
            weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
        else:
            labels = inputs.labels
            weights = inputs.weights
        logits = self.layers['cl_logits'](lstm_out)
        loss = layers_lib.classification_loss(logits, labels, weights)

        if return_intermediates:
            return lstm_out, next_state, logits, loss
        else:
            return loss
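
The FLAGS.single_label branch selects each sequence's final timestep. A standalone sketch of that indexing, with made-up sizes:

import tensorflow as tf

batch, timesteps, dim = 3, 5, 2
lstm_out = tf.random.normal([batch, timesteps, dim])
length = tf.constant([5, 2, 4])                        # true sequence lengths
indices = tf.stack([tf.range(batch), length - 1], 1)   # [[0, 4], [1, 1], [2, 3]]
last = tf.gather_nd(lstm_out, indices)                 # [batch, dim]: one vector per sequence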
Code example #7
  def cl_loss_from_embedding(self, embedded, return_intermediate=False):
    with tf.device('/gpu:1'):
      output, _ = self.layers['BiLSTM'](embedded)
      # Flatten the BiLSTM outputs and append a zero row for padding lookups.
      output = tf.concat(
          [tf.reshape(output, [-1, 2 * self.args.rnn_size]),
           tf.constant(np.zeros((1, 2 * self.args.rnn_size), dtype=np.float32))], 0)

    # Mention feature: summed and L2-normalized mention-token outputs.
    input_f1 = tf.nn.l2_normalize(
        tf.reduce_sum(tf.nn.embedding_lookup(output, self.entMentIndex), 1), 1)

    # Context features are attention-pooled instead of sum-pooled:
    # input_f2 = tf.nn.l2_normalize(tf.reduce_sum(tf.nn.embedding_lookup(output, self.entCtxLeftIndex), 1), 1)
    # input_f3 = tf.nn.l2_normalize(tf.reduce_sum(tf.nn.embedding_lookup(output, self.entCtxRightIndex), 1), 1)
    f2_temp = tf.nn.embedding_lookup(output, self.entCtxLeftIndex)
    f3_temp = tf.nn.embedding_lookup(output, self.entCtxRightIndex)

    # Batch matrix multiplication: score each context position against the mention.
    f2_atten = tf.nn.softmax(
        tf.einsum('aij,ajk->aik', f2_temp, tf.expand_dims(input_f1, -1)), -1)
    f3_atten = tf.nn.softmax(
        tf.einsum('aij,ajk->aik', f3_temp, tf.expand_dims(input_f1, -1)), -1)

    input_f2 = tf.einsum('aij,ajk->aik', tf.transpose(f2_temp, [0, 2, 1]), f2_atten)[:, :, 0]
    input_f3 = tf.einsum('aij,ajk->aik', tf.transpose(f3_temp, [0, 2, 1]), f3_atten)[:, :, 0]

    print('f2_input:', input_f2)
    print('f3_input:', input_f3)

    input_ctx = tf.concat([input_f2, input_f3], 1)

    if self.args.dropout:  # dropout is applied here, after pooling
      input_f1 = tf.nn.dropout(input_f1, self.keep_prob)
      input_ctx = tf.nn.dropout(input_ctx, self.keep_prob)

    prediction_l1_ment = self.layers['fullyConnect_ment'](input_f1, activation_fn=None)
    prediction_ment = tf.matmul(prediction_l1_ment, self.hier)
    print('ment:', prediction_ment)

    prediction_ctx = self.layers['fullyConnect_ctx'](input_ctx, activation_fn=None)
    print('ctx:', prediction_ctx)
    prediction = tf.nn.sigmoid(prediction_ment + prediction_ctx)

    loss = tf.reduce_mean(
        layers_lib.classification_loss('figer', self.dense_outputdata, prediction))
    return prediction, loss
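
A standalone sketch of the einsum attention above, with made-up sizes. Note that the snippet normalizes over the last axis, which has size 1 after expand_dims; normalizing over the context-length axis (axis 1), as below, is presumably the intent:

import tensorflow as tf

batch, ctx_len, dim = 2, 7, 6
ctx = tf.random.normal([batch, ctx_len, dim])                       # f2_temp analogue
query = tf.random.normal([batch, dim])                              # input_f1 analogue
scores = tf.einsum('aij,ajk->aik', ctx, tf.expand_dims(query, -1))  # [batch, ctx_len, 1]
atten = tf.nn.softmax(scores, 1)                                    # weights over positions
pooled = tf.einsum('aij,ajk->aik', tf.transpose(ctx, [0, 2, 1]), atten)[:, :, 0]  # [batch, dim]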
Code example #8
File: graphs.py  Project: 812864539/models
  def cl_loss_from_embedding(self,
                             embedded,
                             inputs=None,
                             return_intermediates=False):
    """Compute classification loss from embedding.

    Args:
      embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
      inputs: VatxtInput, defaults to self.cl_inputs.
      return_intermediates: bool, whether to return intermediate tensors or only
        the final loss.

    Returns:
      If return_intermediates is True:
        lstm_out, next_state, logits, loss
      Else:
        loss
    """
    if inputs is None:
      inputs = self.cl_inputs

    lstm_out, next_state = self.layers['lstm'](embedded, inputs.state,
                                               inputs.length)
    if FLAGS.single_label:
      indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
      lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1)
      labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
      weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
    else:
      labels = inputs.labels
      weights = inputs.weights
    logits = self.layers['cl_logits'](lstm_out)
    loss = layers_lib.classification_loss(logits, labels, weights)

    if return_intermediates:
      return lstm_out, next_state, logits, loss
    else:
      return loss