Example #1
    def add_biaffine(self, inputs):
        ## inputs [seq_len, batch_size, units]
        ## first define four different MLPs
        arc_roles = ['arc-dep', 'arc-head']
        rel_roles = ['rel-dep', 'rel-head']
        vectors = {}
        for arc_role in arc_roles:
            for i in range(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.arc_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(arc_role, i), inputs_dim, self.opts.arc_mlp_units)
                vector_mlp = self.add_dropout(tf.map_fn(lambda x: mlp(x, weights), vector_mlp), self.mlp_prob)
                ## [seq_len, batch_size, 2*mlp_units]
            vectors[arc_role] = vector_mlp
        weights = get_arc_weights('arc', self.opts.arc_mlp_units)
        arc_output = arc_equation(vectors['arc-head'], vectors['arc-dep'], weights) # [batch_size, seq_len, seq_len] dim 1: deps, dim 2: heads
#        arc_predictions = get_arcs(arc_output, self.test_opts) # [batch_size, seq_len]
        arc_predictions = tf.argmax(arc_output, 2) # [batch_size, seq_len]
        for rel_role in rel_roles:
            for i in range(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.rel_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(rel_role, i), inputs_dim, self.opts.rel_mlp_units)
                vector_mlp = self.add_dropout(tf.map_fn(lambda x: mlp(x, weights), vector_mlp), self.mlp_prob)
                ## [seq_len, batch_size, 2*mlp_units]
            vectors[rel_role] = vector_mlp
        weights = get_rel_weights('rel', self.opts.rel_mlp_units, self.loader.nb_rels)
        rel_output, rel_scores = rel_equation(vectors['rel-head'], vectors['rel-dep'], weights, arc_predictions)  #[batch_size, seq_len, nb_rels]
        return arc_output, rel_output, rel_scores
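The helpers called above (`get_mlp_weights`, `mlp`, `get_arc_weights`, `arc_equation`, `get_rel_weights`, `rel_equation`) are defined elsewhere in the repository and are not shown in this listing. As a rough guide to the shapes in the comments, here is a minimal sketch of what a deep-biaffine arc scorer and a head-conditioned relation classifier could look like; the function names, weight shapes, and signatures below are assumptions, not the project's actual implementation.

```python
import tensorflow as tf

def biaffine_arc_scores(head, dep, W_arc, b_arc):
    """Hypothetical stand-in for arc_equation.

    head, dep: [seq_len, batch_size, units] MLP outputs.
    W_arc:     [units, units] biaffine weight (assumed shape).
    b_arc:     [units] head-only bias term (assumed shape).
    Returns    [batch_size, seq_len, seq_len] arc scores,
               dim 1 indexing dependents, dim 2 indexing candidate heads.
    """
    head = tf.transpose(head, [1, 0, 2])  # -> [batch_size, seq_len, units]
    dep = tf.transpose(dep, [1, 0, 2])
    # Bilinear term: score[b, i, j] = dep[b, i] . W_arc . head[b, j]
    bilinear = tf.einsum('bid,dk,bjk->bij', dep, W_arc, head)
    # Bias term that depends only on the candidate head j.
    head_bias = tf.einsum('bjd,d->bj', head, b_arc)  # [batch_size, seq_len]
    return bilinear + head_bias[:, tf.newaxis, :]    # broadcast over dependents

def relation_logits(head, dep, U_rel, arc_predictions):
    """Hypothetical stand-in for the scoring part of rel_equation.

    U_rel:           [nb_rels, units, units] per-relation biaffine weights (assumed).
    arc_predictions: [batch_size, seq_len] predicted head index per token.
    Returns          [batch_size, seq_len, nb_rels] relation logits.
    """
    head = tf.transpose(head, [1, 0, 2])
    dep = tf.transpose(dep, [1, 0, 2])
    # For each dependent i, pick the representation of its predicted head
    # (tf.gather with batch_dims requires TF >= 1.14).
    chosen_head = tf.gather(head, arc_predictions, batch_dims=1)  # [b, s, units]
    # Biaffine score for every relation label r.
    return tf.einsum('bid,rdk,bik->bir', dep, U_rel, chosen_head)
```

With scorers of this shape, `tf.argmax(arc_output, 2)` picks one head per dependent, which is what the listing feeds into `rel_equation`. The second listing below extends the same method with joint supertagging (`stag`) and `jk` tagging heads on top of the shared encoder output.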
    def add_biaffine(self, inputs):
        arc_roles = ['arc-dep', 'arc-head']
        rel_roles = ['rel-dep', 'rel-head']
        joint_roles = ['jk', 'stag']
        vectors = {}
        for arc_role in arc_roles:
            for i in range(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.arc_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(arc_role, i),
                                          inputs_dim, self.opts.arc_mlp_units)
                vector_mlp = self.add_dropout(
                    tf.map_fn(lambda x: mlp(x, weights), vector_mlp),
                    self.mlp_prob)
                ## [seq_len, batch_size, 2*mlp_units]
            vectors[arc_role] = vector_mlp
        weights = get_arc_weights('arc', self.opts.arc_mlp_units)
        arc_output = arc_equation(
            vectors['arc-head'], vectors['arc-dep'], weights
        )  # [batch_size, seq_len, seq_len] dim 1: deps, dim 2: heads
        # arc_predictions = get_arcs(arc_output, self.test_opts)  # [batch_size, seq_len]
        arc_predictions = tf.argmax(arc_output, 2)  # [batch_size, seq_len]
        for rel_role in rel_roles:
            for i in range(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.rel_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(rel_role, i),
                                          inputs_dim, self.opts.rel_mlp_units)
                vector_mlp = self.add_dropout(
                    tf.map_fn(lambda x: mlp(x, weights), vector_mlp),
                    self.mlp_prob)
                ## [seq_len, batch_size, 2*mlp_units]
            vectors[rel_role] = vector_mlp
        weights = get_rel_weights('rel', self.opts.rel_mlp_units,
                                  self.loader.nb_rels)
        rel_output, rel_scores = rel_equation(
            vectors['rel-head'], vectors['rel-dep'], weights,
            arc_predictions)  # [batch_size, seq_len, nb_rels]
        ## joint supertagging (stag) and jk tagging heads
        for joint_role in joint_roles:
            for i in range(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.joint_mlp_units
                weights = get_mlp_weights(
                    '{}_MLP_Layer{}'.format(joint_role, i), inputs_dim,
                    self.opts.joint_mlp_units)
                vector_mlp = self.add_dropout(
                    tf.map_fn(lambda x: mlp(x, weights), vector_mlp),
                    self.mlp_prob)
                ## [seq_len, batch_size, 2*mlp_units]
            vectors[joint_role] = vector_mlp
        weights = get_joint_weights('stag', self.opts.joint_mlp_units,
                                    self.loader.nb_stags)
        self.stag_embeddings = tf.transpose(weights['W-joint'], [1, 0])
        joint_output = joint_equation(
            vectors['stag'], weights)  # [batch_size, seq_len, nb_stags]
        weights = get_joint_weights('jk', self.opts.joint_mlp_units,
                                    self.loader.nb_jk)
        joint_output_jk = joint_equation(
            vectors['jk'], weights)  # [batch_size, seq_len, nb_jk]
        return arc_output, rel_output, rel_scores, joint_output, joint_output_jk
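`get_joint_weights` and `joint_equation` are likewise defined elsewhere. Judging by how `weights['W-joint']` is transposed into `self.stag_embeddings` and by the output shapes in the comments, the joint heads are plausibly simple per-token linear classifiers over the MLP output. A minimal sketch under that assumption (the names and weight shapes are guesses):

```python
import tensorflow as tf

def joint_tag_logits(vec, W_joint, b_joint):
    """Hypothetical stand-in for joint_equation: a per-token linear classifier.

    vec:     [seq_len, batch_size, units] MLP output of the tagging head.
    W_joint: [units, nb_tags] projection matrix (assumed shape).
    b_joint: [nb_tags] bias (assumed).
    Returns  [batch_size, seq_len, nb_tags] tag logits.
    """
    vec = tf.transpose(vec, [1, 0, 2])  # -> [batch_size, seq_len, units]
    return tf.einsum('bsu,ut->bst', vec, W_joint) + b_joint
```

Under this reading, transposing `W-joint` would yield one `units`-sized row per supertag, which would explain why the listing keeps it around as `self.stag_embeddings`.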