示例#1
0
    def __init__(self, config, models, tensor_dict=None):
        """Aggregate evaluation tensors from several per-GPU model replicas.

        Args:
            config: run configuration; must provide batch_size, max_num_sents
                and max_sent_size.
            models: list of model replicas; models[0] is treated as primary.
            tensor_dict: optional dict of tensors to fetch for inspection;
                defaults to an empty dict.
        """
        self.config = config
        self.model = models[0]
        self.models = models
        self.global_step = models[0].global_step
        self.yp_list = models[0].decoder_inference
        self.yp_mat = models[0].decoder_train_softmax
        self.tensor_dict = {} if tensor_dict is None else tensor_dict

        self.y = models[0].y

        with tf.name_scope("eval_concat"):
            N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size
            # Pad each replica's predictions to a common [N, M, JX] shape and
            # stack them along the batch axis.  (The earlier direct
            # self.yp/self.yp2/self.loss = models[0].* assignments were dead:
            # unconditionally overwritten here before any use — removed.)
            self.yp = tf.concat(
                [padded_reshape(model.yp, [N, M, JX]) for model in models], 0)
            self.yp2 = tf.concat(
                [padded_reshape(model.yp2, [N, M, JX]) for model in models], 0)
            # Average the per-replica losses so the reported loss stays on the
            # same scale as a single-GPU run.
            self.loss = tf.add_n([model.loss
                                  for model in models]) / len(models)
示例#2
0
    def __init__(self, config, models, tensor_dict=None):
        """Multi-GPU F1 evaluator: merges per-tower outputs into one graph.

        Args:
            config: run configuration; split_supports selects between a single
                concatenated paragraph per example and per-sentence layout.
            models: list of per-GPU model replicas; models[0] seeds the
                single-GPU base-class initialisation.
            tensor_dict: optional dict of tensors to fetch, forwarded to the
                base class.
        """
        super(MultiGPUF1Evaluator, self).__init__(config,
                                                  models[0],
                                                  tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):

            # With split_supports the supports are concatenated into one
            # "sentence" of up to max_para_size tokens per example.
            # (Idiomatic truthiness test; was `== True`.)
            if config.split_supports:
                N, M, JX = config.batch_size, 1, config.max_para_size
            else:
                N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size
            # Pad each tower's tensor to [N, M, JX] and stack on the batch axis.
            self.yp = tf.concat(axis=0,
                                values=[
                                    padded_reshape(model.yp, [N, M, JX])
                                    for model in models
                                ])
            self.yp2 = tf.concat(axis=0,
                                 values=[
                                     padded_reshape(model.yp2, [N, M, JX])
                                     for model in models
                                 ])
            self.wy = tf.concat(axis=0,
                                values=[
                                    padded_reshape(model.wy, [N, M, JX])
                                    for model in models
                                ])
            # Mean of tower losses keeps the reported loss on single-GPU scale.
            self.loss = tf.add_n([model.get_loss()
                                  for model in models]) / len(models)
示例#3
0
 def __init__(self, config, models, tensor_dict=None):
     """Build F1-evaluation tensors by concatenating the per-GPU towers."""
     super(MultiGPUF1Evaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
     self.models = models
     with tf.name_scope("eval_concat"):
         # Common padded shape: [batch, num_sents, sent_size].
         shape = [config.batch_size, config.max_num_sents, config.max_sent_size]
         self.yp = tf.concat(0, [padded_reshape(m.yp, shape) for m in models])
         self.yp2 = tf.concat(0, [padded_reshape(m.yp2, shape) for m in models])
         # Average of tower losses, matching the single-GPU loss scale.
         self.loss = tf.add_n([m.loss for m in models]) / len(models)
示例#4
0
 def __init__(self, config, models, tensor_dict=None):
     """Concatenate per-tower span predictions and average their losses."""
     super(MultiGPUF1Evaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
     self.models = models
     with tf.name_scope("eval_concat"):
         dims = [config.batch_size, config.max_num_sents, config.max_sent_size]
         starts, ends, losses = [], [], []
         for tower in models:
             # Pad each tower's output to a common shape before stacking.
             starts.append(padded_reshape(tower.yp, dims))
             ends.append(padded_reshape(tower.yp2, dims))
             losses.append(tower.loss)
         self.yp = tf.concat(0, starts)
         self.yp2 = tf.concat(0, ends)
         self.loss = tf.add_n(losses) / len(models)
示例#5
0
    def __init__(self, config, models, tensor_dict=None):
        """Multi-GPU classification-accuracy evaluator.

        Concatenates each tower's class scores and attention tensors along the
        batch axis and averages the tower losses.
        """
        super(MultiGPUClassificationAccuracyEvaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):
            # JX (max_sent_size) was unpacked but never used here — removed.
            N, M = config.batch_size, config.max_num_sents

            # 3 classes for SNLI-style data, otherwise binary — presumably the
            # three NLI labels; confirm against the dataset definition.
            C = 3 if config.data_dir.startswith('data/snli') else 2
            # Inline the padding expression (PEP 8 E731: avoid lambda assignment).
            self.yp = tf.concat(axis=0, values=[padded_reshape(model.yp0, [N, M, C]) for model in models])
            self.loss = tf.add_n([model.loss for model in models])/len(models)
            self.p = tf.concat(axis=0, values=[model.tensor_dict['a_u'] for model in models])
    def __init__(self, config, models, tensor_dict=None):
        """Multi-GPU F1 evaluator; merges tower outputs on the batch axis.

        For 'hotpotqa' also gathers the yes/no answer-type heads.
        """
        super(MultiGPUF1Evaluator, self).__init__(config,
                                                  models[0],
                                                  tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):
            N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size

            def gathered(attr, shape):
                # Pad each tower's tensor to `shape`, then stack on axis 0.
                return tf.concat(axis=0,
                                 values=[padded_reshape(getattr(m, attr), shape)
                                         for m in models])

            self.yp = gathered('yp', [N, M, JX])
            self.yp2 = gathered('yp2', [N, M, JX])

            self.wy = gathered('wy', [N, M, JX])
            # Average of tower losses, matching the single-GPU loss scale.
            self.loss = tf.add_n([m.get_loss() for m in models]) / len(models)
            if config.dataset == 'hotpotqa':
                self.yp3 = gathered('yp3', [N, 1, 2])
                self.yp3_yesno = gathered('yp3_yesno', [N, 1, 2])
示例#7
0
    def __init__(self, config, models, tensor_dict=None):
        """Multi-GPU candidate-document-selection evaluator.

        Concatenates each tower's yp scores along the batch axis, padding the
        third dimension to the widest tower, and averages the tower losses.
        """
        super(MultiGPUF1CandidateDocSelEvaluator,
              self).__init__(config, models[0], tensor_dict=tensor_dict)

        self.models = models
        with tf.name_scope("eval_concat"):
            N = config.batch_size
            # One concatenated paragraph per example when supports are split,
            # otherwise per-sentence layout.  (Idiomatic truthiness; was
            # `== True`.)
            M = 1 if config.split_supports else config.max_num_sents
            # The JX computation was duplicated verbatim in both branches —
            # hoisted: pad every tower to the widest third dimension.
            JX = tf.reduce_max([tf.shape(model.yp)[2] for model in models])

            self.yp = tf.concat(axis=0,
                                values=[
                                    padded_reshape(model.yp, [N, M, JX])
                                    for model in models
                                ])
            self.loss = tf.add_n([model.get_loss()
                                  for model in models]) / len(models)