Code example #1
    def eval_step(self, data, ids, all_data):
        """
        Evaluates the model on an eval set.
        """
        doc_cls_predictions = []
        doc_cls_probabilities = []

        labels = []
        for batch in data_iterator(data, all_data, self.config.text_field_names, ids=ids, batch_size=self.config.batch_size, shuffle=False):

            batch_cls = np.array(batch['target'])
            labels.extend(batch['label'])

            batch_inputs = []
            batch_input_actual_num_sents = []
            batch_input_actual_sent_lengths = []
            batch_similar_docs = []
            batch_similar_actual_num_sents = []
            batch_similar_actual_sent_lengths = []
            for name in self.config.text_field_names:
                batch_inputs.append(batch[name])
                batch_input_actual_num_sents.append(batch[name + '_actual_num_sents'])
                batch_input_actual_sent_lengths.append(batch[name + '_actual_sent_lengths'])
                batch_similar_docs.append(batch[name + '_similar_docs'])
                batch_similar_actual_num_sents.append(batch[name + '_similar_docs_actual_num_sents'])
                batch_similar_actual_sent_lengths.append(batch[name + '_similar_docs_actual_sent_lengths'])

            feed_dict = {
                self.model.cnn_dropout_keep_prob: 1.0,
                self.model.input_cls: batch_cls
            }
            # Pair each per-field placeholder with its batch values; the loop
            # variable is named `value` so it does not shadow the `data` argument.
            feed_dict.update({ph: value for ph, value in zip(self.model.input_doc, batch_inputs)})
            feed_dict.update(
                {ph: value for ph, value in zip(self.model.doc_actual_num_sents, batch_input_actual_num_sents)})
            feed_dict.update(
                {ph: value for ph, value in zip(self.model.doc_actual_sent_lengths, batch_input_actual_sent_lengths)})
            feed_dict.update({ph: value for ph, value in zip(self.model.similar_docs, batch_similar_docs)})
            feed_dict.update(
                {ph: value for ph, value in zip(self.model.similar_doc_actual_num_sents, batch_similar_actual_num_sents)})
            feed_dict.update({ph: value for ph, value in
                              zip(self.model.similar_doc_actual_sent_lengths, batch_similar_actual_sent_lengths)})

            step, summaries, doc_cls_prob = self.sess.run(
                [self.global_step, self.dev_summary_op, self.model.doc_cls_probabilities],
                feed_dict)
            _, doc_cls_pred = make_prediction(doc_cls_prob)
            doc_cls_probabilities.extend(doc_cls_prob)
            doc_cls_predictions.extend(doc_cls_pred.tolist())


        # Fourth return value of precision_recall_fscore_support is the per-class
        # support, not a status flag.
        doc_precision, doc_recall, doc_f1_score, _support = precision_recall_fscore_support(
            labels, np.array(doc_cls_predictions),
            labels=list(range(self.config.num_classes)),
            pos_label=None,
            average='macro')

        mae, _ = calmacroMAE(labels, np.array(doc_cls_predictions), self.config.num_classes)
        print(doc_precision, doc_recall, doc_f1_score, mae)

        return doc_precision, doc_recall, doc_f1_score, mae
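
The snippet calls a helper, `calmacroMAE`, that is not part of the listing. A minimal sketch, assuming it computes a macro-averaged mean absolute error (the MAE per true class, averaged over the classes that occur, which is common for ordinal labels) and returns it together with the per-class values:

    import numpy as np

    def calmacroMAE(y_true, y_pred, num_classes):
        """Hypothetical reconstruction: macro-averaged MAE.

        Computes the mean absolute error separately for each true class and
        averages over the classes that actually occur, so rare classes weigh
        as much as frequent ones.
        """
        y_true = np.asarray(y_true)
        y_pred = np.asarray(y_pred)
        per_class = []
        for c in range(num_classes):
            mask = y_true == c
            if mask.any():
                per_class.append(np.abs(y_true[mask] - y_pred[mask]).mean())
        return float(np.mean(per_class)), per_class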
Code example #2
    def eval(self, test_data, all_data, test_ids, config):
        doc_cls_predictions, _ = self.predict(test_data, all_data, test_ids, config, config.model_path)
        labels = []
        for batch in data_iterator(test_data, all_data, config.text_field_names, ids=test_ids, batch_size=config.batch_size,
                                   shuffle=False):
            labels += batch['label']
        doc_cls_reports = evalReport(labels, doc_cls_predictions, config, config.num_classes)

        return doc_cls_reports
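
`evalReport` is another helper the listing does not include. A minimal sketch, assuming it wraps scikit-learn's per-class report over the fixed label set (the `config` argument is accepted but unused in this sketch):

    from sklearn.metrics import classification_report

    def evalReport(labels, predictions, config, num_classes):
        """Hypothetical reconstruction: per-class precision/recall/F1 report
        over the label set 0..num_classes-1."""
        return classification_report(labels, predictions,
                                     labels=list(range(num_classes)),
                                     digits=4)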
Code example #3
    def predict(test_data, all_data, test_ids, config, model_path):
        graph = tf.Graph()
        with graph.as_default():
            start_time = time.time()
            session_conf = tf.ConfigProto(
                allow_soft_placement=config.allow_soft_placement,
                log_device_placement=config.log_device_placement)
            sess = tf.Session(config=session_conf)
            end_time = time.time()
            print("load session time : %f" % (end_time - start_time))

            print('')
            with sess.as_default():
                start_time = time.time()

                tf.saved_model.loader.load(
                    sess, [tf.saved_model.tag_constants.SERVING], model_path)
                end_time = time.time()

                print("load model time : %f" % (end_time - start_time))

                input_tensors = {}

                for name in config.text_field_names:
                    input_tensors[name + '_input_doc'] = graph.get_tensor_by_name(
                        name + '_input_doc:0')
                    input_tensors[name + '_actual_num_sents'] = graph.get_tensor_by_name(
                        name + '_doc_actual_num_sents:0')
                #input_tensors['cnn_dropout_keep_prob'] = graph.get_tensor_by_name("cnn_dropout_keep_prob:0")

                #doc_cls_predictions = graph.get_operation_by_name("predictions").outputs[0]
                doc_cls_probabilities = graph.get_operation_by_name(
                    "probabilities").outputs[0]

                cls_predictions = []
                cls_probabilities = []
                for batch in data_iterator(test_data,
                                           all_data,
                                           config.text_field_names,
                                           ids=test_ids,
                                           batch_size=config.batch_size,
                                           shuffle=False):

                    input_data = {}
                    for name in config.text_field_names:
                        input_data[name + '_input_doc'] = batch[name]
                        input_data[name + '_actual_num_sents'] = batch[name + '_actual_num_sents']

                    feed_dict = {}
                    for key in input_tensors:
                        feed_dict[input_tensors[key]] = input_data[key]

                    doc_cls_probs = sess.run(doc_cls_probabilities, feed_dict)
                    _, doc_cls_pred = make_prediction(doc_cls_probs)
                    cls_predictions.extend(doc_cls_pred.tolist())
                    cls_probabilities.extend(doc_cls_probs)
        tf.reset_default_graph()

        return cls_predictions, cls_probabilities
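
Both `eval_step` and `predict` rely on `make_prediction`, which is also absent from the listing. A minimal sketch, assuming it takes a `(batch, num_classes)` probability array and returns the winning probability alongside the argmax class for each row (consistent with how both call sites unpack and use it):

    import numpy as np

    def make_prediction(probabilities):
        """Hypothetical reconstruction: per-row confidence and argmax class."""
        probabilities = np.asarray(probabilities)
        confidences = probabilities.max(axis=1)     # probability of the chosen class
        predictions = probabilities.argmax(axis=1)  # predicted class index
        return confidences, predictions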
Code example #4
    def output_similarity_vectors(test_data, all_data, test_ids, config,
                                  model_path):
        graph = tf.Graph()
        with graph.as_default():
            start_time = time.time()
            session_conf = tf.ConfigProto(
                allow_soft_placement=config.allow_soft_placement,
                log_device_placement=config.log_device_placement)
            sess = tf.Session(config=session_conf)
            end_time = time.time()
            print("load session time : %f" % (end_time - start_time))

            print('')
            with sess.as_default():
                start_time = time.time()

                tf.saved_model.loader.load(
                    sess, [tf.saved_model.tag_constants.SERVING], model_path)
                end_time = time.time()

                print("load model time : %f" % (end_time - start_time))

                input_tensors = {}

                for name in config.text_field_names:
                    input_tensors[name + '_input_doc'] = graph.get_tensor_by_name(
                        name + '_input_doc:0')
                    input_tensors[name + '_similar_docs'] = graph.get_tensor_by_name(
                        name + '_similar_docs:0')
                    input_tensors[name + '_actual_num_sents'] = graph.get_tensor_by_name(
                        name + '_doc_actual_num_sents:0')
                    input_tensors[name + '_similar_docs_actual_num_sents'] = graph.get_tensor_by_name(
                        name + '_similar_doc_actual_num_sents:0')
                # input_tensors['cnn_dropout_keep_prob'] = graph.get_tensor_by_name("cnn_dropout_keep_prob:0")

                # doc_cls_predictions = graph.get_operation_by_name("predictions").outputs[0]
                #doc_cls_probabilities = graph.get_operation_by_name("probabilities").outputs[0]

                similarity_vectors = []
                for i, name in enumerate(config.text_field_names):
                    if i == 0:
                        similarity_vectors.append(
                            graph.get_tensor_by_name("Mean:0"))
                    else:
                        similarity_vectors.append(
                            graph.get_tensor_by_name("Mean_%d:0" % i))

                predicted_similarity_vectors = []
                for batch in data_iterator(test_data,
                                           all_data,
                                           config.text_field_names,
                                           ids=test_ids,
                                           batch_size=config.batch_size,
                                           shuffle=False):
                    input_data = {}
                    for name in config.text_field_names:
                        input_data[name + '_input_doc'] = batch[name]
                        input_data[name + '_similar_docs'] = batch[name + '_similar_docs']
                        input_data[name + '_actual_num_sents'] = batch[name + '_actual_num_sents']
                        input_data[name + '_similar_docs_actual_num_sents'] = batch[name + '_similar_docs_actual_num_sents']

                    feed_dict = {}
                    for key in input_tensors:
                        feed_dict[input_tensors[key]] = input_data[key]

                    batch_similarity_vectors = sess.run(
                        similarity_vectors, feed_dict)
                    batch_similarity_vectors = np.stack(
                        batch_similarity_vectors, axis=1)
                    print(batch_similarity_vectors.shape)
                    predicted_similarity_vectors.extend(
                        batch_similarity_vectors.tolist())
        tf.reset_default_graph()

        return predicted_similarity_vectors
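
A minimal usage sketch for the function above, assuming `config.model_path` points at a SavedModel export and that the stacked per-field vectors (see the `np.stack` call) should be persisted for downstream use; the output file name is illustrative:

    import numpy as np

    # Hypothetical usage: one fused similarity vector per test document.
    vectors = output_similarity_vectors(test_data, all_data, test_ids,
                                        config, config.model_path)
    np.save('similarity_vectors.npy', np.array(vectors))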
Code example #5
File: baseModel.py  Project: nlpconf/patentProjects
    def run_epoch(self, trainData, devData, all_data, train_ids, dev_ids):
        early_stop = 0
        #print([trainData[k]['label'] for k in list(trainData.keys())[0:10]])
        for e in np.arange(self.config.num_epochs):
            sum_loss = 0
            for step, trainBatch in enumerate(data_iterator(
                    trainData, all_data, self.config.text_field_names, ids=train_ids, batch_size=self.config.batch_size, shuffle=True)):
                # Training loop. For each batch...

                # size_batch = len(x_batch)
                # print("current step ", step)
                #print('base', trainBatch['label'])
                sum_loss += self.train_step(trainBatch, e)

                current_step = tf.train.global_step(self.sess, self.global_step)
            early_stop += 1

            if e % self.config.evaluate_every == 0:
                print("\nDevolope:")
                p_dev_cls, r_dev_cls, f_dev_cls, mae_dev_cls = self.eval_step(devData,dev_ids,all_data)
                # best_f1_dev = max(f1_dev,best_f1_dev)
                if self.config.evaluate_metric == 'f' and self.best_f_dev_cls < f_dev_cls:
                    self.best_f_dev_cls = f_dev_cls
                    self.best_p_dev_cls = p_dev_cls
                    self.best_r_dev_cls = r_dev_cls
                    early_stop = 0
                    self.saveModel(self.checkpoint_path)
                if self.config.evaluate_metric == 'p' and self.best_p_dev_cls < p_dev_cls:
                    self.best_f_dev_cls = f_dev_cls
                    self.best_p_dev_cls = p_dev_cls
                    self.best_r_dev_cls = r_dev_cls
                    early_stop = 0
                    self.saveModel(self.checkpoint_path)

                if self.config.evaluate_metric == 'mae' and self.best_mae_dev_cls > mae_dev_cls:
                    self.best_f_dev_cls = f_dev_cls
                    self.best_p_dev_cls = p_dev_cls
                    self.best_r_dev_cls = r_dev_cls
                    self.best_mae_dev_cls = mae_dev_cls
                    early_stop = 0
                    self.saveModel(self.checkpoint_path)
            if early_stop >= self.config.early_stop and e >= self.config.min_epochs:
                print('early stop at : ' + str(e))
                break
        print("\nDevelope:")
        p_dev_cls, r_dev_cls, f_dev_cls, mae_dev_cls = self.eval_step(devData,dev_ids,all_data)

        # if self.best_f_dev_cls  < f_dev_cls
        if self.config.evaluate_metric == 'f' and self.best_f_dev_cls < f_dev_cls:
            self.best_f_dev_cls = f_dev_cls
            self.best_p_dev_cls = p_dev_cls
            self.best_r_dev_cls = r_dev_cls
            self.saveModel(self.checkpoint_path)
        if self.config.evaluate_metric == 'p' and self.best_p_dev_cls < p_dev_cls:
            self.best_f_dev_cls = f_dev_cls
            self.best_p_dev_cls = p_dev_cls
            self.best_r_dev_cls = r_dev_cls
            self.saveModel(self.checkpoint_path)


        if self.config.evaluate_metric == 'mae' and self.best_mae_dev_cls > mae_dev_cls:
            self.best_f_dev_cls = f_dev_cls
            self.best_p_dev_cls = p_dev_cls
            self.best_r_dev_cls = r_dev_cls
            self.best_mae_dev_cls = mae_dev_cls
            self.saveModel(self.checkpoint_path)
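
The three metric-specific `if` blocks appear twice, once inside the epoch loop and once after it. A minimal sketch of how that bookkeeping could be factored into one helper; `maybe_update_best` is a hypothetical name, not part of the project, and the caller would reset its early-stop counter whenever this returns True:

    def maybe_update_best(self, p, r, f, mae):
        """Hypothetical refactor: checkpoint when the configured metric improves.

        Mirrors the logic above: 'f' and 'p' improve upward, 'mae' downward,
        and best_mae_dev_cls is only tracked when the metric is 'mae'.
        """
        metric = self.config.evaluate_metric
        improved = ((metric == 'f' and self.best_f_dev_cls < f)
                    or (metric == 'p' and self.best_p_dev_cls < p)
                    or (metric == 'mae' and self.best_mae_dev_cls > mae))
        if improved:
            self.best_f_dev_cls, self.best_p_dev_cls, self.best_r_dev_cls = f, p, r
            if metric == 'mae':
                self.best_mae_dev_cls = mae
            self.saveModel(self.checkpoint_path)
        return improved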