Example #1
    def _train_with_summary(self):
        # Pull the next training batch and build the feed dict for one step.
        img_batch, label_batch, labels, _ = self.tr_ds.get_next_batch(
            self.sess)
        feed = {
            self.model.inputs: img_batch,
            self.model.labels: label_batch,
            self.model.is_training: True
        }

        fetches = [
            self.model.total_loss, self.model.ctc_loss,
            self.model.regularization_loss, self.model.global_step,
            self.model.lr, self.model.merged_summay, self.model.dense_decoded,
            self.model.edit_distance, self.model.train_op
        ]

        # Run one optimization step and fetch losses, LR and merged summaries.
        batch_cost, _, _, global_step, lr, summary, predicts, edit_distance, _ = self.sess.run(
            fetches, feed)
        self.train_writer.add_summary(summary, global_step)

        # Decode the dense CTC output and compute batch accuracy against labels.
        predicts = [
            self.converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in predicts
        ]
        accuracy, _ = infer.calculate_accuracy(predicts, labels)

        tf_utils.add_scalar_summary(self.train_writer, "train_accuracy",
                                    accuracy, global_step)
        tf_utils.add_scalar_summary(self.train_writer, "train_edit_distance",
                                    edit_distance, global_step)

        return batch_cost, global_step, lr
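
All of the examples log extra scalars through tf_utils.add_scalar_summary, which is not shown in the source. A minimal sketch of what such a helper typically looks like in TensorFlow 1.x (the proto-based write is an assumption, not the project's actual code):

import tensorflow as tf

def add_scalar_summary(writer, tag, value, step):
    # Wrap a plain Python float in a tf.Summary proto and write it, so
    # non-graph metrics (accuracy, edit distance) show up in TensorBoard.
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    writer.add_summary(summary, step)
    writer.flush()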
Example #2
    def _do_val(self, dataset, epoch, step, name, sess, model, converter, train_writer, cfg, result_dir):
        if dataset is None:
            return None

        accuracy, edit_distance = infer.validation(sess, model.feeds(), model.fetches(),
                                                   dataset, converter, result_dir, name, step)

        tf_utils.add_scalar_summary(train_writer, "%s_accuracy" % name, accuracy, step)
        tf_utils.add_scalar_summary(train_writer, "%s_edit_distance" % name, edit_distance, step)

        print("epoch: %d/%d, %s accuracy = %.3f" % (epoch, cfg.epochs, name, accuracy))
        return accuracy
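
A hypothetical call site for _do_val inside the training loop; val_ds, best_acc, saver and ckpt_path are illustrative names, not taken from the source:

val_acc = self._do_val(val_ds, epoch, global_step, "val",
                       sess, model, converter, train_writer, cfg, result_dir)
if val_acc is not None and val_acc > best_acc:
    best_acc = val_acc
    saver.save(sess, ckpt_path, global_step=global_step)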
Example #3
    def _train_with_summary(self, model, tr_ds, sess, train_writer, converter):
        img_batch, label_batch, labels, *rest = tr_ds.get_next_batch(sess)
        image_batch_shape = img_batch.shape
        # Sequence width after the CNN: image width downsampled by a factor of 4.
        w = self.round_up(image_batch_shape[2] / 4)
        char_num = [len(l) for l in labels]
        pos_init = [[-1, -1]]
        # print('image_batch:',img_batch)
        feed = {
            model.inputs: img_batch,
            model.labels: label_batch,
            model.bat_labels: label_batch[1],
            model.len_labels: w,
            model.char_num: char_num,
            model.char_pos_init: pos_init,
            model.is_training: True
        }

        fetches = [
            model.total_loss, model.ctc_loss, model.regularization_loss,
            model.global_step, model.lr, model.merged_summary,
            model.dense_decoded, model.edit_distance, model.train_op,
            model.min_k, model.max_k
        ]

        batch_cost, _, _, global_step, lr, summary, predicts, edit_distance, _, min_k, max_k = sess.run(
            fetches, feed)
        train_writer.add_summary(summary, global_step)
        if min_k:
            for k, (i, v, p) in enumerate(zip(*max_k)):
                print(
                    'Character #{} with the largest distance gap: [{}], gap: [{:.05}], prob: [{:.05}]'.format(
                        k, converter.decode_maps[i], v, p))
            for k, (i, v, p) in enumerate(zip(*min_k)):
                print(
                    'Character #{} with the smallest distance gap: [{}], gap: [{:.05}], prob: [{:.05}]'.format(
                        k, converter.decode_maps[i], v, p))

        print(batch_cost)
        predicts = [
            converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in predicts
        ]
        accuracy, _ = infer.calculate_accuracy(predicts, labels)

        tf_utils.add_scalar_summary(train_writer, "train_accuracy", accuracy,
                                    global_step)
        tf_utils.add_scalar_summary(train_writer, "train_edit_distance",
                                    edit_distance, global_step)

        return batch_cost, global_step, lr
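
Examples #3 and #4 both derive the sequence width with self.round_up(image_width / 4), but the helper itself is not shown. One plausible implementation, assuming it simply rounds up to the next integer:

import math

def round_up(x):
    # Assumed behavior: ceiling, so the sequence length covers the full
    # CNN output width even when the image width is not divisible by 4.
    return int(math.ceil(x))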
Example #4
    def _train_with_summary(self, model, tr_ds, sess, train_writer, converter):
        img_batch, label_batch, labels, positions, _ = tr_ds.get_next_batch(sess)
        image_batch_shape = img_batch.shape
        w = self.round_up(image_batch_shape[2] / 4)

        # Pad every per-image position sequence to length w (6940 is the filler
        # index), then flatten the batch into a 1-D array of position labels.
        positions_list = []
        for position_str in positions:
            padded = [6940] * w
            num_list = [int(x) for x in str(position_str, encoding="utf8").split(',')]
            padded[:len(num_list)] = num_list
            positions_list.append(padded)
        con_labels_batch = np.array(positions_list).reshape(-1)

        # print('image_batch:',img_batch)
        feed = {model.inputs: img_batch,
                model.labels: label_batch,
                model.con_labels: con_labels_batch,
                model.len_labels: w,
                model.is_training: True}

        fetches = [model.total_loss,
                   model.ctc_loss,
                   model.regularization_loss,
                   model.global_step,
                   model.lr,
                   model.merged_summay,
                   model.dense_decoded,
                   model.edit_distance,
                   model.train_op]

        batch_cost, _, _, global_step, lr, summary, predicts, edit_distance, _ = sess.run(fetches, feed)
        train_writer.add_summary(summary, global_step)

        print(batch_cost)
        predicts = [converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in predicts]
        accuracy, _ = infer.calculate_accuracy(predicts, labels)

        tf_utils.add_scalar_summary(train_writer, "train_accuracy", accuracy, global_step)
        tf_utils.add_scalar_summary(train_writer, "train_edit_distance", edit_distance, global_step)

        return batch_cost, global_step, lr
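
The position padding in Example #4 can be isolated into a small standalone helper. The sketch below assumes 6940 is the padding index and that positions arrive as UTF-8 byte strings of comma-separated integers; pad_positions and PAD_INDEX are illustrative names, not part of the source:

import numpy as np

PAD_INDEX = 6940  # assumed filler index for unused positions

def pad_positions(positions, w):
    # Pad every per-image position list to length w, then flatten the batch.
    padded = []
    for position_str in positions:
        row = [PAD_INDEX] * w
        nums = [int(x) for x in str(position_str, encoding="utf8").split(",")]
        row[:len(nums)] = nums
        padded.append(row)
    return np.array(padded).reshape(-1)

# e.g. pad_positions([b"3,1,4"], w=6) -> array([3, 1, 4, 6940, 6940, 6940])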