Example #1
    # Relies on typing.List, tensorflow as tf and the project helper format_metrics.
    def evaluate_output(self, tst_data, out, num_batches, metrics: List[tf.keras.metrics.Metric]):
        # out.write('x\ty_true\ty_pred\n')
        for metric in metrics:
            metric.reset_states()
        for idx, batch in enumerate(tst_data):
            outputs = self.model.predict_on_batch(batch[0])
            for metric in metrics:
                # The Keras mask, if present on the outputs, is passed as the sample weight
                metric(batch[1], outputs, outputs._keras_mask if hasattr(outputs, '_keras_mask') else None)
            self.evaluate_output_to_file(batch, outputs, out)
            print('\r{}/{} {}'.format(idx + 1, num_batches, format_metrics(metrics)), end='')
        print()
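
The loop above drives tf.keras metrics by calling them directly, which both updates their internal state and returns the running value. Below is a minimal, self-contained sketch of that reset/update/result cycle; the metric choice and the toy tensors are illustrative, not taken from the source.

import tensorflow as tf

metric = tf.keras.metrics.SparseCategoricalAccuracy()
metric.reset_states()                    # clear accumulators before a fresh evaluation pass
y_true = tf.constant([1, 0, 1])
y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
metric(y_true, y_pred)                   # __call__ updates the state and returns the running result
print(float(metric.result()))            # 1.0 for this toy batch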
Example #2
    # Relies on os, math, logging, tensorflow as tf, typing.List, and on the project helpers
    # get_resource, init_logger, size_of_dataset, Timer, IOBES_F1, format_metrics and format_scores.
    def evaluate(self, input_path: str, save_dir=None, output=False, batch_size=128, logger: logging.Logger = None,
                 callbacks: List[tf.keras.callbacks.Callback] = None, warm_up=True, verbose=True, **kwargs):
        input_path = get_resource(input_path)
        file_prefix, ext = os.path.splitext(input_path)
        name = os.path.basename(file_prefix)
        if not name:
            name = 'evaluate'
        if save_dir and not logger:
            logger = init_logger(name=name, root_dir=save_dir, level=logging.INFO if verbose else logging.WARN,
                                 mode='w')
        tst_data = self.transform.file_to_dataset(input_path, batch_size=batch_size)
        samples = size_of_dataset(tst_data)
        num_batches = math.ceil(samples / batch_size)
        if warm_up:
            # Run one batch through predict first so graph construction is not counted in the timed evaluation
            self.model.predict_on_batch(list(tst_data.take(1))[0])
        if output:
            assert save_dir, 'Must pass save_dir in order to output'
            if isinstance(output, bool):
                output = os.path.join(save_dir, name) + '.predict' + ext
            elif not isinstance(output, str):
                raise RuntimeError('output ({}) must be of type bool or str'.format(repr(output)))
        timer = Timer()
        loss, score, output = self.evaluate_dataset(tst_data, callbacks, output, num_batches)
        delta_time = timer.stop()
        speed = samples / delta_time.delta_seconds

        if logger:
            # Look for an IOBES F1 metric so a detailed per-type report can be appended to the log
            f1: IOBES_F1 = None
            for metric in self.model.metrics:
                if isinstance(metric, IOBES_F1):
                    f1 = metric
                    break
            extra_report = ''
            if f1:
                overall, by_type, extra_report = f1.state.result(full=True, verbose=False)
                extra_report = ' \n' + extra_report
            logger.info('Evaluation results for {} - '
                        'loss: {:.4f} - {} - speed: {:.2f} sample/sec{}'
                        .format(name + ext, loss,
                                format_scores(score) if isinstance(score, dict) else format_metrics(self.model.metrics),
                                speed, extra_report))
        if output:
            logger.info('Saving output to {}'.format(output))
            with open(output, 'w', encoding='utf-8') as out:
                self.evaluate_output(tst_data, out, num_batches, self.model.metrics)

        return loss, score, speed
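
A hedged usage sketch of the evaluate API above; the component class, the load step, the model path and the test file are placeholders rather than names taken from the source.

component = SomeTaggingComponent()       # placeholder component exposing the evaluate() method above
component.load('path/to/saved_model')    # placeholder loading step
loss, score, speed = component.evaluate('data/test.tsv',
                                        save_dir='eval_logs',  # enables logging; output lands in this directory
                                        output=True,           # predictions written to eval_logs/test.predict.tsv
                                        batch_size=128)
print('loss={:.4f} speed={:.2f} samples/sec'.format(loss, speed))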