# Module-level imports assumed by these methods:
import logging
import os
import time

import numpy as np

# `pp` refers to the project's pretty-printing helper module (it provides
# `TablePrinter`); its exact import path is project-specific.


def evaluate(self, epoch=None, verbose=True, level='minor'):
    """
    level: one of 'minor', 'major', 'full', 'none', 'one_image'.
    """
    if level not in ['minor', 'major', 'full', 'none', 'one_image']:
        raise ValueError("Unknown evaluation level: {}".format(level))

    self.model.train(False)

    logging.info("Evaluating Model on the Validation Dataset.")
    start_time = time.time()
    val_metric = self.val_evaluator.evaluate(epoch=epoch, level=level)
    duration = time.time() - start_time
    logging.info("Finished Validation run in {:.2f} minutes.".format(
        duration / 60))
    logging.info("")

    logging.info("Evaluating Model on the Training Dataset.")
    start_time = time.time()
    train_metric = self.train_evaluator.evaluate(epoch=epoch, level=level)
    duration = time.time() - start_time
    logging.info("Finished Training run in {:.2f} minutes.".format(
        duration / 60))
    logging.info("")

    if val_metric is None:
        logging.info("Validation metric is None. Stopping evaluation.")
        return

    self.model.train(True)

    if verbose:
        # Pretty-print per-metric results: a smoothed and a raw column
        # for both the validation and the training run.
        names = val_metric.get_pp_names(time_unit="ms", summary=False)
        table = pp.TablePrinter(row_names=names)

        values = val_metric.get_pp_values(
            time_unit="ms", summary=False, ignore_first=False)
        smoothed = self.val_evaluator.smoother.update_weights(values)
        table.add_column(smoothed, name="Validation")
        table.add_column(values, name="Val (raw)")

        values = train_metric.get_pp_values(
            time_unit="ms", summary=False, ignore_first=False)
        smoothed = self.train_evaluator.smoother.update_weights(values)
        table.add_column(smoothed, name="Training")
        table.add_column(values, name="Train (raw)")

        table.print_table()

    if epoch is not None:
        vdict = val_metric.get_pp_dict(time_unit="ms", summary=True,
                                       ignore_first=False)
        self.logger.add_values(value_dict=vdict, step=epoch, prefix='val')

        tdict = train_metric.get_pp_dict(time_unit="ms", summary=True,
                                         ignore_first=False)
        self.logger.add_values(value_dict=tdict, step=epoch, prefix='train')

    # Derive a short run name from the log directory for the summary line.
    runname = os.path.basename(self.model.logdir)
    if len(runname.split("_")) > 2:
        runname = "_".join(runname.split("_")[0:3])

    if runname == '':
        runname = "ResNet50"

    def median(data, weight=20):
        # Median over the last `weight` logged values.
        return np.median(data[-weight:])

    max_epochs = self.model.trainer.max_epochs

    # Each pair is val | train; the rotation pair previously read the
    # training key twice, which was a copy-paste bug.
    out_str = (
        "Summary: [{:22}](Translation: {:.2f} | {:.2f} "
        "Rotation: {:.2f} | {:.2f}"
        " Epoch: {} / {})").format(
            runname[0:22],
            100 * median(self.logger.data['val\\Average Accuracy']),
            100 * median(self.logger.data['train\\Average Accuracy']),
            100 * median(self.logger.data['val\\Average Accuracy π']),
            100 * median(self.logger.data['train\\Average Accuracy π']),
            epoch, max_epochs)

    logging.info(out_str)
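
# Note: `smoother.update_weights(values)` above is assumed to return a
# smoothed copy of the per-metric values, aggregated over previous
# evaluation runs. A minimal sketch of such a smoother, using an
# exponential moving average; the class name and the `decay` parameter
# are illustrative, not the project's actual API:

class EMASmoother(object):
    """Exponential moving average over successive evaluation runs."""

    def __init__(self, decay=0.9):
        self.decay = decay
        self.weights = None

    def update_weights(self, values):
        if self.weights is None:
            # First call: initialize with the raw values.
            self.weights = list(values)
        else:
            self.weights = [self.decay * w + (1 - self.decay) * v
                            for w, v in zip(self.weights, values)]
        return self.weights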

def evaluate(self, epoch=None, verbose=True, level='minor'):
    """
    level: one of 'minor', 'major', 'full', 'none', 'one_image'.
    """
    if level not in ['minor', 'major', 'full', 'none', 'one_image']:
        raise ValueError("Unknown evaluation level: {}".format(level))

    self.model.train(False)

    logging.info("Evaluating Model on the Validation Dataset.")
    start_time = time.time()
    val_metric = self.val_evaluator.evaluate(epoch=epoch, level=level)
    duration = time.time() - start_time
    logging.info("Finished Validation run in {:.2f} minutes.".format(
        duration / 60))
    logging.info("")

    logging.info("Evaluating Model on the Training Dataset.")
    start_time = time.time()
    train_metric = self.train_evaluator.evaluate(epoch=epoch, level=level)
    duration = time.time() - start_time
    logging.info("Finished Training run in {:.2f} minutes.".format(
        duration / 60))
    logging.info("")

    if val_metric is None:
        logging.info("Validation metric is None. Stopping evaluation.")
        return

    self.model.train(True)

    if verbose:
        # Pretty-print smoothed and raw values for both datasets.
        names = val_metric.get_pp_names(time_unit="ms", summary=False)
        table = pp.TablePrinter(row_names=names)

        values = val_metric.get_pp_values(
            time_unit="ms", summary=False, ignore_first=False)
        smoothed = self.val_evaluator.smoother.update_weights(values)
        table.add_column(smoothed, name="Validation")
        table.add_column(values, name="Val (raw)")

        values = train_metric.get_pp_values(
            time_unit="ms", summary=False, ignore_first=False)
        smoothed = self.train_evaluator.smoother.update_weights(values)
        table.add_column(smoothed, name="Training")
        table.add_column(values, name="Train (raw)")

        table.print_table()

    if epoch is not None:
        vdict = val_metric.get_pp_dict(time_unit="ms", summary=True,
                                       ignore_first=False)
        self.logger.add_values(value_dict=vdict, step=epoch, prefix='val')

        tdict = train_metric.get_pp_dict(time_unit="ms", summary=True,
                                         ignore_first=False)
        self.logger.add_values(value_dict=tdict, step=epoch, prefix='train')

    # The summary line is now factored out into a helper method.
    self._print_summery_string(epoch)
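
# The first version built the summary line inline; this version factors
# it into `_print_summery_string`. A sketch of that helper, reconstructed
# from the inline code in the first version (the metric keys, the
# 20-value median window, and the "ResNet50" fallback all come from
# there; the exact body of the project's helper may differ):

def _print_summery_string(self, epoch):
    # Derive a short run name from the log directory.
    runname = os.path.basename(self.model.logdir)
    if len(runname.split("_")) > 2:
        runname = "_".join(runname.split("_")[0:3])
    if runname == '':
        runname = "ResNet50"

    def median(data, weight=20):
        # Median over the last `weight` logged values.
        return np.median(data[-weight:])

    max_epochs = self.model.trainer.max_epochs
    out_str = (
        "Summary: [{:22}](Translation: {:.2f} | {:.2f} "
        "Rotation: {:.2f} | {:.2f} Epoch: {} / {})").format(
            runname[0:22],
            100 * median(self.logger.data['val\\Average Accuracy']),
            100 * median(self.logger.data['train\\Average Accuracy']),
            100 * median(self.logger.data['val\\Average Accuracy π']),
            100 * median(self.logger.data['train\\Average Accuracy π']),
            epoch, max_epochs)
    logging.info(out_str)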

def evaluate(self, epoch=None, verbose=True, level='minor'):
    """
    level: one of 'minor', 'major', 'full', 'none', 'one_image'.
    """
    if level not in ['minor', 'major', 'full', 'none', 'one_image']:
        raise ValueError("Unknown evaluation level: {}".format(level))

    self.model.train(False)

    # Run every registered evaluator and collect its metric.
    metrics = []
    for evaluator in self.evaluators:
        name = evaluator.name
        logging.info("Running Evaluator '{}'.".format(name))
        start_time = time.time()
        metric = evaluator.evaluate(epoch=epoch, level=level)
        metrics.append(metric)
        duration = time.time() - start_time
        logging.info("Finished Running '{}' in {:.2f} minutes.".format(
            name, duration / 60))
        logging.info("")

    if metrics[0] is None:
        logging.info("First metric is None. Stopping evaluation.")
        return

    self.model.train(True)

    names = metrics[0].get_pp_names(time_unit="ms", summary=False)
    table = pp.TablePrinter(row_names=names)

    for i, metric in enumerate(metrics):
        # Resolve the name outside the verbose branch: it is also needed
        # as the logging prefix below, even when verbose is False.
        name = self.evaluators[i].name

        if verbose:
            # Add one smoothed and one raw column per evaluator; the raw
            # column gets a distinct label so the two are tellable apart.
            values = metric.get_pp_values(time_unit="ms", summary=False,
                                          ignore_first=False)
            smoothed = self.evaluators[i].smoother.update_weights(values)
            table.add_column(smoothed, name=name)
            table.add_column(values, name="{} (raw)".format(name))

        if epoch is not None:
            vdict = metric.get_pp_dict(time_unit="ms", summary=True,
                                       ignore_first=False)
            self.logger.add_values(value_dict=vdict, step=epoch,
                                   prefix=name)

    self._print_summery_string(epoch)

    if verbose:
        table.print_table()
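
# The generalized loop above only assumes that each entry in
# `self.evaluators` exposes a `name`, a `smoother`, and an
# `evaluate(epoch, level)` method returning a metric object. A minimal
# sketch of that implied interface; the class name is illustrative, not
# the project's actual base class:

class EvaluatorBase(object):
    """Interface assumed by the generalized evaluate() loop."""

    def __init__(self, name, smoother):
        self.name = name          # column label and logging prefix
        self.smoother = smoother  # e.g. EMASmoother from the sketch above

    def evaluate(self, epoch=None, level='minor'):
        # Run the model over this evaluator's dataset and return a metric
        # object exposing get_pp_names / get_pp_values / get_pp_dict,
        # or None to abort the evaluation.
        raise NotImplementedError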