def main():
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    if IOU not in settings:
        raise RuntimeError(
            '"{}" field is missing. Please set the Intersection over Union threshold.'.format(IOU))
    if CONFIDENCE_TAG_NAME not in settings:
        raise RuntimeError(
            f'{CONFIDENCE_TAG_NAME!r} field is missing. Please set the tag name to read prediction confidence from.')

    confidence_tag_name = settings[CONFIDENCE_TAG_NAME]
    confidence_threshold = settings.get(CONFIDENCE_THRESHOLD, 0.0)
    metric = MAPMetric(settings[CLASSES_MAPPING], settings[IOU],
                       confidence_tag_name=confidence_tag_name,
                       confidence_threshold=confidence_threshold)
    applier = sly.MetricProjectsApplier(metric, settings)

    # Input sanity checks.
    check_class_mapping(applier.project_gt, applier.project_pred, settings[CLASSES_MAPPING])
    if not applier.project_pred.meta.tag_metas.has_key(confidence_tag_name):
        raise RuntimeError(
            f'Tag {confidence_tag_name!r} cannot be found in the prediction project '
            f'{applier.project_pred.name!r}. Make sure you specify the correct confidence tag name '
            f'in the {CONFIDENCE_TAG_NAME!r} setting of the plugin config.')

    applier.run_evaluation()
    metric.log_total_metrics()
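# A minimal sketch of the settings the mAP entry point above expects. The literal key
# strings below are assumptions: the code only reads them through the CLASSES_MAPPING,
# IOU, CONFIDENCE_TAG_NAME and CONFIDENCE_THRESHOLD constants defined elsewhere in the
# plugin, so check those definitions before reusing these names. EXAMPLE_MAP_SETTINGS
# is a hypothetical name used here for illustration only.
EXAMPLE_MAP_SETTINGS = {
    'classes_mapping': {'car_pred': 'car', 'person_pred': 'person'},  # maps class names between the two projects
    'iou': 0.5,                            # Intersection over Union threshold for matching objects
    'confidence_tag_name': 'confidence',   # tag on predicted objects holding the confidence score
    'confidence_threshold': 0.3,           # predictions below this value are ignored (code defaults to 0.0)
}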
def main():
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    metric = IoUMetric(settings[CLASSES_MAPPING])
    applier = sly.MetricProjectsApplier(metric, settings)

    check_class_mapping(applier.project_gt, applier.project_pred, settings[CLASSES_MAPPING])

    applier.run_evaluation()
    metric.log_total_metrics()
def main():
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    sly.logger.info('Input settings:', extra={'config': settings})

    if IOU not in settings:
        raise RuntimeError(
            '"{}" field is missing. Please set the Intersection over Union threshold.'.format(IOU))

    metric = PrecisionRecallMetric(settings[CLASSES_MAPPING], settings[IOU])
    applier = sly.MetricProjectsApplier(metric, settings)

    check_class_mapping(applier.project_gt, applier.project_pred, settings[CLASSES_MAPPING])

    applier.run_evaluation()
    metric.log_total_metrics()