Example no. 1
def _download_project_optimized(api: Api,
                                project_id,
                                project_dir,
                                datasets_whitelist=None,
                                cache=None,
                                progress_cb=None):
    project_info = api.project.get_info_by_id(project_id)
    project_id = project_info.id
    logger.info(
        "Annotations are not cached (always download latest version from server)"
    )
    project_fs = Project(project_dir, OpenMode.CREATE)
    meta = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_fs.set_meta(meta)
    for dataset_info in api.dataset.get_list(project_id):
        dataset_name = dataset_info.name
        dataset_id = dataset_info.id
        need_download = True
        if datasets_whitelist is not None and dataset_id not in datasets_whitelist:
            need_download = False
        if need_download:
            dataset = project_fs.create_dataset(dataset_name)
            _download_dataset(api,
                              dataset,
                              dataset_id,
                              cache=cache,
                              progress_cb=progress_cb)
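A minimal usage sketch for the helper above; the server address, token, project id, and directory are placeholders, and passing cache=None with progress_cb=None simply disables the image cache and progress reporting:

api = Api('https://app.supervise.ly', '<your API token>')
_download_project_optimized(api,
                            project_id=123,
                            project_dir='/tmp/my_project',
                            datasets_whitelist=None,  # None means every dataset is downloaded
                            cache=None,
                            progress_cb=None)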
Example no. 2
def _report_metrics(m_type, epoch, metrics):
    logger.info('metrics', extra={
        'event_type': EventType.METRICS,
        'type': m_type,
        'epoch': epoch,
        'metrics': metrics
    })
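An illustrative call; the metric names and values are invented, while EventType.METRICS and the logger come from the surrounding module:

_report_metrics('train', epoch=3, metrics={'loss': 0.42, 'accuracy': 0.91})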
Example no. 3
def print_evaluation_values(tag_pair_metrics):
    labels = [
        ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE,
        TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE
    ]
    for label in labels:
        logger.info('    {0}:   {1:2.4f}'.format(
            label.ljust(16), tag_pair_metrics[label]))
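A hypothetical metrics dictionary keyed by the module-level constants referenced above (all numbers are invented for illustration):

tag_pair_metrics = {
    ACCURACY: 0.95, PRECISION: 0.93, RECALL: 0.90, F1_MEASURE: 0.915,
    TRUE_POSITIVE: 90, TRUE_NEGATIVE: 860, FALSE_POSITIVE: 7, FALSE_NEGATIVE: 10
}
print_evaluation_values(tag_pair_metrics)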
Example no. 4
def report_checkpoint_saved(checkpoint_idx, subdir, sizeb, best_now, optional_data):
    logger.info('checkpoint', extra={
        'event_type': EventType.CHECKPOINT,
        'id': checkpoint_idx,
        'subdir': subdir,
        'sizeb': sizeb,
        'best_now': best_now,
        'optional': optional_data
    })
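For example (all argument values are illustrative; sizeb is the checkpoint size in bytes):

report_checkpoint_saved(checkpoint_idx=7,
                        subdir='checkpoints/000007',
                        sizeb=104857600,
                        best_now=True,
                        optional_data={})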
Example no. 5
    def _construct_and_fill_model(self):
        super()._construct_and_fill_model()

        self.device_ids = sly.env.remap_gpu_devices([self._config[GPU_DEVICE]])
        n_cls = (max(self.out_class_mapping.keys()) + 1)
        self.model = create_model_for_inference(n_cls=n_cls,
                                                device_ids=self.device_ids,
                                                model_dir=TaskPaths.MODEL_DIR)
        logger.info('Weights are loaded.')
Example no. 6
    def _construct_and_fill_model(self):
        super()._construct_and_fill_model()
        device_ids = sly.env.remap_gpu_devices([self._config[GPU_DEVICE]])
        num_layers = determine_resnet_model_configuration(TaskPaths.MODEL_CONFIG_PATH)
        self.model = create_model(num_layers=num_layers, n_cls=(max(self.classification_tags_to_idx.values()) + 1),
                                  device_ids=device_ids)

        self.model = WeightsRW(TaskPaths.MODEL_DIR).load_strictly(self.model)
        self.model.eval()
        logger.info('Weights are loaded.')
Example no. 7
def report_dtl_verification_finished(output):
    '''
    Logs a message with level INFO on logger
    :param output: str
    '''
    logger.info('Verification finished.',
                extra={
                    'output': output,
                    'event_type': EventType.TASK_VERIFIED
                })
Example no. 8
    def get_metrics(self):  # Macro-evaluation
        logger.info('Start evaluation of macro metrics.')
        result = {
            gt_class: {
                AP:
                self._calculate_average_precision(
                    gt_class, self._gt_to_pred_class_mapping[gt_class],
                    pair_counters)
            }
            for gt_class, pair_counters in self._counters.items()
        }
        logger.info('Finish macro evaluation')
        return result
Example no. 9
    def _load_train_config(self):  # @TODO: partly copypasted from SingleImageInferenceBase
        self._load_raw_model_config_json()

        self.classification_tags = self._model_out_img_tags()
        logger.info('Read model out tags', extra={'tags': self.classification_tags.to_json()})
        self.classification_tags_to_idx = self.train_config[self.classification_tags_to_idx_key]
        logger.info('Read model internal tags mapping', extra={'tags_mapping': self.classification_tags_to_idx})

        self._model_out_meta = ProjectMeta(obj_classes=ObjClassCollection(),
                                           img_tag_metas=self.classification_tags,
                                           obj_tag_metas=self._model_out_obj_tags())

        self.idx_to_classification_tags = {v: k for k, v in self.classification_tags_to_idx.items()}
        self._determine_model_input_size()
Example no. 10
    def _check_projects_compatible_structure(self):
        if self._project_gt.datasets.keys() != self._project_pred.datasets.keys():  # Keys are sorted - ok
            raise RuntimeError('Projects must contain same datasets.')
        if self._project_gt.total_items != self._project_pred.total_items:
            raise RuntimeError('Projects must contain same number of samples.')
        for ds_gt in self._project_gt.datasets:
            ds_pred = self._project_pred.datasets.get(ds_gt.name)
            for sample_name in ds_gt:
                if not ds_pred.item_exists(sample_name):
                    # Implicit string concatenation keeps the placeholders and .format() in one literal
                    raise RuntimeError('Projects must contain identically named samples in respective datasets. '
                                       'Ground truth project has sample {!r} in dataset {!r}, but prediction project '
                                       'does not.'.format(sample_name, ds_gt.name))

        logger.info('Projects structure has been read. Samples: {} per project.'.format(self._project_gt.total_items))
Example no. 11
    def _construct_and_fill_model(self):
        super()._construct_and_fill_model()

        self.device_ids = sly.env.remap_gpu_devices([self._config[GPU_DEVICE]])
        n_cls = (max(self.out_class_mapping.keys()) + 1)
        use_batchnorm = self.train_config[SETTINGS]['use_batchnorm']

        model = ICNet(n_classes=n_cls,
                      input_size=self.input_size,
                      is_batchnorm=use_batchnorm)
        WeightsRW(TaskPaths.MODEL_DIR).load_strictly(model)
        logger.info('Weights are loaded.')
        self.model = DataParallel(model, device_ids=self.device_ids)
        self.model.eval()
        self.model.cuda()
Example no. 12
    def log_total_metrics(self):
        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(self._iou_threshold))

        classes_values = self.get_metrics()
        for i, (cls_gt, pair_values) in enumerate(classes_values.items()):
            average_precision = pair_values[AP]
            log_line()
            log_head(' Results for pair of classes <<{} <-> {}>>  '.format(cls_gt,
                                                                           self._gt_to_pred_class_mapping[cls_gt]))
            logger.info('Average Precision (AP): {}'.format(average_precision))

        log_line()
        log_head(' Mean metrics values ')
        logger.info('Mean Average Precision (mAP): {}'.format(self.average_per_class_avg_precision(classes_values)))
        log_line()
Example no. 13
    def log_total_metrics(self):
        logger.info('**************** Result IoU metric values ****************')
        logger.info('NOTE! Values for "intersection" and "union" are in pixels.')
        for i, (cls_gt, values) in enumerate(self.get_metrics().items(), start=1):
            iou_line = _iou_log_line(values[IOU], values[INTERSECTION], values[UNION])
            logger.info('{}. Classes {} <-> {}:   {}'.format(i, cls_gt, self._class_mapping[cls_gt], iou_line))

        total_values = self.get_total_metrics()
        logger.info(
            'Total:   {}'.format(_iou_log_line(total_values[IOU], total_values[INTERSECTION], total_values[UNION])))
Example no. 14
    def log_total_metrics(self):
        def exp_one(arg):
            return str(arg).center(20)

        def exp_arg(args):
            return [exp_one(arg) for arg in args]

        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(
            self._iou_threshold))
        log_head(' Confusion matrix ')

        sorted_gt_names = sorted(self._class_mapping.keys())
        pred_names = [
            self._class_mapping[gt_name] for gt_name in sorted_gt_names
        ]
        logger.info(''.join(exp_arg([''] + pred_names + ['False Negatives'])))
        for gt_name in sorted_gt_names:
            logger.info(''.join([exp_one(gt_name)] + exp_arg([
                self._confusion_matrix[gt_name, pred_name]
                for pred_name in pred_names
            ]) + [exp_one(self._unmatched_gt[gt_name])]))
            log_line()
        logger.info(''.join([exp_one('False Positives')] + exp_arg(
            [self._unmatched_pred[pred_name]
             for pred_name in pred_names]) + [exp_one('0')]))
        log_line()
Example no. 15
    def log_total_metrics(self):
        log_line()
        log_head(' Result metrics values for {} IoU threshold '.format(self._iou_threshold))

        for i, (gt_class, values) in enumerate(self.get_metrics().items()):
            log_line()
            log_head(' Results for pair of classes <<{} <-> {}>>  '.format(gt_class,
                                                                           self._gt_to_pred_class_mapping[gt_class]))
            logger.info('Precision: {}'.format(values[PRECISION]))
            logger.info('Recall: {}'.format(values[RECALL]))

        log_line()
        log_head(' Total metrics values ')
        total_values = self.get_total_metrics()
        logger.info('Precision: {}'.format(total_values[PRECISION]))
        logger.info('Recall: {}'.format(total_values[RECALL]))
        log_line()
Example no. 16
def create_model(num_layers, n_cls, device_ids):
    logger.info('Will construct ResNet{} model.'.format(num_layers))
    model = num_layers_to_model[num_layers]()
    model.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, n_cls)

    logger.info('Model has been constructed (w/out weights).')
    model = DataParallel(model, device_ids=device_ids).cuda()
    logger.info('Model has been loaded into GPU(s).', extra={'remapped_device_ids': device_ids})
    return model
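A sketch of how this factory might be called, assuming num_layers_to_model maps supported depths (e.g. 18, 34, 50) to ResNet constructors and at least one CUDA device is visible:

model = create_model(num_layers=50, n_cls=10, device_ids=[0])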
Example no. 17
    def _construct_and_fill_model(self):
        super()._construct_and_fill_model()
        self.device_ids = sly.env.remap_gpu_devices([self._config[GPU_DEVICE]])
        self.graph, self.model = inference_lib.construct_and_fill_model(
            self.class_title_to_idx, self.input_size_limits,
            TaskPaths.MODEL_DIR)
        logger.info('Weights are loaded.')

        logger.info('Warming up the model with a dummy image.')
        with self.graph.as_default():
            self.model.detect([np.zeros([256, 256, 3], dtype=np.uint8)],
                              verbose=0)
        logger.info('Model warmup finished.')
Example no. 18
    def log_total_metrics(self):
        common_info = """
                P = condition positive (the number of real positive cases in the data)
                N = condition negative (the number of real negative cases in the data)
                TP = True Positive prediction
                TN = True Negative prediction
                FP = False Positive prediction (Type I error)
                FN = False Negative prediction (Type II error)
                Accuracy = (TP + TN)/(TP + TN + FP + FN) = TRUE/TOTAL
                Precision = TP / (TP + FP)
                Recall = TP / (TP + FN)
                F1-Measure = (2 * TP) / (2 * TP + FP + FN)
                """

        log_line()
        log_line(c='*')
        for line in common_info.split('\n'):
            line = line.strip()
            if len(line) > 0:
                logger.info(line.ljust(80))

        log_line(c='*')
        log_line()

        def print_evaluation_values(tag_pair_metrics):
            labels = [
                ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE,
                TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE
            ]
            for label in labels:
                logger.info('    {0}:   {1:2.4f}'.format(
                    label.ljust(16), tag_pair_metrics[label]))

        for i, (tag_name_gt,
                tag_metrics) in enumerate(self.get_metrics().items(), start=1):
            logger.info('{}) {} <--> {}:'.format(
                i, tag_name_gt, self._tags_mapping[tag_name_gt]))
            print_evaluation_values(tag_metrics)
            log_line()

        logger.info('Total values:')
        total_values = self.get_total_metrics()
        print_evaluation_values(total_values)
        log_line()

        log_line(c='*')
Example no. 19
def _download_dataset(api: Api,
                      dataset,
                      dataset_id,
                      cache=None,
                      progress_cb=None):
    images = api.image.get_list(dataset_id)

    images_to_download = images

    # copy images from cache to task folder and download corresponding annotations
    if cache:
        images_to_download, images_in_cache, images_cache_paths = _split_images_by_cache(
            images, cache)
        if len(images_to_download) + len(images_in_cache) != len(images):
            raise RuntimeError(
                "Error with images cache during download. Please contact support."
            )
        logger.info(f"Download dataset: {dataset.name}",
                    extra={
                        "total": len(images),
                        "in cache": len(images_in_cache),
                        "to download": len(images_to_download)
                    })
        if len(images_in_cache) > 0:
            img_cache_ids = [img_info.id for img_info in images_in_cache]
            ann_info_list = api.annotation.download_batch(
                dataset_id, img_cache_ids, progress_cb)
            img_name_to_ann = {
                ann.image_id: ann.annotation
                for ann in ann_info_list
            }
            for batch in batched(list(zip(images_in_cache,
                                          images_cache_paths)),
                                 batch_size=50):
                for img_info, img_cache_path in batch:
                    item_name = _maybe_append_image_extension(
                        img_info.name, img_info.ext)
                    dataset.add_item_file(item_name,
                                          img_cache_path,
                                          img_name_to_ann[img_info.id],
                                          _validate_item=False,
                                          _use_hardlink=True)
                # progress_cb defaults to None, so only report progress when a callback was provided
                if progress_cb is not None:
                    progress_cb(len(batch))

    # download images from server
    if len(images_to_download) > 0:
        # prepare lists for api methods
        img_ids = []
        img_paths = []
        for img_info in images_to_download:
            img_ids.append(img_info.id)
            # TODO download to a temp file and use dataset api to add the image to the dataset.
            img_paths.append(
                os.path.join(
                    dataset.img_dir,
                    _maybe_append_image_extension(img_info.name,
                                                  img_info.ext)))

        # download annotations
        ann_info_list = api.annotation.download_batch(dataset_id, img_ids,
                                                      progress_cb)
        img_name_to_ann = {
            ann.image_id: ann.annotation
            for ann in ann_info_list
        }
        api.image.download_paths(dataset_id, img_ids, img_paths, progress_cb)
        for img_info, img_path in zip(images_to_download, img_paths):
            dataset.add_item_file(img_info.name, img_path,
                                  img_name_to_ann[img_info.id])

        if cache:
            img_hashes = [img_info.hash for img_info in images_to_download]
            cache.write_objects(img_paths, img_hashes)
Example no. 20
    def get_metrics(self):  # Macro-evaluation
        logger.info('Start evaluation of macro metrics.')
        result = {pair_name: {AP: self._calculate_average_precision(pair_name, pair_counters)}
                  for pair_name, pair_counters in self._counters.items()}
        logger.info('Finish macro evaluation')
        return result
Example no. 21
def report_import_finished():
    logger.info('import finished',
                extra={'event_type': EventType.IMPORT_APPLIED})
Example no. 22
def log_head(string):
    logger.info(string.center(80, '*'))
Example no. 23
def log_line(length=80, c=' '):
    logger.info(c * length)
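Together with log_head() from the previous example, log_line() is enough to render simple banners in the task log, for instance:

log_line(c='*')
log_head(' Evaluation results ')
log_line(c='*')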
Example no. 24
def report_inference_finished():
    logger.info('model applied', extra={'event_type': EventType.MODEL_APPLIED})
Example no. 25
def report_dtl_finished():
    '''
    Logs a message with level INFO on logger
    '''
    logger.info('DTL finished', extra={'event_type': EventType.DTL_APPLIED})
Example no. 26
def report_inference_finished():
    '''
    Logs a message with level INFO on logger
    '''
    logger.info('model applied', extra={'event_type': EventType.MODEL_APPLIED})
Example no. 27
def report_import_finished():
    '''
    Logs a message with level INFO on logger
    '''
    logger.info('import finished',
                extra={'event_type': EventType.IMPORT_APPLIED})
Example no. 28
def report_agent_rpc_ready():
    '''
    Logs a message with level INFO on logger
    '''
    logger.info('Ready to get events',
                extra={'event_type': EventType.TASK_DEPLOYED})
Example no. 29
def report_dtl_finished():
    logger.info('DTL finished', extra={'event_type': EventType.DTL_APPLIED})
Example no. 30
def report_dtl_verification_finished(output):
    logger.info('Verification finished.',
                extra={
                    'output': output,
                    'event_type': EventType.TASK_VERIFIED
                })