Code example #1
def add_annotations_to_sequences(annotations_dir: str, temp_sequences_dir: str,
                                 sequences_dir: str):
    """
    Extract the bounding box annotations from the COCO JSONs for all datasets labeled in this round.

    Args:
        annotations_dir: Path to directory with the annotations in COCO JSONs at the root level.
        temp_sequences_dir: Path to a flat directory of JSONs ending in '_temp.json' which are
            MegaDB sequences without the bounding box annotations.
        sequences_dir: Path to a directory to output corresponding bounding box-included sequences
            in MegaDB format.

    Returns:
        None. JSON files will be written to sequences_dir.
    """
    assert os.path.exists(annotations_dir), \
        f'annotations_dir {annotations_dir} does not exist'
    assert os.path.isdir(annotations_dir), \
        f'annotations_dir {annotations_dir} is not a directory'
    assert os.path.exists(temp_sequences_dir), \
        f'temp_sequences_dir {temp_sequences_dir} does not exist'
    assert os.path.isdir(temp_sequences_dir), \
        f'temp_sequences_dir {temp_sequences_dir} is not a directory'
    os.makedirs(sequences_dir, exist_ok=True)

    temp_megadb_files = path_utils.recursive_file_list(temp_sequences_dir)
    temp_megadb_files = [i for i in temp_megadb_files if i.endswith('.json')]
    print(f'{len(temp_megadb_files)} temporary MegaDB dataset files found.')

    annotation_files = path_utils.recursive_file_list(annotations_dir)
    annotation_files = [i for i in annotation_files if i.endswith('.json')]
    print(
        f'{len(annotation_files)} annotation_files found. Extracting annotations...'
    )

    # dataset name : (seq_id, frame_num) : [bbox, bbox]
    # where bbox is a dict with str 'category' and list 'bbox'
    all_image_bbox: Dict[str, Dict[Tuple[str, int], list]]
    all_image_bbox = defaultdict(lambda: {})
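    # For illustration (hypothetical values), an entry might end up looking like:
    #   all_image_bbox['alka_squirrels'][('seq123', 10)] = \
    #       [{'category': 'animal', 'bbox': [0.0, 0.1072, 0.2627, 0.5135]}]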

    for p in tqdm(annotation_files):
        incoming_coco = IndexedJsonDb(p)
        assert bbox_categories_str == json.dumps(incoming_coco.db['categories']), \
            f'Incoming COCO JSON has a different category mapping! {p}'

        # iterate over image_id_to_image rather than image_id_to_annotations so we include
        # the confirmed empty images
        for image_id, image_entry in incoming_coco.image_id_to_image.items():
            image_file_name = image_entry['file_name']
            # The file_name field in the incoming json looks like
            # alka_squirrels.seq2020_05_07_25C.frame119221.jpg
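            # so file_name_to_parts presumably splits that into the dataset name, the
            # sequence id and an integer frame number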
            dataset_name, seq_id, frame_num = file_name_to_parts(
                image_file_name)
            bbox_field = []  # empty means this image is confirmed empty

            annotations = incoming_coco.image_id_to_annotations.get(
                image_id, [])
            for coco_anno in annotations:
                if coco_anno['category_id'] == 5:
                    assert len(coco_anno['bbox']) == 0, f'{coco_anno}'

                    # There seems to be a bug in the annotations where an image sometimes has a
                    # non-empty label alongside a label with category_id 5 (the 'empty' category).
                    # Ignore the 'empty' label in that case; those images appear to actually be non-empty.
                    continue

                assert coco_anno['category_id'] is not None, f'{p} {coco_anno}'

                bbox_field.append({
                    'category': bbox_cat_map[coco_anno['category_id']],
                    'bbox': ct_utils.truncate_float_array(coco_anno['bbox'], precision=4)
                })
            all_image_bbox[dataset_name][(seq_id, frame_num)] = bbox_field

    print('\nAdding bounding boxes to the MegaDB dataset files...')
    for p in temp_megadb_files:
        basename = os.path.basename(p)
        dataset_name = basename.split('_temp.')[0] if basename.endswith('_temp.json') \
            else basename.split('.json')[0]
        print(f'Adding to dataset {dataset_name}')
        dataset_image_bbox = all_image_bbox.get(dataset_name, None)
        if dataset_image_bbox is None:
            print('Skipping, no annotations found for this dataset\n')
            continue

        with open(p) as f:
            sequences = json.load(f)
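        # Each sequence in a temporary MegaDB file is assumed to look roughly like
        # (hypothetical values):
        #   {'dataset': 'alka_squirrels', 'seq_id': 'seq123',
        #    'images': [{'file': '...', 'frame_num': 1}, ...]}
        # and the loop below attaches a 'bbox' field to each image that has annotations.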

        num_images_updated = 0
        for seq in tqdm(sequences):
            assert seq['dataset'] == dataset_name
            seq_id = seq['seq_id']
            for im in seq['images']:
                frame_num = im.get('frame_num', 1)
                bbox_field = dataset_image_bbox.get((seq_id, frame_num), None)
                if bbox_field is not None:  # empty list also evaluates to False
                    im['bbox'] = bbox_field
                    num_images_updated += 1
        print(
            f'Dataset {dataset_name} had {num_images_updated} images updated\n'
        )

        with open(os.path.join(sequences_dir, f'{dataset_name}.json'),
                  'w',
                  encoding='utf-8') as f:
            json.dump(sequences, f, indent=1, ensure_ascii=False)
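
A minimal usage sketch (the directory paths below are hypothetical, and the function is assumed to be in scope together with its module-level dependencies such as bbox_cat_map and bbox_categories_str):

if __name__ == '__main__':
    # Hypothetical paths; adjust to the local layout
    annotations_dir = '/data/annotations/round_5'
    temp_sequences_dir = '/data/megadb/temp_sequences'
    sequences_dir = '/data/megadb/sequences_with_bbox'

    # Writes one <dataset_name>.json file per dataset to sequences_dir
    add_annotations_to_sequences(annotations_dir, temp_sequences_dir, sequences_dir)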
Code example #2
File: visualize_db.py  Project: lyk125/CameraTraps
def process_images(db_path,output_dir,image_base_dir,options=None):
    """
    Writes images and html to output_dir to visualize the annotations in the json file
    db_path.
    
    db_path can also be a previously-loaded database.
    
    Returns the html filename and the database:
        
    return htmlOutputFile,image_db
    """    
    
    if options is None:
        options = DbVizOptions()
    
    print(options.__dict__)
    
    os.makedirs(os.path.join(output_dir, 'rendered_images'), exist_ok=True)
    assert(os.path.isdir(image_base_dir))
    
    if isinstance(db_path,str):
        assert(os.path.isfile(db_path))
        print('Loading database from {}...'.format(db_path))
        with open(db_path) as f:
            image_db = json.load(f)
        print('...done')
    elif isinstance(db_path,dict):
        print('Using previously-loaded DB')
        image_db = db_path
    else:
        raise ValueError('db_path must be a filename or a previously-loaded database dict')
        
    annotations = image_db['annotations']
    images = image_db['images']
    categories = image_db['categories']
    
    # Optionally remove all images without bounding boxes, *before* sampling
    if options.trim_to_images_with_bboxes:
        
        bHasBbox = [False] * len(annotations)
        for iAnn,ann in enumerate(annotations):
            if 'bbox' in ann:
                assert isinstance(ann['bbox'],list)
                bHasBbox[iAnn] = True
        annotationsWithBboxes = list(compress(annotations, bHasBbox))
        
        imageIDsWithBboxes = [x['image_id'] for x in annotationsWithBboxes]
        imageIDsWithBboxes = set(imageIDsWithBboxes)
        
        bImageHasBbox = [False] * len(images)
        for iImage,image in enumerate(images):
            imageID = image['id']
            if imageID in imageIDsWithBboxes:
                bImageHasBbox[iImage] = True
        imagesWithBboxes = list(compress(images, bImageHasBbox))
        images = imagesWithBboxes
                
    # Optionally remove images with specific labels, *before* sampling
    if options.classes_to_exclude is not None:
     
        print('Indexing database')
        indexed_db = IndexedJsonDb(image_db)
        bValidClass = [True] * len(images)        
        for iImage,image in enumerate(images):
            classes = indexed_db.get_classes_for_image(image)
            for excludedClass in options.classes_to_exclude:
                if excludedClass in classes:
                   bValidClass[iImage] = False
                   break
               
        imagesWithValidClasses = list(compress(images, bValidClass))
        images = imagesWithValidClasses    
    
    # Put the annotations in a dataframe so we can select all annotations for a given image
    print('Creating data frames')
    df_anno = pd.DataFrame(annotations)
    df_img = pd.DataFrame(images)
    
    # Construct label map
    label_map = {}
    for cat in categories:
        label_map[int(cat['id'])] = cat['name']
    
    # Take a sample of images
    if options.num_to_visualize is not None:
        df_img = df_img.sample(n=options.num_to_visualize,random_state=options.random_seed)
    
    images_html = []
    
    # List of dicts representing inputs to render_db_bounding_boxes:
    #
    # bboxes, boxClasses, img_path, output_file_name
    rendering_info = []
    
    print('Preparing rendering list')
    # iImage = 0
    for iImage in tqdm(range(len(df_img))):
        
        img_id = df_img.iloc[iImage]['id']
        img_relative_path = df_img.iloc[iImage]['file_name']
        img_path = os.path.join(image_base_dir, image_filename_to_path(img_relative_path, image_base_dir))
    
        annos_i = df_anno.loc[df_anno['image_id'] == img_id, :]  # all annotations on this image
    
        bboxes = []
        boxClasses = []
        
        # All the class labels we've seen for this image (with or without bboxes)
        imageCategories = set()
        
        annotationLevelForImage = ''
        
        # Iterate over annotations for this image
        # iAnn = 0; anno = annos_i.iloc[iAnn]
        for iAnn,anno in annos_i.iterrows():
        
            if 'sequence_level_annotation' in anno:
                bSequenceLevelAnnotation = anno['sequence_level_annotation']
                if bSequenceLevelAnnotation:
                    annLevel = 'sequence'
                else:
                    annLevel = 'image'
                if annotationLevelForImage == '':
                    annotationLevelForImage = annLevel
                elif annotationLevelForImage != annLevel:
                    annotationLevelForImage = 'mixed'
                    
            categoryID = anno['category_id']
            categoryName = label_map[categoryID]
            if options.add_search_links:
                categoryName = categoryName.replace('"','')
                categoryName = '<a href="https://www.bing.com/images/search?q={}">{}</a>'.format(categoryName,categoryName)
            imageCategories.add(categoryName)
            
            if 'bbox' in anno:
                bbox = anno['bbox']        
                if isinstance(bbox,float):
                    assert math.isnan(bbox), "I shouldn't see a bbox that's neither a box nor NaN"
                    continue
                bboxes.append(bbox)
                boxClasses.append(anno['category_id'])
        
        imageClasses = ', '.join(imageCategories)
                
        file_name = '{}_gtbbox.jpg'.format(img_id.lower().split('.jpg')[0])
        file_name = file_name.replace('/', '~')
        
        rendering_info.append({'bboxes':bboxes, 'boxClasses':boxClasses, 'img_path':img_path,
                               'output_file_name':file_name})
                
        labelLevelString = ''
        if len(annotationLevelForImage) > 0:
            labelLevelString = ' (annotation level: {})'.format(annotationLevelForImage)
            
        # We're adding html for an image before we render it, so it's possible this image will
        # fail to render.  For applications where this script is being used to debug a database
        # (the common case?), this is useful behavior; for other applications, it's annoying.
        #
        # TODO: optionally write html only for images where rendering succeeded
        images_html.append({
            'filename': '{}/{}'.format('rendered_images', file_name),
            'title': '{}<br/>{}, number of boxes: {}, class labels: {}{}'.format(img_relative_path,img_id, len(bboxes), imageClasses, labelLevelString),
            'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        })
    
    # ...for each image

    def render_image_info(rendering_info):
        
        img_path = rendering_info['img_path']
        bboxes = rendering_info['bboxes']
        bboxClasses = rendering_info['boxClasses']
        output_file_name = rendering_info['output_file_name']
        
        if not os.path.exists(img_path):
            print('Image {} cannot be found'.format(img_path))
            return
            
        try:
            original_image = vis_utils.open_image(img_path)
            original_size = original_image.size
            image = vis_utils.resize_image(original_image, options.viz_size[0], options.viz_size[1])
        except Exception as e:
            print('Image {} failed to open. Error: {}'.format(img_path, e))
            return
            
        vis_utils.render_db_bounding_boxes(boxes=bboxes, classes=bboxClasses,
                                           image=image, original_size=original_size,
                                           label_map=label_map)
        image.save(os.path.join(output_dir, 'rendered_images', output_file_name))
    
    # ...def render_image_info
    
    print('Rendering images')
    start_time = time.time()
    if options.parallelize_rendering:
        if options.parallelize_rendering_n_cores is None:
            pool = ThreadPool()
        else:
            print('Rendering images with {} workers'.format(options.parallelize_rendering_n_cores))
            pool = ThreadPool(options.parallelize_rendering_n_cores)
        # imap() is lazy, so force evaluation with list(); tqdm provides a progress bar
        list(tqdm(pool.imap(render_image_info, rendering_info), total=len(rendering_info)))
    else:
        for file_info in tqdm(rendering_info):        
            render_image_info(file_info)
    elapsed = time.time() - start_time
    
    print('Rendered {} images in {}'.format(len(rendering_info),humanfriendly.format_timespan(elapsed)))
        
    if options.sort_by_filename:    
        images_html = sorted(images_html, key=lambda x: x['filename'])
        
    htmlOutputFile = os.path.join(output_dir, 'index.html')
    
    htmlOptions = options.htmlOptions
    if isinstance(db_path,str):
        htmlOptions['headerHtml'] = '<h1>Sample annotations from {}</h1>'.format(db_path)
    else:
        htmlOptions['headerHtml'] = '<h1>Sample annotations</h1>'
    write_html_image_list(
            filename=htmlOutputFile,
            images=images_html,
            options=htmlOptions)

    print('Visualized {} images, wrote results to {}'.format(len(images_html),htmlOutputFile))
    
    return htmlOutputFile,image_db
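
A minimal usage sketch (the paths are hypothetical; the option attributes set below are ones the function actually reads, with illustrative values):

options = DbVizOptions()
options.num_to_visualize = 100            # sample 100 images
options.trim_to_images_with_bboxes = True
options.parallelize_rendering = True
options.parallelize_rendering_n_cores = 8

html_file, image_db = process_images(db_path='/data/zsl_camera_traps.json',
                                      output_dir='/data/db_preview',
                                      image_base_dir='/data/images',
                                      options=options)
print('Open {} in a browser to review the sample'.format(html_file))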
Code example #3
def process_batch_results(options):

    ppresults = PostProcessingResults()
    
    ##%% Expand some options for convenience

    output_dir = options.output_dir


    ##%% Prepare output dir

    os.makedirs(output_dir, exist_ok=True)


    ##%% Load ground truth if available

    ground_truth_indexed_db = None
    
    if options.ground_truth_json_file and len(options.ground_truth_json_file) > 0:

        ground_truth_indexed_db = IndexedJsonDb(options.ground_truth_json_file, b_normalize_paths=True,
                                                filename_replacements=options.ground_truth_filename_replacements)

        # Mark images in the ground truth as positive or negative
        n_negative, n_positive, n_unknown, n_ambiguous = mark_detection_status(ground_truth_indexed_db,
            negative_classes=options.negative_classes, unknown_classes=options.unlabeled_classes)
        print('Finished loading and indexing ground truth: {} negative, {} positive, {} unknown, {} ambiguous'.format(
                n_negative, n_positive, n_unknown, n_ambiguous))


    ##%% Load detection results

    if options.api_detection_results is None:
        detection_results, other_fields = load_api_results(options.api_output_file,
                                                 normalize_paths=True,
                                                 filename_replacements=options.api_output_filename_replacements)
        ppresults.api_detection_results = detection_results
        ppresults.api_other_fields = other_fields
        
    else:
        print('Bypassing detection results loading...')
        assert options.api_other_fields is not None
        detection_results = options.api_detection_results
        other_fields = options.api_other_fields
        
    detection_categories_map = other_fields['detection_categories']
    if 'classification_categories' in other_fields:
        classification_categories_map = other_fields['classification_categories']
    else:
        classification_categories_map = {}
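    # detection_results is assumed to be a pandas DataFrame with (at least) 'file',
    # 'max_detection_conf' and 'detections' columns, where each element of 'detections'
    # is a list of dicts along the lines of (hypothetical values):
    #   {'category': '1', 'conf': 0.92, 'bbox': [0.1, 0.2, 0.3, 0.4],
    #    'classifications': [['3', 0.87]]}   # 'classifications' is optional
    # detection_categories_map / classification_categories_map map those string ids to names.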

    # Add a column (pred_detection_label) to indicate predicted detection status, not separating out the classes    
    if options.include_almost_detections:
        detection_results['pred_detection_label'] = DetectionStatus.DS_ALMOST
        confidences = detection_results['max_detection_conf']
        detection_results.loc[confidences >= options.confidence_threshold,'pred_detection_label'] = DetectionStatus.DS_POSITIVE
        detection_results.loc[confidences < options.almost_detection_confidence_threshold,'pred_detection_label'] = DetectionStatus.DS_NEGATIVE        
    else:
        detection_results['pred_detection_label'] = \
        np.where(detection_results['max_detection_conf'] >= options.confidence_threshold,
                 DetectionStatus.DS_POSITIVE, DetectionStatus.DS_NEGATIVE)
        
    n_positives = sum(detection_results['pred_detection_label'] == DetectionStatus.DS_POSITIVE)
    print('Finished loading and preprocessing {} rows from detector output, predicted {} positives'.format(
            len(detection_results), n_positives))

    if options.include_almost_detections:
        n_almosts = sum(detection_results['pred_detection_label'] == DetectionStatus.DS_ALMOST)
        print('...and {} almost-positives'.format(n_almosts))
    

    ##%% If we have ground truth, remove images we can't match to ground truth

    if ground_truth_indexed_db is not None:

        b_match = [False] * len(detection_results)

        detector_files = detection_results['file'].tolist()

        # fn = detector_files[0]; print(fn)
        for i_fn, fn in enumerate(detector_files):

            # assert fn in ground_truth_indexed_db.filename_to_id, 'Could not find ground truth for row {} ({})'.format(i_fn,fn)
            if fn in ground_truth_indexed_db.filename_to_id:
                b_match[i_fn] = True

        print('Confirmed filename matches to ground truth for {} of {} files'.format(sum(b_match), len(detector_files)))

        detection_results = detection_results[b_match]
        detector_files = detection_results['file'].tolist()

        assert len(detector_files) > 0, 'No detection files available, possible ground truth path issue?'
        
        print('Trimmed detection results to {} files'.format(len(detector_files)))

    
    ##%% Sample images for visualization

    images_to_visualize = detection_results

    if options.num_images_to_sample > 0 and options.num_images_to_sample <= len(detection_results):
    
        images_to_visualize = images_to_visualize.sample(options.num_images_to_sample, random_state=options.sample_seed)

    output_html_file = ''

    style_header = """<head>
        <style type="text/css">
        <!--
        a { text-decoration:none; }
        body { font-family:segoe ui, calibri, "trebuchet ms", verdana, arial, sans-serif; }
        div.contentdiv { margin-left:20px; }
        -->
        </style>
        </head>"""

        
    ##%% Fork here depending on whether or not ground truth is available

    # If we have ground truth, we'll compute precision/recall and sample tp/fp/tn/fn.
    #
    # Otherwise we'll just visualize detections/non-detections.

    if ground_truth_indexed_db is not None:

        ##%% Detection evaluation: compute precision/recall

        # numpy array of detection probabilities
        p_detection = detection_results['max_detection_conf'].values
        n_detections = len(p_detection)

        # numpy array of ground truth labels (0.0/1.0), with -1.0 as the unknown/ambiguous value
        gt_detections = np.zeros(n_detections, dtype=float)

        for i_detection, fn in enumerate(detector_files):
            image_id = ground_truth_indexed_db.filename_to_id[fn]
            image = ground_truth_indexed_db.image_id_to_image[image_id]
            detection_status = image['_detection_status']

            if detection_status == DetectionStatus.DS_NEGATIVE:
                gt_detections[i_detection] = 0.0
            elif detection_status == DetectionStatus.DS_POSITIVE:
                gt_detections[i_detection] = 1.0
            else:
                gt_detections[i_detection] = -1.0

        # Don't include ambiguous/unknown ground truth in precision/recall analysis
        b_valid_ground_truth = gt_detections >= 0.0

        p_detection_pr = p_detection[b_valid_ground_truth]
        gt_detections_pr = gt_detections[b_valid_ground_truth]

        print('Including {} of {} values in p/r analysis'.format(np.sum(b_valid_ground_truth),
              len(b_valid_ground_truth)))

        precisions, recalls, thresholds = precision_recall_curve(gt_detections_pr, p_detection_pr)

        # For completeness, include the result at a confidence threshold of 1.0
        thresholds = np.append(thresholds, [1.0])

        precisions_recalls = pd.DataFrame(data={
                'confidence_threshold': thresholds,
                'precision': precisions,
                'recall': recalls
            })

        # Compute and print summary statistics
        average_precision = average_precision_score(gt_detections_pr, p_detection_pr)
        print('Average precision: {:.1%}'.format(average_precision))

        # Thresholds go up throughout precisions/recalls/thresholds; find the last
        # value where recall is at or above target.  That's our precision @ target recall.
        target_recall = 0.9
        b_above_target_recall = np.where(recalls >= target_recall)
        if not np.any(b_above_target_recall):
            precision_at_target_recall = 0.0
        else:
            i_target_recall = np.argmax(b_above_target_recall)
            precision_at_target_recall = precisions[i_target_recall]
        print('Precision at {:.1%} recall: {:.1%}'.format(target_recall, precision_at_target_recall))

        cm = confusion_matrix(gt_detections_pr, np.array(p_detection_pr) > options.confidence_threshold)

        # Flatten the confusion matrix
        tn, fp, fn, tp = cm.ravel()

        precision_at_confidence_threshold = tp / (tp + fp)
        recall_at_confidence_threshold = tp / (tp + fn)
        f1 = 2.0 * (precision_at_confidence_threshold * recall_at_confidence_threshold) / \
            (precision_at_confidence_threshold + recall_at_confidence_threshold)

        print('At a confidence threshold of {:.1%}, precision={:.1%}, recall={:.1%}, f1={:.1%}'.format(
                options.confidence_threshold, precision_at_confidence_threshold, recall_at_confidence_threshold, f1))

        ##%% Collect classification results, if they exist
        
        classifier_accuracies = []
        
        # Mapping of classnames to idx for the confusion matrix.
        #
        # The lambda is kind of a hack: it relies on the assumption that the
        # following code never reassigns classname_to_idx
        classname_to_idx = collections.defaultdict(lambda: len(classname_to_idx))
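        # (e.g. the first class name seen gets index 0, the next new one index 1, and so on)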
        
        # Confusion matrix as defaultdict of defaultdict
        #
        # Rows / first index is ground truth, columns / second index is predicted category
        classifier_cm = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
        
        # iDetection = 0; fn = detector_files[iDetection]; print(fn)
        assert len(detector_files) == len(detection_results)
        for iDetection,fn in enumerate(detector_files):
            
            image_id = ground_truth_indexed_db.filename_to_id[fn]
            image = ground_truth_indexed_db.image_id_to_image[image_id]
            detections = detection_results['detections'].iloc[iDetection]
            pred_class_ids = [det['classifications'][0][0] \
                for det in detections if 'classifications' in det.keys()]
            pred_classnames = [classification_categories_map[pd] for pd in pred_class_ids]

            # If this image has classification predictions, and an unambiguous class
            # annotated, and is a positive image...
            if len(pred_classnames) > 0 \
                    and '_unambiguous_category' in image.keys() \
                    and image['_detection_status'] == DetectionStatus.DS_POSITIVE:
                        
                # The unambiguous category; we make this a set for easier handling afterward
                gt_categories = set([image['_unambiguous_category']])
                pred_categories = set(pred_classnames)
                
                # Compute the accuracy as intersection over union,
                # i.e. (# of categories in both prediction and GT)
                #      divided by (# of categories in either prediction or GT)
                #
                # If there is only one GT category, the result is 1.0 when the
                # prediction is a single category that matches the GT.
                #
                # It is 1.0/(# of predicted top-1 categories) if the GT is
                # one of the predicted top-1 categories.
                #
                # It is 0.0 if none of the predicted categories is correct.
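                #
                # Worked example (hypothetical): if the GT category is {'deer'} and the
                # detections' top-1 classes are {'deer', 'boar'}, the accuracy is
                # |{'deer'} & {'deer', 'boar'}| / |{'deer'} | {'deer', 'boar'}| = 1/2,
                # and the image ends up in the 'tpi' bucket.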
                
                classifier_accuracies.append(
                    len(gt_categories & pred_categories)
                    / len(gt_categories | pred_categories)
                )
                image['_classification_accuracy'] = classifier_accuracies[-1]
                
                # Distribute this accuracy across all predicted categories in the
                # confusion matrix
                assert len(gt_categories) == 1
                gt_class_idx = classname_to_idx[list(gt_categories)[0]]
                for pred_category in pred_categories:
                    pred_class_idx = classname_to_idx[pred_category]
                    classifier_cm[gt_class_idx][pred_class_idx] += 1

        # ...for each file in the detection results
        
        # If we have classification results
        if len(classifier_accuracies) > 0:
            
            # Build confusion matrix as array from classifier_cm
            all_class_ids = sorted(classname_to_idx.values())
            classifier_cm_array = np.array(
                [[classifier_cm[r_idx][c_idx] for c_idx in all_class_ids] for r_idx in all_class_ids], dtype=float)
            classifier_cm_array /= (classifier_cm_array.sum(axis=1, keepdims=True) + 1e-7)

            # Print some statistics
            print("Finished computation of {} classification results".format(len(classifier_accuracies)))
            print("Mean accuracy: {}".format(np.mean(classifier_accuracies)))

            # Prepare confusion matrix output
            
            # Get confusion matrix as string
            sio = io.StringIO()
            np.savetxt(sio, classifier_cm_array * 100, fmt='%5.1f')
            cm_str = sio.getvalue()
            # Get fixed-size classname for each idx
            idx_to_classname = {v:k for k,v in classname_to_idx.items()}
            classname_list = [idx_to_classname[idx] for idx in sorted(classname_to_idx.values())]
            classname_headers = ['{:<5}'.format(cname[:5]) for cname in classname_list]

            # Prepend class name on each line and add to the top
            cm_str_lines = [' ' * 16 + ' '.join(classname_headers)]
            cm_str_lines += ['{:>15}'.format(cn[:15]) + ' ' + cm_line for cn, cm_line in zip(classname_list, cm_str.splitlines())]

            # Print formatted confusion matrix
            print("Confusion matrix: ")
            print(*cm_str_lines, sep='\n')

            # Plot confusion matrix
            
            # To manually add more space at bottom: plt.rcParams['figure.subplot.bottom'] = 0.1
            #
            # Add 0.5 to figsize for every class. For two classes, this will result in
            # fig = plt.figure(figsize=[4,4])
            fig = vis_utils.plot_confusion_matrix(
                            classifier_cm_array,
                            classname_list,
                            normalize=False,
                            title='Confusion matrix',
                            cmap=plt.cm.Blues,
                            vmax=1.0,
                            use_colorbar=True,
                            y_label=True)
            cm_figure_relative_filename = 'confusion_matrix.png'
            cm_figure_filename = os.path.join(output_dir, cm_figure_relative_filename)
            plt.savefig(cm_figure_filename)
            plt.close(fig)

        # ...if we have classification results
        
        
        ##%% Render output

        # Write p/r table to .csv file in output directory
        pr_table_filename = os.path.join(output_dir, 'prec_recall.csv')
        precisions_recalls.to_csv(pr_table_filename, index=False)

        # Write precision/recall plot to .png file in output directory
        t = 'Precision-Recall curve: AP={:0.1%}, P@{:0.1%}={:0.1%}'.format(
                average_precision, target_recall, precision_at_target_recall)
        fig = vis_utils.plot_precision_recall_curve(precisions, recalls, t)
        pr_figure_relative_filename = 'prec_recall.png'
        pr_figure_filename = os.path.join(output_dir, pr_figure_relative_filename)
        plt.savefig(pr_figure_filename)
        # plt.show(block=False)
        plt.close(fig)


        ##%% Sampling
        
        # Sample true/false positives/negatives with correct/incorrect top-1
        # classification and render to html

        # Accumulate html image structs (in the format expected by write_html_image_list)
        # for each category, e.g. 'tp', 'fp', ..., 'class_bird', ...
        images_html = collections.defaultdict(lambda: [])
        # Add default entries by accessing them for the first time
        [images_html[res] for res in ['tp', 'tpc', 'tpi', 'fp', 'tn', 'fn']]  # accessing a missing key on a defaultdict creates it, so this pre-creates the empty lists (and the output directories below)
        for res in images_html.keys():
            os.makedirs(os.path.join(output_dir, res), exist_ok=True)

        image_count = len(images_to_visualize)

        # Each element will be a list of 2-tuples, with elements [collection name,html info struct]
        rendering_results = []
        
        # Each element will be a three-tuple with elements file,max_conf,detections
        files_to_render = []
        
        # Assemble the information we need for rendering, so we can parallelize without
        # dealing with Pandas
        # i_row = 0; row = images_to_visualize.iloc[0]
        for _, row in images_to_visualize.iterrows():

            # Filenames should already have been normalized to either '/' or '\'
            files_to_render.append([row['file'],row['max_detection_conf'],row['detections']])
            
        def render_image_with_gt(file_info):

            image_relative_path = file_info[0]
            max_conf = file_info[1]
            detections = file_info[2]

            # This should already have been normalized to either '/' or '\'

            image_id = ground_truth_indexed_db.filename_to_id.get(image_relative_path, None)
            if image_id is None:
                print('Warning: couldn\'t find ground truth for image {}'.format(image_relative_path))
                return None

            image = ground_truth_indexed_db.image_id_to_image[image_id]
            annotations = ground_truth_indexed_db.image_id_to_annotations[image_id]

            gt_status = image['_detection_status']

            gt_presence = bool(gt_status)

            gt_classes = CameraTrapJsonUtils.annotations_to_classnames(
                    annotations,ground_truth_indexed_db.cat_id_to_name)
            gt_class_summary = ','.join(gt_classes)

            if gt_status > DetectionStatus.DS_MAX_DEFINITIVE_VALUE:
                print('Skipping image {}, does not have a definitive ground truth status (status: {}, classes: {})'.format(
                        image_id, gt_status, gt_class_summary))
                return None

            detected = max_conf > options.confidence_threshold

            if gt_presence and detected:
                if '_classification_accuracy' not in image.keys():
                    res = 'tp'
                elif np.isclose(1, image['_classification_accuracy']):
                    res = 'tpc'
                else:
                    res = 'tpi'
            elif not gt_presence and detected:
                res = 'fp'
            elif gt_presence and not detected:
                res = 'fn'
            else:
                res = 'tn'

            display_name = '<b>Result type</b>: {}, <b>Presence</b>: {}, <b>Class</b>: {}, <b>Max conf</b>: {:0.3f}%, <b>Image</b>: {}'.format(
                res.upper(), str(gt_presence), gt_class_summary,
                max_conf * 100, image_relative_path)

            rendered_image_html_info = render_bounding_boxes(options.image_base_dir,
                                                                image_relative_path,
                                                                display_name,
                                                                detections,
                                                                res,
                                                                detection_categories_map,
                                                                classification_categories_map,
                                                                options)

            image_result = None
            if len(rendered_image_html_info) > 0:
                image_result = [[res,rendered_image_html_info]]
                for gt_class in gt_classes:
                    image_result.append(['class_{}'.format(gt_class),rendered_image_html_info])
            
            return image_result
            
        # ...def render_image_with_gt(file_info)
        
        start_time = time.time()
        if options.parallelize_rendering:
            if options.parallelize_rendering_n_cores is None:
                pool = ThreadPool()
            else:
                print('Rendering images with {} workers'.format(options.parallelize_rendering_n_cores))
                pool = ThreadPool(options.parallelize_rendering_n_cores)
            rendering_results = list(tqdm(pool.imap(render_image_with_gt, files_to_render), total=len(files_to_render)))    
        else:
            # file_info = files_to_render[0]
            for file_info in tqdm(files_to_render):        
                rendering_results.append(render_image_with_gt(file_info))
        elapsed = time.time() - start_time
        
        # Map all the rendering results in the list rendering_results into the 
        # dictionary images_html
        image_rendered_count = 0
        for rendering_result in rendering_results:
            if rendering_result is None:
                continue
            image_rendered_count += 1
            for assignment in rendering_result:
                images_html[assignment[0]].append(assignment[1])
                
        # Prepare the individual html image files
        image_counts = prepare_html_subpages(images_html, output_dir)

        print('{} images rendered (of {})'.format(image_rendered_count,image_count))

        # Write index.html
        all_tp_count = image_counts['tp'] + image_counts['tpc'] + image_counts['tpi']
        total_count = all_tp_count + image_counts['tn'] + image_counts['fp'] + image_counts['fn']
        
        classification_detection_results = """&nbsp;&nbsp;&nbsp;&nbsp;<a href="tpc.html">with all correct top-1 predictions (TPC)</a> ({})<br/>
           &nbsp;&nbsp;&nbsp;&nbsp;<a href="tpi.html">with one or more incorrect top-1 prediction (TPI)</a> ({})<br/>
           &nbsp;&nbsp;&nbsp;&nbsp;<a href="tp.html">without classification evaluation</a><sup>*</sup> ({})<br/>""".format(
            image_counts['tpc'],
            image_counts['tpi'],
            image_counts['tp']            
        )
        
        index_page = """<html>
        {}
        <body>
        <h2>Evaluation</h2>

        <h3>Sample images</h3>
        <div style="margin-left:20px;">
        <p>A sample of {} images, annotated with detections above {:.1%} confidence.</p>
        <a href="tp.html">True positives (TP)</a> ({}) ({:0.1%})<br/>
        CLASSIFICATION_PLACEHOLDER_1
        <a href="tn.html">True negatives (TN)</a> ({}) ({:0.1%})<br/>
        <a href="fp.html">False positives (FP)</a> ({}) ({:0.1%})<br/>
        <a href="fn.html">False negatives (FN)</a> ({}) ({:0.1%})<br/>
        CLASSIFICATION_PLACEHOLDER_2
        </div>        
        """.format(
            style_header,
            image_count, options.confidence_threshold,
            all_tp_count, all_tp_count/total_count,
            image_counts['tn'], image_counts['tn']/total_count,
            image_counts['fp'], image_counts['fp']/total_count,
            image_counts['fn'], image_counts['fn']/total_count
        )
        
        index_page += """
            <h3>Detection results</h3>
            <div class="contentdiv">
            <p>At a confidence threshold of {:0.1%}, precision={:0.1%}, recall={:0.1%}</p>
            <p><strong>Precision/recall summary for all {} images</strong></p><img src="{}"><br/>
            </div>
            """.format(
                options.confidence_threshold, precision_at_confidence_threshold, recall_at_confidence_threshold,
                len(detection_results), pr_figure_relative_filename
           )
        
        if len(classifier_accuracies) > 0:
            index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_1',classification_detection_results)
            index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_2',"""<p><sup>*</sup>We do not evaluate the classification result of images 
                if the classification information is missing, if the image contains
                categories like &lsquo;empty&rsquo; or &lsquo;human&rsquo;, or if the image has multiple 
                classification labels.</p>""")
        else:
            index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_1','')
            index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_2','')
            
        if len(classifier_accuracies) > 0:
            index_page += """
                <h3>Classification results</h3>
                <div class="contentdiv">
                <p>Classification accuracy: {:.2%}<br>
                The accuracy is computed only for images with exactly one classification label.
                The accuracy of an image is computed as 1/(number of unique detected top-1 classes),
                i.e. if the model detects multiple boxes with different top-1 classes, then the accuracy
                decreases and the image is put into 'TPI'.</p>
                <p>Confusion matrix:</p>
                <p><img src="{}"></p>
                <div style='font-family:monospace;display:block;'>{}</div>
                </div>
                """.format(
                    np.mean(classifier_accuracies),
                    cm_figure_relative_filename,
                    "<br>".join(cm_str_lines).replace(' ', '&nbsp;')
                )
                
        # Show links to each GT class
        #
        # We could do this without classification results; currently we don't.
        if len(classname_to_idx) > 0:
            
            index_page += '<h3>Images of specific classes</h3><br/><div class="contentdiv">'
            # Add links to all available classes
            for cname in sorted(classname_to_idx.keys()):
                index_page += "<a href='class_{0}.html'>{0}</a> ({1})<br>".format(
                    cname,
                    len(images_html['class_{}'.format(cname)]))
            index_page += "</div>"
            
        # Close body and html tags
        index_page += "</body></html>"
        output_html_file = os.path.join(output_dir, 'index.html')
        with open(output_html_file, 'w') as f:
            f.write(index_page)

        print('Finished writing html to {}'.format(output_html_file))

    # ...if we have ground truth
    
    
    ##%% Otherwise, if we don't have ground truth...

    else:

        ##%% Sample detections/non-detections

        # Accumulate html image structs (in the format expected by write_html_image_list)
        # for each category
        images_html = collections.defaultdict(lambda: [])        
        
        # Add default entries by accessing them for the first time
        [images_html[res] for res in ['detections', 'non_detections']]
        if options.include_almost_detections:
            images_html['almost_detections']
            
        # Create output directories
        for res in images_html.keys():
            os.makedirs(os.path.join(output_dir, res), exist_ok=True)

        image_count = len(images_to_visualize)
        has_classification_info = False
        
        # Each element will be a list of 2-tuples, with elements [collection name,html info struct]
        rendering_results = []

        # Each element will be a three-tuple with elements [file,max_conf,detections]
        files_to_render = []
        
        # Assemble the information we need for rendering, so we can parallelize without
        # dealing with Pandas
        # i_row = 0; row = images_to_visualize.iloc[0]
        for _, row in images_to_visualize.iterrows():

            # Filenames should already have been normalized to either '/' or '\'
            files_to_render.append([row['file'],
                                    row['max_detection_conf'],
                                    row['detections']])
            
        # Local function for parallelization
        def render_image_no_gt(file_info):
            
            image_relative_path = file_info[0]
            max_conf = file_info[1]
            detections = file_info[2]
            
            detection_status = DetectionStatus.DS_UNASSIGNED            
            if max_conf >= options.confidence_threshold:
                detection_status = DetectionStatus.DS_POSITIVE
            else:
                if options.include_almost_detections:
                    if max_conf >= options.almost_detection_confidence_threshold:
                        detection_status = DetectionStatus.DS_ALMOST
                    else:
                        detection_status = DetectionStatus.DS_NEGATIVE
                else:
                    detection_status = DetectionStatus.DS_NEGATIVE
            
            if detection_status == DetectionStatus.DS_POSITIVE:
                res = 'detections'
            elif detection_status == DetectionStatus.DS_NEGATIVE:
                res = 'non_detections'
            else:
                assert detection_status == DetectionStatus.DS_ALMOST
                res = 'almost_detections'

            display_name = '<b>Result type</b>: {}, <b>Image</b>: {}, <b>Max conf</b>: {:0.3f}'.format(
                res, image_relative_path, max_conf)

            rendering_options = copy.copy(options)
            if detection_status == DetectionStatus.DS_ALMOST:
                rendering_options.confidence_threshold = rendering_options.almost_detection_confidence_threshold
            rendered_image_html_info = render_bounding_boxes(options.image_base_dir,
                                                                image_relative_path,
                                                                display_name,
                                                                detections,
                                                                res,
                                                                detection_categories_map,
                                                                classification_categories_map,
                                                                rendering_options)
            
            image_result = None
            if len(rendered_image_html_info) > 0:
                image_result = [[res,rendered_image_html_info]]
                for det in detections:
                    if 'classifications' in det:
                        top1_class = classification_categories_map[det['classifications'][0][0]]
                        image_result.append(['class_{}'.format(top1_class),rendered_image_html_info])
            
            return image_result
        
        # ...def render_image_no_gt(file_info):
        
        start_time = time.time()
        if options.parallelize_rendering:
            if options.parallelize_rendering_n_cores is None:
                pool = ThreadPool()
            else:
                print('Rendering images with {} workers'.format(options.parallelize_rendering_n_cores))
                pool = ThreadPool(options.parallelize_rendering_n_cores)
            rendering_results = list(tqdm(pool.imap(render_image_no_gt, files_to_render), total=len(files_to_render)))    
        else:
            for file_info in tqdm(files_to_render):        
                rendering_results.append(render_image_no_gt(file_info))            
        elapsed = time.time() - start_time
        
        # Map all the rendering results in the list rendering_results into the 
        # dictionary images_html
        image_rendered_count = 0
        for rendering_result in rendering_results:
            if rendering_result is None:
                continue
            image_rendered_count += 1
            for assignment in rendering_result:
                if 'class' in assignment[0]:
                    has_classification_info = True
                images_html[assignment[0]].append(assignment[1])
                
        # Prepare the individual html image files
        image_counts = prepare_html_subpages(images_html, output_dir)
        
        if image_rendered_count == 0:
            seconds_per_image = 0
        else:
            seconds_per_image = elapsed/image_rendered_count
            
        print('Rendered {} images (of {}) in {} ({} per image)'.format(image_rendered_count,
              image_count,humanfriendly.format_timespan(elapsed),
              humanfriendly.format_timespan(seconds_per_image)))

        # Write index.html
        total_images = image_counts['detections'] + image_counts['non_detections']
        if options.include_almost_detections:
            total_images += image_counts['almost_detections']
        assert total_images == image_count, \
            'Error: image_count is {}, total_images is {}'.format(image_count,total_images)
        
        almost_detection_string = ''
        if options.include_almost_detections:
            almost_detection_string = ' (&ldquo;almost detection&rdquo; threshold at {:.1%})'.format(options.almost_detection_confidence_threshold)
            
        index_page = """<html>{}<body>
        <h2>Visualization of results</h2>
        <p>A sample of {} images, annotated with detections above {:.1%} confidence{}.</p>
        <h3>Sample images</h3>
        <div class="contentdiv">
        <a href="detections.html">detections</a> ({}, {:.1%})<br/>
        <a href="non_detections.html">non-detections</a> ({}, {:.1%})<br/>""".format(
            style_header,image_count, options.confidence_threshold, almost_detection_string,
            image_counts['detections'], image_counts['detections']/total_images,
            image_counts['non_detections'], image_counts['non_detections']/total_images
        )
        
        if options.include_almost_detections:
            index_page += """<a href="almost_detections.html">almost-detections</a> ({}, {:.1%})<br/>""".format( 
                    image_counts['almost_detections'], image_counts['almost_detections']/total_images)
        
        index_page += '</div>\n'
        
        if has_classification_info:
            index_page += "<h3>Images of detected classes</h3>"
            index_page += "<p>The same image might appear under multiple classes if multiple species were detected.</p>\n<div class='contentdiv'>\n"
        
            # Add links to all available classes
            for cname in sorted(classification_categories_map.values()):
                ccount = len(images_html['class_{}'.format(cname)])
                if ccount > 0:
                    index_page += "<a href='class_{}.html'>{}</a> ({})<br/>\n".format(cname, cname.lower(), ccount)
            index_page += "</div>\n"
            
        index_page += "</body></html>"
        output_html_file = os.path.join(output_dir, 'index.html')
        with open(output_html_file, 'w') as f:
            f.write(index_page)

        print('Finished writing html to {}'.format(output_html_file))

        # os.startfile(output_html_file)
        
    # ...if we do/don't have ground truth

    ppresults.output_html_file = output_html_file
    return ppresults
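
A minimal usage sketch for the no-ground-truth path (the attribute names are the ones read inside the function; the options class name and all values are assumptions):

options = PostProcessingOptions()                      # assumed options class
options.api_output_file = '/data/results/detections.json'
options.output_dir = '/data/results/postprocessing'
options.image_base_dir = '/data/images'
options.confidence_threshold = 0.8
options.num_images_to_sample = 500
options.ground_truth_json_file = ''                    # empty: skip the precision/recall analysis

ppresults = process_batch_results(options)
print('Wrote report to {}'.format(ppresults.output_html_file))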
Code example #4
annotation_list_filename = r'd:\wildlife_data\zsl_borneo\all_img_ids_to_bbox.json'
image_json = r'd:\wildlife_data\zsl_borneo\201906cameratraps\0.5\zsl_camera_traps_201906.json'
image_base = r'd:\wildlife_data\zsl_borneo\201906cameratraps\0.5'
output_base = r'd:\wildlife_data\zsl_borneo'

human_classes = ['human', 'hunter']

#%% Load data

with open(annotation_list_filename, 'r') as f:
    annotation_list = json.load(f)

# with open(image_json,'r') as f:
#    data = json.load(f)
indexedData = IndexedJsonDb(image_json)

print('Done loading data')

#%% Sanity-check data

options = sanity_check_json_db.SanityCheckOptions()
options.baseDir = image_base
options.bCheckImageSizes = False
options.bCheckImageExistence = True
options.bFindUnusedImages = False

sortedCategories = sanity_check_json_db.sanity_check_json_db(
    indexedData.db, options)
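
# Quick summary of the categories in the database (a hedged sketch; assumes the standard
# COCO Camera Traps layout with a 'categories' list of {'id', 'name'} dicts):
for cat in indexedData.db['categories']:
    print('Category {}: {}'.format(cat['id'], cat['name']))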

#%% Label previews
Code example #5
def process_batch_results(options):

    ##%% Expand some options for convenience

    output_dir = options.output_dir
    confidence_threshold = options.confidence_threshold

    ##%% Prepare output dir

    os.makedirs(output_dir, exist_ok=True)

    ##%% Load ground truth if available

    ground_truth_indexed_db = None

    if len(options.ground_truth_json_file) > 0:

        ground_truth_indexed_db = IndexedJsonDb(
            options.ground_truth_json_file,
            b_normalize_paths=True,
            filename_replacements=options.ground_truth_filename_replacements)

        # Mark images in the ground truth as positive or negative
        (nNegative, nPositive, nUnknown, nAmbiguous) = mark_detection_status(
            ground_truth_indexed_db,
            negative_classes=options.negative_classes,
            unknown_classes=options.unlabeled_classes)
        print(
            'Finished loading and indexing ground truth: {} negative, {} positive, {} unknown, {} ambiguous'
            .format(nNegative, nPositive, nUnknown, nAmbiguous))

    ##%% Load detection results

    detection_results = load_api_results(
        options.detector_output_file,
        normalize_paths=True,
        filename_replacements=options.detector_output_filename_replacements)

    # Add a column (pred_detection_label) to indicate predicted detection status
    import numpy as np
    detection_results['pred_detection_label'] = \
        np.where(detection_results['max_confidence'] >= options.confidence_threshold,
                 DetectionStatus.DS_POSITIVE, DetectionStatus.DS_NEGATIVE)

    nPositives = sum(detection_results['pred_detection_label'] ==
                     DetectionStatus.DS_POSITIVE)
    print(
        'Finished loading and preprocessing {} rows from detector output, predicted {} positives'
        .format(len(detection_results), nPositives))

    ##%% If we have ground truth, remove images we can't match to ground truth

    # ground_truth_indexed_db.db['images'][0]
    if ground_truth_indexed_db is not None:

        b_match = [False] * len(detection_results)

        detector_files = detection_results['image_path'].to_list()

        for iFn, fn in enumerate(detector_files):

            # assert fn in ground_truth_indexed_db.filename_to_id, 'Could not find ground truth for row {} ({})'.format(iFn,fn)
            if fn in ground_truth_indexed_db.filename_to_id:
                b_match[iFn] = True

        print('Confirmed filename matches to ground truth for {} of {} files'.
              format(sum(b_match), len(detector_files)))

        detection_results = detection_results[b_match]
        detector_files = detection_results['image_path'].to_list()

        print('Trimmed detection results to {} files'.format(
            len(detector_files)))

    ##%% Sample images for visualization

    images_to_visualize = detection_results

    if options.num_images_to_sample > 0 and options.num_images_to_sample < len(
            detection_results):

        images_to_visualize = images_to_visualize.sample(
            options.num_images_to_sample, random_state=options.sample_seed)

    ##%% Fork here depending on whether or not ground truth is available

    # If we have ground truth, we'll compute precision/recall and sample tp/fp/tn/fn.
    #
    # Otherwise we'll just visualize detections/non-detections.

    if ground_truth_indexed_db is not None:

        ##%% Compute precision/recall

        # numpy array of detection probabilities
        p_detection = detection_results['max_confidence'].values
        n_detections = len(p_detection)

        # numpy array of ground truth labels (0.0/1.0), with -1.0 as the unknown/ambiguous value
        gt_detections = np.zeros(n_detections, dtype=float)

        for iDetection, fn in enumerate(detector_files):
            image_id = ground_truth_indexed_db.filename_to_id[fn]
            image = ground_truth_indexed_db.image_id_to_image[image_id]
            detection_status = image['_detection_status']

            if detection_status == DetectionStatus.DS_NEGATIVE:
                gt_detections[iDetection] = 0.0
            elif detection_status == DetectionStatus.DS_POSITIVE:
                gt_detections[iDetection] = 1.0
            else:
                gt_detections[iDetection] = -1.0

        # Don't include ambiguous/unknown ground truth in precision/recall analysis
        b_valid_ground_truth = gt_detections >= 0.0

        p_detection_pr = p_detection[b_valid_ground_truth]
        gt_detections_pr = gt_detections[b_valid_ground_truth]

        print('Including {} of {} values in p/r analysis'.format(
            np.sum(b_valid_ground_truth), len(b_valid_ground_truth)))

        precisions, recalls, thresholds = precision_recall_curve(
            gt_detections_pr, p_detection_pr)

        # For completeness, include the result at a confidence threshold of 1.0
        thresholds = np.append(thresholds, [1.0])

        precisions_recalls = pd.DataFrame(
            data={
                'confidence_threshold': thresholds,
                'precision': precisions,
                'recall': recalls
            })

        # Compute and print summary statistics
        average_precision = average_precision_score(gt_detections_pr,
                                                    p_detection_pr)
        print('Average precision: {:.2f}'.format(average_precision))

        # Thresholds go up throughout precisions/recalls/thresholds; find the last
        # value where recall is at or above target.  That's our precision @ target recall.
        target_recall = 0.9
        b_above_target_recall = np.where(recalls >= target_recall)
        if not np.any(b_above_target_recall):
            precision_at_target_recall = 0.0
        else:
            i_target_recall = np.argmax(b_above_target_recall)
            precision_at_target_recall = precisions[i_target_recall]
        print('Precision at {:.2f} recall: {:.2f}'.format(
            target_recall, precision_at_target_recall))

        cm = confusion_matrix(gt_detections_pr,
                              np.array(p_detection_pr) > confidence_threshold)

        # Flatten the confusion matrix
        tn, fp, fn, tp = cm.ravel()

        precision_at_confidence_threshold = tp / (tp + fp)
        recall_at_confidence_threshold = tp / (tp + fn)
        f1 = 2.0 * (precision_at_confidence_threshold * recall_at_confidence_threshold) / \
            (precision_at_confidence_threshold + recall_at_confidence_threshold)

        print(
            'At a confidence threshold of {:.2f}, precision={:.2f}, recall={:.2f}, f1={:.2f}'
            .format(confidence_threshold, precision_at_confidence_threshold,
                    recall_at_confidence_threshold, f1))

        ##%% Render output

        # Write p/r table to .csv file in output directory
        pr_table_filename = os.path.join(output_dir, 'prec_recall.csv')
        precisions_recalls.to_csv(pr_table_filename, index=False)

        # Write precision/recall plot to .png file in output directory
        fig = plt.figure()
        plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
        plt.fill_between(recalls, precisions, alpha=0.2, color='b', step='post')

        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.ylim([0.0, 1.05])
        plt.xlim([0.0, 1.05])
        t = 'Precision-Recall curve: AP={:0.2f}, P@{:0.2f}={:0.2f}'.format(
            average_precision, target_recall, precision_at_target_recall)
        plt.title(t)
        pr_figure_relative_filename = 'prec_recall.png'
        pr_figure_filename = os.path.join(output_dir,
                                          pr_figure_relative_filename)
        plt.savefig(pr_figure_filename)
        # plt.show(block=False)
        plt.close(fig)

        ##%% Sample true/false positives/negatives and render to html

        os.makedirs(os.path.join(output_dir, 'tp'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'fp'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'tn'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'fn'), exist_ok=True)

        # Accumulate html image structs (in the format expected by write_html_image_lists)
        # for each category
        images_html = {'tp': [], 'fp': [], 'tn': [], 'fn': []}

        count = 0

        # i_row = 0; row = images_to_visualize.iloc[0]
        for i_row, row in tqdm(images_to_visualize.iterrows(),
                               total=len(images_to_visualize)):

            image_relative_path = row['image_path']

            # This should already have been normalized to either '/' or '\'

            image_id = ground_truth_indexed_db.filename_to_id.get(
                image_relative_path, None)
            if image_id is None:
                print("Warning: couldn't find ground truth for image {}".format(
                    image_relative_path))
                continue

            image_info = ground_truth_indexed_db.image_id_to_image[image_id]
            annotations = ground_truth_indexed_db.image_id_to_annotations[
                image_id]

            gt_status = image_info['_detection_status']

            if gt_status > DetectionStatus.DS_MAX_DEFINITIVE_VALUE:
                print(
                    'Skipping image {}: ground truth status {} is not definitive'
                    .format(i_row, gt_status))
                continue

            gt_presence = bool(gt_status)

            gt_class_name = CameraTrapJsonUtils.annotationsToString(
                annotations, ground_truth_indexed_db.cat_id_to_name)

            max_conf = row['max_confidence']
            boxes_and_scores = row['detections']

            detected = max_conf > confidence_threshold

            if gt_presence and detected:
                res = 'tp'
            elif not gt_presence and detected:
                res = 'fp'
            elif gt_presence and not detected:
                res = 'fn'
            else:
                res = 'tn'

            display_name = '<b>Result type</b>: {}, <b>Presence</b>: {}, <b>Class</b>: {}, <b>Max conf</b>: {:0.2f}%, <b>Image</b>: {}'.format(
                res.upper(), str(gt_presence), gt_class_name, max_conf * 100,
                image_relative_path)

            rendered_image_html_info = render_bounding_boxes(
                options.image_base_dir, image_relative_path, display_name,
                boxes_and_scores, res, options)

            if len(rendered_image_html_info) > 0:
                images_html[res].append(rendered_image_html_info)

            count += 1

        # ...for each image in our sample

        print('{} images rendered'.format(count))

        # Prepare the individual html image files
        image_counts = prepare_html_subpages(images_html, output_dir)

        # Write index.HTML
        index_page = """<html><body>
        <p><strong>A sample of {} images, annotated with detections above {:.1f}% confidence.</strong></p>
        
        <a href="tp.html">True positives (tp)</a> ({})<br/>
        <a href="tn.html">True negatives (tn)</a> ({})<br/>
        <a href="fp.html">False positives (fp)</a> ({})<br/>
        <a href="fn.html">False negatives (fn)</a> ({})<br/>
        <p>At a confidence threshold of {:0.1f}%, precision={:0.2f}, recall={:0.2f}</p>
        <p><strong>Precision/recall summary for all {} images</strong></p><img src="{}"><br/>
        </body></html>""".format(
            count, confidence_threshold * 100, image_counts['tp'],
            image_counts['tn'], image_counts['fp'], image_counts['fn'],
            confidence_threshold * 100,
            precision_at_confidence_threshold, recall_at_confidence_threshold,
            len(detection_results), pr_figure_relative_filename)
        output_html_file = os.path.join(output_dir, 'index.html')
        with open(output_html_file, 'w') as f:
            f.write(index_page)

        print('Finished writing html to {}'.format(output_html_file))

    ##%% Otherwise, if we don't have ground truth...

    else:

        ##%% Sample detections/non-detections

        os.makedirs(os.path.join(output_dir, 'detections'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'non_detections'), exist_ok=True)

        # Accumulate html image structs (in the format expected by write_html_image_lists)
        # for each category
        images_html = {
            'detections': [],
            'non_detections': [],
        }

        count = 0

        # i_row = 0; row = images_to_visualize.iloc[0]
        for i_row, row in tqdm(images_to_visualize.iterrows(),
                               total=len(images_to_visualize)):

            image_relative_path = row['image_path']

            # This should already have been normalized to either '/' or '\'
            max_conf = row['max_confidence']
            boxes_and_scores = row['detections']
            detected = max_conf > confidence_threshold

            if detected:
                res = 'detections'
            else:
                res = 'non_detections'

            display_name = '<b>Result type</b>: {}, <b>Image</b>: {}'.format(
                res.upper(), image_relative_path)

            rendered_image_html_info = render_bounding_boxes(
                options.image_base_dir, image_relative_path, display_name,
                boxes_and_scores, res, options)
            if len(rendered_image_html_info) > 0:
                images_html[res].append(rendered_image_html_info)

            count += 1

        # ...for each image in our sample

        print('{} images rendered'.format(count))

        # Prepare the individual html image files
        image_counts = prepare_html_subpages(images_html, output_dir)

        # Write index.HTML
        index_page = """<html><body>
        <p><strong>A sample of {} images, annotated with detections above {:.1f}% confidence.</strong></p>
        
        <a href="detections.html">Detections</a> ({})<br/>
        <a href="non_detections.html">Non-detections</a> ({})<br/>
        </body></html>""".format(count, confidence_threshold * 100,
                                 image_counts['detections'],
                                 image_counts['non_detections'])
        output_html_file = os.path.join(output_dir, 'index.html')
        with open(output_html_file, 'w') as f:
            f.write(index_page)

        print('Finished writing html to {}'.format(output_html_file))
Code example #6
def process_batch_results(options):

    ##%% Expand some options for convenience

    output_dir = options.output_dir
    confidence_threshold = options.confidence_threshold

    ##%% Prepare output dir

    os.makedirs(output_dir, exist_ok=True)

    ##%% Load ground truth if available

    ground_truth_indexed_db = None

    if options.ground_truth_json_file and len(
            options.ground_truth_json_file) > 0:

        ground_truth_indexed_db = IndexedJsonDb(
            options.ground_truth_json_file,
            b_normalize_paths=True,
            filename_replacements=options.ground_truth_filename_replacements)

        # Mark images in the ground truth as positive or negative
        n_negative, n_positive, n_unknown, n_ambiguous = mark_detection_status(
            ground_truth_indexed_db,
            negative_classes=options.negative_classes,
            unknown_classes=options.unlabeled_classes)
        print(
            'Finished loading and indexing ground truth: {} negative, {} positive, {} unknown, {} ambiguous'
            .format(n_negative, n_positive, n_unknown, n_ambiguous))

    ##%% Load detection results

    detection_results, other_fields = load_api_results(
        options.api_output_file,
        normalize_paths=True,
        filename_replacements=options.api_output_filename_replacements)
    detection_categories_map = other_fields['detection_categories']
    if 'classification_categories' in other_fields:
        classification_categories_map = other_fields[
            'classification_categories']
    else:
        classification_categories_map = {}

    # Add a column (pred_detection_label) to indicate predicted detection status, not separating out the classes
    detection_results['pred_detection_label'] = \
        np.where(detection_results['max_detection_conf'] >= options.confidence_threshold,
                 DetectionStatus.DS_POSITIVE, DetectionStatus.DS_NEGATIVE)

    n_positives = sum(detection_results['pred_detection_label'] ==
                      DetectionStatus.DS_POSITIVE)
    print(
        'Finished loading and preprocessing {} rows from detector output, predicted {} positives'
        .format(len(detection_results), n_positives))

    ##%% If we have ground truth, remove images we can't match to ground truth

    # ground_truth_indexed_db.db['images'][0]
    if ground_truth_indexed_db is not None:

        b_match = [False] * len(detection_results)

        detector_files = detection_results['file'].tolist()

        for i_fn, fn in enumerate(detector_files):

            # assert fn in ground_truth_indexed_db.filename_to_id, 'Could not find ground truth for row {} ({})'.format(i_fn,fn)
            if fn in ground_truth_indexed_db.filename_to_id:
                b_match[i_fn] = True

        print('Confirmed filename matches to ground truth for {} of {} files'.
              format(sum(b_match), len(detector_files)))

        detection_results = detection_results[b_match]
        detector_files = detection_results['file'].tolist()

        print('Trimmed detection results to {} files'.format(
            len(detector_files)))

    ##%% Sample images for visualization

    images_to_visualize = detection_results

    if options.num_images_to_sample > 0 and options.num_images_to_sample <= len(
            detection_results):

        images_to_visualize = images_to_visualize.sample(
            options.num_images_to_sample, random_state=options.sample_seed)

    ##%% Fork here depending on whether or not ground truth is available

    output_html_file = ''

    # If we have ground truth, we'll compute precision/recall and sample tp/fp/tn/fn.
    #
    # Otherwise we'll just visualize detections/non-detections.

    if ground_truth_indexed_db is not None:

        ##%% DETECTION EVALUATION: Compute precision/recall

        # numpy array of detection probabilities
        p_detection = detection_results['max_detection_conf'].values
        n_detections = len(p_detection)

        # numpy array of bools (0.0/1.0), and -1 as null value
        gt_detections = np.zeros(n_detections, dtype=float)

        for i_detection, fn in enumerate(detector_files):
            image_id = ground_truth_indexed_db.filename_to_id[fn]
            image = ground_truth_indexed_db.image_id_to_image[image_id]
            detection_status = image['_detection_status']

            if detection_status == DetectionStatus.DS_NEGATIVE:
                gt_detections[i_detection] = 0.0
            elif detection_status == DetectionStatus.DS_POSITIVE:
                gt_detections[i_detection] = 1.0
            else:
                gt_detections[i_detection] = -1.0

        # Don't include ambiguous/unknown ground truth in precision/recall analysis
        b_valid_ground_truth = gt_detections >= 0.0

        p_detection_pr = p_detection[b_valid_ground_truth]
        gt_detections_pr = gt_detections[b_valid_ground_truth]

        print('Including {} of {} values in p/r analysis'.format(
            np.sum(b_valid_ground_truth), len(b_valid_ground_truth)))

        precisions, recalls, thresholds = precision_recall_curve(
            gt_detections_pr, p_detection_pr)

        # For completeness, include the result at a confidence threshold of 1.0
        thresholds = np.append(thresholds, [1.0])

        precisions_recalls = pd.DataFrame(
            data={
                'confidence_threshold': thresholds,
                'precision': precisions,
                'recall': recalls
            })

        # Compute and print summary statistics
        average_precision = average_precision_score(gt_detections_pr,
                                                    p_detection_pr)
        print('Average precision: {:.1%}'.format(average_precision))

        # Thresholds go up throughout precisions/recalls/thresholds; find the last
        # value where recall is at or above target.  That's our precision @ target recall.
        target_recall = 0.9
        b_above_target_recall = (recalls >= target_recall)
        if not np.any(b_above_target_recall):
            precision_at_target_recall = 0.0
        else:
            # recalls from precision_recall_curve are non-increasing, so the last
            # index at which recall is still at or above the target is the one we want
            i_target_recall = np.where(b_above_target_recall)[0][-1]
            precision_at_target_recall = precisions[i_target_recall]
        print('Precision at {:.1%} recall: {:.1%}'.format(
            target_recall, precision_at_target_recall))

        cm = confusion_matrix(gt_detections_pr,
                              np.array(p_detection_pr) > confidence_threshold)

        # Flatten the confusion matrix
        tn, fp, fn, tp = cm.ravel()

        precision_at_confidence_threshold = tp / (tp + fp)
        recall_at_confidence_threshold = tp / (tp + fn)
        f1 = 2.0 * (precision_at_confidence_threshold * recall_at_confidence_threshold) / \
            (precision_at_confidence_threshold + recall_at_confidence_threshold)

        print(
            'At a confidence threshold of {:.1%}, precision={:.1%}, recall={:.1%}, f1={:.1%}'
            .format(confidence_threshold, precision_at_confidence_threshold,
                    recall_at_confidence_threshold, f1))

        ##%% CLASSIFICATION evaluation
        classifier_accuracies = []
        # Mapping of classnames to idx for the confusion matrix.
        # The lambda is a bit of a hack: it relies on the assumption that the
        # following code never reassigns classname_to_idx
        classname_to_idx = collections.defaultdict(
            lambda: len(classname_to_idx))
        # Confusion matrix as defaultdict of defaultdict
        # Rows / first index is ground truth, columns / second index is predicted category
        classifier_cm = collections.defaultdict(
            lambda: collections.defaultdict(lambda: 0))
        for iDetection, fn in enumerate(detector_files):
            image_id = ground_truth_indexed_db.filename_to_id[fn]
            image = ground_truth_indexed_db.image_id_to_image[image_id]
            pred_class_ids = [det['classifications'][0][0] \
                for det in detection_results['detections'][iDetection] if 'classifications' in det.keys()]
            pred_classnames = [
                classification_categories_map[pd] for pd in pred_class_ids
            ]

            # If this image has classification predictions, and an unambiguous class
            # annotated, and is a positive image
            if len(pred_classnames) > 0 \
                    and '_unambiguous_category' in image.keys() \
                    and image['_detection_status'] == DetectionStatus.DS_POSITIVE:
                # The unambiguous category; we make it a set for easier handling below
                # TODO: we could instead use all annotated categories here, but then
                # the confusion matrix would no longer make sense
                # TODO: make sure we are using the class names as strings in both, not IDs
                gt_categories = set([image['_unambiguous_category']])
                pred_categories = set(pred_classnames)
                # Compute the accuracy as intersection over union:
                # (# of categories in both prediction and GT)
                # divided by (# of categories in either prediction or GT).
                # With a single GT category, the result is 1.0 if the prediction
                # is exactly that category, 1.0/(# of predicted top-1 categories)
                # if the GT category is one of the predicted top-1 categories,
                # and 0.0 if none of the predicted categories is correct.
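                # Hypothetical worked example (not from the original code): if the
                # annotated GT category is {'deer'} and the detections carry top-1
                # classes {'deer', 'fox'}, the accuracy is
                # |{'deer'}| / |{'deer', 'fox'}| = 1/2 = 0.5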
                classifier_accuracies.append(
                    len(gt_categories & pred_categories) /
                    len(gt_categories | pred_categories))
                image['_classification_accuracy'] = classifier_accuracies[-1]
                # Distribute this accuracy across all predicted categories in the
                # confusion matrix
                assert len(gt_categories) == 1
                gt_class_idx = classname_to_idx[list(gt_categories)[0]]
                for pred_category in pred_categories:
                    pred_class_idx = classname_to_idx[pred_category]
                    classifier_cm[gt_class_idx][pred_class_idx] += 1

        # If we have classification results
        if len(classifier_accuracies) > 0:
            # Build confusion matrix as array from classifier_cm
            all_class_ids = sorted(classname_to_idx.values())
            classifier_cm_array = np.array(
                [[classifier_cm[r_idx][c_idx] for c_idx in all_class_ids]
                 for r_idx in all_class_ids],
                dtype=float)
            classifier_cm_array /= (
                classifier_cm_array.sum(axis=1, keepdims=True) + 1e-7)
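            # Hypothetical example (not from the original code): a raw row of
            # counts [3, 1] normalizes to roughly [0.75, 0.25], i.e. the fraction
            # of that ground-truth class predicted as each class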

            # Print some statistics
            print("Finished computation of {} classification results".format(
                len(classifier_accuracies)))
            print("Mean accuracy: {}".format(np.mean(classifier_accuracies)))

            # Prepare confusion matrix output
            # Get CM matrix as string
            sio = io.StringIO()
            np.savetxt(sio, classifier_cm_array * 100, fmt='%5.1f')
            cm_str = sio.getvalue()
            # Get fixed-size classname for each idx
            idx_to_classname = {v: k for k, v in classname_to_idx.items()}
            classname_list = [
                idx_to_classname[idx]
                for idx in sorted(classname_to_idx.values())
            ]
            classname_headers = [
                '{:<5}'.format(cname[:5]) for cname in classname_list
            ]

            # Prepend class name on each line and add to the top
            cm_str_lines = [' ' * 16 + ' '.join(classname_headers)]
            cm_str_lines += [
                '{:>15}'.format(cn[:15]) + ' ' + cm_line
                for cn, cm_line in zip(classname_list, cm_str.splitlines())
            ]

            # print formatted confusion matrix
            print("Confusion matrix: ")
            print(*cm_str_lines, sep='\n')

            # Plot confusion matrix
            # To manually add more space at bottom: plt.rcParams['figure.subplot.bottom'] = 0.1
            # Add 0.5 to figsize for every class. For two classes, this will result in
            # fig = plt.figure(figsize=[4,4])
            fig = vis_utils.plot_confusion_matrix(classifier_cm_array,
                                                  classname_list,
                                                  normalize=False,
                                                  title='Confusion matrix',
                                                  cmap=plt.cm.Blues,
                                                  vmax=1.0,
                                                  use_colorbar=True,
                                                  y_label=True)
            cm_figure_relative_filename = 'confusion_matrix.png'
            cm_figure_filename = os.path.join(output_dir,
                                              cm_figure_relative_filename)
            plt.savefig(cm_figure_filename)
            plt.close(fig)

        ##%% Render output

        # Write p/r table to .csv file in output directory
        pr_table_filename = os.path.join(output_dir, 'prec_recall.csv')
        precisions_recalls.to_csv(pr_table_filename, index=False)

        # Write precision/recall plot to .png file in output directory
        t = 'Precision-Recall curve: AP={:0.1%}, P@{:0.1%}={:0.1%}'.format(
            average_precision, target_recall, precision_at_target_recall)
        fig = vis_utils.plot_precision_recall_curve(precisions, recalls, t)
        pr_figure_relative_filename = 'prec_recall.png'
        pr_figure_filename = os.path.join(output_dir,
                                          pr_figure_relative_filename)
        plt.savefig(pr_figure_filename)
        # plt.show(block=False)
        plt.close(fig)

        ##%% Sample true/false positives/negatives with correct/incorrect top-1
        # classification and render to html

        # Accumulate html image structs (in the format expected by write_html_image_lists)
        # for each category, e.g. 'tp', 'fp', ..., 'class_bird', ...
        images_html = collections.defaultdict(lambda: [])
        # Add default entries by accessing them for the first time
        [images_html[res] for res in ['tp', 'tpc', 'tpi', 'fp', 'tn', 'fn']]
        for res in images_html.keys():
            os.makedirs(os.path.join(output_dir, res), exist_ok=True)

        count = 0

        # i_row = 0; row = images_to_visualize.iloc[0]
        for i_row, row in tqdm(images_to_visualize.iterrows(),
                               total=len(images_to_visualize)):

            image_relative_path = row['file']

            # This should already have been normalized to either '/' or '\'

            image_id = ground_truth_indexed_db.filename_to_id.get(
                image_relative_path, None)
            if image_id is None:
                print("Warning: couldn't find ground truth for image {}".format(
                    image_relative_path))
                continue

            image = ground_truth_indexed_db.image_id_to_image[image_id]
            annotations = ground_truth_indexed_db.image_id_to_annotations[
                image_id]

            gt_status = image['_detection_status']

            if gt_status > DetectionStatus.DS_MAX_DEFINITIVE_VALUE:
                print(
                    'Skipping image {}: ground truth status {} is not definitive'
                    .format(i_row, gt_status))
                continue

            gt_presence = bool(gt_status)

            gt_classes = CameraTrapJsonUtils.annotations_to_classnames(
                annotations, ground_truth_indexed_db.cat_id_to_name)
            gt_class_summary = ','.join(gt_classes)

            max_conf = row['max_detection_conf']
            detections = row['detections']

            detected = max_conf > confidence_threshold

            if gt_presence and detected:
                if '_classification_accuracy' not in image.keys():
                    res = 'tp'
                elif np.isclose(1, image['_classification_accuracy']):
                    res = 'tpc'
                else:
                    res = 'tpi'
            elif not gt_presence and detected:
                res = 'fp'
            elif gt_presence and not detected:
                res = 'fn'
            else:
                res = 'tn'

            display_name = '<b>Result type</b>: {}, <b>Presence</b>: {}, <b>Class</b>: {}, <b>Max conf</b>: {:0.2f}%, <b>Image</b>: {}'.format(
                res.upper(), str(gt_presence), gt_class_summary,
                max_conf * 100, image_relative_path)

            rendered_image_html_info = render_bounding_boxes(
                options.image_base_dir, image_relative_path, display_name,
                detections, res, detection_categories_map,
                classification_categories_map, options)

            if len(rendered_image_html_info) > 0:
                images_html[res].append(rendered_image_html_info)
                for gt_class in gt_classes:
                    images_html['class_{}'.format(gt_class)].append(
                        rendered_image_html_info)

            count += 1

        # ...for each image in our sample

        print('{} images rendered'.format(count))

        # Prepare the individual html image files
        image_counts = prepare_html_subpages(images_html, output_dir)

        # Write index.HTML
        all_tp_count = image_counts['tp'] + image_counts['tpc'] + image_counts[
            'tpi']
        total_count = all_tp_count + image_counts['tn'] + image_counts[
            'fp'] + image_counts['fn']
        index_page = """<html><body>
        <h2>Evaluation</h2>

        <h3>Sample images</h3>
        <p>A sample of {} images, annotated with detections above {:.1%} confidence.</p>
        True positives (TP) ({} or {:0.1%})<br/>
        -- <a href="tpc.html">with all correct top-1 predictions (TPC)</a> ({})<br/>
        -- <a href="tpi.html">with one or more incorrect top-1 prediction (TPI)</a> ({})<br/>
        -- <a href="tp.html">without classification evaluation</a> (*) ({})<br/>
        <a href="tn.html">True negatives (TN)</a> ({} or {:0.1%})<br/>
        <a href="fp.html">False positives (FP)</a> ({} or {:0.1%})<br/>
        <a href="fn.html">False negatives (FN)</a> ({} or {:0.1%})<br/>
        <p>(*) We do not evaluate the classification results for an image if the classification
        information is missing, if the image contains
        categories like 'empty' or 'human', or if the image has multiple classification
        labels.</p>""".format(
            count, confidence_threshold, all_tp_count,
            all_tp_count / total_count, image_counts['tpc'],
            image_counts['tpi'], image_counts['tp'], image_counts['tn'],
            image_counts['tn'] / total_count, image_counts['fp'],
            image_counts['fp'] / total_count, image_counts['fn'],
            image_counts['fn'] / total_count)
        index_page += """
            <h3>Detection results</h3>
            <p>At a confidence threshold of {:0.1%}, precision={:0.1%}, recall={:0.1%}</p>
            <p><strong>Precision/recall summary for all {} images</strong></p><img src="{}"><br/>
            """.format(confidence_threshold, precision_at_confidence_threshold,
                       recall_at_confidence_threshold, len(detection_results),
                       pr_figure_relative_filename)
        if len(classifier_accuracies) > 0:
            index_page += """
                <h3>Classification results</h3>
                <p>Classification accuracy: {:.2%}<br>
                The accuracy is computed only for images with exactly one classification label.
                The accuracy of an image is computed as 1/(number of unique detected top-1 classes),
                i.e. if the model detects multiple boxes with different top-1 classes, then the accuracy
                decreases and the image is put into 'TPI'.</p>
                <p>Confusion matrix:</p>
                <p><img src="{}"></p>
                <div style='font-family:monospace;display:block;'>{}</div>
                """.format(np.mean(classifier_accuracies),
                           cm_figure_relative_filename,
                           "<br>".join(cm_str_lines).replace(' ', '&nbsp;'))
        # Show links to each GT class
        index_page += "<h3>Images of specific classes:</h3>"
        # Add links to all available classes
        for cname in sorted(classname_to_idx.keys()):
            index_page += "<a href='class_{0}.html'>{0}</a> ({1})<br>".format(
                cname, len(images_html['class_{}'.format(cname)]))
        # Close body and html tag
        index_page += "</body></html>"
        output_html_file = os.path.join(output_dir, 'index.html')
        with open(output_html_file, 'w') as f:
            f.write(index_page)

        print('Finished writing html to {}'.format(output_html_file))

    ##%% Otherwise, if we don't have ground truth...

    else:

        ##%% Sample detections/non-detections

        os.makedirs(os.path.join(output_dir, 'detections'), exist_ok=True)
        os.makedirs(os.path.join(output_dir, 'non_detections'), exist_ok=True)

        # Accumulate html image structs (in the format expected by write_html_image_lists)
        # for each category
        images_html = collections.defaultdict(lambda: [])
        # Add default entries by accessing them for the first time
        [images_html[res] for res in ['detections', 'non_detections']]
        for res in images_html.keys():
            os.makedirs(os.path.join(output_dir, res), exist_ok=True)

        count = 0

        has_classification_info = False
        # i_row = 0; row = images_to_visualize.iloc[0]
        for i_row, row in tqdm(images_to_visualize.iterrows(),
                               total=len(images_to_visualize)):

            image_relative_path = row['file']

            # This should already have been normalized to either '/' or '\'
            max_conf = row['max_detection_conf']
            detections = row['detections']
            detected = max_conf > confidence_threshold

            if detected:
                res = 'detections'
            else:
                res = 'non_detections'

            display_name = '<b>Result type</b>: {}, <b>Image</b>: {}, <b>Max conf</b>: {}'.format(
                res, image_relative_path, max_conf)

            rendered_image_html_info = render_bounding_boxes(
                options.image_base_dir, image_relative_path, display_name,
                detections, res, detection_categories_map,
                classification_categories_map, options)
            if len(rendered_image_html_info) > 0:
                images_html[res].append(rendered_image_html_info)
                for det in detections:
                    if 'classifications' in det:
                        has_classification_info = True
                        top1_class = classification_categories_map[
                            det['classifications'][0][0]]
                        images_html['class_{}'.format(top1_class)].append(
                            rendered_image_html_info)

            count += 1

        # ...for each image in our sample

        print('{} images rendered'.format(count))

        # Prepare the individual html image files
        image_counts = prepare_html_subpages(images_html, output_dir)

        # Write index.HTML
        total_images = image_counts['detections'] + image_counts[
            'non_detections']
        index_page = """<html><body>
        <h2>Visualization of results</h2>
        <p>A sample of {} images, annotated with detections above {:.1%} confidence.</p>
        <h3>Sample images</h3>

        <a href="detections.html">Detections</a> ({}, {:.1%})<br/>
        <a href="non_detections.html">Non-detections</a> ({}, {:.1%})<br/>""".format(
            count, confidence_threshold, image_counts['detections'],
            image_counts['detections'] / total_images,
            image_counts['non_detections'],
            image_counts['non_detections'] / total_images)

        if has_classification_info:
            index_page += "<h3>Images of detected classes</h3>"
            index_page += "<p>The same image might appear under multiple classes if multiple species were detected.</p>"
        # Add links to all available classes
        for cname in sorted(classification_categories_map.values()):
            ccount = len(images_html['class_{}'.format(cname)])
            if ccount > 0:
                index_page += "<a href='class_{0}.html'>{0}</a> ({1})<br>".format(
                    cname, ccount)

        index_page += "</body></html>"
        output_html_file = os.path.join(output_dir, 'index.html')
        with open(output_html_file, 'w') as f:
            f.write(index_page)

        print('Finished writing html to {}'.format(output_html_file))

    # ...if we do/don't have ground truth

    ppresults = PostProcessingResults()
    ppresults.output_html_file = output_html_file
    return ppresults
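
A minimal usage sketch (not part of the original listing): it drives process_batch_results with a simple attribute-bag options object. The attribute names mirror the ones the function reads above, but the values and paths are placeholders, and the repository's real options class may define additional rendering fields.

def example_run_postprocessing():
    # Hypothetical stand-in for the repository's options class
    class Options:
        pass

    options = Options()
    options.api_output_file = 'detector_output.json'    # MegaDetector batch output
    options.api_output_filename_replacements = {}
    options.output_dir = 'postprocessing_output'
    options.confidence_threshold = 0.8
    options.ground_truth_json_file = ''                  # empty: no ground truth available
    options.ground_truth_filename_replacements = {}
    options.negative_classes = ['empty']
    options.unlabeled_classes = ['unlabeled']
    options.num_images_to_sample = 500
    options.sample_seed = 0
    options.image_base_dir = '/data/camera_trap_images'

    ppresults = process_batch_results(options)
    print('Wrote report to {}'.format(ppresults.output_html_file))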
Code example #7
def make_cct_embedded(image_db=None, bbox_db=None):
    """
    Takes in path to the COCO Camera Trap format jsons for images (species labels) and/or
    bboxes (animal/human/vehicle) labels and embed the class names and annotations into the image entries.

    Since IndexedJsonDb() can take either a path or a loaded json object as a dict, both
    arguments can be paths or loaded json objects

    Returns:
        an embedded version of the COCO Camera Trap format json database
    """


    # at first a dict of image_id: image_obj with annotations embedded, then it becomes
    # an array of image objects
    docs = {}

    # %% integrate the image DB
    if image_db:
        print('Loading image DB...')
        cct_json_db = IndexedJsonDb(image_db)
        docs = cct_json_db.image_id_to_image  # each image entry is first assigned the image object

        # takes in image entries and species and other annotations in the image DB
        num_images_with_more_than_1_species = 0
        for image_id, annotations in cct_json_db.image_id_to_annotations.items():
            docs[image_id]['annotations'] = {
                'species': []
            }
            if len(annotations) > 1:
                num_images_with_more_than_1_species += 1
            for anno in annotations:
                # convert the species category to explicit string name
                cat_name = cct_json_db.cat_id_to_name[anno['category_id']]
                docs[image_id]['annotations']['species'].append(cat_name)

                # there may be other fields in the annotation object
                for anno_field_name, anno_field_val in anno.items():
                    # these fields should already come from the image object
                    if anno_field_name not in ['category_id', 'id', 'image_id', 'datetime', 'location', 'sequence_level_annotation', 'seq_id', 'seq_num_frames', 'frame_num']:
                        docs[image_id]['annotations'][anno_field_name] = anno_field_val

        print('Number of items from the image DB:', len(docs))
        print('Number of images with more than 1 species: {} ({}% of image DB)'.format(
            num_images_with_more_than_1_species, round(100 * num_images_with_more_than_1_species / len(docs), 2)))

    #%% integrate the bbox DB
    if bbox_db:
        print('Loading bbox DB...')
        cct_bbox_json_db = IndexedJsonDb(bbox_db)

        # add any images that are not in the image DB
        # also add any fields in the image object that are not present already
        num_added = 0
        num_amended = 0
        for image_id, image_obj in cct_bbox_json_db.image_id_to_image.items():
            if image_id not in docs:
                docs[image_id] = image_obj
                num_added += 1

            amended = False
            for field_name, val in image_obj.items():
                if field_name not in docs[image_id]:
                    docs[image_id][field_name] = val
                    amended = True
            if amended:
                num_amended += 1

        print('Number of images added from bbox DB entries: ', num_added)
        print('Number of images amended: ', num_amended)
        print('Number of items in total: ', len(docs))

        # add bbox to the annotations field
        num_more_than_1_bbox = 0

        for image_id, bbox_annotations in cct_bbox_json_db.image_id_to_annotations.items():

            # for any newly added images
            if 'annotations' not in docs[image_id]:
                docs[image_id]['annotations'] = {}

            docs[image_id]['annotations']['bbox'] = []

            if len(bbox_annotations) > 1:
                num_more_than_1_bbox += 1

            for bbox_anno in bbox_annotations:
                item_bbox = {
                    'category': cct_bbox_json_db.cat_id_to_name[bbox_anno['category_id']],
                    # 'bbox_abs': bbox_anno['bbox'],
                }

                if 'width' in docs[image_id]:
                    image_w = docs[image_id]['width']
                    image_h = docs[image_id]['height']
                    x, y, w, h = bbox_anno['bbox']
                    item_bbox['bbox_rel'] = [
                        truncate_float(x / image_w),
                        truncate_float(y / image_h),
                        truncate_float(w / image_w),
                        truncate_float(h / image_h)
                    ]
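                    # Hypothetical example (not from the original data): a 2048x1536
                    # image with an absolute bbox of [512, 384, 1024, 768] yields
                    # bbox_rel = [0.25, 0.25, 0.5, 0.5]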

                docs[image_id]['annotations']['bbox'].append(item_bbox)

            # not keeping height and width; use pop() so entries without these
            # fields don't raise a KeyError
            docs[image_id].pop('width', None)
            docs[image_id].pop('height', None)

        print('Number of images with more than one bounding box: {} ({}% of all entries)'.format(
            num_more_than_1_bbox, round(100 * num_more_than_1_bbox / len(docs), 2)))
    else:
        print('No bbox DB provided.')

    assert len(docs) > 0, 'No image entries found in the image or bbox DB jsons provided.'

    docs = list(docs.values())
    return docs
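
A short usage sketch (not part of the original listing); the file names are placeholders, and write_json is assumed to behave like the helper used in code example #10.

def example_embed_cct_databases():
    # Embed species labels and bounding boxes into a single list of image documents
    docs = make_cct_embedded(image_db='image_db_cct.json',
                             bbox_db='bbox_db_cct.json')
    print('Embedded {} image entries'.format(len(docs)))
    write_json('embedded_db.json', docs)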
Code example #8
File: pc_to_json.py  Project: microsoft/CameraTraps
annotations_all = annotations

#%%

if db_sampling_scheme == 'all':

    pass

elif db_sampling_scheme == 'labeled' or db_sampling_scheme == 'preview':

    json_data = {}
    json_data['images'] = images
    json_data['annotations'] = annotations
    json_data['categories'] = categories

    indexed_db = IndexedJsonDb(json_data)

    # Collect the images we want
    sampled_images = []
    for im in images:
        classes = indexed_db.get_classes_for_image(im)
        # skip images whose only label is 'unlabeled'
        if not ('unlabeled' in classes and len(classes) == 1):
            sampled_images.append(im)

    if db_sampling_scheme == 'preview':
        n_sample = n_unlabeled_to_sample
        if n_sample == -1:
            n_sample = len(labeled_images)
        if n_sample > len(labeled_images) and cap_unlabeled_to_labeled:
Code example #9
def visualize_incoming_annotations(args):
    print('Connecting to MegaDB to get the datasets table...')
    megadb_utils = MegadbUtils()
    datasets_table = megadb_utils.get_datasets_table()

    print('Loading the MegaDB entries...')
    with open(args.megadb_entries) as f:
        sequences = json.load(f)
    print(f'Total number of sequences: {len(sequences)}')
    dataset_seq_images = defaultdict(dict)
    for seq in sequences:
        dataset_seq_images[seq['dataset']][seq['seq_id']] = seq['images']

    print('Loading incoming annotation entries...')
    incoming = IndexedJsonDb(args.incoming_annotation)
    print(
        f'Number of images in this annotation file: {len(incoming.image_id_to_image)}'
    )

    if args.num_to_visualize != -1 and args.num_to_visualize <= len(
            incoming.image_id_to_image):
        incoming_id_to_anno = sample(
            list(incoming.image_id_to_annotations.items()),
            args.num_to_visualize)
    else:
        incoming_id_to_anno = incoming.image_id_to_annotations.items()

    # The file_name field in the incoming json looks like alka_squirrels.seq2020_05_07_25C.frame119221.jpg
    # we need to use the dataset, sequence and frame info to find the actual path in blob storage
    # using the sequences
    images_html = []
    for image_id, annotations in tqdm(incoming_id_to_anno):
        if args.trim_to_images_bboxes_labeled and annotations[0][
                'category_id'] == 5:
            # category_id 5 is No Object Visible
            continue

        anno_file_name = incoming.image_id_to_image[image_id]['file_name']
        parts = anno_file_name.split('.')
        dataset_name = parts[0]
        seq_id = parts[1].split('seq')[1]
        frame_num = int(parts[2].split('frame')[1])

        im_rel_path = get_image_rel_path(dataset_seq_images, dataset_name,
                                         seq_id, frame_num)
        if im_rel_path is None:
            print(f'Not found in megadb entries: dataset {dataset_name},'
                  f' seq_id {seq_id}, frame_num {frame_num}')
            continue

        im_full_path = megadb_utils.get_full_path(datasets_table, dataset_name,
                                                  im_rel_path)

        # download the image
        container_client = megadb_utils.get_storage_client(
            datasets_table, dataset_name)
        downloader = container_client.download_blob(im_full_path)
        image_file = io.BytesIO()
        blob_props = downloader.download_to_stream(image_file)
        image = vis_utils.open_image(image_file)

        boxes = [anno['bbox'] for anno in annotations]
        classes = [anno['category_id'] for anno in annotations]

        vis_utils.render_iMerit_boxes(boxes,
                                      classes,
                                      image,
                                      label_map=incoming.cat_id_to_name)

        file_name = '{}_gtbbox.jpg'.format(
            os.path.splitext(anno_file_name)[0].replace('/', '~'))
        image = vis_utils.resize_image(image, args.output_image_width)
        image.save(os.path.join(args.output_dir, 'rendered_images', file_name))

        images_html.append({
            'filename':
            '{}/{}'.format('rendered_images', file_name),
            'title':
            '{}, number of boxes: {}'.format(
                anno_file_name, len([b for b in boxes if len(b) > 0])),
            'textStyle':
            'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        })

    # Write to HTML
    images_html = sorted(images_html, key=lambda x: x['filename'])
    write_html_image_list(filename=os.path.join(args.output_dir, 'index.html'),
                          images=images_html,
                          options={
                              'headerHtml':
                              '<h1>Sample annotations from {}</h1>'.format(
                                  args.incoming_annotation)
                          })

    print('Visualized {} images.'.format(len(images_html)))
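
A hedged sketch of command-line wiring that could drive visualize_incoming_annotations (assuming argparse and os are imported); the argument names follow the attributes the function reads above, but the defaults and help strings are assumptions rather than the original script's.

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('megadb_entries', type=str,
                        help='path to a JSON file of MegaDB sequence entries')
    parser.add_argument('incoming_annotation', type=str,
                        help='path to the incoming COCO-format annotation JSON')
    parser.add_argument('--output_dir', type=str, default='annotation_preview')
    parser.add_argument('--num_to_visualize', type=int, default=-1)
    parser.add_argument('--trim_to_images_bboxes_labeled', action='store_true')
    parser.add_argument('--output_image_width', type=int, default=700)
    args = parser.parse_args()

    # rendered images are saved under <output_dir>/rendered_images
    os.makedirs(os.path.join(args.output_dir, 'rendered_images'), exist_ok=True)
    visualize_incoming_annotations(args)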
Code example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'dataset_name',
        type=str,
        help=
        'a short string representing the dataset to be used as a partition key in the DB'
    )
    parser.add_argument(
        '--image_db',
        type=str,
        help='path to the json containing the image DB in CCT format')
    parser.add_argument(
        '--bbox_db',
        type=str,
        help='path to the json containing the bbox DB in CCT format')
    parser.add_argument('--embedded_db',
                        type=str,
                        required=True,
                        help='path to store the resulting json')
    args = parser.parse_args()

    assert len(args.dataset_name) > 0, 'dataset name cannot be an empty string'

    if args.image_db:
        assert os.path.exists(
            args.image_db
        ), 'image_db file path provided does not point to a file'
    if args.bbox_db:
        assert os.path.exists(
            args.bbox_db
        ), 'bbox_db file path provided does not point to a file'

    #%% integrate the image DB

    # at first a dict of image_id: image_obj with annotations embedded,
    # then its values becomes the array of documents that will get uploaded to Cosmos DB
    docs = {}

    if args.image_db:
        print('Loading image DB...')
        cct_json_db = IndexedJsonDb(args.image_db)
        docs = cct_json_db.image_id_to_image  # each image entry is first assigned the image object

        # takes in image entries and species and other annotations in the image DB
        num_images_with_more_than_1_species = 0
        for image_id, annotations in cct_json_db.image_id_to_annotations.items(
        ):
            docs[image_id]['annotations'] = {'species': []}
            if len(annotations) > 1:
                num_images_with_more_than_1_species += 1
            for anno in annotations:
                # convert the species category to explicit string name
                cat_name = cct_json_db.cat_id_to_name[anno['category_id']]
                docs[image_id]['annotations']['species'].append(cat_name)

                # there may be other fields in the annotation object
                for anno_field_name, anno_field_val in anno.items():
                    # these fields should already come from the image object
                    if anno_field_name not in [
                            'category_id', 'id', 'image_id', 'datetime',
                            'location', 'sequence_level_annotation', 'seq_id',
                            'seq_num_frames', 'frame_num'
                    ]:
                        docs[image_id]['annotations'][
                            anno_field_name] = anno_field_val

        print('Number of items from the image DB:', len(docs))
        print(
            'Number of images with more than 1 species: {} ({}% of image DB)'.
            format(
                num_images_with_more_than_1_species,
                round(100 * num_images_with_more_than_1_species / len(docs),
                      2)))

    #%% integrate the bbox DB
    if args.bbox_db:
        print('Loading bbox DB...')
        cct_bbox_json_db = IndexedJsonDb(args.bbox_db)

        # add any images that are not in the image DB
        # also add any fields in the image object that are not present already
        num_added = 0
        num_amended = 0
        for image_id, image_obj in cct_bbox_json_db.image_id_to_image.items():
            if image_id not in docs:
                docs[image_id] = image_obj
                num_added += 1

            amended = False
            for field_name, val in image_obj.items():
                if field_name not in docs[image_id]:
                    docs[image_id][field_name] = val
                    amended = True
            if amended:
                num_amended += 1

        print('Number of images added from bbox DB entries: ', num_added)
        print('Number of images amended: ', num_amended)
        print('Number of items in total: ', len(docs))

        # add bbox to the annotations field
        num_more_than_1_bbox = 0

        for image_id, bbox_annotations in cct_bbox_json_db.image_id_to_annotations.items(
        ):

            # for any newly added images
            if 'annotations' not in docs[image_id]:
                docs[image_id]['annotations'] = {}

            docs[image_id]['annotations']['bbox'] = []

            if len(bbox_annotations) > 1:
                num_more_than_1_bbox += 1

            for bbox_anno in bbox_annotations:
                item_bbox = {
                    'category':
                    cct_bbox_json_db.cat_id_to_name[bbox_anno['category_id']],
                    'bbox_abs':
                    bbox_anno['bbox'],
                }

                if 'width' in docs[image_id]:
                    image_w = docs[image_id]['width']
                    image_h = docs[image_id]['height']
                    x, y, w, h = bbox_anno['bbox']
                    item_bbox['bbox_rel'] = [
                        truncate_float(x / image_w),
                        truncate_float(y / image_h),
                        truncate_float(w / image_w),
                        truncate_float(h / image_h)
                    ]

                docs[image_id]['annotations']['bbox'].append(item_bbox)

        print(
            'Number of images with more than one bounding box: {} ({}% of all entries)'
            .format(num_more_than_1_bbox,
                    round(100 * num_more_than_1_bbox / len(docs), 2)))
    else:
        print('No bbox DB provided.')

    assert len(
        docs
    ) > 0, 'No image entries found in the image or bbox DB jsons provided.'

    docs = list(docs.values())

    #%% processing
    # get rid of any trailing '.JPG' for the id field
    # insert the 'dataset' attribute used as the partition key
    # replace illegal chars (for Cosmos DB) in the id field of the image
    # replace directory separator with tilde ~
    # rename the id field (reserved word) to image_id
    illegal_char_map = {'/': '~', '\\': '~', '?': '__qm__', '#': '__pound__'}

    for i in docs:
        i['id'] = i['id'].split('.JPG')[0].split('.jpg')[0]

        for illegal, replacement in illegal_char_map.items():
            i['id'] = i['id'].replace(illegal, replacement)

        i['dataset'] = args.dataset_name

        i['image_id'] = i['id']
        del i['id']
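        # Hypothetical example (not from the original data): an id of
        # 'site01/cam3/IMG_0001.JPG' becomes image_id 'site01~cam3~IMG_0001',
        # with 'dataset' set to args.dataset_name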

    #%% some validation
    print('Example items:')
    print()
    print(docs[0])
    print()
    print(docs[-1])
    print()

    num_both_species_bbox = 0
    for item in docs:
        if 'annotations' in item:
            if 'species' in item['annotations'] and 'bbox' in item[
                    'annotations']:
                num_both_species_bbox += 1
    print(
        'Number of images with both species and bbox annotations: {} ({}% of all entries)'
        .format(num_both_species_bbox,
                round(100 * num_both_species_bbox / len(docs), 2)))

    #%% save the embedded json database
    write_json(args.embedded_db, docs)