Example #1
    def render_image_info(rendering_info):

        img_path = rendering_info['img_path']
        bboxes = rendering_info['bboxes']
        bboxClasses = rendering_info['boxClasses']
        output_file_name = rendering_info['output_file_name']

        if not os.path.exists(img_path):
            print('Image {} cannot be found'.format(img_path))
            return

        try:
            original_image = vis_utils.open_image(img_path)
            original_size = original_image.size
            image = vis_utils.resize_image(original_image, options.viz_size[0],
                                           options.viz_size[1])
        except Exception as e:
            print('Image {} failed to open. Error: {}'.format(img_path, e))
            return

        vis_utils.render_db_bounding_boxes(boxes=bboxes,
                                           classes=bboxClasses,
                                           image=image,
                                           original_size=original_size,
                                           label_map=label_map)
        image.save(
            os.path.join(output_dir, 'rendered_images', output_file_name))
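A minimal usage sketch for the helper above; the dict keys mirror the lookups in the function, while label_map, options, and output_dir are assumed to be enclosing-scope variables, and the concrete values here are hypothetical.

rendering_info = {
    'img_path': '/data/images/camera01/IMG_0001.jpg',  # hypothetical path
    'bboxes': [[100, 50, 400, 300]],                   # one [x, y, w, h] box
    'boxClasses': [1],                                 # one class ID per box
    'output_file_name': 'camera01~IMG_0001_gtbbox.jpg'
}
render_image_info(rendering_info)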
Example #2
def render_bounding_box(detection, inputFileName, outputFileName, lineWidth):

    im = open_image(inputFileName)
    d = detection.to_api_detection()
    render_detection_bounding_boxes([d],
                                    im,
                                    thickness=lineWidth,
                                    confidence_threshold=-10)
    im.save(outputFileName)
Example #3
def render_bounding_box(bbox, inputFileName, imageOutputFilename, linewidth):
    im = open_image(inputFileName)
    # bbox is [x, y, width, height] where x, y is the top left corner, normalized units wrt width and height of image
    x_min, y_min, x_max, y_max = ct_utils.convert_coords_to_xyxy(bbox)
    draw_bounding_box_on_image(im,
                               y_min,
                               x_min,
                               y_max,
                               x_max,
                               thickness=linewidth)
    im.save(imageOutputFilename)
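The comment above describes the box as normalized [x, y, width, height]; a minimal stand-in for ct_utils.convert_coords_to_xyxy under that assumption (an illustration, not the library's actual implementation) looks like this:

def convert_coords_to_xyxy(bbox):
    # [x, y, w, h] with a top-left origin -> [x_min, y_min, x_max, y_max],
    # still in normalized units
    x_min, y_min, width, height = bbox
    return x_min, y_min, x_min + width, y_min + height

assert convert_coords_to_xyxy([0.25, 0.25, 0.5, 0.5]) == (0.25, 0.25, 0.75, 0.75)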
Example #4
def render_bounding_boxes(image_base_dir,
                          image_relative_path,
                          display_name,
                          detections,
                          res,
                          detection_categories_map=None,
                          classification_categories_map=None,
                          options=None):

    if options is None:
        options = PostProcessingOptions()

    # Leaving code in place for reading from blob storage, may support this
    # in the future.
    """
        stream = io.BytesIO()
        _ = blob_service.get_blob_to_stream(container_name, image_id, stream)
        image = Image.open(stream).resize(viz_size)  # resize is to display them in this notebook or in the HTML more quickly
        """

    image_full_path = os.path.join(image_base_dir, image_relative_path)
    if not os.path.isfile(image_full_path):
        print('Warning: could not find image file {}'.format(image_full_path))
        return ''

    image = vis_utils.open_image(image_full_path)
    image = vis_utils.resize_image(image, options.viz_target_width)

    vis_utils.render_detection_bounding_boxes(
        detections,
        image,
        label_map=detection_categories_map,
        classification_label_map=classification_categories_map,
        confidence_threshold=options.confidence_threshold,
        thickness=4)

    # Render images to a flat folder... we can use os.sep here because we've
    # already normalized paths
    sample_name = res + '_' + image_relative_path.replace(os.sep, '~')

    image.save(os.path.join(options.output_dir, res, sample_name))

    # Use slashes regardless of os
    file_name = '{}/{}'.format(res, sample_name)

    return {
        'filename': file_name,
        'title': display_name,
        'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
    }
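A small illustration of the flat-folder naming used above, assuming the relative path has already been normalized to use os.sep:

import os

res = 'detections'  # or 'non-detections'; stands in for the res argument
image_relative_path = os.sep.join(['camera01', 'day02', 'IMG_0042.JPG'])
sample_name = res + '_' + image_relative_path.replace(os.sep, '~')
print(sample_name)  # detections_camera01~day02~IMG_0042.JPG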
Example #5
def render_image_info(rendering, args):
    blob_service = rendering['blob_service']
    image_obj = io.BytesIO()
    _ = blob_service.get_blob_to_stream(rendering['container_name'], rendering['blob_path'], image_obj)

    # resize is for displaying them more quickly
    image = vis_utils.resize_image(vis_utils.open_image(image_obj), args.output_image_width)
    vis_utils.render_megadb_bounding_boxes(rendering['bbox'], image)

    annotated_img_name = rendering['annotated_img_name']
    annotated_img_path = os.path.join(args.output_dir, 'rendered_images', annotated_img_name)
    image.save(annotated_img_path)
Example #6
def render_image_info(rendering, args):
    blob_service = rendering['blob_service']
    image_obj = io.BytesIO()

    try:
        _ = blob_service.get_blob_to_stream(rendering['container_name'], rendering['blob_path'], image_obj)
    except Exception as e:
        print(f'Image not found in blob storage: {rendering["blob_path"]}')
        print(e)
        return

    # resize is for displaying them more quickly
    image = vis_utils.resize_image(vis_utils.open_image(image_obj), args.output_image_width)
    vis_utils.render_megadb_bounding_boxes(rendering['bbox'], image)

    annotated_img_name = rendering['annotated_img_name']
    annotated_img_path = os.path.join(args.output_dir, 'rendered_images', annotated_img_name)
    image.save(annotated_img_path)
Example #7
def render_image_info(rendering, args):
    storage_client = rendering['storage_client']
    image_obj = io.BytesIO()

    try:
        storage_client.download_blob(
            rendering['blob_path']).readinto(image_obj)
    except Exception as e:
        print(f'Image not found in blob storage: {rendering["blob_path"]}')
        print(e)
        return

    # resize is for displaying them more quickly
    image = vis_utils.resize_image(vis_utils.open_image(image_obj),
                                   args.output_image_width)
    vis_utils.render_megadb_bounding_boxes(rendering['bbox'], image)

    annotated_img_name = rendering['annotated_img_name']
    annotated_img_path = os.path.join(args.output_dir, 'rendered_images',
                                      annotated_img_name)
    image.save(annotated_img_path)
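These render_image_info variants each take a self-contained rendering dict, which makes them easy to fan out across worker threads; a hedged driver sketch (the renderings list and the worker count are illustrative, not part of the original code):

from functools import partial
from multiprocessing.pool import ThreadPool

def render_all(renderings, args, num_workers=8):
    # Each rendering dict carries its own storage client and blob path,
    # so the calls are independent and safe to run concurrently.
    pool = ThreadPool(num_workers)
    pool.map(partial(render_image_info, args=args), renderings)
    pool.close()
    pool.join()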
Example #8
def render_image_pair(fn):

    input_image_path = os.path.join(options.image_folder,fn)
    assert os.path.isfile(input_image_path), 'Image {} does not exist'.format(input_image_path)

    im = visualization_utils.open_image(input_image_path)
    image_pair = image_pairs[fn]
    detections_a = image_pair[0]['detections']
    detections_b = image_pair[1]['detections']

    """
    def render_detection_bounding_boxes(detections, image,
                                        label_map={},
                                        classification_label_map={},
                                        confidence_threshold=0.8, thickness=4, expansion=0,
                                        classification_confidence_threshold=0.3,
                                        max_classifications=3,
                                        colormap=COLORS):
    """
    if options.target_width is not None:
        im = visualization_utils.resize_image(im, options.target_width)

    visualization_utils.render_detection_bounding_boxes(detections_a,im,
                                                        confidence_threshold=pairwise_options.rendering_confidence_threshold_a,
                                                        thickness=4,expansion=0,
                                                        colormap=options.colormap_a,
                                                        textalign=visualization_utils.TEXTALIGN_LEFT)
    visualization_utils.render_detection_bounding_boxes(detections_b,im,
                                                        confidence_threshold=pairwise_options.rendering_confidence_threshold_b,
                                                        thickness=2,expansion=0,
                                                        colormap=options.colormap_b,
                                                        textalign=visualization_utils.TEXTALIGN_RIGHT)

    output_image_fn = path_utils.flatten_path(fn)
    output_image_path = os.path.join(category_folder,output_image_fn)
    im.save(output_image_path)
    return output_image_path
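A hedged usage sketch: image_pairs, options, pairwise_options, and category_folder are assumed to be in scope, as in the closure above.

output_paths = [render_image_pair(fn) for fn in sorted(image_pairs.keys())]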
Example #9
def processImages(db_path,
                  output_dir,
                  image_base_dir,
                  options=None,
                  bbox_db=None):
    """
    Writes images and html to output_dir to visualize the annotations in the json file
    db_path.
    
    Returns the html filename and the bbox database.
    """

    if options is None:
        options = DbVizOptions()

    print(options.__dict__)

    os.makedirs(os.path.join(output_dir, 'rendered_images'), exist_ok=True)
    assert os.path.isfile(db_path)
    assert os.path.isdir(image_base_dir)

    if bbox_db is None:
        print('Loading the database...')
        bbox_db = json.load(open(db_path))
        print('...done')

    annotations = bbox_db['annotations']
    images = bbox_db['images']
    categories = bbox_db['categories']

    # Optionally remove all images without bounding boxes, *before* sampling
    if options.trim_to_images_with_bboxes:

        bHasBbox = [False] * len(annotations)
        for iAnn, ann in enumerate(annotations):
            if 'bbox' in ann:
                assert isinstance(ann['bbox'], list)
                bHasBbox[iAnn] = True
        annotationsWithBboxes = list(compress(annotations, bHasBbox))

        imageIDsWithBboxes = [x['image_id'] for x in annotationsWithBboxes]
        imageIDsWithBboxes = set(imageIDsWithBboxes)

        bImageHasBbox = [False] * len(images)
        for iImage, image in enumerate(images):
            imageID = image['id']
            if imageID in imageIDsWithBboxes:
                bImageHasBbox[iImage] = True
        imagesWithBboxes = list(compress(images, bImageHasBbox))
        images = imagesWithBboxes

    # put the annotations in a dataframe so we can select all annotations for a given image
    df_anno = pd.DataFrame(annotations)
    df_img = pd.DataFrame(images)

    # construct label map
    label_map = {}
    for cat in categories:
        label_map[int(cat['id'])] = cat['name']

    # take a sample of images
    if options.num_to_visualize is not None:
        df_img = df_img.sample(n=options.num_to_visualize,
                               random_state=options.random_seed)

    images_html = []

    # iImage = 0
    for iImage in tqdm(range(len(df_img))):

        img_id = df_img.iloc[iImage]['id']
        img_relative_path = df_img.iloc[iImage]['file_name']
        img_path = os.path.join(
            image_base_dir,
            imageFilenameToPath(img_relative_path, image_base_dir))

        if not os.path.exists(img_path):
            print('Image {} cannot be found'.format(img_path))
            continue

        # all annotations on this image
        annos_i = df_anno.loc[df_anno['image_id'] == img_id, :]

        try:
            originalImage = vis_utils.open_image(img_path)
            original_size = originalImage.size
            image = vis_utils.resize_image(originalImage, options.viz_size[0],
                                           options.viz_size[1])
        except Exception as e:
            print('Image {} failed to open. Error: {}'.format(img_path, e))
            continue

        bboxes = []
        boxClasses = []

        # All the class labels we've seen for this image (with or without bboxes)
        imageCategories = set()

        annotationLevelForImage = ''

        # Iterate over annotations for this image
        # iAnn = 0; anno = annos_i.iloc[iAnn]
        for iAnn, anno in annos_i.iterrows():

            if 'sequence_level_annotation' in anno:
                bSequenceLevelAnnotation = anno['sequence_level_annotation']
                if bSequenceLevelAnnotation:
                    annLevel = 'sequence'
                else:
                    annLevel = 'image'
                if annotationLevelForImage == '':
                    annotationLevelForImage = annLevel
                elif annotationLevelForImage != annLevel:
                    annotationLevelForImage = 'mixed'

            categoryID = anno['category_id']
            categoryName = label_map[categoryID]
            if options.add_search_links:
                categoryName = categoryName.replace('"', '')
                categoryName = '<a href="https://www.bing.com/images/search?q={}">{}</a>'.format(
                    categoryName, categoryName)
            imageCategories.add(categoryName)

            if 'bbox' in anno:
                bbox = anno['bbox']
                if isinstance(bbox, float):
                    assert math.isnan(bbox), \
                        "I shouldn't see a bbox that's neither a box nor NaN"
                    continue
                bboxes.append(bbox)
                boxClasses.append(anno['category_id'])

        imageClasses = ', '.join(imageCategories)

        # render bounding boxes in-place
        vis_utils.render_db_bounding_boxes(bboxes, boxClasses, image,
                                           original_size, label_map)

        file_name = '{}_gtbbox.jpg'.format(img_id.lower().split('.jpg')[0])
        file_name = file_name.replace('/', '~')
        image.save(os.path.join(output_dir, 'rendered_images', file_name))

        labelLevelString = ''
        if len(annotationLevelForImage) > 0:
            labelLevelString = ' (annotation level: {})'.format(
                annotationLevelForImage)

        images_html.append({
            'filename': '{}/{}'.format('rendered_images', file_name),
            'title': '{}<br/>{}, number of boxes: {}, class labels: {}{}'.format(
                img_relative_path, img_id, len(bboxes), imageClasses,
                labelLevelString),
            'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        })

    # ...for each image

    if options.sort_by_filename:
        images_html = sorted(images_html, key=lambda x: x['filename'])

    htmlOutputFile = os.path.join(output_dir, 'index.html')

    htmlOptions = options.htmlOptions
    htmlOptions['headerHtml'] = '<h1>Sample annotations from {}</h1>'.format(
        db_path)
    write_html_image_list(filename=htmlOutputFile,
                          images=images_html,
                          options=htmlOptions)

    print('Visualized {} images, wrote results to {}'.format(
        len(images_html), htmlOutputFile))

    return htmlOutputFile, bbox_db
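A minimal usage sketch, exercising the DbVizOptions fields referenced above; the paths are hypothetical.

options = DbVizOptions()
options.num_to_visualize = 100            # sample 100 images
options.trim_to_images_with_bboxes = True
html_file, bbox_db = processImages(db_path='/data/coco_camera_traps.json',
                                   output_dir='/tmp/db_viz',
                                   image_base_dir='/data/images',
                                   options=options)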
Example #10
def visualize_detector_output(detector_output_path: str,
                              out_dir: str,
                              images_dir: str,
                              is_azure: bool = False,
                              confidence: float = 0.8,
                              sample: int = -1,
                              output_image_width: int = 700,
                              random_seed: Optional[int] = None) -> List[str]:
    """Draw bounding boxes on images given the output of the detector.

    Args:
        detector_output_path: str, path to detector output json file
        out_dir: str, path to directory for saving annotated images
        images_dir: str, path to local images dir, or a SAS URL to an Azure Blob
            Storage container
        is_azure: bool, whether images_dir points to an Azure URL
        confidence: float, threshold above which annotations will be rendered
        sample: int, maximum number of images to annotate, -1 for all
        output_image_width: int, width in pixels to resize images for display,
            set to -1 to use original image width
        random_seed: int, for deterministic image sampling when sample != -1

    Returns: list of str, paths to annotated images
    """
    # arguments error checking
    assert confidence > 0 and confidence < 1, (
        f'Confidence threshold {confidence} is invalid, must be in (0, 1).')

    assert os.path.exists(detector_output_path), (
        f'Detector output file does not exist at {detector_output_path}.')

    if is_azure:
        # we don't import sas_blob_utils at the top of this file in order to
        # accommodate the MegaDetector Colab notebook which does not have
        # the azure-storage-blob package installed
        import sas_blob_utils
    else:
        assert os.path.isdir(images_dir)

    os.makedirs(out_dir, exist_ok=True)

    #%% Load detector output

    with open(detector_output_path) as f:
        detector_output = json.load(f)
    assert 'images' in detector_output, (
        'Detector output file should be a json with an "images" field.')
    images = detector_output['images']

    detector_label_map = DEFAULT_DETECTOR_LABEL_MAP
    if 'detection_categories' in detector_output:
        print('detection_categories provided')
        detector_label_map = detector_output['detection_categories']

    num_images = len(images)
    print(f'Detector output file contains {num_images} entries.')

    if sample > 0:
        assert num_images >= sample, (
            f'Sample size {sample} greater than number of entries '
            f'({num_images}) in detector result.')

        if random_seed is not None:
            images = sorted(images, key=lambda x: x['file'])
            random.seed(random_seed)

        random.shuffle(images)
        images = sorted(images[:sample], key=lambda x: x['file'])
        print(f'Sampled {len(images)} entries from the detector output file.')

    #%% Load images, annotate them and save

    print('Starting to annotate the images...')
    num_saved = 0
    annotated_img_paths = []
    image_obj: Any  # str for local images, BytesIO for Azure images

    for entry in tqdm(images):
        image_id = entry['file']

        if 'failure' in entry:
            print(f'Skipping {image_id}, failure: "{entry["failure"]}"')
            continue

        # max_conf = entry['max_detection_conf']

        if is_azure:
            blob_uri = sas_blob_utils.build_blob_uri(container_uri=images_dir,
                                                     blob_name=image_id)
            if not sas_blob_utils.check_blob_exists(blob_uri):
                container = sas_blob_utils.get_container_from_uri(images_dir)
                print(f'Image {image_id} not found in blob container '
                      f'{container}; skipped.')
                continue
            image_obj, _ = sas_blob_utils.download_blob_to_stream(blob_uri)
        else:
            image_obj = os.path.join(images_dir, image_id)
            if not os.path.exists(image_obj):
                print(f'Image {image_id} not found in images_dir; skipped.')
                continue

        # resize is for displaying them more quickly
        image = vis_utils.resize_image(vis_utils.open_image(image_obj),
                                       output_image_width)

        vis_utils.render_detection_bounding_boxes(
            entry['detections'],
            image,
            label_map=detector_label_map,
            confidence_threshold=confidence)

        for char in ['/', '\\', ':']:
            image_id = image_id.replace(char, '~')
        annotated_img_path = os.path.join(out_dir, f'anno_{image_id}')
        annotated_img_paths.append(annotated_img_path)
        image.save(annotated_img_path)
        num_saved += 1

        if is_azure:
            image_obj.close()  # BytesIO object

    print(f'Rendered detection results on {num_saved} images, '
          f'saved to {out_dir}.')

    return annotated_img_paths
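A hedged usage sketch for the local-images case; the paths are hypothetical.

annotated_paths = visualize_detector_output(
    detector_output_path='/results/detections.json',
    out_dir='/results/annotated',
    images_dir='/data/images',
    confidence=0.8,
    sample=50,       # annotate at most 50 images
    random_seed=0)   # deterministic sampling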
Example #11
def render_bounding_boxes(image_base_dir, image_relative_path, display_name, detections, res,
                          detection_categories_map=None, classification_categories_map=None, options=None):
        """
        Renders detection bounding boxes on a single image.  
        
        The source image is:
            
            image_base_dir / image_relative_path
            
        The target image is, for example:
            
            [options.output_dir] / ['detections' or 'non-detections'] / [filename with slashes turned into tildes]
        
        Returns the html info struct for this image in the form that's used for 
        write_html_image_list.
        """
        
        if options is None:
            options = PostProcessingOptions()

        # Leaving code in place for reading from blob storage, may support this
        # in the future.
        """
        stream = io.BytesIO()
        _ = blob_service.get_blob_to_stream(container_name, image_id, stream)
        image = Image.open(stream).resize(viz_size)  # resize is to display them in this notebook or in the HTML more quickly
        """

        image_full_path = os.path.join(image_base_dir, image_relative_path)
        
        # isfile() is slow when mounting remote directories; much faster to just try/except
        # on the image open.
        if False:
            if not os.path.isfile(image_full_path):
                print('Warning: could not find image file {}'.format(image_full_path))
                return ''
        
        try:
            image = vis_utils.open_image(image_full_path)
        except Exception:
            print('Warning: could not open image file {}'.format(image_full_path))            
            return ''
        
        if options.viz_target_width is not None:
            image = vis_utils.resize_image(image, options.viz_target_width)

        vis_utils.render_detection_bounding_boxes(detections, image,
                                                  label_map=detection_categories_map,
                                                  classification_label_map=classification_categories_map,
                                                  confidence_threshold=options.confidence_threshold,
                                                  thickness=options.line_thickness,expansion=options.box_expansion)

        # Render images to a flat folder... we can use os.sep here because we've
        # already normalized paths
        sample_name = res + '_' + path_utils.flatten_path(image_relative_path)        
        fullpath = os.path.join(options.output_dir, res, sample_name)
        try:
            image.save(fullpath)
        except OSError as e:
            # errno.ENAMETOOLONG doesn't get thrown properly on Windows, so 
            # we awkwardly check against a hard-coded limit
            if (e.errno == errno.ENAMETOOLONG) or (len(fullpath) >= 259):
                extension = os.path.splitext(sample_name)[1]
                sample_name = res + '_' + str(uuid.uuid4()) + extension
                image.save(os.path.join(options.output_dir, res, sample_name))
            else:
                raise

        # Use slashes regardless of os
        file_name = '{}/{}'.format(res, sample_name)

        return {
            'filename': file_name,
            'title': display_name,
            'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        }
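A small illustration of the long-path fallback above: when saving fails (or the target path reaches Windows' classic 260-character MAX_PATH, 259 usable), the flattened name is swapped for a UUID while the extension is kept. The 'detections_' prefix here is a hypothetical stand-in for res.

import os
import uuid

sample_name = 'detections_' + 'x' * 300 + '.jpg'  # hypothetical over-long name
extension = os.path.splitext(sample_name)[1]
fallback_name = 'detections_' + str(uuid.uuid4()) + extension
print(fallback_name)  # e.g. detections_1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed.jpg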
Example #12
def visualize_detector_output(args):

    #%% Load detector output

    os.makedirs(args.out_dir, exist_ok=True)

    images_local = args.images_dir is not None

    detector_output = json.load(open(args.detector_output_path))

    assert 'images' in detector_output, 'Detector output file should be a json with an "images" field.'
    images = detector_output['images']

    detector_label_map = DEFAULT_DETECTOR_LABEL_MAP
    if 'detection_categories' in detector_output:
        print('detection_categories provided')
        detector_label_map = detector_output['detection_categories']

    num_images = len(images)
    print('Detector output file contains {} entries.'.format(num_images))

    if args.sample > 0:
        assert num_images >= args.sample, \
            'Sample size {} specified greater than number of entries ({}) in detector result.'.format(args.sample, num_images)

        if args.random_seed is not None:
            images = sorted(images, key=lambda x: x['file'])
            random.seed(args.random_seed)

        random.shuffle(images)
        images = sorted(images[:args.sample], key=lambda x: x['file'])
        print('Sampled {} entries from the detector output file.'.format(
            len(images)))

    #%% Load images, annotate them and save

    if not images_local:
        blob_service = get_service_from_uri(args.sas_url)
        container_name = get_container_from_uri(args.sas_url)

    print('Starting to annotate the images...')
    num_saved = 0

    annotated_img_paths = []

    for entry in tqdm(images):
        if 'failure' in entry:
            print('Skipping {}, which failed because of "{}"'.format(
                entry['file'], entry['failure']))
            continue

        image_id = entry['file']
        # max_conf = entry['max_detection_conf']
        detections = entry['detections']

        if images_local:
            image_obj = os.path.join(args.images_dir, image_id)
            if not os.path.exists(image_obj):
                print('Image {} is not found at local images_dir; skipped.'.
                      format(image_id))
                continue
        else:
            if not blob_service.exists(container_name, blob_name=image_id):
                print(
                    'Image {} is not found in the blob container {}; skipped.'.
                    format(image_id, container_name))
                continue

            image_obj = io.BytesIO()
            _ = blob_service.get_blob_to_stream(container_name, image_id,
                                                image_obj)

        # resize is for displaying them more quickly
        image = vis_utils.resize_image(vis_utils.open_image(image_obj),
                                       args.output_image_width)

        vis_utils.render_detection_bounding_boxes(
            detections,
            image,
            label_map=detector_label_map,
            confidence_threshold=args.confidence)

        annotated_img_name = 'anno_' + image_id.replace('/', '~').replace(
            '\\', '~').replace(':', '~')
        annotated_img_path = os.path.join(args.out_dir, annotated_img_name)
        annotated_img_paths.append(annotated_img_path)
        image.save(annotated_img_path)
        num_saved += 1

    print('Rendered detection results on {} images, saved to {}.'.format(
        num_saved, args.out_dir))

    return annotated_img_paths
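The sampling block above sorts the entries before seeding so the shuffle is reproducible regardless of the order in which they arrived; the same pattern as a standalone sketch:

import random

def sample_entries(entries, n, seed=None):
    # Sort first so the seeded shuffle always sees the same input order.
    if seed is not None:
        entries = sorted(entries, key=lambda x: x['file'])
        random.seed(seed)
    entries = list(entries)
    random.shuffle(entries)
    return sorted(entries[:n], key=lambda x: x['file'])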
Example #13
def render_bounding_boxes(image_base_dir,
                          image_relative_path,
                          display_name,
                          detections,
                          res,
                          detection_categories_map=None,
                          classification_categories_map=None,
                          options=None):
    """
        Renders detection bounding boxes on a single image.  Returns the html info struct
        for this image in the form that's used for write_html_image_list.
        """

    if options is None:
        options = PostProcessingOptions()

    # Leaving code in place for reading from blob storage, may support this
    # in the future.
    """
        stream = io.BytesIO()
        _ = blob_service.get_blob_to_stream(container_name, image_id, stream)
        image = Image.open(stream).resize(viz_size)  # resize is to display them in this notebook or in the HTML more quickly
        """

    image_full_path = os.path.join(image_base_dir, image_relative_path)

    # isfile() is slow when mounting remote directories; much faster to just try/except
    # on the image open.
    if False:
        if not os.path.isfile(image_full_path):
            print('Warning: could not find image file {}'.format(
                image_full_path))
            return ''

    try:
        image = vis_utils.open_image(image_full_path)
    except Exception:
        print('Warning: could not open image file {}'.format(image_full_path))
        return ''

    image = vis_utils.resize_image(image, options.viz_target_width)

    vis_utils.render_detection_bounding_boxes(
        detections,
        image,
        label_map=detection_categories_map,
        classification_label_map=classification_categories_map,
        confidence_threshold=options.confidence_threshold,
        thickness=4)

    # Render images to a flat folder... we can use os.sep here because we've
    # already normalized paths
    sample_name = res + '_' + image_relative_path.replace(os.sep, '~')

    try:
        image.save(os.path.join(options.output_dir, res, sample_name))
    except OSError as e:
        if e.errno == errno.ENAMETOOLONG:
            sample_name = res + '_' + str(uuid.uuid4()) + '.jpg'
            image.save(os.path.join(options.output_dir, res, sample_name))
        else:
            raise

    # Use slashes regardless of os
    file_name = '{}/{}'.format(res, sample_name)

    return {
        'filename': file_name,
        'title': display_name,
        'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
    }
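A hedged sketch of how the returned info structs are typically collected for write_html_image_list; the relative-path list and the detections lookup are hypothetical.

images_html = []
for rel_path in image_relative_paths:  # hypothetical list of relative paths
    info = render_bounding_boxes(image_base_dir, rel_path, rel_path,
                                 detections_by_path[rel_path],  # hypothetical lookup
                                 'detections')
    if info != '':
        images_html.append(info)
write_html_image_list(filename='index.html', images=images_html)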
Example #14
def visualize_incoming_annotations(args):
    print('Connecting to MegaDB to get the datasets table...')
    megadb_utils = MegadbUtils()
    datasets_table = megadb_utils.get_datasets_table()

    print('Loading the MegaDB entries...')
    with open(args.megadb_entries) as f:
        sequences = json.load(f)
    print(f'Total number of sequences: {len(sequences)}')
    dataset_seq_images = defaultdict(dict)
    for seq in sequences:
        dataset_seq_images[seq['dataset']][seq['seq_id']] = seq['images']

    print('Loading incoming annotation entries...')
    incoming = IndexedJsonDb(args.incoming_annotation)
    print(
        f'Number of images in this annotation file: {len(incoming.image_id_to_image)}'
    )

    if args.num_to_visualize != -1 and args.num_to_visualize <= len(
            incoming.image_id_to_image):
        incoming_id_to_anno = sample(
            list(incoming.image_id_to_annotations.items()),
            args.num_to_visualize)
    else:
        incoming_id_to_anno = incoming.image_id_to_annotations.items()

    # The file_name field in the incoming json looks like
    # alka_squirrels.seq2020_05_07_25C.frame119221.jpg; we need to use the
    # dataset, sequence, and frame info to find the actual path in blob
    # storage, using the sequences.
    images_html = []
    for image_id, annotations in tqdm(incoming_id_to_anno):
        if args.trim_to_images_bboxes_labeled and annotations[0]['category_id'] == 5:
            # category_id 5 is No Object Visible
            continue

        anno_file_name = incoming.image_id_to_image[image_id]['file_name']
        parts = anno_file_name.split('.')
        dataset_name = parts[0]
        seq_id = parts[1].split('seq')[1]
        frame_num = int(parts[2].split('frame')[1])

        im_rel_path = get_image_rel_path(dataset_seq_images, dataset_name,
                                         seq_id, frame_num)
        if im_rel_path is None:
            print(f'Not found in megadb entries: dataset {dataset_name},'
                  f' seq_id {seq_id}, frame_num {frame_num}')
            continue

        im_full_path = megadb_utils.get_full_path(datasets_table, dataset_name,
                                                  im_rel_path)

        # download the image
        container_client = megadb_utils.get_storage_client(
            datasets_table, dataset_name)
        downloader = container_client.download_blob(im_full_path)
        image_file = io.BytesIO()
        blob_props = downloader.download_to_stream(image_file)
        image = vis_utils.open_image(image_file)

        boxes = [anno['bbox'] for anno in annotations]
        classes = [anno['category_id'] for anno in annotations]

        vis_utils.render_iMerit_boxes(boxes,
                                      classes,
                                      image,
                                      label_map=incoming.cat_id_to_name)

        file_name = '{}_gtbbox.jpg'.format(
            os.path.splitext(anno_file_name)[0].replace('/', '~'))
        image = vis_utils.resize_image(image, args.output_image_width)
        image.save(os.path.join(args.output_dir, 'rendered_images', file_name))

        images_html.append({
            'filename': '{}/{}'.format('rendered_images', file_name),
            'title': '{}, number of boxes: {}'.format(
                anno_file_name, len([b for b in boxes if len(b) > 0])),
            'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        })

    # Write to HTML
    images_html = sorted(images_html, key=lambda x: x['filename'])
    write_html_image_list(filename=os.path.join(args.output_dir, 'index.html'),
                          images=images_html,
                          options={
                              'headerHtml':
                              '<h1>Sample annotations from {}</h1>'.format(
                                  args.incoming_annotation)
                          })

    print('Visualized {} images.'.format(len(images_html)))
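A worked example of the file_name parsing above, using the format described in the comment ('<dataset>.seq<seq_id>.frame<frame_num>.jpg'):

anno_file_name = 'alka_squirrels.seq2020_05_07_25C.frame119221.jpg'
parts = anno_file_name.split('.')
dataset_name = parts[0]                      # 'alka_squirrels'
seq_id = parts[1].split('seq')[1]            # '2020_05_07_25C'
frame_num = int(parts[2].split('frame')[1])  # 119221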