def render_bounding_box(detection, inputFileName, outputFileName, lineWidth):

    im = open_image(inputFileName)
    d = detection.to_api_detection()
    # a negative confidence threshold renders every detection
    render_detection_bounding_boxes([d],
                                    im,
                                    thickness=lineWidth,
                                    confidence_threshold=-10)
    im.save(outputFileName)
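render_bounding_box() above is a thin wrapper. A minimal standalone sketch of the same call, assuming the CameraTraps-style visualization helpers and the API detection format (a dict with 'category', 'conf', and a normalized [x_min, y_min, width, height] 'bbox'); the import path and filenames are assumptions:

# Hedged sketch: module path and image filenames are assumptions
from visualization.visualization_utils import open_image, render_detection_bounding_boxes

# One detection in the API dict format (normalized [x_min, y_min, width, height])
d = {'category': '1', 'conf': 0.95, 'bbox': [0.1, 0.2, 0.3, 0.4]}

im = open_image('input.jpg')  # hypothetical input file
# confidence_threshold=-10 renders every detection, as in the wrapper above
render_detection_bounding_boxes([d], im, thickness=4, confidence_threshold=-10)
im.save('output.jpg')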
def render_bounding_boxes(image_base_dir,
                          image_relative_path,
                          display_name,
                          detections,
                          res,
                          detection_categories_map=None,
                          classification_categories_map=None,
                          options=None):

    if options is None:
        options = PostProcessingOptions()

    # Leaving code in place for reading from blob storage, may support this
    # in the future.
    """
        stream = io.BytesIO()
        _ = blob_service.get_blob_to_stream(container_name, image_id, stream)
        image = Image.open(stream).resize(viz_size)  # resize is to display them in this notebook or in the HTML more quickly
        """

    image_full_path = os.path.join(image_base_dir, image_relative_path)
    if not os.path.isfile(image_full_path):
        print('Warning: could not find image file {}'.format(image_full_path))
        return ''

    image = vis_utils.open_image(image_full_path)
    image = vis_utils.resize_image(image, options.viz_target_width)

    vis_utils.render_detection_bounding_boxes(
        detections,
        image,
        label_map=detection_categories_map,
        classification_label_map=classification_categories_map,
        confidence_threshold=options.confidence_threshold,
        thickness=4)

    # Render images to a flat folder... we can use os.sep here because we've
    # already normalized paths
    sample_name = res + '_' + image_relative_path.replace(os.sep, '~')

    image.save(os.path.join(options.output_dir, res, sample_name))

    # Use slashes regardless of os
    file_name = '{}/{}'.format(res, sample_name)

    return {
        'filename': file_name,
        'title': display_name,
        'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
    }
def render_image_pair(fn):

    input_image_path = os.path.join(options.image_folder, fn)
    assert os.path.isfile(input_image_path), 'Image {} does not exist'.format(input_image_path)

    im = visualization_utils.open_image(input_image_path)
    image_pair = image_pairs[fn]
    detections_a = image_pair[0]['detections']
    detections_b = image_pair[1]['detections']

    """
    def render_detection_bounding_boxes(detections, image,
                                        label_map={},
                                        classification_label_map={},
                                        confidence_threshold=0.8, thickness=4, expansion=0,
                                        classification_confidence_threshold=0.3,
                                        max_classifications=3,
                                        colormap=COLORS):
    """
    if options.target_width is not None:
        im = visualization_utils.resize_image(im, options.target_width)

    visualization_utils.render_detection_bounding_boxes(detections_a, im,
                                                        confidence_threshold=pairwise_options.rendering_confidence_threshold_a,
                                                        thickness=4, expansion=0,
                                                        colormap=options.colormap_a,
                                                        textalign=visualization_utils.TEXTALIGN_LEFT)
    visualization_utils.render_detection_bounding_boxes(detections_b, im,
                                                        confidence_threshold=pairwise_options.rendering_confidence_threshold_b,
                                                        thickness=2, expansion=0,
                                                        colormap=options.colormap_b,
                                                        textalign=visualization_utils.TEXTALIGN_RIGHT)

    output_image_fn = path_utils.flatten_path(fn)
    output_image_path = os.path.join(category_folder, output_image_fn)
    im.save(output_image_path)
    return output_image_path
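render_image_pair() is a nested helper: options, pairwise_options, image_pairs, and category_folder all come from its enclosing scope. A sketch of the state it assumes (all names and values hypothetical):

from types import SimpleNamespace

# Hypothetical enclosing-scope state for render_image_pair(); the real code
# builds these from its comparison options
options = SimpleNamespace(image_folder='/data/images',
                          target_width=800,
                          colormap_a=['Red'],
                          colormap_b=['RoyalBlue'])
pairwise_options = SimpleNamespace(rendering_confidence_threshold_a=0.15,
                                   rendering_confidence_threshold_b=0.15)
category_folder = '/tmp/comparison/detections_a_only'

# image_pairs maps a relative filename to a pair of per-image result dicts,
# each carrying a 'detections' list
image_pairs = {'loc1/img001.jpg': ({'detections': []}, {'detections': []})}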
Example #4
def load_and_run_detector(model_file,
                          image_file_names,
                          output_dir,
                          render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD):
    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    # load and run detector on target images, and visualize the results
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # since we'll be writing a bunch of files to the same folder, rename
    # as necessary to avoid collisions
    output_file_names = {}

    for im_file in tqdm(image_file_names):
        try:
            start_time = time.time()

            image = viz_utils.load_image(im_file)

            elapsed = time.time() - start_time
            time_load.append(elapsed)
        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(
                im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:
            start_time = time.time()

            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)

            elapsed = time.time() - start_time
            time_infer.append(elapsed)
        except Exception as e:
            print(
                'An error occurred while running the detector on image {}. Exception: {}'
                .format(im_file, e))
            # the error code and message are written by generate_detections_one_image,
            # which is wrapped in a big try/catch
            continue

        try:
            # image is modified in place
            viz_utils.render_detection_bounding_boxes(
                result['detections'],
                image,
                label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                confidence_threshold=render_confidence_threshold)
            fn = os.path.basename(im_file).lower()
            name, ext = os.path.splitext(fn)
            fn = '{}{}{}'.format(name,
                                 ImagePathUtils.DETECTION_FILENAME_INSERT,
                                 '.jpg')  # save all as JPG
            if fn in output_file_names:
                # we've seen this name before; prefix it with the current
                # collision count, and bump the count for the original name
                n_collisions = output_file_names[fn]
                output_file_names[fn] = n_collisions + 1
                fn = str(n_collisions) + '_' + fn
            else:
                output_file_names[fn] = 0

            output_full_path = os.path.join(output_dir, fn)
            image.save(output_full_path)

        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.
                  format(im_file, e))
            continue

    # statistics.mean() raises on an empty list, so bail out if no image
    # was processed successfully
    if len(time_load) == 0 or len(time_infer) == 0:
        print('No images were processed successfully')
        return

    ave_time_load = statistics.mean(time_load)
    ave_time_infer = statistics.mean(time_infer)
    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(
            statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(
            statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'
    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_load), std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_infer), std_dev_time_infer))
def visualize_detector_output(detector_output_path: str,
                              out_dir: str,
                              images_dir: str,
                              is_azure: bool = False,
                              confidence: float = 0.8,
                              sample: int = -1,
                              output_image_width: int = 700,
                              random_seed: Optional[int] = None) -> List[str]:
    """Draw bounding boxes on images given the output of the detector.

    Args:
        detector_output_path: str, path to detector output json file
        out_dir: str, path to directory for saving annotated images
        images_dir: str, path to local images dir, or a SAS URL to an Azure Blob
            Storage container
        is_azure: bool, whether images_dir points to an Azure URL
        confidence: float, threshold above which annotations will be rendered
        sample: int, maximum number of images to annotate, -1 for all
        output_image_width: int, width in pixels to resize images for display,
            set to -1 to use original image width
        random_seed: int, for deterministic image sampling when sample != -1

    Returns: list of str, paths to annotated images
    """
    # arguments error checking
    assert confidence > 0 and confidence < 1, (
        f'Confidence threshold {confidence} is invalid, must be in (0, 1).')

    assert os.path.exists(detector_output_path), (
        f'Detector output file does not exist at {detector_output_path}.')

    if is_azure:
        # we don't import sas_blob_utils at the top of this file in order to
        # accommodate the MegaDetector Colab notebook which does not have
        # the azure-storage-blob package installed
        import sas_blob_utils
    else:
        assert os.path.isdir(images_dir)

    os.makedirs(out_dir, exist_ok=True)

    #%% Load detector output

    with open(detector_output_path) as f:
        detector_output = json.load(f)
    assert 'images' in detector_output, (
        'Detector output file should be a json with an "images" field.')
    images = detector_output['images']

    detector_label_map = DEFAULT_DETECTOR_LABEL_MAP
    if 'detection_categories' in detector_output:
        print('detection_categories provided')
        detector_label_map = detector_output['detection_categories']

    num_images = len(images)
    print(f'Detector output file contains {num_images} entries.')

    if sample > 0:
        assert num_images >= sample, (
            f'Sample size {sample} greater than number of entries '
            f'({num_images}) in detector result.')

        if random_seed is not None:
            images = sorted(images, key=lambda x: x['file'])
            random.seed(random_seed)

        random.shuffle(images)
        images = sorted(images[:sample], key=lambda x: x['file'])
        print(f'Sampled {len(images)} entries from the detector output file.')

    #%% Load images, annotate them and save

    print('Starting to annotate the images...')
    num_saved = 0
    annotated_img_paths = []
    image_obj: Any  # str for local images, BytesIO for Azure images

    for entry in tqdm(images):
        image_id = entry['file']

        if 'failure' in entry:
            print(f'Skipping {image_id}, failure: "{entry["failure"]}"')
            continue

        # max_conf = entry['max_detection_conf']

        if is_azure:
            blob_uri = sas_blob_utils.build_blob_uri(container_uri=images_dir,
                                                     blob_name=image_id)
            if not sas_blob_utils.check_blob_exists(blob_uri):
                container = sas_blob_utils.get_container_from_uri(images_dir)
                print(f'Image {image_id} not found in blob container '
                      f'{container}; skipped.')
                continue
            image_obj, _ = sas_blob_utils.download_blob_to_stream(blob_uri)
        else:
            image_obj = os.path.join(images_dir, image_id)
            if not os.path.exists(image_obj):
                print(f'Image {image_id} not found in images_dir; skipped.')
                continue

        # resize is for displaying them more quickly
        image = vis_utils.resize_image(vis_utils.open_image(image_obj),
                                       output_image_width)

        vis_utils.render_detection_bounding_boxes(
            entry['detections'],
            image,
            label_map=detector_label_map,
            confidence_threshold=confidence)

        for char in ['/', '\\', ':']:
            image_id = image_id.replace(char, '~')
        annotated_img_path = os.path.join(out_dir, f'anno_{image_id}')
        annotated_img_paths.append(annotated_img_path)
        image.save(annotated_img_path)
        num_saved += 1

        if is_azure:
            image_obj.close()  # BytesIO object

    print(f'Rendered detection results on {num_saved} images, '
          f'saved to {out_dir}.')

    return annotated_img_paths
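A typical call to visualize_detector_output(), using the parameters documented in its docstring (paths are hypothetical):

annotated = visualize_detector_output(
    detector_output_path='detections.json',  # MegaDetector batch output
    out_dir='annotated_images',
    images_dir='/data/camera_traps',
    confidence=0.8,
    sample=50,       # annotate a random sample of 50 entries
    random_seed=0)   # deterministic sampling
print('Annotated {} images'.format(len(annotated)))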
def render_bounding_boxes(image_base_dir, image_relative_path, display_name, detections, res,
                          detection_categories_map=None, classification_categories_map=None, options=None):
        """
        Renders detection bounding boxes on a single image.  
        
        The source image is:
            
            image_base_dir / image_relative_path
            
        The target image is, for example:
            
            [options.output_dir] / ['detections' or 'non-detections'] / [filename with slashes turned into tildes]
        
        Returns the html info struct for this image in the form that's used for 
        write_html_image_list.
        """
        
        if options is None:
            options = PostProcessingOptions()

        # Leaving code in place for reading from blob storage, may support this
        # in the future.
        """
        stream = io.BytesIO()
        _ = blob_service.get_blob_to_stream(container_name, image_id, stream)
        image = Image.open(stream).resize(viz_size)  # resize is to display them in this notebook or in the HTML more quickly
        """

        image_full_path = os.path.join(image_base_dir, image_relative_path)
        
        # isfile() is slow when mounting remote directories; much faster to just try/except
        # on the image open.
        if False:
            if not os.path.isfile(image_full_path):
                print('Warning: could not find image file {}'.format(image_full_path))
                return ''
        
        try:
            image = vis_utils.open_image(image_full_path)
        except:
            print('Warning: could not open image file {}'.format(image_full_path))            
            return ''
        
        if options.viz_target_width is not None:
            image = vis_utils.resize_image(image, options.viz_target_width)

        vis_utils.render_detection_bounding_boxes(detections, image,
                                                  label_map=detection_categories_map,
                                                  classification_label_map=classification_categories_map,
                                                  confidence_threshold=options.confidence_threshold,
                                                  thickness=options.line_thickness,expansion=options.box_expansion)

        # Render images to a flat folder... we can use os.sep here because we've
        # already normalized paths
        sample_name = res + '_' + path_utils.flatten_path(image_relative_path)        
        fullpath = os.path.join(options.output_dir, res, sample_name)
        try:
            image.save(fullpath)
        except OSError as e:
            # errno.ENAMETOOLONG doesn't get thrown properly on Windows, so 
            # we awkwardly check against a hard-coded limit
            if (e.errno == errno.ENAMETOOLONG) or (len(fullpath) >= 259):
                extension = os.path.splitext(sample_name)[1]
                sample_name = res + '_' + str(uuid.uuid4()) + extension
                image.save(os.path.join(options.output_dir, res, sample_name))
            else:
                raise

        # Use slashes regardless of os
        file_name = '{}/{}'.format(res, sample_name)

        return {
            'filename': file_name,
            'title': display_name,
            'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        }
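path_utils.flatten_path() isn't shown in this listing; based on how flattened names are built elsewhere here (replacing '/', '\' and ':' with '~'), it presumably behaves like this sketch:

def flatten_path_sketch(p):
    # Approximation of path_utils.flatten_path(): collapse a relative path
    # into a single safe filename component
    for char in ['/', '\\', ':']:
        p = p.replace(char, '~')
    return p

assert flatten_path_sketch('site01/cam02/IMG_0001.JPG') == 'site01~cam02~IMG_0001.JPG'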
Example #7
def load_and_run_detector(model_file, image_file_names, output_dir,
                          render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
                          crop_images=False):
    """Load and run detector on target images, and visualize the results."""
    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # Dictionary mapping output file names to a collision-avoidance count.
    #
    # Since we'll be writing a bunch of files to the same folder, we rename
    # as necessary to avoid collisions.
    output_filename_collision_counts = {}

    def input_file_to_detection_file(fn, crop_index=-1):
        """Creates unique file names for output files.

        This function does 3 things:
        1) If the --crop flag is used, then each input image may produce several output
            crops. For example, if foo.jpg has 3 detections, then this function should
            get called 3 times, with crop_index taking on 0, 1, then 2. Each time, this
            function appends crop_index to the filename, resulting in
                foo_crop00_detections.jpg
                foo_crop01_detections.jpg
                foo_crop02_detections.jpg

        2) If the --recursive flag is used, then the same file (base)name may appear
            multiple times. However, we output into a single flat folder. To avoid
            filename collisions, we prepend an integer prefix to duplicate filenames:
                foo_crop00_detections.jpg
                0000_foo_crop00_detections.jpg
                0001_foo_crop00_detections.jpg

        3) Prepends the output directory:
                out_dir/foo_crop00_detections.jpg

        Args:
            fn: str, filename
            crop_index: int, crop number

        Returns: output file path
        """
        fn = os.path.basename(fn).lower()
        name, ext = os.path.splitext(fn)
        if crop_index >= 0:
            name += '_crop{:0>2d}'.format(crop_index)
        fn = '{}{}{}'.format(name, ImagePathUtils.DETECTION_FILENAME_INSERT, '.jpg')
        if fn in output_filename_collision_counts:
            # we've seen this name before; prefix it with the current
            # collision count, and bump the count for the original name
            n_collisions = output_filename_collision_counts[fn]
            output_filename_collision_counts[fn] = n_collisions + 1
            fn = '{:0>4d}'.format(n_collisions) + '_' + fn
        else:
            output_filename_collision_counts[fn] = 0
        fn = os.path.join(output_dir, fn)
        return fn

    for im_file in tqdm(image_file_names):

        try:
            start_time = time.time()

            image = viz_utils.load_image(im_file)

            elapsed = time.time() - start_time
            time_load.append(elapsed)

        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:
            start_time = time.time()

            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)

            elapsed = time.time() - start_time
            time_infer.append(elapsed)

        except Exception as e:
            print('An error occurred while running the detector on image {}. Exception: {}'.format(im_file, e))
            continue

        try:
            if crop_images:

                images_cropped = viz_utils.crop_image(result['detections'], image)

                for i_crop, cropped_image in enumerate(images_cropped):
                    output_full_path = input_file_to_detection_file(im_file, i_crop)
                    cropped_image.save(output_full_path)

            else:

                # image is modified in place
                viz_utils.render_detection_bounding_boxes(result['detections'], image,
                                                          label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                                                          confidence_threshold=render_confidence_threshold)
                output_full_path = input_file_to_detection_file(im_file)
                image.save(output_full_path)

        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.format(im_file, e))
            continue

    # ...for each image

    # statistics.mean() raises on an empty list, so bail out if no image
    # was processed successfully
    if len(time_load) == 0 or len(time_infer) == 0:
        print('No images were processed successfully')
        return

    ave_time_load = statistics.mean(time_load)
    ave_time_infer = statistics.mean(time_infer)
    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'
    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_load),
                                                    std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_infer),
                                                      std_dev_time_infer))
def visualize_detector_output(args):

    #%% Load detector output

    os.makedirs(args.out_dir, exist_ok=True)

    images_local = args.images_dir is not None

    with open(args.detector_output_path) as f:
        detector_output = json.load(f)

    assert 'images' in detector_output, 'Detector output file should be a json with an "images" field.'
    images = detector_output['images']

    detector_label_map = DEFAULT_DETECTOR_LABEL_MAP
    if 'detection_categories' in detector_output:
        print('detection_categories provided')
        detector_label_map = detector_output['detection_categories']

    num_images = len(images)
    print('Detector output file contains {} entries.'.format(num_images))

    if args.sample > 0:
        assert num_images >= args.sample, \
            'Sample size {} specified greater than number of entries ({}) in detector result.'.format(args.sample, num_images)

        if args.random_seed is not None:
            images = sorted(images, key=lambda x: x['file'])
            random.seed(args.random_seed)

        random.shuffle(images)
        images = sorted(images[:args.sample], key=lambda x: x['file'])
        print('Sampled {} entries from the detector output file.'.format(
            len(images)))

    #%% Load images, annotate them and save

    if not images_local:
        blob_service = get_service_from_uri(args.sas_url)
        container_name = get_container_from_uri(args.sas_url)

    print('Starting to annotate the images...')
    num_saved = 0

    annotated_img_paths = []

    for entry in tqdm(images):
        if 'failure' in entry:
            print('Skipping {}, which failed because of "{}"'.format(
                entry['file'], entry['failure']))
            continue

        image_id = entry['file']
        # max_conf = entry['max_detection_conf']
        detections = entry['detections']

        if images_local:
            image_obj = os.path.join(args.images_dir, image_id)
            if not os.path.exists(image_obj):
                print('Image {} is not found at local images_dir; skipped.'.
                      format(image_id))
                continue
        else:
            if not blob_service.exists(container_name, blob_name=image_id):
                print(
                    'Image {} is not found in the blob container {}; skipped.'.
                    format(image_id, container_name))
                continue

            image_obj = io.BytesIO()
            _ = blob_service.get_blob_to_stream(container_name, image_id,
                                                image_obj)

        # resize is for displaying them more quickly
        image = vis_utils.resize_image(vis_utils.open_image(image_obj),
                                       args.output_image_width)

        vis_utils.render_detection_bounding_boxes(
            detections,
            image,
            label_map=detector_label_map,
            confidence_threshold=args.confidence)

        annotated_img_name = 'anno_' + image_id.replace('/', '~').replace(
            '\\', '~').replace(':', '~')
        annotated_img_path = os.path.join(args.out_dir, annotated_img_name)
        annotated_img_paths.append(annotated_img_path)
        image.save(annotated_img_path)
        num_saved += 1

    print('Rendered detection results on {} images, saved to {}.'.format(
        num_saved, args.out_dir))

    return annotated_img_paths
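This variant takes an argparse-style namespace; a sketch of the parser it implies, with field names taken from the function body above (default values are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('detector_output_path')
parser.add_argument('out_dir')
parser.add_argument('--images_dir', default=None,
                    help='local image folder; if omitted, images come from --sas_url')
parser.add_argument('--sas_url', default=None,
                    help='SAS URL to an Azure Blob Storage container')
parser.add_argument('--confidence', type=float, default=0.8)
parser.add_argument('--sample', type=int, default=-1)
parser.add_argument('--random_seed', type=int, default=None)
parser.add_argument('--output_image_width', type=int, default=700)

# annotated = visualize_detector_output(parser.parse_args())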
Example #10
def load_and_run_detector(model_file,
                          image_file_names,
                          output_dir,
                          render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
                          crop_images=False):

    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    # load and run detector on target images, and visualize the results
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # Dictionary mapping output file names to a collision-avoidance count.
    #
    # Since we'll be writing a bunch of files to the same folder, we rename
    # as necessary to avoid collisions.
    output_filename_collision_counts = {}

    def input_file_to_detection_file(fn, crop_index=-1):
        """
        Append "_detections" to fn and prepend the output folder.

        Because we may be mapping many directories to one, we can have filename
        collisions.  Resolve by prepending an integer prefix to duplicate filenames.
        """
        fn = os.path.basename(fn).lower()
        name, ext = os.path.splitext(fn)
        if crop_index >= 0:
            name += '_crop{:0>2d}'.format(crop_index)
        fn = '{}{}{}'.format(name, ImagePathUtils.DETECTION_FILENAME_INSERT,
                             '.jpg')
        if fn in output_filename_collision_counts:
            # we've seen this name before; prefix it with the current
            # collision count, and bump the count for the original name
            n_collisions = output_filename_collision_counts[fn]
            output_filename_collision_counts[fn] = n_collisions + 1
            fn = '{:0>4d}'.format(n_collisions) + '_' + fn
        else:
            output_filename_collision_counts[fn] = 0
        fn = os.path.join(output_dir, fn)
        return fn

    for im_file in tqdm(image_file_names):

        try:

            start_time = time.time()

            image = viz_utils.load_image(im_file)

            elapsed = time.time() - start_time
            time_load.append(elapsed)

        except Exception as e:

            print('Image {} cannot be loaded. Exception: {}'.format(
                im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:

            start_time = time.time()

            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)

            elapsed = time.time() - start_time
            time_infer.append(elapsed)

        except Exception as e:

            print(
                'An error occurred while running the detector on image {}. Exception: {}'
                .format(im_file, e))
            continue

        try:

            if crop_images:

                images_cropped = viz_utils.crop_image(result['detections'],
                                                      image)

                for i_crop, cropped_image in enumerate(images_cropped):
                    output_full_path = input_file_to_detection_file(
                        im_file, i_crop)
                    cropped_image.save(output_full_path)

            else:

                # image is modified in place
                viz_utils.render_detection_bounding_boxes(
                    result['detections'],
                    image,
                    label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                    confidence_threshold=render_confidence_threshold)
                output_full_path = input_file_to_detection_file(im_file)
                image.save(output_full_path)

        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.
                  format(im_file, e))
            continue

    # ...for each image

    # statistics.mean() raises on an empty list, so bail out if no image
    # was processed successfully
    if len(time_load) == 0 or len(time_infer) == 0:
        print('No images were processed successfully')
        return

    ave_time_load = statistics.mean(time_load)
    ave_time_infer = statistics.mean(time_infer)
    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(
            statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(
            statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'
    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_load), std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_infer), std_dev_time_infer))
Example #11
def detect_sync():

    if not has_access(request):
        print('Access denied, please provide a valid API key')
        return _make_error_response(
            403, 'Access denied, please provide a valid API key')

    # Check whether the request_processing_function had an error
    post_data = check_posted_data(request)
    if post_data.get('error_code', None) is not None:
        return _make_error_response(post_data.get('error_code'),
                                    post_data.get('error_message'))

    render_boxes = post_data.get('render_boxes')
    return_confidence_threshold = post_data.get('return_confidence_threshold')
    rendering_confidence_threshold = post_data.get(
        'rendering_confidence_threshold')

    redis_id = str(uuid.uuid4())
    d = {
        'id': redis_id,
        'render_boxes': render_boxes,
        'return_confidence_threshold': return_confidence_threshold
    }
    temp_direc = os.path.join(config.TEMP_FOLDER, redis_id)

    try:

        try:
            # Write images to temporary files
            #
            # TODO: read from memory rather than using intermediate files
            os.makedirs(temp_direc, exist_ok=True)
            for name, file in request.files.items():
                if file.content_type in config.IMAGE_CONTENT_TYPES:
                    filename = request.files[name].filename
                    image_path = os.path.join(temp_direc, filename)
                    print('Saving image {} to {}'.format(name, image_path))
                    file.save(image_path)
                    assert os.path.isfile(image_path), \
                        'Error creating file {}'.format(image_path)

        except Exception as e:
            return _make_error_object(500, 'Error saving images: ' + str(e))

        # Submit the image(s) for processing by api_backend.py, which is waiting on this queue
        db.rpush(config.REDIS_QUEUE_NAME, json.dumps(d))

        while True:

            # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
            result = db.get(redis_id)

            if result:

                result = json.loads(result.decode())
                print('Processing result {}'.format(str(result)))

                if result['status'] == 200:
                    detections = result['detections']
                    db.delete(redis_id)

                else:
                    db.delete(redis_id)
                    print('Detection error: ' + str(result))
                    return _make_error_response(
                        500, 'Detection error: ' + str(result))

                try:
                    print('detect_sync: postprocessing and sending images back...')
                    fields = {
                        'detection_result': ('detection_result', json.dumps(detections),
                                             'application/json'),
                    }

                    if render_boxes and result['status'] == 200:

                        print('Rendering images')

                        for image_name, image_detections in detections.items():

                            image_path = os.path.join(temp_direc, image_name)
                            image = viz_utils.load_image(image_path)

                            _detections = []
                            for d in image_detections:
                                y1, x1, y2, x2 = d[0:4]
                                width = x2 - x1
                                height = y2 - y1
                                bbox = [x1, y1, width, height]
                                _detections.append({
                                    'bbox': bbox,
                                    'conf': d[4],
                                    'category': d[5]
                                })

                            viz_utils.render_detection_bounding_boxes(
                                _detections,
                                image,
                                confidence_threshold=rendering_confidence_threshold)

                            output_img_stream = BytesIO()
                            image.save(output_img_stream, format='jpeg')
                            output_img_stream.seek(0)
                            fields[image_name] = (image_name,
                                                  output_img_stream,
                                                  'image/jpeg')
                        print('Done rendering images')

                    m = MultipartEncoder(fields=fields)
                    return Response(m.to_string(), mimetype=m.content_type)

                except Exception as e:

                    print(traceback.format_exc())
                    print(
                        'Error returning result or rendering the detection boxes: '
                        + str(e))

                finally:

                    try:
                        print('Removing temporary files')
                        shutil.rmtree(temp_direc)
                    except Exception as e:
                        print('Error removing temporary folder {}: {}'.format(
                            temp_direc, str(e)))

            else:
                time.sleep(0.005)

            # ...if we do/don't have a request available on the queue

        # ...while(True)

    except Exception as e:

        print(traceback.format_exc())
        return _make_error_object(500, 'Error processing images: ' + str(e))
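A client-side sketch for the synchronous endpoint above. The endpoint URL, query-parameter names, and auth header are assumptions (the server reads them via check_posted_data() and has_access(), which are not shown in this listing); the multipart decoding uses requests_toolbelt:

import requests
from requests_toolbelt.multipart import decoder

# Hedged sketch: URL, parameter names, and the 'key' header are hypothetical
with open('test.jpg', 'rb') as f:
    r = requests.post('http://localhost:5050/detect_sync',
                      params={'render_boxes': 'true',
                              'rendering_confidence_threshold': '0.8'},
                      files={'test.jpg': ('test.jpg', f, 'image/jpeg')},
                      headers={'key': 'my-api-key'})

# The response body is multipart: one 'application/json' part with the
# detections, plus one 'image/jpeg' part per rendered image
for part in decoder.MultipartDecoder.from_response(r).parts:
    print(part.headers.get(b'Content-Type'), len(part.content))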
def render_bounding_boxes(image_base_dir,
                          image_relative_path,
                          display_name,
                          detections,
                          res,
                          detection_categories_map=None,
                          classification_categories_map=None,
                          options=None):
    """
        Renders detection bounding boxes on a single image.  Returns the html info struct
        for this image in the form that's used for write_html_image_list.
        """

    if options is None:
        options = PostProcessingOptions()

    # Leaving code in place for reading from blob storage, may support this
    # in the future.
    """
        stream = io.BytesIO()
        _ = blob_service.get_blob_to_stream(container_name, image_id, stream)
        image = Image.open(stream).resize(viz_size)  # resize is to display them in this notebook or in the HTML more quickly
        """

    image_full_path = os.path.join(image_base_dir, image_relative_path)

    # isfile() is slow when mounting remote directories; much faster to just try/except
    # on the image open.
    if False:
        if not os.path.isfile(image_full_path):
            print('Warning: could not find image file {}'.format(
                image_full_path))
            return ''

    try:
        image = vis_utils.open_image(image_full_path)
    except:
        print('Warning: could not open image file {}'.format(image_full_path))
        return ''

    image = vis_utils.resize_image(image, options.viz_target_width)

    vis_utils.render_detection_bounding_boxes(
        detections,
        image,
        label_map=detection_categories_map,
        classification_label_map=classification_categories_map,
        confidence_threshold=options.confidence_threshold,
        thickness=4)

    # Render images to a flat folder... we can use os.sep here because we've
    # already normalized paths
    sample_name = res + '_' + image_relative_path.replace(os.sep, '~')

    try:
        image.save(os.path.join(options.output_dir, res, sample_name))
    except OSError as e:
        if e.errno == errno.ENAMETOOLONG:
            sample_name = res + '_' + str(uuid.uuid4()) + '.jpg'
            image.save(os.path.join(options.output_dir, res, sample_name))
        else:
            raise

    # Use slashes regardless of os
    file_name = '{}/{}'.format(res, sample_name)

    return {
        'filename': file_name,
        'title': display_name,
        'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
    }
Example #13
def detect_sync(*args, **kwargs):
    # check if the request_processing_function had an error while parsing user specified parameters
    if kwargs.get('error_code', None) is not None:
        return _make_error_response(kwargs.get('error_code'),
                                    kwargs.get('error_message'))

    render_boxes = kwargs.get('render_boxes')
    classification = kwargs.get('classification')
    detection_confidence = kwargs.get('detection_confidence')
    images = kwargs.get('images')
    image_names = kwargs.get('image_names')

    detection_results = []
    inference_time_detector = []

    try:
        print('runserver, post_detect_sync, scoring images...')

        for image_name, image in zip(image_names, images):
            start_time = time.time()

            result = detector.generate_detections_one_image(image, image_name)
            detection_results.append(result)

            elapsed = time.time() - start_time
            inference_time_detector.append(elapsed)

    except Exception as e:
        print('Error performing detection on the images: ' + str(e))
        log.log_exception('Error performing detection on the images: ' +
                          str(e))
        return _make_error_response(
            500, 'Error performing detection on the images: ' + str(e))

    # Filter the detections by the confidence threshold; this is the json we
    # return to the user, along with the rendered images if they opted for those.
    #
    # Each result is [ymin, xmin, ymax, xmax, confidence, category].
    filtered_results = {}
    try:
        for result in detection_results:
            image_name = result['file']
            detections = result.get('detections', None)
            filtered_results[image_name] = []

            if detections is None:
                continue

            for d in detections:
                if d['conf'] > detection_confidence:
                    res = TFDetector.convert_to_tf_coords(d['bbox'])
                    res.append(d['conf'])
                    # category is an int here, not a string as in the async API
                    res.append(int(d['category']))
                    filtered_results[image_name].append(res)

    except Exception as e:
        print('Error consolidating the detection boxes: ' + str(e))
        log.log_exception('Error consolidating the detection boxes: ' + str(e))
        return _make_error_response(
            500, 'Error consolidating the detection boxes: ' + str(e))

    # classification
    classification_result = {}
    # TODO
    # try:
    #     if classification:
    #         print('runserver, classification...')
    #         tic = datetime.now()
    #         classification_result = classifier.classify_boxes(images, image_names, result, classification)
    #         toc = datetime.now()
    #         classification_inference_duration = toc - tic
    #         print('runserver, classification, classification inference duration: {}' \
    #               .format({classification_inference_duration}))
    #
    #     else:
    #         classification_result = {}
    #
    # except Exception as e:
    #     print('Error performing classification on the images: ' + str(e))
    #     log.log_exception('Error performing classification on the images: ' + str(e))
    #     abort(500, 'Error performing classification on the images: ' + str(e))

    # return results; optionally render the detections on the images and send the annotated images back
    try:
        print('runserver, post_detect_sync, rendering and sending images back...')
        fields = {
            'detection_result': ('detection_result', json.dumps(filtered_results),
                                 'application/json'),
            'classification_result': ('classification_result', json.dumps(classification_result),
                                      'application/json')
        }

        if render_boxes:
            for image_name, image, result in zip(image_names, images,
                                                 detection_results):
                detections = result.get('detections', None)
                if detections is None:
                    continue
                viz_utils.render_detection_bounding_boxes(
                    detections,
                    image,
                    confidence_threshold=detection_confidence)

                output_img_stream = BytesIO()
                image.save(output_img_stream, format='jpeg')
                output_img_stream.seek(0)
                fields[image_name] = (image_name, output_img_stream,
                                      'image/jpeg')

        m = MultipartEncoder(fields=fields)

        if len(inference_time_detector) > 0:
            mean_inference_time_detector = sum(inference_time_detector) / len(
                inference_time_detector)
        else:
            mean_inference_time_detector = -1

        log.log_info(
            'detector mean inference time',
            mean_inference_time_detector,
            additionalProperties={
                'detector mean inference time': str(mean_inference_time_detector),
                # TODO 'classification mean inference time': str(''),
                'num_images': len(image_names),
                'render_boxes': render_boxes,
                'detection_confidence': detection_confidence
            })
        return Response(m.to_string(), mimetype=m.content_type)
    except Exception as e:
        print('Error returning result or rendering the detection boxes: ' +
              str(e))
        log.log_exception(
            'Error returning result or rendering the detection boxes: ' +
            str(e))
        return _make_error_response(
            500, 'Error returning result or rendering the detection boxes: ' +
            str(e))
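TFDetector.convert_to_tf_coords() isn't shown in this listing; given that d['bbox'] is the API-format [x_min, y_min, width, height] (normalized) and the comment above says each result is [ymin, xmin, ymax, xmax, confidence, category], it presumably works like this sketch:

def convert_to_tf_coords_sketch(bbox):
    # Approximation: API box [x_min, y_min, width, height] (normalized)
    # -> TF order [y_min, x_min, y_max, x_max]
    x_min, y_min, width, height = bbox
    return [y_min, x_min, y_min + height, x_min + width]

print(convert_to_tf_coords_sketch([0.1, 0.2, 0.3, 0.4]))
# -> [0.2, 0.1, 0.6000000000000001, 0.4]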