Example #1
def producer_func(q, image_files):
    """ 
    Producer function; only used when using the (optional) image queue.
    
    Reads up to N images from disk and puts them on the blocking queue for processing.
    """

    if verbose:
        print('Producer starting')
        sys.stdout.flush()

    for im_file in image_files:

        try:
            if verbose:
                print('Loading image {}'.format(im_file))
                sys.stdout.flush()
            image = viz_utils.load_image(im_file)
        except Exception as e:
            print('Producer process: image {} cannot be loaded. Exception: {}'.
                  format(im_file, e))
            raise

        if verbose:
            print('Queueing image {}'.format(im_file))
            sys.stdout.flush()
        q.put([im_file, image])

    q.put(None)

    print('Finished image loading')
    sys.stdout.flush()
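
A minimal sketch of how this producer could be wired to a consumer; the queue size, consumer body, and `image_files` list are illustrative assumptions (the original script also defines the module-level `verbose` flag that producer_func reads):

from multiprocessing import JoinableQueue, Process

def consumer_func(q):
    while True:
        item = q.get()
        q.task_done()
        if item is None:  # sentinel placed by producer_func when it finishes
            break
        im_file, image = item
        print('Consuming image {}'.format(im_file))

if __name__ == '__main__':
    q = JoinableQueue(10)  # bounded queue, so the producer blocks if it gets far ahead
    producer = Process(target=producer_func, args=(q, image_files))
    producer.start()
    consumer_func(q)
    producer.join()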
Example #2

def process_image(im_file, tf_detector, confidence_threshold):
    """Runs the MegaDetector over a single image file.

    Args:
    - im_file: str, path to image file
    - tf_detector: TFDetector, loaded model
    - confidence_threshold: float, only detections above this threshold are returned

    Returns:
    - result: dict representing detections on one image
        see the 'images' key in https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format
    """
    print('Processing image {}'.format(im_file))
    try:
        image = viz_utils.load_image(im_file)
    except Exception as e:
        print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
        result = {'file': im_file, 'failure': TFDetector.FAILURE_IMAGE_OPEN}
        return result

    try:
        result = tf_detector.generate_detections_one_image(
            image, im_file, detection_threshold=confidence_threshold)
    except Exception as e:
        print('Image {} cannot be processed. Exception: {}'.format(im_file, e))
        result = {'file': im_file, 'failure': TFDetector.FAILURE_TF_INFER}
        return result

    return result
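
For reference, a minimal way to drive this function; the model path and image names below are placeholders:

tf_detector = TFDetector('md_v4.1.0.pb')  # placeholder model path
for im_file in ['img_0001.jpg', 'img_0002.jpg']:
    result = process_image(im_file, tf_detector, confidence_threshold=0.8)
    print(result.get('failure', result.get('detections')))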
Example #3

def process_image(im_file, tf_detector, confidence_threshold):
    """Runs MegaDetector on a single image file.

    Unlike the variant above, tf_detector may also be a model file path (str),
    in which case the model is loaded here, so that multiprocessing workers can
    each load their own copy.
    """

    if isinstance(tf_detector, str):
        start_time = time.time()
        tf_detector = TFDetector(tf_detector)
        elapsed = time.time() - start_time
        print('Loaded model (worker level) in {}'.format(
            humanfriendly.format_timespan(elapsed)))

    print('Processing image {}'.format(im_file))
    image = None
    try:
        image = viz_utils.load_image(im_file)
    except Exception as e:
        print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
        result = {'file': im_file, 'failure': TFDetector.FAILURE_IMAGE_OPEN}
        return result

    try:
        result = tf_detector.generate_detections_one_image(
            image, im_file, detection_threshold=confidence_threshold)
    except Exception as e:
        print('Image {} cannot be processed. Exception: {}'.format(im_file, e))
        result = {'file': im_file, 'failure': TFDetector.FAILURE_TF_INFER}
        return result

    return result
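
Accepting a model path here is what makes per-worker model loading possible. A sketch of that pattern, in the spirit of the repo's process_images batch helper (the batch wrapper, pool size, and paths below are assumptions):

from functools import partial
from multiprocessing import Pool

def process_batch(im_files, model_file, confidence_threshold):
    # Load the model once per batch and reuse it; calling process_image with
    # the path (a str) once per image would reload the model on every call
    tf_detector = TFDetector(model_file)
    return [process_image(im_file, tf_detector, confidence_threshold)
            for im_file in im_files]

if __name__ == '__main__':
    batches = [['img_0001.jpg'], ['img_0002.jpg']]  # placeholder batches
    with Pool(2) as pool:
        results = pool.map(
            partial(process_batch,
                    model_file='md_v4.1.0.pb',
                    confidence_threshold=0.8),
            batches)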
Example #4
def load_and_run_detector_batch(model_file,
                                image_file_names,
                                checkpoint_path=None,
                                confidence_threshold=0,
                                checkpoint_frequency=-1,
                                results=None):

    # Avoid the mutable-default-argument pitfall; note that the caller's
    # list, when provided, is modified in place
    if results is None:
        results = []

    already_processed = set([i['file'] for i in results])

    # load the detector
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    count = 0  # does not count those already processed
    for im_file in tqdm(image_file_names):
        if im_file in already_processed:  # will not add additional entries not in the starter checkpoint
            continue

        count += 1

        try:
            image = viz_utils.load_image(im_file)
        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(
                im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            results.append(result)
            continue

        try:
            result = tf_detector.generate_detections_one_image(
                image, im_file, detection_threshold=confidence_threshold)
            results.append(result)

        except Exception as e:
            print(
                'An error occurred while running the detector on image {}. Exception: {}'
                .format(im_file, e))
            continue

        # checkpoint
        if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:
            print(
                'Writing a new checkpoint after having processed {} images since last restart'
                .format(count))
            with open(checkpoint_path, 'w') as f:
                json.dump({'images': results}, f)

    return results  # actually modified in place
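
The results parameter and checkpoint_path exist so that an interrupted run can be resumed; a sketch of that flow (file names and the threshold are placeholders):

# Restore results from a checkpoint written by the loop above; images already
# present in the checkpoint are skipped via 'already_processed'
with open('checkpoint.json') as f:
    restored_results = json.load(f)['images']

results = load_and_run_detector_batch('md_v4.1.0.pb',
                                      image_file_names,
                                      checkpoint_path='checkpoint.json',
                                      confidence_threshold=0.8,
                                      checkpoint_frequency=100,
                                      results=restored_results)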
Example #5
def load_and_run_detector(
        model_file,
        image_file_names,
        output_dir,
        render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD):
    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    # load and run detector on target images, and visualize the results
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # since we'll be writing a bunch of files to the same folder, rename
    # as necessary to avoid collisions
    output_file_names = {}

    for im_file in tqdm(image_file_names):
        try:
            start_time = time.time()

            image = viz_utils.load_image(im_file)

            elapsed = time.time() - start_time
            time_load.append(elapsed)
        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(
                im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:
            start_time = time.time()

            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)

            elapsed = time.time() - start_time
            time_infer.append(elapsed)
        except Exception as e:
            print(
                'An error occurred while running the detector on image {}. Exception: {}'
                .format(im_file, e))
            # the error code and message is written by generate_detections_one_image,
            # which is wrapped in a big try catch
            continue

        try:
            # image is modified in place
            viz_utils.render_detection_bounding_boxes(
                result['detections'],
                image,
                label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                confidence_threshold=render_confidence_threshold)
            fn = os.path.basename(im_file).lower()
            name, ext = os.path.splitext(fn)
            fn = '{}{}{}'.format(name,
                                 ImagePathUtils.DETECTION_FILENAME_INSERT,
                                 '.jpg')  # save all as JPG
            if fn in output_file_names:
                # Count this collision against the original name before renaming,
                # so the next duplicate gets a distinct prefix
                n_collisions = output_file_names[fn]
                output_file_names[fn] = n_collisions + 1
                fn = str(n_collisions) + '_' + fn
            else:
                output_file_names[fn] = 0

            output_full_path = os.path.join(output_dir, fn)
            image.save(output_full_path)

        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.
                  format(im_file, e))
            continue

    # Guard against the case where no images loaded or ran successfully
    ave_time_load = statistics.mean(time_load) if len(time_load) > 0 else 0
    ave_time_infer = statistics.mean(time_infer) if len(time_infer) > 0 else 0
    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(
            statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(
            statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'
    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_load), std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_infer), std_dev_time_infer))
Example #6

def load_and_run_detector_batch(model_file,
                                image_file_names,
                                checkpoint_path=None,
                                confidence_threshold=0,
                                checkpoint_frequency=-1,
                                results=None,
                                n_cores=0):

    if results is None:
        results = []

    already_processed = set([i['file'] for i in results])

    if n_cores > 1 and tf.test.is_gpu_available():
        print(
            'Warning: multiple cores requested, but a GPU is available; parallelization across GPUs is not currently supported, defaulting to one GPU'
        )

    # If we're not using multiprocessing...
    if n_cores <= 1 or tf.test.is_gpu_available():
        # Load the detector
        start_time = time.time()
        tf_detector = TFDetector(model_file)
        elapsed = time.time() - start_time
        print('Loaded model in {}'.format(
            humanfriendly.format_timespan(elapsed)))
    else:
        # If we're using multiprocessing, let the workers load the model, just store
        # the model filename.
        tf_detector = model_file

    if n_cores <= 1 or tf.test.is_gpu_available():

        # Does not count those already processed
        count = 0

        for im_file in tqdm(image_file_names):

            # Will not add additional entries not in the starter checkpoint
            if im_file in already_processed:
                print('Bypassing image {}'.format(im_file))
                continue

            count += 1

            try:
                image = viz_utils.load_image(im_file)
            except Exception as e:
                print('Image {} cannot be loaded. Exception: {}'.format(
                    im_file, e))
                result = {
                    'file': im_file,
                    'failure': TFDetector.FAILURE_IMAGE_OPEN
                }
                results.append(result)
                continue

            try:
                result = tf_detector.generate_detections_one_image(
                    image, im_file, detection_threshold=confidence_threshold)
                results.append(result)

            except Exception as e:
                print(
                    'An error occurred while running the detector on image {}. Exception: {}'
                    .format(im_file, e))
                result = {
                    'file': im_file,
                    'failure': TFDetector.FAILURE_TF_INFER
                }
                results.append(result)
                continue

            # checkpoint
            if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:
                print(
                    'Writing a new checkpoint after having processed {} images since last restart'
                    .format(count))
                with open(checkpoint_path, 'w') as f:
                    json.dump({'images': results}, f)
    else:
        print('Creating pool with {} cores'.format(n_cores))

        if len(already_processed) > 0:
            print(
                'Warning: when using multiprocessing, all images are reprocessed'
            )

        # workerpool is a module-level alias in the original script
        # (e.g. multiprocessing.pool.Pool)
        pool = workerpool(n_cores)

        image_batches = list(
            chunks_by_number_of_chunks(image_file_names, n_cores))
        results = pool.map(
            partial(process_images,
                    tf_detector=tf_detector,
                    confidence_threshold=confidence_threshold), image_batches)

        results = list(itertools.chain.from_iterable(results))

    # This was modified in place, but we also return it for backwards-compatibility.
    return results
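
chunks_by_number_of_chunks is not shown in this excerpt; based on how it is used above, a minimal implementation consistent with that usage might look like this (an assumption, not necessarily the repo's actual helper):

def chunks_by_number_of_chunks(ls, n):
    # Yield n roughly equal-sized (interleaved) chunks of a list
    for i in range(n):
        yield ls[i::n]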
Example #7
def load_and_run_detector(model_file, image_file_names, output_dir,
                          render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
                          crop_images=False):
    """Load and run detector on target images, and visualize the results."""
    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # Dictionary mapping output file names to a collision-avoidance count.
    #
    # Since we'll be writing a bunch of files to the same folder, we rename
    # as necessary to avoid collisions.
    output_filename_collision_counts = {}

    def input_file_to_detection_file(fn, crop_index=-1):
        """Creates unique file names for output files.

        This function does 3 things:
        1) If the --crop flag is used, then each input image may produce several output
            crops. For example, if foo.jpg has 3 detections, then this function should
            get called 3 times, with crop_index taking on 0, 1, then 2. Each time, this
            function appends crop_index to the filename, resulting in
                foo_crop00_detections.jpg
                foo_crop01_detections.jpg
                foo_crop02_detections.jpg

        2) If the --recursive flag is used, then the same file (base)name may appear
            multiple times. However, we output into a single flat folder. To avoid
            filename collisions, we prepend an integer prefix to duplicate filenames:
                foo_crop00_detections.jpg
                0000_foo_crop00_detections.jpg
                0001_foo_crop00_detections.jpg

        3) Prepends the output directory:
                out_dir/foo_crop00_detections.jpg

        Args:
            fn: str, filename
            crop_index: int, crop number

        Returns: output file path
        """
        fn = os.path.basename(fn).lower()
        name, ext = os.path.splitext(fn)
        if crop_index >= 0:
            name += '_crop{:0>2d}'.format(crop_index)
        fn = '{}{}{}'.format(name, ImagePathUtils.DETECTION_FILENAME_INSERT, '.jpg')
        if fn in output_filename_collision_counts:
            # Count this collision against the original name before renaming,
            # so the next duplicate gets a distinct prefix
            n_collisions = output_filename_collision_counts[fn]
            output_filename_collision_counts[fn] += 1
            fn = '{:0>4d}'.format(n_collisions) + '_' + fn
        else:
            output_filename_collision_counts[fn] = 0
        fn = os.path.join(output_dir, fn)
        return fn

    for im_file in tqdm(image_file_names):

        try:
            start_time = time.time()

            image = viz_utils.load_image(im_file)

            elapsed = time.time() - start_time
            time_load.append(elapsed)

        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:
            start_time = time.time()

            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)

            elapsed = time.time() - start_time
            time_infer.append(elapsed)

        except Exception as e:
            print('An error occurred while running the detector on image {}. Exception: {}'.format(im_file, e))
            continue

        try:
            if crop_images:

                images_cropped = viz_utils.crop_image(result['detections'], image)

                for i_crop, cropped_image in enumerate(images_cropped):
                    output_full_path = input_file_to_detection_file(im_file, i_crop)
                    cropped_image.save(output_full_path)

            else:

                # image is modified in place
                viz_utils.render_detection_bounding_boxes(result['detections'], image,
                                                          label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                                                          confidence_threshold=render_confidence_threshold)
                output_full_path = input_file_to_detection_file(im_file)
                image.save(output_full_path)

        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.format(im_file, e))
            continue

    # ...for each image

    # Guard against the case where no images loaded or ran successfully
    ave_time_load = statistics.mean(time_load) if len(time_load) > 0 else 0
    ave_time_infer = statistics.mean(time_infer) if len(time_infer) > 0 else 0
    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'
    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_load),
                                                    std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_infer),
                                                      std_dev_time_infer))
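
To make the naming scheme described in the docstring concrete, here is a standalone sketch of the same logic ('_detections' stands in for ImagePathUtils.DETECTION_FILENAME_INSERT):

import os

output_filename_collision_counts = {}

def unique_output_name(fn, crop_index=-1, insert='_detections'):
    name, ext = os.path.splitext(os.path.basename(fn).lower())
    if crop_index >= 0:
        name += '_crop{:0>2d}'.format(crop_index)
    fn = name + insert + '.jpg'
    if fn in output_filename_collision_counts:
        n_collisions = output_filename_collision_counts[fn]
        output_filename_collision_counts[fn] += 1
        fn = '{:0>4d}_{}'.format(n_collisions, fn)
    else:
        output_filename_collision_counts[fn] = 0
    return fn

print(unique_output_name('a/foo.jpg'))     # foo_detections.jpg
print(unique_output_name('b/foo.jpg'))     # 0000_foo_detections.jpg
print(unique_output_name('c/foo.jpg'))     # 0001_foo_detections.jpg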
Example #8
def detect_process():

    while True:

        # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
        serialized_entry = db.lpop(config.REDIS_QUEUE_NAME)
        all_detection_results = []
        inference_time_detector = []

        if serialized_entry:

            entry = json.loads(serialized_entry)
            request_id = entry['id']
            print('Processing images from request id:', request_id)
            return_confidence_threshold = entry['return_confidence_threshold']

            try:

                temp_direc = os.path.join(config.TEMP_FOLDER, request_id)
                assert os.path.isdir(temp_direc), \
                    'Could not find temporary folder {}'.format(temp_direc)

                for filename in os.listdir(temp_direc):

                    image_path = f'{temp_direc}/{filename}'
                    print('Reading image from {}'.format(image_path))
                    # Close the handle after loading (assumes viz_utils.load_image
                    # reads the image data eagerly)
                    with open(image_path, 'rb') as image_handle:
                        image = viz_utils.load_image(image_handle)

                    start_time = time.time()
                    result = detector.generate_detections_one_image(
                        image, filename)
                    all_detection_results.append(result)

                    elapsed = time.time() - start_time
                    inference_time_detector.append(elapsed)

            except Exception as e:

                print('Detection error: ' + str(e))

                db.set(
                    entry['id'],
                    json.dumps({
                        'status': 500,
                        'error': 'Detection error: ' + str(e)
                    }))

                continue

            # Filter the detections by the confidence threshold
            #
            # Each result is [ymin, xmin, ymax, xmax, confidence, category]
            #
            # Coordinates are relative, with the origin in the upper-left
            detections = {}

            try:

                for result in all_detection_results:

                    image_name = result['file']
                    _detections = result.get('detections', None)
                    detections[image_name] = []

                    if _detections is None:
                        continue

                    for d in _detections:
                        if d['conf'] > return_confidence_threshold:
                            res = TFDetector.convert_to_tf_coords(d['bbox'])
                            res.append(d['conf'])
                            res.append(int(d['category']))
                            detections[image_name].append(res)

                db.set(
                    entry['id'],
                    json.dumps({
                        'status': 200,
                        'detections': detections,
                        'inference_time_detector': inference_time_detector
                    }))

            except Exception as e:
                print('Error consolidating the detection boxes: ' + str(e))

                db.set(
                    entry['id'],
                    json.dumps({
                        'status': 500,
                        'error': 'Error consolidating the detection boxes: ' + str(e)
                    }))

        # ...if serialized_entry

        else:
            time.sleep(0.005)
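
Regarding the TODO at the top of the loop: a blocking read might look like the sketch below, assuming db is a redis.Redis client (blpop blocks until an entry is available, which would eliminate the sleep() branch):

_, serialized_entry = db.blpop(config.REDIS_QUEUE_NAME)
entry = json.loads(serialized_entry)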
Example #9
def load_and_run_detector(
        model_file,
        image_file_names,
        output_dir,
        render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
        crop_images=False):

    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    # load and run detector on target images, and visualize the results
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # Dictionary mapping output file names to a collision-avoidance count.
    #
    # Since we'll be writing a bunch of files to the same folder, we rename
    # as necessary to avoid collisions.
    output_filename_collision_counts = {}

    def input_file_to_detection_file(fn, crop_index=-1):
        """
        Append "_detections" to fn and prepend the output folder.

        Because we may be mapping many directories to one, we can have filename
        collisions.  Resolve by adding integer prefixes to duplicate filenames.
        """
        fn = os.path.basename(fn).lower()
        name, ext = os.path.splitext(fn)
        if crop_index >= 0:
            name += '_crop{:0>2d}'.format(crop_index)
        fn = '{}{}{}'.format(name, ImagePathUtils.DETECTION_FILENAME_INSERT,
                             '.jpg')
        if fn in output_filename_collision_counts:
            # Count this collision against the original name before renaming,
            # so the next duplicate gets a distinct prefix
            n_collisions = output_filename_collision_counts[fn]
            output_filename_collision_counts[fn] = n_collisions + 1
            fn = '{:0>4d}'.format(n_collisions) + '_' + fn
        else:
            output_filename_collision_counts[fn] = 0
        fn = os.path.join(output_dir, fn)
        return fn

    for im_file in tqdm(image_file_names):

        try:

            start_time = time.time()

            image = viz_utils.load_image(im_file)

            elapsed = time.time() - start_time
            time_load.append(elapsed)

        except Exception as e:

            print('Image {} cannot be loaded. Exception: {}'.format(
                im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:

            start_time = time.time()

            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)

            elapsed = time.time() - start_time
            time_infer.append(elapsed)

        except Exception as e:

            print(
                'An error occurred while running the detector on image {}. Exception: {}'
                .format(im_file, e))
            continue

        try:

            if crop_images:

                images_cropped = viz_utils.crop_image(result['detections'],
                                                      image)

                for i_crop, cropped_image in enumerate(images_cropped):
                    output_full_path = input_file_to_detection_file(
                        im_file, i_crop)
                    cropped_image.save(output_full_path)

            else:

                # image is modified in place
                viz_utils.render_detection_bounding_boxes(
                    result['detections'],
                    image,
                    label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                    confidence_threshold=render_confidence_threshold)
                output_full_path = input_file_to_detection_file(im_file)
                image.save(output_full_path)

        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.
                  format(im_file, e))
            continue

    # ...for each image

    # Guard against the case where no images loaded or ran successfully
    ave_time_load = statistics.mean(time_load) if len(time_load) > 0 else 0
    ave_time_infer = statistics.mean(time_infer) if len(time_infer) > 0 else 0
    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(
            statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(
            statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'
    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_load), std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(
        humanfriendly.format_timespan(ave_time_infer), std_dev_time_infer))
Example #10
                        detections.append({
                            'category': str(cls),
                            'conf': conf,
                            'bbox': ct_utils.truncate_float_array(api_box, precision=COORD_DIGITS)
                        })
                        max_conf = max(max_conf, conf)

        except Exception as e:
            result['failure'] = FAILURE_INFER
            print('PTDetector: image {} failed during inference: {}'.format(image_id, str(e)))

        result['max_detection_conf'] = max_conf
        result['detections'] = detections

        return result


if __name__ == '__main__':
    # for testing

    import visualization.visualization_utils as viz_utils

    model_file = "<path to the model .pt file>"
    im_file = "test_images/test_images/island_conservation_camera_traps_palau_cam10a_cam10a12122018_palau_cam10a12122018_20181108_174532_rcnx1035.jpg"

    detector = PTDetector(model_file)
    image = viz_utils.load_image(im_file)

    res = detector.generate_detections_one_image(image, im_file, detection_threshold=0.2)
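
A possible continuation of the test above (a sketch): render the detections back onto the image, assuming PTDetector returns the same result format as TFDetector, i.e. 'detections' is a list of dicts with 'bbox', 'conf', and 'category':

viz_utils.render_detection_bounding_boxes(res['detections'], image,
                                          confidence_threshold=0.2)
image.save('test_detections.jpg')  # output path is a placeholder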
Example #11
        box_b = row['rgb_bottom']
        rgb_boxes.append([box_l, box_r, box_t, box_b])

    if row['ir_image_path'] == ir_image_path:
        box_l = row['ir_left']
        box_r = row['ir_right']
        box_t = row['ir_top']
        box_b = row['ir_bottom']
        ir_boxes.append([box_l, box_r, box_t, box_b])

print('Found {} RGB, {} IR annotations for this image'.format(
    len(rgb_boxes), len(ir_boxes)))

#%% Render the detections on the image(s)

img_rgb = visualization_utils.load_image(rgb_image_fn)
img_ir = visualization_utils.load_image(ir_image_fn)

for b in rgb_boxes:

    # In pixel coordinates
    box_left = b[0]
    box_right = b[1]
    box_top = b[2]
    box_bottom = b[3]
    assert box_top > box_bottom
    assert box_right > box_left
    ymin = box_bottom
    ymax = box_top
    xmin = box_left
    xmax = box_right
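
If these pixel coordinates need to be fed to the rendering utilities used elsewhere in this document, they must first be normalized; a sketch, assuming ymin/ymax are measured from the top of the image:

# Convert the pixel-space box above to the relative
# [y_min, x_min, y_max, x_max] layout (values in [0, 1])
im_width, im_height = img_rgb.size
relative_box = [ymin / im_height, xmin / im_width,
                ymax / im_height, xmax / im_width]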
Example #12
def detect_sync():

    if not has_access(request):
        print('Access denied, please provide a valid API key')
        return _make_error_response(
            403, 'Access denied, please provide a valid API key')

    # Check whether the request_processing_function had an error
    post_data = check_posted_data(request)
    if post_data.get('error_code', None) is not None:
        return _make_error_response(post_data.get('error_code'),
                                    post_data.get('error_message'))

    render_boxes = post_data.get('render_boxes')
    return_confidence_threshold = post_data.get('return_confidence_threshold')
    rendering_confidence_threshold = post_data.get(
        'rendering_confidence_threshold')

    redis_id = str(uuid.uuid4())
    d = {
        'id': redis_id,
        'render_boxes': render_boxes,
        'return_confidence_threshold': return_confidence_threshold
    }
    temp_direc = os.path.join(config.TEMP_FOLDER, redis_id)

    try:

        try:
            # Write images to temporary files
            #
            # TODO: read from memory rather than using intermediate files
            os.makedirs(temp_direc, exist_ok=True)
            for name, file in request.files.items():
                if file.content_type in config.IMAGE_CONTENT_TYPES:
                    filename = request.files[name].filename
                    image_path = os.path.join(temp_direc, filename)
                    print('Saving image {} to {}'.format(name, image_path))
                    file.save(image_path)
                    assert os.path.isfile(image_path), \
                        'Error creating file {}'.format(image_path)

        except Exception as e:
            return _make_error_object(500, 'Error saving images: ' + str(e))

        # Submit the image(s) for processing by api_backend.py, who is waiting on this queue
        db.rpush(config.REDIS_QUEUE_NAME, json.dumps(d))

        while True:

            # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
            result = db.get(redis_id)

            if result:

                result = json.loads(result.decode())
                print('Processing result {}'.format(str(result)))

                if result['status'] == 200:
                    detections = result['detections']
                    db.delete(redis_id)

                else:
                    db.delete(redis_id)
                    print('Detection error: ' + str(result))
                    return _make_error_response(
                        500, 'Detection error: ' + str(result))

                try:
                    print('detect_sync: postprocessing and sending images back...')
                    fields = {
                        'detection_result': ('detection_result',
                                             json.dumps(detections),
                                             'application/json'),
                    }

                    if render_boxes and result['status'] == 200:

                        print('Rendering images')

                        for image_name, image_detections in detections.items():

                            # Close the handle after loading (assumes
                            # viz_utils.load_image reads the data eagerly)
                            with open(f'{temp_direc}/{image_name}', 'rb') as f:
                                image = viz_utils.load_image(f)

                            _detections = []
                            for d in image_detections:
                                # d is [ymin, xmin, ymax, xmax, conf, category]
                                y1, x1, y2, x2 = d[0:4]
                                bbox = [x1, y1, x2 - x1, y2 - y1]
                                _detections.append({
                                    'bbox': bbox,
                                    'conf': d[4],
                                    'category': d[5]
                                })

                            viz_utils.render_detection_bounding_boxes(
                                _detections,
                                image,
                                confidence_threshold=rendering_confidence_threshold)

                            output_img_stream = BytesIO()
                            image.save(output_img_stream, format='jpeg')
                            output_img_stream.seek(0)
                            fields[image_name] = (image_name,
                                                  output_img_stream,
                                                  'image/jpeg')
                        print('Done rendering images')

                    m = MultipartEncoder(fields=fields)
                    return Response(m.to_string(), mimetype=m.content_type)

                except Exception as e:

                    print(traceback.format_exc())
                    print(
                        'Error returning result or rendering the detection boxes: '
                        + str(e))

                finally:

                    try:
                        print('Removing temporary files')
                        shutil.rmtree(temp_direc)
                    except Exception as e:
                        print('Error removing temporary folder {}: {}'.format(
                            temp_direc, str(e)))

            else:
                time.sleep(0.005)

            # ...if we do/don't have a request available on the queue

        # ...while(True)

    except Exception as e:

        print(traceback.format_exc())
        return _make_error_object(500, 'Error processing images: ' + str(e))
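
A hypothetical client for this endpoint; the URL, route, and form fields below are assumptions for illustration:

import requests

with open('test.jpg', 'rb') as f:
    r = requests.post('http://localhost:5050/detect_sync',
                      files={'test.jpg': ('test.jpg', f, 'image/jpeg')},
                      data={'render_boxes': 'true',
                            'return_confidence_threshold': '0.8'})
print(r.status_code, r.headers.get('content-type'))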
Example #13
def _detect_process_request_data(request):
    """
    Processes the request data to the /detect endpoint. It loads data or files into a
    dictionary for access in the API function. It is passed as a parameter to the API setup.
    Args:
        request: The request object from the @ai4e_service.api_sync_func

    Returns:
        A dict with the parameters parsed from user input
    """
    files = request.files
    params = request.args

    # Check that the uploaded content is not too big.
    #
    # request.content_length is the length of the total payload; we also refuse
    # to proceed if the content length cannot be determined.
    content_length = request.content_length
    if not content_length:
        return _make_error_object(
            411,
            'No image(s) are sent, or content length cannot be determined.')
    if content_length > api_config.MAX_CONTENT_LENGTH_IN_MB * 1024 * 1024:
        return _make_error_object(
            413,
            ('Payload size {:.2f} MB exceeds the maximum allowed of {} MB. '
             'Please upload fewer or more compressed images.').format(
                 content_length / (1024 * 1024),
                 api_config.MAX_CONTENT_LENGTH_IN_MB))

    render_boxes = params.get('render', '') in ('True', 'true')

    # validate detection confidence value
    if 'confidence' in params:
        detection_confidence = float(params['confidence'])
        print(
            'runserver, post_detect_sync, user specified detection confidence: ',
            detection_confidence)  # TODO delete
        if detection_confidence < 0.0 or detection_confidence > 1.0:
            return _make_error_object(
                400,
                'Detection confidence {} is invalid. Needs to be between 0.0 and 1.0.'
                .format(detection_confidence))
    else:
        detection_confidence = api_config.DEFAULT_DETECTION_CONFIDENCE
    log.log_info('detection confidence', detection_confidence)

    # check that the number of images is acceptable
    num_images = sum(
        file.content_type in api_config.IMAGE_CONTENT_TYPES
        for file in files.values())
    print('runserver, post_detect_sync, number of images received: ',
          num_images)
    log.log_info('number of images received', num_images)

    if num_images > api_config.MAX_IMAGES_ACCEPTED:
        return _make_error_object(
            413,
            'Too many images. Maximum number of images that can be processed in one call is {}.'
            .format(api_config.MAX_IMAGES_ACCEPTED))
    elif num_images == 0:
        return _make_error_object(
            400,
            'No image(s) of accepted types (image/jpeg, image/png, application/octet-stream) received.'
        )

    # check if classification is requested and if so, which classifier to use
    if 'classification' in params:
        classification = params['classification']

        if classification not in api_config.CLASSIFICATION_CLASS_NAMES.keys():
            supported = str(list(
                api_config.CLASSIFICATION_CLASS_NAMES.keys())).replace(
                    '[', '').replace(']', '')

            error_message = ('Classification name provided is not supported. '
                             'The supported classifiers are: {}'.format(supported))
            return _make_error_object(400, error_message)
    else:
        classification = None

    # read input images and parameters
    try:
        print(
            'runserver, _detect_process_request_data, reading input images...')
        images, image_names = [], []
        for k, file in files.items():
            # file of type SpooledTemporaryFile has attributes content_type and a read() method
            if file.content_type in api_config.IMAGE_CONTENT_TYPES:
                images.append(viz_utils.load_image(file))
                image_names.append(k)
    except Exception as e:
        log.log_exception('Error reading the images: ' + str(e))
        return _make_error_object(500, 'Error reading the images: ' + str(e))

    return {
        'render_boxes': render_boxes,
        'detection_confidence': detection_confidence,
        'images': images,
        'image_names': image_names,
        'classification': classification
    }
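
The parser can be exercised outside the AI4E wrapper with Flask's test request context; in this sketch the route, query parameters, field name, and image file are all placeholders:

import io
from flask import Flask, request

app = Flask(__name__)
with open('test.jpg', 'rb') as f:
    jpeg_bytes = f.read()

with app.test_request_context('/detect?render=true&confidence=0.8',
                              method='POST',
                              data={'image1': (io.BytesIO(jpeg_bytes), 'image1.jpg')}):
    parsed = _detect_process_request_data(request)
    print(parsed['image_names'])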