# Assumed imports for these snippets. The standard-library and PyPI modules
# are unambiguous; viz_utils, TFDetector, chunks_by_number_of_chunks, and
# workerpool are repo-local names from the Microsoft CameraTraps codebase,
# so the exact module paths below are a best guess.
import itertools
import json
import time
from functools import partial
from multiprocessing.pool import Pool as workerpool

import humanfriendly
import tensorflow as tf
from tqdm import tqdm

import visualization.visualization_utils as viz_utils
from ct_utils import chunks_by_number_of_chunks
from detection.run_tf_detector import TFDetector


def process_image(im_file, tf_detector, confidence_threshold):
    """Runs MegaDetector on a single image file.

    Args
    - im_file: str, path to an image file
    - tf_detector: TFDetector (loaded model) or str (path to .pb model file)
    - confidence_threshold: float, only detections above this threshold are returned

    Returns
    - result: dict representing detections on the image, or a dict with a
      'failure' key if the image could not be loaded or processed
    """
    if isinstance(tf_detector, str):
        start_time = time.time()
        tf_detector = TFDetector(tf_detector)
        elapsed = time.time() - start_time
        print('Loaded model (worker level) in {}'.format(
            humanfriendly.format_timespan(elapsed)))

    print('Processing image {}'.format(im_file))
    image = None
    try:
        image = viz_utils.load_image(im_file)
    except Exception as e:
        print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
        result = {'file': im_file, 'failure': TFDetector.FAILURE_IMAGE_OPEN}
        return result

    try:
        result = tf_detector.generate_detections_one_image(
            image, im_file, detection_threshold=confidence_threshold)
    except Exception as e:
        print('Image {} cannot be processed. Exception: {}'.format(im_file, e))
        result = {'file': im_file, 'failure': TFDetector.FAILURE_TF_INFER}
        return result

    return result
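
# A minimal usage sketch for process_image(); the model and image paths are
# hypothetical placeholders, not files shipped with the repo. Loading the
# TFDetector once and reusing it is much cheaper than passing a model path
# per call.
if __name__ == '__main__':
    detector = TFDetector('megadetector_v3.pb')  # placeholder model path
    single_result = process_image('cam01/IMG_0001.JPG', detector,
                                  confidence_threshold=0.8)
    print(json.dumps(single_result, indent=1))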
Example #2
def load_and_run_detector_batch(model_file,
                                image_file_names,
                                checkpoint_path=None,
                                confidence_threshold=0,
                                checkpoint_frequency=-1,
                                results=None):

    # A mutable default argument ([]) would be shared across calls, so use
    # None as the sentinel and create a fresh list here instead.
    if results is None:
        results = []

    already_processed = set([i['file'] for i in results])

    # load the detector
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    count = 0  # does not count those already processed
    for im_file in tqdm(image_file_names):
        if im_file in already_processed:  # will not add additional entries not in the starter checkpoint
            continue

        count += 1

        try:
            image = viz_utils.load_image(im_file)
        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(
                im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            results.append(result)
            continue

        try:
            result = tf_detector.generate_detections_one_image(
                image, im_file, detection_threshold=confidence_threshold)
            results.append(result)

        except Exception as e:
            print(
                'An error occurred while running the detector on image {}. Exception: {}'
                .format(im_file, e))
            # Record the failure so the image is not silently dropped
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_TF_INFER
            }
            results.append(result)
            continue

        # checkpoint
        if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:
            print(
                'Writing a new checkpoint after having processed {} images since last restart'
                .format(count))
            with open(checkpoint_path, 'w') as f:
                json.dump({'images': results}, f)

    return results  # actually modified in place
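
# Usage sketch for the batch entry point above (model and image paths are
# placeholders). With checkpoint_frequency=100, partial results are flushed
# to the JSON checkpoint after every 100 newly processed images.
if __name__ == '__main__':
    batch_results = load_and_run_detector_batch(
        'megadetector_v3.pb',
        ['cam01/IMG_0001.JPG', 'cam01/IMG_0002.JPG'],
        checkpoint_path='checkpoint.json',
        confidence_threshold=0.1,
        checkpoint_frequency=100)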
def process_images(im_files, tf_detector, confidence_threshold):

    if isinstance(tf_detector, str):
        start_time = time.time()
        tf_detector = TFDetector(tf_detector)
        elapsed = time.time() - start_time
        print('Loaded model (batch level) in {}'.format(
            humanfriendly.format_timespan(elapsed)))

    results = []
    for im_file in im_files:
        results.append(
            process_image(im_file, tf_detector, confidence_threshold))
    return results
Example #4
def process_images(im_files, tf_detector, confidence_threshold):
    """Runs the MegaDetector over a list of image files.

    Args
    - im_files: list of str, paths to image files
    - tf_detector: TFDetector (loaded model) or str (path to .pb model file)
    - confidence_threshold: float, only detections above this threshold are returned

    Returns
    - results: list of dict, each dict represents detections on one image
        see the 'images' key in https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format
    """
    if isinstance(tf_detector, str):
        start_time = time.time()
        tf_detector = TFDetector(tf_detector)
        elapsed = time.time() - start_time
        print('Loaded model (batch level) in {}'.format(humanfriendly.format_timespan(elapsed)))

    results = []
    for im_file in im_files:
        results.append(process_image(im_file, tf_detector, confidence_threshold))
    return results
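
# Sketch: calling process_images() directly over a small list (placeholder
# paths). Passing the model as a str path exercises the branch where the
# worker loads the model itself, which is how the multiprocessing pool
# below uses this function.
if __name__ == '__main__':
    batch = ['cam01/IMG_0001.JPG', 'cam01/IMG_0002.JPG']
    batch_results = process_images(batch, 'megadetector_v3.pb',
                                   confidence_threshold=0.8)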
def load_and_run_detector_batch(model_file,
                                image_file_names,
                                checkpoint_path=None,
                                confidence_threshold=0,
                                checkpoint_frequency=-1,
                                results=None,
                                n_cores=0):
    """
    Args
    - model_file: str, path to .pb model file
    - image_file_names: list of str, paths to image files
    - checkpoint_path: str, path to JSON checkpoint file
    - confidence_threshold: float, only detections above this threshold are returned
    - checkpoint_frequency: int, write results to JSON checkpoint file every N images
    - results: list of dict, existing results loaded from checkpoint
    - n_cores: int, # of CPU cores to use

    Returns
    - results: list of dict, each dict represents detections on one image
    """
    if results is None:
        results = []

    already_processed = set([i['file'] for i in results])

    if n_cores > 1 and tf.test.is_gpu_available():
        print(
            'Warning: multiple cores requested, but a GPU is available; parallelization across GPUs is not currently supported, defaulting to one GPU'
        )

    # If we're not using multiprocessing...
    if n_cores <= 1 or tf.test.is_gpu_available():

        # Load the detector
        start_time = time.time()
        tf_detector = TFDetector(model_file)
        elapsed = time.time() - start_time
        print('Loaded model in {}'.format(
            humanfriendly.format_timespan(elapsed)))

        # Does not count those already processed
        count = 0

        for im_file in tqdm(image_file_names):

            # Will not add additional entries not in the starter checkpoint
            if im_file in already_processed:
                print('Bypassing image {}'.format(im_file))
                continue

            count += 1

            result = process_image(im_file, tf_detector, confidence_threshold)
            results.append(result)

            # checkpoint
            if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:
                print(
                    'Writing a new checkpoint after having processed {} images since last restart'
                    .format(count))
                with open(checkpoint_path, 'w') as f:
                    json.dump({'images': results}, f)

    else:
        # when using multiprocessing, let the workers load the model
        tf_detector = model_file

        print('Creating pool with {} cores'.format(n_cores))

        if len(already_processed) > 0:
            print(
                'Warning: when using multiprocessing, all images are reprocessed'
            )

        pool = workerpool(n_cores)

        image_batches = list(
            chunks_by_number_of_chunks(image_file_names, n_cores))
        results = pool.map(
            partial(process_images,
                    tf_detector=tf_detector,
                    confidence_threshold=confidence_threshold), image_batches)

        results = list(itertools.chain.from_iterable(results))

    # results may have been modified in place, but we also return it for backwards-compatibility.
    return results
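
# Sketch of the multiprocessing path (placeholder paths): with n_cores > 1
# and no GPU visible, the image list is split into n_cores batches and each
# worker loads its own copy of the model from model_file.
if __name__ == '__main__':
    parallel_results = load_and_run_detector_batch(
        'megadetector_v3.pb',
        ['cam01/IMG_0001.JPG', 'cam01/IMG_0002.JPG'],
        confidence_threshold=0.1,
        n_cores=4)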
def load_and_run_detector_batch(model_file,
                                image_file_names,
                                checkpoint_path=None,
                                confidence_threshold=0,
                                checkpoint_frequency=-1,
                                results=None,
                                n_cores=0):

    if results is None:
        results = []

    already_processed = set([i['file'] for i in results])

    if n_cores > 1 and tf.test.is_gpu_available():
        print(
            'Warning: multiple cores requested, but a GPU is available; parallelization across GPUs is not currently supported, defaulting to one GPU'
        )

    # Evaluate the multiprocessing decision once: with a single core, or when
    # a GPU is available, we run sequentially; otherwise we parallelize
    # across CPU cores.
    use_multiprocessing = n_cores > 1 and not tf.test.is_gpu_available()

    if not use_multiprocessing:

        # Load the detector up front
        start_time = time.time()
        tf_detector = TFDetector(model_file)
        elapsed = time.time() - start_time
        print('Loaded model in {}'.format(
            humanfriendly.format_timespan(elapsed)))

        # Does not count those already processed
        count = 0

        for im_file in tqdm(image_file_names):

            # Will not add additional entries not in the starter checkpoint
            if im_file in already_processed:
                print('Bypassing image {}'.format(im_file))
                continue

            count += 1

            try:
                image = viz_utils.load_image(im_file)
            except Exception as e:
                print('Image {} cannot be loaded. Exception: {}'.format(
                    im_file, e))
                result = {
                    'file': im_file,
                    'failure': TFDetector.FAILURE_IMAGE_OPEN
                }
                results.append(result)
                continue

            try:
                result = tf_detector.generate_detections_one_image(
                    image, im_file, detection_threshold=confidence_threshold)
                results.append(result)

            except Exception as e:
                print(
                    'An error occurred while running the detector on image {}. Exception: {}'
                    .format(im_file, e))
                result = {
                    'file': im_file,
                    'failure': TFDetector.FAILURE_IMAGE_INFER
                }
                results.append(result)
                continue

            # checkpoint
            if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:
                print(
                    'Writing a new checkpoint after having processed {} images since last restart'
                    .format(count))
                with open(checkpoint_path, 'w') as f:
                    json.dump({'images': results}, f)
    else:
        # When using multiprocessing, let the workers load the model; just
        # hand them the model filename.
        tf_detector = model_file

        print('Creating pool with {} cores'.format(n_cores))

        if len(already_processed) > 0:
            print(
                'Warning: when using multiprocessing, all images are reprocessed'
            )

        pool = workerpool(n_cores)

        image_batches = list(
            chunks_by_number_of_chunks(image_file_names, n_cores))
        results = pool.map(
            partial(process_images,
                    tf_detector=tf_detector,
                    confidence_threshold=confidence_threshold), image_batches)

        results = list(itertools.chain.from_iterable(results))

    # This was modified in place, but we also return it for backwards-compatibility.
    return results
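
# Sketch: resuming from a checkpoint written by an earlier run (placeholder
# paths). The checkpoint stores results under the 'images' key; passing them
# back in via results= makes already-processed files get bypassed on the
# sequential path.
if __name__ == '__main__':
    with open('checkpoint.json', 'r') as f:
        restored = json.load(f)['images']
    resumed_results = load_and_run_detector_batch(
        'megadetector_v3.pb',
        ['cam01/IMG_0001.JPG', 'cam01/IMG_0002.JPG'],
        checkpoint_path='checkpoint.json',
        checkpoint_frequency=100,
        results=restored)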