# Example 1
def load_and_run_detector(options, detector=None):
    """Run a (possibly pre-loaded) TF detector over the images selected by
    *options*, post-process paths, and write results to options.outputFile.

    Args:
        options: BatchDetectionOptions-like object; fields read here include
            forceCpu, detectorFile, imageFile, outputFile,
            outputPathReplacements (dict of {query: replacement} or None),
            and outputRelativeFilenames.
        detector: optional already-loaded TFDetector; when None, the model at
            options.detectorFile is loaded here.

    Returns:
        (boxes, scores, classes, imageFileNames) from generate_detections,
        or None when no input images are available.
    """
    imageFileNames = options_to_images(options)

    if options.forceCpu:
        # Hide all GPUs from TensorFlow so inference runs on the CPU
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    print('Running detector on {} images'.format(len(imageFileNames)))

    if len(imageFileNames) == 0:
        print('Warning: no files available')
        return

    # Load detector if necessary
    if detector is None:
        startTime = time.time()
        print('Loading model...')
        detector = TFDetector(options.detectorFile)
        elapsed = time.time() - startTime
        print("Loaded model in {}".format(
            humanfriendly.format_timespan(elapsed)))

    # Run detector on target images
    boxes, scores, classes, imageFileNames = generate_detections(
        detector, imageFileNames, options)

    assert len(boxes) == len(imageFileNames)

    print('Writing output...')

    df = detector_output_to_api_output(imageFileNames, options, boxes, scores,
                                       classes)

    # BUG FIX: the previous code assigned into rows produced by iterrows(),
    # which are copies, so neither the path replacements nor the
    # relative-path conversion ever modified the DataFrame that was written
    # out.  Operate on the column directly (also faster than iterrows).
    if options.outputPathReplacements is not None:
        for query, replacement in options.outputPathReplacements.items():
            # regex=False: literal substring replacement, matching the
            # original str.replace() semantics
            df['image_path'] = df['image_path'].str.replace(
                query, replacement, regex=False)

    if options.outputRelativeFilenames and os.path.isdir(options.imageFile):
        df['image_path'] = df['image_path'].apply(
            lambda p: os.path.relpath(p, options.imageFile))

    if options.outputFile.endswith('.csv'):
        write_api_results_csv(df, options.outputFile)
    else:
        # While we're in transition between formats, write out the old format and
        # convert to the new format if .json is requested.
        # Use the public tempfile API instead of the private
        # tempfile._get_candidate_names(), which is unsupported.
        fd, tempfilename = tempfile.mkstemp(suffix='.csv')
        os.close(fd)
        try:
            write_api_results_csv(df, tempfilename)
            convert_output_format.convert_csv_to_json(tempfilename,
                                                      options.outputFile)
        finally:
            os.remove(tempfilename)

    return boxes, scores, classes, imageFileNames
# Example 2
    # Interactive driver: configure a batch-detection run on a local demo
    # image folder and post-process the results.
    options = BatchDetectionOptions()
    options.detectorFile = r'D:\temp\models\megadetector_v3.pb'
    options.imageFile = r'D:\temp\demo_images\ssmini'
    options.outputFile = r'D:\temp\demo_images\ssmini\detector_out.json'
    # Strip the local base folder from the paths stored in the output file
    options.outputPathReplacements = {'D:\\temp\\demo_images\\ssmini\\': ''}
    options.recursive = False
    # options.checkpointFrequency = -1
    options.forceCpu = True
    options.resumeFromCheckpoint = None  # r'C:\Users\dan\AppData\Local\Temp\detector_batch\tmp77xdq9dp'

    if options.forceCpu:
        # Hide all GPUs from TensorFlow so inference runs on the CPU
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # Load the model once up front so load_and_run_detector skips its own load
    print('Loading model...', end='')
    detector = TFDetector(options.detectorFile)
    print('...done')

    boxes, scores, classes, imageFileNames = load_and_run_detector(
        options, detector)

    #%% Post-processing with process_batch_results... this can also be run from the
    #   command line.

    from api.batch_processing.postprocess_batch_results import PostProcessingOptions
    from api.batch_processing.postprocess_batch_results import process_batch_results

    # Build an HTML preview of the detection results next to the images
    ppoptions = PostProcessingOptions()
    ppoptions.image_base_dir = options.imageFile
    ppoptions.detector_output_file = options.outputFile
    ppoptions.output_dir = os.path.join(ppoptions.image_base_dir,