def main():
    
    parser = argparse.ArgumentParser()
    parser.add_argument('results_file', type=str, help='Input .json filename')
    parser.add_argument('base_input_folder', type=str, help='Input image folder')
    parser.add_argument('base_output_folder', type=str, help='Output image folder')
    
    options = SeparateDetectionsIntoFoldersOptions()
    parser.add_argument('--animal_threshold', type=float, default=options.animal_threshold, 
                        help='Confidence threshold for the animal category')
    parser.add_argument('--human_threshold', type=float, default=options.human_threshold, 
                        help='Confidence threshold for the human category')
    parser.add_argument('--vehicle_threshold', type=float, default=options.vehicle_threshold, 
                        help='Confidence threshold for vehicle category')
    # dest ensures args_to_object maps this value onto options.n_threads
    parser.add_argument('--nthreads', dest='n_threads', type=int, default=options.n_threads,
                        help='Number of threads to use for parallel operation')
    parser.add_argument('--allow_existing_directory', action='store_true', 
                        help='Proceed even if the target directory exists and is not empty')
    
    if len(sys.argv[1:])==0:
        parser.print_help()
        parser.exit()
        
    args = parser.parse_args()    
    
    # Convert to an options object
    args_to_object(args, options)
    
    separate_detections_into_folders(options)
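
# Example invocation of the CLI defined above (illustrative only; the script
# name, paths, and threshold value are assumptions, not taken from this file):
#
# python separate_detections_into_folders.py "d:\results\detections.json" "d:\images" "d:\images_sorted" --animal_threshold 0.8 --nthreads 4
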
def main():
    
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', type=str, help='Input .json filename')
    parser.add_argument('output_file', type=str, help='Output .json filename')
    parser.add_argument('--query', type=str, default=None, help='Query string to search for (omitting this matches all)')
    parser.add_argument('--replacement', type=str, default=None, help='Replace [query] with this')
    parser.add_argument('--confidence_threshold', type=float, default=None, help='Remove detections below this confidence level')
    parser.add_argument('--split_folders', action='store_true', help='Split .json files by leaf-node folder')
    parser.add_argument('--split_folder_param', type=int, help='Directory level count for n_from_bottom and n_from_top splitting')
    parser.add_argument('--split_folder_mode', type=str, help='Folder level to use for splitting ("top" or "bottom")')
    parser.add_argument('--make_folder_relative', action='store_true', help='Make image paths relative to their containing folder (only meaningful with split_folders)')
    parser.add_argument('--overwrite_json_files', action='store_true', help='Overwrite output files')
    parser.add_argument('--copy_jsons_to_folders', action='store_true', help='When using split_folders and make_folder_relative, copy jsons to their corresponding folders (relative to output_file)')
    parser.add_argument('--create_folders', action='store_true', help="When using copy_jsons_to_folders, create folders that don't exist")
    
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
        
    args = parser.parse_args()    
    
    # Convert to an options object
    options = SubsetJsonDetectorOutputOptions()
    if args.create_folders:
        options.copy_jsons_to_folders_directories_must_exist = False
        
    args_to_object(args, options)
    
    subset_json_detector_output(args.input_file, args.output_file, options)
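
# Example invocation of the CLI defined above (illustrative only; the script
# name, paths, and query string are assumptions):
#
# python subset_json_detector_output.py "d:\results\detections.json" "d:\results\detections_subset.json" --query "camera01/" --confidence_threshold 0.15
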
def main():

    parser = argparse.ArgumentParser(description=(
        'Run MegaDetector on each frame in a video, optionally producing a new video with detections annotated'
    ))

    parser.add_argument('model_file', type=str, help='MegaDetector model file')

    parser.add_argument('input_video_file',
                        type=str,
                        help='video file to process')

    parser.add_argument(
        '--output_json_file',
        type=str,
        default=None,
        help='.json output file, defaults to [video file].json')

    parser.add_argument('--output_video_file',
                        type=str,
                        default=None,
                        help='video output file, defaults to [video file].mp4')

    parser.add_argument(
        '--render_output_video',
        action='store_true',
        help='enable video output rendering (disabled by default)')

    parser.add_argument(
        '--delete_output_frames',
        action='store_true',
        help='delete intermediate frame files when processing completes (disabled by default)')

    parser.add_argument(
        '--confidence_threshold',
        type=float,
        default=0.8,
        help="dont render boxes with confidence below this threshold")

    parser.add_argument('--n_cores',
                        type=int,
                        default=1,
                        help='number of cores to use for detection (CPU only)')

    parser.add_argument('--debug_max_frames',
                        type=int,
                        default=-1,
                        help='trim to N frames for debugging')

    args = parser.parse_args()
    options = ProcessVideoOptions()
    args_to_object(args, options)

    process_video(options)
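
# Example invocation of the CLI defined above (illustrative only; the script
# name and paths are assumptions; [model_file] is a placeholder):
#
# python process_video.py [model_file] "d:\video\clip01.mp4" --render_output_video --confidence_threshold 0.8
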
def main():

    parser = argparse.ArgumentParser(description=('Run MegaDetector on each frame in a video (or every Nth frame), optionally producing a new video with detections annotated'))

    parser.add_argument('model_file', type=str,
                        help='MegaDetector model file')

    parser.add_argument('input_video_file', type=str,
                        help='video file (or folder) to process')

    parser.add_argument('--recursive', action='store_true',
                        help='recurse into [input_video_file]; only meaningful if a folder is specified as input')
    
    parser.add_argument('--frame_folder', type=str, default=None,
                        help='folder to use for intermediate frame storage, defaults to a folder in the system temporary folder')
                        
    parser.add_argument('--rendering_folder', type=str, default=None,
                        help='folder to use for rendered frame storage, defaults to a folder in the system temporary folder')
    
    parser.add_argument('--output_json_file', type=str,
                        default=None, help='.json output file, defaults to [video file].json')

    parser.add_argument('--output_video_file', type=str,
                        default=None, help='video output file (or folder), defaults to [video file].mp4 for files, or [video file]_annotated for folders')

    parser.add_argument('--render_output_video', action='store_true',
                        help='enable video output rendering (not rendered by default)')

    parser.add_argument('--keep_output_frames',
                        action='store_true', help='disable the deletion of intermediate images (pre- and post-detection rendered frames)')

    parser.add_argument('--rendering_confidence_threshold', type=float,
                        default=0.8, help="don't render boxes with confidence below this threshold")

    parser.add_argument('--json_confidence_threshold', type=float,
                        default=0.0, help="don't include boxes in the .json file with confidence below this threshold")

    parser.add_argument('--n_cores', type=int,
                        default=1, help='number of cores to use for detection (CPU only)')

    parser.add_argument('--frame_sample', type=int,
                        default=None, help='process every Nth frame (defaults to every frame)')

    parser.add_argument('--debug_max_frames', type=int,
                        default=-1, help='trim to N frames for debugging (impacts model execution, not frame rendering)')

    args = parser.parse_args()
    options = ProcessVideoOptions()
    args_to_object(args,options)

    if os.path.isdir(options.input_video_file):
        process_video_folder(options)
    else:
        process_video(options)
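
# Example invocation of the CLI defined above, processing every 10th frame of
# each video in a folder (illustrative only; the script name and paths are
# assumptions; [model_file] is a placeholder):
#
# python process_video.py [model_file] "d:\videos" --recursive --frame_sample 10 --output_json_file "d:\videos\video_results.json"
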
def main():

    default_options = PostProcessingOptions()

    parser = argparse.ArgumentParser()
    parser.add_argument('api_output_file', action='store', type=str,
                        help='.json file produced by the batch inference API (detection/classification, required)')
    parser.add_argument('output_dir', action='store', type=str,
                        help='Base directory for output (required)')
    parser.add_argument('--image_base_dir', action='store', type=str,
                        help='Base directory for images (optional, can compute statistics without images)',
                        default=default_options.image_base_dir)
    parser.add_argument('--ground_truth_json_file', action='store', type=str,
                        help='Ground truth labels (optional, can render detections without ground truth)',
                        default=default_options.ground_truth_json_file)
    parser.add_argument('--confidence_threshold', action='store', type=float,
                        help='Confidence threshold for statistics and visualization',
                        default=default_options.confidence_threshold)
    parser.add_argument('--target_recall', action='store', type=float, 
                        help='Target recall (for statistics only)',
                        default=default_options.target_recall)
    parser.add_argument('--num_images_to_sample', action='store', type=int,
                        help='Number of images to visualize (defaults to 500; -1 to include all images)',
                        default=default_options.num_images_to_sample)
    parser.add_argument('--viz_target_width', action='store', type=int,
                        help='Output image width',
                        default=default_options.viz_target_width)
    parser.add_argument('--random_output_sort', action='store_true', help='Sort output randomly (defaults to sorting by filename)')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()
    args.sort_html_by_filename = not args.random_output_sort

    options = PostProcessingOptions()
    args_to_object(args,options)

    process_batch_results(options)
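
# Example invocation of the CLI defined above (illustrative only; the script
# name and paths are assumptions):
#
# python postprocess_batch_results.py "d:\results\detections.json" "d:\results\postprocessing" --image_base_dir "d:\images" --num_images_to_sample 1000
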
def main():
    # With HTML (debug)
    # python find_repeat_detections.py "D:\temp\tigers_20190308_all_output.json" "D:\temp\tigers_20190308_all_output.filtered.json" --renderHtml --debugMaxDir 100 --imageBase "d:\wildlife_data\tigerblobs" --outputBase "d:\temp\repeatDetections"

    # Without HTML (debug)
    # python find_repeat_detections.py "D:\temp\tigers_20190308_all_output.json" "D:\temp\tigers_20190308_all_output.filtered.json" --debugMaxDir 100 --imageBase "d:\wildlife_data\tigerblobs" --outputBase "d:\temp\repeatDetections"

    # With HTML (for real)
    # python find_repeat_detections.py "D:\temp\tigers_20190308_all_output.json" "D:\temp\tigers_20190308_all_output.filtered.json" --renderHtml --imageBase "d:\wildlife_data\tigerblobs" --outputBase "d:\temp\repeatDetections"

    defaultOptions = RepeatDetectionOptions()

    parser = argparse.ArgumentParser()
    parser.add_argument('inputFile')
    parser.add_argument('outputFile')
    parser.add_argument(
        '--imageBase',
        action='store',
        type=str,
        help=
        'Image base dir, relevant if renderHtml is True or if omitFilteringFolder is not set'
    )
    parser.add_argument(
        '--outputBase',
        action='store',
        type=str,
        help='Html output dir, only relevant if renderHtml is True')
    parser.add_argument(
        '--filterFileToLoad',
        action='store',
        type=str,
        default=
        '',  # checks for string length so default needs to be the empty string
        help=
        'Path to detectionIndex.json, which should be inside a folder of images that are manually verified to _not_ contain valid animals'
    )

    parser.add_argument(
        '--confidenceMax',
        action='store',
        type=float,
        default=defaultOptions.confidenceMax,
        help=
        'Detection confidence threshold; don\'t process anything above this')
    parser.add_argument(
        '--confidenceMin',
        action='store',
        type=float,
        default=defaultOptions.confidenceMin,
        help=
        'Detection confidence threshold; don\'t process anything below this')
    parser.add_argument(
        '--iouThreshold',
        action='store',
        type=float,
        default=defaultOptions.iouThreshold,
        help=
        'Detections with IOUs greater than this are considered "the same detection"'
    )
    parser.add_argument(
        '--occurrenceThreshold',
        action='store',
        type=int,
        default=defaultOptions.occurrenceThreshold,
        help=
        'More than this many near-identical detections in a group (e.g. a folder) is considered suspicious'
    )
    parser.add_argument(
        '--nWorkers',
        action='store',
        type=int,
        default=defaultOptions.nWorkers,
        help='Level of parallelism for rendering and IOU computation')
    parser.add_argument(
        '--maxSuspiciousDetectionSize',
        action='store',
        type=float,
        default=defaultOptions.maxSuspiciousDetectionSize,
        help=
        'Detections larger than this fraction of image area are not considered suspicious'
    )

    parser.add_argument('--renderHtml',
                        action='store_true',
                        dest='bRenderHtml',
                        help='Enable rendering of HTML output')
    parser.add_argument(
        '--omitFilteringFolder',
        action='store_false',
        dest='bWriteFilteringFolder',
        help=
        'Skip creating the folder of rendered detections used for post-filtering (created by default)')
    parser.add_argument(
        '--excludeClasses',
        action='store',
        nargs='+',
        type=int,
        default=defaultOptions.excludeClasses,
        help=
        'List of classes (ints) to exclude from analysis, separated by spaces')

    parser.add_argument('--debugMaxDir', action='store', type=int, default=-1)
    parser.add_argument('--debugMaxRenderDir',
                        action='store',
                        type=int,
                        default=-1)
    parser.add_argument('--debugMaxRenderDetection',
                        action='store',
                        type=int,
                        default=-1)
    parser.add_argument('--debugMaxRenderInstance',
                        action='store',
                        type=int,
                        default=-1)

    parser.add_argument('--forceSerialComparisons',
                        action='store_false',
                        dest='bParallelizeComparisons',
                        help='Disable parallelization of detection comparisons')
    parser.add_argument('--forceSerialRendering',
                        action='store_false',
                        dest='bParallelizeRendering',
                        help='Disable parallelization of image rendering')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # Convert to an options object
    options = RepeatDetectionOptions()

    ct_utils.args_to_object(args, options)

    find_repeat_detections(args.inputFile, args.outputFile, options)
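
# Illustrative second pass of the repeat-detection workflow above (the script
# name and paths are assumptions): after manually verifying that the remaining
# rendered images do not contain valid animals, re-run with --filterFileToLoad
# pointing at the detectionIndex.json written during the first pass.
#
# python find_repeat_detections.py "d:\temp\results.json" "d:\temp\results.filtered.json" --filterFileToLoad "d:\temp\repeatDetections\detectionIndex.json"
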
def main():

    defaultOptions = repeat_detections_core.RepeatDetectionOptions()

    parser = argparse.ArgumentParser()
    parser.add_argument('inputFile')
    parser.add_argument(
        '--outputFile',
        action='store',
        type=str,
        default=None,
        help=
        ".json file to write filtered results to... don't use this if you're going to do manual review of the repeat detection images"
    )
    parser.add_argument(
        '--imageBase',
        action='store',
        type=str,
        default='',
        help=
        'Image base dir, relevant if renderHtml is True or if omitFilteringFolder is not set'
    )
    parser.add_argument('--outputBase',
                        action='store',
                        type=str,
                        default='',
                        help='HTML or filtering folder output dir')
    parser.add_argument(
        '--filterFileToLoad',
        action='store',
        type=str,
        default=
        '',  # checks for string length so default needs to be the empty string
        help=
        'Path to detectionIndex.json, which should be inside a folder of images that are manually verified to _not_ contain valid animals'
    )

    parser.add_argument(
        '--confidenceMax',
        action='store',
        type=float,
        default=defaultOptions.confidenceMax,
        help=
        'Detection confidence threshold; don\'t process anything above this')
    parser.add_argument(
        '--confidenceMin',
        action='store',
        type=float,
        default=defaultOptions.confidenceMin,
        help=
        'Detection confidence threshold; don\'t process anything below this')
    parser.add_argument(
        '--iouThreshold',
        action='store',
        type=float,
        default=defaultOptions.iouThreshold,
        help=
        'Detections with IOUs greater than this are considered "the same detection"'
    )
    parser.add_argument(
        '--occurrenceThreshold',
        action='store',
        type=int,
        default=defaultOptions.occurrenceThreshold,
        help=
        'More than this many near-identical detections in a group (e.g. a folder) is considered suspicious'
    )
    parser.add_argument(
        '--nWorkers',
        action='store',
        type=int,
        default=defaultOptions.nWorkers,
        help='Level of parallelism for rendering and IOU computation')
    parser.add_argument(
        '--maxSuspiciousDetectionSize',
        action='store',
        type=float,
        default=defaultOptions.maxSuspiciousDetectionSize,
        help=
        'Detections larger than this fraction of image area are not considered suspicious'
    )

    parser.add_argument('--renderHtml',
                        action='store_true',
                        dest='bRenderHtml',
                        help='Enable rendering of HTML output')
    parser.add_argument(
        '--omitFilteringFolder',
        action='store_false',
        dest='bWriteFilteringFolder',
        help=
        'Skip creating the folder of rendered detections used for post-filtering (created by default)')
    parser.add_argument(
        '--excludeClasses',
        action='store',
        nargs='+',
        type=int,
        default=defaultOptions.excludeClasses,
        help=
        'List of classes (ints) to exclude from analysis, separated by spaces')

    parser.add_argument(
        '--nDirLevelsFromLeaf',
        default=0,
        type=int,
        help=
        'Number of levels from the leaf folders to use for repeat detection (0 == leaves)'
    )

    parser.add_argument('--debugMaxDir', action='store', type=int, default=-1)
    parser.add_argument('--debugMaxRenderDir',
                        action='store',
                        type=int,
                        default=-1)
    parser.add_argument('--debugMaxRenderDetection',
                        action='store',
                        type=int,
                        default=-1)
    parser.add_argument('--debugMaxRenderInstance',
                        action='store',
                        type=int,
                        default=-1)

    parser.add_argument('--forceSerialComparisons',
                        action='store_false',
                        dest='bParallelizeComparisons',
                        help='Disable parallelization of detection comparisons')
    parser.add_argument('--forceSerialRendering',
                        action='store_false',
                        dest='bParallelizeRendering',
                        help='Disable parallelization of image rendering')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # Convert to an options object
    options = repeat_detections_core.RepeatDetectionOptions()

    ct_utils.args_to_object(args, options)

    repeat_detections_core.find_repeat_detections(args.inputFile,
                                                  args.outputFile, options)
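
# Example invocation of the CLI defined above (illustrative only; the script
# name, paths, and parameter values are assumptions):
#
# python find_repeat_detections.py "d:\temp\results.json" --imageBase "d:\images" --outputBase "d:\temp\repeatDetections" --occurrenceThreshold 15
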
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('results_file', type=str, help='Input .json filename')
    parser.add_argument('base_input_folder',
                        type=str,
                        help='Input image folder')
    parser.add_argument('base_output_folder',
                        type=str,
                        help='Output image folder')

    parser.add_argument(
        '--animal_threshold',
        type=float,
        default=default_threshold,
        help='Confidence threshold for the animal category (default={})'.
        format(default_threshold))
    parser.add_argument(
        '--human_threshold',
        type=float,
        default=default_threshold,
        help='Confidence threshold for the human category (default={})'.format(
            default_threshold))
    parser.add_argument(
        '--vehicle_threshold',
        type=float,
        default=default_threshold,
        help='Confidence threshold for vehicle category (default={})'.format(
            default_threshold))
    parser.add_argument(
        '--n_threads',
        type=int,
        default=1,
        help='Number of threads to use for parallel operation (default=1)')
    parser.add_argument(
        '--allow_existing_directory',
        action='store_true',
        help='Proceed even if the target directory exists and is not empty')
    parser.add_argument(
        '--no_overwrite',
        action='store_true',
        help=
        'Skip images that already exist in the target folder (must also specify --allow_existing_directory)'
    )

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # Convert to an options object
    options = SeparateDetectionsIntoFoldersOptions()

    args_to_object(args, options)

    if args.animal_threshold:
        options.category_name_to_threshold['animal'] = args.animal_threshold

    if args.human_threshold:
        options.category_name_to_threshold['person'] = args.human_threshold

    if args.vehicle_threshold:
        options.category_name_to_threshold['vehicle'] = args.vehicle_threshold

    options.overwrite = (not args.no_overwrite)

    separate_detections_into_folders(options)
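
# Example invocation of the CLI defined above (illustrative only; the script
# name, paths, and threshold values are assumptions):
#
# python separate_detections_into_folders.py "d:\results\detections.json" "d:\images" "d:\images_sorted" --animal_threshold 0.75 --n_threads 8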