Example #1
            for single_prediction in prediction:
                out_df = out_df.append(
                    pd.DataFrame([[
                        os.path.basename(img_path.rstrip('\n')),
                        img_path.rstrip('\n')
                    ] + single_prediction + [x_size, y_size]],
                                 columns=[
                                     'image', 'image_path', 'xmin', 'ymin',
                                     'xmax', 'ymax', 'label', 'confidence',
                                     'x_size', 'y_size'
                                 ]))
        end = timer()
        print('Processed {} images in {:.1f}sec - {:.1f}FPS'.format(
            len(input_image_paths), end - start,
            len(input_image_paths) / (end - start)))
        out_df.to_csv(FLAGS.box, index=False)

    # This is for videos
    if input_video_paths:
        print('Found {} input videos: {} ...'.format(
            len(input_video_paths),
            [os.path.basename(f) for f in input_video_paths[:5]]))
        start = timer()
        for i, vid_path in enumerate(input_video_paths):
            output_path = os.path.join(
                FLAGS.output,
                os.path.basename(vid_path).replace('.', FLAGS.postfix + '.'))
            detect_video(yolo, vid_path, output_path=output_path)

        end = timer()
        print('Processed {} videos in {:.1f}sec'.format(
            len(input_video_paths), end - start))
    # Close the current yolo session
    yolo.close_session()
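
Note: the examples on this page accumulate rows with DataFrame.append, which was deprecated in pandas 1.4 and removed in pandas 2.0. On a current pandas the same result can be obtained by collecting rows in a list and building the frame once; a minimal sketch (the column names come from the example, the row values are placeholders):

import pandas as pd

columns = ['image', 'image_path', 'xmin', 'ymin', 'xmax', 'ymax',
           'label', 'confidence', 'x_size', 'y_size']

rows = []
# Inside the detection loop, append one plain list per predicted box
# instead of calling out_df.append() repeatedly (values here are placeholders).
rows.append(['img.jpg', '/data/img.jpg', 10, 20, 110, 220, 'fish', 0.87, 640, 480])

# Build the DataFrame once at the end; equivalent to the repeated .append() calls.
out_df = pd.DataFrame(rows, columns=columns)
# When merging several per-image frames instead, use:
# out_df = pd.concat(frames, ignore_index=True)
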
Example #2
 # This is for videos
 # for pre-recorded videos present in the Test_Images folder
 if input_video_paths and not webcam_active:
     print("Found {} input videos: {} ...".format(
         len(input_video_paths),
         [os.path.basename(f) for f in input_video_paths[:5]],
     ))
     out_df = pd.DataFrame(
         columns=["xmin", "ymin", "xmax", "ymax", "label", "confidence"])
     start = timer()
     for i, vid_path in enumerate(input_video_paths):
         output_path = os.path.join(
             FLAGS.output,
             os.path.basename(vid_path).replace(".", FLAGS.postfix + "."),
         )
         prediction = detect_video(yolo, vid_path, output_path=output_path)
         for single_prediction in prediction:
             out_df = out_df.append(
                 pd.DataFrame(
                     [single_prediction],
                     columns=[
                         "xmin",
                         "ymin",
                         "xmax",
                         "ymax",
                         "label",
                         "confidence",
                     ],
                 ))
     end = timer()
     print("Processed {} videos in {:.1f}sec".format(
Example #3
    '''
    parser.add_argument("--input",
                        nargs='?',
                        type=str,
                        required=False,
                        default='./path2your_video',
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " +
                  FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print(
            "Must specify at least video_input_path.  See usage with --help.")
Example #4
    # This is for videos
    # for pre-recorded videos present in the Test_Images folder
    if input_video_paths and not webcam_active:
        print(
            "Found {} input videos: {} ...".format(
                len(input_video_paths),
                [os.path.basename(f) for f in input_video_paths[:5]],
            )
        )
        start = timer()
        for i, vid_path in enumerate(input_video_paths):
            output_path = os.path.join(
                FLAGS.output,
                os.path.basename(vid_path).replace(".", FLAGS.postfix + "."),
            )
            detect_video(yolo, vid_path, output_path=output_path, calibr_param=calibr_param)

        end = timer()
        print(
            "Processed {} videos in {:.1f}sec".format(
                len(input_video_paths), end - start
            )
        )
    # for Webcam
    if webcam_active:
        start = timer()
        detect_webcam(yolo)
        end = timer()
        print("Processed from webcam for {:.1f}sec".format(end - start))

    # Close the current yolo session
    yolo.close_session()
Example #5
def detect(image_test_folder):
    # Delete all default flags
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    """
    Command line options
    """

    parser.add_argument(
        "--input_path",
        type=str,
        default=image_test_folder,
        help=
        "Path to image/video directory. All subdirectories will be included. Default is "
        + image_test_folder,
    )

    parser.add_argument(
        "--output",
        type=str,
        default=fp.detection_results_folder,
        help="Output path for detection results. Default is " +
        fp.detection_results_folder,
    )

    parser.add_argument(
        "--no_save_img",
        default=False,
        action="store_true",
        help=
        "Only save bounding box coordinates but do not save output images with annotated boxes. Default is False.",
    )

    parser.add_argument(
        "--file_types",
        "--names-list",
        nargs="*",
        default=[],
        help=
        "Specify list of file types to include. Default is --file_types .jpg .jpeg .png .mp4",
    )

    parser.add_argument(
        "--yolo_model",
        type=str,
        dest="model_path",
        default=fp.YOLO_weights,
        help="Path to pre-trained weight files. Default is " + fp.YOLO_weights,
    )

    parser.add_argument(
        "--anchors",
        type=str,
        dest="anchors_path",
        default=fp.anchors_path,
        help="Path to YOLO anchors. Default is " + fp.anchors_path,
    )

    parser.add_argument(
        "--classes",
        type=str,
        dest="classes_path",
        default=fp.YOLO_classname,
        help="Path to YOLO class specifications. Default is " +
        fp.YOLO_classname,
    )

    parser.add_argument("--gpu_num",
                        type=int,
                        default=1,
                        help="Number of GPU to use. Default is 1")

    parser.add_argument(
        "--confidence",
        type=float,
        dest="score",
        default=0.25,
        help=
        "Threshold for YOLO object confidence score to show predictions. Default is 0.25.",
    )

    parser.add_argument(
        "--box_file",
        type=str,
        dest="box",
        default=fp.detection_results_file,
        help="File to save bounding box results to. Default is " +
        fp.detection_results_file,
    )

    parser.add_argument(
        "--postfix",
        type=str,
        dest="postfix",
        default="_fish",
        help=
        'Specify the postfix for images with bounding boxes. Default is "_fish"',
    )

    FLAGS = parser.parse_args()

    save_img = not FLAGS.no_save_img

    file_types = FLAGS.file_types

    if file_types:
        input_paths = GetFileList(FLAGS.input_path, endings=file_types)
    else:
        input_paths = GetFileList(FLAGS.input_path)

    # Split images and videos
    img_endings = (".jpg", ".jpeg", ".png")
    vid_endings = (".mp4", ".mpeg", ".mpg", ".avi")

    input_image_paths = []
    input_video_paths = []
    for item in input_paths:
        if item.endswith(img_endings):
            input_image_paths.append(item)
        elif item.endswith(vid_endings):
            input_video_paths.append(item)

    output_path = FLAGS.output
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # define YOLO detector
    yolo = YOLO(
        **{
            "model_path": FLAGS.model_path,
            "anchors_path": FLAGS.anchors_path,
            "classes_path": FLAGS.classes_path,
            "score": FLAGS.score,
            "gpu_num": FLAGS.gpu_num,
            "model_image_size": (416, 416),
        })

    # Make a dataframe for the prediction outputs
    out_df = pd.DataFrame(columns=[
        "image",
        "image_path",
        "xmin",
        "ymin",
        "xmax",
        "ymax",
        "label",
        "confidence",
        "x_size",
        "y_size",
    ])

    # labels to draw on images
    class_file = open(FLAGS.classes_path, "r")
    input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    print("Found {} input labels: {} ...".format(len(input_labels),
                                                 input_labels))

    if input_image_paths:
        print("Found {} input images: {} ...".format(
            len(input_image_paths),
            [os.path.basename(f) for f in input_image_paths[:5]],
        ))
        start = timer()
        text_out = ""

        # This is for images
        for i, img_path in enumerate(input_image_paths):
            print(img_path)
            prediction, image = detect_object(
                yolo,
                img_path,
                save_img=save_img,
                save_img_path=FLAGS.output,
                postfix=FLAGS.postfix,
            )
            y_size, x_size, _ = np.array(image).shape
            for single_prediction in prediction:
                out_df = out_df.append(
                    pd.DataFrame(
                        [[
                            os.path.basename(img_path.rstrip("\n")),
                            img_path.rstrip("\n"),
                        ] + single_prediction + [x_size, y_size]],
                        columns=[
                            "image",
                            "image_path",
                            "xmin",
                            "ymin",
                            "xmax",
                            "ymax",
                            "label",
                            "confidence",
                            "x_size",
                            "y_size",
                        ],
                    ))
        end = timer()
        print("Processed {} images in {:.1f}sec - {:.1f}FPS".format(
            len(input_image_paths),
            end - start,
            len(input_image_paths) / (end - start),
        ))
        out_df.to_csv(FLAGS.box, index=False)

    # This is for videos
    if input_video_paths:
        print("Found {} input videos: {} ...".format(
            len(input_video_paths),
            [os.path.basename(f) for f in input_video_paths[:5]],
        ))
        start = timer()
        for i, vid_path in enumerate(input_video_paths):
            output_path = os.path.join(
                FLAGS.output,
                os.path.basename(vid_path).replace(".", FLAGS.postfix + "."),
            )
            detect_video(yolo, vid_path, output_path=output_path)

        end = timer()
        print("Processed {} videos in {:.1f}sec".format(
            len(input_video_paths), end - start))
    # Close the current yolo session
    yolo.close_session()
    return fp.detection_results_folder
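
This example calls a GetFileList helper whose implementation is not shown. A plausible stand-in that matches how it is used here (a directory plus an optional endings extension filter, including all subdirectories) might look like the following; it is an assumption, not the project's actual code:

import os

def GetFileList(dir_path, endings=None):
    """Recursively collect file paths under dir_path, optionally filtered
    by a list/tuple of file extensions (case-insensitive)."""
    if endings:
        endings = tuple(e.lower() for e in endings)
    matches = []
    for root, _, files in os.walk(dir_path):
        for name in files:
            if not endings or name.lower().endswith(endings):
                matches.append(os.path.join(root, name))
    return matches
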
Example #6
        print("Processed {} images in {:.1f}sec - {:.1f}FPS".format(
            len(input_image_paths),
            end - start,
            len(input_image_paths) / (end - start),
        ))
        out_df.to_csv(FLAGS.box, index=False)

        # This is for videos
        """if input_video_paths:
        print(
            "Found {} input videos: {} ...".format(
                len(input_video_paths),
                [os.path.basename(f) for f in input_video_paths[:5]],
            )
        )
        
        
        for i, vid_path in enumerate(input_video_paths):
            output_path = os.path.join(
                FLAGS.output,
                os.path.basename(vid_path).replace(".", FLAGS.postfix + "."),
            )"""
    start = timer()
    detect_video(yolo)

    end = timer()
    print("Processed {} videos in {:.1f}sec".format(len(input_video_paths),
                                                    end - start))
    # Close the current yolo session
    yolo.close_session()
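
All of the snippets above rely on the same module-level imports, which the excerpts omit. Judging from the names they use (os, argparse, np, pd, timer), the original scripts presumably start with something close to this; the project-specific imports are guesses and the exact module paths may differ:

import argparse
import os
from timeit import default_timer as timer  # timer() drives the FPS/runtime prints

import numpy as np
import pandas as pd

# Project-specific names used above; the exact modules depend on the repository:
# from yolo import YOLO, detect_video, detect_object
# from utils import GetFileList
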