Code example #1
def main():

    print('TensorFlow version: %s' % tf.__version__)
    assert tf.__version__.startswith(
        '2.'), "TensorFlow version 2.x must be used!"

    if args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)

    model = args.model  # 'mobilenet' or 'resnet50'
    stride = args.stride  # 8, 16, 32 (max 16 for mobilenet, min 16 for resnet50)
    quant_bytes = args.quant_bytes  # 4 = float (no quantization), 2 or 1 for quantized weights
    multiplier = args.multiplier  # only for mobilenet

    posenet = load_model(model, stride, quant_bytes, multiplier)

    filenames = [
        f.path for f in os.scandir(args.image_dir)
        if f.is_file() and f.path.endswith(('.png', '.jpg'))
    ]

    start = time.time()
    for f in filenames:
        img = cv2.imread(f)
        pose_scores, keypoint_scores, keypoint_coords = posenet.estimate_multiple_poses(
            img)
        img_poses = posenet.draw_poses(img, pose_scores, keypoint_scores,
                                       keypoint_coords)
        posenet.print_scores(f, pose_scores, keypoint_scores, keypoint_coords)
        cv2.imwrite(
            os.path.join(args.output_dir, os.path.relpath(f, args.image_dir)),
            img_poses)

    print('Average FPS:', len(filenames) / (time.time() - start))
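
All of these examples read their settings from a module-level `args` object and a `load_model` helper defined elsewhere in the repository. As a point of reference only, here is a minimal, hypothetical sketch of the argument parsing that example #1 appears to rely on; the flag names mirror the attributes used above, and the defaults are illustrative, not the project's actual values.

import argparse

# Hypothetical sketch: flag names are taken from the attributes referenced above,
# defaults are illustrative only.
parser = argparse.ArgumentParser(description='PoseNet (TensorFlow 2.x) image demo')
parser.add_argument('--model', default='mobilenet')  # 'mobilenet' or 'resnet50'
parser.add_argument('--stride', type=int, default=16)  # 8, 16, 32
parser.add_argument('--quant_bytes', type=int, default=4)  # 4 = float
parser.add_argument('--multiplier', type=float, default=1.0)  # only for mobilenet
parser.add_argument('--image_dir', default='./images')
parser.add_argument('--output_dir', default='./output')
args = parser.parse_args()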
Code example #2
def main():

    print('TensorFlow version: %s' % tf.__version__)
    assert tf.__version__.startswith(
        '2.'), "TensorFlow version 2.x must be used!"

    model = args.model  # 'mobilenet' or 'resnet50'
    stride = args.stride  # 8, 16, 32 (max 16 for mobilenet, min 16 for resnet50)
    quant_bytes = args.quant_bytes  # 4 = float (no quantization), 2 or 1 for quantized weights
    multiplier = args.multiplier  # only for mobilenet

    posenet = load_model(model, stride, quant_bytes, multiplier)

    num_images = args.num_images
    filenames = [
        f.path for f in os.scandir(args.image_dir)
        if f.is_file() and f.path.endswith(('.png', '.jpg'))
    ]
    if len(filenames) > num_images:
        filenames = filenames[:num_images]

    images = {f: cv2.imread(f) for f in filenames}

    start = time.time()
    for i in range(num_images):
        image = images[filenames[i % len(filenames)]]
        posenet.estimate_multiple_poses(image)

    print('Average FPS:', num_images / (time.time() - start))
Code example #3
def main():

    print('TensorFlow version: %s' % tf.__version__)
    assert tf.__version__.startswith(
        '2.'), "TensorFlow version 2.x must be used!"

    model = args.model  # 'mobilenet' or 'resnet50'
    stride = args.stride  # 8, 16, 32 (max 16 for mobilenet, min 16 for resnet50)
    quant_bytes = args.quant_bytes  # 4 = float (no quantization), 2 or 1 for quantized weights
    multiplier = args.multiplier  # only for mobilenet

    posenet = load_model(model, stride, quant_bytes, multiplier)

    if args.file is not None:
        cap = cv2.VideoCapture(args.file)
    else:
        cap = cv2.VideoCapture(args.cam_id)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.cam_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.cam_height)

    start = time.time()
    frame_count = 0

    while True:
        res, img = cap.read()
        if not res:
            break  # end of the video file, or webcam read failure

        pose_scores, keypoint_scores, keypoint_coords = posenet.estimate_multiple_poses(
            img)

        overlay_image = draw_skel_and_kp(img,
                                         pose_scores,
                                         keypoint_scores,
                                         keypoint_coords,
                                         min_pose_score=0.15,
                                         min_part_score=0.1)

        cv2.imshow('posenet', overlay_image)
        frame_count += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print('Average FPS: ', frame_count / (time.time() - start))

    cap.release()
    cv2.destroyAllWindows()
Code example #4
def main():

    print('TensorFlow version: %s' % tf.__version__)
    assert tf.__version__.startswith(
        '2.'), "TensorFlow version 2.x must be used!"

    model = args.model  # 'mobilenet' or 'resnet50'
    stride = args.stride  # 8, 16, 32 (max 16 for mobilenet, min 16 for resnet50)
    quant_bytes = args.quant_bytes  # 4 = float (no quantization), 2 or 1 for quantized weights
    multiplier = args.multiplier  # only for mobilenet

    posenet = load_model(model, stride, quant_bytes, multiplier)

    # for inspiration, see: https://www.programcreek.com/python/example/72134/cv2.VideoWriter
    if args.input_file is not None:
        cap = cv2.VideoCapture(args.input_file)
    else:
        raise IOError("video file not found")

    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video_writer = cv2.VideoWriter(args.output_file, fourcc, fps,
                                   (width, height))

    max_pose_detections = 20

    # Scaling down the input image reduces the quality of the pose detections!
    # The speed gain is roughly the square of the scale factor.
    posenet_input_height = 540  # target input height if downscaling is enabled
    posenet_input_scale = 1.0  # set to posenet_input_height / height to enable downscaling
    posenet_input_width = int(width * posenet_input_scale)
    print("posenet_input_scale: %3.4f" % posenet_input_scale)

    start = time.time()
    frame_count = 0

    ret, frame = cap.read()

    while ret:
        if posenet_input_scale == 1.0:
            frame_rescaled = frame  # no scaling
        else:
            frame_rescaled = \
                cv2.resize(frame, (posenet_input_width, posenet_input_height), interpolation=cv2.INTER_LINEAR)

        pose_scores, keypoint_scores, keypoint_coords = posenet.estimate_multiple_poses(
            frame_rescaled, max_pose_detections)

        keypoint_coords_upscaled = keypoint_coords / posenet_input_scale
        overlay_frame = draw_skel_and_kp(frame,
                                         pose_scores,
                                         keypoint_scores,
                                         keypoint_coords_upscaled,
                                         min_pose_score=0.15,
                                         min_part_score=0.1)

        frame_count += 1
        # The VideoWriter encodes with the chosen fourcc, but OpenCV offers no control
        # over the output bitrate/quality, so the file gets large; re-encode it with
        # ffmpeg afterwards. See:
        # https://stackoverflow.com/questions/25998799/specify-compression-quality-in-python-for-opencv-video-object
        video_writer.write(overlay_frame)
        ret, frame = cap.read()

    print('Average FPS: ', frame_count / (time.time() - start))

    video_writer.release()
    cap.release()
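
As the comment in example #4 notes, the VideoWriter output can get large because OpenCV offers no bitrate/quality control. Below is a minimal sketch of the suggested ffmpeg post-processing step, assuming ffmpeg is installed and on the PATH; the file names and the CRF value are placeholders, not part of the original script.

import subprocess

# Hypothetical post-processing step: re-encode the raw OpenCV output with H.264.
# Lower CRF means higher quality and a larger file; 23 is a common default.
subprocess.run([
    'ffmpeg', '-i', 'posenet_raw.mp4',   # placeholder input name
    '-c:v', 'libx264', '-crf', '23',
    'posenet_compressed.mp4'             # placeholder output name
], check=True)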
Code example #5
def main():
    print("Tensorflow version: %s" % tf.__version__)
    assert tf.__version__.startswith(
        "2."), "Tensorflow version 2.x must be used!"

    if args.image_dir == args.output_dir:
        print(
            "[WARNING] input dir is the same as output dir -- the pictures will be overwritten"
        )
        print("Do you wish to continue?: y/n")
        if input() != "y":
            exit()

    # get the input folder name to build the output file names
    image_dir_name = os.path.basename(os.path.normpath(args.image_dir))

    start_date = datetime.now().strftime("--%H-%M--%d-%m-%Y")
    # output paths for the annotated pictures and the .csv with keypoint coords
    output_pic_dir_name = os.path.join(args.output_dir, image_dir_name + start_date)
    output_csv_file_name = os.path.join(args.output_dir,
                                        image_dir_name + start_date + ".csv")
    if not os.path.exists(output_pic_dir_name):
        os.makedirs(output_pic_dir_name)

    model = args.model  # 'mobilenet' or 'resnet50'
    stride = args.stride  # 8, 16, 32 (max 16 for mobilenet, min 16 for resnet50)
    quant_bytes = args.quant_bytes  # 4 = float (no quantization), 2 or 1 for quantized weights
    multiplier = args.multiplier  # only for mobilenet
    label = args.pose  # pose label for the pictures in this folder

    posenet = load_model(model, stride, quant_bytes, multiplier)

    # file_count = su.count_files(args.image_dir)
    filenames = [
        f.path for f in os.scandir(args.image_dir)
        if f.is_file() and f.path.endswith((".png", ".jpg"))
    ]

    # prepare .csv file for points coords
    csv_file = open(output_csv_file_name, "ab")
    np.savetxt(csv_file, su.csv_column_names, delimiter=",", fmt="%s")

    start = time.time()
    for f in filenames:
        img = cv2.imread(f)
        pose_scores, keypoint_scores, keypoint_coords = posenet.estimate_multiple_poses(
            img)
        img_poses = posenet.draw_poses(img, pose_scores, keypoint_scores,
                                       keypoint_coords)
        cv2.imwrite(
            os.path.join(output_pic_dir_name,
                         os.path.relpath(f, args.image_dir)),
            img_poses,
        )

        one_row = np.concatenate(
            (
                [pose_scores[0]],  # confidence score for pose 0 (of up to 10 detected)
                keypoint_coords[0, :7, 0],  # X coords
                keypoint_coords[0, :7, 1],  # Y coords
                [label],  # pose type
                # TODO: picture number (first implement it in vid2pic)
            ),
            axis=0,
        ).reshape(su.csv_column_names.shape)
        np.savetxt(csv_file, one_row, delimiter=",")

    print("Average FPS:", len(filenames) / (time.time() - start))
    csv_file.close()
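
Example #5 writes one CSV row per image: the score of the best pose, seven X coordinates, seven Y coordinates, and the pose label. Below is a sketch of reading that file back for later processing, assuming `su.csv_column_names` is a single header row of 16 names; the file name is a placeholder built the same way as above.

import numpy as np

# Hypothetical reader for the CSV written above; the file name is a placeholder.
rows = np.genfromtxt('some_folder--12-00--01-01-2024.csv',
                     delimiter=',', dtype=str, skip_header=1)
scores = rows[:, 0].astype(float)     # pose 0 confidence score
coords = rows[:, 1:15].astype(float)  # 7 X coords followed by 7 Y coords
labels = rows[:, 15]                  # pose label (last column)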