Example #1
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    frame = cv2.imread(args.image)

    if frame is None:
        raise RuntimeError('could not read image at {}'.format(args.image))

    print('Initializing networks...')
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key)
    print('Initialization done')

    options = wrnchAI.PoseEstimatorOptions()

    print('Inferring ...')

    estimator.process_frame(frame, options)

    num_persons = len(estimator.humans_2d())

    print('Inference done! Found {} humans'.format(num_persons))
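
The main(args) functions in these examples expect an argparse-style namespace that the snippets never build. A minimal driver might look like the sketch below; the flag names (--models-dir, --license-key) and the positional image argument are assumptions inferred from the attributes the code reads, not necessarily what the original wrnch samples use.

import argparse


def parse_args():
    # Hypothetical parser: flag names are assumptions based on the attributes
    # the sample reads (args.image, args.models_dir, args.license_key).
    parser = argparse.ArgumentParser(
        description='Run 2D pose estimation on a single image')
    parser.add_argument('image', help='path to the input image')
    parser.add_argument('--models-dir', required=True,
                        help='directory containing the wrnchAI model files')
    parser.add_argument('--license-key', default='',
                        help='wrnchAI license key (optional)')
    return parser.parse_args()


if __name__ == '__main__':
    main(parse_args())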
Example #2
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    params = wrnchAI.PoseParams()
    params.bone_sensitivity = wrnchAI.Sensitivity.high
    params.joint_sensitivity = wrnchAI.Sensitivity.high
    params.enable_tracking = True

    # Default Model resolution
    params.preferred_net_width = 328
    params.preferred_net_height = 184

    output_format = wrnchAI.JointDefinitionRegistry.get('j23')

    print('Initializing networks...')
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key,
                                      params=params,
                                      gpu_id=0,
                                      output_format=output_format)
    print('Initialization done!')

    options = wrnchAI.PoseEstimatorOptions()

    print('Opening webcam...')
    with videocapture_context(args.webcam_index) as cap:
        visualizer = Visualizer()

        joint_definition = estimator.human_2d_output_format()
        bone_pairs = joint_definition.bone_pairs()

        while True:
            _, frame = cap.read()

            if frame is not None:
                estimator.process_frame(frame, options)  # run 2D pose estimation on the frame
                humans2d = estimator.humans_2d()
                # the Visualizer draws with OpenCV, using the frame as its canvas
                visualizer.draw_image(frame)  # set the frame before overlaying the skeletons
                for human in humans2d:
                    joints = human.joints()

                    visualizer.draw_points(joints)
                    visualizer.draw_lines(joints, bone_pairs)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:
                break
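
videocapture_context is a helper from the wrnch sample utilities and is not shown in these snippets. Assuming it only wraps cv2.VideoCapture and releases the device on exit, a stand-in could be sketched as:

from contextlib import contextmanager

import cv2


@contextmanager
def videocapture_context(index):
    # Sketch of the samples' helper: open the capture device and make sure
    # it is released even if the body of the with-block raises.
    cap = cv2.VideoCapture(index)
    try:
        yield cap
    finally:
        cap.release()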
Example #3
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    print('Initializing networks...')
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key)
    estimator.initialize_head(args.models_dir)
    print('Initialization done!')

    options = wrnchAI.PoseEstimatorOptions()
    options.estimate_heads = True
    options.estimate_face_poses = True

    print('Opening webcam...')
    with videocapture_context(args.webcam_index) as cap:
        visualizer = Visualizer()

        while True:
            _, frame = cap.read()

            if frame is not None:

                estimator.process_frame(frame, options)
                heads = estimator.heads()
                faces = estimator.faces()

                visualizer.draw_image(frame)
                for head in heads:
                    bounding_box = head.bounding_box
                    visualizer.draw_box(bounding_box.min_x, bounding_box.min_y,
                                        bounding_box.width,
                                        bounding_box.height)

                for face in faces:
                    landmarks = face.landmarks()
                    arrow = face.arrow()

                    visualizer.draw_points(landmarks, joint_size=2)
                    visualizer.draw_arrow(arrow)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:  # escape key
                break
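
Visualizer comes from the samples' visualizer module, which is also not reproduced here. The following is a rough OpenCV-only sketch of the subset of its interface used above (draw_image, draw_points, draw_lines, draw_box, show); draw_arrow and draw_points3d are omitted, and the assumption that joints arrive as a flat [x0, y0, x1, y1, ...] list normalized to the frame size may not match the real implementation.

import cv2


class Visualizer:
    """Minimal OpenCV stand-in for the samples' Visualizer (a sketch only;
    assumes joints are a flat [x0, y0, x1, y1, ...] list normalized to the
    frame size, with negative values marking undetected joints)."""

    WINDOW_NAME = 'wrnchAI sample'

    def draw_image(self, frame):
        # Keep a reference to the frame; all other draw_* calls paint onto it.
        self.frame = frame
        self.height, self.width = frame.shape[:2]

    def _to_pixels(self, joints):
        xs = joints[0::2]
        ys = joints[1::2]
        return [(int(x * self.width), int(y * self.height))
                for x, y in zip(xs, ys)]

    def draw_points(self, joints, joint_size=4):
        for x, y in self._to_pixels(joints):
            if x >= 0 and y >= 0:
                cv2.circle(self.frame, (x, y), joint_size, (0, 255, 0), -1)

    def draw_lines(self, joints, bone_pairs):
        points = self._to_pixels(joints)
        for first, second in bone_pairs:
            p1, p2 = points[first], points[second]
            if min(p1 + p2) >= 0:
                cv2.line(self.frame, p1, p2, (255, 0, 0), 2)

    def draw_box(self, min_x, min_y, width, height):
        top_left = (int(min_x * self.width), int(min_y * self.height))
        bottom_right = (int((min_x + width) * self.width),
                        int((min_y + height) * self.height))
        cv2.rectangle(self.frame, top_left, bottom_right, (0, 0, 255), 2)

    def show(self):
        cv2.imshow(self.WINDOW_NAME, self.frame)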
Example #4
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    print("Initializing networks...")
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key)
    estimator.initialize_3d(args.models_dir)
    print("Initialization done!")

    options = wrnchAI.PoseEstimatorOptions()
    options.estimate_3d = True

    visualizer = Visualizer()

    print("Opening webcam...")
    with videocapture_context(args.webcam_index) as cap:
        while True:
            _, frame = cap.read()

            if frame is not None:

                estimator.process_frame(frame, options)
                humans3d = estimator.raw_humans_3d()

                visualizer.draw_image(frame)

                for human in humans3d:
                    positions = human.positions()

                    visualizer.draw_points3d(positions)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:
                break
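
raw_humans_3d() exposes raw 3D positions rather than the 2D joint lists used elsewhere. Purely as an illustration, and assuming positions() returns a flat [x0, y0, z0, x1, y1, z1, ...] sequence (which this snippet does not confirm), the coordinates could be regrouped for inspection like this:

def group_xyz(positions):
    # A sketch only: assumes a flat [x0, y0, z0, x1, y1, z1, ...] layout.
    return [tuple(positions[i:i + 3]) for i in range(0, len(positions), 3)]


def print_humans_3d(humans3d):
    # Dump every joint of every detected person as an (x, y, z) triple.
    for person_id, human in enumerate(humans3d):
        for joint_id, (x, y, z) in enumerate(group_xyz(human.positions())):
            print('person {} joint {}: ({:.3f}, {:.3f}, {:.3f})'.format(
                person_id, joint_id, x, y, z))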
Example #5
def wrnch_func(args):
    print("Inside wrnch_func...")
    print(args.pose_estimator, " = selected pose estimator")
    print(args.license_key, " = license key")

    reset_camera()
    input_handler()

    # from /usr/src/wrnchAI/wrSamples/python/pose3d_sample.py
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    # params
    params = wrnchAI.PoseParams()
    params.bone_sensitivity = wrnchAI.Sensitivity.high
    params.joint_sensitivity = wrnchAI.Sensitivity.high
    params.enable_tracking = True
    # Default Model resolution
    params.preferred_net_width = 328
    params.preferred_net_height = 184
    # model
    output_format = wrnchAI.JointDefinitionRegistry.get('j23')

    try:
        print('Initializing PoseEstimator...')
        estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                          license_string=args.license_key,
                                          params=params,
                                          gpu_id=0,
                                          output_format=output_format)
        # options select what the estimator computes for each frame
        options = wrnchAI.PoseEstimatorOptions()
        options.estimate_3d = True
        print('Initialization done!')
    except Exception:
        print_exc()  # from traceback import print_exc
        raise  # re-raise: the estimator is unusable if initialization failed

    print('Opening webcam...')
    # use the samples' videocapture_context helper (cv2.VideoCapture itself
    # is not a context manager) so the camera is released on exit
    with videocapture_context(-1) as cap:
        visualizer = Visualizer()

        joint_definition = estimator.human_3d_output_format()
        bone_pairs = joint_definition.bone_pairs()  # needed below for draw_lines

        while True:

            # capture frame by frame
            _, frame = cap.read()

            if frame is not None:

                # run 3D pose estimation on the frame
                estimator.process_frame(frame, options)

                # get the last 3D estimate from the PoseEstimator (a list of 3D poses)
                humans3d = estimator.humans_3d()

                # the Visualizer draws with OpenCV, using the frame as its canvas
                visualizer.draw_image(frame)  # set the frame before overlaying the skeleton

                for human in humans3d:
                    joints = human.joints()

                    visualizer.draw_points(joints)
                    visualizer.draw_lines(joints, bone_pairs)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:
                break
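
If the videocapture_context helper is not available, the same guarantee, releasing the camera even when an error occurs, can be obtained with a plain try/finally around cv2.VideoCapture; this sketch only uses standard OpenCV calls:

import cv2

cap = cv2.VideoCapture(0)  # 0 is the default camera; the snippet above uses -1
if not cap.isOpened():
    raise RuntimeError('could not open the webcam')

try:
    while True:
        ok, frame = cap.read()
        if ok and frame is not None:
            # ... run process_frame() and draw here, as in the examples above ...
            cv2.imshow('camera', frame)
        if cv2.waitKey(1) & 255 == 27:  # escape key
            break
finally:
    cap.release()
    cv2.destroyAllWindows()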
Example #6
# Copyright (c) 2019 Wrnch Inc.
# All rights reserved

from __future__ import print_function, division

import sys

import cv2
import wrnchAI
from visualizer import Visualizer

if wrnchAI.license_check() != 0:  # 0 means a valid license, as in the examples above
    sys.exit('A valid license is required to run the samples')

num_args = len(sys.argv)
if num_args < 2 or num_args > 3:
    sys.exit(
        'Usage: python pose2d_sample.py <model path> [camera index (default: 0)]')

if num_args == 3:
    webcam_index = int(sys.argv[2])
else:
    webcam_index = 0

params = wrnchAI.PoseParams()
params.bone_sensitivity = wrnchAI.Sensitivity.high
params.joint_sensitivity = wrnchAI.Sensitivity.high
params.enable_tracking = True

# Default Model resolution
params.preferred_net_width = 328