Example #1
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    frame = cv2.imread(args.image)

    if frame is None:
        raise RuntimeError('could not read image at {}'.format(args.image))

    print('Initializing networks...')
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key)
    print('Initialization done')

    options = wrnchAI.PoseEstimatorOptions()

    print('Inferring ...')

    estimator.process_frame(frame, options)

    num_persons = len(estimator.humans_2d())

    print('Inference done! Found ', num_persons, ' humans')
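Each main(args) in these examples expects an argparse-style namespace carrying models_dir, license_key, and (here) image. A minimal driver sketch for Example #1; the flag names are assumptions for illustration, not the official wrnch sample CLI:

# Hypothetical driver for main(args); the flag names are illustrative.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='wrnchAI 2d pose sample')
    parser.add_argument('--image', required=True, help='path to the input image')
    parser.add_argument('--models-dir', required=True, help='directory holding the wrnch models')
    parser.add_argument('--license-key', default=None,
                        help='wrnchAI license key; falls back to the environment check')
    main(parser.parse_args())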
Example #2
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    params = wrnchAI.PoseParams()
    params.bone_sensitivity = wrnchAI.Sensitivity.high
    params.joint_sensitivity = wrnchAI.Sensitivity.high
    params.enable_tracking = True

    # Default Model resolution
    params.preferred_net_width = 328
    params.preferred_net_height = 184

    output_format = wrnchAI.JointDefinitionRegistry.get('j23')

    print('Initializing networks...')
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key,
                                      params=params,
                                      gpu_id=0,
                                      output_format=output_format)
    print('Initialization done!')

    options = wrnchAI.PoseEstimatorOptions()

    print('Opening webcam...')
    with videocapture_context(args.webcam_index) as cap:
        visualizer = Visualizer()

        joint_definition = estimator.human_2d_output_format()
        bone_pairs = joint_definition.bone_pairs()

        while True:
            _, frame = cap.read()

            if frame is not None:
                estimator.process_frame(frame, options)  # run pose estimation on this frame
                humans2d = estimator.humans_2d()
                # the visualizer uses OpenCV to draw the skeleton overlay onto the frame
                visualizer.draw_image(frame)
                for human in humans2d:
                    joints = human.joints()

                    visualizer.draw_points(joints)
                    visualizer.draw_lines(joints, bone_pairs)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:
                break
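The videocapture_context used above comes from the wrnch sample utilities, not OpenCV itself. A minimal sketch of such a helper, assuming its only job is to release the device on exit:

# Minimal sketch of a videocapture_context helper; the real sample utility
# may differ, but the essential behavior is releasing the camera on exit.
from contextlib import contextmanager

import cv2

@contextmanager
def videocapture_context(source):
    cap = cv2.VideoCapture(source)  # webcam index or video file path
    try:
        yield cap
    finally:
        cap.release()  # free the device even if the loop body raises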
Example #3
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    print('Initializing networks...')
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key)
    estimator.initialize_head(args.models_dir)
    print('Initialization done!')

    options = wrnchAI.PoseEstimatorOptions()
    options.estimate_heads = True
    options.estimate_face_poses = True

    print('Opening webcam...')
    with videocapture_context(args.webcam_index) as cap:
        visualizer = Visualizer()

        while True:
            _, frame = cap.read()

            if frame is not None:

                estimator.process_frame(frame, options)
                heads = estimator.heads()
                faces = estimator.faces()

                visualizer.draw_image(frame)
                for head in heads:
                    bounding_box = head.bounding_box
                    visualizer.draw_box(bounding_box.min_x, bounding_box.min_y,
                                        bounding_box.width,
                                        bounding_box.height)

                for face in faces:
                    landmarks = face.landmarks()
                    arrow = face.arrow()

                    visualizer.draw_points(landmarks, joint_size=2)
                    visualizer.draw_arrow(arrow)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:  # escape key
                break
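Visualizer is likewise a sample-side helper built on OpenCV. A minimal sketch covering the calls these examples make, assuming joints arrive as a flat [x0, y0, x1, y1, ...] array normalized to [0, 1], with negative values marking undetected joints:

# Minimal OpenCV-backed Visualizer sketch; the coordinate layout is an
# assumption based on how the samples index joints by 2*i and 2*i + 1.
import cv2

class Visualizer:
    def draw_image(self, frame):
        self._frame = frame  # later draw_* calls render onto this frame

    def _to_pixels(self, x, y):
        height, width = self._frame.shape[:2]
        return int(x * width), int(y * height)

    def draw_points(self, joints, joint_size=8):
        for x, y in zip(joints[::2], joints[1::2]):
            if x >= 0 and y >= 0:  # negative coordinates mean 'not detected'
                cv2.circle(self._frame, self._to_pixels(x, y), joint_size, (0, 255, 0), -1)

    def draw_lines(self, joints, bone_pairs, thickness=2):
        for first, second in bone_pairs:
            x1, y1 = joints[2 * first], joints[2 * first + 1]
            x2, y2 = joints[2 * second], joints[2 * second + 1]
            if min(x1, y1, x2, y2) >= 0:
                cv2.line(self._frame, self._to_pixels(x1, y1),
                         self._to_pixels(x2, y2), (255, 0, 0), thickness)

    def draw_box(self, min_x, min_y, width, height, thickness=2):
        top_left = self._to_pixels(min_x, min_y)
        bottom_right = self._to_pixels(min_x + width, min_y + height)
        cv2.rectangle(self._frame, top_left, bottom_right, (0, 255, 255), thickness)

    def show(self):
        cv2.imshow('wrnchAI sample', self._frame)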
Example #4
def main(args):
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    print("Initializing networks...")
    estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                      license_string=args.license_key)
    estimator.initialize_3d(args.models_dir)
    print("Initialization done!")

    options = wrnchAI.PoseEstimatorOptions()
    options.estimate_3d = True

    visualizer = Visualizer()

    print("Opening webcam...")
    with videocapture_context(args.webcam_index) as cap:
        while True:
            _, frame = cap.read()

            if frame is not None:

                estimator.process_frame(frame, options)
                humans3d = estimator.raw_humans_3d()

                visualizer.draw_image(frame)

                for human in humans3d:
                    positions = human.positions()

                    visualizer.draw_points3d(positions)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:
                break
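raw_humans_3d() returns poses whose positions() is a flat float array. A small sketch of unpacking it into per-joint coordinates; the x, y, z triple layout is an assumption mirroring the x, y pair layout the 2d joints use, so check the wrnchAI docs before relying on it:

# Sketch: unpack a flat positions array into one (x, y, z) row per joint.
import numpy as np

def unpack_positions(positions):
    return np.asarray(positions, dtype=np.float32).reshape(-1, 3)

Inside the loop above, unpack_positions(human.positions()) would then yield a (num_joints, 3) array ready for further processing.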
Example #5
def wrnch_func(args):
    print("Inside wrnch_func...")
    print(args.pose_estimator, " = selected pose estimator")
    print(args.license_key, " = license key")

    reset_camera()
    input_handler()

    # from /usr/src/wrnchAI/wrSamples/python/pose3d_sample.py
    code = wrnchAI.license_check_string(args.license_key) if args.license_key \
        else wrnchAI.license_check()

    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))

    # params
    params = wrnchAI.PoseParams()
    params.bone_sensitivity = wrnchAI.Sensitivity.high
    params.joint_sensitivity = wrnchAI.Sensitivity.high
    params.enable_tracking = True
    # Default Model resolution
    params.preferred_net_width = 328
    params.preferred_net_height = 184
    # model
    output_format = wrnchAI.JointDefinitionRegistry.get('j23')

    try:
        print('Initializing PoseEstimator...')
        estimator = wrnchAI.PoseEstimator(models_path=args.models_dir,
                                          license_string=args.license_key,
                                          params=params,
                                          gpu_id=0,
                                          output_format=output_format)
        # which type to process frame
        options = wrnchAI.PoseEstimatorOptions()
        options.estimate_3d = True
        print('Initialization done!')
    except Exception:
        print_exc()
        raise  # cannot continue without an estimator

    print('Opening webcam...')
    with videocapture_context(-1) as cap:  # -1 selects the first available camera
        visualizer = Visualizer()

        joint_definition = estimator.human_3d_output_format()
        bone_pairs = joint_definition.bone_pairs()

        while True:

            # capture frame by frame
            _, frame = cap.read()

            if frame is not None:

                # passing in frame
                estimator.process_frame(frame, options)

                # get the last 3d estimation from the PoseEstimator; returns a list of Pose3d
                humans3d = estimator.humans_3d()

                # the visualizer uses OpenCV to draw the skeleton overlay onto the frame
                visualizer.draw_image(frame)

                for human in humans3d:
                    joints = human.joints()

                    visualizer.draw_points(joints)
                    visualizer.draw_lines(joints, bone_pairs)

                visualizer.show()

            key = cv2.waitKey(1)

            if key & 255 == 27:
                break
Example #6
webcam_index = 0

params = wrnchAI.PoseParams()
params.bone_sensitivity = wrnchAI.Sensitivity.high
params.joint_sensitivity = wrnchAI.Sensitivity.high
params.enable_tracking = True

# Default Model resolution
params.preferred_net_width = 328
params.preferred_net_height = 184

output_format = wrnchAI.JointDefinitionRegistry.get('j25')

print('Initializing networks...')
estimator = wrnchAI.PoseEstimator(models_path=sys.argv[1],
                                  params=params,
                                  gpu_id=0,
                                  output_format=output_format)
print('Initialization done!')

options = wrnchAI.PoseEstimatorOptions()

print('Opening webcam...')
cap = cv2.VideoCapture(webcam_index)

joint_definition = estimator.human_2d_output_format()
bone_pairs = joint_definition.bone_pairs()

keyboard = Controller()

while True:
    ret, frame = cap.read()
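Example #6 is truncated right after the first cap.read(), but the pynput Controller() it creates suggests the loop turns poses into key presses. A hypothetical continuation of the loop body; the gesture test, joint indices, and key choice are invented for illustration:

    # --- hypothetical continuation: the snippet is truncated above ---
    if not ret or frame is None:
        continue
    estimator.process_frame(frame, options)
    for human in estimator.humans_2d():
        joints = human.joints()
        WRIST, HEAD = 10, 0  # assumed indices in the 'j25' joint format
        wrist_y, head_y = joints[2 * WRIST + 1], joints[2 * HEAD + 1]
        # smaller y is higher in the image; press space on a raised wrist
        if wrist_y >= 0 and head_y >= 0 and wrist_y < head_y:
            keyboard.press(' ')
            keyboard.release(' ')
    if cv2.waitKey(1) & 255 == 27:  # escape key
        break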
Example #7
def main():
    ser1 = serial.Serial('COM9', 9600)
    code = wrnchAI.license_check_string(licenseKey)
    if code != 0:
        raise RuntimeError(wrnchAI.returncode_describe(code))
    time.sleep(4)
    ser1.write('0'.encode())

    # setting tracking parameters
    params = wrnchAI.PoseParams()
    params.bone_sensitivity = wrnchAI.Sensitivity.high
    params.joint_sensitivity = wrnchAI.Sensitivity.high
    params.enable_tracking = True
    # create resolution
    params.preferred_net_width = 500
    params.preferred_net_height = 500
    # format output
    output = wrnchAI.JointDefinitionRegistry.get('j23')
    print('Initializing networks')
    estimate = wrnchAI.PoseEstimator(models_path='..\\..\\..\\..\\bin\\wrModels', license_string=licenseKey,
                                     params=params, gpu_id=0, output_format=output)
    print('Initialization done!')
    options = wrnchAI.PoseEstimatorOptions()
    print('Turning on webcam now')


    # run-time vars:
    pose_img_path = r"C:\Users\tayse\OneDrive\Desktop\wrnchAI-engine-GPU-1.15.0-Windows-amd64\src\wrnchAI\wrSamples\python\Assets\pic.jpg"
    # extract McWICS Letters:
    #pose_imgs = np.load(pose_img_path)
    #pose_imgs_list = [pose_imgs[i].tolist() for i in range(6)]
    pose = vp.request_joint_data(pose_img_path)

    with videocapture_context(0) as cap:
        visual = Visualizer()
        joint_def = estimate.human_2d_output_format()
        bone_pairs = joint_def.bone_pairs()
        stagger = 0
        while True:
            #counter_letter = 0
            _, frame = cap.read()
            if frame is not None:
                estimate.process_frame(frame, options)
                humans2d = estimate.humans_2d()
                visual.draw_image(frame)
                for human in humans2d:
                    visual.draw_points(human.joints())
                    visual.draw_lines(human.joints(), bone_pairs)
                    if stagger % 15 == 0:
                        # gives the coordinates to calculate similarity
                        input_data = human.joints().tolist()  # list of floats
                        comp_val = vp.compare_dance(input_data, pose)  # compare results
                        if 0 <= comp_val <= 0.08:
                            # green
                            ser1.write('3'.encode())
                            print('excellent')
                            print(comp_val)
                            print("GET READY FOR NEXT LETTER!")
                        elif 0.08 < comp_val <= 0.14:
                            # green
                            ser1.write('2'.encode())
                            print('good')
                            print(comp_val)
                        elif 0.14 < comp_val:
                            # red
                            ser1.write('1'.encode())
                            print('bad')
                            print(comp_val)
                    if stagger == 15:
                        stagger = 0
                    stagger += 1

                visual.show()

            key = cv2.waitKey(1)
            if key & 255 == 27:
                break
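The if/elif ladder above repeats the serial-write-and-print pattern for each band of comp_val. A small refactor sketch using the same thresholds and serial codes; grade is a helper name introduced here, not part of the original code:

# Sketch: the threshold ladder from Example #7 as a single helper.
def grade(comp_val):
    if comp_val <= 0.08:
        return '3', 'excellent'  # green
    if comp_val <= 0.14:
        return '2', 'good'
    return '1', 'bad'            # red

code, label = grade(comp_val)
ser1.write(code.encode())
print(label, comp_val)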
Example #8
if not wrnchAI.license_check():
    sys.exit('A valid license is required to run the samples')

num_args = len(sys.argv)
if num_args < 2 or num_args > 3:
    sys.exit('Usage: python wrHands_sample.py <model path> [camera index (default 0)]')

if num_args == 3:
    webcam_index = int(sys.argv[2])
else:
    webcam_index = 0

print('Initializing networks...')
models_path = sys.argv[1]
estimator = wrnchAI.PoseEstimator(models_path)
estimator.initialize_head(models_path)
print('Initialization done!')

options = wrnchAI.PoseEstimatorOptions()
options.estimate_heads = True
options.estimate_face_poses = True

print('Opening webcam...')
cap = cv2.VideoCapture(webcam_index)

if not cap.isOpened():
    sys.exit('Cannot open webcam.')

visualizer = Visualizer()
params = wrnchAI.PoseParams()
params.bone_sensitivity = wrnchAI.Sensitivity.high
params.joint_sensitivity = wrnchAI.Sensitivity.high
params.enable_tracking = True
params.preferred_net_width = 328
params.preferred_net_height = 184
frame_count = 0

options = wrnchAI.PoseEstimatorOptions()
# default estimator options
output_format = wrnchAI.JointDefinitionRegistry.get('j23')

print('Initializing networks...')
estimator = wrnchAI.PoseEstimator(models_path=args["models"],
                                  license_string=args["license"],
                                  params=params,
                                  gpu_id=0,
                                  output_format=output_format)

with videocapture_context(args["input"]) as cap:
    visualizer = Visualizer()
    array_area = np.array([])
    joint_definition = estimator.human_2d_output_format()
    #print(joint_definition.joint_names())
    bone_pairs = joint_definition.bone_pairs()
    print(joint_definition.print_joint_definition())
    while True:
        _, frame = cap.read()
Example #10
# All rights reserved

from __future__ import print_function, division

import cv2
import wrnchAI
import sys

if not wrnchAI.license_check():
    sys.exit('A valid license is required to run the samples')

num_args = len(sys.argv)
if num_args != 3:
    sys.exit('Usage: python wrHello_sample.py <image file> <model path> ')

frame = cv2.imread(sys.argv[1])

if frame is None:
    sys.exit('could not read image at {}'.format(sys.argv[1]))

print('Initializing networks...')
estimator = wrnchAI.PoseEstimator(sys.argv[2])
print('Initialization done')

options = wrnchAI.PoseEstimatorOptions()

print('Inferring ...')

estimator.process_frame(frame, options)

numPersons = len(estimator.humans_2d())

print('Inference done! Found ', numPersons, ' humans')
Example #11
def handler(event, context):
    wrnch_key = getWrchKey()
    models_path = "/usr/bin/wrModels"

    # todo: detect realsense index
    # camera_indexes = checkValidCameraIndex()  # list of camera indexes
    camera_indexes = checkValidCameraIndex(cameraIndex())
    print(camera_indexes)
    camera_index = int(camera_indexes[1])

    # todo input from event
    user = "******"
    test_time = time.strftime('%l.%M%p_%b_%d_%Y')
    # test time has a leading space
    test_time = str(test_time)[1:]
    output_path = "/gg/data/{}/{}/".format(user, test_time)
    frames_number = 200
    testTimeLength = 5
    test_type = 'chair'
    serial_number = 123
    topic = 'joints/{}'.format(str(serial_number))

    # todo: fix video save, see line 156
    # Define the codec and create VideoWriter object
    # fourcc = cv2.VideoWriter_fourcc('h', '2', '6', '4')
    # fourcc = cv2.VideoWriter_fourcc(*'X264')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fps = 20.0
    resolution = (640, 480)
    video_out = cv2.VideoWriter(output_path + 'output.avi', fourcc, fps,
                                resolution)

    joints_filename = test_time + "_joints.csv"
    confidence_filename = test_time + "_confidence_joints.csv"
    confidence_human_filename = test_time + "_confidence_human.csv"

    # dictionary that will be saved as a json of the payloads
    big_payload = {}

    # payload to indicate starting
    payload = {
        'start_test': True,
        'test_time': test_time,
        'user': user,
        'output_path': output_path,
        'length_of_test': testTimeLength,
        'test_type': test_type,
        'serial_number': serial_number,
    }
    publish_mqtt(json.dumps(payload), topic)

    # generate the empty csv files
    generateCsv(output_path, joints_filename, confidence_filename,
                confidence_human_filename)
    """
    This part takes a long time to 'warm up'
    
    It still seems to be checking the license. If used on too many devices, it will still give an error
    """
    ### SETUP WRNCH
    # This appears to not be needed, but will read the license from the environment variable
    # if the key is not passed to it.
    # code = wrnchAI.license_check_string(wrnch_key) if wrnch_key \
    #     else wrnchAI.license_check()
    # if code != 0:
    #     raise RuntimeError(wrnchAI.returncode_describe(code))

    params = wrnchAI.PoseParams()
    params.bone_sensitivity = wrnchAI.Sensitivity.high
    params.joint_sensitivity = wrnchAI.Sensitivity.high
    params.enable_tracking = True

    # Default Model resolution
    params.preferred_net_width = 328
    params.preferred_net_height = 184

    # the part that takes so long
    output_format = wrnchAI.JointDefinitionRegistry.get('j25')
    estimator = wrnchAI.PoseEstimator(models_path=models_path,
                                      license_string=wrnch_key,
                                      params=params,
                                      gpu_id=0,
                                      output_format=output_format)

    options = wrnchAI.PoseEstimatorOptions()

    # todo: find realsense index using realsense sdk
    # this was here because video_index was reading a string from an input field,
    # Don't change until realsense index is built
    video_index = int(camera_index)
    with videocapture_context(video_index) as cap:

        # wrnch
        # visualizer = Visualizer()
        joint_definition = estimator.human_2d_output_format()
        bone_pairs = joint_definition.bone_pairs()

        # time record starts
        startTime = time.time()

        counter = 0
        while True:
            _, frame = cap.read()

            # todo: remove if not needed
            # currently not needed
            # # dimensions of frame
            # width = frame.shape[1]
            # height = frame.shape[0]
            # # frame counter
            # counter += 1

            if frame is not None:
                # wrnch
                estimator.process_frame(frame, options)
                humans2d = estimator.humans_2d()
                # visualizer.draw_image(frame)

                image_time = time.time()

                # indexing an empty list raises IndexError; skip frames with no detections
                try:
                    human = humans2d[0]
                except IndexError:
                    print('No humans detected in this frame')
                    continue

                # for human in humans2d:
                joints = human.joints()
                confidenceScores = human.scores()
                confidence = human.score()

                # todo: make this an async function with threading, OR buffer and publish at the end
                # buffer is probably better option. Threading is already being used by one of the
                # wrnch helpers, which is why asyncio is failing
                if len(joints) >= 1:
                    # publish data to mqtt
                    # send a dictionary that zips joints_labels and timestamp + joints
                    joints_dict = dict(
                        zip(joints_labels(),
                            [image_time - startTime] + joints.tolist()))
                    joints_confidence_dict = dict(
                        zip(joints_labels(), [image_time - startTime] +
                            confidenceScores.tolist()))
                    # human_confidence is a single float value

                    # this is the payload for each joint, some metadata still needs to be added
                    # be sure to make it match the earlier payload
                    # This payload should indicate that this isn't the first frame => start_test == false
                    payload = {
                        'start_test': False,
                        'test_time': test_time,
                        'user': user,
                        'output_path': output_path,
                        'length_of_test': testTimeLength,
                        'test_type': test_type,
                        'serial_number': serial_number,
                        'joints': joints_dict,
                        'joints_confidence': joints_confidence_dict,
                        'human_confidence': confidence
                    }
                    """
                    This seems to be slowing down record time
                    """
                    # todo: make async or buffer and publish at the end
                    # Buffer is the best option right now
                    # publish payload
                    # publish_mqtt(json.dumps(payload), topic)

                    # save payload to to big_payload
                    big_payload[image_time] = payload

                    # todo: explore a post-process to convert a dictionary to a csv without pandas
                    # Pandas is only used to build the csv, another script or lambda could convert
                    # a json to a csv
                    #
                    # write data to csv
                    with open(output_path + joints_filename,
                              'a') as realTimeCSV:
                        csvWriter = csv.writer(realTimeCSV)
                        csvWriter.writerow([image_time - startTime] +
                                           joints.tolist())

                    with open(output_path + confidence_filename,
                              'a') as realTimeConfidenceCSV:
                        csvWriter = csv.writer(realTimeConfidenceCSV)
                        csvWriter.writerow([image_time - startTime] +
                                           confidenceScores.tolist())

                    with open(output_path + confidence_human_filename,
                              'a') as realTimeHumanCSV:
                        csvWriter = csv.writer(realTimeHumanCSV)
                        csvWriter.writerow([image_time - startTime] +
                                           [str(confidence)])

                    # wrnch
                    # visualizer.draw_points(joints)
                    # visualizer.draw_lines(joints, bone_pairs)

                # wrnch
                # visualizer.show()

                # todo: Fix the video output
                # save the video
                # video_out = cv2.VideoWriter(output_path + 'motion_analysis.mp4', fourcc, fps, resolution)
                # video_out = cv2.VideoWriter(output_path + 'output.avi', -1, 20.0, (640, 480)) # this one might work - tested in windows
                # font = cv2.FONT_HERSHEY_SIMPLEX
                # cv2.putText(frame,
                #             image_time,
                #             (50, 50),
                #             font, 1,
                #             (0, 255, 255),
                #             2,
                #             cv2.)
                video_out.write(frame)

            if time.time() - startTime >= testTimeLength:

                # todo: rewrite make_csv to match payload metadata
                # make_csv(big_payload, output_path + "all_" + joints_filename)
                with open(output_path + "payload.json", "w") as f:
                    f.write(json.dumps(big_payload))

                # todo: remove plots before deploying
                plotJoints(output_path, joints_filename, confidence_filename,
                           confidence_human_filename)

                # todo: direct save all csvs for now
                # write_to_s3(bucket_name="motion-analysis", file_name=output_path + joints_filename)

                # todo: decompose this buffer by reading the dictionary keys and then publishing each value
                # Cannot publish entire payload at once
                # publish_mqtt(json.dumps(big_payload), topic)

                break
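One of the todos above asks for a way to turn the payload dictionary into a csv without pandas. A standard-library sketch, assuming every payload in big_payload carries the same 'joints' keys:

# Sketch for the 'convert a dictionary to a csv without pandas' todo.
import csv

def payloads_to_csv(big_payload, csv_path):
    timestamps = sorted(big_payload)
    if not timestamps:
        return
    joint_keys = list(big_payload[timestamps[0]]['joints'])
    with open(csv_path, 'w', newline='') as out:
        writer = csv.writer(out)
        writer.writerow(['image_time'] + joint_keys)  # header row
        for t in timestamps:
            joints = big_payload[t]['joints']
            writer.writerow([t] + [joints.get(key) for key in joint_keys])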
Example #12
import os

import cv2
import wrnchAI

params = wrnchAI.PoseParams()
params.bone_sensitivity = wrnchAI.Sensitivity.high
params.joint_sensitivity = wrnchAI.Sensitivity.high
params.enable_tracking = True

# Default Model resolution
params.preferred_net_width = 328
params.preferred_net_height = 184

output_format = wrnchAI.JointDefinitionRegistry.get('j23')

estimator = wrnchAI.PoseEstimator(models_path=os.environ['MODEL_PATH'],
                                  license_string=os.environ['LICENSE_KEY'],
                                  params=params,
                                  gpu_id=0,
                                  output_format=output_format)
options = wrnchAI.PoseEstimatorOptions()
joint_definition = estimator.human_2d_output_format()
bone_pairs = joint_definition.bone_pairs()


def main():
    print('Opening webcam...')
    with videocapture_context(0) as cap:
        visualizer = Visualizer()

        while True:
            _, frame = cap.read()
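The snippet is truncated at the frame read. A hedged sketch of the rest of the loop, modeled on Example #2's body rather than recovered from the source:

            # --- assumed continuation, modeled on Example #2 ---
            if frame is not None:
                estimator.process_frame(frame, options)
                humans2d = estimator.humans_2d()
                visualizer.draw_image(frame)
                for human in humans2d:
                    joints = human.joints()
                    visualizer.draw_points(joints)
                    visualizer.draw_lines(joints, bone_pairs)
                visualizer.show()

            if cv2.waitKey(1) & 255 == 27:  # escape key
                break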