Code Example #1
def main():

    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']
        num_images = args.num_images

        filenames = [
            f.path for f in os.scandir(args.image_dir)
            if f.is_file() and f.path.endswith(('.png', '.jpg'))
        ]
        if len(filenames) > num_images:
            filenames = filenames[:num_images]

        images = {
            f: posenet.read_imgfile(f, 1.0, output_stride)[0]
            for f in filenames
        }

        start = time.time()
        for i in range(num_images):
            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': images[filenames[i % len(filenames)]]})

            output = posenet.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.25)

        print('Average FPS:', num_images / (time.time() - start))
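
Most of these snippets reference a module-level args object without defining it. A minimal argparse sketch of the flags they assume is shown here; the flag names are taken from usage in the examples, but the defaults are guesses, not the originals':

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)           # PoseNet variant id
parser.add_argument('--scale_factor', type=float, default=1.0)  # input downscaling
parser.add_argument('--image_dir', type=str, default='./images')
parser.add_argument('--output_dir', type=str, default='./output')
parser.add_argument('--output_csv_dir', type=str, default='./csv')
parser.add_argument('--num_images', type=int, default=1000)     # benchmark iterations
parser.add_argument('--notxt', action='store_true')             # suppress text output
parser.add_argument('--cam_id', type=int, default=0)            # webcam demos
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--file', type=str, default=None)           # optional video file
args = parser.parse_args()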
Code Example #2
def main():
    errors = 0
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.output_dir:
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)

        filenames = [f.path for f in os.scandir(args.image_dir) if f.is_file() and f.path.endswith(('.png', '.jpg'))]

        start = time.time()
        for f in filenames:
            input_image, draw_image, output_scale = posenet.read_imgfile(
                f, scale_factor=args.scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=1,
                min_pose_score=0.30)

            keypoint_coords *= output_scale

            if args.output_dir:
                draw_image = posenet.draw_skel_and_kp(
                    draw_image, pose_scores, keypoint_scores, keypoint_coords,
                    min_pose_score=0.25, min_part_score=0.25)

                cv2.imwrite(os.path.join(args.output_dir, os.path.relpath(f, args.image_dir)), draw_image)

            if not args.notxt:
                print("Results for image: %s" % f)
                coords = "{"
                for pi in range(len(pose_scores)):
                    if pose_scores[pi] == 0.:
                        break
                    print('Pose #%d, score = %f' % (pi, pose_scores[pi]))

                    for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):
                        print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))

                        coords += str(posenet.COORDENADAS[ki] % (c[0], c[1]))
                try:
                    coords += ",\"atividade\":\"3\""
                    coords += "}"
                    createFile(coords)
                except Exception:
                    print("Error on file: " + str(f))
                    errors += 1

        print('Average FPS:', len(filenames) / (time.time() - start))
        print('ERRORS:', errors)
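
Note: createFile is project-specific and not shown in this snippet; a minimal sketch of what it presumably does, assuming it appends the accumulated JSON-like string to a results file (the path here is hypothetical):

def createFile(coords, path='coords.json'):
    # Hypothetical helper: append one JSON-like record per line.
    with open(path, 'a') as out:
        out.write(coords + '\n')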
Code Example #3
def __init__(self, cuda=True, model=101, scale_factor=1.0):
    self.model = posenet.load_model(model)
    self.scale_factor = scale_factor
    self.cuda = cuda
    if cuda:
        self.model = self.model.cuda()
    self.output_stride = self.model.output_stride
Code Example #4
File: calculations.py Project: god102104/CDP2
def calculate_Score(self, video, action):
    with tf.compat.v1.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(101, sess)
        reference_coordinates, reference_video_frames = self.get_action_coords_from_dict(
            action)
        cap = cv2.VideoCapture(video)
        new_video_frames = 0
        if not cap.isOpened():
            print("error in opening video")
        while cap.isOpened():
            ret_val, image = cap.read()
            if ret_val:
                input_points = self.pose.getpoints(image, sess, model_cfg,
                                                   model_outputs)
                if len(input_points) == 0:
                    continue
                input_new_coords = np.asarray(
                    self.pose.roi(input_points)[0:34]).reshape(17, 2)
                self.new_video_coordinates.append(input_new_coords)
                new_video_frames = new_video_frames + 1
            else:
                break
        cap.release()
        final_score, score_list = self.score.compare_34dim(
            np.asarray(self.new_video_coordinates),
            np.asarray(reference_coordinates), new_video_frames,
            reference_video_frames, self.weights)
    return final_score, score_list
Code Example #5
File: routes.py Project: rmarshall10/Insight_App
def example_file(link):
    '''If user clicks on an example link, process the static video file'''

    if link == 1:
        video_name = "ball_test6.mp4"
    elif link == 2:
        video_name = "ball_test4.mp4"
    elif link == 3:
        video_name = "ball_test13.mp4"
    else:
        video_name = "ball_test6.mp4"  # fallback so video_name is always bound

    #load the soccer ball detection model
    model_path = "app/static/"
    model_name = "frozen_inference_graph.pb"
    model_text = "graph_text.pbtxt"
    filename = model_path + video_name

    net = video_tracker.loading_model(model_path + model_name,
                                      model_path + model_text)

    #load the pose model
    sess = tf.Session()
    model_cfg, model_outputs = posenet.load_model(101, sess)
    output_stride = model_cfg['output_stride']

    #OPTIONALLY can output the following, then show output as pre-loaded gif
    #(video_bytes, bounces, body_part_bounces, body_part_sequence) = video_tracker.run_video(f.filename, net, sess, output_stride, model_outputs)
    #return Response(video_tracker.display_video(video_bytes), mimetype='multipart/x-mixed-replace; boundary=frame')

    return Response(video_tracker.run_video(filename, net, sess, output_stride,
                                            model_outputs),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Code Example #6
File: detection.py Project: maro525/pose_zmq
    def load(self):

        self.sess = tf.InteractiveSession()

        self.model_cfg, self.model_outputs = posenet.load_model(
            POSE_MODEL, self.sess)
        self.output_stride = self.model_cfg['output_stride']
Code Example #7
def main():
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(101, sess)
        output_stride = model_cfg['output_stride']
        scale_factor = 0.7125
        last_res = 5
        cap = cv2.VideoCapture(0)
        cap.set(3, 640)
        cap.set(4, 480)
        eggNet = egg_model.PandaEgg()
        eggNet.load_weights('data/egg_model_weights.csv')
        text = ''  # ensure text is bound before the first classification
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=1,
                min_pose_score=0.15)

            keypoint_coords *= output_scale

            overlay_image = posenet.draw_skel_and_kp(display_image,
                                                     pose_scores,
                                                     keypoint_scores,
                                                     keypoint_coords,
                                                     min_pose_score=0.15,
                                                     min_part_score=0.1)

            if np.array_equal(keypoint_coords, np.zeros((1, 17, 2))):
                text = 'Nope'
            else:
                res = eggNet.pose_detect(keypoint_coords)
                if res != last_res:
                    if res == 0:
                        sio.emit('message', '0')
                        text = 'STANDING'
                        last_res = res
                    elif res == 1:
                        sio.emit('message', '1')
                        text = 'SITTING'
                        last_res = res
                    elif res == 2:
                        sio.emit('message', '2')
                        text = 'LYING'
                        last_res = res

            cv2.putText(overlay_image, text, (0, 50), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (0, 0, 0), 2)

            cv2.imshow('posenet', overlay_image)
            if cv2.waitKey(1) & 0xFF == 27:
                break
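
The sio object used by sio.emit(...) above is assumed to be a python-socketio client connected elsewhere in the script; a minimal sketch (the server address is hypothetical):

import socketio

sio = socketio.Client()
sio.connect('http://localhost:3000')  # hypothetical server address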
Code Example #8
File: routes.py Project: rmarshall10/Insight_App
def uploaded_file():
    '''If a video is uploaded, process the video and display output'''
    if request.method == 'POST':
        f = request.files['file']
        # if not allowed_extension(f.filename):
        # 	return

        # save the file
        f.save(secure_filename(f.filename))

        # load the soccer ball detection model
        model_path = "app/static/"
        model_name = "frozen_inference_graph.pb"
        model_text = "graph_text.pbtxt"
        net = video_tracker.loading_model(model_path + model_name,
                                          model_path + model_text)

        # load pose model
        sess = tf.Session()
        model_cfg, model_outputs = posenet.load_model(101, sess)
        output_stride = model_cfg['output_stride']

        #OPTIONALLY can output the following, then show output as pre-loaded gif
        #(video_bytes, bounces, body_part_bounces, body_part_sequence) = video_tracker.run_video(f.filename, net, sess, output_stride, model_outputs)
        #return Response(video_tracker.display_video(video_bytes), mimetype='multipart/x-mixed-replace; boundary=frame')

        # Show output video as it is processed in real time
        return Response(video_tracker.run_video(f.filename, net, sess,
                                                output_stride, model_outputs),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
Code Example #9
def main():
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(argModel, sess)
        output_stride = model_cfg['output_stride']
        cap = cv2.VideoCapture(cam_id)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        cap.set(3, cam_width)
        cap.set(4, cam_height)

        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            keypoint_coords *= output_scale

            overlay_image = posenet.draw_skel_and_kp(display_image,
                                                     pose_scores,
                                                     keypoint_scores,
                                                     keypoint_coords,
                                                     min_pose_score=0.15,
                                                     min_part_score=0.1)

            leftHandHigh = False
            leftWrist_h = keypoint_coords[0, 9, 0]
            leftShoulder_h = keypoint_coords[0, 5, 0]
            if leftWrist_h < leftShoulder_h:
                leftHandHigh = True

            rightHandHigh = False
            rightWrist_h = keypoint_coords[0, 10, 0]
            rightShoulder_h = keypoint_coords[0, 6, 0]
            if rightWrist_h < rightShoulder_h:
                rightHandHigh = True

            if rightHandHigh and leftHandHigh:
                pos = 'U_arm'
            elif rightHandHigh:
                pos = 'R_arm'
            elif leftHandHigh:
                pos = 'L_arm'
            else:
                pos = 'D_arm'
            background[200:200 + 346, 200:200 + 240] = imcollection[pos]
            cv2.imshow('posenet', background)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
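
The hard-coded indices 5, 6, 9 and 10 above follow PoseNet's fixed 17-keypoint layout, and each keypoint is stored as (y, x), so comparing index 0 compares vertical positions (smaller y is higher in the image). For reference, posenet.PART_NAMES is:

PART_NAMES = [
    "nose", "leftEye", "rightEye", "leftEar", "rightEar",
    "leftShoulder", "rightShoulder", "leftElbow", "rightElbow",
    "leftWrist", "rightWrist", "leftHip", "rightHip",
    "leftKnee", "rightKnee", "leftAnkle", "rightAnkle"
]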
Code Example #10
def main():

    with torch.no_grad():
        model = posenet.load_model(args.model)
        model = model.cuda()
        output_stride = model.output_stride
        num_images = args.num_images

        filenames = [
            f.path for f in os.scandir(args.image_dir) if f.is_file() and f.path.endswith(('.png', '.jpg'))]
        if len(filenames) > num_images:
            filenames = filenames[:num_images]

        images = {f: posenet.read_imgfile(f, 1.0, output_stride)[0] for f in filenames}

        start = time.time()
        for i in range(num_images):
            input_image = torch.Tensor(images[filenames[i % len(filenames)]]).cuda()

            results = model(input_image)
            heatmaps, offsets, displacement_fwd, displacement_bwd = results
            output = posenet.decode_multiple_poses(
                heatmaps.squeeze(0),
                offsets.squeeze(0),
                displacement_fwd.squeeze(0),
                displacement_bwd.squeeze(0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.25)

        print('Average FPS:', num_images / (time.time() - start))
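
One caveat with this benchmark: CUDA kernels launch asynchronously, so the wall-clock measurement can be optimistic unless the GPU is drained first (the decode step may already force a sync when results are read back, but an explicit call makes the measurement unambiguous). A hedged tweak:

torch.cuda.synchronize()  # wait for queued GPU work before reading the clock
print('Average FPS:', num_images / (time.time() - start))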
Code Example #11
def import_posenet_model(sess):
    cwd = os.getcwd()
    os.chdir("posenet-python")
    import posenet
    os.chdir(cwd)

    return posenet.load_model(100, sess)[1][0]  # return heatmap
Code Example #12
    def process_pose_frame(np_frame, resolution):
        frame = None
        with tf.Session() as sess:
            model_cfg, model_outputs = posenet.load_model(0, sess)
            output_stride = model_cfg['output_stride']

            input_image, draw_image, output_scale = posenet.process_input(
                np_frame, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=1,
                min_pose_score=0.25)

            keypoint_coords *= output_scale

            frame = posenet.draw_skel_and_kp(draw_image,
                                             pose_scores,
                                             keypoint_scores,
                                             keypoint_coords,
                                             min_pose_score=0.25,
                                             min_part_score=0.25)

        return frame
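
Note that process_pose_frame creates a fresh session and reloads the model for every frame, which will dominate the runtime. A sketch of the usual refactor, caching the session at module level (names here are hypothetical):

_sess = None
_model_cfg = None
_model_outputs = None

def _get_posenet():
    # Load the graph once and reuse it across frames.
    global _sess, _model_cfg, _model_outputs
    if _sess is None:
        _sess = tf.Session()
        _model_cfg, _model_outputs = posenet.load_model(0, _sess)
    return _sess, _model_cfg, _model_outputs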
Code Example #13
def main():

    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.output_dir:
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)

        filenames = [
            f.path for f in os.scandir(args.image_dir) if f.is_file() and f.path.endswith(('.png', '.jpg'))]

        start = time.time()
        for f in filenames:
            input_image, draw_image, output_scale = posenet.read_imgfile(
                f, scale_factor=args.scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.25)

            keypoint_coords *= output_scale

            if args.output_dir:
                draw_image = posenet.draw_skel_and_kp(
                    draw_image, pose_scores, keypoint_scores, keypoint_coords,
                    min_pose_score=0.25, min_part_score=0.25)

                cv2.imwrite(os.path.join(args.output_dir, os.path.relpath(f, args.image_dir)), draw_image)

            if not args.notxt:
                print()
                print("Results for image: %s" % f)

                # Append this image's results to a log file
                with open("log.txt", "a") as file:
                    for pi in range(len(pose_scores)):
                        if pose_scores[pi] == 0.:
                            break
                        print('Pose #%d, score = %f' % (pi, pose_scores[pi]))
                        file.write('Pose #%d, score = %f \n' % (pi, pose_scores[pi]))
                        for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):
                            print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))
                            file.write('Keypoint %s, score = %f, coord = %s \n' % (posenet.PART_NAMES[ki], s, c))
        print('Average FPS:', len(filenames) / (time.time() - start))
Code Example #14
File: server.py Project: arnitkun/PoseBox
def load_model_posenet():
    global model_posenet, sess
    if sess is not None:
        sess.close()
    sess = tf.Session()
    model_cfg, model_outputs = posenet.load_model(101, sess)
    model_posenet['model_cfg'] = model_cfg
    model_posenet['model_outputs'] = model_outputs
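
This function mutates module-level state that must exist before the first call; the initialization is not shown, but presumably something like:

model_posenet = {}  # filled with 'model_cfg' and 'model_outputs'
sess = None         # closed and replaced on each (re)load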
Code Example #15
def main():
    #  D:\Programming-Github\Sem-6\MiniProject\posenet-pytorch-master\posenet\converter\wget.py
    #  go here and find
    #       data = json.loads(response.content)
    #  replace with
    #       data = json.loads(response.content.decode('utf-8'))
    model = posenet.load_model(args.model)
    model = model.cuda()
    output_stride = model.output_stride

    cap = cv2.VideoCapture(
        "C:\\Users\\habil\\Pictures\\Camera Roll\\WIN_20200329_15_03_35_Pro.mp4"
    )
    cap.set(3, args.cam_width)
    cap.set(4, args.cam_height)

    start = time.time()
    frame_count = 0
    while True:

        try:
            input_image, display_image, output_scale = posenet.read_cap(
                cap,
                scale_factor=args.scale_factor,
                output_stride=output_stride)
        except OSError:
            break

        with torch.no_grad():
            input_image = torch.Tensor(input_image).cuda()

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(
                input_image)

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
                heatmaps_result.squeeze(0),
                offsets_result.squeeze(0),
                displacement_fwd_result.squeeze(0),
                displacement_bwd_result.squeeze(0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

        keypoint_coords *= output_scale

        #overlay_image = posenet.draw_skel_and_kp(
        #display_image, pose_scores, keypoint_scores, keypoint_coords,
        #min_pose_score=0.15, min_part_score=0.1)

        print(keypoint_coords)
        #overlay_image = posenet.draw_skel_and_kp(display_image,[],[],[])

        #cv2.imshow('posenet', overlay_image)
        frame_count += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print('Average FPS: ', frame_count / (time.time() - start))
Code Example #16
def init_session(self):
    if self.sess is None:
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())
        self.model_cfg, self.model_outputs = posenet.load_model(
            self.model, self.sess)
        self.output_stride = self.model_cfg['output_stride']
        self.calibration_count = 0
        self.proportion = None
Code Example #17
def main():

    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.output_csv_dir:
            if not os.path.exists(args.output_csv_dir):
                os.makedirs(args.output_csv_dir)

        filenames = [
            f.path for f in os.scandir(args.image_dir)
            if f.is_file() and f.path.endswith(('.png', '.jpg'))
        ]

        start = time.time()
        for f in filenames:
            input_image, draw_image, output_scale = posenet.read_imgfile(
                f, scale_factor=args.scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=1,
                min_pose_score=0.25)

            keypoint_coords *= output_scale

            with open(args.output_csv_dir + "/motion_model.csv",
                      'a') as write_file:
                writer = csv.writer(write_file)

                # clip
                keypoint_coords[0, :, 0] = keypoint_coords[0, :, 0] - min(
                    keypoint_coords[0, :, 0])
                keypoint_coords[0, :, 1] = keypoint_coords[0, :, 1] - min(
                    keypoint_coords[0, :, 1])

                # normalize
                x_l2_norm = np.linalg.norm(keypoint_coords[0, :, 0], ord=2)
                pose_coords_x = (keypoint_coords[0, :, 0] / x_l2_norm).tolist()
                y_l2_norm = np.linalg.norm(keypoint_coords[0, :, 1], ord=2)
                pose_coords_y = (keypoint_coords[0, :, 1] / y_l2_norm).tolist()

                tpm_row = ([f.replace(args.image_dir, '')] + pose_coords_x +
                           pose_coords_y + keypoint_scores[0, :].tolist() +
                           [pose_scores[0]])
                writer.writerow(tpm_row)

        print('Average FPS:', len(filenames) / (time.time() - start))
        print('Complete making CSV File!!')
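
The clip-then-L2-normalize step above yields translation-insensitive, scale-normalized coordinate rows, which makes a simple vector distance meaningful for comparing poses across images. A sketch of one such comparison (this helper is illustrative, not part of the original project):

import numpy as np

def pose_row_distance(row_a, row_b):
    # Euclidean distance between two normalized coordinate rows;
    # smaller means more similar poses.
    return np.linalg.norm(np.asarray(row_a) - np.asarray(row_b))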
Code Example #18
File: pose.py Project: tprlab/posenet-python
def init(model=50):
    global tf_sess, model_cfg, model_outputs

    t = time.time()

    tf_sess = tf.Session()
    model_cfg, model_outputs = posenet.load_model(model, tf_sess)

    t = time.time() - t
    logging.debug("Model loaded in {:.4f} secs".format(t))
Code Example #19
def main():
    write = open_csv_file('../data/lie.csv', 'w')

    cam_width = 640
    cam_height = 480
    scale_factor = 0.7125
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(101, sess)
        output_stride = model_cfg['output_stride']
        cap = cv2.VideoCapture(0)
        cap.set(3, cam_width)
        cap.set(4, cam_height)

        irr_count = 0
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor, output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=1,
                min_pose_score=0.15)

            keypoint_coords *= output_scale

            overlay_image = posenet.draw_skel_and_kp(
                display_image, pose_scores, keypoint_scores, keypoint_coords,
                min_pose_score=0.15, min_part_score=0.1)

            cv2.putText(overlay_image, str(irr_count), (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
            cv2.imshow('posenet', overlay_image)


            keypoint_coords[:, :, 0] = keypoint_coords[:, :, 0] / 480
            keypoint_coords[:, :, 1] = keypoint_coords[:, :, 1] / 640
            flat_array = keypoint_coords.flatten()
            # Insert the label for training data (stand: 0, sit: 1, lie: 2) at index 0
            new_data = np.insert(flat_array, 0, 0)

            write.writerow(new_data)

            irr_count += 1

            if cv2.waitKey(1) & 0xFF == 27:
                break
            elif irr_count == 1200:
                break
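
open_csv_file is not shown; given the write.writerow(...) calls it presumably opens the path and returns a csv.writer. A minimal sketch (keeping the handle referenced so it is not garbage-collected mid-run):

import csv

_csv_handles = []  # keep file handles alive for the life of the script

def open_csv_file(path, mode):
    f = open(path, mode, newline='')
    _csv_handles.append(f)
    return csv.writer(f)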
Code Example #20
def main():
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.file is not None:
            cap = cv2.VideoCapture(args.file)
        else:
            cap = cv2.VideoCapture(args.cam_id)
        cap.set(3, args.cam_width)
        cap.set(4, args.cam_height)

        start = time.time()
        frame_count = 0
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=args.scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            keypoint_coords *= output_scale

            overlay_image = posenet.draw_skel_and_kp(
                display_image, pose_scores, keypoint_scores, keypoint_coords,
                min_pose_score=0.15, min_part_score=0.1)

            cv2.imshow('posenet', overlay_image)
            frame_count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # Append this frame's results to a log file
            with open("log.txt", "a") as file:
                for pi in range(len(pose_scores)):
                    if pose_scores[pi] == 0.:
                        break
                    print('Pose #%d, score = %f' % (pi, pose_scores[pi]))
                    file.write('Pose #%d, score = %f \n' % (pi, pose_scores[pi]))
                    for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):
                        print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))
                        file.write('Keypoint %s, score = %f, coord = %s \n' % (posenet.PART_NAMES[ki], s, c))
        print('Average FPS: ', frame_count / (time.time() - start))
Code Example #21
def main():
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(argModel, sess)
        output_stride = model_cfg['output_stride']
        cap = cv2.VideoCapture(cam_id)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        cap.set(3, cam_width)
        cap.set(4, cam_height)

        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            keypoint_coords *= output_scale

            overlay_image = posenet.draw_skel_and_kp(display_image,
                                                     pose_scores,
                                                     keypoint_scores,
                                                     keypoint_coords,
                                                     min_pose_score=0.15,
                                                     min_part_score=0.1)

            leftHandHigh = False
            leftWrist_h = keypoint_coords[0, 9, 0]
            leftShoulder_h = keypoint_coords[0, 5, 0]
            if leftWrist_h < leftShoulder_h:
                leftHandHigh = True

            rightHandHigh = False
            rightWrist_h = keypoint_coords[0, 10, 0]
            rightShoulder_h = keypoint_coords[0, 6, 0]
            if rightWrist_h < rightShoulder_h:
                rightHandHigh = True

            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(overlay_image,
                        str(leftHandHigh) + str(rightHandHigh), (10, 450),
                        font, 1, (0, 255, 0), 1, cv2.LINE_AA)
            cv2.imshow('posenet', overlay_image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Code Example #22
File: convert.py Project: aptlin/posenet-tvm
def main():
    model = posenet.load_model(args.model)
    model = model.to(DEVICE)
    output_stride = model.output_stride

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    start = time.time()
    random_cv2_image = np.random.randint(
        256,
        size=(args.processing_height, args.processing_width, args.n_channels),
        dtype=np.uint8,
    )

    input_image, draw_image, output_scale = posenet.process_input(
        random_cv2_image,
        scale_factor=args.scale_factor,
        output_stride=output_stride)

    scripted_model = torch.jit.trace(model, torch.Tensor(input_image)).eval()

    input_name = args.input_name
    shape_list = [(input_name, input_image.shape)]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

    target = tvm.target.create(args.target)
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, target=target, params=params)

    path_lib = "{}/deploy_lib_{}_{}_{}.tar".format(args.output_dir,
                                                   args.input_name,
                                                   args.processing_width,
                                                   args.processing_height)
    path_graph = "{}/deploy_graph_{}_{}_{}.json".format(
        args.output_dir, args.input_name, args.processing_width,
        args.processing_height)
    path_params = "{}/deploy_params_{}_{}_{}.params".format(
        args.output_dir, args.input_name, args.processing_width,
        args.processing_height)

    lib.export_library(path_lib)
    with open(path_graph, "w") as f:
        f.write(graph)
    with open(path_params, "wb") as f:
        f.write(relay.save_param_dict(params))

    ctx = tvm.cpu(0) if str(DEVICE) == "cpu" else tvm.gpu()
    print("-" * 80)
    print("Done! Converted and serialized the model to:")
    print("\t- lib: {}".format(path_lib))
    print("\t- graph: {}".format(path_graph))
    print("\t- params: {}".format(path_params))
    print("-" * 80)
Code Example #23
def main():

    cap = cvs.VideoCapture(0)

    with tf.Session() as sess:
        print('load models...')
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        start = time.time()
        frame_count = 0
        while True:
            sleep(30)
            img = cvs.read()
            if img is None:
                continue

            if cam_id > 0:
                img = cvs.flip(img, 0)

            input_image, display_image, output_scale = posenet.read_cap(
                img,
                scale_factor=args.scale_factor,
                output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            keypoint_coords *= output_scale
            #print keypoint_coords

            # TODO this isn't particularly fast, use GL for drawing and display someday...
            overlay_image = posenet.draw_skel_and_kp(display_image,
                                                     pose_scores,
                                                     keypoint_scores,
                                                     keypoint_coords,
                                                     min_pose_score=0.15,
                                                     min_part_score=0.1)

            cvs.imshow(overlay_image)
            frame_count += 1
            # global lbs
            lbs = 'Average FPS: ' + str(frame_count / (time.time() - start))
            cvs.setLbs(lbs)
Code Example #24
def load_model():
    global _posenet_model, _session

    if not _posenet_model:
        # Create new session and load model into graph of new session
        _session = tf.Session()
        _posenet_model = posenet.load_model(POSENET_MODEL_ID, _session,
                                  model_dir=POSENET_MODEL_DIR)

    model_cfg, model_outputs = _posenet_model
    return model_cfg, model_outputs, _session
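
As with the earlier server example, the module-level cache needs initializing at import time; presumably:

_posenet_model = None  # (model_cfg, model_outputs), cached after the first load
_session = None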
Code Example #25
def __init__(self):
    # initialize PoseNet Model
    print("[INFO] loading PoseNet Model...")
    argument_model = 101
    self.scale_factor = 0.7125  # TODO image our:0.1
    self.model = posenet.load_model(argument_model).cuda()
    self.output_stride = self.model.output_stride
    self.Pose_pattern = np.loadtxt('golden_pattern.txt', delimiter=' ')
    self.pose_scores = 0
    self.keypoint_scores = 0
    self.keypoint_coords = None
Code Example #26
def recorded_main_vid():
    with tf.compat.v1.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        url = "https://www.youtube.com/watch?v=2HTvZp5rPrg&t=7s"
        video = pafy.new(url)
        best = video.getbest(preftype="mp4")
        cap = cv2.VideoCapture()
        cap.open(best.url)
        cap.set(cv2.CAP_PROP_FPS, 30)

        start = time.time()
        frame_count = 0
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=args.scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )

            global keypoint2

            pose_scores, keypoint_scores, keypoint_coords2 = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)
            
            keypoint_coords2 *= output_scale
            keypoint2 = np.array(keypoint_coords2[0])
            # print("camera", np.array(keypoint_coords2[0]).shape)

            # for ii, score in enumerate(pose_scores):
            #     print("************")
            #     print(ii, "----------", score)
            # print(pose_scores)

            overlay_image = posenet.draw_skel_and_kp(
                display_image, pose_scores, keypoint_scores, keypoint_coords2,
                min_pose_score=0.15, min_part_score=0.1)

            # overlay_image = cv2.resize(overlay_image, (0,0), fx=0.8, fy=0.8)

            ret, jpeg = cv2.imencode('.jpg', overlay_image)

            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Code Example #27
def main(VideoName, model_layers):
    model = posenet.load_model(args.model)
    model = model.cuda()
    output_stride = model.output_stride
    cap, cap_length = videoInfo(VideoName)
    kpt2Ds = []
    pose_3d = []
    #annotator = AnnotatorInterface.build(max_persons=1)
    for i in range(cap_length):  #tqdm(range(cap_length)):
        #if i < 90: continue
        if i > 300: break
        _, frame = cap.read()
        input_image, display_image, output_scale = posenet.process_input(
            frame, 1 / 3.0, output_stride)
        frame, W, H = resize_img(frame)

        time0 = time.time()
        joint2D = get_2d_pose_torch(input_image, output_stride,
                                    model)  #get_2d_pose_1(frame)
        time1 = time.time()

        #print(output_scale)
        #joint2 = 0#get_2d_pose_2(sess, input_image, output_stride, model_outputs)
        #persons = annotator.update(frame)
        #poses_2d = [p['pose_2d'].get_joints() for p in persons]
        #joint2D2 = poses_2d[0]
        #print(joint2D)
        #joint2D = np.vstack((joint2D[0:1, :], joint2D[5:17, :]))
        #print(joint2D3.shape)
        time2 = time.time()
        #raise KeyboardInterrupt
        if i == 0:
            for _ in range(30):
                kpt2Ds.append(joint2D)
        else:
            kpt2Ds.append(joint2D)
            kpt2Ds.pop(0)

        #if i < 15:
        #    kpt2Ds.append(joint2D)
        #    kpt2Ds.pop(0)
        #else:
        #    kpt2Ds.append(joint2D)

        #print(len(kpt2Ds))
        joint3D = interface3D(model3D, np.array(kpt2Ds), W, H)
        joint3D_item = joint3D[-1]  #(17, 3)
        time3 = time.time()
        pose_3d.append((joint3D_item, joint2D))
        print(time1 - time0, time2 - time1, time3 - time2, time3 - time1)
        #draw_3Dimg(joint3D_item, frame, display=1, kpt2D=joint2D)
    save_pose(pose_3d)
Code Example #28
def main():
    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.file is not None:
            cap = cv2.VideoCapture(args.file)
        else:
            cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=2),
                                   cv2.CAP_GSTREAMER)
        #cap.set(3, args.cam_width)
        #cap.set(4, args.cam_height)

        start = time.time()
        frame_count = 0
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap,
                scale_factor=args.scale_factor,
                output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            keypoint_coords *= output_scale

            # TODO this isn't particularly fast, use GL for drawing and display someday...
            overlay_image = posenet.draw_skel_and_kp(display_image,
                                                     pose_scores,
                                                     keypoint_scores,
                                                     keypoint_coords,
                                                     min_pose_score=0.15,
                                                     min_part_score=0.1)

            cv2.namedWindow("posenet", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("posenet", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)
            cv2.imshow('posenet', overlay_image)
            frame_count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        print('Average FPS: ', frame_count / (time.time() - start))
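
gstreamer_pipeline is not shown; on Jetson boards it is conventionally a helper that builds an nvarguscamerasrc pipeline string for the CSI camera. A commonly used sketch (the parameters are illustrative, not necessarily the original's):

def gstreamer_pipeline(capture_width=1280, capture_height=720,
                       display_width=1280, display_height=720,
                       framerate=30, flip_method=0):
    # Build a GStreamer pipeline string for the Jetson CSI camera.
    return (
        'nvarguscamerasrc ! '
        'video/x-raw(memory:NVMM), width=%d, height=%d, '
        'format=NV12, framerate=%d/1 ! '
        'nvvidconv flip-method=%d ! '
        'video/x-raw, width=%d, height=%d, format=BGRx ! '
        'videoconvert ! video/x-raw, format=BGR ! appsink'
        % (capture_width, capture_height, framerate, flip_method,
           display_width, display_height)
    )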
Code Example #29
def main():
    stat = ''
    with tf.compat.v1.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.file is not None:
            cap = cv2.VideoCapture(args.file)
        else:
            cap = cv2.VideoCapture(args.cam_id)
        cap.set(3, args.cam_width)
        cap.set(4, args.cam_height)

        start = time.time()
        frame_count = 0
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap,
                scale_factor=args.scale_factor,
                output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            keypoint_coords *= output_scale
            stat = to_http.httpreq(keypoint_coords[0, 5, :], stat)

            # TODO this isn't particularly fast, use GL for drawing and display someday...
            overlay_image = posenet.draw_skel_and_kp(display_image,
                                                     pose_scores,
                                                     keypoint_scores,
                                                     keypoint_coords,
                                                     min_pose_score=0.15,
                                                     min_part_score=0.1)

            cv2.imshow('posenet', overlay_image)
            frame_count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        print('Average FPS: ', frame_count / (time.time() - start))
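
to_http.httpreq is project-specific; from its call site it takes the left-shoulder coordinate (keypoint index 5) plus the previous state and returns an updated state. A hypothetical sketch using requests (URL and payload shape are assumptions):

import requests

def httpreq(coord, stat):
    # Hypothetical: POST the (y, x) shoulder coordinate and return
    # the server's reported state string.
    r = requests.post('http://localhost:8000/pose',
                      json={'y': float(coord[0]), 'x': float(coord[1]),
                            'stat': stat})
    return r.text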
Code Example #30
def webcam_main_vid():
    with tf.compat.v1.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        if args.file is not None:
            cap = cv2.VideoCapture(args.file)
            # cap=cv2.VideoCapture(1)
        else:
            cap = cv2.VideoCapture(args.cam_id)
            # cap=cv2.VideoCapture(1)

        start = time.time()
        frame_count = 0
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=args.scale_factor, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )

            pose_scores, keypoint_scores, keypoint_coords1 = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=0.15)

            # for ii, score in enumerate(pose_scores):
            #     print("************")
            #     print(ii, "----------", score)

            global keypoint1
            keypoint_coords1 *= output_scale
            keypoint1 = np.array(keypoint_coords1[0])

            overlay_image = posenet.draw_skel_and_kp(
                display_image, pose_scores, keypoint_scores, keypoint_coords1,
                min_pose_score=0.15, min_part_score=0.1)

            # overlay_image = cv2.resize(overlay_image, (0,0), fx=0.8, fy=0.8)
            
            ret, jpeg = cv2.imencode('.jpg', overlay_image)

            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
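
Both webcam_main_vid and the earlier recorded_main_vid yield multipart JPEG chunks, the framing expected by a streaming HTTP response. A sketch of the Flask wiring they are presumably consumed by (route names are hypothetical):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/webcam_feed')
def webcam_feed():
    return Response(webcam_main_vid(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/recorded_feed')
def recorded_feed():
    return Response(recorded_main_vid(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')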