Example #1
 def __init__(self, size='432x368', model='mobilenet_thin'):
     w, h = model_wh(size)
     if w == 0 or h == 0:
         self.e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
     else:
         self.e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
     self.w, self.h = w, h
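A minimal standalone sketch of the same fallback logic, using the tf-pose-estimation imports these examples rely on (the image path is a placeholder):

from tf_pose import common
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh

w, h = model_wh('432x368')
# Fall back to the stock 432x368 input size when the resolution string is '0x0'.
e = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                    target_size=(w, h) if w > 0 and h > 0 else (432, 368))
frame = common.read_imgfile('./images/p1.jpg', None, None)
humans = e.inference(frame, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)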
Example #2
def infer(image, model='cmu', resize='0x0', resize_out_ratio=4.0):
    """

    :param image:
    :param model:
    :param resize:
    :param resize_out_ratio:
    :return: coco_style_keypoints array
    """
    w, h = model_wh(resize)
    e = get_estimator(model, resize)
    
    # estimate human poses from a single image
    image_path = image
    image = common.read_imgfile(image_path, None, None)
    if image is None:
        raise Exception('Image can not be read, path=%s' % image_path)
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=resize_out_ratio)
    image_h, image_w = image.shape[:2]
    
    if "TERM_PROGRAM" in os.environ and 'iTerm' in os.environ["TERM_PROGRAM"]:
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        image_str = cv2.imencode(".jpg", image)[1].tobytes()
        print("\033]1337;File=name=;inline=1:" + base64.b64encode(image_str).decode("utf-8") + "\a")
    
    return [(eval.write_coco_json(human, image_w, image_h), human.score) for human in humans]
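A hedged usage sketch for infer(); the image path is a placeholder, and the iteration follows the (coco_style_keypoints, score) tuples returned above:

# Sketch, assuming the module above is importable as shown:
results = infer('./images/p1.jpg', model='cmu', resize='432x368')
for coco_keypoints, score in results:
    print(score, coco_keypoints)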
Example #3
def run_video(video, folder_name, id_value, visual=False):
    print(f"Running on {folder_name} Video ID: {id_value}")
    fps_time = 0
    # `model`, `tensorrt`, and `resize_out_ratio` are module-level globals in the
    # original script (typically parsed from command-line arguments).
    w, h = model_wh('0x0')
    if w > 0 and h > 0:
        e = TfPoseEstimator(get_graph_path(model),
                            target_size=(w, h),
                            trt_bool=tensorrt)
    else:
        e = TfPoseEstimator(get_graph_path(model),
                            target_size=(432, 368),
                            trt_bool=tensorrt)

    cam = cv2.VideoCapture(video)
    ret_val, image = cam.read()  # prime the capture; this first frame is discarded

    while True:
        ret_val, image = cam.read()
        if not ret_val:
            break
        humans = e.inference(image,
                             resize_to_default=(w > 0 and h > 0),
                             upsample_size=resize_out_ratio)
        print(humans)

        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.imshow('tf-pose-estimation result', image)
        fps_time = time.time()
        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
Example #4
    def __init__(self, model=None, image_size=None):

        if model is None:
            model = "cmu"

        if image_size is None:
            image_size = "432x368"  # 7 fps

        models = {"mobilenet_thin", "cmu"}
        self.model = model if model in models else "mobilenet_thin"
        self.resize_out_ratio = 4.0

        w, h = model_wh(image_size)
        if w == 0 or h == 0:
            # `config` is a tf.ConfigProto assumed to be defined elsewhere in the module.
            e = TfPoseEstimator(get_graph_path(self.model),
                                target_size=(432, 368),
                                tf_config=config)
        else:
            e = TfPoseEstimator(get_graph_path(self.model),
                                target_size=(w, h),
                                tf_config=config)

        # self.args = args
        self.w, self.h = w, h
        self.e = e
        self.fps_time = time.time()
        self.cnt_image = 0
Example #5
def detection_loop():
    global outputFrame, outputArray, lock

    w, h = model_wh("432x368")
    e = TfPoseEstimator(get_graph_path("mobilenet_thin"), target_size=(w, h))
    cam = cv2.VideoCapture(0)
    cam.set(3, 1280)
    cam.set(4, 720)
    print("------------openpose_finished---------")
    try:
        # getframe() is a generator assumed to be defined elsewhere in the module.
        for img in getframe(cam, e, w, h):
            if img is not None:
                with lock:
                    outputFrame = img
                    outputArray = "a"
                    mystr = '{"timestamp":"2020-11-16 03:14:43.430204","nodeid":"0","sensor":"image","car_count":"0"}'
                    mqtt_client.publish(
                        "{}/{}".format(args.mqtt_topic, 'car_count'),
                        str(mystr))
                    mqtt_client.publish(
                        "{}/{}".format(args.mqtt_topic, 'car_count'),
                        str(outputArray))
            else:
                print(
                    "--------------Thread_finished(ctl + C)-----------------")

    except BaseException:  # bare except in the original; includes KeyboardInterrupt (Ctrl+C)
        os._exit(1)
Example #6
	def makeData(self):
		w, h = model_wh('432x368')
		e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))

		i = 0
		cap = cv2.VideoCapture(WEBCAM_FILE_NAME)
		while cap.isOpened():
			endFlag, frame = cap.read()
			if endFlag == False:
				break
			cv2.imwrite('./results/webcam/webcamRaw/'+'img_%s.png' % str(i).zfill(6), frame)
			i += 1

		j = 0
		path_w = './results/webcam/webcamData.txt'
		with open(path_w, mode='w') as f:
			f.write('')
		while j < i:
			image = common.read_imgfile('./results/webcam/webcamRaw/'+'img_%s.png' % str(j).zfill(6))
			humans = e.inference(image, resize_to_default=True, upsample_size=4.0)

			centers = TfPoseEstimator.get_centers(image, humans, imgcopy=False)
			
			with open(path_w, mode='a') as f:
				f.write('t' + str(j) + ':' + str(centers) + '\n')
			j += 1

		k = 30
		while k < 80:
			image = common.read_imgfile('./results/webcam/webcamRaw/'+'img_%s.png' % str(k).zfill(6))
			humans = e.inference(image, resize_to_default=True, upsample_size=4.0)
			image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
			cv2.imwrite('./results/webcam2/webcamOpenpose/'+'img_%s.png' % str(k).zfill(6), image)
			k += 1
Example #7
 def __init__(self, image, resize='0x0', model='mobilenet_thin'):
     self.logger = logging.getLogger('TfPoseEstimator-WebCam')
     self.logger.setLevel(logging.DEBUG)
     self.ch = logging.StreamHandler()
     self.ch.setLevel(logging.DEBUG)
     self.formatter = logging.Formatter(
         '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
     self.ch.setFormatter(self.formatter)
     self.logger.addHandler(self.ch)
     self.logger.debug('initialization %s : %s' %
                       (model, get_graph_path(model)))
     self.w, self.h = model_wh(resize)
     if self.w > 0 and self.h > 0:
         self.e = TfPoseEstimator(get_graph_path(model),
                                  target_size=(self.w, self.h))
     else:
         self.e = TfPoseEstimator(get_graph_path(model),
                                  target_size=(432, 368))
     self.logger.debug('cam read+')
     # cam = cv2.VideoCapture(camera)
     # ret_val, image = cam.read()
     self.image_h, self.image_w = image.shape[:2]
     # logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))
     self.fps_time = 0
     self.videostep = 0
     self.human_keypoint = {0: [np.zeros(36, dtype=int)]}  # 36 zeros: 18 joints x (x, y)
     self.hisfps = []  # Historical FPS data
Example #8
def initial_pose_model(config):
    w, h = model_wh(config["resize"])
    if w == 0 or h == 0:
        pose = TfPoseEstimator(get_graph_path(config["pose_model"]), target_size=(432, 368))
    else:
        pose = TfPoseEstimator(get_graph_path(config["pose_model"]), target_size=(w, h))
    return pose
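A sketch of the config dict this helper expects, inferred from the two keys it reads:

config = {
    "resize": "432x368",             # 'WxH' network input size; '0x0' falls back to 432x368
    "pose_model": "mobilenet_thin",  # e.g. 'cmu' or 'mobilenet_thin'
}
pose = initial_pose_model(config)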
Example #9
def infer(image, model='cmu', resize='0x0', resize_out_ratio=4.0):
    """

    :param image:
    :param model:
    :param resize:
    :param resize_out_ratio:
    :return:
    """
    w, h = model_wh(resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))

    # estimate human poses from a single image
    image_path = image
    image = common.read_imgfile(image_path, None, None)
    if image is None:
        raise Exception('Image can not be read, path=%s' % image_path)
    humans = e.inference(image,
                         resize_to_default=(w > 0 and h > 0),
                         upsample_size=resize_out_ratio)

    if "TERM_PROGRAM" in os.environ and 'iTerm' in os.environ["TERM_PROGRAM"]:
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        image_str = cv2.imencode(".jpg", image)[1].tobytes()
        print("\033]1337;File=name=;inline=1:" +
              base64.b64encode(image_str).decode("utf-8") + "\a")

    return humans
Example #10
    def __init__(self, model="cmu"):
        models = {"mobilenet_thin", "cmu"}
        self.model = model if model in models else "mobilenet_thin"
        # parser = argparse.ArgumentParser(description='tf-pose-estimation run')
        # parser.add_argument('--image', type=str, default='./images/p1.jpg')
        # parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')

        # parser.add_argument('--resize', type=str, default='0x0',
        #                     help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
        # parser.add_argument('--resize-out-ratio', type=float, default=4.0,
        #                     help='if provided, resize heatmaps before they are post-processed. default=1.0')
        self.resize_out_ratio = 4.0

        # args = parser.parse_args()

        # w, h = model_wh(args.resize)
        w, h = model_wh("432x368")
        if w == 0 or h == 0:
            e = TfPoseEstimator(get_graph_path(self.model),
                                target_size=(432, 368))
        else:
            e = TfPoseEstimator(get_graph_path(self.model), target_size=(w, h))

        # self.args = args
        self.w, self.h = w, h
        self.e = e
        self.fps_time = time.time()
Example #11
    def run(self):
        try:
            w, h = model_wh(self.resolution)
            e = self.__load_model()
            cap = self.__open_video()

            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)

            output = self.__open_video_writer()

            while cap.isOpened():
                ret_val, image = cap.read()
                try:
                    humans = e.inference(image,
                                         resize_to_default=(w > 0 and h > 0),
                                         upsample_size=4.0)
                except Exception:
                    break

                if not self.show_bg:
                    image = np.zeros(image.shape)
                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)

                if self.output_video != '':
                    output.write(image)

            output.release()
            cv2.destroyAllWindows()
        except Exception as exc:  # renamed to avoid shadowing the estimator `e`
            VideoProcesserThread.error_signal.emit(exc)
Example #12
def infer(image,
          model='mobilenet_thin',
          resize='368x368',
          resize_out_ratio=4.0):
    """

    :param image:
    :param model:
    :param resize:
    :param resize_out_ratio:
    :return:
    """
    w, h = model_wh(resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))

    # estimate human poses from a single image
    image_path = image
    image = common.read_imgfile(image_path, None, None)
    if image is None:
        raise Exception('Image can not be read, path=%s' % image_path)
    if image.shape[0] != 480 or image.shape[1] != 640:
        image = cv2.resize(image, (368, 368))
    humans = e.inference(image,
                         resize_to_default=(w > 0 and h > 0),
                         upsample_size=resize_out_ratio)

    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    return image
Example #13
def load(model='mobilenet_thin', resize='656x368'):
    w, h = model_wh(resize)
    global estimator
    if w == 0 or h == 0:
        estimator = TfPoseEstimator(
            get_graph_path(model), target_size=(432, 368))
    else:
        estimator = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
Example #14
def get_estimator(model='cmu', resize='0x0'):
    w, h = model_wh(resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))

    return e
Example #15
def create_estimator():
    '''

    :return: a newly created TfPoseEstimator
    '''
    w, h = model_wh(FLAGS.resize)
    e = TfPoseEstimator(get_graph_path(FLAGS.model), target_size=(w, h))
    return e
Example #16
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        """

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 110, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)
        gz.translate(0, 0, -10)
        self.window.addItem(gx)
        self.window.addItem(gy)
        self.window.addItem(gz)

        modeln = 'mobilenet_thin'
        modelr = '432x368'
        camera = 0

        self.lines = {}
        self.connection = [
            [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6],
            [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12],
            [12, 13], [8, 14], [14, 15], [15, 16]
        ]

        w, h = model_wh(modelr)
        self.e = TfPoseEstimator(get_graph_path(modeln), target_size=(w, h))
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose('./skeleton_humanactivity/lifting/models/prob_model_params.mat')
        keypoints = self.mesh(image)

        self.points = gl.GLScatterPlotItem(
            pos=keypoints,
            color=pg.glColor((0, 255, 0)),
            size=15
        )
        self.window.addItem(self.points)

        for n, pts in enumerate(self.connection):
            self.lines[n] = gl.GLLinePlotItem(
                pos=np.array([keypoints[p] for p in pts]),
                color=pg.glColor((0, 0, 255)),
                width=3,
                antialias=True
            )
            self.window.addItem(self.lines[n])
Example #17
def get_2d_estimator(model='mobilenet_thin', resize='432x368'):
    w, h = model_wh(resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
    # Note: outside a class body no name mangling applies, so these set attributes
    # literally named `__w` and `__h` on the estimator instance.
    e.__w = w
    e.__h = h
    return e
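A short usage sketch; the image path and the import are placeholders following the package layout used in these examples. Reading the stashed attributes back works at module level because no name mangling applies there:

from tf_pose import common

e = get_2d_estimator()
image = common.read_imgfile('./images/p1.jpg', None, None)
humans = e.inference(image,
                     resize_to_default=(e.__w > 0 and e.__h > 0),
                     upsample_size=4.0)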
Example #18
 def __init__(self):
     self.w, self.h = model_wh('432x368')
     if self.w > 0 and self.h > 0:
         self.e = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                                  target_size=(self.w, self.h),
                                  trt_bool=True)
     else:
         self.e = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                                  target_size=(432, 368),
                                  trt_bool=True)
Example #19
 def __init__(self, model='mobilenet_thin'):
     self.d = edict(camera=2,
                    model=model,
                    resize='432x368',
                    resize_out_ratio=4.0,
                    show_process=False,
                    tensorrt='False')
     self.e = TfPoseEstimator(get_graph_path(self.d.model),
                              target_size=model_wh(self.d.resize),
                              trt_bool=str2bool(self.d.tensorrt))
Example #20
 def __init__(self, gpu_id=0):
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
     config.allow_soft_placement = True
     config.log_device_placement = True
     model = "cmu"
     resize = "656x368"
     self._w, self._h = model_wh(resize)
     # init_model() is a project-specific helper assumed to be defined elsewhere.
     self._model = init_model(gpu_id, model, self._w, self._h, config)
     self._logger = logging.getLogger(__name__)
Example #21
def openposeMain():
    parser = argparse.ArgumentParser(description='tf-pose-estimation Video')
    # parser.add_argument('--video', type=str, default='')
    parser.add_argument('--resolution',
                        type=str,
                        default='512x512',
                        help='network input resolution. default=432x368')
    parser.add_argument(
        '--model',
        type=str,
        default='mobilenet_thin',
        help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
    parser.add_argument(
        '--show-process',
        type=bool,
        default=False,
        help='for debug purpose, if enabled, speed for inference is dropped.')
    # note: argparse's type=bool treats any non-empty string as True
    parser.add_argument('--showBG',
                        type=bool,
                        default=True,
                        help='False to show skeleton only.')
    args = parser.parse_args()

    logger.debug('initialization %s : %s' %
                 (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    fps_time = time.time()  # initialize before the first FPS computation below
    while True:
        image = openposeBeforeQ.get()  # openposeBeforeQ/openposeAfterQ: module-level queues

        humans = e.inference(image, resize_to_default=True, upsample_size=4.0)

        # print('len(humans):', len(humans))  # people's coordinates per frame
        if not args.showBG:
            image = np.zeros(image.shape)
        (image, hands_centers) = TfPoseEstimator.draw_humans(image,
                                                             humans,
                                                             imgcopy=False)

        # return value
        openposeAfterQ.put(hands_centers)

        num_hands = len(hands_centers)

        # if num_hands != 0:
        #     for i in range(num_hands):
        #         print('i:',i,'hands_centers_inin:',hands_centers[i])

        cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # cv2.imshow('tf-pose-estimation result', image)
        fps_time = time.time()
Example #22
    def __load_model(self):
        if self.is_active:
            self.__clear_thread()

        w, h = model_wh(self.resolution)
        try:
            return TfPoseEstimator(get_graph_path(self.model),
                                   target_size=(w, h))
        except Exception:
            raise ModelError(
                "Failed to load the model: {}. Make sure the model is in the "
                "models/graph directory".format(self.model))
Example #23
    def create_dataframe(self,
                         path,
                         model='cmu',
                         resize_out_ratio=4.0,
                         resize="432x368"):
        """Analyzes the frame and returns a jupyter array where the relativ positons of the joints are located in the picture"""
        a = [str(x) if x > 9 else '0' + str(x) for x in range(1, 19)]
        b = ['x', 'y', 'score']
        temp_dic = {
            'bp' + str(key[0]) + str(key[1]): []
            for key in product(a, b)
        }
        frame = 0
        logger.debug('initialization %s : %s' % (model, get_graph_path(model)))
        w, h = model_wh(resize)
        e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        cap = cv2.VideoCapture(path)

        while cap.isOpened():
            frame += 1
            if frame % 100 == 0:
                print(frame)

            ret_val, image = cap.read()
            try:
                humans = e.inference(image,
                                     resize_to_default=(w > 0 and h > 0),
                                     upsample_size=resize_out_ratio)
            except Exception:
                break

            for human in humans:
                for key, value in temp_dic.items():
                    key = int(key[2:4])
                    found = human.body_parts.keys()
                    if key in found:
                        #print("found something")
                        skey = str(key) if key > 9 else '0' + str(key)
                        temp_dic['bp' + skey + "score"].append(
                            human.body_parts[key].score)
                        temp_dic['bp' + skey + "y"].append(
                            human.body_parts[key].x)
                        temp_dic['bp' + skey + "x"].append(
                            human.body_parts[key].y)
                    else:
                        skey = str(key) if key > 9 else '0' + str(key)
                        temp_dic['bp' + skey + "score"].append(None)
                        temp_dic['bp' + skey + "y"].append(None)
                        temp_dic['bp' + skey + "x"].append(None)

        cv2.destroyAllWindows()

        self.df = pd.DataFrame(temp_dic)
Example #24
def get_keypoints_from_directory(direction_path, format='jpg', model='mobilenet_v2_large', resolution='432x368'):
    all_keypoints = []  # returns a list of keypoint sequences (one image may contain several people)
    w, h = model_wh(resolution)
    estimator = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
    files_grabbed = glob.glob(os.path.join(direction_path, '*.' + format))
    for i, file in enumerate(files_grabbed):
        image = common.read_imgfile(file, None, None)
        humans = estimator.inference(image, True, 4.0)
        results = get_keypoints_from_humans(humans)
        for item in results:
            all_keypoints.append(item)
    return all_keypoints
Example #25
    def run(self):
        try:
            w, h = model_wh(self.resolution)
            e = self.__load_model(w, h)
            cap = self.__open_video()

            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            frames_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            current_frame = 0

            csv_file, csv_writer = self.__open_csv()
            video_output = self.__open_video_writer(
                width, height, fps) if self.output_video else None

            while cap.isOpened():
                if not self.is_active:
                    raise ProcessingInterruptedException(
                        "Processing was interrupted externally")

                ret_val, image = cap.read()
                try:
                    humans = e.inference(image,
                                         resize_to_default=(w > 0 and h > 0),
                                         upsample_size=4.0)
                except Exception:
                    break

                csv_writer.writerow([humans])

                if not self.show_bg:
                    image = np.zeros(image.shape)
                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)

                if video_output:
                    video_output.write(image)

                current_frame += 1
                self.__update_progress(frames_total, current_frame)

            if video_output:
                video_output.release()
            csv_file.close()
            cv2.destroyAllWindows()
            self.finish_signal.emit()

        except ProcessingInterruptedException:
            self.interrupted_signal.emit()
        except Exception as e:
            self.error_signal.emit(e)
Example #26
def initial_preprocessing(estimator, image_file, resize='0x0'):
    w, h = model_wh(resize)

    w, h = (432, 368) if (w, h) == (0, 0) else (w, h)

    estimator.initialize_hyperparams(target_size=(w, h))

    image = common.read_imgfile(image_file, None, None)
    if image is None:
        logger.error('Image can not be read, path=%s' % image)
        sys.exit(-1)

    return estimator, image, (w, h)
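A usage sketch under the assumption that `estimator` exposes the initialize_hyperparams() hook used above (it is not part of the stock TfPoseEstimator API); the image path is a placeholder:

estimator, image, (w, h) = initial_preprocessing(estimator, './images/p1.jpg', resize='656x368')
humans = estimator.inference(image,
                             resize_to_default=(w > 0 and h > 0),
                             upsample_size=4.0)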
Example #27
 def __read_model__(self):
     # load the model
     print("loading the model")
     try:
         w, h = model_wh(self.resolution)
         graph_path = get_graph_path(self.model)
         ros_pack = rospkg.RosPack()
         graph_path = os.path.join(ros_pack.get_path('tfpose_ros'), graph_path)
         pose_estimator = TfPoseEstimator(graph_path, target_size=(w, h))
         return pose_estimator
     except Exception as e:
         rospy.logerr('invalid model: %s, e=%s' % (self.model, e))
         sys.exit(-1)
Example #28
def record(args):

    # Open output file:
    """
        Output filename: <in_filename>_<model_name>
    """
    global out_data, out_file_path

    in_file = getFileName(args.video)  # getFileName(): assumed helper defined elsewhere
    out_file = ''.join([in_file, '_', args.model])
    out_file_path = os.path.join(args.output, out_file)

    logger.info(out_file_path)
    #with open(outfile, 'wb') as f:
    frame_idx = 0
    det_frame = 0

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    cap = cv2.VideoCapture(args.video)

    if not cap.isOpened():
        print("Error opening video stream or file.")

    num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    logger.info('Num frames to process: %d' % num_frames)
    pcnt = 0.0

    logger.info('Recording pose data to %s' % out_file_path)
    while cap.isOpened():
        ret, image = cap.read()
        if image is None:
            break

        humans = e.inference(image,
                             resize_to_default=True,
                             upsample_size=args.resize_out_ratio)

        if len(humans) > 0:
            out_data[frame_idx] = humans

        pcnt = (frame_idx + 1) / num_frames * 100.0
        sys.stdout.write("\rDone {0:.2f}%".format(pcnt))
        sys.stdout.flush()

        frame_idx += 1

    cap.release()

    logger.info('Writing data to %s' % out_file_path)
    pickleData()
Example #29
def on_stream(client, params):
    redis_store = redis.Redis(host='redis',
                              password='******',
                              port=6379,
                              db=0)
    global stream_running
    print(params)
    gpu_options = tf.GPUOptions()
    gpu_options.allow_growth = True

    w, h = model_wh(params['resize'])
    if w > 0 and h > 0:
        e = TfPoseEstimator(get_graph_path(params['model']),
                            target_size=(w, h),
                            tf_config=tf.ConfigProto(gpu_options=gpu_options))
    else:
        e = TfPoseEstimator(get_graph_path(params['model']),
                            target_size=(432, 368),
                            tf_config=tf.ConfigProto(gpu_options=gpu_options))
    cam = cv2.VideoCapture(params['camera'])
    ret_val, image = cam.read()

    while stream_running:
        ret_val, image = cam.read()
        if not ret_val:
            break
        humans = e.inference(image,
                             resize_to_default=(w > 0 and h > 0),
                             upsample_size=params['resize_out_ratio'])

        all_humans = []
        for human in humans:
            body_parts = []
            for part in human.body_parts.values():
                body_parts.append({
                    'index': part.part_idx,
                    'x': part.x,
                    'y': part.y,
                    'score': part.score
                })
            all_humans.append(body_parts)

        image_id = str(uuid.uuid4())

        retval, buffer = cv2.imencode('.png', image)
        png = base64.b64encode(buffer)

        redis_store.set(image_id, png, ex=EXPIRATION_TIME)

        frame = {'humans': all_humans, 'image': image_id}
        frame_json = json.dumps(frame)

        client.publish(SERVICE_NAME + '/pose', frame_json)
Example #30
def get_keypoints_from_image(image_path, model='mobilenet_v2_large', resolution='432x368', normalizetion=True):
    all_keypoints = []  # returns a list of keypoint sequences (one image may contain several people)
    w, h = model_wh(resolution)
    estimator = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
    image = common.read_imgfile(image_path, None, None)
    humans = estimator.inference(image, True, 4.0)
    for human in humans:
        body_dict = dict()
        human_split = str(human).split('BodyPart:')
        for bodypart in human_split:
            left_parenthesis = bodypart.find('(')
            if left_parenthesis != -1:
                if left_parenthesis == 2:
                    bodypart_index = int(bodypart[left_parenthesis - 2:left_parenthesis - 1])
                else:
                    bodypart_index = int(bodypart[left_parenthesis - 3:left_parenthesis - 1])
                x_pos = float(bodypart[left_parenthesis + 1:left_parenthesis + 5])
                y_pos = float(bodypart[left_parenthesis + 7:left_parenthesis + 11])
                confidence_score = float(bodypart[left_parenthesis + 19:left_parenthesis + 23])
                body_dict[bodypart_index] = [x_pos, y_pos, confidence_score]
            else:
                continue
        # fill in missing joints with zeros
        for q in range(18):
            if q not in body_dict.keys():
                body_dict[q] = [0, 0, 0]
        keypoints = []
        for q in range(18):
            keypoints.extend(body_dict[q][:2])  # x, y for each of the 18 joints
        if not normalizetion:
            all_keypoints.append(keypoints)  # no normalization
        else:
            all_keypoints.append(normalization_from_list(keypoints))  # normalize
    return all_keypoints
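Parsing str(human) as above is fragile; a sketch of the same 18-joint extraction using the BodyPart attributes that Example #29 already relies on (part.x, part.y, part.score):

def keypoints_from_human(human):
    # Build the flat [x0, y0, ..., x17, y17] list directly from the BodyPart
    # objects, filling missing joints with zeros instead of parsing str(human).
    coords = {idx: (part.x, part.y) for idx, part in human.body_parts.items()}
    flat = []
    for q in range(18):
        x, y = coords.get(q, (0.0, 0.0))
        flat.extend([x, y])
    return flat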