def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)
        self.timer.timeout.connect(self.compute)
        self.Period = 20
        self.timer.start(self.Period)

        ## Change this to your OpenPose installation path (defaults to $HOME/openpose)
        self.openpose_path = os.getenv("HOME") + '/openpose'
        params = dict()
        params["model_folder"] = self.openpose_path + "/models"
        params["hand"] = True
        params["hand_detector"] = 2
        params["body"] = 0

        try:
            self.openpose_wrapper = op.WrapperPython()
            self.openpose_wrapper.configure(params)
            self.openpose_wrapper.start()
        except Exception as e:
            print("Error creating the OpenPose Python wrapper. Check the installation:", e)
            sys.exit(-1)

        print("Component Started")
Example #2
    def __init__(self):
        # image array config
        self.bridge = CvBridge()
        self.threat_boxes_topic = rospy.get_param("~threat_image_topic",
                                                  "/threats/potential_images")
        self.threat_boxes_sub = rospy.Subscriber(self.threat_boxes_topic,
                                                 ImageArray, self.threat_boxes)
        self.msg_seq = 0
        self.msg_time = 0
        self.skeletons = []

        # openpose config
        self.params = dict()
        self.params["model_folder"] = rospy.get_param("~model_folder_path",
                                                      "/cfg/models/")
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(self.params)
        self.opWrapper.start()
        self.datum = op.Datum()

        # publish skeleton topics
        self.op_image_pub_topic = rospy.get_param("~skeleton_image_topic",
                                                  "/threats/skeleton_images")
        self.op_image_pub = rospy.Publisher(self.op_image_pub_topic,
                                            Image,
                                            queue_size=10)
        self.op_image_seq = 0

        # threat model params
        self.threat_model_name = rospy.get_param("~threat_model_name",
                                                 "default_model")
        self.threat_model_path = rospy.get_param("~threat_model_path")
        self.threat_model_meta = self.threat_model_path + self.threat_model_name + ".meta"
Example #3
    def __init__(self, pose_type=BODY25B, params=None):
        if params is None:
            params = {}
        if 'model_pose' in params:
            logger.warning(
                "'model_pose' in params conflicts with the pose_type argument; "
                "pose_type takes precedence.")
        super().__init__()
        self.pose_type = pose_type
        self.full_params = {
            'model_folder': self.model_path,
            'number_people_max': 3,
            # 'net_resolution': '-1x368', # it is default value
            'logging_level': 3,
            'display': 0,
            'alpha_pose': 0.79,
            # 'face': 1,
            # 'hand': 1,
        }
        self.full_params.update(params)
        self.full_params.update({
            'model_pose': pose_type.NAME,
        })

        self.opWrapper = opp.WrapperPython()
        self.opWrapper.configure(self.full_params)
        self.opWrapper.start()
        self.datum: opp.Datum = opp.Datum()
Example #4
def process(input_dir, openpose_model_dir):
    params = {}

    # Configure openpose params
    params['image_dir'] = input_dir
    params['model_folder'] = openpose_model_dir
    params['face'] = True

    op_wrapper = op.WrapperPython()
    op_wrapper.configure(params)
    op_wrapper.start()

    # Process images in input dir
    paths = op.get_images_on_directory(input_dir)
    joints_2d = []
    face_2d = []

    for path in paths:
        datum = op.Datum()
        img = cv2.imread(path)
        resolution = img.shape[:2]
        datum.cvInputData = img

        op_wrapper.emplaceAndPop([datum])

        joints_2d.append(format_pose(datum.poseKeypoints, resolution))
        face_2d.append(format_face(datum.faceKeypoints, resolution))

    return joints_2d, face_2d
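format_pose and format_face are not shown in this example. A plausible minimal stand-in (an assumption, not the original helpers) would normalize pixel coordinates by the image resolution:

import numpy as np

def format_pose(pose_keypoints, resolution):
    # Hypothetical helper: scale x/y into [0, 1] using the image size and keep
    # the confidence column unchanged; returns [] when nobody was detected.
    if pose_keypoints is None or pose_keypoints.ndim == 0:
        return []
    height, width = resolution
    normalized = pose_keypoints.astype(float)
    normalized[..., 0] /= width
    normalized[..., 1] /= height
    return normalized.tolist()

format_face would follow the same pattern using faceKeypoints.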
Example #5
def extractHeatMap(image):
    params = dict()
    params["model_folder"] = "./openpose/models/"
    params["heatmaps_add_parts"] = True
    params["heatmaps_add_bkg"] = True
    params["heatmaps_add_PAFs"] = True
    params["heatmaps_scale"] = 2
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Process Image
    datum = op.Datum()
    #imageToProcess = cv2.imread(args[0].image_path)
    imageToProcess = image
    datum.cvInputData = imageToProcess
    opWrapper.emplaceAndPop([datum])

    # Process outputs
    outputImageF = (datum.inputNetData[0].copy())[0, :, :, :] + 0.5
    outputImageF = cv2.merge(
        [outputImageF[0, :, :], outputImageF[1, :, :], outputImageF[2, :, :]])
    outputImageF = (outputImageF * 255.).astype(dtype='uint8')
    heatmaps = datum.poseHeatMaps.copy()
    heatmaps = (heatmaps).astype(dtype='uint8')
    return heatmaps
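With "heatmaps_add_parts" enabled and "heatmaps_scale" = 2, the returned maps are already integer-valued in roughly the [0, 255] range, so a single body-part channel can be colorized directly. A small visualization sketch (not part of the original function; the channel index is arbitrary):

import cv2

def show_heatmap_channel(heatmaps, channel=0):
    # heatmaps has shape (num_maps, height, width); pick one part/background/PAF channel.
    colored = cv2.applyColorMap(heatmaps[channel, :, :], cv2.COLORMAP_JET)
    cv2.imshow("heatmap", colored)
    cv2.waitKey(0)
    cv2.destroyWindow("heatmap")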
Example #6
def analyze():
    params = dict()
    params["model_folder"] = "../../../../models/"


    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    datum = op.Datum()
    
    while True:
        yuv_data_raw = dequeue()
        while yuv_data_raw is None:
            yuv_data_raw = dequeue()
        # 1620 rows = 1080 * 3/2: I420 stores the Y plane plus subsampled U/V planes
        yuv_data = struct.unpack('c' * 1620 * 1920, yuv_data_raw)
        yuv_img = np.array(list(map(ord, yuv_data)), dtype=np.uint8).reshape(1620, 1920)
        print(str(yuv_img.shape))
        
        imageToProcess = cv2.cvtColor(yuv_img, cv2.COLOR_YUV2RGB_I420)
        print('cvt to rgb end')
        datum.cvInputData = imageToProcess
        print('start analyze')
        opWrapper.emplaceAndPop([datum])
        print('analyze end')
        rgba_data = cv2.cvtColor(datum.cvOutputData, cv2.COLOR_BGR2BGRA)
        print('cvt to rgba end')
        rgba_bin = rgba_data.reshape(1920*1080*4).ctypes.data
        print('start enqueue')
        while enqueue(rgba_bin) == -1:
            pass
Example #7
def detection(pipe_img, pipe_center, pipe_scale, pipe_img_2, pipe_kp):

    params = set_params()
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    detection_count = 0
    detection_time = time.time()
    while True:
        img = pipe_img.recv()
        datum = op.Datum()
        datum.cvInputData = img
        opWrapper.emplaceAndPop([datum])
        bodyKeypoints_img = datum.cvOutputData
        cv2.rectangle(bodyKeypoints_img, (330, 620), (630, 720), (0, 0, 255),
                      3)
        #cv2.imwrite('kps.jpg',bodyKeypoints_img)
        # set_params() is expected to configure write_json so keypoint files land here
        json_path = glob.glob('/media/ramdisk/output_op/*keypoints.json')
        scale, center = op_util.get_bbox(json_path[0])
        if scale == -1 and center == -1: continue
        if scale >= 10: continue
        pipe_img_2.send(img)
        pipe_center.send(center)
        pipe_scale.send(scale)
        pipe_kp.send(bodyKeypoints_img)
        os.system("rm /media/ramdisk/output_op/*keypoints.json")
        detection_count = detection_count + 1
        if detection_count == 100:
            print('Detection FPS:',
                  1.0 / ((time.time() - detection_time) / 100.0))
            detection_count = 0
            detection_time = time.time()
Example #8
def main():
    # model files check and download
    print("=== ST-GCN model ===")
    check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)
    print("=== OpenPose model ===")
    check_and_download_models(WEIGHT_POSE_PATH, MODEL_POSE_PATH,
                              REMOTE_POSE_PATH)

    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    if args.arch == "pyopenpose":
        pose = op.WrapperPython()
        params = dict(model_folder='.', model_pose='COCO')
        pose.configure(params)
        pose.start()
    else:
        pose = ailia.PoseEstimator(MODEL_POSE_PATH,
                                   WEIGHT_POSE_PATH,
                                   env_id=args.env_id,
                                   algorithm=POSE_ALGORITHM)
        if args.arch == "openpose":
            pose.set_threshold(0.1)

    if args.video is not None:
        # realtime mode
        recognize_realtime(args.video, pose, net)
    else:
        # offline mode
        recognize_from_file(args.input, pose, net)
Example #9
    def __init__(self):
        rospy.init_node('pose_tracker')
        rospy.loginfo("Started pose tracking node!")

        # publishing to and subscribing to
        rospy.Subscriber("/zed/zed_node/left/image_rect_color",Image, self.classify_image)
        # rospy.Subscriber("/zed/zed_node/depth/depth_registered", Image, self.depth_image)
        self.passenger_safe_pub = rospy.Publisher('/passenger_safe', Bool, queue_size=10)
        self.bridge = CvBridge()

        # set up openpose params
        self.params = dict()
        self.params["model_folder"] = os.path.join(os.path.expanduser("~"), 'catkin_ws/src/openpose/models/', '')
        self.params["number_people_max"] = 1

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(self.params)
        self.opWrapper.start()

        # Variable to publish poses
        self.found_poses = []

        rate = rospy.Rate(5)
        while not rospy.is_shutdown():
            rate.sleep()
        cv2.destroyAllWindows()
Example #10
def m_main():  # video / webcam processing
    # ====================import openpose=========================================
    dir_path = os.path.dirname(os.path.realpath(__file__))
    try:
        # sys.path.append(dir_path + '/../python/openpose/Release')
        # Note: this must be the python folder under the build directory, not the
        # one in the openpose source root.
        # If the import keeps failing, add the absolute path to the PATH environment
        # variable, or append the absolute path directly, e.g.:
        # sys.path.append("F:\\OPENPOSE\\openpose\\build\\python\\openpose\\Release")
        # This works the same way as the line below; only one of them needs to
        # take effect to import openpose.
        # import pyopenpose as op
        sys.path.append('/home/wfnian/OPENPOSE/openpose/build/python')
        from openpose import pyopenpose as op

    except ImportError as e:
        print('Error: could not import pyopenpose. Did you enable `BUILD_PYTHON` when building OpenPose?')
        raise e
    # =============================== parameter setup ============================

    params = dict()
    params["model_folder"] = "/home/wfnian/OPENPOSE/openpose/models"
    params["number_people_max"] = 1  # detect at most one person
    params["camera_resolution"] = "640x360"
    # params["disable_blending"] = True  # show only the skeleton instead of blending it over the background
    params["render_threshold"] = 0.001

    # 3 == op.ThreadManagerMode.Synchronous: execute() runs the whole pipeline
    # (webcam input by default) instead of the start()/emplaceAndPop() flow.
    opWrapper = op.WrapperPython(3)
    opWrapper.configure(params)
    opWrapper.execute()
Example #11
    def get_sample_heatmaps():
        # These parameters are globally set. You need to unset variables set here if you have a new OpenPose object. See *
        params = dict()
        params["model_folder"] = "/usr/local/src/openpose-1.7.0/models/"
        params["heatmaps_add_parts"] = True
        params["heatmaps_add_bkg"] = True
        params["heatmaps_add_PAFs"] = True
        params["heatmaps_scale"] = 3
        params["upsampling_ratio"] = 1
        params["body"] = 1

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image and get heatmap
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop([datum])
        poseHeatMaps = datum.poseHeatMaps.copy()
        opWrapper.stop()

        return poseHeatMaps
Example #12
    def __init__(self, args):
        self.args = args

        # init openpose and 3d lifting model
        self.opWrapper = op.WrapperPython()
        params = dict(model_folder="/openpose/models/")
        params['face'] = cfg.face
        params['hand'] = False
        params['model_pose'] = cfg.model_pose
        params['num_gpu'] = 1
        params['num_gpu_start'] = self.args.pid % cfg.ngpu
        self.opWrapper.configure(params)
        self.opWrapper.start()
        self.poseLifting = Prob3dPose()

        # cv bridge
        self.bridge = CvBridge()

        # subscriber and publisher
        self.sub = rospy.Subscriber(
            "/multi_proc_backend/info_{}".format(self.args.pid), ImageInfo,
            self.callback)
        self.pub = rospy.Publisher("/multi_proc_backend/result",
                                   String,
                                   tcp_nodelay=True,
                                   queue_size=1)
Example #13
def get_points_from_image(image_path):
    try:
        logger.debug('Starting analysis')

        # parameters
        params = dict()
        params["model_folder"] = "./openpose_models/"
        params["face"] = True
        params["hand"] = True
        # params["write_json"] = "./json_outputs/"

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop([datum])

        logger.debug('Analysis done, raw data: {}'.format(datum.cvOutputData))

        return datum.cvOutputData
    except Exception as e:
        logger.error('An error occurred while analysing an image')
        logger.error(e, exc_info=True)
        # propagate error forward
        raise e
Example #14
def init():
    # Try to import OpenPose
    try:
        # Get path from system environment
        OPENPOSE_ROOT = os.environ["OPENPOSE_ROOT"]
        sys.path.append(OPENPOSE_ROOT + '/' + 'build/python/')
        from openpose import pyopenpose as op

    except ImportError as e:
        print(
            'Error: OpenPose library could not be found. '
            'Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )
        raise e
    # Init OpenPose
    global Data, PoseDetector
    # Set OpenPose parameters
    params = dict()
    params["model_folder"] = OPENPOSE_ROOT + os.sep + "models" + os.sep
    params["face"] = False
    params["hand"] = False
    params["disable_blending"] = True
    # Start OpenPose wrapper
    op_wrapper = op.WrapperPython()
    op_wrapper.configure(params)
    op_wrapper.start()
    # Init Datum object
    Data = op.Datum()
    # Expose the wrapper under the global name PoseDetector
    PoseDetector = op_wrapper
Example #15
    def initialize_openpose(self, args):
        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()

        # Openpose params

        # Model path
        params["model_folder"] = args[0].openpose_folder

        # Face disabled
        params["face"] = False

        # Hand disabled
        params["hand"] = False

        # Net Resolution
        params["net_resolution"] = args[0].net_size

        # Gpu number
        params["num_gpu"] = 1  # Set GPU number

        # Gpu Id
        # Set GPU start id (not considering previous)
        params["num_gpu_start"] = 0

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()

        self.datum = op.Datum()
Example #16
def get_configured_opWrapper(hand=True,
                             face=False,
                             body=1,
                             number_people_max=1,
                             frame_step=3,
                             render_threshold=0.5,
                             model_folder=MODEL_DIR):
    """
    model_folder should basically point to openpose/models/
    Face takes an enormous amount of VRAM, this can make openpose crash due to insufficient VRAM, so face defaults
    to False.
    """
    params = dict()
    params["model_folder"] = model_folder
    params["hand"] = hand
    if hand and body != 1:
        # Since body detection is off, its model (the default hand detector) cannot
        # be used, so fall back to the dedicated hand detector model.
        params["hand_detector"] = HAND_DETECTOR_MODEL
    params["body"] = body
    params["face"] = face
    params["number_people_max"] = number_people_max
    params["frame_step"] = frame_step
    params["render_threshold"] = render_threshold
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    return opWrapper
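get_configured_opWrapper only configures the wrapper; it still has to be started before processing frames. A usage sketch under that reading (the image path is a placeholder):

import cv2

def run_on_image(image_path):
    opWrapper = get_configured_opWrapper(hand=False, number_people_max=2)
    opWrapper.start()
    datum = op.Datum()
    datum.cvInputData = cv2.imread(image_path)
    opWrapper.emplaceAndPop([datum])
    return datum.poseKeypoints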
Example #17
def get_keypoints(image):

    try:
        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        datum.cvInputData = image

        opWrapper.emplaceAndPop([datum])

        if datum.poseKeypoints.ndim == 0:
            print("No person found in image!")
            sys.exit(-1)
        num = len(datum.poseKeypoints)
        if num == 0:
            print("No person found in image!")
            sys.exit(-1)
        return datum

    except Exception as e:
        print("Openpose Error: ")
        print(e)
Example #18
    def __init__(self, debug=0):

        self.debug = debug

        self.frameCount = 0
        self.timePrev = time.time()

        #region OPENPOSE
        opParams = dict()
        opParams['model_folder'] = '/home/aufar/Documents/openpose/models/'
        opParams['net_resolution'] = '176x176'

        self.opWrapper = op.WrapperPython()

        self.opWrapper.configure(opParams)
        self.opWrapper.start()

        self.datum = op.Datum()
        #endregion

        self.subscriberImageDetections = rospy.Subscriber(
            'yolo_detector/output/compresseddetections',
            CompressedImageAndBoundingBoxes,
            self.callback,
            queue_size=1)

        self.publisherDetectionDirections = rospy.Publisher(
            'yact/output/detectiondirections',
            DetectionAndDirection,
            queue_size=1)
Example #19
    def __init__(self, debug=False):
        #from openpose import *
        params = dict()
        params["model_folder"] = Constants.openpose_modelfolder
        params["net_resolution"] = "-1x320"
        self.openpose = op.WrapperPython()
        self.openpose.configure(params)
        self.openpose.start()

        max_cosine_distance = Constants.max_cosine_distance
        nn_budget = Constants.nn_budget
        self.nms_max_overlap = Constants.nms_max_overlap
        max_age = Constants.max_age
        n_init = Constants.n_init

        model_filename = 'model_data/mars-small128.pb'
        self.encoder = gdet.create_box_encoder(model_filename, batch_size=1)
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        self.tracker = DeepTracker(metric, max_age=max_age, n_init=n_init)

        self.capture = cv2.VideoCapture("input_video.mp4")

        if self.capture.isOpened():  # Checks the stream
            self.frameSize = (int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                              int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH)))
        Constants.SCREEN_HEIGHT = self.frameSize[0]
        #Constants.SCREEN_WIDTH = self.frameSize[1]
        Constants.SCREEN_WIDTH = 450

        # Write video
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        self.out = cv2.VideoWriter(
            'result.avi', fourcc, 30.0,
            (Constants.SCREEN_WIDTH, Constants.SCREEN_HEIGHT))
Example #20
def main():
    with open("openpose_config.yml", 'r') as ymlfile:
        if sys.version_info[0] > 2:
            cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
        else:
            cfg = yaml.load(ymlfile)

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    # params["model_folder"] = "/home/benjamin/CMU/openpose/models/"
    params["model_folder"] = cfg['model_folder']
    params["model_pose"] = cfg['model_pose']

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Process Image
    datum = op.Datum()

    # walk 'unique' for renamable list
    image_list = []
    for (dirpath, dirnames, filenames) in walk(cfg['image_folder']):
        image_list.extend(filenames)
        break

    images_processed = 0
    for image in image_list:
        current_image = cfg['image_folder'] + image
        keypoint_file = cfg['keypoint_folder'] + image[:-4]
        output_file = cfg['output_folder'] + image
        # print("output_file = {}".format(output_file))
        # print("current_image = {}".format(current_image))

        imageToProcess = cv2.imread(current_image)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop([datum])

        # Save numpy arrays
        if cfg['save_keypoints']:
            # print("Body keypoints: \n" + str(datum.poseKeypoints))
            # print(type(datum.cvOutputData))
            np.save(keypoint_file, datum.poseKeypoints)

        # Display Image
        if cfg['show_images']:
            cv2.imshow(image, datum.cvOutputData)
            cv2.waitKey(0)
            cv2.destroyWindow(image)

        if cfg['save_output_image']:
            cv2.imwrite(output_file, datum.cvOutputData)

        images_processed += 1
        if images_processed % 100 == 0:
            print("\n    Images to process remaining in {} : {} \n".format(
                cfg['image_folder'],
                len(image_list) - images_processed))
Example #21
    def __init__(self):
        params = dict()
        params["model_folder"] = model_folder

        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(params)
        self.opWrapper.start()
        self.datum = op.Datum()
Example #22
    def __init__(self, keys_params={}):
        self._params = self.set_default_params()
        self.set_params(keys_params=keys_params)
        assert self._params["face"] is False  # Face support hasn't been added yet.
        self._opWrapper = op.WrapperPython()
        self._opWrapper.configure(self._params)
        self._opWrapper.start()  # Start OpenPose.
        self._datum = None
Example #23
def get_model(tracking=0):
    if tracking:
        params["tracking"] = 5
        params["number_people_max"] = 1
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    return opWrapper
Example #24
def main(argv):
    LoggingConfig.setup()

    input_files_path = "/Users/allarviinamae/EduWorkspace/master-thesis-training-videos/backflips"
    op_models_path = "/Users/allarviinamae/EduWorkspace/openpose/models"
    show_video = False

    try:
        opts, args = getopt.getopt(argv, "hi:m:s", ["inputFilesPath=", "opModelsPath=", "showVideo="])
    except getopt.GetoptError:
        logging.info('main.py -i <inputFilesPath> -m <opModelsPath> -s')
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            logging.info('main.py -i <inputFilesPath> -m <opModelsPath> -s')
            sys.exit()
        elif opt in ("-i", "--inputFilesPath"):
            input_files_path = arg
        elif opt in ("-m", "--opModelsPath"):
            op_models_path = arg
        elif opt in ("-s", "--showVideo"):
            show_video = True

    logging.info(f'Input files path is {input_files_path}')
    logging.info(f'OpenPose models path is {op_models_path}')
    logging.info(f'Show video is {show_video}')

    try:
        # Change these variables to point to the correct folder (Release/x64 etc.)
        # sys.path.append('../../python')
        # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the
        # OpenPose/python module from there. This will install OpenPose and the python library at your desired
        # installation path. Ensure that this is in your python path in order to use it. sys.path.append(
        # '/usr/local/python')
        from openpose import pyopenpose as op
    except ImportError as e:
        logging.warning(
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python '
            'script in the right folder?')
        raise e

    # Initializing Python OpenPose wrapper. Constructing OpenPose object allocates GPU memory
    logging.info("Starting OpenPose Python Wrapper...")
    op_wrapper = op.WrapperPython()
    openpose_params = get_openpose_params(op_models_path)
    op_wrapper.configure(openpose_params)
    op_wrapper.start()
    logging.info("OpenPose Python Wrapper started")

    video_processor = VideoProcessor(op_wrapper, show_video)

    input_files = InputFileService.get_input_files(input_files_path)
    input_files.sort()

    for video_to_process in input_files:
        video_processor.process(video_to_process)
Example #25
    def _load_estimator(self):
        print("Loading Openpose......")
        self._opWrapper = op.WrapperPython()
        self._opWrapper.configure(self._params)
        self._opWrapper.start()

        datum = op.Datum()
        print("Successfully Loading Openpose")
        return datum
Example #26
def get_model(tracking=0):
    # Tracking
    if tracking:
        params["tracking"] = 5
        params["number_people_max"] = 1
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    print("get model ---> done")
    return opWrapper
Example #27
def load_model():
    try:
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
    except Exception as e:
        print(e)
        sys.exit(-1)

    return opWrapper
Example #28
    def __init__(self):
        params = dict()
        devc = devConfig.devConfig()
        params["model_folder"] = devc._ini_openpose_model
        opWrapper = openpose.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
        datum = openpose.Datum()
        self.opWrapper = opWrapper
        self.datum = datum
        self.net = load_net(
            devc.extend["cfg"].encode('utf-8'),
            devc.extend["weights"].encode('utf-8'),
            0
        )
        self.meta = load_meta(devc.extend["data"].encode('utf-8'))
        self.keypointHistory = dict()
        self.shootingCount = 1
        self.dribbleCount = 1
        self.saveVideo = dict()
        self.frameCount = 0
        self.shootPerson = None
        self.shootRate = dict()

        # input temp, modified by the panels
        self._idict = None
        # input tracking dict, e.g. {"person": "11"}
        self._iimg = None
        # input image: the original displayed image
        self._imode = None
        # input mode: 0 or 1, indicating whether a tracker is active

        # output temp, retrieve by panels
        self._odict = None
        self._oimg = None

        with tf.Graph().as_default():
            graph0 = tf.GraphDef()
            pb_path = os.path.join(
                os.path.dirname(
                    os.path.abspath(__file__)
                ),
                'frozen_har.pb'
            )
            with open(pb_path, mode='rb') as f:
                graph0.ParseFromString(f.read())
                tf.import_graph_def(graph0, name='')
            sess = tf.Session()
            init = tf.global_variables_initializer()
            sess.run(init)
            input = sess.graph.get_tensor_by_name('input:0')
            y = sess.graph.get_tensor_by_name('y_:0')
            self.sessLSTM = sess
            self.inputLSTM = input
            self.y_LSTM = y
Example #29
    def __init__(self):
        rospy.init_node('pose_tracker')
        rospy.loginfo("Started pose tracking node!")
        self.passenger_safe_pub = rospy.Publisher('/passenger_safe',
                                                  Bool,
                                                  queue_size=10)
        self.passenger_exit_pub = rospy.Publisher('/passenger_exit',
                                                  Bool,
                                                  queue_size=10)
        rospy.Subscriber('/safety_constant', Bool, self.initial_safety)
        rospy.Subscriber('/safety_exit', Bool, self.passenger_exit)
        rospy.Subscriber('/zed/image_raw', Image, self.update_image)

        ###################################################
        # Set up global variables for use in all methods. #
        ###################################################
        self.start_time = time.time()
        self.start_time_stamp = datetime.datetime.now()
        self.passenger_unsafe = False

        # Set up the logging file path
        self.path = os.path.expanduser(
            "~") + "/catkin_ws/src/ai-navigation/cart_endpoints/scripts/logs/"
        if not os.path.isdir(self.path):
            os.makedirs(self.path)
        self.full_path = self.path + str(
            self.start_time_stamp.date()) + "_" + str(
                self.start_time_stamp.time())
        os.makedirs(self.full_path)

        self.CONFIDENCE_THRESHOLD = 15
        self.trip_live = False
        self.image_raw = None
        self.image_ready = False

        ######################
        ### OpenPose Setup ###
        ######################
        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        self.params = dict()
        self.params["model_folder"] = os.path.join(
            os.path.expanduser("~"), 'catkin_ws/src/openpose/models/', '')
        self.params["number_people_max"] = 1

        # Starting OpenPose
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure(self.params)
        self.opWrapper.start()
        self.initial_safety(None)
        rate = rospy.Rate(5)
        while not rospy.is_shutdown():
            rate.sleep()
Example #30
    def __init__(self):
        params = set_params()
        # Constructing OpenPose object allocates GPU memory
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
        self.opWrapper = opWrapper
        # Load Deep SORT model
        self.model_path = './deep_sort/model_data/mars-small128.pb'
        self.nms_max_overlap = 1.0
        self.encoder = create_box_encoder(self.model_path, batch_size=1)
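How this class bridges OpenPose output to the Deep SORT encoder is not shown above. One plausible step (an assumption, not the original code) is to reduce each person's keypoints to an (x, y, w, h) box and pass those boxes to the appearance encoder, e.g. features = self.encoder(frame, boxes):

import numpy as np

def keypoints_to_boxes(pose_keypoints, conf_threshold=0.1):
    # Build one tight bounding box per detected person from their confident joints.
    boxes = []
    if pose_keypoints is None or pose_keypoints.ndim == 0:
        return boxes
    for person in pose_keypoints:
        valid = person[person[:, 2] > conf_threshold]
        if len(valid) == 0:
            continue
        x_min, y_min = valid[:, 0].min(), valid[:, 1].min()
        x_max, y_max = valid[:, 0].max(), valid[:, 1].max()
        boxes.append([x_min, y_min, x_max - x_min, y_max - y_min])
    return boxes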