Example No. 1
    def analyze(self):
        rate = rospy.Rate(5)
        while not self.image_ready and not rospy.is_shutdown():
            rate.sleep()
        final_image = self.image_raw

        final_image = self.edit_video(final_image)
        ###############
        # Process the image with OpenPose
        ###############
        datum = op.Datum()
        datum.cvInputData = final_image
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Extract keypoints and display the rendered image
        frame = datum.poseKeypoints
        cv2.imshow("Frame", datum.cvOutputData)

        ############
        # Classify current passenger state.
        ############
        prediction = self.safety_check(frame)

        return prediction, frame, datum.cvOutputData
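All of these examples assume an OpenPose Python wrapper that has already been configured and started (here self.opWrapper). A minimal setup sketch, with the model_folder path as an assumption:

import cv2
from openpose import pyopenpose as op

params = {"model_folder": "openpose/models/"}  # assumed path; point at your models directory
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()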
Example No. 2
    def hand_skel(self, image_path, is_left=False):
        from openpose import pyopenpose as op

        # Read image and face rectangle locations
        img = cv2.imread(image_path)
        if img is None:
            return None  # unreadable image; callers expecting (kps, width, height) must handle this
        height, width = img.shape[:2]
        top, bottom, left, right = get_square_padding(img)
        img = squarify(img, (top, bottom, left, right))
        hand_rect = op.Rectangle(0.0, 0.0, img.shape[0], img.shape[0])
        null_rect = op.Rectangle(0.0, 0.0, 0.0, 0.0)
        if is_left:
            hand_rects = [[hand_rect, null_rect]]
        else:
            hand_rects = [[null_rect, hand_rect]]

        datum = op.Datum()
        datum.cvInputData = img
        datum.handRectangles = hand_rects
        self.op_wrap.emplaceAndPop(op.VectorDatum([datum]))

        if is_left:
            kps = datum.handKeypoints[0][0]
        else:
            kps = datum.handKeypoints[1][0]
        # After squarifying, only one axis is padded, so a single offset correction suffices.
        if left:
            kps[:, 0] -= left
        elif top:
            kps[:, 1] -= top
        return kps, width, height
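hand_skel relies on two helpers that are not shown, get_square_padding and squarify. A sketch consistent with how they are used above (compute border sizes that make the image square, then apply them, so only one axis is ever padded); the names come from the snippet, but the bodies are assumptions:

import cv2

def get_square_padding(img):
    # Assumed behavior: border sizes (top, bottom, left, right) that square the image.
    h, w = img.shape[:2]
    diff = abs(h - w)
    if h < w:  # pad vertically
        return diff // 2, diff - diff // 2, 0, 0
    return 0, 0, diff // 2, diff - diff // 2  # pad horizontally (or not at all)

def squarify(img, padding):
    top, bottom, left, right = padding
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT)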
Example No. 3
    def processPic(self):
        # ============================= Start OpenPose ===================================

        datum = op.Datum()

        datum.cvInputData = cv2.imread(self.picPaths)  # input
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))  # output
        keyPoints = datum.poseKeypoints.tolist()

        dstPicPath = "../dataset/marked_pic/p_" + self.picPaths.split('/')[-1]  # processed image
        cv2.imwrite(dstPicPath, datum.cvOutputData)
        # ============================= Write skeleton data file ===================================
        with open("../dataset/bone_dataSet.data", "a+") as dataSet:
            dataSet.writelines(
                str(
                    self.pointDistance(keyPoints[0]) +
                    self.pointAngle(keyPoints[0]) +
                    [int(self.lineEdit.text())]))
            dataSet.write("\n")
        # ============================= Write skeleton image file ===================================
        bone_img = datum.cvOutputData
        height, width, channel = bone_img.shape
        pixmap = QPixmap.fromImage(
            QImage(bone_img.data, width, height, 3 * width,
                   QImage.Format_RGB888).rgbSwapped())
        self.label_3.setPixmap(pixmap)
Example No. 4
    def get_sample_heatmaps():
        # These parameters are set globally; unset them if you create a new OpenPose object afterwards. See *
        params = dict()
        params["model_folder"] = os.path.join(os.path.expanduser("~"), 'catkin_ws/src/openpose/models/', '')
        params["heatmaps_add_parts"] = True
        params["heatmaps_add_bkg"] = True
        params["heatmaps_add_PAFs"] = True
        params["heatmaps_scale"] = 3
        params["upsampling_ratio"] = 1
        params["body"] = 1

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image and get heatmap
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        poseHeatMaps = datum.poseHeatMaps.copy()
        opWrapper.stop()

        return poseHeatMaps
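With the flags used above, datum.poseHeatMaps stacks the part maps, the background map, and the PAFs along the first axis. A hedged sketch of splitting them, assuming the BODY_25 model (25 part maps, then 1 background map, then the PAF channels):

def split_heatmaps(pose_heatmaps, num_parts=25):
    part_maps = pose_heatmaps[:num_parts]      # per-body-part confidence maps
    bkg_map = pose_heatmaps[num_parts]         # background map
    paf_maps = pose_heatmaps[num_parts + 1:]   # part-affinity fields (x/y channel pairs)
    return part_maps, bkg_map, paf_maps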
Example No. 5
    def detect_poses(self, image):
        # `op` added to globals in the constructor
        datum = op.Datum()  # pylint: disable=undefined-variable # noqa: F821
        datum.cvInputData = image
        self._openpose_wrapper.emplaceAndPop(op.VectorDatum([datum]))

        keypoints = datum.poseKeypoints
        overlayed_image = datum.cvOutputData

        recognitions = []

        # If nothing is detected, keypoints will be None
        if keypoints is not None and len(keypoints.shape) == 3:
            num_persons, num_bodyparts, _ = keypoints.shape
            for person_id in range(0, num_persons):
                for body_part_id in range(0, num_bodyparts):
                    body_part = self._model["body_parts"][body_part_id]
                    x, y, probability = keypoints[person_id][body_part_id]
                    if probability > 0:
                        recognitions.append(
                            Recognition(group_id=person_id,
                                        roi=RegionOfInterest(width=1,
                                                             height=1,
                                                             x_offset=int(x),
                                                             y_offset=int(y)),
                                        categorical_distribution=
                                        CategoricalDistribution(probabilities=[
                                            CategoryProbability(
                                                label=body_part,
                                                probability=float(probability))
                                        ])))

        return recognitions, overlayed_image
Example No. 6
    def scan_image_without_normalize(self, img_path):
        # Opening image in OpenCV
        imageToProcess = cv2.imread(img_path)

        # Get data points (datum)
        datum = op.Datum()
        datum.cvInputData = imageToProcess
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Get output image
        output_image = datum.cvOutputData

        arr = []
        try:
            pop_all(arr)
            x_high = 0
            x_low = 9999
            y_high = 0
            y_low = 9999

            # Get highest and lowest keypoints
            for count, x in enumerate(datum.poseKeypoints[0]):
                # Avoid x=0 and y=0 because some keypoints that are not discovered.
                # This "if" is to define the LOWEST and HIGHEST discovered keypoint.
                if x[0] != 0 and x[1] != 0:
                    if x_high < x[0]:
                        x_high = x[0]
                    if x_low > x[0]:
                        x_low = x[0]
                    if y_high < x[1]:
                        y_high = x[1]
                    if y_low > x[1]:
                        y_low = x[1]

                # Add pose keypoints to a dictionary
                KP = {'x': x[0], 'y': x[1]}
                # Append dictionary to array
                arr.append(KP)

            # Find the highest and lowest position of x and y
            # (Used to draw rectangle; note "height" always gets the larger extent)
            if y_high - y_low > x_high - x_low:
                height = y_high - y_low
                width = x_high - x_low
            else:
                height = x_high - x_low
                width = y_high - y_low

            # Draw rectangle (get width and height)
            y_high = int(y_high + height / 40)
            y_low = int(y_low - height / 12)
            x_high = int(x_high + width / 5)
            x_low = int(x_low - width / 5)

            return arr, x_low, y_low, output_image
        except Exception:
            return None  # no keypoints detected (datum.poseKeypoints was None)
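This snippet (and several below) calls a pop_all helper that is never shown; from its usage, it simply empties the list in place before reuse. A one-line sketch under that assumption:

def pop_all(items):
    # Assumed helper: empty the list in place so the same object can be reused.
    del items[:]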
Example No. 7
def hand_pose(model_dir, left=0):
    params = {'model_folder': model_dir}
    params['net_resolution'] = '240x192'
    #params['disable_blending'] = True
    #params['number_people_max'] = 1
    params['hand'] = True
    #params['face'] = True
    params['body'] = 0
    params['hand_detector'] = 2
    hands_boxes = [  # left & right hands, single person (defined but unused below; hand_box() supplies the live boxes)
        [pyop.Rectangle(0, 0, 0, 0),
         pyop.Rectangle(0, 0, 640, 640)]
    ]

    #opWrapper = pyop.WrapperPython(pyop.ThreadManagerMode.Synchronous)
    #opWrapper.configure(params); opWrapper.execute() # webcam
    '''opWrapper = pyop.WrapperPython(pyop.ThreadManagerMode.AsynchronousOut)
    opWrapper.configure(params); opWrapper.start() # webcam
    while cv2.waitKey(5)!=27: # faster
        t = time(); x = pyop.VectorDatum()
        if not opWrapper.waitAndPop(x): continue
        x = x[0]; im = x.cvOutputData; fps = 'FPS=%.1f'%(1/(time()-t))
        im = cv2.putText(im, fps, (5,20), 4, 0.7, (0,255,255), 1, 16)
        cv2.imshow('OpenPose', im) # hand_standalone unsupported
    cv2.destroyAllWindows()#'''

    opWrapper = pyop.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    x = pyop.Datum()
    cap = cv2.VideoCapture(-1)
    hand_boxes = hand_box(cap, left)
    while cv2.waitKey(5) != 27:  # slower
        t = time()
        _, im = cap.read()
        x.cvInputData = im
        if params['hand_detector'] == 2: x.handRectangles = hand_boxes
        opWrapper.emplaceAndPop(pyop.VectorDatum([x]))
        im = x.cvOutputData
        fps = 'FPS=%.1f' % (1 / (time() - t))
        im = cv2.putText(im, fps, (5, 20), 4, 0.7, (0, 255, 255), 1, 16)
        cv2.imshow('OpenPose', RSZ(im, 1.5))  # cv2.LINE_AA=16
    cv2.destroyAllWindows()
    cap.release()
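The loop above also depends on two helpers that are not shown, RSZ and hand_box. Plausible sketches, with both bodies inferred from usage (RSZ scales a frame by a factor; hand_box returns OpenPose hand rectangles for a single person):

def RSZ(img, factor):
    # Assumed helper: uniform resize by a scale factor.
    return cv2.resize(img, None, fx=factor, fy=factor)

def hand_box(cap, left):
    # Assumed helper: a fixed square hand rectangle sized to the capture height.
    side = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    rect, null = pyop.Rectangle(0, 0, side, side), pyop.Rectangle(0, 0, 0, 0)
    return [[rect, null] if left else [null, rect]]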
Example No. 8
    def __init__(self, frame_id, no_depth, pub_topic, color_topic, depth_topic,
                 cam_info_topic, op_wrapper, display):

        self.pub = rospy.Publisher(pub_topic, Frame, queue_size=10)

        self.frame_id = frame_id
        self.no_depth = no_depth

        self.bridge = CvBridge()

        self.op_wrapper = op_wrapper

        self.display = display
        self.frame = None

        # Populate necessary K matrix values for 3D pose computation.
        cam_info = rospy.wait_for_message(cam_info_topic, CameraInfo)
        self.fx = cam_info.K[0]
        self.fy = cam_info.K[4]
        self.cx = cam_info.K[2]
        self.cy = cam_info.K[5]

        # Obtain depth topic encoding
        encoding = rospy.wait_for_message(depth_topic, Image).encoding
        self.mm_to_m = 0.001 if encoding == "16UC1" else 1.

        # Function wrappers for OpenPose version discrepancies
        if OPENPOSE1POINT7_OR_HIGHER:
            self.emplaceAndPop = lambda datum: self.op_wrapper.emplaceAndPop(
                op.VectorDatum([datum]))
            self.detect = lambda kp: kp is not None
        else:
            self.emplaceAndPop = lambda datum: self.op_wrapper.emplaceAndPop(
                [datum])
            self.detect = lambda kp: kp.shape != ()

        image_sub = message_filters.Subscriber(color_topic, Image)
        depth_sub = message_filters.Subscriber(depth_topic, Image)
        self.ts = message_filters.ApproximateTimeSynchronizer(
            [image_sub, depth_sub], 1, 0.01)
        self.ts.registerCallback(self.callback)
        """ OpenPose skeleton dictionary
Example No. 9
def process_frame(img, op_wrapper):
    """Function to take a Frame and return keypoints and output image with keypoints

    Args:
        img: Image to work with
        op_wrapper: The openpose wrapper
    """

    datum = op.Datum()
    if img.dtype != np.uint8:
        img = np.uint8(img)
        tqdm.write("Wrong image dtype: Changing to np.uint8")
    # To be resolved: OpenPose expects BGR input, so this conversion may be unnecessary
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    datum.cvInputData = img
    op_wrapper.emplaceAndPop(op.VectorDatum([datum]))
    # tqdm.write(datum.poseKeypoints.shape)
    #cv2.imwrite('output/test.jpg', datum.cvOutputData)
    # return datum.cvOutputData, datum.poseKeypoints, datum.handKeypoints
    return datum.poseKeypoints  # note: despite the docstring, only the keypoints are returned
Example No. 10
    def showCapture(self):
        try:

            frame = self.video.captureFrame()
            datum = op.Datum()
            datum.cvInputData = frame
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            resPic = datum.cvOutputData
            # cv2.putText(resPic, "OpenPose", (25, 25),
            #             cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 222))

            pic = cv2.cvtColor(resPic, cv2.COLOR_BGR2RGB)
            pic = QImage(pic.data, pic.shape[1], pic.shape[0], 3 * pic.shape[1],
                         QtGui.QImage.Format_RGB888)
            self.label_3.setPixmap(QPixmap.fromImage(pic))

            keyPoints = datum.poseKeypoints.tolist()
            self.label_4.setText(pos[predict_result(
                pointDistance(keyPoints[0]) + pointAngle(keyPoints[0]))])

        except (TypeError, AttributeError):
            print("No frame")  # no frame captured or no person detected
Example No. 11
    def classify_image(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        flip_image = cv2.flip(cv_image, -1) # flip the zed image right side up
        print(flip_image.shape)

        # Process image
        datum = op.Datum()
        datum.cvInputData = flip_image
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Display Image
        poseKeypoints = datum.poseKeypoints
        if poseKeypoints is None:
            return  # no person detected in this frame
        # print("pose keypoints")
        print(poseKeypoints)
        print("left shoulder ", poseKeypoints[0][2])
        print("right shoulder ", poseKeypoints[0][5])

        cv2.imshow("Frame", datum.cvOutputData)
        cv2.waitKey(3)
Example No. 12
def upload():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, secure_filename(f.filename))
        f.save(file_path)

    dir_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append('/usr/local/python')
    from openpose import pyopenpose as op    
    parser = argparse.ArgumentParser()
    nameOfUpload = f.filename
    print(nameOfUpload)
    parser.add_argument("--image_path", default=nameOfUpload, help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
    args = parser.parse_known_args()

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "/Users/timothy/openpose/models"
    params["write_json"] = "../temp" 
    params["keypoint_scale"] = "3"
    # Add others in path?
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1:
            next_item = args[1][i + 1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = next_item

    # Construct it from system arguments
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    print('here')
    print(args[0].image_path)
    # Process Image

    datum = op.Datum()
    imageToProcess = cv2.imread(args[0].image_path)
    datum.cvInputData = imageToProcess
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))

    # Display Image
    print("Body keypoints: \n" + str(datum.poseKeypoints))
    cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
    # Save the rendered skeleton image
    outputName = 'poseSkeleton' + args[0].image_path
    cv2.imwrite(outputName, datum.cvOutputData)
    cv2.waitKey(1)

    bridge = {"version":1.3,"people":[{"person_id":[-1],"pose_keypoints_2d":[0.255244,0.603225,0.853665,0.3113,0.66861,0.611142,0.32568,0.706593,0.807634,0.426866,0.712084,0.641127,0.517195,0.717605,0.256783,0.302272,0.6304,0.369653,0,0,0,0,0,0,0.479215,0.510503,0.315729,0.475625,0.502358,0.327412,0.639993,0.447886,0.737408,0.632795,0.706738,0.700472,0.477412,0.521413,0.28034,0.632712,0.453244,0.332464,0.618279,0.665819,0.568486,0.239083,0.622133,0.949109,0.240877,0.603191,0.23209,0.242691,0.684896,0.808952,0,0,0,0.697774,0.733872,0.147229,0.701321,0.723011,0.143883,0.605658,0.684873,0.465871,0.697786,0.733884,0.716619,0.686923,0.739437,0.644811,0.623708,0.72306,0.68213],"face_keypoints_2d":[],"hand_left_keypoints_2d":[],"hand_right_keypoints_2d":[],"pose_keypoints_3d":[],"face_keypoints_3d":[],"hand_left_keypoints_3d":[],"hand_right_keypoints_3d":[]}]}

   

    fileNameForPhoto = '0' + '_keypoints.json'
    with open(fileNameForPhoto) as f:
        data = json.load(f)
    print(data["people"][0]["pose_keypoints_2d"])
    accuracy = comparePoints(1, data["people"][0]["pose_keypoints_2d"])
    # assume data contains your decoded image
    encoded = base64.b64encode(open(outputName, "rb").read())
    # Make prediction
    # preds = model_predict(file_path, model)

    # Process your result for human
    # # pred_class = preds.argmax(axis=-1)            # Simple argmax
    # pred_class = decode_predictions(preds, top=1)   # ImageNet Decode
    # result = str(pred_class[0][0][1])               # Convert to string
    # return result
    #return encoded
    return json.dumps(accuracy)
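comparePoints is not shown anywhere in this example. A hypothetical sketch of such a scorer, assuming both poses are flat [x, y, confidence, ...] lists normalized to [0, 1] (keypoint_scale = "3") and scoring by mean distance over mutually confident points:

import math

def compare_points_sketch(reference, candidate, conf_threshold=0.1):
    total, used = 0.0, 0
    for i in range(0, min(len(reference), len(candidate)), 3):
        rx, ry, rc = reference[i:i + 3]
        cx, cy, cc = candidate[i:i + 3]
        if rc > conf_threshold and cc > conf_threshold:
            total += math.hypot(rx - cx, ry - cy)
            used += 1
    return 1.0 - total / used if used else 0.0  # crude similarity score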
Example No. 13
def player_thread(client, opWrapper, mqtt_client, mqtt_channel, debug, addr):
    data = b''
    fps_time = 0
    epoch = time.time()
    detect_move = False
    reg = False
    while True:
        payload_size = struct.calcsize("L")
        while len(data) < payload_size:
            #data += client.recv(90456)
            data += client.recv(4096)
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_msg_size)[0]
        #print(str(msg_size))
        str_msg = str(msg_size)
        #print("Size with last digit player" +str_msg);
        player_num = int(str_msg[-1])
        msg_size = int(str_msg[:-1])
        player = "player" + str(player_num)
        print("Received input from " + player)
        while len(data) < msg_size:
            data += client.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]
        frame = pickle.loads(frame_data)
        now = datetime.now()
        current_time = now.strftime("%M:%S")
        print("\tTime: " + current_time + " -- " + str(frame.size))
        datum = op.Datum()
        datum.cvInputData = frame  #imageToProcess
        print("\tPost process " + current_time + " -- " + str(frame.size))
        stats = opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        #print(str(stats));
        poseModel = op.PoseModel.BODY_25
        #print(op.getPoseBodyPartMapping(poseModel))
        #print("Body keypoints: \n" + str(datum.poseKeypoints))
        movement = move(datum.poseKeypoints)
        #####MQTT SEND IT#######
        if movement == "hook" and detect_move == False:
            detect_move = True
            action = 'h'
        if movement == "blocking" and detect_move == False:
            detect_move = True
            action = 'o'

        #else:
        #message = json.dumps({"player": player, "action": ""})
        #print("");
        print(time.time())
        if int(time.time()) % 5 == 0:
            if reg:
                if detect_move:
                    message = json.dumps({
                        "playerID": player_num,
                        "action": action
                    })
                    detect_move = False
                else:
                    message = json.dumps({
                        "playerID": player_num,
                        "action": ""
                    })
                mqtt_client.publish(mqtt_channel, message, qos=1)
                reg = False
        if int(time.time()) % 6 == 0:
            reg = True
            detect_move = False

        if debug:
            cv2.putText(datum.cvOutputData,
                        "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.imshow(player, datum.cvOutputData)

        print("FPS: %f" % (1.0 / (time.time() - fps_time)))
        fps_time = time.time()
        if cv2.waitKey(1) == 27:
            break
Example No. 14
            if key not in params: params[key] = next_item

    # Construct it from system arguments
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython(op.ThreadManagerMode.AsynchronousOut)
    opWrapper.configure(params)
    opWrapper.start()

    # Main loop
    userWantsToExit = False
    while not userWantsToExit:
        # Pop frame
        s = time.time()
        datumProcessed = op.VectorDatum()
        if opWrapper.waitAndPop(datumProcessed):
            if not args[0].no_display:
                # Display image
                userWantsToExit = display(datumProcessed)
            printKeypoints(datumProcessed)
        else:
            break

        e = time.time()
        print('FPS:%5.2f' % (1 / (e - s)))
except Exception as e:
    print(e)
    sys.exit(-1)
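The asynchronous loop above calls display and printKeypoints helpers that are not included in the fragment. Plausible definitions modeled on OpenPose's Python tutorials; treat them as a sketch rather than the original code:

def display(datums):
    datum = datums[0]
    cv2.imshow("OpenPose", datum.cvOutputData)
    return cv2.waitKey(1) == 27  # True once the user presses Esc

def printKeypoints(datums):
    datum = datums[0]
    print("Body keypoints: \n" + str(datum.poseKeypoints))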
Example No. 15
    for input_path in args.files:
        print(f"Video: {input_path}")
        video = Video(input_path=input_path)
        tracker = Tracker(
            distance_function=keypoints_distance,
            distance_threshold=DISTANCE_THRESHOLD,
            detection_threshold=DETECTION_THRESHOLD,
            hit_counter_max=HIT_COUNTER_MAX,
            initialization_delay=INITIALIZATION_DELAY,
            pointwise_hit_counter_max=POINTWISE_HIT_COUNTER_MAX,
        )
        KEYPOINT_DIST_THRESHOLD = video.input_height / 40

        for frame in video:
            # `datum` and `detector` are assumed to be created earlier in this script
            datum.cvInputData = frame
            detector(op.VectorDatum([datum]))
            detected_poses = datum.poseKeypoints

            if detected_poses is not None and detected_poses.any():
                openpose_detections = [
                    Detection(p, scores=s, label=0)
                    for (p, s) in zip(detected_poses[:, :, :2], detected_poses[:, :, 2])
                ]
            else:
                openpose_detections = []

            yolo_out = model(frame,
                             conf_threshold=args.conf_thres,
                             iou_threshold=args.iou_thresh,
                             image_size=args.img_size,
                             classes=args.classes)
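The Tracker above is constructed with a keypoints_distance function that the fragment does not define. A sketch in the style of Norfair's OpenPose demo, reusing the thresholds from this snippet; the exact body is an assumption:

import numpy as np

def keypoints_distance(detected_pose, tracked_pose):
    distances = np.linalg.norm(detected_pose.points - tracked_pose.estimate, axis=1)
    match_num = np.count_nonzero(
        (distances < KEYPOINT_DIST_THRESHOLD)
        * (detected_pose.scores > DETECTION_THRESHOLD)
        * (tracked_pose.last_detection.scores > DETECTION_THRESHOLD)
    )
    return 1 / (1 + match_num)  # fewer matched keypoints -> larger distance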
Example No. 16
    def get_keypoints_and_id_from_img_without_normalize(self, img):
        # KP ordering of body parts
        NECK = 1
        R_SHOULDER = 2
        R_ELBOW = 3
        R_WRIST = 4
        L_SHOULDER = 5
        L_ELBOW = 6
        L_WRIST = 7
        MID_HIP = 8
        R_HIP = 9
        R_KNEE = 10
        R_ANKLE = 11
        L_HIP = 12
        L_KNEE = 13
        L_ANKLE = 14

        # Define bodyparts to get the selected keypoints
        BODY_PARTS = [
            NECK, R_SHOULDER, R_ELBOW, R_WRIST, L_SHOULDER, L_ELBOW, L_WRIST,
            MID_HIP, R_HIP, R_KNEE, R_ANKLE, L_HIP, L_KNEE, L_ANKLE
        ]

        # Set tracker
        max_cosine_distance = 0.2
        nn_budget = 100
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)

        # Get data points (datum)
        datum = op.Datum()
        datum.cvInputData = img
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Initialize lists
        arr = []
        boxes = []
        list_of_pose_temp = []
        list_of_pose_and_id = []
        try:
            # Get highest and lowest keypoints
            for kp_idx, keypoint in enumerate(datum.poseKeypoints):
                pop_all(arr)
                x_high = 0
                x_low = 9999
                y_high = 0
                y_low = 9999

                for count, x in enumerate(keypoint):
                    # Avoid x=0 and y=0 because some keypoints that are not discovered.
                    # This "if" is to define the LOWEST and HIGHEST discovered keypoint.
                    if x[0] != 0 and x[1] != 0:
                        if x_high < x[0]:
                            x_high = x[0]
                        if x_low > x[0]:
                            x_low = x[0]
                        if y_high < x[1]:
                            y_high = x[1]
                        if y_low > x[1]:
                            y_low = x[1]

                    # Add pose keypoints to a dictionary
                    if count in BODY_PARTS:
                        KP = {'x': x[0], 'y': x[1]}

                        # Append dictionary to array
                        arr.append(KP)

                # Find the highest and lowest position of x and y
                # (Used to draw rectangle)
                if y_high - y_low > x_high - x_low:
                    height = y_high - y_low
                    width = x_high - x_low
                else:
                    height = x_high - x_low
                    width = y_high - y_low

                # Draw rectangle (get width and height)
                y_high = int(y_high + height / 40)
                y_low = int(y_low - height / 12)
                x_high = int(x_high + width / 5)
                x_low = int(x_low - width / 5)

                # # Normalize keypoint
                list_of_pose_temp.append(arr)

                # Make the box
                boxes.append([x_low, y_low, width, height])

                # Encode the features inside the designated box
                features = self.encoder(datum.cvOutputData, boxes)

                # For a non-empty item add to the detection array
                def nonempty(xywh):
                    return xywh[2] != 0 and xywh[3] != 0

                detections = [
                    Detection(bbox, 1.0, feature)
                    for bbox, feature in zip(boxes, features) if nonempty(bbox)
                ]

                # Run non-maxima suppression.
                np_boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(
                    np_boxes, self.nms_max_overlap, scores)
                detections = [detections[i] for i in indices]

                # Update tracker.
                tracker.predict()
                tracker.update(detections)

                # Make pose and person ID list
                if kp_idx == len(datum.poseKeypoints) - 1:
                    for track_idx, track in enumerate(tracker.tracks):
                        bbox = track.to_tlwh()
                        list_of_pose_and_id.append({
                            "Keypoints": list_of_pose_temp[track_idx],
                            "ID": track.track_id
                        })

            return list_of_pose_and_id
        except Exception:
            return None  # no keypoints detected
Example No. 17
            if key not in params: params[key] = next_item

    # Construct it from system arguments
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Process Image
    datum = op.Datum()
    imageToProcess = cv2.imread(args[0].image_path)
    datum.cvInputData = imageToProcess
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
    human_count = len(datum.poseKeypoints)

    # Display Image
    for human in range(human_count):
        print("%dth person body keypoints:\n" % human +
              str(datum.poseKeypoints[human]))
        print("%dth person face keypoints:\n" % human +
              str(datum.faceKeypoints[human]))
        print("%dth person Left Hand keypoints:\n" % human +
              str(datum.handKeypoints[0][human]))
        print("%dth person Right Hand keypoints:\n" % human +
              str(datum.handKeypoints[1][human]))

    print("Total %d human detected" % human_count)
    cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
Example No. 18
    def scan_video_without_normalize(self, video_path, keypoints_to_extract):
        # Opening OpenCV stream
        stream = cv2.VideoCapture(video_path)

        # Define list of pose, x low, and y low
        list_of_pose = []
        list_of_x_low = []
        list_of_y_low = []
        while True:
            try:
                # Stream
                ret, imageToProcess = stream.read()
                if not ret:
                    break  # end of video
                datum = op.Datum()
                datum.cvInputData = imageToProcess
            except Exception as e:
                # Break at end of frame
                break

            # Find keypoints
            self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

            # Get output image processed by Openpose
            output_image = datum.cvOutputData

            # Define keypoints array and binding box array
            arr = []
            boxes = []

            try:
                # Loop over each detected person's keypoints
                for keypoint in datum.poseKeypoints:
                    pop_all(arr)
                    x_high = 0
                    x_low = 9999
                    y_high = 0
                    y_low = 9999

                    # Get highest and lowest keypoints
                    for count, x in enumerate(keypoint):
                        # Check which keypoints to extract
                        if count in keypoints_to_extract:
                            # Avoid x=0 and y=0 because some keypoints that are not discovered.
                            # This "if" is to define the LOWEST and HIGHEST discovered keypoint.
                            if x[0] != 0 and x[1] != 0:
                                if x_high < x[0]:
                                    x_high = x[0]
                                if x_low > x[0]:
                                    x_low = x[0]
                                if y_high < x[1]:
                                    y_high = x[1]
                                if y_low > x[1]:
                                    y_low = x[1]

                            # Add pose keypoints to a dictionary
                            KP = {'x': x[0], 'y': x[1]}
                            # Append dictionary to array
                            arr.append(KP)

                    # Find the highest and lowest position of x and y
                    # (Used to draw rectangle)
                    if y_high - y_low > x_high - x_low:
                        height = y_high - y_low
                        width = x_high - x_low
                    else:
                        height = x_high - x_low
                        width = y_high - y_low

                    # Draw rectangle (get width and height)
                    y_high = int(y_high + height / 40)
                    y_low = int(y_low - height / 12)
                    x_high = int(x_high + width / 5)
                    x_low = int(x_low - width / 5)

                    # Append list of pose, x low, and y low
                    list_of_pose.append(arr)
                    list_of_x_low.append(x_low)
                    list_of_y_low.append(y_low)
            except Exception:
                pass  # no keypoints detected in this frame

        return list_of_pose, list_of_x_low, list_of_y_low
Example No. 19
    def callback(self, ros_image, ros_depth):
        # Don't process if we have not obtained K matrix yet
        if not (self.fx and self.cx and self.fy and self.cy):
            return

        # Construct a frame with current time !before! pushing to OpenPose
        fr = Frame()
        fr.header.frame_id = self.frame_id
        fr.header.stamp = rospy.Time.now()

        # Convert images to cv2 matrices
        image = depth = None
        try:
            image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
            depth = self.bridge.imgmsg_to_cv2(ros_depth, "32FC1")
        except CvBridgeError as e:
            rospy.logerr("CvBridge Error: {0}".format(e))
            return

        # Push data to OpenPose and block while waiting for results
        datum = op.Datum()
        datum.cvInputData = image

        if OPENPOSE1POINT7_OR_HIGHER:
            self.op_wrapper.emplaceAndPop(op.VectorDatum([datum]))
        else:
            self.op_wrapper.emplaceAndPop([datum])

        pose_kp = datum.poseKeypoints
        lhand_kp = datum.handKeypoints[0]
        rhand_kp = datum.handKeypoints[1]

        # Set number of people detected
        if pose_kp is not None:
            if pose_kp.shape == ():
                num_persons = 0
                body_part_count = 0
            else:
                num_persons = pose_kp.shape[0]
                body_part_count = pose_kp.shape[1]
        else:
            return

        # Check to see if hands were detected
        lhand_detected = False
        rhand_detected = False
        hand_part_count = 0

        if lhand_kp is not None:
            if lhand_kp.shape != ():
                lhand_detected = True
                hand_part_count = lhand_kp.shape[1]

        if rhand_kp is not None:
            if rhand_kp.shape != ():
                rhand_detected = True
                hand_part_count = rhand_kp.shape[1]

        # Handle body points
        fr.persons = [Person() for _ in range(num_persons)]
        try:
            for person in range(num_persons):
                fr.persons[person].bodyParts = [BodyPart() for _ in range(body_part_count)]
                fr.persons[person].leftHandParts = [BodyPart() for _ in range(hand_part_count)]
                fr.persons[person].rightHandParts = [BodyPart() for _ in range(hand_part_count)]

                detected_hands = []
                if lhand_detected:
                    detected_hands.append((lhand_kp, fr.persons[person].leftHandParts))
                if rhand_detected:
                    detected_hands.append((rhand_kp, fr.persons[person].rightHandParts))

                # Process the body
                for bp in range(body_part_count):
                    u, v, s = pose_kp[person, bp]
                    x, y, z = self.convert_to_3d(u, v, depth)
                    arr = fr.persons[person].bodyParts[bp]
                    arr.pixel.x = u
                    arr.pixel.y = v
                    arr.score = s
                    arr.point.x = x
                    arr.point.y = y
                    arr.point.z = z

                # Process left and right hands
                for hp in range(hand_part_count):
                    for kp, harr in detected_hands:
                        u, v, s = kp[person, hp]
                        x, y, z = self.convert_to_3d(u, v, depth)
                        arr = harr[hp]
                        arr.pixel.x = u
                        arr.pixel.y = v
                        arr.score = s
                        arr.point.x = x
                        arr.point.y = y
                        arr.point.z = z

        except IndexError as e:
            rospy.logerr("Indexing error occurred: {}".format(e))
            # return

        if self.display: self.frame = datum.cvOutputData.copy()
        self.pub.publish(fr)
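callback depends on a convert_to_3d helper that is not shown. Given the intrinsics cached in __init__ (fx, fy, cx, cy) and the mm_to_m depth scaling, a standard pinhole back-projection sketch would be:

def convert_to_3d(self, u, v, depth):
    # Hedged sketch; the original method is not included in this example.
    u_i, v_i = int(u), int(v)
    if not (0 <= v_i < depth.shape[0] and 0 <= u_i < depth.shape[1]):
        return 0.0, 0.0, 0.0  # keypoint fell outside the depth image
    z = float(depth[v_i, u_i]) * self.mm_to_m  # depth in meters
    x = (u - self.cx) * z / self.fx
    y = (v - self.cy) * z / self.fy
    return x, y, z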