Example #1
def opImages_2_json(imagePaths, params):
    """Applique openpose selon les paramètres params sur les images de la liste
    imagePaths. Le résultat est en format JSON au path params["write_json"] """

    if params["write_json"] == "":
        raise ValueError(
            "Le chemin d'écriture ne peut pas être vide. Définissez vos paramètres avec un chemin d'écriture pour write_json"
        )

    opWrapper = init_openpose(params)
    start = time.time()

    for imagePath in tqdm(imagePaths):
        datum = op.Datum()
        image = cv2.imread(imagePath)
        datum.cvInputData = image
        if params["hand_detector"] == 2:
            imageSize = float(max(image.shape[0:2]))
            handRectangles = [[
                op.Rectangle(0., 0., imageSize, imageSize),
                op.Rectangle(0., 0., imageSize, imageSize),
            ]]
            datum.handRectangles = handRectangles
        opWrapper.emplaceAndPop([datum])

    end = time.time()
    print("OpenPose demo successfully finished. Total time: " +
          str(end - start) + " seconds")
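
# `init_openpose` is used by several of these examples but never shown; a
# minimal sketch of such a helper (hypothetical, following the
# op.WrapperPython start-up pattern used in later examples):
def init_openpose(params):
    # Configure and start an OpenPose Python wrapper from a params dict
    # (see include/openpose/flags.hpp for the valid keys)
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    return opWrapper
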
def detect_key_points(video_to_process, op_wrapper):
    """
    Creates a hand rectangle and processes the video with it
    :param op_wrapper: OpenPose wrapper object
    :param video_to_process: OpenCV capture object to process
    :return: np.array with keypoint data; timestamps in column 0
    """
    width = video_to_process.get(3)   # CAP_PROP_FRAME_WIDTH, as float
    height = video_to_process.get(4)  # CAP_PROP_FRAME_HEIGHT, as float

    # default rectangle for the right hand, roughly centered
    right_hand_rectangle = [
        # center
        [
            op.Rectangle(0., 0., 0., 0.),
            op.Rectangle(0.2 * width, 0.1 * height, height * 0.85,
                         height * 0.85)
        ]
    ]

    datum = op.Datum()
    data = None
    timestamps = []

    while video_to_process.isOpened():
        # Capture frame-by-frame
        ret, frame = video_to_process.read()
        if ret:
            datum.cvInputData = frame
            datum.handRectangles = right_hand_rectangle
            op_wrapper.emplaceAndPop([datum])
            cv2.imshow("OpenPose 1.4.0. Right hand keypoints",
                       datum.cvOutputData)
            timestamps.append(video_to_process.get(cv2.CAP_PROP_POS_MSEC))
            if data is not None:
                data = np.append(data,
                                 datum.handKeypoints[1].reshape(1, 63),
                                 axis=0)
            else:
                data = datum.handKeypoints[1].reshape(1, 63)
            # Press Q on the keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break
    # add timestamps to beginning
    data_with_time = np.append(np.array(timestamps).reshape(
        len(timestamps), 1),
                               data,
                               axis=1)
    return data_with_time
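
# Usage sketch (assumptions: the model path and video file are placeholders;
# the wrapper must be started with hand detection from explicit rectangles):
# params = {"model_folder": "models/", "hand": True,
#           "hand_detector": 2, "body": 0}
# op_wrapper = init_openpose(params)
# video = cv2.VideoCapture("clip.mp4")
# data = detect_key_points(video, op_wrapper)  # column 0 holds timestamps (ms)
# video.release()
# cv2.destroyAllWindows()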
Example #3
def opFrame_run(frame, params, opWrapper):
    """Applique openpose sur une frame et retourne l'object datum contenant :
        - l'image avec le squelette
        - les keypoints en numpy
    """
    datum = op.Datum()

    datum.cvInputData = frame
    if params["hand_detector"] == 2:
        imageSize = float(max(frame.shape[0:2]))
        handRectangles = [[
            op.Rectangle(0., 0., imageSize, imageSize),
            op.Rectangle(0., 0., imageSize, imageSize),
        ]]
        datum.handRectangles = handRectangles
    opWrapper.emplaceAndPop([datum])
    return datum
def box2oprectangle(box):
    left, right, top, bottom = box
    width = np.abs(right - left)
    height = np.abs(top - bottom)
    max_length = int(max(width, height))
    center = (int(left + (width / 2)), int(bottom - (height / 2)))
    # The OpenPose hand detector needs a fairly large bounding box, so enlarge it
    # Top-left corner of the enlarged square
    new_top = (int(center[0] - max_length / 1.3), int(center[1] - max_length / 1.3))
    max_length = int(max_length * 1.6)
    hand_rectangle = op.Rectangle(new_top[0], new_top[1], max_length, max_length)
    return hand_rectangle
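
# Usage sketch (box values are made up): convert a detector box given in
# (left, right, top, bottom) pixel order and use it as the left-hand slot,
# mirroring detect_keypoints below.
# rect = box2oprectangle((120, 260, 80, 230))
# hand_rectangles = [[rect, op.Rectangle(0., 0., 0., 0.)]]
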
def predict_keypoints(color_image, rect):
    # Relies on a module-level, already-started `opWrapper`
    imageToProcess = color_image
    if rect is None:
        rect = [100., 50., 400., 400.]
    handRectangles = [
        # Left/Right hands person 0
        [
            op.Rectangle(0., 0., 0., 0.),
            op.Rectangle(rect[0], rect[1], rect[2], rect[3]),
        ]
    ]

    # Create new datum
    datum = op.Datum()
    datum.cvInputData = imageToProcess
    datum.handRectangles = handRectangles

    # Process and display image
    opWrapper.emplaceAndPop([datum])
    # Debug code
    # print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
    # print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
    # cv2.imshow("OpenPose 1.5.1 - Tutorial Python API", datum.cvOutputData)
    return datum.handKeypoints[1], datum.cvOutputData
Example #6
def opImage_display(imagePath, params, display=True, frame=None):
    """Applique openpose sur une image et retourne l'object datum contenant :
        - l'image avec le squelette
        - les keypoints en numpy
    """
    opWrapper = init_openpose(params)
    datum = op.Datum()
    if frame is not None:  # `if frame:` is ambiguous for numpy arrays
        image = frame
    else:
        image = cv2.imread(imagePath)
    datum.cvInputData = image
    if params["hand_detector"] == 2:
        imageSize = float(max(image.shape[0:2]))
        handRectangles = [[
            op.Rectangle(0., 0., imageSize, imageSize),
            op.Rectangle(0., 0., imageSize, imageSize),
        ]]
        datum.handRectangles = handRectangles
    opWrapper.emplaceAndPop([datum])
    if display:
        plt.imshow(datum.cvOutputData)
        plt.show()
    return datum
def detect_keypoints(image, hand_boxes, threshold=0.5):
    # Treat every detected hand as a left hand; relies on a module-level,
    # already-started `opWrapper`
    hands_rectangles = [[box2oprectangle(box), op.Rectangle(0., 0., 0., 0.)]
                        for box in hand_boxes]
    # hands_rectangles.append([hand_rectangle,op.Rectangle(0., 0., 0., 0.)])

    # Create new datum
    datum = op.Datum()
    datum.cvInputData = image
    datum.handRectangles = hands_rectangles
    # Process and display image
    opWrapper.emplaceAndPop([datum])

    if datum.handKeypoints[0].shape == ():
        # No hands were detected
        hand_keypoints = [[], []]
    else:
        hand_keypoints = datum.handKeypoints
    return hand_keypoints, datum.cvOutputData
Example #8
def get_hand_rectangle_from_keypoints(wrist, elbow, shoulder) -> op.Rectangle:
    """
    Adapted from original CPP code:
    https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
    """

    ratio_wrist_elbow = 0.33  # Taken from openpose

    hand_rectangle = op.Rectangle()
    hand_rectangle.x = wrist[0] + ratio_wrist_elbow * (wrist[0] - elbow[0])
    hand_rectangle.y = wrist[1] + ratio_wrist_elbow * (wrist[1] - elbow[1])

    distance_wrist_elbow = get_distance(wrist, elbow)
    distance_elbow_shoulder = get_distance(elbow, shoulder)

    hand_rectangle.width = 1.5 * np.max(
        [distance_wrist_elbow, 0.9 * distance_elbow_shoulder])
    hand_rectangle.height = hand_rectangle.width
    hand_rectangle.x -= hand_rectangle.width / 2.
    hand_rectangle.y -= hand_rectangle.width / 2.

    return hand_rectangle
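
# `get_distance` is referenced above but not defined in this example; a
# minimal sketch, assuming keypoints are (x, y) or (x, y, confidence) arrays:
def get_distance(p1, p2):
    # Euclidean distance between the (x, y) parts of two keypoints
    return np.linalg.norm(np.array(p1[:2]) - np.array(p2[:2]))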
Example #9
params["model_folder"] = '../data/models/'

# Do not use the face detector.
# Define constant face rectangles.
params["face_detector"] = 2

params["face"] = True
params["body"] = 0

opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()

# Face area in the video
face_rects = [
    op.Rectangle(400, 30, 512, 512),
]


def op_process_file(filename):
    img = cv2.imread(filename)

    datum = op.Datum()
    datum.cvInputData = img
    datum.faceRectangles = face_rects

    opWrapper.emplaceAndPop([datum])
    keypoints = datum.faceKeypoints[0]

    return keypoints
# Parse any remaining command-line flags into params
for i in range(0, len(args[1])):
    curr_item = args[1][i]
    if i != len(args[1]) - 1:
        next_item = args[1][i + 1]
    else:
        next_item = "1"
    if "--" in curr_item and "--" in next_item:
        key = curr_item.replace('-', '')
        if key not in params:
            params[key] = "1"
    elif "--" in curr_item and "--" not in next_item:
        key = curr_item.replace('-', '')
        if key not in params:
            params[key] = next_item
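
# Worked example (assumed input): args[1] = ['--body', '0', '--hand'] yields
# params == {'body': '0', 'hand': '1'}; flags without a value default to "1".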

# Fix handRectangles to a single person with one big rectangle so it does not
# have to be updated every frame
handRectangles = [[
    op.Rectangle(100.0, 150.0, 328.0, 390.0),
    op.Rectangle(0., 0., 0., 0.)
]]

opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()

# Process Image
datum = op.Datum()
datum.handRectangles = handRectangles
cam = cv2.VideoCapture(0)  # modify here for camera number
while (cv2.waitKey(1) != 27):
    # Get camera frame
    ret, frame = cam.read()
    datum.cvInputData = frame
Example #11
    )
    raise e

params = dict()
params["model_folder"] = r'C:\Work\Projects\fyp\openpose\openpose\models'
params["hand"] = True
params["hand_detector"] = 2
params["body"] = 0

opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()

handRectangles = [
    # Left/Right hands person 0
    [op.Rectangle(0., 0., 0., 0.),
     op.Rectangle(0., 0., res, res)]
]

datum = op.Datum()
datum.handRectangles = handRectangles

cap = cv2.VideoCapture(1)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame = cv2.resize(frame, (int(res), int(res)),
                       interpolation=cv2.INTER_AREA)

    # todo remove
Example #12
                top = facepos.item()['top']
                bottom = facepos.item()['bottom']
                lrdiff = right - left
                btdiff = bottom - top
                side = 0  # square side length (avoids shadowing built-in len)
                x = left
                y = top
                if lrdiff < btdiff:
                    side = lrdiff
                    y = y + (btdiff - lrdiff) / 2
                else:
                    side = btdiff
                    x = x + (lrdiff - btdiff) / 2

                faceRectangles = [
                    op.Rectangle(x, y, side, side),
                ]

                # Create new datum
                datum = op.Datum()
                datum.cvInputData = imageToProcess
                datum.faceRectangles = faceRectangles

                # Process and display image
                opWrapper.emplaceAndPop(op.VectorDatum([datum]))
                # print("Face keypoints: \n" + str(datum.faceKeypoints))

                # save openpose data
                # rootForFacepos = root.replace('iphone_pictures', 'openpose')
                # if not isdir(rootForFacepos):
                #     os.makedirs(rootForFacepos)
Example #13
datum = op.Datum()
# time measurement

# Load the hand/nose data
with open('./step1/hndata.pickle', 'rb') as fr:
    hnData = pickle.load(fr)

imagePaths = list(paths.list_images('./step2'))
print(imagePaths)
start = time.time()

for idx, imagePath in enumerate(imagePaths):
    imageToProcess = imread(imagePath)
    filename = imagePath.split(os.path.sep)[-1]
    faceRectangles = [
        op.Rectangle(hnData[filename][4] - 100.0, hnData[filename][5] - 100.0,
                     200.0, 200.0),
    ]

    datum.cvInputData = imageToProcess
    datum.faceRectangles = faceRectangles
    opWrapper.emplaceAndPop([datum])

    # Display the image
    # cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", datum.cvOutputData)
    # cv2.waitKey(0)
    # Save the image
    # for imagePath in imagePaths:
    label = imagePath.split(os.path.sep)[-2]
    check_folder('./step3', label)
    print('step3', idx, imagePath)
    print('time : ', time.time() - start)
Example #14
def main():
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
                # from openpose import *
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
                # from openpose import *
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"
        params["hand"] = True
        params["hand_detector"] = 2
        params["body"] = 1
        params["net_resolution"] = '320x192'  #20*11
        # params["face"] = True
        # params["disable_blending"] = True
        # params["fps_max"] = 5
        handRectangles = [[
            op.Rectangle(128, 0, 1024, 1024),
            op.Rectangle(0., 0., 0., 0.)
        ]]
        # device = 'cuda' if torch.cuda.is_available() else 'cpu'
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        datum.handRectangles = handRectangles
        cam = cv2.VideoCapture(0)  # modify here for camera number
        cam.set(3, 1280)
        cam.set(4, 1024)
        pair_poseKeypoints = [[], []]
        input_hands = []
        prev_state = None
        msg_state = ('not_sent', time.perf_counter())
        while (cv2.waitKey(1) != 27):
            if msg_state[0] == 'sent':
                if time.perf_counter() - msg_state[1] > 2.5:
                    msg_state = ('not_sent', time.perf_counter())

            ret, frame = cam.read()
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])
            frame = datum.cvOutputData
            '''If Person not in Camera'''
            if datum.poseKeypoints.shape == ():
                # conn.request("POST", "/v2/chat/users/[email protected]/messages", payload, headers)
                #
                # res = conn.getresponse()
                # data = res.read()
                #
                # print(data.decode("utf-8"))
                if msg_state[0] == 'not_sent':
                    # print('WHY NOT WORKING')
                    conn.request(
                        "POST",
                        "/v2/chat/users/[email protected]/messages",
                        payload_absent, headers)
                    #
                    res = conn.getresponse()
                    data = res.read()

                    print(data.decode("utf-8"))
                    msg_state = ('sent', time.perf_counter())
                font = cv2.FONT_HERSHEY_SIMPLEX
                fontScale = 3
                fontColor = (255, 255, 0)
                lineType = 2
                fontThickness = 2
                msg_on_screen = 'ABSENT!'
                textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                           fontThickness)[0]
                bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                          (1024 + textsize[1]) // 2)

                cv2.rectangle(frame, (0, 0), (1280, 1024), (0, 0, 255), 20)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
                cv2.imshow("Openpose 1.4.0 Webcam", frame)
                continue
            '''Evaluate Movement & Confidence'''
            del pair_poseKeypoints[0]
            pair_poseKeypoints.append(datum.poseKeypoints[0])
            body_confidence_avg = avg_pose_confidence(datum.poseKeypoints[0])
            # print(body_confidence_avg)
            moved = metric(pair_poseKeypoints)
            '''Evaluate Hand Gesture'''
            if len(input_hands) == 12:
                del input_hands[0]
            input_hands.append(datum.handKeypoints[0][0])
            # print(len(input_hands))
            prob, gesture = None, None
            hand_confidence_avg = avg_list_confidence(input_hands)
            # if len(input_hands) == 12 and avg >= 0.1:
            if len(input_hands) == 12:
                # print('Confidence : ', hand_confidence_avg)
                prob, gesture = get_hand_gesture('normalizev2.pt', input_hands,
                                                 'cuda')
            # print(prob, gesture)
            '''Output Recognition Results'''
            print_msg = False
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = None
            fontColor = None
            fontScale = 3
            fontThickness = 2
            msg_on_screen = None

            if valid_hand(hand_confidence_avg, gesture) and gesture == 1:
                print('THUMBS DOWN PROB : ', prob)
                if prob > 11:
                    '''Counter'''

                    if prev_state is None:
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_down', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_down':
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsdown, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:
                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (255, 0, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS DOWN'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())

            elif valid_hand(hand_confidence_avg, gesture) and gesture == 2:
                print('THUMBS UP PROB : ', prob)
                '''Counter'''
                if prob > 11:
                    if prev_state is None:
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_up', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_up':
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsup, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:

                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (0, 255, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS UP'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())

            elif valid_hand(hand_confidence_avg, gesture) and gesture == 4:
                print('RAISE HAND PROB : ', prob)
                '''Counter'''
                if prev_state is None:
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 5.5:
                        prev_state = ('raise_hand', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'raise_hand':
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(time.perf_counter() - prev_state[1])
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_raisehand, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())
                    if time.perf_counter() - prev_state[1] > 0.5:
                        print_msg = True
                        bottomLeftCornerOfText = (450, 500)
                        fontColor = (0, 255, 255)
                        fontScale = 3
                        msg_on_screen = 'HAND RAISED'
                        textsize = cv2.getTextSize(msg_on_screen, font,
                                                   fontScale, fontThickness)[0]
                        bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                                  (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())

            elif moved:
                '''Counter'''
                if prev_state is None:
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 1.5:
                        prev_state = ('detect_move', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'detect_move':
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(msg_state)
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_movement, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())

                    print_msg = True
                    bottomLeftCornerOfText = (150, 500)
                    fontColor = (255, 255, 255)
                    fontScale = 3
                    msg_on_screen = 'MOVEMENT DETECTED'
                    textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                               fontThickness)[0]
                    # print(textsize)
                    bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                              (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())

            if print_msg:
                # print(data.decode("utf-8"))
                # bottomLeftCornerOfText = (550, 500)
                # fontScale = 2
                # fontColor = (255, 0, 0)
                lineType = 2
                cv2.rectangle(frame, (0, 0), (1280, 1024), fontColor, 40)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
            cv2.imshow("Openpose 1.4.0 Webcam", frame)  # datum.cvOutputData
        # Always clean up
        cam.release()
        cv2.destroyAllWindows()

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(e)
        sys.exit(-1)
Example #15
        'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
    )
    raise e
cap = cv2.VideoCapture(0)
params = dict()
params["model_folder"] = "../../../models/"
params["hand"] = True
params["hand_detector"] = 2
params["body"] = 0
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
handRectangles = [
    # Left/Right hands person 0
    [
        op.Rectangle(0., 0., 0., 0.),
        op.Rectangle(160., 80., 320., 320.),
    ]
]
portx = "COM4"
#波特率,标准值之一:50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,19200,38400,57600,115200
bps = 9600
#超时设置,None:永远等待操作,0为立即返回请求结果,其他值为等待超时时间(单位为秒)
timex = 5
# 打开串口,并得到串口对象
ser = serial.Serial(portx, bps, timeout=timex)
pointpair = [[20, 19], [19, 18], [18, 17], [17, 0], [16, 15], [15, 14],
             [14, 13], [13, 0], [12, 11], [11, 10], [10, 9], [9, 0], [8, 7],
             [7, 6], [6, 5], [5, 0], [4, 3], [3, 2], [2, 1], [1, 0], [17, 13],
             [13, 9], [9, 5], [5, 1]]
while True:
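    # The original snippet is truncated at this loop header; a minimal sketch
    # of a plausible body (assumed): run OpenPose on each frame and draw the
    # right-hand skeleton along the keypoint pairs listed in `pointpair`.
    ret, frame = cap.read()
    if not ret:
        break
    datum = op.Datum()
    datum.cvInputData = frame
    datum.handRectangles = handRectangles
    opWrapper.emplaceAndPop([datum])
    keypoints = datum.handKeypoints[1][0]  # right hand, person 0
    for a, b in pointpair:
        if keypoints[a][2] > 0.1 and keypoints[b][2] > 0.1:  # confidence gate
            pa = (int(keypoints[a][0]), int(keypoints[a][1]))
            pb = (int(keypoints[b][0]), int(keypoints[b][1]))
            cv2.line(frame, pa, pb, (0, 255, 0), 2)
    cv2.imshow("Right hand skeleton", frame)
    if cv2.waitKey(1) == 27:
        break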
Example #16
        depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
        # color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        # print(f'width: {color_intrin.width} height: {color_intrin.height}')
        # depth_to_color_extrin = aligned_depth_frame.profile.get_extrinsics_to(color_frame.profile)

        # Default image from the RealSense: 848x480. The maximum input
        # resolution for OpenPose here seems to be about 300x300.
        r1x = 300
        r1y = 100
        r2w = 300
        r2h = 300

        handRectangles = [
            # Left/Right hands person 0
            [
                op.Rectangle(0., 0., 0., 0.),  # disable left hand
                op.Rectangle(r1x, r1y, r2w, r2h),
            ],

        ]

        # Create new datum
        datum = op.Datum()
        datum.cvInputData = color_image
        datum.handRectangles = handRectangles  # to be replaced by a real hand locator

        # Process and display image
        opWrapper.emplaceAndPop([datum])
        # print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
        # print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
        frame = datum.cvOutputData
    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Read image and face rectangle locations
    #imageToProcess = cv2.imread(args[0].image_path)
    cap = cv2.VideoCapture(args[0].image_path)
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0,
                          (frame_width, frame_height))
    faceRectangles = [
        op.Rectangle(330.119385, 277.532715, 48.717274, 48.717274),
        op.Rectangle(24.036991, 267.918793, 65.175171, 65.175171),
        op.Rectangle(151.803436, 32.477852, 108.295761, 108.295761),
    ]
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Create new datum
        datum = op.Datum()
        datum.cvInputData = frame
        datum.faceRectangles = faceRectangles

        # Process and display image
        opWrapper.emplaceAndPop([datum])
Example #18
def main():
    try:
        '''
        Imports openpose & pyopenpose from a built OpenPose installation.
        Be sure to build OpenPose first.
        Feel free to change the directories of the built Python packages.
        '''
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
                # from openpose import *
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
                # from openpose import *
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e
        '''
        Customizing part for Openpose Python Wrapper.
        For the project, only enable body & hand.
        '''
        params = dict()
        params["model_folder"] = "../../../models/"
        params["hand"] = True
        params["hand_detector"] = 2
        params["body"] = 1
        params["net_resolution"] = '320x192'  #20*11
        # params["face"] = True
        # params["disable_blending"] = True
        # params["fps_max"] = 5

        handRectangles = [[
            op.Rectangle(128, 0, 1024, 1024),
            op.Rectangle(0., 0., 0., 0.)
        ]]
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        datum.handRectangles = handRectangles
        cam = cv2.VideoCapture(0)  # modify here for camera number
        cam.set(3, 1280)
        cam.set(4, 1024)
        pair_poseKeypoints = [[], []]
        input_hands = []
        prev_state = None
        '''
        Loading Model for Recognizing Hand Gestures
        Please refer to model.py for various models trained for the task.
        The current implementation is based on cnn_resample_100.pt, which
        shows the best demo performance.
        If you would like to use other models, be sure to modify the code
        below accordingly.
        '''
        model = HandGestureNet(n_channels=42, n_classes=5)
        model.load_state_dict(torch.load('cnn_resample_100.pt'))
        # model.load_state_dict(torch.load('CNN_mid12.pt'))
        model.to('cuda')
        model.eval()
        '''
        The message state holds the time of the last state change; it is used
        to prevent bursts of messages to the Zoom chat in a short time.
        '''
        msg_state = ('not_sent', time.perf_counter())
        '''
        Starting the Project
        Pose & Hand Gesture Estimation
        '''
        while (cv2.waitKey(1) != 27):
            if msg_state[0] == 'sent':
                if time.perf_counter() - msg_state[1] > 2.5:
                    msg_state = ('not_sent', time.perf_counter())

            ret, frame = cam.read()
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])
            frame = datum.cvOutputData
            '''If Person not in Camera'''
            if datum.poseKeypoints.shape == ():
                if msg_state[0] == 'not_sent':
                    # print('WHY NOT WORKING')
                    conn.request(
                        "POST",
                        "/v2/chat/users/[email protected]/messages",
                        payload_absent, headers)
                    #
                    res = conn.getresponse()
                    data = res.read()

                    print(data.decode("utf-8"))
                    msg_state = ('sent', time.perf_counter())
                font = cv2.FONT_HERSHEY_SIMPLEX
                fontScale = 3
                fontColor = (0, 0, 255)
                lineType = 2
                fontThickness = 2
                msg_on_screen = 'ABSENT!'
                textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                           fontThickness)[0]
                bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                          (1024 + textsize[1]) // 2)

                cv2.rectangle(frame, (0, 0), (1280, 1024), (0, 0, 255), 20)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
                cv2.imshow("Openpose 1.4.0 Webcam", frame)
                continue
            '''If there are more than one people on camera'''
            if len(datum.poseKeypoints) > 1:
                if prev_state is not None and prev_state[0] == 'multi_people':
                    if time.perf_counter() - prev_state[1] > 2:  # seconds since state change
                        if msg_state[0] == 'not_sent':
                            # print('WHY NOT WORKING')
                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_multi, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        fontScale = 3
                        fontColor = (0, 0, 255)
                        lineType = 2
                        fontThickness = 2
                        msg_on_screen = 'MULTIPLE PEOPLE!'
                        textsize = cv2.getTextSize(msg_on_screen, font,
                                                   fontScale, fontThickness)[0]
                        bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                                  (1024 + textsize[1]) // 2)

                        cv2.rectangle(frame, (0, 0), (1280, 1024), (0, 0, 255),
                                      20)
                        cv2.putText(frame, msg_on_screen,
                                    bottomLeftCornerOfText, font, fontScale,
                                    fontColor, lineType)
                        cv2.imshow("Openpose 1.4.0 Webcam", frame)
                        msg_state = ('sent', time.perf_counter())
                        continue
                else:
                    prev_state = ('multi_people', time.perf_counter())
            '''Evaluate Movement & Confidence'''
            del pair_poseKeypoints[0]
            pair_poseKeypoints.append(datum.poseKeypoints[0])
            body_confidence_avg = avg_pose_confidence(datum.poseKeypoints[0])
            # print(body_confidence_avg)
            moved = metric(pair_poseKeypoints)
            '''
            Evaluate Hand Gesture
            Please modify the code below if you use other models for hand gestures.
            Refer to model.py
            '''
            # if len(input_hands) == 12:  # normalizev2.pt
            #     del input_hands[0]
            # input_hands.append(datum.handKeypoints[0][0])
            # # print(len(input_hands))
            # prob, gesture = None, None
            # hand_confidence_avg = avg_list_confidence(input_hands)
            # # if len(input_hands) == 12 and avg >= 0.1:
            # if len(input_hands) == 12:
            #     # print('Confidence : ', hand_confidence_avg)
            #     prob, gesture = get_hand_gesture(model, input_hands)
            # # print(prob, gesture)

            norm_hand = normalize(datum.handKeypoints[0][0])
            # print(norm_hand.shape)
            if len(input_hands) == 100:
                for i in range(5):
                    del input_hands[0]
            for i in range(5):
                input_hands.append(norm_hand)
            # print(len(input_hands))
            prob, gesture = None, None
            hand_confidence_avg = avg_list_confidence(input_hands)
            # if len(input_hands) == 12 and avg >= 0.1:
            if len(input_hands) == 100:
                # print('Confidence : ', hand_confidence_avg)
                # print(input_hands[0][:][:2])
                inputs = torch.FloatTensor(input_hands).to('cuda')
                inputs = inputs[:, :, :2]
                # print(inputs.size())
                # prob, gesture = get_hand_gesture_cnn(model, input_hands[:][:][:1])
                prob, gesture = get_hand_gesture_cnn(model, inputs)

            print(prob, gesture)
            '''Output Recognition Results'''
            print_msg = False
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = None
            fontColor = None
            fontScale = 3
            fontThickness = 2
            msg_on_screen = None
            '''Thumbs DOWN'''
            if valid_hand(hand_confidence_avg, gesture) and gesture == 1:
                print('THUMBS DOWN PROB : ', prob)
                if prob > 0:  # mitigated by using softmax
                    '''Counter'''

                    if prev_state is None:
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_down', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_down':
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsdown, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:
                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (255, 0, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS DOWN'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())
                '''Thumbs UP'''
            elif valid_hand(hand_confidence_avg, gesture) and gesture == 2:
                print('THUMBS UP PROB : ', prob)
                '''Counter'''
                if prob > 0:
                    if prev_state is None:
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_up', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_up':
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsup, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:

                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (0, 255, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS UP'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())
                '''RAISED HAND'''
            elif valid_hand(hand_confidence_avg, gesture) and gesture == 4:
                print('RAISE HAND PROB : ', prob)
                '''Counter'''
                if prev_state is None:
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 5.5:
                        prev_state = ('raise_hand', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'raise_hand':
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(time.perf_counter() - prev_state[1])
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_raisehand, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())
                    if time.perf_counter() - prev_state[1] > 0.5:
                        print_msg = True
                        bottomLeftCornerOfText = (450, 500)
                        fontColor = (0, 255, 255)
                        fontScale = 3
                        msg_on_screen = 'HAND RAISED'
                        textsize = cv2.getTextSize(msg_on_screen, font,
                                                   fontScale, fontThickness)[0]
                        bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                                  (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())
                '''
                Movement Detected
                Please refer to metric.py for detection metrics
                '''
            elif moved:
                '''Counter'''
                if prev_state is None:
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 1.5:
                        prev_state = ('detect_move', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'detect_move':
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(msg_state)
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_movement, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())

                    print_msg = True
                    bottomLeftCornerOfText = (150, 500)
                    fontColor = (0, 0, 255)
                    fontScale = 3
                    msg_on_screen = 'MOVEMENT DETECTED'
                    textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                               fontThickness)[0]
                    # print(textsize)
                    bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                              (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())
            '''Print messages according to states above'''
            if print_msg:
                lineType = 2
                cv2.rectangle(frame, (0, 0), (1280, 1024), fontColor, 40)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
            cv2.imshow("Openpose 1.4.0 Webcam", frame)  # datum.cvOutputData
        # Always clean up
        cam.release()
        cv2.destroyAllWindows()

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(e)
        sys.exit(-1)
    # Construct it from system arguments
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Read image and face rectangle locations
    imageToProcess = cv2.imread(args[0].image_path)
    handRectangles = [
        # Left/Right hands person 0
        [
            op.Rectangle(320.035889, 377.675049, 69.300949, 69.300949),
            op.Rectangle(0., 0., 0., 0.),
        ],
        # Left/Right hands person 1
        [
            op.Rectangle(80.155792, 407.673492, 80.812706, 80.812706),
            op.Rectangle(46.449715, 404.559753, 98.898178, 98.898178),
        ],
        # Left/Right hands person 2
        [
            op.Rectangle(185.692673, 303.112244, 157.587555, 157.587555),
            op.Rectangle(88.984360, 268.866547, 117.818230, 117.818230),
        ]
    ]

    # Create new datum
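    # The snippet is truncated here; a minimal completion, assuming it follows
    # the standard OpenPose hand_from_image.py tutorial flow:
    datum = op.Datum()
    datum.cvInputData = imageToProcess
    datum.handRectangles = handRectangles
    opWrapper.emplaceAndPop([datum])
    print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
    print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))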
Example #20
    hnData = pickle.load(fr)

print("images loading")
imagePaths = list(paths.list_images('./step1'))
print('imagePaths', imagePaths)

start = time.time()
for idx, imagePath in enumerate(imagePaths):
    classname = imagePath.split(os.path.sep)[-2]
    filename = imagePath.split(os.path.sep)[-1]
    imageToProcess = imread(imagePath)

    handRectangles = [
        # left hand, right hand
        [
            op.Rectangle(hnData[filename][0] - 100.0,
                         hnData[filename][1] - 100.0, 200.0, 200.0),
            op.Rectangle(hnData[filename][2] - 100.0,
                         hnData[filename][3] - 100.0, 200.0, 200.0),
        ],
    ]

    # Create new datum
    datum = op.Datum()
    datum.cvInputData = imageToProcess
    datum.handRectangles = handRectangles

    # Process and display image
    opWrapper.emplaceAndPop([datum])

    # Convert the results to numpy
    a = datum.handKeypoints
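
    # A plausible continuation (assumed): each element of `a` has shape
    # (people, 21, 3); flatten both hands and save one array per image.
    # The './step2' output path is an assumption based on Example #13.
    keypoints = np.hstack([a[0].reshape(-1, 63), a[1].reshape(-1, 63)])
    np.save(os.path.join('./step2', classname, filename + '.npy'), keypoints)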