Example no. 1
    def hand_skel(self, image_path, is_left=False):
        from openpose import pyopenpose as op

        # Read image and face rectangle locations
        img = cv2.imread(image_path)
        if img is None:
            return None
        height, width = img.shape[:2]
        top, bottom, left, right = get_square_padding(img)
        img = squarify(img, (top, bottom, left, right))
        hand_rect = op.Rectangle(0.0, 0.0, img.shape[0], img.shape[0])
        null_rect = op.Rectangle(0.0, 0.0, 0.0, 0.0)
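        # handRectangles expects one [left, right] pair per person; the zero-size rectangle marks the hand that is not estimated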
        if is_left:
            hand_rects = [[hand_rect, null_rect]]
        else:
            hand_rects = [[null_rect, hand_rect]]

        datum = op.Datum()
        datum.cvInputData = img
        datum.handRectangles = hand_rects
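        # emplaceAndPop processes the datum in place; the keypoints become available on datum.handKeypoints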
        self.op_wrap.emplaceAndPop(op.VectorDatum([datum]))

        if is_left:
            kps = datum.handKeypoints[0][0]
        else:
            kps = datum.handKeypoints[1][0]
        if left:
            kps[:, 0] -= left
        elif top:
            kps[:, 1] -= top
        return kps, width, height
Example no. 2
    def HandKeypoint_getKeypoints(self, HandImage, box):
        #
        # implementCODE
        #
        # keypoints = KeypointType()
        arr = np.frombuffer(HandImage.image, np.uint8)
        frame = np.reshape(
            arr, (HandImage.height, HandImage.width, HandImage.depth))
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        print('Input Data Received')
        box = np.array(box)
        ## Creating openpose rectangle
        hands_rectangles = [[
            self.box2oprectangle(box),
            op.Rectangle(0., 0., 0., 0.)
        ]]
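        # Single person, left hand only: the box goes in the left-hand slot and the right-hand slot gets a zero rectangle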

        # Create new datum
        datum = op.Datum()
        datum.cvInputData = frame
        datum.handRectangles = hands_rectangles
        # Process and display image
        self.openpose_wrapper.emplaceAndPop([datum])

        if datum.handKeypoints[0].shape == ():
            # if there were no detections
            hand_keypoints = None
        else:
            hand_keypoints = datum.handKeypoints[0][0]
        return hand_keypoints
Example no. 3
 def face_region_zoom_out(self, cv_image, rect):
     rect_coords = self.rect_to_bbox(rect)
     xleft = rect_coords[0] - 275
     yleft = rect_coords[1] - 100
     xright = rect_coords[2] - 25
     yright = rect_coords[1] - 100
     cv_image = cv2.rectangle(cv_image, (xleft, yleft),
                              (xleft + 300, yleft + 300), (255, 255, 0))
     cv_image = cv2.rectangle(cv_image, (xright, yright),
                              (xright + 300, yright + 300), (255, 0, 0))
     # cv2.imshow("Hand Region", cv_image)
     cv2.waitKey(1)
     handRectangle = [
         op.Rectangle(xleft, yleft, 300, 300),
         op.Rectangle(xright, yright, 300, 300)
     ]
     return handRectangle
Example no. 4
def hand_pose(model_dir, left=0):
    params = {'model_folder': model_dir}
    params['net_resolution'] = '240x192'
    #params['disable_blending'] = True
    #params['number_people_max'] = 1
    params['hand'] = True
    #params['face'] = True
    params['body'] = 0
    params['hand_detector'] = 2
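    # hand_detector=2 together with body=0 means the hand rectangles must be supplied by the caller instead of being derived from body keypoints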
    hands_boxes = [  # left & right hands: only 1-person
        [pyop.Rectangle(0, 0, 0, 0),
         pyop.Rectangle(0, 0, 640, 640)]
    ]

    #opWrapper = pyop.WrapperPython(pyop.ThreadManagerMode.Synchronous)
    #opWrapper.configure(params); opWrapper.execute() # webcam
    '''opWrapper = pyop.WrapperPython(pyop.ThreadManagerMode.AsynchronousOut)
    opWrapper.configure(params); opWrapper.start() # webcam
    while cv2.waitKey(5)!=27: # faster
        t = time(); x = pyop.VectorDatum()
        if not opWrapper.waitAndPop(x): continue
        x = x[0]; im = x.cvOutputData; fps = 'FPS=%.1f'%(1/(time()-t))
        im = cv2.putText(im, fps, (5,20), 4, 0.7, (0,255,255), 1, 16)
        cv2.imshow('OpenPose', im) # hand_standalone unsupported
    cv2.destroyAllWindows()#'''

    opWrapper = pyop.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    x = pyop.Datum()
    cap = cv2.VideoCapture(-1)
    hand_boxes = hand_box(cap, left)
    while cv2.waitKey(5) != 27:  # slower
        t = time()
        _, im = cap.read()
        x.cvInputData = im
        if params['hand_detector'] == 2: x.handRectangles = hand_boxes
        opWrapper.emplaceAndPop(pyop.VectorDatum([x]))
        im = x.cvOutputData
        fps = 'FPS=%.1f' % (1 / (time() - t))
        im = cv2.putText(im, fps, (5, 20), 4, 0.7, (0, 255, 255), 1, 16)
        cv2.imshow('OpenPose', RSZ(im, 1.5))  # cv2.LINE_AA=16
    cv2.destroyAllWindows()
    cap.release()  #'''
Example no. 5
def box2oprectangle(box):
    left, right, top, bottom = box
    width = np.abs(right - left)
    height = np.abs(top - bottom)
    max_length = int(max(width, height))
    center = (int(left + (width / 2)), int(bottom - (height / 2)))
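    # OpenPose's hand detector needs a fairly large bounding box, so enlarge the square around the center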
    new_top = (int(center[0] - max_length / 1.3),
               int(center[1] - max_length / 1.3))
    max_length = int(max_length * 1.6)
    hand_rectangle = op.Rectangle(new_top[0], new_top[1], max_length,
                                  max_length)
    return hand_rectangle
Example no. 6
 def box2oprectangle(self, box):
     [left, right, top, bottom] = box
     width = np.abs(right - left)
     height = np.abs(top - bottom)
     max_length = int(max(width, height))
     center = (int(left + (width / 2)), int(bottom - (height / 2)))
     # OpenPose's hand detector needs the bounding box to be quite big, so we make it bigger
     # Top point for rectangle
     new_top = (int(center[0] - max_length / 1.3),
                int(center[1] - max_length / 1.3))
     max_length = int(max_length * 1.6)
     hand_rectangle = op.Rectangle(new_top[0], new_top[1], max_length,
                                   max_length)
     return hand_rectangle
Example no. 7
def detect_keypoints(image, opWrapper_, hand_boxes=None):
    if hand_boxes is None:
        hand_boxes = [[0, image.shape[1] - 1, 0, image.shape[0] - 1]]
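        # Default: treat the whole image as a single (left, right, top, bottom) box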
    # Left hand only
    hands_rectangles = [[box2oprectangle(box),
                         op.Rectangle(0., 0., 0., 0.)] for box in hand_boxes]

    datum = op.Datum()
    datum.cvInputData = image
    datum.handRectangles = hands_rectangles
    opWrapper_.emplaceAndPop([datum])

    if datum.handKeypoints[0].shape == ():
        hand_keypoints = []
    else:
        hand_keypoints = datum.handKeypoints[0]
    return hand_keypoints, datum.cvOutputData
Example no. 8
	if depth_center != 0 and depth_center < 1500:
		print("We are supposed to return this")
		print(depth_center)
	else:
		depth_center = 0

try:
    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    # Read image and face rectangle locations
    imageToProcess = cv2.imread('Coloredimage.jpg')
    faceRectangles = [
        op.Rectangle(face[0],face[1],face[2],face[3])
    ]

    # Create new datum
    datum = op.Datum()
    datum.cvInputData = imageToProcess
    datum.faceRectangles = faceRectangles

    # Process and display image
    opWrapper.emplaceAndPop([datum])
    print("Face keypoints: \n" + str(datum.faceKeypoints))
    cv2.imshow("OpenPose 1.4.0 - Tutorial Python API", datum.cvOutputData)
    cv2.waitKey(0)
except Exception as e:
    # print(e)
    sys.exit(-1)
Example no. 9
            if key not in params: params[key] = next_item

    # Construct it from system arguments
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Read image and face rectangle locations
    # If you use face_detector 2 and body 0 options, then you must provide the face regions
    imageToProcess = cv2.imread(args[0].image_path)
    faceRectangles = [
        op.Rectangle(330.119385, 277.532715, 48.717274, 48.717274),
        op.Rectangle(24.036991, 267.918793, 65.175171, 65.175171),
        op.Rectangle(151.803436, 32.477852, 108.295761, 108.295761),
    ]

    # Create new datum
    datum = op.Datum()
    datum.cvInputData = imageToProcess
    datum.faceRectangles = faceRectangles

    # Process and display image
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
    print("Face keypoints: \n" + str(datum.faceKeypoints))
    cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
    cv2.waitKey(0)
except Exception as e:
    print(e)
    sys.exit(-1)
Example no. 10
def hand_box(sz, left=0):
    # Zero-size rectangle for the hand that is not tracked
    non = pyop.Rectangle(0, 0, 0, 0)
    if isinstance(sz, cv2.VideoCapture):  # use the larger of the frame width/height
        sz = max(sz.get(cv2.CAP_PROP_FRAME_WIDTH), sz.get(cv2.CAP_PROP_FRAME_HEIGHT))
    box = pyop.Rectangle(0, 0, sz, sz)
    # One [left, right] pair for a single person
    return [[box, non] if left else [non, box]]
Example no. 11
    if len(files) == 0:
        raise Exception("Input directory is empty")

    print("Found", len(files), "files")

    images = [cv2.imread(os.path.join(args.input, f)) for f in files]
    data = []
    for image in images:
        # Create new datum
        datum = op.Datum()
        datum.cvInputData = image
        datum.handRectangles = [
            # Left/Right hands person 0
            [
                op.Rectangle(0., 0., 0., 0.),
                op.Rectangle(0, 0, image.shape[1], image.shape[0]),  # Rectangle takes (x, y, width, height)
            ],
        ]
        data.append(datum)
        get_hand_detector().emplaceAndPop([datum])

    for f, image, datum in zip(files, images, data):
        pose = [[float(x) for x in r][:2]
                for r in np.array(datum.handKeypoints[1][0], dtype=np.float16)]
        print(pose)

        save_loc = os.path.join(args.output, f)
        if args.format == "json":
            json.dump(pose, open(save_loc + ".json", "w"))
        else:
Example no. 12
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Read image and hand rectangle locations
    # If you use hand_detector 2 and body 0 options, then you must provide the hand regions

    imageToProcess = cv2.imread(args[0].image_path)
    handRectangles = [
        # Left/Right hands person 0
        [
            op.Rectangle(320.035889, 377.675049, 69.300949, 69.300949),
            op.Rectangle(0., 0., 0., 0.),
        ],
        # Left/Right hands person 1
        [
            op.Rectangle(80.155792, 407.673492, 80.812706, 80.812706),
            op.Rectangle(46.449715, 404.559753, 98.898178, 98.898178),
        ],
        # Left/Right hands person 2
        [
            op.Rectangle(185.692673, 303.112244, 157.587555, 157.587555),
            op.Rectangle(88.984360, 268.866547, 117.818230, 117.818230),
        ]
    ]
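    # One [left, right] Rectangle pair per person; a zero rectangle stands in for a hand that should not be estimated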

    # Create new datum
Example no. 13
 def Rectangle(*args, **kwargs):
     return pyopenpose.Rectangle(*args, **kwargs)
Example no. 14
    while True:
        start = time.time()
        rec, imageToProcess = cam.read()
        if not rec:
            break

        # Read frame and hand rectangle locations
        #imageToProcess = cv2.imread(args[0].image_path)
        print(imageToProcess.shape)
        height = imageToProcess.shape[0]
        width = imageToProcess.shape[1]
        handRectangles = [
            # Left/Right hands person 0
            [
                op.Rectangle(0., 0., height, height),
                op.Rectangle(width / 2., 0., height, height),
            ],
            # Left/Right hands person 1
            [
                op.Rectangle(0., 0., height, height),
                op.Rectangle(width / 2, 0., height, height),
            ]
        ]
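        # Left-hand boxes cover the left portion of the frame and right-hand boxes start at the horizontal midpoint; both person entries reuse the same regions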

        # Create new datum
        datum = op.Datum()
        datum.cvInputData = imageToProcess
        datum.handRectangles = handRectangles

        # Process and display image