Example #1
    def __init__(self, args):
        self.args = args

        # init openpose and 3d lifting model
        self.opWrapper = op.WrapperPython()
        params = dict(model_folder="/openpose/models/")
        params['face'] = cfg.face
        params['hand'] = False
        params['model_pose'] = cfg.model_pose
        params['num_gpu'] = 1
        params['num_gpu_start'] = self.args.pid % cfg.ngpu  # pin each worker process to a GPU, round-robin by pid
        self.opWrapper.configure(params)
        self.opWrapper.start()
        self.poseLifting = Prob3dPose()

        # cv bridge
        self.bridge = CvBridge()

        # subscriber and publisher
        self.sub = rospy.Subscriber(
            "/multi_proc_backend/info_{}".format(self.args.pid), ImageInfo,
            self.callback)
        self.pub = rospy.Publisher("/multi_proc_backend/result",
                                   String,
                                   tcp_nodelay=True,
                                   queue_size=1)
    def mesh(self, image):
        image_h, image_w = image.shape[:2]
        width = 640
        height = 480
        pose_2d_mpiis = []
        visibilities = []

        # NOTE: self.e (a TfPoseEstimator) is assumed to be created elsewhere in
        # this class; the __init__ above only configures the OpenPose wrapper.
        humans = self.e.inference(image, scales=[None])

        for human in humans:
            pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
            pose_2d_mpiis.append([(int(x * width + 0.5), int(y * height + 0.5))
                                  for x, y in pose_2d_mpii])
            visibilities.append(visibility)

        # pose_2d_mpiis = np.array(pose_2d_mpiis)
        # visibilities = np.array(visibilities)
        # transformed_pose2d, weights = self.poseLifting.transform_joints(
        #     pose_2d_mpiis, visibilities
        # )
        # pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights)

        # NOTE: this reloads the lifting model on every call, although __init__
        # already created self.poseLifting; reusing that instance would be cheaper.
        poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')
        pose_2d_mpiis = np.array(pose_2d_mpiis)
        visibilities = np.array(visibilities)
        print(pose_2d_mpiis)
        transformed_pose2d, weights = poseLifting.transform_joints(
            pose_2d_mpiis, visibilities)
        pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)
        print(pose_3d)

        keypoints = pose_3d[0].transpose()

        # scale the lifted coordinates down so they fit the GL grid view
        return keypoints / 80
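A note on the coordinate conversion in mesh(): the estimator's keypoints are relative coordinates, scaled here onto a 640x480 canvas, and int(x * width + 0.5) is round-half-up to the nearest pixel. A quick worked example (values hypothetical):

# Worked example of the pixel conversion used above (values hypothetical).
width, height = 640, 480
x, y = 0.4372, 0.51           # relative coordinates from the estimator
px = int(x * width + 0.5)     # 0.4372 * 640 = 279.808 -> 280
py = int(y * height + 0.5)    # 0.51   * 480 = 244.8   -> 245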
def Estimate_3Ddata(image, e, scales):
    # t0 = time.time()
    # # estimate human poses from a single image !
    # t = time.time()
    humans = e.inference(image, scales=scales)
    #elapsed = time.time() - t
    image = TfPoseEstimator.draw_humans(image, humans)
    #logger.info('inference image:%.4f seconds.' % (elapsed))
    logger.info('3d lifting initialization.')

    poseLifting = Prob3dPose('lifting/models/prob_model_params.mat')

    standard_w = 320
    standard_h = 240

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    if pose_2d_mpiis.ndim != 3:
        # no complete set of 2D joints was found; signal failure to the caller
        return 0
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    #alltime= time.time() - t0
    #logger.info('estimate all time:%.4f seconds.' % (alltime))
    return pose_3d, image
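A minimal, hypothetical driver for Estimate_3Ddata. The import paths are assumptions following the old src/ layout of tf-pose-estimation used elsewhere in this listing:

# Hypothetical usage sketch; module paths are assumed, not from the original.
from estimator import TfPoseEstimator
from networks import get_graph_path, model_wh
import common

w, h = model_wh('432x368')
e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))
image = common.read_imgfile('./images/p1.jpg', None, None)

result = Estimate_3Ddata(image, e, scales=[None])
if result == 0:
    print('no complete 2D pose found')  # the ndim check above returned 0
else:
    pose_3d, drawn_image = result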
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        """

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 110, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)
        gz.translate(0, 0, -10)
        self.window.addItem(gx)
        self.window.addItem(gy)
        self.window.addItem(gz)

        modeln = 'mobilenet_thin'
        modelr = '432x368'
        camera = 0

        self.lines = {}
        self.connection = [
            [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6],
            [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12],
            [12, 13], [8, 14], [14, 15], [15, 16]
        ]

        w, h = model_wh(modelr)
        self.e = TfPoseEstimator(get_graph_path(modeln), target_size=(w, h))
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose('./skeleton_humanactivity/lifting/models/prob_model_params.mat')
        keypoints = self.mesh(image)

        self.points = gl.GLScatterPlotItem(
            pos=keypoints,
            color=pg.glColor((0, 255, 0)),
            size=15
        )
        self.window.addItem(self.points)

        for n, pts in enumerate(self.connection):
            self.lines[n] = gl.GLLinePlotItem(
                pos=np.array([keypoints[p] for p in pts]),
                color=pg.glColor((0, 0, 255)),
                width=3,
                antialias=True
            )
            self.window.addItem(self.lines[n])
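These GLViewWidget examples only draw the first camera frame inside __init__; a per-frame refresh is the natural companion. A minimal sketch, not from the original source, written as two extra methods of the class above (assumes QtCore is available from pyqtgraph.Qt):

# Hypothetical per-frame refresh: re-run mesh() on a new camera frame and
# push the coordinates into the existing GL items.
from pyqtgraph.Qt import QtCore

def update(self):
    ret_val, image = self.cam.read()
    keypoints = self.mesh(image)
    self.points.setData(pos=keypoints)
    for n, pts in enumerate(self.connection):
        self.lines[n].setData(pos=np.array([keypoints[p] for p in pts]))

def animation(self):
    timer = QtCore.QTimer()
    timer.timeout.connect(self.update)
    timer.start(50)  # refresh roughly every 50 ms
    self.app.exec_()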
Example #5
def process_multi(img_path, model):
    image = common.read_imgfile(img_path, None, None)
    filename = os.path.split(img_path)[1].split('.')[0]
    scales = ast.literal_eval(node_or_string='[None]')
    humans = model.inference(image, scales=scales)
    image = model.draw_humans(image, humans, imgcopy=False)
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    path_save_2d = './results/' + filename + '_2d.jpg'
    if not os.path.exists('./results/'):
        os.makedirs('./results/')  # cv2.imwrite fails silently if the directory is missing
    cv2.imwrite(path_save_2d, image)
    # cv2.imshow('result', image)
    #cv2.waitKey()

    #poseLifting = Prob3dPose('/data/ai/JF/pose_estimation/multi_pose_estimator/lifting/models/prob_model_params.mat')
    poseLifting = Prob3dPose('./lifting/models/prob_model_params.mat')
    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * image_w + 0.5), int(y * image_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    num = len(pose_3d)
    l = (num + 1) // 2  # ceil(num / 2): grid dimension for the subplots

    path_save_3d = './results/' + filename + '_3d.png'

    fig = plt.figure()
    for i, single_3d in enumerate(pose_3d):
        plot_pose(single_3d, i, l, fig, num)
    plt.savefig(path_save_3d)
    # plt.show()

    return path_save_2d, path_save_3d
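A short, hypothetical call site for process_multi, assuming model is a TfPoseEstimator built as in the other examples:

# Hypothetical usage; 'model' is assumed to be a TfPoseEstimator instance.
path_2d, path_3d = process_multi('./images/p1.jpg', model)
print('2D overlay:', path_2d)  # ./results/p1_2d.jpg
print('3D figure:', path_3d)   # ./results/p1_3d.png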
Example #6
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        """
        # Initialize plot.
        plt.ion()
        f = plt.figure(figsize=(5, 5))
        f2 = plt.figure(figsize=(6, 5))

        # Matplotlib removed gca(projection=...) in 3.7; add_subplot is the replacement
        self.window3DBody = f.add_subplot(projection='3d')
        self.window3DBody.set_title('3D_Body')
        self.windowStable = f2.add_subplot(1, 1, 1)
        self.windowStable.set_title('Stable')
        self.windowStable.set_xlabel('Time')
        self.windowStable.set_ylabel('Distance')
        self.windowStable.set_ylim([0, 1500])

        #plt.show()
        self.times = [0]
        self.stable = [0]
        self.recordHead = []
        self.fps_time = 0

        model = 'mobilenet_thin_432x368'
        w, h = model_wh(model)
        #model = 'cmu'
        #w,h = 656,368
        camera = 1  # 1 = external camera, 0 = built-in camera

        self.lines = {}
        self.connection = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6],
                           [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12],
                           [12, 13], [8, 14], [14, 15], [15, 16]]
        self.e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()  # VideoCapture.read() takes no imread flag

        self.poseLifting = Prob3dPose(
            './src/lifting/models/prob_model_params.mat')
        self.statusBodyWindow = 0
        try:
            keypoints = self.mesh(image)
        except Exception:
            # mesh() can fail on the first frame, e.g. when no person is detected
            pass
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        """

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 110, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)
        gz.translate(0, 0, -10)
        self.window.addItem(gx)
        self.window.addItem(gy)
        self.window.addItem(gz)

        model = 'mobilenet_thin'
        camera = 0
        w, h = model_wh("432x368")
        self.e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        self.cam = cv2.VideoCapture(0)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose('lifting/models/prob_model_params.mat')
        keypoints = self.mesh(image)
        
        self.points = gl.GLScatterPlotItem(
            pos=keypoints,
            color=pg.glColor((255, 255, 0)),
            size=15
        )
        # self.lines = gl.GLLinePlotItem(
        #     pos=lines,
        #     color=pg.glColor((255, 255, 255)),
        #     width=2
        # )
        self.window.addItem(self.points)
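The commented-out GLLinePlotItem above was left without a pos array. The sibling examples in this listing build one line item per pair of connected keypoints; a sketch of what that block would need, following their pattern:

# Sketch of the commented-out block, using the connection pairs from the
# other examples: one GLLinePlotItem per limb segment.
self.lines = {}
self.connection = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6],
                   [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12],
                   [12, 13], [8, 14], [14, 15], [15, 16]]
for n, pts in enumerate(self.connection):
    self.lines[n] = gl.GLLinePlotItem(
        pos=np.array([keypoints[p] for p in pts]),
        color=pg.glColor((255, 255, 255)),
        width=2,
        antialias=True)
    self.window.addItem(self.lines[n])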
Example #8
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        add some grids in the 3 point... that are just filling up the background
        """

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 110, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)  # we translate all of them
        gz.translate(0, 0, -10)  # so they're pushed out to the background
        self.window.addItem(gx)
        self.window.addItem(gy)  # add them all to the window
        self.window.addItem(gz)

        model = "mobilenet_thin_432x368"
        camera = 0
        w, h = model_wh(model)
        # create the estimator object, mirroring what run.py does (around line 37),
        # but with a local model variable instead of args.model
        self.e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose(
            './src/lifting/models/prob_model_params.mat')
        # this object performs the 2D-to-3D pose lifting (see above)
        keypoints = self.mesh(image)

        self.points = gl.GLScatterPlotItem(  # plot dots
            pos=keypoints,
            color=pg.glColor((0, 255, 0)),
            size=15)
        self.window.addItem(self.points)
Example #9
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    a = fig.add_subplot(2, 2, 4)
    a.set_title('Vectormap-y')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()
    plt.show()

    import sys
    sys.exit(0)

    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5), int(y * standard_h + 0.5)) for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(pose_2d_mpiis, visibilities)
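The listing cuts this snippet off; in the sibling examples the pipeline continues with the 3D solve:

# Continuation as it appears in the other examples (not part of this snippet):
pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)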
Example #10
def pose_ddd():
    global pose_3d
    u_r_s=20
    u_l_s=180
    left_elbow_dir=1
    right_elbow_dir=1
    
    
    while True:
        try:
            # NOTE: constructing Prob3dPose once, outside the loop, would avoid
            # reloading the model parameters on every frame.
            poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')

            #image_h, image_w = image.shape[:2]
            standard_w = 640
            standard_h = 480

            pose_2d_mpiis = []
            visibilities = []
            for human in humans:
                pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
                pose_2d_mpiis.append([(int(x * standard_w + 0.5), int(y * standard_h + 0.5)) for x, y in pose_2d_mpii])
                visibilities.append(visibility)
            pose_2d_mpiis = np.array(pose_2d_mpiis)
            visibilities = np.array(visibilities)
            transformed_pose2d, weights = poseLifting.transform_joints(pose_2d_mpiis, visibilities)
            pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)
            l_knee.update(pose_3d[0])
            l_hip.update(pose_3d[0])
            r_knee.update(pose_3d[0])
            r_hip.update(pose_3d[0])
            l_elbow.update(pose_3d[0])
            r_elbow.update(pose_3d[0])
            l_shoulder.update(pose_3d[0])
            r_shoulder.update(pose_3d[0])
            u_r_shoulder.update(pose_3d[0])
            u_l_shoulder.update(pose_3d[0])


            
            if u_r_shoulder.joint_angle < 30:
                u_r_s = 90
            elif u_r_shoulder.joint_angle > 30:
                u_r_s = 20  # down

            if u_l_shoulder.joint_angle < 30:
                u_l_s = 90
            elif u_l_shoulder.joint_angle > 30:
                u_l_s = 180  # down
            

            # indices into pose_3d[0][0]: 15 = right elbow, 16 = right hand
            if pose_3d[0][0, 15] > pose_3d[0][0, 16]:
                right_elbow_dir = 1
            elif pose_3d[0][0, 15] < pose_3d[0][0, 16]:
                right_elbow_dir = -1

            # the same test for the left arm (indices 12 and 13)
            if pose_3d[0][0, 12] > pose_3d[0][0, 13]:
                left_elbow_dir = 1
            elif pose_3d[0][0, 12] < pose_3d[0][0, 13]:
                left_elbow_dir = -1

            angles = [
                r_hip.joint_angle,
                r_knee.joint_angle - 90,
                l_hip.joint_angle,
                l_knee.joint_angle - 90,
                right_elbow_dir * (r_elbow.joint_angle - 140),
                r_shoulder.joint_angle,
                l_shoulder.joint_angle,
                left_elbow_dir * (l_elbow.joint_angle - 145),
                u_r_s,
                u_l_s,
            ]
            message = ','.join(str(a) for a in angles)
            
            clientsocket.send(message.encode('utf-8'))

            
            if BREAK:
                clientsocket.send("off".encode('utf-8'))
                print("off sent")
                break

        except Exception:
            # skip frames with no usable pose; note this also hides real errors
            pass
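The socket message built above is ten comma-separated angle values, or the literal string "off". A hypothetical receiving end might parse it like this (the socket name and one-message-per-recv framing are assumptions; real TCP code would need explicit framing):

# Hypothetical receiver for the angle protocol above.
data = sock.recv(1024).decode('utf-8')
if data == 'off':
    running = False
else:
    (r_hip, r_knee, l_hip, l_knee, r_elbow, r_shoulder,
     l_shoulder, l_elbow, u_r_s, u_l_s) = (float(v) for v in data.split(','))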
Example #11
    def adHocData(self):
        # NOTE: Windows-style path separators; os.path.join would be portable
        directory_in_str = sys.path[0] + "\\..\\images\\OurTest\\"

        # Delete old csv files
        try:
            os.remove(outputfile)
            os.remove(cleanedOutputfile)
        except OSError:
            pass

        # Run through every image in the folder
        for file in os.listdir(directory_in_str):
            filename = os.fsdecode(file)
            if filename.endswith(".jpg") or filename.endswith(".png"):
                fullpath = directory_in_str + filename

                # Estimate human poses from a single image !
                image = common.read_imgfile(fullpath, None, None)
                # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
                t = time.time()
                humans = self.e.inference(image, scales=self.scales)
                elapsed = time.time() - t

                logger.info('inference image: %s in %.4f seconds.' %
                            (fullpath, elapsed))

                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)
                # cv2.imshow('tf-pose-estimation result', image)
                # cv2.waitKey()

                # append a row marker for this image; the data write itself
                # is still commented out
                with open(outputfile, 'a') as myFile:
                    # myFile.write(str(filename) + ',')
                    # print(filename)
                    myFile.write('\n')

                # Attempt to plot our skeletons onto the image
                try:

                    fig = plt.figure()
                    a = fig.add_subplot(2, 2, 1)
                    a.set_title('Result')
                    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

                    bgimg = cv2.cvtColor(image.astype(np.uint8),
                                         cv2.COLOR_BGR2RGB)
                    bgimg = cv2.resize(
                        bgimg,
                        (self.e.heatMat.shape[1], self.e.heatMat.shape[0]),
                        interpolation=cv2.INTER_AREA)

                    # show network output
                    a = fig.add_subplot(2, 2, 2)
                    plt.imshow(bgimg, alpha=0.5)
                    tmp = np.amax(self.e.heatMat[:, :, :-1], axis=2)
                    plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()

                    tmp2 = self.e.pafMat.transpose((2, 0, 1))
                    tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
                    tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

                    a = fig.add_subplot(2, 2, 3)
                    a.set_title('Vectormap-x')
                    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
                    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()

                    a = fig.add_subplot(2, 2, 4)
                    a.set_title('Vectormap-y')
                    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
                    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()
                    plt.show()

                    logger.info('3d lifting initialization.')
                    poseLifting = Prob3dPose(
                        sys.path[0] +
                        '\\lifting\\models\\prob_model_params.mat')

                    image_h, image_w = image.shape[:2]
                    standard_w = 640
                    standard_h = 480

                    pose_2d_mpiis = []
                    visibilities = []
                    for human in humans:
                        pose_2d_mpii, visibility = common.MPIIPart.from_coco(
                            human)
                        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                                               int(y * standard_h + 0.5))
                                              for x, y in pose_2d_mpii])
                        visibilities.append(visibility)

                    pose_2d_mpiis = np.array(pose_2d_mpiis)
                    visibilities = np.array(visibilities)
                    transformed_pose2d, weights = poseLifting.transform_joints(
                        pose_2d_mpiis, visibilities)
                    pose_3d = poseLifting.compute_3d(transformed_pose2d,
                                                     weights)

                    for i, single_3d in enumerate(pose_3d):
                        plot_pose(single_3d)
                    plt.show()

                except Exception as err:
                    print("Error when plotting image:", err)

        dataScriptGenerator.dataCleanup(self)
Example #12
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    a = fig.add_subplot(2, 2, 4)
    a.set_title('Vectormap-y')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()
    plt.show()

    import sys
    sys.exit(0)

    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose(
        './pose_estimation/lifting/models/prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
Example #13
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        add some grids in the 3 point... that are just filling up the background
        """

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setFixedSize(1920, 1080)
        self.window.showMaximized()
        # self.window.setMaximumSize(1920, 1080)
        # self.win = pg.GraphicsWindow()
        # self.vb = self.win.addViewBox(col=0, row=0)
        # self.t = pg.TextItem("zeynep", (255, 255, 255), anchor=(0,0))
        # self.vb.addItem(self.t)
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 0, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()
        self.angle = ""
        # self.root = Tk()
        # self.root.geometry("100x50+%d+%d" % (0, 0))  # top left
        # self.root.overrideredirect(True)  # frameless tkinter window
        # self.root.resizable(False, False)
        # self.root.columnconfigure(0, weight=1)
        # self.root.rowconfigure(0, weight=1)

        # TEXT = ""
        # self.lbl = tk.Label(root, text=TEXT, bg="#e61c1c", font=("bold", 15), border=0, width=35)
        # self.lbl.grid(column=0, row=0, sticky="nsew")

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)  # we translate all of them
        gz.translate(0, 0, -10)  # so they're pushed out to the background
        self.window.addItem(gx)
        self.window.addItem(gy)  # add them all to the window
        self.window.addItem(gz)

        model = "mobilenet_thin_432x368"
        camera = 0

        self.lines = {}
        self.connections = [  # pairs of keypoint indices to join with line segments
            [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],
            [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15],
            [15, 16]
        ]

        w, h = model_wh(model)
        # create the estimator object, mirroring what run.py does (around line 37),
        # but with a local model variable instead of args.model
        self.e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose(
            './src/lifting/models/prob_model_params.mat')
        # this object performs the 2D-to-3D pose lifting (see above)
        keypoints = self.mesh(image)
        # rightPointList = keypoints[2:5]
        # split the 17 lifted joints into three groups for separate coloring
        self.rightPointList = keypoints[11:14]
        self.leftPointList = keypoints[14:]

        self.a = (
            gl.GLScatterPlotItem(  # plot dot
                pos=keypoints[:11],
                color=pg.glColor((0, 255, 0)),
                size=15))
        self.window.addItem(self.a)
        self.right = (
            gl.GLScatterPlotItem(  # plot dot
                pos=self.rightPointList,
                color=pg.glColor((255, 0, 0)),
                size=15))
        self.window.addItem(self.right)
        self.left = (
            gl.GLScatterPlotItem(  # plot dot
                pos=self.leftPointList,
                color=pg.glColor((0, 0, 255)),
                size=15))
        self.window.addItem(self.left)

        for n, pts in enumerate(self.connections):
            self.lines[n] = gl.GLLinePlotItem(  # lines dict with all of them
                pos=np.array([keypoints[p] for p in pts]),
                color=pg.glColor((0, 0, 255)),
                width=3,
                antialias=True)
            # add them to our window
            self.window.addItem(self.lines[n])
Example #14
    def __init__(self):

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setFixedSize(1920, 1080)
        # self.window.showMaximized()

        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 0, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()
        self.angle = ""

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)  # we translate all of them
        gz.translate(0, 0, -10)  # so they're pushed out to the background
        self.window.addItem(gx)
        self.window.addItem(gy)  # add them all to the window
        self.window.addItem(gz)

        model = "mobilenet_thin_432x368"
        camera = 0

        self.lines = {}
        self.connections = [  # pairs of keypoint indices to join with line segments
            [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],
            [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15],
            [15, 16]
        ]

        w, h = model_wh(model)
        # create the estimator object, mirroring what run.py does (around line 37),
        # but with a local model variable instead of args.model
        print("model graph path:", get_graph_path(model))
        self.e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        print("estimator:", self.e)
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose(
            './src/lifting/models/prob_model_params.mat')
        # this object performs the 2D-to-3D pose lifting (see above)
        keypoints = self.mesh(image)
        self.rightPointList = keypoints[11:14]
        self.leftPointList = keypoints[14:]

        self.a = (
            gl.GLScatterPlotItem(  # plot dot
                pos=keypoints[:11],
                color=pg.glColor((0, 255, 0)),
                size=15))
        self.window.addItem(self.a)
        self.right = (
            gl.GLScatterPlotItem(  # plot dot
                pos=self.rightPointList,
                color=pg.glColor((255, 0, 0)),
                size=15))
        self.window.addItem(self.right)
        self.left = (
            gl.GLScatterPlotItem(  # plot dot
                pos=self.leftPointList,
                color=pg.glColor((0, 0, 255)),
                size=15))
        self.window.addItem(self.left)

        for n, pts in enumerate(self.connections):
            self.lines[n] = gl.GLLinePlotItem(  # lines dict with all of them
                pos=np.array([keypoints[p] for p in pts]),
                color=pg.glColor((0, 0, 255)),
                width=3,
                antialias=True)
            # add them to our window
            self.window.addItem(self.lines[n])
    def __init__(self):
        """
        Initialize the graphics window and mesh surface
        """

        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 110, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()

        gx = gl.GLGridItem()
        gy = gl.GLGridItem()
        gz = gl.GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)
        gz.translate(0, 0, -10)
        self.window.addItem(gx)
        self.window.addItem(gy)
        self.window.addItem(gz)

        model = 'mobilenet_thin'
        camera = 0

        self.lines = {}
        self.connection = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6],
                           [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12],
                           [12, 13], [8, 14], [14, 15], [15, 16]]

        parser = argparse.ArgumentParser(
            description='tf-pose-estimation webcam3D')
        parser.add_argument(
            '--resize',
            type=str,
            default='0x0',
            help='if provided, resize images before they are processed. '
            'default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
        parser.add_argument('--model',
                            type=str,
                            default='mobilenet_thin',
                            help='cmu / mobilenet_thin')
        parser.add_argument('--resolution',
                            type=str,
                            default='432x368',
                            help='network input resolution. default=432x368')
        args = parser.parse_args()
        w, h = model_wh(args.resolution)  # honor the CLI options declared above
        self.e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
        self.cam = cv2.VideoCapture(camera)
        ret_val, image = self.cam.read()
        self.poseLifting = Prob3dPose(
            './src/lifting/models/prob_model_params.mat')
        keypoints = self.mesh(image)

        self.points = gl.GLScatterPlotItem(pos=keypoints,
                                           color=pg.glColor((0, 255, 0)),
                                           size=15)
        self.window.addItem(self.points)

        for n, pts in enumerate(self.connection):
            self.lines[n] = gl.GLLinePlotItem(pos=np.array(
                [keypoints[p] for p in pts]),
                                              color=pg.glColor((0, 0, 255)),
                                              width=3,
                                              antialias=True)
            self.window.addItem(self.lines[n])
Example #16
def main():
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p3.jpg')
    parser.add_argument(
        '--resolution',
        type=str,
        default='432x368',
        help='network input resolution. default=432x368')
    parser.add_argument(
        '--model',
        type=str,
        default='mobilenet_thin',
        help='cmu / mobilenet_thin')
    parser.add_argument(
        '--scales',
        type=str,
        default='[None]',
        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    parser.add_argument(
        '--stick_only',
        action='store_true',
        help='save output without other analysis')
    parser.add_argument('--plot_3d', action='store_true', help='save 3d poses')

    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    t = time.time()
    humans = e.inference(image, scales=scales)
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    # cv2.imshow('tf-pose-estimation result', image)
    # cv2.waitKey()

    import matplotlib.pyplot as plt

    fig = plt.figure()
    a = fig.add_subplot(2, 2, 1)
    a.set_title('Result')
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if args.stick_only:
        if not os.path.exists('output'):
            os.mkdir('output')  # ensure the output directory exists before writing
        cv2.imwrite('output/test.png', image)
        logger.info('image saved: {}'.format('output/test.png'))
        import sys
        sys.exit(0)
    plt.imshow(rgb_image)

    bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
    bgimg = cv2.resize(
        bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]),
        interpolation=cv2.INTER_AREA)

    # show network output
    a = fig.add_subplot(2, 2, 2)
    plt.imshow(bgimg, alpha=0.5)
    tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
    plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    tmp2 = e.pafMat.transpose((2, 0, 1))
    tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
    tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

    a = fig.add_subplot(2, 2, 3)
    a.set_title('Vectormap-x')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    a = fig.add_subplot(2, 2, 4)
    a.set_title('Vectormap-y')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()
    if not os.path.exists('output'):
        os.mkdir('output')
    plt.savefig('output/test.png')
    logger.info('image saved: {}'.format('output/test.png'))

    if not args.plot_3d:
        import sys
        sys.exit(0)
    # for plotting in 3D
    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    for i, single_3d in enumerate(pose_3d):
        plot_pose(single_3d)
    plt.draw()
    plt.savefig('output/pose_3d_test.png')
    logger.info('3d plot saved: {}'.format('output/pose_3d_test.png'))
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    a = fig.add_subplot(2, 2, 4)
    a.set_title('Vectormap-y')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()
    plt.show()

    #    import sys
    #    sys.exit(0)

    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose('prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    print('humans count: %d' % len(humans))
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)
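Nearly every example above repeats the same lifting steps. A consolidated helper, assembled directly from that recurring code (assumes the same tf-pose/lifting imports used throughout; the function name is ours):

# Consolidated sketch of the recurring 2D-to-3D lifting pipeline.
def lift_to_3d(humans, prob_model_path, standard_w=640, standard_h=480):
    poseLifting = Prob3dPose(prob_model_path)
    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)
    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    return poseLifting.compute_3d(transformed_pose2d, weights)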