Example #1
0
    def getAudioMapImg(self):
        """Build a floor-map image of audio source directions.

        For each connected Kinect, projects the reported audio source
        angle onto the floor map as a direction wedge whose brightness
        encodes the source confidence, sums the per-Kinect wedges, and
        averages over the number of Kinects.

        Returns:
            uint8 image the size of the floor map (0-255 intensity).
        """
        audioAngleImgTotal = self.kinectFloorMap.getBlankImg(dtype=np.uint16)

        audioDataVec = self.kinectClient.getAudioDataVec()

        for k in range(self.kinectClient.getKinectNum()):
            audioData = audioDataVec[k]
            # "is None" — identity test; "== None" would misbehave on
            # array-like values and is non-idiomatic.
            if audioData is None:
                continue

            sourceAngle = audioData['sourceAngle']
            sourceConfidence = audioData['sourceConfidence']

            if sourceConfidence == 0:
                continue

            kinect_wPos = np.array(self.kinectClient.getT(k))
            kinect_cTransform = self.kinectClient.getCoorinateTransform(k)

            # Project the audio direction out to a far point, then rotate it
            # into world coordinates.  Units appear to be mm (10000 == 10 m).
            distance = 10000.  # 10 (m)
            audioDirPos = [math.tan(math.radians(-sourceAngle)) * distance, 0., distance]
            audioDirPos = kinect_cTransform.transformR(audioDirPos)

            audioDst_wPos = kinect_wPos + np.array(audioDirPos)

            kinect_mPos = self.kinectFloorMap.wPos2mapPos(kinect_wPos)
            audioDst_mPos = self.kinectFloorMap.wPos2mapPos(audioDst_wPos)

            audioAngleImg = self.kinectFloorMap.getBlankImg(dtype=np.uint8)

            val = int(255. * sourceConfidence)  # max 255
            if val == 0:
                continue

            # Wedge angular width: 90 deg at confidence 0, narrowing toward
            # minRangeAngle as confidence approaches 1.
            minRangeAngle = 10.
            rangeAngle = minRangeAngle + (90. - minRangeAngle) * (1.0 - sourceConfidence)

            audioAngleImg = FXImageUtil.drawSensingDirection(audioAngleImg, kinect_mPos, audioDst_mPos,
                                                             rangeAngle, val)
            audioAngleImgTotal = audioAngleImgTotal + audioAngleImg

        # Average over the number of Kinects.
        audioAngleImgTotal = audioAngleImgTotal / self.kinectClient.getKinectNum()
        audioAngleImgTotal = audioAngleImgTotal.astype(np.uint8)
        return audioAngleImgTotal
Example #2
0
    def faceProcess(self):
        """Estimate face orientation for each tracked person and update the
        accumulated face-direction floor map.

        Reads the latest tracked points, crops each person out of Kinect 0's
        RGB image, runs face-orientation detection, annotates an RGB debug
        view, draws a confidence-weighted direction wedge per person onto the
        floor map, and blends the result into a time-weighted running sum.

        Side effects: updates self.currentTrackedDataFace,
        self.currentFaceAngleImg, self.currentFaceAngleSumImg,
        self.faceAngleConfMax and self.currentFaceProcessTime, and shows
        debug windows via cv2.imshow.
        """
        trackedData = self.getTrackedData()

        # dict.has_key() was removed in Python 3; "in" works everywhere.
        if 'data' not in trackedData:
            return

        t = trackedData['t']
        points = trackedData['data']
        rgbImgVec = self.kinectClient.getRGBImgVec()

        faceAngleImg = self.kinectFloorMap.getBlankImg(dtype=np.uint16)

        ####### only one kinect (so far)
        kinectIdx = 0

        rgbImgOrig = rgbImgVec[kinectIdx]
        # "is None" is required here: "== None" on a numpy image would
        # compare elementwise and raise in a boolean context.
        if rgbImgOrig is None:
            return

        rgbImg = rgbImgOrig.copy()
        camCoord = self.kinectClient.kinectRGBCamCoordVec[kinectIdx]

        croppedImgs = []
        mPosVec = []

        totalConf = 0

        for point in points:
            p = point['trackedData']
            wPos = [p['x'], p['y'], p['z']]
            trackedId = point['trackedId']

            rt = self.kinectClient.calcCropCorners(kinectIdx,
                                                   wPos,
                                                   self.faceCropParams['width'], self.faceCropParams['height'],
                                                   self.faceCropParams['zTopMergin'], self.faceCropParams['cropType'])
            if rt is None:
                continue

            iPosPixel, corner_iPosPixels = rt

            # Mark the tracked person and its id on the RGB debug image.
            cv2.circle(rgbImg, (iPosPixel[0], iPosPixel[1]), 6, (0, 0, 255), -1)
            cv2.putText(rgbImg, str(trackedId), (iPosPixel[0]+7, iPosPixel[1]), cv2.FONT_HERSHEY_PLAIN, 2.0, (0,0,255))

            cv2.polylines(rgbImg, np.array([corner_iPosPixels]), True, (0,255,0), 2)

            # Crop the person region out of the original (unannotated) image.
            cropImgSize = (50, 70)  # [width, height] pixels
            croppedImg = FXImageUtil.cropImage(rgbImgOrig, corner_iPosPixels, (140, 100))

            minSize = int(min(cropImgSize)*0.2)
            maxSize = int(max(cropImgSize)*0.8)
            detAng, detConf = self.detectOrientation.detect(croppedImg, minSize, maxSize)

            detAngDeg = math.degrees(detAng)

            # Direction from the person to the camera, flattened to the floor.
            camDir = np.array(self.kinectClient.getT(kinectIdx)) - np.array(wPos)
            camDir[2] = 0
            faceCoord = CoordinateTransform3D.CoordinateTransform3D()
            # Empirically, pan/tilt/roll of (-90, 90, 0) yields R == identity,
            # so express the pan relative to that.  detAngDeg is positive
            # clockwise.
            facePan = -90 + detAngDeg

            faceCoord.setPanTiltRoll(facePan, 90, 0)
            faceDir = faceCoord.transformR(camDir)
            faceDir = faceDir / np.linalg.norm(faceDir)
            faceDirWPos = np.array(wPos) + faceDir * 500.0
            faceDirIPos = camCoord.wPos2iPos(faceDirWPos)
            faceDirIPosPixels = camCoord.iPos2iPosPixel(faceDirIPos, rgbImg)

            # Only draw the direction overlay for reasonably confident hits.
            if detConf > 10:
                angText = "ang:%.0f, conf:%.0f" % (detAngDeg, detConf)
                cv2.putText(rgbImg, angText, (iPosPixel[0]+7, iPosPixel[1]+30), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,0,255))
                cv2.line(rgbImg,  (iPosPixel[0], iPosPixel[1]), (faceDirIPosPixels[0], faceDirIPosPixels[1]), (0,255,255), 3)

            # Attach 'face' info to the tracked point.
            faceData = {}
            faceData['angle'] = detAng
            faceData['confidence'] = detConf
            point['face'] = faceData

            mPos = self.kinectFloorMap.wPos2mapPos(wPos)
            mPosVec.append(mPos)
            if detConf > 0:
                # Draw a wedge from the person along the face direction onto
                # the floor map, weighted by the detection confidence.
                faceDirWPos2 = np.array(wPos) + faceDir * 10000.
                faceDir_mPos = self.kinectFloorMap.wPos2mapPos(faceDirWPos2)
                faceAngleImg_person = self.kinectFloorMap.getBlankImg(dtype=np.uint16)
                rangeAngle = 30.  # degrees
                val = detConf
                faceAngleImg_person = FXImageUtil.drawSensingDirection(faceAngleImg_person, mPos, faceDir_mPos,
                                                                       rangeAngle, val)
                faceAngleImg = faceAngleImg + faceAngleImg_person

            totalConf = totalConf + detConf

            # Concatenate crops horizontally for display.  len() works for
            # both the initial list and the stacked ndarray (np.alen was
            # removed from recent numpy releases).
            if len(croppedImgs) > 0:
                croppedImgs = np.hstack((croppedImgs, croppedImg))
            else:
                croppedImgs = np.copy(croppedImg)

        self.currentTrackedDataFace = trackedData

        dispImg = cv2.resize(rgbImg, (810, 540))

        cv2.imshow("rgbFace", dispImg)
        #cv2.imshow("croppedImgs", croppedImgs)

        self.currentFaceAngleImg = faceAngleImg

        # "is None" — the sum image becomes a numpy array after the first
        # frame, and "== None" would then compare elementwise.
        if self.currentFaceAngleSumImg is None:
            self.currentFaceAngleSumImg = self.currentFaceAngleImg.copy()
        else:
            # Exponential blend weighted by the elapsed time since the last
            # update, capped at timeRange seconds.
            processInterval = t - self.currentFaceProcessTime
            timeRange = 1.0
            processInterval = min(processInterval, timeRange)
            w = processInterval / timeRange

            self.currentFaceAngleSumImg = cv2.addWeighted(self.currentFaceAngleSumImg, (1.0 - w),
                                                          self.currentFaceAngleImg, w, 0)

        # Track the largest total confidence seen, for colormap scaling.
        # (The original computed this twice; once is enough.)
        self.faceAngleConfMax = max(self.faceAngleConfMax, totalConf)
        self.currentFaceProcessTime = t

        # Display the accumulated face-angle map.
        if self.faceAngleConfMax > 0:
            faceAngleImg8UC3 = FXImageUtil.createColorMap(self.currentFaceAngleSumImg, 0.0, self.faceAngleConfMax)[0]

            for mPos in mPosVec:
                cv2.circle(faceAngleImg8UC3, (int(mPos[0]), int(mPos[1])), 3, (0, 0, 255), -1)

            cv2.imshow("faceAngleImg", faceAngleImg8UC3)