Example #1
    def handleWBCapture(self, img, thumbImg):
        t = FXTimeUtil.getT()
        dtString = FXTimeUtil.T2DateString(t)
        filename = dtString + ".jpg"
        cv2.imwrite(filename, img)

        filename = dtString + "_thumb.jpg"
        cv2.imwrite(filename, thumbImg)
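These snippets call a project-local FXTimeUtil helper that is not shown. For running Example #1 standalone, here is a minimal stand-in sketch; the exact timestamp format returned by T2DateString is an assumption:

    # Hypothetical stand-in for the project's FXTimeUtil (assumed behavior).
    import time
    import datetime

    class FXTimeUtil(object):
        @staticmethod
        def getT():
            # current time as epoch seconds (float)
            return time.time()

        @staticmethod
        def T2DateString(t):
            # timestamp -> filename-safe date string (format is an assumption)
            return datetime.datetime.fromtimestamp(t).strftime("%Y%m%d_%H%M%S")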
Example #2
    def movementCheckProcess(self):
        interval = self.param['movementCheck']['interval']

        while True:
            sTime = FXTimeUtil.getT()

            img = self.getMovementCheckImg()
            if img is None:
                continue

            imgGray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            if self.movementCheckImg is None:
                self.movementCheckImg = imgGray

            # mean absolute pixel difference against the previous frame
            diffImg = cv2.absdiff(imgGray, self.movementCheckImg)
            val = float(cv2.mean(diffImg)[0])

            duration = FXTimeUtil.getT() - sTime

            # pace the loop: at most one check per interval
            if duration < interval:
                time.sleep(interval - duration)
                duration = interval

            self.movementVal = val

            # time-weighted exponential moving average (1 s time constant)
            w = min(duration / 1.0, 1.0)
            self.movementValMean = w * val + (1.0 - w) * self.movementValMean

            self.movementCheckImg = imgGray

            if self.debugLevel >= 3:
                print "movement: %f (mean %f)" % (self.movementVal, self.movementValMean)

            if useRoyWeb:
                ps.send('movement', self.movementVal, 'v', 'movementVal')
                ps.send('movementMean', self.movementValMean, 'v', 'movementValMean')
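The movementValMean update above is a time-weighted exponential moving average: the longer the cycle took, the more weight the fresh score gets, clamped at a 1-second time constant. A standalone sketch of the same update rule:

    # Sketch of the time-weighted exponential moving average used above.
    def updateMean(mean, val, duration, timeConstant=1.0):
        w = min(duration / timeConstant, 1.0)  # weight grows with elapsed time
        return w * val + (1.0 - w) * mean

    # e.g. 0.5 s cycles blend half of each new value into the running mean:
    mean = 0.0
    for val in (10.0, 10.0, 10.0):
        mean = updateMean(mean, val, 0.5)   # 5.0, then 7.5, then 8.75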
Example #3
    def audioProcess(self):
        t = FXTimeUtil.getT()

        audioAngleImg = self.getAudioMapImg()

        self.currentAudioAngleImg = audioAngleImg
        if self.currentAudioAngleSumImg is None:
            self.currentAudioAngleSumImg = self.currentAudioAngleImg.copy()
        else:
            processInterval = t - self.currentAudioProcessTime
            timeRange = 1.0
            processInterval = min(processInterval, timeRange)
            w = processInterval / timeRange

            self.currentAudioAngleSumImg = cv2.addWeighted(self.currentAudioAngleSumImg, (1.0 - w),
                                                           self.currentAudioAngleImg, w, 0)

        self.currentAudioProcessTime = t
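audioProcess accumulates successive audio-angle maps with cv2.addWeighted, giving the newest map a weight proportional to the elapsed processing interval (capped at timeRange = 1.0 s). The same decay pattern in isolation, on synthetic images:

    # Sketch: time-weighted accumulation of frames via cv2.addWeighted.
    import cv2
    import numpy as np

    acc = np.zeros((4, 4), np.float32)          # accumulator image
    frame = np.full((4, 4), 255.0, np.float32)  # incoming map

    timeRange = 1.0
    for dt in (0.25, 0.25, 0.5):                # simulated process intervals
        w = min(dt, timeRange) / timeRange
        acc = cv2.addWeighted(acc, 1.0 - w, frame, w, 0)
    # acc now holds an exponentially decayed blend of the incoming maps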
Example #4
    def _threadProcessObserve(self):
        interval = 1.0 #secs
        currentT = 0.

        while True:
            if not self.threadFlag:
                break

            t = FXTimeUtil.getT()
            diffT = t - currentT
            if diffT < interval:
                time.sleep(interval - diffT)

            self.observeProcess()

            cv2.waitKey(1)
            currentT = t


        print "thread process killed [observe]\n"
Example #5
    def trackProcess(self):
        t = FXTimeUtil.getT()
        floorImg = self.getFloorImage()
        if floorImg is None:
            return

        viewPort = self.trackInfo['viewPort']
        floorImg8UC3, floorImgBin = FXImageUtil.createColorMap(floorImg, 0., viewPort['z']['max'])

        noiseReduction = self.trackInfo['noiseReduction']
        if noiseReduction > 0:
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (noiseReduction, noiseReduction))
            floorImgBin = cv2.morphologyEx(floorImgBin, cv2.MORPH_OPEN, kernel)
            floorImgBin = cv2.morphologyEx(floorImgBin, cv2.MORPH_CLOSE, kernel)

        binDilate = self.trackInfo['binDilate']
        if binDilate > 0:
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (binDilate, binDilate))
            floorImgBin = cv2.dilate(floorImgBin, kernel)
        binErode = self.trackInfo['binErode']
        if binErode > 0:
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (binErode, binErode))
            floorImgBin = cv2.erode(floorImgBin, kernel)

        # keep a copy: cv2.findContours modifies its input image
        floorImgBinOrig = floorImgBin.copy()

        contours, hierarchy = cv2.findContours(floorImgBin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        gaussianSize = self.trackInfo['gaussianSize']
        if gaussianSize > 0:
            floorImgGaussian = cv2.GaussianBlur(floorImg, (gaussianSize, gaussianSize), 0)
        else:
            floorImgGaussian = floorImg.copy()

        #floorImg = floorImgGaussian
        #floorImgGaussian8UC3 = FXImageUtil.createColorMap(floorImgGaussian, 0., viewPort['z']['max'])[0]
        #cv2.imshow("floorImgGaussian", floorImgGaussian8UC3)


        ######## build the audio map
        self.audioProcess()

        ######## tracking
        trackedData = {'dataId': 'kinectTracker', 't': t}

        points = []

        for contour in contours:
            if len(contour) < 5:
                continue #too small

            box = cv2.fitEllipse(contour)

            # detected size in mm: (minSize, maxSize)
            objectSizeMM = (min(box[1]) * self.trackInfo['pixel2mm'], max(box[1]) * self.trackInfo['pixel2mm'])
            #print objectSize

            # size check
            if objectSizeMM[0] < self.trackInfo['objectSize']['min']: # minor axis too small
                continue
            if objectSizeMM[1] > self.trackInfo['objectSize']['max']: # major axis too large
                continue

            ellipseImgBin = np.zeros(floorImgBin.shape, np.uint8)
            cv2.ellipse(ellipseImgBin, box, (255), -1)
            minMaxLoc = cv2.minMaxLoc(floorImgGaussian, mask=ellipseImgBin)

            # person silhouette (intersect with the pre-findContours binary image)
            personBinImg = cv2.bitwise_and(floorImgBinOrig, ellipseImgBin)

            # silhouette area / ellipse area
            areaSize_ellipse = np.count_nonzero(ellipseImgBin)
            areaSize_personBin = np.count_nonzero(personBinImg)
            areaSizeRate = 0.
            if areaSize_ellipse > 0:
                areaSizeRate = float(areaSize_personBin) / float(areaSize_ellipse)


            #print box  # ((centerx, centery), (w, h), angle)
            boxCenter = box[0]

            object_height = int(minMaxLoc[1])
            maxLoc = minMaxLoc[3]
            object_mPos = [maxLoc[0], maxLoc[1]]
            #object_mPos = [int(boxCenter[0]), int(boxCenter[1])]

            object_wPos = self.kinectFloorMap.mapPos2wPos(object_mPos, object_height)

            audioScore = 0.
            if self.currentAudioAngleSumImg is not None:
                audioScore = float(self.currentAudioAngleSumImg[object_mPos[1]][object_mPos[0]])

            tData = {}
            tData['x'] = object_wPos[0]
            tData['y'] = object_wPos[1]
            tData['z'] = object_wPos[2]
            tData['width'] = objectSizeMM[1] # major axis
            tData['height'] = objectSizeMM[0] # minor axis

            mData = {}
            mData['x'] = object_mPos[0]
            mData['y'] = object_mPos[1]
            mData['box'] = box
            mData['areaSizeRate'] = areaSizeRate

            audioData = {}
            audioData['score'] = audioScore

            #print areaSizeRate

            point = {'trackedData': tData, 'trackedMapData': mData, 'audio': audioData}
            points.append(point)


        trackedData['data'] = points

        self.putTrackedIds(self.currentTrackedData, trackedData)


        ###### visualize

        audioAngleSumImg8UC3 = cv2.cvtColor(self.currentAudioAngleSumImg, cv2.COLOR_GRAY2BGR)
        floorImgBin8UC3 = cv2.cvtColor(floorImgBinOrig, cv2.COLOR_GRAY2BGR)

        # draw onto floorImg8UC3
        if 'data' in trackedData:
            points = trackedData['data']
            for point in points:
                mPos = [point['trackedMapData']['x'], point['trackedMapData']['y']]
                box = point['trackedMapData']['box']

                cv2.ellipse(floorImg8UC3, box, (0, 0, 255), 1)
                cv2.circle(floorImg8UC3, (mPos[0], mPos[1]), 3, (0,0,255), -1)
                trackStr = "(" + str(point['trackedId']) + ")" + str(point['trackedData']['z'])
                cv2.putText(floorImg8UC3, trackStr, (mPos[0]+5, mPos[1]), cv2.FONT_HERSHEY_PLAIN, 0.8, (0,0,255))

                #cv2.ellipse(audioAngleSumImg8UC3, box, (0, 0, 255), 1)
                cv2.circle(audioAngleSumImg8UC3, (mPos[0], mPos[1]), 3, (0,0,255), -1)

                cv2.ellipse(floorImgBin8UC3, box, (0, 0, 255), 1)
                cv2.circle(floorImgBin8UC3, (mPos[0], mPos[1]), 3, (0,0,255), -1)


        cv2.imshow("floorImg", floorImg8UC3)
        cv2.imshow("audioAngleImg", audioAngleSumImg8UC3)
        cv2.imshow("floorImgBin", floorImgBin8UC3)


        self.currentTrackedData = trackedData
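The per-contour trick in Example #5 is worth isolating: fit an ellipse to a blob, rasterize it into a mask, and let cv2.minMaxLoc search only inside that mask to find the blob's highest point (e.g. a head in a height map). A minimal sketch on a synthetic height image:

    # Sketch: masked minMaxLoc to find the peak inside a fitted ellipse.
    import cv2
    import numpy as np

    heightImg = np.zeros((100, 100), np.float32)
    cv2.circle(heightImg, (50, 50), 10, 1800.0, -1)   # fake 1800 mm "person"

    binImg = (heightImg > 0).astype(np.uint8) * 255
    contours, hierarchy = cv2.findContours(binImg.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)

    box = cv2.fitEllipse(contours[0])
    mask = np.zeros(heightImg.shape, np.uint8)
    cv2.ellipse(mask, box, (255), -1)

    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(heightImg, mask=mask)
    print maxVal, maxLoc   # peak height and its pixel position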
Example #6
    def diffCheckProcess(self):
        interval = self.param['interval']

        while True:
            sTime = FXTimeUtil.getT()

            if self.movementValMean < self.param['movementCheck']['thr']:
                if self.debugLevel >= 2:
                    print "no movements. diff check"

                img = self.getImg()
                if img is None:
                    continue

                imgGray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                imgGray = cv2.GaussianBlur(imgGray, (5, 5), 0)

                lapImg = cv2.Laplacian(imgGray, cv2.CV_32F, ksize=3)

                #lapImg = lapImg.astype(np.uint8)

                #conjunction
                #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
                #lapImg = cv2.dilate(lapImg, kernel, iterations=2)
                #lapImg = cv2.erode(lapImg, kernel, iterations=2)

                if self.diffCheckImg is None:
                    self.diffCheckImg = lapImg

                diffImg = cv2.absdiff(lapImg, self.diffCheckImg)
                ret, diffImg = cv2.threshold(diffImg, 10, 255, cv2.THRESH_BINARY)
                diffImg = diffImg.astype(np.uint8)

                #noise reduction
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
                diffImg = cv2.erode(diffImg, kernel, iterations=2)
                diffImg = cv2.dilate(diffImg, kernel, iterations=2)

                diffImg = cv2.bitwise_and(diffImg, self.diffCheckMask)

                if self.debugLevel >= 1:
                    dispImg = diffImg.copy()
                    dispImg = FXImageUtil.scaleImage(diffImg, 0.25)
                    cv2.imshow("diffImg", dispImg)

                # mean of the binarized edge difference as the change score
                val = float(cv2.mean(diffImg)[0])

                self.diffVal = val

                self.diffCheckImg = lapImg

                if self.debugLevel >= 2:
                    print "HD difference val: %f" % self.diffVal

                if useRoyWeb:
                    ps.send('diff', self.diffVal, 'v', 'diffVal')

                if self.diffVal > self.param['thr']:
                    print "difference is detected"

                    thumbSize = self.param['dstThumb']['size']
                    thumbImg = cv2.resize(img, (thumbSize[0], thumbSize[1]))

                    if self.thumbRectFlag:
                        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (100, 100))
                        diffImgArea = cv2.dilate(diffImg, kernel)
                        diffImgArea = cv2.erode(diffImgArea, kernel)

                        contours, hierarchy = cv2.findContours(diffImgArea, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                        #get max rectangle
                        areas = [cv2.contourArea(contour) for contour in contours]
                        cnt_max = contours[areas.index(max(areas))]
                        rect = cv2.boundingRect(cnt_max)

                        #draw rectangle
                        offset = 20
                        pt1 = (int((rect[0]-offset) * self.thumbScale), int((rect[1]-offset) * self.thumbScale))
                        pt2 = (int((rect[0]+rect[2]+offset) * self.thumbScale), int((rect[1]+rect[3]+offset) * self.thumbScale))
                        cv2.rectangle(thumbImg, pt1, pt2, self.thumbRectColor, thickness=self.thumbRectThickness)


                    self.handleWBCapture(img, thumbImg)

                    if self.debugLevel >= 1:
                        dispImg = FXImageUtil.scaleImage(img, 0.25)
                        cv2.imshow("captured wb", dispImg)
                        dispImg = FXImageUtil.scaleImage(diffImg, 0.25)
                        cv2.imshow("captured wb diff", dispImg)

            else:
                if self.debugLevel >= 2:
                    print "movement is detected. skip diff check"


            duration = FXTimeUtil.getT() - sTime

            if duration < interval:
                time.sleep(interval - duration)
                duration = interval

            cv2.waitKey(1)
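Example #6 compares Laplacian edge images rather than raw frames, which makes the whiteboard-change check tolerant of gradual lighting drift. A minimal sketch of that comparison, assuming two RGB frames are already available:

    # Sketch: edge-based frame differencing as in diffCheckProcess.
    import cv2
    import numpy as np

    def edgeImage(img):
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        return cv2.Laplacian(gray, cv2.CV_32F, ksize=3)

    def diffScore(imgA, imgB):
        diff = cv2.absdiff(edgeImage(imgA), edgeImage(imgB))
        ret, diff = cv2.threshold(diff, 10, 255, cv2.THRESH_BINARY)
        return float(cv2.mean(diff.astype(np.uint8))[0])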