コード例 #1
0
    def __init__(self):
        """Load configuration, connect to the MQTT broker and spawn one
        processing thread per configured IP camera.

        Reads config.json from the current working directory; each entry in
        its "CameraList" supplies camURL, camID, camSensorID and camZone.
        """

        self.TassTools = TassTools()
        self.TassClassifier = TassClassifier(1)

        self._configs = {}

        # Configuration is resolved relative to the current working
        # directory, not this module's directory.
        with open("config.json") as configs:

            self._configs = json.loads(configs.read())

        self.startMQTT()

        # One worker thread per camera; each thread loops forever inside
        # processFrame.
        for i, camera in enumerate(self._configs["CameraList"]):

            camThread = threading.Thread(name="camera_thread_" + str(i),
                                         target=self.processFrame,
                                         args=(
                                             TassIpCameras.TassIpCameras(
                                                 camera["camURL"]),
                                             camera["camID"],
                                             camera["camSensorID"],
                                             camera["camZone"],
                                         ))

            camThread.start()
コード例 #2
0
    def __init__(self, model):
        """Load classifier settings and the trained recognition pipeline.

        :param model: identifier of the trained model; selects the
            features/<model>/classifier.pkl file to load.

        Reads config.json from the current working directory, loads the
        dlib/OpenFace models from directories next to this module, and the
        Haar cascades from paths given in the configuration.
        """

        self.TassTools = TassTools()

        self.model = model

        with open('config.json') as configs:

            self._configs = json.loads(configs.read())

        self.imgDim = self._configs["ClassifierSettings"]["imgDim"]
        self.threshold = self._configs["ClassifierSettings"]["threshold"]
        self.cuda = self._configs["ClassifierSettings"]["useCuda"]

        self.fileDir = os.path.dirname(os.path.abspath(__file__))
        self.modelsDir = self.fileDir + '/models'
        self.dlibModelDir = self.modelsDir + '/dlib'
        self.openfaceModelDir = self.modelsDir + '/openface'

        self.dlibFacePredictor = self.dlibModelDir + '/shape_predictor_68_face_landmarks.dat'
        self.networkModel = self.openfaceModelDir + '/nn4.small2.v1.t7'

        self.detector = dlib.get_frontal_face_detector()

        self.classifierModel = os.path.dirname(
            os.path.abspath(__file__)) + "/features/" + str(
                self.model) + "/classifier.pkl"

        # Cascades are tried in this order by openCVDetect; note cascade 5
        # re-loads the same profile cascade as cascade 2.
        self.faceCascade1 = cv2.CascadeClassifier(
            os.getcwd() + '/' +
            self._configs["ClassifierSettings"]["HAAR_FACES"])
        self.faceCascade2 = cv2.CascadeClassifier(
            os.getcwd() + '/' +
            self._configs["ClassifierSettings"]["HAAR_PROFILES"])
        self.faceCascade3 = cv2.CascadeClassifier(
            os.getcwd() + '/' +
            self._configs["ClassifierSettings"]["HAAR_FACES2"])
        self.faceCascade4 = cv2.CascadeClassifier(
            os.getcwd() + '/' +
            self._configs["ClassifierSettings"]["HAAR_FACES3"])
        self.faceCascade5 = cv2.CascadeClassifier(
            os.getcwd() + '/' +
            self._configs["ClassifierSettings"]["HAAR_PROFILES"])

        # Pickle files are binary data: open in 'rb' (text mode 'r'
        # corrupts the stream on Windows and fails outright on Python 3).
        with open(self.classifierModel, 'rb') as f:

            (self.le, self.clf) = pickle.load(f)

        self.align = openface.AlignDlib(self.dlibFacePredictor)
        self.net = openface.TorchNeuralNet(self.networkModel,
                                           imgDim=self.imgDim,
                                           cuda=self.cuda)
コード例 #3
0
    def __init__(self):
        """Load configuration, open the local RealSense camera, connect to
        the MQTT broker and enter the frame-processing loop.

        processFrame loops forever, so this constructor never returns.
        """

        self.JumpWayMQTTClient = ""

        self.TassTools = TassTools()
        self.TassClassifier = TassClassifier(1)

        self._configs = {}

        with open('config.json') as configs:

            self._configs = json.loads(configs.read())

        # Colour stream only, at a fixed frame rate.
        framesPerSecond = 30

        serv = pyrs.Service()
        realsense = serv.Device(
            device_id=self._configs["RealsenseCam"]["localCamID"],
            streams=[pyrs.stream.ColorStream(fps=framesPerSecond)])

        self.startMQTT()
        self.processFrame(realsense, self._configs["RealsenseCam"]["camID"],
                          self._configs["RealsenseCam"]["camZone"],
                          self._configs["RealsenseCam"]["camSensorID"])
コード例 #4
0
class TassCore():
    """IP-camera security monitor.

    Spawns one worker thread per camera listed in config.json; each thread
    detects and classifies faces in the stream and publishes sensor and
    warning messages to the IoT JumpWay broker over MQTT.
    """

    def __init__(self):
        # Build the helpers, load config.json, connect to MQTT and start
        # one processing thread per configured camera.

        self.TassTools = TassTools()
        self.TassClassifier = TassClassifier(1)

        self._configs = {}

        # Configuration is resolved relative to the current working
        # directory.
        with open("config.json") as configs:

            self._configs = json.loads(configs.read())

        self.startMQTT()

        for i, camera in enumerate(self._configs["CameraList"]):

            camThread = threading.Thread(name="camera_thread_" + str(i),
                                         target=self.processFrame,
                                         args=(
                                             TassIpCameras.TassIpCameras(
                                                 camera["camURL"]),
                                             camera["camID"],
                                             camera["camSensorID"],
                                             camera["camZone"],
                                         ))

            camThread.start()

    def startMQTT(self):
        """Create the IoT JumpWay MQTT application client from the
        IoTJumpWayMQTTSettings section of config.json and connect it.

        Exits the whole process if the client cannot be constructed.
        """

        try:

            self.JumpWayMQTTClient = JumpWayPythonMQTTApplicationConnection({
                "locationID":
                self._configs["IoTJumpWayMQTTSettings"]["SystemLocation"],
                "applicationID":
                self._configs["IoTJumpWayMQTTSettings"]["SystemApplicationID"],
                "applicationName":
                self._configs["IoTJumpWayMQTTSettings"]
                ["SystemApplicationName"],
                "username":
                self._configs["IoTJumpWayMQTTSettings"]["applicationUsername"],
                "password":
                self._configs["IoTJumpWayMQTTSettings"]["applicationPassword"]
            })

        except Exception as e:
            print(str(e))
            sys.exit()

        self.JumpWayMQTTClient.connectToApplication()

    def processFrame(self, camera, camID, camSensorID, camZone):
        """Endless per-camera loop: read a frame, detect faces (Haar
        cascades first, dlib as fallback), classify them and publish the
        result over MQTT.

        camera: TassIpCameras reader for this stream.
        camID / camSensorID / camZone: identifiers placed in the MQTT
        messages for this camera.
        """

        while True:

            frame = camera.readIpCamFrame()

            if frame is None:

                continue

            frame = self.TassTools.resize(frame)
            currentImage, detected = self.TassClassifier.openCVDetect(frame)

            if detected is not None:

                # openCVDetect saved the annotated frame and returned its
                # path; re-read that file and classify the whole image.
                currentImage = cv2.imread(currentImage)
                persons, confidences = self.TassClassifier.classify(
                    currentImage, " ", "CV")

                if len(confidences):

                    print "P: " + str(persons) + " C: " + str(confidences)

                else:

                    self.TassClassifier.moveNotIdentified(frame)

                    print "Unable To Classify Frame "

                for i, c in enumerate(confidences):

                    if persons[i] == "unknown":

                        # Model knows no matching person: raise an intruder
                        # sensor reading plus a warning.
                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Sensors", camZone, camID, {
                                "Sensor": "CCTV",
                                "SensorID": camSensorID,
                                "SensorValue": "Intruder"
                            })

                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Warnings", camZone, camID, {
                                "WarningType": "CCTV",
                                "WarningOrigin": camSensorID,
                                "WarningValue": "Intruder",
                                "WarningMessage":
                                "An intruder has been detected"
                            })

                        self.TassClassifier.moveNotIdentified(frame)

                        print "Unknown Person Detected With Confidence " + str(
                            c)

                    elif persons[i] != "":

                        # Known person: publish their identity.
                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Sensors", camZone, camID, {
                                "Sensor": "CCTV",
                                "SensorID": camSensorID,
                                "SensorValue": persons[i]
                            })

                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Warnings", camZone, camID, {
                                "WarningType":
                                "CCTV",
                                "WarningOrigin":
                                camSensorID,
                                "WarningValue":
                                persons[i],
                                "WarningMessage":
                                "User " + str(persons[i]) +
                                " detected with confidence: " + str(c)
                            })

                        self.TassClassifier.moveIdentified(frame)

                        print str(
                            persons[i]) + " Detected With Confidence " + str(c)

            else:

                # Haar cascades found nothing: retry on the mirrored frame
                # with the dlib detector.
                dlframe = cv2.flip(frame, 1)
                currentImage, detected = self.TassClassifier.dlibDetect(
                    dlframe)

                if detected is not None:

                    for face in detected:

                        persons, confidences = self.TassClassifier.classify(
                            dlframe, face, "CV")

                        if len(confidences):

                            print "P: " + str(persons) + " C: " + str(
                                confidences)

                        else:

                            self.TassClassifier.moveNotIdentified(frame)

                            print "Unable To Classify Frame "

                    # NOTE(review): this publish loop sits outside the
                    # per-face loop above, so only the last classified
                    # face's persons/confidences are published -- confirm
                    # this is intended.
                    for i, c in enumerate(confidences):

                        if persons[i] == "unknown":

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Sensors", camZone, camID, {
                                    "Sensor": "CCTV",
                                    "SensorID": camSensorID,
                                    "SensorValue": "Intruder"
                                })

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Warnings", camZone, camID, {
                                    "WarningType":
                                    "CCTV",
                                    "WarningOrigin":
                                    camSensorID,
                                    "WarningValue":
                                    "Intruder",
                                    "WarningMessage":
                                    "An intruder has been detected"
                                })

                            self.TassClassifier.moveNotIdentified(frame)

                            print "Unknown Person Detected With Confidence " + str(
                                c)

                        elif persons[i] != "":

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Sensors", camZone, camID, {
                                    "Sensor": "CCTV",
                                    "SensorID": camSensorID,
                                    "SensorValue": persons[i]
                                })

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Warnings", camZone, camID, {
                                    "WarningType":
                                    "CCTV",
                                    "WarningOrigin":
                                    camSensorID,
                                    "WarningValue":
                                    persons[i],
                                    "WarningMessage":
                                    "User " + str(persons[i]) +
                                    " detected with confidence: " + str(c)
                                })

                            self.TassClassifier.moveIdentified(frame)

                            print str(persons[i]
                                      ) + " Detected With Confidence " + str(c)
0
class TassRealsense():
    """RealSense-camera security monitor.

    Reads the colour stream from a local Intel RealSense device, detects
    and classifies faces and publishes sensor and warning messages to the
    IoT JumpWay broker over MQTT. Construction blocks forever in the
    processing loop.
    """

    def __init__(self):
        # Build the helpers, load config.json, open the RealSense device,
        # connect to MQTT and enter the (endless) processing loop.

        self.JumpWayMQTTClient = ""

        self.TassTools = TassTools()
        self.TassClassifier = TassClassifier(1)

        self._configs = {}

        with open('config.json') as configs:

            self._configs = json.loads(configs.read())

        # Colour stream only, at a fixed frame rate.
        framesPerSecond = 30

        serv = pyrs.Service()
        realsense = serv.Device(
            device_id=self._configs["RealsenseCam"]["localCamID"],
            streams=[pyrs.stream.ColorStream(fps=framesPerSecond)])

        self.startMQTT()
        self.processFrame(realsense, self._configs["RealsenseCam"]["camID"],
                          self._configs["RealsenseCam"]["camZone"],
                          self._configs["RealsenseCam"]["camSensorID"])

    def startMQTT(self):
        """Create the IoT JumpWay MQTT application client from the
        IoTJumpWayMQTTSettings section of config.json and connect it.

        Exits the whole process if the client cannot be constructed.
        """

        try:

            self.JumpWayMQTTClient = JumpWayPythonMQTTApplicationConnection({
                "locationID":
                self._configs["IoTJumpWayMQTTSettings"]["SystemLocation"],
                "applicationID":
                self._configs["IoTJumpWayMQTTSettings"]["SystemApplicationID"],
                "applicationName":
                self._configs["IoTJumpWayMQTTSettings"]
                ["SystemApplicationName"],
                "username":
                self._configs["IoTJumpWayMQTTSettings"]["applicationUsername"],
                "password":
                self._configs["IoTJumpWayMQTTSettings"]["applicationPassword"]
            })

        except Exception as e:
            print(str(e))
            sys.exit()

        self.JumpWayMQTTClient.connectToApplication()

    def processFrame(self, realsense, realsenseID, realsenseZone,
                     realsenseSensor):
        """Endless loop: grab a RealSense colour frame, detect faces (Haar
        cascades first, dlib as fallback), classify them and publish the
        result over MQTT.

        realsense: pyrs device delivering the colour stream.
        realsenseID / realsenseZone / realsenseSensor: identifiers placed
        in the MQTT messages for this camera.
        """

        realsense.apply_ivcam_preset(0)

        while True:

            realsense.wait_for_frames()

            # pyrs delivers RGB; OpenCV expects BGR.
            frame = self.TassTools.resize(
                cv2.cvtColor(realsense.color, cv2.COLOR_RGB2BGR))
            currentImage, detected = self.TassClassifier.openCVDetect(frame)

            if detected is not None:

                for face in detected:

                    persons, confidences = self.TassClassifier.classify(
                        frame, face, "CV")

                    if len(confidences):

                        print "P: " + str(persons) + " C: " + str(confidences)

                    else:

                        self.TassClassifier.moveNotIdentified(frame)

                        print "Unable To Classify Frame "

                # NOTE(review): this publish loop sits outside the per-face
                # loop above, so only the last classified face's
                # persons/confidences are published -- confirm intended.
                for i, c in enumerate(confidences):

                    if persons[i] == "unknown":

                        # Model knows no matching person: raise an intruder
                        # sensor reading plus a warning.
                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Sensors", realsenseZone, realsenseID, {
                                "Sensor": "CCTV",
                                "SensorID": realsenseSensor,
                                "SensorValue": "Intruder"
                            })

                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Warnings", realsenseZone, realsenseID, {
                                "WarningType": "CCTV",
                                "WarningOrigin": realsenseSensor,
                                "WarningValue": "Intruder",
                                "WarningMessage":
                                "An intruder has been detected"
                            })

                        self.TassClassifier.moveNotIdentified(frame)

                        print "Unknown Person Detected With Confidence " + str(
                            c)

                    elif persons[i] != "":

                        # Known person: publish their identity.
                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Sensors", realsenseZone, realsenseID, {
                                "Sensor": "CCTV",
                                "SensorID": realsenseSensor,
                                "SensorValue": persons[i]
                            })

                        self.JumpWayMQTTClient.publishToDeviceChannel(
                            "Warnings", realsenseZone, realsenseID, {
                                "WarningType":
                                "CCTV",
                                "WarningOrigin":
                                realsenseSensor,
                                "WarningValue":
                                persons[i],
                                "WarningMessage":
                                "User " + str(persons[i]) +
                                " detected with confidence: " + str(c)
                            })

                        self.TassClassifier.moveIdentified(frame)

                        print str(
                            persons[i]) + " Detected With Confidence " + str(c)

            else:

                # Haar cascades found nothing: retry with the dlib detector.
                #dlframe = cv2.flip(frame, 1)
                currentImage, detected = self.TassClassifier.dlibDetect(frame)

                if detected is not None:

                    for face in detected:

                        persons, confidences = self.TassClassifier.classify(
                            frame, face, "CV")

                        if len(confidences):

                            print "P: " + str(persons) + " C: " + str(
                                confidences)

                        else:

                            self.TassClassifier.moveNotIdentified(frame)

                            print "Unable To Classify Frame "

                    # NOTE(review): same as above -- only the last face's
                    # results reach this publish loop.
                    for i, c in enumerate(confidences):

                        if persons[i] == "unknown":

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Sensors", realsenseZone, realsenseID, {
                                    "Sensor": "CCTV",
                                    "SensorID": realsenseSensor,
                                    "SensorValue": "Intruder"
                                })

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Warnings", realsenseZone, realsenseID, {
                                    "WarningType":
                                    "CCTV",
                                    "WarningOrigin":
                                    realsenseSensor,
                                    "WarningValue":
                                    "Intruder",
                                    "WarningMessage":
                                    "An intruder has been detected"
                                })

                            self.TassClassifier.moveNotIdentified(frame)

                            print "Unknown Person Detected With Confidence " + str(
                                c)

                        elif persons[i] != "":

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Sensors", realsenseZone, realsenseID, {
                                    "Sensor": "CCTV",
                                    "SensorID": realsenseSensor,
                                    "SensorValue": persons[i]
                                })

                            self.JumpWayMQTTClient.publishToDeviceChannel(
                                "Warnings", realsenseZone, realsenseID, {
                                    "WarningType":
                                    "CCTV",
                                    "WarningOrigin":
                                    realsenseSensor,
                                    "WarningValue":
                                    persons[i],
                                    "WarningMessage":
                                    "User " + str(persons[i]) +
                                    " detected with confidence: " + str(c)
                                })

                            self.TassClassifier.moveIdentified(frame)

                            print str(persons[i]
                                      ) + " Detected With Confidence " + str(c)
コード例 #6
0
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
########################################################################################

import os
import cv2
import time

from TassTools import TassTools
from TassClassifier import TassClassifier

# The instances deliberately shadow the imported class names; the classes
# themselves are not needed again after instantiation.
TassTools = TassTools()
TassClassifier = TassClassifier(1)

# Run every image in testing/ through the classifier; the file name (minus
# extension) is the expected identity.
for testFile in os.listdir("testing/"):

    testPath = "testing/" + testFile
    newPayload = cv2.imread(testPath)
    persons, confidences = TassClassifier.classify(newPayload, " ", "CV")

    print("SHOULD BE: " + os.path.splitext(os.path.basename(testPath))[0])

    if not persons:
        # No face was classified in this image; report and continue
        # instead of crashing on persons[0].
        print("DETECTED: nothing")
        print("")
        continue

    print("DETECTED: " + str(persons[0]) + " With Confidence: " +
          str(confidences[0]))

    if str(persons[0]) == "unknown":
        print("Intruder")
    else:
        print(str(persons[0]) + " " + str(confidences[0]))
    print("")
コード例 #7
0
class TassClassifier():

    def __init__(self, model):
        """Load classifier settings and the trained recognition pipeline.

        :param model: identifier of the trained model; selects the
            features/<model>/classifier.pkl file to load.

        Reads config.json from the current working directory, loads the
        dlib/OpenFace models from directories next to this module, and the
        Haar cascades from paths given in the configuration.
        """

        self.TassTools = TassTools()

        self.model = model

        with open('config.json') as configs:

            self._configs = json.loads(configs.read())

        self.imgDim = self._configs["ClassifierSettings"]["imgDim"]
        self.threshold = self._configs["ClassifierSettings"]["threshold"]
        self.cuda = self._configs["ClassifierSettings"]["useCuda"]

        self.fileDir = os.path.dirname(os.path.abspath(__file__))
        self.modelsDir = self.fileDir+'/models'
        self.dlibModelDir = self.modelsDir+'/dlib'
        self.openfaceModelDir = self.modelsDir+'/openface'

        self.dlibFacePredictor = self.dlibModelDir+'/shape_predictor_68_face_landmarks.dat'
        self.networkModel = self.openfaceModelDir+'/nn4.small2.v1.t7'

        self.detector = dlib.get_frontal_face_detector()

        self.classifierModel = os.path.dirname(os.path.abspath(__file__)) + "/features/"+str(self.model)+"/classifier.pkl"

        # Cascades are tried in this order by openCVDetect; note cascade 5
        # re-loads the same profile cascade as cascade 2.
        self.faceCascade1 = cv2.CascadeClassifier( os.getcwd()+'/'+self._configs["ClassifierSettings"]["HAAR_FACES"])
        self.faceCascade2 = cv2.CascadeClassifier( os.getcwd()+'/'+self._configs["ClassifierSettings"]["HAAR_PROFILES"])
        self.faceCascade3 = cv2.CascadeClassifier( os.getcwd()+'/'+self._configs["ClassifierSettings"]["HAAR_FACES2"])
        self.faceCascade4 = cv2.CascadeClassifier( os.getcwd()+'/'+self._configs["ClassifierSettings"]["HAAR_FACES3"])
        self.faceCascade5 = cv2.CascadeClassifier( os.getcwd()+'/'+self._configs["ClassifierSettings"]["HAAR_PROFILES"])

        # Pickle files are binary data: open in 'rb' (text mode 'r'
        # corrupts the stream on Windows and fails outright on Python 3).
        with open(self.classifierModel, 'rb') as f:

            (self.le, self.clf) = pickle.load(f)

        self.align = openface.AlignDlib(self.dlibFacePredictor)
        self.net = openface.TorchNeuralNet(self.networkModel,imgDim=self.imgDim,cuda=self.cuda)

    def openCVDetect(self, frame):
        """Detect faces with the Haar cascades, trying the five cascades
        in order until one reports a hit.

        :param frame: BGR frame to scan (annotated in place when faces are
            found).
        :returns: (fileAddress, faces) when at least one face is found,
            otherwise (None, None). In both cases the frame is archived
            under frames/<hour>/detected or frames/<hour>/notdetected.
        """

        gray = self.TassTools.preProcess(frame)

        # Hoist the shared detection parameters out of the cascade loop.
        scaleFactor = self._configs["ClassifierSettings"]["HAAR_SCALE_FACTOR"]
        minNeighbors = self._configs["ClassifierSettings"]["HAAR_MIN_NEIGHBORS"]

        # The original repeated this call five times verbatim; fall through
        # the cascades in order and stop at the first hit.
        faces = ()
        for cascade in (self.faceCascade1, self.faceCascade2,
                        self.faceCascade3, self.faceCascade4,
                        self.faceCascade5):

            faces = cascade.detectMultiScale(gray,
                                             scaleFactor=scaleFactor,
                                             minNeighbors=minNeighbors,
                                             minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)

            if len(faces):
                break

        print("OPENCV DETECTED " + str(len(faces)) + " FACES")

        today = time.strftime("%Y-%m-%d-%H")
        fileName = datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.jpg'
        # Fixed: the notdetected branch used os.path.abspath('__file__')
        # (a string literal), which resolved against the current working
        # directory instead of this module's directory.
        baseDir = os.path.dirname(os.path.abspath(__file__))

        if len(faces):

            outDir = baseDir + '/frames/' + today + '/detected/'

            if not os.path.exists(outDir):
                os.makedirs(outDir)

            for (x, y, w, h) in faces:

                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            fileAddress = outDir + fileName
            cv2.imwrite(fileAddress, frame)

            return fileAddress, faces

        else:

            outDir = baseDir + '/frames/' + today + '/notdetected/'

            if not os.path.exists(outDir):
                os.makedirs(outDir)

            fileAddress = outDir + fileName
            cv2.imwrite(fileAddress, frame)

            return None, None

    def dlibDetect(self, frame):
        """Detect faces with the dlib frontal face detector.

        :param frame: frame to scan; it is pre-processed and annotated in
            place when faces are found.
        :returns: (fileAddress, faces) when at least one face is found,
            otherwise (None, None). The frame is archived under
            frames/<hour>/detected or frames/<hour>/notdetected.
        """

        frame = self.TassTools.preProcess(frame)
        faces = self.detector(frame, 1)

        print("DLIB DETECTED " + str(len(faces)) + " FACES")

        today = time.strftime("%Y-%m-%d-%H")
        fileName = datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.jpg'
        # Fixed: os.path.abspath('__file__') (a string literal) resolved
        # against the current working directory instead of this module's
        # directory.
        baseDir = os.path.dirname(os.path.abspath(__file__))

        if len(faces):

            outDir = baseDir + '/frames/' + today + '/detected/'

            if not os.path.exists(outDir):

                os.makedirs(outDir)

            for face in faces:

                x, y, w, h = self.TassTools.convertRect(face)

                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            fileAddress = outDir + fileName
            cv2.imwrite(fileAddress, frame)

            return fileAddress, faces

        else:

            outDir = baseDir + '/frames/' + today + '/notdetected/'

            if not os.path.exists(outDir):

                os.makedirs(outDir)

            fileAddress = outDir + fileName
            cv2.imwrite(fileAddress, frame)

            return None, None

    def getRep(self, bgrImg):
        """Return the OpenFace embedding for every face found in *bgrImg*.

        :param bgrImg: frame in OpenCV BGR channel order; bounding boxes
            are drawn onto it in place.
        :returns: list of embeddings, one per detected face; empty when no
            face is found (matching the previous effective behaviour).
        :raises Exception: when the frame is missing or a face cannot be
            aligned.
        """

        if bgrImg is None:

            raise Exception("Unable to load image/frame")

        rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
        bb = self.align.getAllFaceBoundingBoxes(rgbImg)

        # getAllFaceBoundingBoxes returns a (possibly empty) sequence,
        # never None, so the original `bb is None` guard was dead; test
        # emptiness instead. Returning [] keeps classify() yielding
        # ([], []) exactly as before.
        if not bb:
            print("No Bounding Boxes Found")
            return []

        alignedFaces = []

        for box in bb:

            bl = (box.left(), box.bottom())
            tr = (box.right(), box.top())

            # Annotate the caller's frame in place.
            cv2.rectangle(bgrImg, bl, tr, color=(0, 255, 0), thickness=3)

            alignedFace = self.align.align(
                self.imgDim,
                rgbImg,
                box,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)

            # align() returns None when it cannot align a box; the original
            # checked the whole list against None (never true) and would
            # have crashed later in net.forward. Fail loudly here instead.
            if alignedFace is None:

                print("Unable to align the frame")
                raise Exception("Unable to align the frame")

            alignedFaces.append(alignedFace)

        return [self.net.forward(alignedFace) for alignedFace in alignedFaces]

    def classify(self, alignedFace, faces, cvOrDlib):
        """Identify the people visible in a frame.

        :param alignedFace: frame to classify (aligned first when
            cvOrDlib == "Dlib").
        :param faces: dlib rectangle for the face when cvOrDlib == "Dlib";
            ignored otherwise.
        :param cvOrDlib: "Dlib" to run landmark alignment first, anything
            else to classify the frame as-is.
        :returns: (people, confidences) parallel lists (confidences are
            percentages), or None when Dlib landmark detection fails.
        """

        if(cvOrDlib == "Dlib"):

            landmarks = self.align.findLandmarks(alignedFace, faces)

            # `is None` rather than `== None` (identity check; avoids
            # invoking __eq__ on exotic types).
            if landmarks is None:

                print("LANDMARKS NOT FOUND")
                return None

            alignedFace = self.align.align(
                self.imgDim,
                alignedFace,
                faces,
                landmarks=landmarks,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)

        reps = self.getRep(alignedFace)
        people = []
        confidences = []

        for rep in reps:

            try:

                rep = rep.reshape(1, -1)
                predictions = self.clf.predict_proba(rep).ravel()
                maxI = np.argmax(predictions)
                people.append(self.le.inverse_transform(maxI))
                confidences.append(predictions[maxI] * 100)

            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            except Exception:

                print("No Face detected")

        return (people, confidences)