Example 1
def main(argv):

    humanStart = datetime.now()
    clockStart = time.time()

    print("-- FACENET TEST MODE STARTING ")
    print("-- STARTED: ", humanStart)
    print("")

    validDir = Classifier._configs["ClassifierSettings"]["NetworkPath"] \
        + Classifier._configs["ClassifierSettings"]["ValidPath"]
    testingDir = Classifier._configs["ClassifierSettings"]["NetworkPath"] \
        + Classifier._configs["ClassifierSettings"]["TestingPath"]

    files = 0
    identified = 0

    for test in os.listdir(testingDir):

        if test.endswith(('.jpg', '.jpeg', '.png', '.gif')):
            #print(testingDir+test)

            test_output = FacenetHelpers.infer(cv2.imread(testingDir + test),
                                               Classifier.graph)
            files = files + 1

            for valid in os.listdir(validDir):

                if valid.endswith(('.jpg', '.jpeg', '.png', '.gif')):

                    valid_output = FacenetHelpers.infer(
                        cv2.imread(validDir + valid), Classifier.graph)

                    if FacenetHelpers.match(valid_output, test_output):
                        identified = identified + 1
                        print("-- MATCH " + test)
                        print("")

                        Classifier.jumpwayClient.publishToDeviceChannel(
                            "Warnings", {
                                "WarningType": "CCTV",
                                "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                                "WarningValue": "RECOGNISED",
                                "WarningMessage": test.rsplit(".", 1)[0] + " Detected"
                            })
                        break
                    else:

                        print("-- NO MATCH")
                        print("")

                        Classifier.jumpwayClient.publishToDeviceChannel(
                            "Warnings", {
                                "WarningType": "CCTV",
                                "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                                "WarningValue": "INTRUDER",
                                "WarningMessage": "INTRUDER"
                            })

    humanEnd = datetime.now()
    clockEnd = time.time()

    print("")
    print("-- FACENET TEST MODE ENDING")
    print("-- ENDED: ", humanEnd)
    print("-- TESTED: ", files)
    print("-- IDENTIFIED: ", identified)
    print("-- TIME(secs): {0}".format(clockEnd - clockStart))
    print("")

    print("!! SHUTTING DOWN !!")
    print("")

    Classifier.graph.DeallocateGraph()
    Classifier.movidius.CloseDevice()
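
Example 1 compares every test image against every valid image and treats `FacenetHelpers.match` as a boolean embedding comparison. The helper itself is not shown in this snippet; the sketch below is a minimal stand-in, assuming the usual FaceNet-style check of squared Euclidean distance between embeddings (the `match_sketch` name and the 1.3 threshold are assumptions, not this project's code).

import numpy as np

def match_sketch(valid_output, test_output, threshold=1.3):
    # Squared Euclidean distance between the two embedding vectors
    # returned by FacenetHelpers.infer; both the formula and the 1.3
    # threshold are assumed here, not taken from this project
    distance = np.sum(np.square(np.subtract(valid_output, test_output)))
    return distance < threshold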
Example 2

def main(argv):

    if argv[0] == "Facenet":

        Classifier.loadRequirements(argv[0])

        humanStart = datetime.now()
        clockStart = time.time()

        print("-- FACENET TEST MODE STARTING ")
        print("-- STARTED: ", humanStart)

        validDir = Classifier._configs["ClassifierSettings"]["NetworkPath"] \
            + Classifier._configs["ClassifierSettings"]["ValidPath"]
        testingDir = Classifier._configs["ClassifierSettings"]["NetworkPath"] \
            + Classifier._configs["ClassifierSettings"]["TestingPath"]

        files = 0
        identified = 0

        for test in os.listdir(testingDir):

            if test.endswith(('.jpg', '.jpeg', '.png', '.gif')):
                #print(testingDir+test)

                test_output = FacenetHelpers.infer(
                    cv2.imread(testingDir + test), Classifier.fgraph)
                files = files + 1

                for valid in os.listdir(validDir):

                    if valid.endswith(('.jpg', '.jpeg', '.png', '.gif')):

                        valid_output = FacenetHelpers.infer(
                            cv2.imread(validDir + valid), Classifier.fgraph)

                        if FacenetHelpers.match(valid_output, test_output):
                            identified = identified + 1
                            print("-- MATCH " + test)

                            Classifier.jumpwayClient.publishToDeviceChannel(
                                "Warnings", {
                                    "WarningType": "CCTV",
                                    "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                                    "WarningValue": "RECOGNISED",
                                    "WarningMessage": test.rsplit(".", 1)[0] + " Detected"
                                })
                            break
                        else:

                            print("-- NO MATCH")

                            Classifier.jumpwayClient.publishToDeviceChannel(
                                "Warnings", {
                                    "WarningType": "CCTV",
                                    "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                                    "WarningValue": "INTRUDER",
                                    "WarningMessage": "INTRUDER"
                                })

        humanEnd = datetime.now()
        clockEnd = time.time()

        print("")
        print("-- FACENET TEST MODE ENDING")
        print("-- ENDED: ", humanEnd)
        print("-- TESTED: ", files)
        print("-- IDENTIFIED: ", identified)
        print("-- TIME(secs): {0}".format(clockEnd - clockStart))
        print("")
        print("!! SHUTTING DOWN !!")
        print("")

        Classifier.fgraph.DeallocateGraph()
        Classifier.movidius.CloseDevice()

    elif argv[0] == "Inception":

        Classifier.loadRequirements("IDC")

        humanStart = datetime.now()
        clockStart = time.time()

        print("-- INCEPTION V3 TEST MODE STARTED: : ", humanStart)

        rootdir = Classifier._configs["ClassifierSettings"]["NetworkPath"] \
            + Classifier._configs["ClassifierSettings"]["InceptionImagePath"]

        files = 0
        identified = 0

        for file in os.listdir(rootdir):

            if file.endswith(('.jpg', '.jpeg', '.png', '.gif')):

                files = files + 1
                fileName = rootdir + file

                print("-- Loaded Test Image", fileName)
                img = cv2.imread(fileName).astype(np.float32)

                dx, dy, dz = img.shape
                delta = float(abs(dy - dx))

                if dx > dy:

                    img = img[int(0.5 * delta):dx - int(0.5 * delta), 0:dy]

                else:

                    img = img[0:dx, int(0.5 * delta):dy - int(0.5 * delta)]

                img = cv2.resize(img, (Classifier.reqsize, Classifier.reqsize))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                for i in range(3):

                    img[:, :, i] = (img[:, :, i] - Classifier.mean) * Classifier.std

                detectionStart = datetime.now()
                detectionClockStart = time.time()

                print("-- DETECTION STARTING ")
                print("-- STARTED: : ", detectionStart)

                Classifier.graph.LoadTensor(img.astype(np.float16),
                                            'user object')
                output, userobj = Classifier.graph.GetResult()

                top_inds = output.argsort()[::-1][:5]

                detectionEnd = datetime.now()
                detectionClockEnd = time.time()

                print("-- DETECTION ENDING")
                print("-- ENDED: ", detectionEnd)
                print("-- TIME: {0}".format(detectionClockEnd -
                                            detectionClockStart))

                if (output[top_inds[0]] > Classifier._configs["ClassifierSettings"]["InceptionThreshold"]
                        and Classifier.categories[top_inds[0]] == "1"):

                    identified = identified + 1

                    print("")
                    print("!! TASS Identified IDC with a confidence of",
                          str(output[top_inds[0]]))
                    print("")

                    Classifier.jumpwayClient.publishToDeviceChannel(
                        "Warnings", {
                            "WarningType": "CCTV",
                            "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                            "WarningValue": "RECOGNISED",
                            "WarningMessage": "IDC Detected"
                        })

                    print("")

                else:

                    print("")
                    print("!! TASS Did Not Identify IDC")
                    print("")

                    Classifier.jumpwayClient.publishToDeviceChannel(
                        "Warnings", {
                            "WarningType": "CCTV",
                            "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                            "WarningValue": "NOT RECOGNISED",
                            "WarningMessage": "IDC Not Detected"
                        })

                    print("")

                Classifier.jumpwayClient.publishToDeviceChannel(
                    "Sensors", {
                        "Sensor": "CCTV",
                        "SensorID": Classifier._configs["Cameras"][0]["ID"],
                        "SensorValue": "IDC: " + Classifier.categories[top_inds[0]]
                                       + " (Confidence: " + str(output[top_inds[0]]) + ")"
                    })

        humanEnd = datetime.now()
        clockEnd = time.time()

        print("")
        print("-- INCEPTION V3 TEST MODE ENDING")
        print("-- ENDED: ", humanEnd)
        print("-- TESTED: ", files)
        print("-- IDENTIFIED: ", identified)
        print("-- TIME(secs): {0}".format(clockEnd - clockStart))
        print("")

        print("!! SHUTTING DOWN !!")
        print("")

        Classifier.graph.DeallocateGraph()
        Classifier.movidius.CloseDevice()

    else:

        print("**ERROR** Check Your Commandline Arguments")
        print("")
Example 3
def TASSinference():
    
    Server.CheckDevices()
    Server.loadRequirements("TASS")

    humanStart = datetime.now()
    clockStart = time.time()

    print("-- FACENET LIVE INFERENCE STARTED: ", humanStart)

    r = request
    nparr = np.frombuffer(r.data, np.uint8)

    print("-- Loading Face")
    fileName = "data/captured/TASS/"+str(clockStart)+'.png'
    img = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
    cv2.imwrite(fileName,img)
    img = cv2.imread(fileName).astype(np.float32)
    print("-- Loaded Sample")

    validDir    = Server._configs["ClassifierSettings"]["NetworkPath"] + Server._configs["ClassifierSettings"]["ValidPath"]
    testingDir  = Server._configs["ClassifierSettings"]["NetworkPath"] + Server._configs["ClassifierSettings"]["TestingPath"]

    files = 0
    identified = 0

    # Defaults guard against an empty valid directory, which would otherwise
    # leave known and confidence undefined further down
    known, confidence = "False", 0

    test_output = FacenetHelpers.infer(img, Server.fgraph)
    files = files + 1

    for valid in os.listdir(validDir):

        if valid.endswith(('.jpg', '.jpeg', '.png', '.gif')):

            valid_output = FacenetHelpers.infer(cv2.imread(validDir+valid), Server.fgraph)
            known, confidence = FacenetHelpers.match(valid_output, test_output)
            if (known=="True"):
                identified = identified + 1
                print("-- MATCH "+valid)
                break

    humanEnd = datetime.now()
    clockEnd = time.time()

    Server.fgraph.DeallocateGraph()
    Server.movidius.CloseDevice()

    print("")
    print("-- FACENET LIVE INFERENCE ENDED: ", humanEnd)
    print("-- TESTED: ", 1)
    print("-- IDENTIFIED: ", identified)
    print("-- TIME(secs): {0}".format(clockEnd - clockStart))
    print("")

    if identified:

        validPerson = os.path.splitext(valid)[0]

        message = validPerson + " Detected With Confidence " + str(confidence)
        person = validPerson

    else:

        message = "Intruder Detected With Confidence " + str(confidence)
        person = "Intruder"

    response = {
        'Response': 'OK',
        'Results': identified,
        'Person': person,
        'Confidence': confidence,
        'ResponseMessage': message
    }

    response_pickled = jsonpickle.encode(response)

    return Response(response=response_pickled, status=200, mimetype="application/json")
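
`TASSinference` reads the raw request body (`request.data`), so a client only needs to POST the encoded image bytes and parse the JSON reply. A minimal client sketch follows; the URL, port, and route are placeholders, since the snippet does not show how the endpoint is registered.

import requests

# Hypothetical address; the real host, port and route come from the
# server's configuration, which this snippet does not show
url = "http://localhost:8080/tass/infer"

with open("face.png", "rb") as f:
    reply = requests.post(url, data=f.read(),
                          headers={"Content-Type": "image/png"})

print(reply.json())  # e.g. {'Response': 'OK', 'Person': ..., 'Confidence': ...}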
Example 4
    def do_GET(self):
        if self.path.endswith('.mjpg'):
            self.send_response(200)
            self.send_header(
                'Content-type',
                'multipart/x-mixed-replace; boundary=--jpgboundary')
            self.end_headers()
            frameWait = 0
            fps = FPS().start()

            try:

                while True:
                    # grab the frame from the threaded video stream and resize it
                    # to have a maximum width of 400 pixels
                    frame = capture.read()
                    frame = imutils.resize(frame, width=640)
                    rawFrame = frame.copy()

                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    rects = Classifier.detector(gray, 0)

                    for (i, rect) in enumerate(rects):
                        # determine the facial landmarks for the face region, then
                        # convert the facial landmark (x, y)-coordinates to a NumPy
                        # array
                        shape = Classifier.predictor(gray, rect)
                        shape = face_utils.shape_to_np(shape)

                        # convert dlib's rectangle to a OpenCV-style bounding box
                        # [i.e., (x, y, w, h)], then draw the face bounding box
                        (x, y, w, h) = face_utils.rect_to_bb(rect)
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)

                        # loop over the (x, y)-coordinates for the facial landmarks
                        # and draw them on the image
                        for (x, y) in shape:
                            cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)

                        frameWait = 0
                        currentFace = rawFrame[
                            max(0, rect.top() - 100):min(rect.bottom() + 100, 480),
                            max(0, rect.left() - 100):min(rect.right() + 100, 640)]
                        cv2.imwrite("test.jpg", currentFace)

                        validDir = Classifier._configs["ClassifierSettings"]["NetworkPath"] \
                            + Classifier._configs["ClassifierSettings"]["ValidPath"]

                        for valid in os.listdir(validDir):

                            if valid.endswith(('.jpg', '.jpeg', '.png', '.gif')):

                                if FacenetHelpers.match(
                                        FacenetHelpers.infer(
                                            cv2.imread(validDir + valid),
                                            Classifier.graph),
                                        FacenetHelpers.infer(
                                            currentFace, Classifier.graph)):

                                    name = valid.rsplit('.', 1)[0]
                                    print("-- MATCH " + name)
                                    print("")
                                    Classifier.jumpwayClient.publishToDeviceChannel(
                                        "Warnings", {
                                            "WarningType": "CCTV",
                                            "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                                            "WarningValue": "RECOGNISED",
                                            "WarningMessage": name + " Detected"
                                        })
                                    break
                                else:
                                    print("-- NO MATCH")
                                    print("")

                                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                                  (255, 0, 0), 2)

                                    Classifier.jumpwayClient.publishToDeviceChannel(
                                        "Warnings", {
                                            "WarningType": "CCTV",
                                            "WarningOrigin": Classifier._configs["Cameras"][0]["ID"],
                                            "WarningValue": "INTRUDER",
                                            "WarningMessage": "INTRUDER"
                                        })
                            else:
                                print("-- NO VALID ID")
                                print("")

                    imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    imgRGB = cv2.flip(imgRGB, 1)
                    jpg = Image.fromarray(imgRGB)
                    tmpFile = BytesIO()
                    jpg.save(tmpFile, 'JPEG')
                    self.wfile.write("--jpgboundary".encode())
                    self.send_header('Content-type', 'image/jpeg')
                    self.send_header('Content-length',
                                     str(tmpFile.getbuffer().nbytes))
                    self.end_headers()
                    self.wfile.write(tmpFile.getvalue())
                    #time.sleep(0.05)
                    fps.update()
                    frameWait = frameWait + 1

            except KeyboardInterrupt:
                # Stop streaming cleanly on Ctrl+C
                pass
            return
        if self.path.endswith('.html'):
            src = '<img src="http://' + Classifier._configs["Cameras"][0][
                "Stream"] + ':' + str(Classifier._configs["Cameras"][0]
                                      ["StreamPort"]) + '/cam.mjpg" />'
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write('<html><head></head><body>'.encode())
            self.wfile.write(src.encode())
            self.wfile.write('</body></html>'.encode())
            return
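
Because `do_GET` serves a `multipart/x-mixed-replace` MJPEG stream at the `.mjpg` path, any MJPEG-aware client can display the annotated feed. A minimal viewer sketch follows; the host and port are placeholders for the `Stream` and `StreamPort` values in the `Cameras` config.

import cv2

# Placeholder address; in the example it is built from
# Classifier._configs["Cameras"][0]["Stream"] and ["StreamPort"]
stream = cv2.VideoCapture("http://192.168.1.100:8080/cam.mjpg")

while True:
    grabbed, frame = stream.read()
    if not grabbed:
        break
    cv2.imshow("TASS Stream", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

stream.release()
cv2.destroyAllWindows()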