    def __init__(self, name=None, shared_variables=None, index=0):
        threading.Thread.__init__(self)
        self.name = name
        self.shared_variables = shared_variables
        self.index = index
        LOG.info("Start tracking " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
    def run(self):

        #  wait for initial detection
        while self.shared_variables.detection_box[self.index] is None:
            pass

        # initiate tracker
        self.create_custom_tracker()

        # tracking loop
        while self.shared_variables.system_running:

            self.start_time = datetime.datetime.now()

            self.frame = self.shared_variables.frame[self.index]
            self.object_custom_tracking()
            self.end_time = datetime.datetime.now()

            if self.shared_variables.debug:
                LOG.debug(
                    "Tracking time : " + str(self.end_time - self.start_time),
                    "SYSTEM-" + self.shared_variables.name)

        LOG.info("Stopped tracking " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 3
    def __init__(self, shared_variables=None, id=0, index=0):
        threading.Thread.__init__(self)
        self.id = id
        self.index = index
        self.shared_variables = shared_variables
        LOG.info("Started a webcam at " + str(index) + " from " + str(id),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 4
def main():
    LOG.clear_log()
    LOG.info("Log cleared", "ROOT")
    LOG.info("Starting program", "ROOT")

    controll_thread = controller.parse_controller()
    controll_thread.start()
Example no. 5
    def __init__(self, name=None, shared_variables=None, index=0):
        threading.Thread.__init__(self)
        self.name = name
        self.shared_variables = shared_variables
        self.index = index
        self.initiate_variables()
        LOG.info("Started imshow " + str(index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 6
    def __init__(self, name=None, shared_variables=None):
        threading.Thread.__init__(self)
        self.name = name
        self.shared_variables = shared_variables
        self.sleep_time = self.SHORT_SLEEP
        self.index = int(name)
        LOG.info("Create dlib detection " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 7
    def __init__(self, name=None, shared_variables=None):
        threading.Thread.__init__(self)
        self.name = name
        self.shared_variables = shared_variables
        self.sleep_time = self.SHORT_SLEEP
        self.model_path = self.get_model_path()
        self.index = int(name)
        LOG.info("Loading Tensorflow model " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 8
    def create_new_system_instance(self):
        # Generate system id and add new system instance
        instance_name = str(threading.get_ident())
        self.system_reference_array.append(
            shared_variables.Shared_Variables(name=instance_name,
                                              config=self.config))
        LOG.info("Created system with id " + str(instance_name),
                 "SYSTEM-" + str(instance_name))
        print("Created system with id " + str(instance_name))
Example no. 9
    def run(self):
        LOG.info("Start blink frequency "+ str(self.index), "SYSTEM-"+self.shared_variables.name)

        # load model
        model = load_model('../../model/blinkModel.hdf5')

        close_counter = blinks = mem_counter = 0
        state = ''

        #Wait for detection
        while self.shared_variables.frame[self.index] is None:
            pass
        while self.shared_variables.system_running:

            if self.shared_variables.frame[self.index] is not None:

                frame = self.shared_variables.frame[self.index]


                eyes = self.cropEyes(frame)
                if eyes is None:
                    continue
                else:
                    left_eye,right_eye = eyes


                prediction = (model.predict(self.cnnPreprocess(left_eye)) +
                              model.predict(self.cnnPreprocess(right_eye))) / 2.0

                if prediction > 0.5:
                    state = 'open'
                    close_counter = 0
                else:
                    state = 'close'
                    close_counter += 1

                if state == 'open' and mem_counter > 1:
                    blinks += 1

                mem_counter = close_counter

                #save blinking
                #eye_state
                self.shared_variables.eye_state[self.index] = state
                #blinks
                self.shared_variables.blinks[self.index] = blinks
                #eye_left
                self.shared_variables.eye_left[self.index] = left_eye
                #eye_right
                self.shared_variables.eye_right[self.index] = right_eye

                if self.shared_variables.debug:
                    LOG.debug(str(state) + " " + str(blinks) + " from "+str(self.index),"SYSTEM-"+self.shared_variables.name)

        LOG.info("Ending blink freq " + str(self.index), "SYSTEM-"+self.shared_variables.name)
Example no. 10
    def run(self):
        args = None
        input_str = ""
        print("----- Program -----")
        print(" type help or h to see functions")
        while True:
            input_str = input('Program>')
            LOG.info("Run command " + input_str, "ROOT")
            input_str_array = input_str.split(' ')
            if self.get_first_arg(input_str_array) is not None:
                if not self.run_command(input_str_array):
                    print("No command : " + input_str + " (for help type h)")
Example no. 11
    def run(self):
        # capture() is expected to open the device and rebind self.capture
        # to a cv2.VideoCapture (see the sketch after this example)
        success = self.capture()
        if not success:
            return

        while self.shared_variables.system_running:
            if self.capture.isOpened():
                temp, frame = self.capture.read()

                # flip the frame if needed
                if self.shared_variables.flipp_test[self.index]:
                    self.shared_variables.frame[self.index] = imutils.rotate(
                        frame,
                        self.shared_variables.flipp_test_degree[self.index])
                else:
                    self.shared_variables.frame[self.index] = frame

        LOG.info("Ending webcam stream " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 12
    def run(self):
        try:
            self.capture = cv2.VideoCapture(self.address)
        except Exception as e:
            LOG.warning("Could not open ip camera " + self.address + " : " +
                        str(e), "SYSTEM-" + self.shared_variables.name)
            return


        while self.shared_variables.system_running:
            if self.capture.isOpened():
                temp, frame = self.capture.read()

                # flip the frame if needed
                if self.shared_variables.flipp_test[self.index]:
                    self.shared_variables.frame[self.index] = imutils.rotate(
                        frame,
                        self.shared_variables.flipp_test_degree[self.index])
                else:
                    self.shared_variables.frame[self.index] = frame

        LOG.info("End ipcamera stream " + address,"SYSTEM-"+self.shared_variables.name)
Example no. 13
    def run(self):
        LOG.info("Start age and gender estimation " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
        # load model and weights
        img_size = 64
        model = WideResNet(img_size, depth=16, k=8)()

        model.load_weights(self.pretrained_model_path)


        # wait for the first detection
        while self.shared_variables.frame[self.index] is None:
            pass

        while self.shared_variables.system_running:

            input_img = cv2.cvtColor(self.shared_variables.frame[self.index], cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # exactly one face box is processed per iteration
            faces = np.empty((1, img_size, img_size, 3))



            w = self.shared_variables.face_box[self.index][0][0]
            h = self.shared_variables.face_box[self.index][0][1]
            x1 = self.shared_variables.face_box[self.index][0][2]
            y1 = self.shared_variables.face_box[self.index][0][3]
            x2 = w + x1
            y2 = h + y1

            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), img_w - 1)
            yw2 = min(int(y2 + 0.4 * h), img_h - 1)

            faces[0, :, :, :] = cv2.resize(self.shared_variables.frame[self.index][yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))



            # predict age and gender of the detected face
            results = model.predict(faces)
            predicted_genders = results[0]
            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = results[1].dot(ages).flatten()


            # show
            if self.shared_variables.debug:
                LOG.debug("Predicted age: " + str(predicted_ages[0]) +
                          " from " + str(self.index),
                          "SYSTEM-" + self.shared_variables.name)

            gender = ""

            if predicted_genders[0][0] > 0.5:
                gender = "Female"
            else:
                gender = "Male"

            if self.shared_variables.debug:
                print("Predicted gender: " + gender)

            self.shared_variables.gender[self.index] = gender
            self.shared_variables.age[self.index] = predicted_ages[0]


            # short combined label
            if self.shared_variables.debug:
                label = "{}, {}".format(
                    int(predicted_ages[0]),
                    "F" if predicted_genders[0][0] > 0.5 else "M")
                LOG.debug(label + " from " + str(self.index),
                          "SYSTEM-" + self.shared_variables.name)

        LOG.info("Stopped age gender estimation" + str(self.index), "SYSTEM-"+self.shared_variables.name)
Example no. 14
    def run(self):
        LOG.info("Start expression " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)

        face_cascade = cv2.CascadeClassifier(
            '../../model/haarcascade_frontalface_default.xml')
        eye_cascade = cv2.CascadeClassifier('../../model/haarcascade_eye.xml')

        sess = tf.Session()

        face_expression_detector = model.Model()
        checkpoint_save_dir = os.path.join("../../model/checkpoint")
        face_expression_detector.load_graph(sess, checkpoint_save_dir)

        preferred_w, preferred_h = 800, 600
        sentiment_argmax = 0
        res = np.array([[0]])

        sentiment_arr = []

        # run until the system stops so sess.close() below is reached
        while self.shared_variables.system_running:
            #ret,frame = cap.read()
            frame = self.shared_variables.frame[self.index]
            frame_height, frame_width = frame.shape[:2]

            frame = cv2.resize(frame,
                               None,
                               fx=preferred_w / frame_width,
                               fy=preferred_h / frame_height,
                               interpolation=cv2.INTER_CUBIC)
            frame_height, frame_width = frame.shape[:2]

            grayed = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(grayed, 1.3, 5)
            for x, y, w, h in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = grayed[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]

                desired_h, desired_w = 48, 48
                resized_ratio_h, resized_ratio_w = desired_h / h, desired_w / w
                res = cv2.resize(roi_gray,
                                 None,
                                 fx=resized_ratio_w,
                                 fy=resized_ratio_h,
                                 interpolation=cv2.INTER_CUBIC)

                res = np.reshape(res, (-1, 2304))

                feed_dict = {
                    face_expression_detector.X: res,
                    face_expression_detector.keep_prob: 1
                }
                sentiment_arr = np.array(
                    sess.run(face_expression_detector.softmax_logits,
                             feed_dict=feed_dict))
                sentiment_arr = sentiment_arr[0]
                sentiment_argmax = np.argmax(sentiment_arr, axis=0)
                res = np.reshape(res, (48, 48))

            self.shared_variables.expression_result[self.index] = sentiment_arr
            self.shared_variables.face_image[self.index] = res
        sess.close()
        LOG.info("Close expression " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 15
    def run(self):
        if not self.Loaded_model:
            LOG.info("Loading Dlib modell" + str(self.index),
                     "SYSTEM-" + self.shared_variables.name)

            # Load model
            self.face_detector = dlib.get_frontal_face_detector()
            self.landmarks_predictor = dlib.shape_predictor(
                self.landmarks_model_path)

            #face_cascade = cv2.CascadeClassifier(face_cascade_path)
            self.Loaded_model = True

        LOG.info("Start dlib detections" + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)

        # wait for the first camera frame
        while self.shared_variables.frame[self.index] is None:
            pass

        # start loop
        while self.shared_variables.system_running:
            self.start_time = datetime.datetime.now()

            frame = self.shared_variables.frame[self.index]

            if self.do_flipp_test:
                frame = imutils.rotate(
                    frame, self.flipp_test_degree * self.flipp_test_nr)

            # do detection
            success, face_box, landmarks, score = self.object_detection(frame)

            # if found faces
            if success:

                self.shared_variables.detection_score[self.index] = score

                self.no_face_count = 0

                # Save landmark
                #self.shared_variables.landmarks[self.index] = landmarks
                self.shared_variables.set_landmarks(landmarks, self.index)

                # Save boxes
                self.shared_variables.face_box[self.index] = face_box
                #self.shared_variables.detection_box[self.index] = face_box
                self.shared_variables.set_detection_box(face_box, self.index)

                self.shared_variables.face_found[self.index] = True
                # Do flipp test on detection
                if self.shared_variables.flipp_test[
                        self.index] and self.do_flipp_test:
                    # save flipp as success
                    degree = (self.shared_variables.flipp_test_degree[
                        self.index] +
                        self.flipp_test_nr * self.flipp_test_degree)

                    degree = degree % 360  # normalize to [0, 360)

                    self.shared_variables.flipp_test_degree[
                        self.index] = degree

                    # log frame change
                    LOG.info(
                        "Flipp test succeeded, added degrees: " +
                        str(self.flipp_test_nr * self.flipp_test_degree),
                        self.shared_variables.name)

                    # end flipp test
                    self.do_flipp_test = False
                    self.flipp_test_nr = 1

                # Wake tracking thread

                #if not self.shared_variables.tracking_running[self.index]:
                #    self.sleep_time = self.SHORT_SLEEP

            else:
                # No face
                self.shared_variables.face_found[self.index] = False

                # if max face misses has been done, do less detections
                if self.no_face_count >= self.NO_FACE_MAX:

                    # do flipp test
                    if self.shared_variables.flipp_test[self.index]:

                        # doing flipp test
                        if self.do_flipp_test:
                            self.flipp_test_nr = self.flipp_test_nr + 1

                            # flipp test did not find anything
                            if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                self.do_flipp_test = False
                                self.flipp_test_nr = 1

                                if self.sleep_time == self.SHORT_SLEEP:
                                    #LOG.log("Initiate energy save",self.shared_variables.name)
                                    #self.sleep_time = self.LONG_SLEEP
                                    pass
                        else:
                            self.do_flipp_test = True

                    else:
                        #self.sleep_time = self.LONG_SLEEP
                        #self.shared_variables.tracking_running[self.index] = False
                        #LOG.log("Initiate energy save",self.shared_variables.name)
                        pass

                else:
                    self.no_face_count = self.no_face_count + 1

                if (self.no_face_count >= self.flipp_test_long_intervall
                        and self.shared_variables.flipp_test[self.index]):
                    self.no_face_count = 0

            self.end_time = datetime.datetime.now()

            # Debug detection time
            if self.shared_variables.debug:
                LOG.debug(
                    'Dlib Detection time:' +
                    str(self.end_time - self.start_time),
                    self.shared_variables.name)

            time.sleep(self.sleep_time)  # sleep if wanted

        LOG.info("Ending dlib detection " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 16
    def run(self):

        while True:
            if self.shared_variables.system_running:
                self.frame = self.shared_variables.frame[self.index]

                # Show tracking GREEN
                if self.shared_variables.tracking_box[self.index] is not None:
                    if self.show_tracking:
                        box = self.shared_variables.tracking_box[self.index]
                        topLeft = (int(box[0]), int(box[1]))
                        # bottom-right corner of the same tracking box
                        bottomRight = (int(box[0] + box[2]),
                                       int(box[1] + box[3]))
                        cv2.rectangle(self.frame, topLeft, bottomRight,
                                      (0, 255, 0), 2, 1)

                #show blink data
                if (self.shared_variables.blinks[self.index] is not None
                        and self.shared_variables.eye_state[self.index]
                        is not None):
                    self.draw_label(
                        self.frame, (10, 50),
                        str(int(self.shared_variables.blinks[self.index])) +
                        " " + str(self.shared_variables.eye_state[self.index]))

                #show face
                if self.show_face:
                    if (self.shared_variables.face_image[self.index]
                            is not None):
                        cv2.imshow(
                            'FACE %s' % self.shared_variables.name + "_" +
                            str(self.index),
                            self.shared_variables.face_image[self.index])

                #show expression data
                if (self.shared_variables.expression_result[self.index]
                        is not None):
                    preferred_w = 350
                    font_offset = 50
                    sentiment_argmax = 0  # index of the label drawn in red
                    for i, sentiment in enumerate(
                            self.shared_variables.expression_result[
                                self.index]
                    ):  # 0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral
                        sentiment = round(sentiment * 100, 3)
                        color = ((0, 0, 255) if i == sentiment_argmax else
                                 (255, 0, 0))
                        self.frame = cv2.putText(
                            self.frame,
                            self.get_emotion_by_index(i) + " " +
                            str(sentiment),
                            (preferred_w - 300, i * font_offset + 100),
                            cv2.FONT_HERSHEY_COMPLEX, 1, color, 2)

                # Some face detected
                if self.shared_variables.face_found[self.index]:

                    #show score in terminal
                    #if self.show_detection_score:
                    #    if self.shared_variables.detection_score[self.index] is not None:
                    #        print(self.shared_variables.detection_score[self.index])

                    if (self.show_eyes):
                        if (self.shared_variables.eye_left[self.index]
                                is not None):
                            cv2.imshow(
                                'LeftEYE %s' % self.shared_variables.name +
                                "_" + str(self.index),
                                self.shared_variables.eye_left[self.index])

                        if (self.shared_variables.eye_right[self.index]
                                is not None):
                            cv2.imshow(
                                'RightEYE %s' % self.shared_variables.name +
                                "_" + str(self.index),
                                self.shared_variables.eye_right[self.index])

                    # Show detections BLUE
                    if self.shared_variables.detection_box[
                            self.index] is not None:
                        if self.show_detection:
                            box = self.shared_variables.detection_box[
                                self.index]
                            topLeft = (int(box[0]), int(box[1]))
                            # bottom-right corner of the same detection box
                            bottomRight = (int(box[0] + box[2]),
                                           int(box[1] + box[3]))
                            cv2.rectangle(self.frame, topLeft, bottomRight,
                                          (255, 0, 0), 2, 1)

                            #show score in image
                            if self.show_detection_score:
                                if self.shared_variables.detection_score[
                                        self.index] is not None:
                                    self.draw_label(
                                        self.frame, bottomRight,
                                        str(self.shared_variables.
                                            detection_score[self.index]))

                            #show age and gender
                            if self.shared_variables.age[
                                    self.
                                    index] is not None and self.shared_variables.gender[
                                        self.index] is not None:
                                self.draw_label(
                                    self.frame, topLeft,
                                    str(
                                        int(self.shared_variables.age[
                                            self.index])) + " " +
                                    str(self.shared_variables.gender[
                                        self.index]))

                    # Show Landmarks RED
                    if self.show_landmarks:
                        # loop over the (x, y)-coordinates for the facial landmarks
                        # and draw them on the image
                        for (x,
                             y) in self.shared_variables.landmarks[self.index]:
                            cv2.circle(self.frame, (x, y), 1, (0, 0, 255), -1)

                # show frame
                if self.frame is not None:

                    if self.grayscale:
                        self.frame = cv2.cvtColor(self.frame,
                                                  cv2.COLOR_BGR2GRAY)

                    cv2.imshow(
                        self.shared_variables.name + "_" + str(self.index),
                        self.frame)

                # Create and show backproject frames
                if self.showbackprojectedFrame:
                    if self.shared_variables.face_box[self.index] is not None:
                        if self.do_once:
                            camShifTracker = CAMShiftTracker(
                                self.shared_variables.face_box[self.index],
                                self.frame)
                            self.do_once = False

                        cv2.imshow(
                            'BackImg %s' % self.shared_variables.name + "_" +
                            str(self.index),
                            camShifTracker.getBackProjectedImage(self.frame))

                # close program on Esc or q
                key = cv2.waitKey(25) & 0xFF
                if key == 27 or key == ord('q'):
                    break

        # stop camera
        cv2.destroyAllWindows()
        LOG.info("Stopping imshow " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 17
    def run(self):
        # Load model
        LOG.info("Loading OPENCV model" + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)

        face_cascade = cv2.CascadeClassifier(
            'utils/haarcascade_frontalface_default.xml')
        facial_features_cascade = cv2.CascadeClassifier(
            'utils/haarcascade_facial_features.xml')

        LOG.info("Start opencv detections " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)

        # Start Loop
        while self.shared_variables.system_running:

            self.start_time = datetime.datetime.now()

            frame = self.shared_variables.frame[self.index]

            if self.do_flipp_test:
                frame = imutils.rotate(
                    frame, self.flipp_test_degree * self.flipp_test_nr)

            # Do detection
            if frame is not None:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                landmarksAndFaces = []

                face_patches = face_cascade.detectMultiScale(gray, 1.3, 5)

                # if found faces
                if len(face_patches) > 0:

                    landmarksAndFaces.append(face_patches[0].tolist())

                    for (x, y, w, h) in face_patches:

                        roi_gray = gray[y:y + h, x:x + w]

                        # use facial-feature boxes instead of landmarks
                        for (ex, ey, ew,
                             eh) in facial_features_cascade.detectMultiScale(
                                 roi_gray):
                            landmarksAndFaces.append([x + ex, y + ey, ew, eh])

                    self.no_face_count = 0

                    self.shared_variables.face_found[self.index] = True
                    # Save boxes
                    self.shared_variables.face_box[
                        self.index] = landmarksAndFaces
                    #self.shared_variables.detection_box[self.index] = face_box
                    self.shared_variables.set_detection_box(
                        landmarksAndFaces, self.index)

                    # Do flipp test on detection
                    if self.shared_variables.flipp_test[
                            self.index] and self.do_flipp_test:
                        # save flipp as success
                        degree = (self.shared_variables.flipp_test_degree[
                            self.index] +
                            self.flipp_test_nr * self.flipp_test_degree)

                        degree = degree % 360  # normalize to [0, 360)

                        self.shared_variables.flipp_test_degree[
                            self.index] = degree

                        # log frame change
                        LOG.info(
                            "Flipp test succeeded, added degrees: " +
                            str(self.flipp_test_nr * self.flipp_test_degree),
                            self.shared_variables.name)

                        # end flipp test
                        self.do_flipp_test = False
                        self.flipp_test_nr = 1

                else:
                    # No face
                    self.shared_variables.face_found[self.index] = False

                    # if max face misses has been done, stop tracking and do less detections
                    if self.no_face_count >= self.NO_FACE_MAX:

                        # do flipp test
                        if self.shared_variables.flipp_test[self.index]:

                            # doing flipp test
                            if self.do_flipp_test:
                                self.flipp_test_nr = self.flipp_test_nr + 1

                                # flipp test did not find anything
                                if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                    self.do_flipp_test = False
                                    self.flipp_test_nr = 1

                                    self.sleep_time = self.LONG_SLEEP

                            else:
                                self.do_flipp_test = True

                        else:
                            #self.sleep_time = self.LONG_SLEEP
                            #self.shared_variables.tracking_running = False
                            #LOG.log("Initiate energy save",self.shared_variables.name)
                            pass

                    else:
                        self.no_face_count = self.no_face_count + 1

                    if (self.no_face_count >= self.flipp_test_long_intervall
                            and self.shared_variables.flipp_test[self.index]):
                        self.no_face_count = 0

            self.end_time = datetime.datetime.now()

            # Debug detection time
            if self.shared_variables.debug:
                LOG.debug(
                    'OPENCV Detection time:' +
                    str(self.end_time - self.start_time),
                    self.shared_variables.name)

            time.sleep(self.sleep_time)  # sleep if wanted

        LOG.info("Ending OPENCV detection " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 18
    def run(self):
        with tf.Session() as sess:
            LOG.info("Loading Tensorflow modell " + str(self.index),
                     "SYSTEM-" + self.shared_variables.name)

            # Load model
            self.pnet, self.rnet, self.onet = detect_and_align.create_mtcnn(
                sess, None)

            self.Loaded_model = True

            LOG.info("Start tf detections " + str(self.index),
                     "SYSTEM-" + self.shared_variables.name)

            # Start Loop
            while self.shared_variables.system_running:

                self.start_time = datetime.datetime.now()

                frame = self.shared_variables.frame[self.index]

                if self.do_flipp_test:
                    frame = imutils.rotate(
                        frame, self.flipp_test_degree * self.flipp_test_nr)

                # Do detection
                face_patches, padded_bounding_boxes, landmarks, score = detect_and_align.align_image(
                    frame, self.pnet, self.rnet, self.onet)

                # if found faces
                if len(face_patches) > 0:

                    self.shared_variables.detection_score[self.index] = score

                    self.no_face_count = 0

                    # Save landmark
                    #self.shared_variables.landmarks[self.index] = self.convert_to_dlib_landmarks(landmarks)
                    self.shared_variables.set_landmarks(
                        self.convert_to_dlib_landmarks(landmarks), self.index)

                    # Convert boxes from Tensorflow to OpenCV format
                    face_box = []
                    for padded_box in padded_bounding_boxes:
                        face_box.append(
                            self.convert_tensorflow_box_to_openCV_box(
                                padded_box))

                    # Save boxes
                    self.shared_variables.face_box[self.index] = face_box

                    self.shared_variables.set_detection_box(
                        face_box, self.index)
                    #self.shared_variables.detection_box[self.index] = face_box

                    self.shared_variables.face_found[self.index] = True

                    # Do flipp test on detection
                    if self.shared_variables.flipp_test[
                            self.index] and self.do_flipp_test:
                        # save flipp as success
                        degree = (self.shared_variables.flipp_test_degree[
                            self.index] +
                            self.flipp_test_nr * self.flipp_test_degree)

                        degree = degree % 360  # normalize to [0, 360)

                        self.shared_variables.flipp_test_degree[
                            self.index] = degree

                        # log frame change
                        LOG.info(
                            "Flipp test succeeded, added degrees: " +
                            str(self.flipp_test_nr * self.flipp_test_degree),
                            self.shared_variables.name)

                        # end flipp test
                        self.do_flipp_test = False
                        self.flipp_test_nr = 1

                    # Wake tracking thread
                    #if not self.shared_variables.tracking_running:
                    #    self.sleep_time = self.SHORT_SLEEP

                else:
                    # No face
                    self.shared_variables.face_found[self.index] = False

                    # if max face misses has been done, stop tracking and do less detections
                    if self.no_face_count >= self.NO_FACE_MAX:

                        # do flipp test
                        if self.shared_variables.flipp_test[self.index]:

                            # doing flipp test
                            if self.do_flipp_test:
                                self.flipp_test_nr = self.flipp_test_nr + 1

                                # flipp test did not find anything
                                if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                    self.do_flipp_test = False
                                    self.flipp_test_nr = 1

                                    #self.sleep_time = self.LONG_SLEEP
                                    #self.shared_variables.tracking_running = False
                                    #LOG.log("Initiate energy save",self.shared_variables.name)

                            else:
                                self.do_flipp_test = True

                        else:
                            #self.sleep_time = self.LONG_SLEEP
                            #self.shared_variables.tracking_running = False
                            #LOG.log("Initiate energy save",self.shared_variables.name)
                            pass
                    else:
                        self.no_face_count = self.no_face_count + 1

                    if (self.no_face_count >= self.flipp_test_long_intervall
                            and self.shared_variables.flipp_test[self.index]):
                        self.no_face_count = 0

                self.end_time = datetime.datetime.now()

                # Debug detection time
                if self.shared_variables.debug:
                    LOG.debug(
                        'TF Detection time:' +
                        str(self.end_time - self.start_time),
                        self.shared_variables.name)

                time.sleep(self.sleep_time)  # sleep if wanted
        LOG.info('Ending tf detection' + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
Example no. 19
    def __init__(self, shared_variables=None,
                 address="rtsp://*****:*****@192.168.0.10:554/live.sdp",
                 index=0):
        threading.Thread.__init__(self)
        self.shared_variables = shared_variables
        self.address = address
        LOG.info("Started ipcamera " + str(index) + " from " + address,
                 "SYSTEM-" + self.shared_variables.name)