示例#1
0
def start_instances_for_all_cameras():
    """Detect every attached camera and launch a NORMAL-mode instance per camera."""
    camera_count = countCameras()
    LOG.log("Found %s cameras" % (camera_count), "SYSTEM")

    for camera_index in range(camera_count):
        start_instance('CAM_%s' % (camera_index), camera_index, 'NORMAL')
示例#2
0
def main():
    """Entry point: log startup messages and start a single camera instance (CAM_0)."""
    for startup_message in ("Starting system", "Start all threads"):
        LOG.log(startup_message, "SYSTEM")
    #LOG.log("Find all cameras", "SYSTEM")

    #start_instances_for_all_cameras();
    start_instance('CAM_%s' % (0), 0, 'NORMAL')
示例#3
0
    def run(self):
        """Tracking-thread body.

        Busy-waits until the detection thread reports a first result, builds
        the tracker, then tracks the latest shared frame on every pass while
        `shared_variables.tracking_running` stays True, optionally logging the
        per-frame tracking time.
        """
        # Block until an initial detection is available.
        while not self.shared_variables.detection_done:
            pass

        # Build the tracker once before entering the loop.
        self.create_custom_tracker()

        # Main tracking loop.
        while self.shared_variables.tracking_running:
            if not self.shared_variables.camera_capture.isOpened():
                continue

            self.start_time = datetime.datetime.now()

            # Frame is produced by the camera thread; we only read it here.
            self.frame = self.shared_variables.frame
            self.object_custom_tracking()
            self.end_time = datetime.datetime.now()

            debug_enabled = (self.shared_variables.debug
                             or self.shared_variables.debug_tracking)
            if debug_enabled:
                LOG.log(
                    "Tracking time : " +
                    str(self.end_time - self.start_time),
                    self.shared_variables.name)
    def run(self):
        """Sample skin pixels at the nose and both cheeks, then log the mean face RGB."""
        # Nose: pixels around facial landmark 2.
        x, y = self.get_facial(2)
        nose_points = self.get_approx_positions(x, y, -1, 1)

        # Left cheek: point between landmarks 0 and 3.
        x, y = self.get_more_facial(i1=0, i2=3)
        left_cheek = self.get_approx_positions(x, y, -1, 1)

        # Right cheek: point between landmarks 1 and 4.
        x, y = self.get_more_facial(i1=1, i2=4)
        right_cheek = self.get_approx_positions(x, y, -1, 1)

        # Collect all sampled regions and reduce to a single colour.
        color_array = [nose_points, left_cheek, right_cheek]
        mean_RGB = self.calculate_medium_color(color_array)

        LOG.log("Face RGB: " + str(mean_RGB), "SKIN_COLOR: ")
示例#5
0
def start_instance(instance_name, camera_id, camera_mode='NORMAL'):
    """Open one camera and start its detection, tracking and display threads.

    instance_name -- label used for logging and for the shared-state bundle
    camera_id     -- OpenCV capture index of the camera to open
    camera_mode   -- mode handed to the camera/display thread (default 'NORMAL')
    """
    LOG.log("Capturing Camera %s" % camera_id, instance_name)

    # Grab the physical camera.
    capture = cv2.VideoCapture(camera_id)

    # One shared-state bundle per camera instance.
    shared = shared_variables.Shared_Variables(instance_name, capture)

    # Spin up the worker threads for this instance.
    shared.start_detection_thread()
    shared.start_tracking_thread()
    shared.start_camera_thread(camera_mode)
示例#6
0
    def run(self):
        """Detection-thread main loop (TensorFlow/MTCNN backend).

        Loads the MTCNN detector stages once inside a TF session, then loops
        while `shared_variables.detection_running`: grab the latest shared
        frame, run face detection, publish boxes/landmarks/score through
        `self.shared_variables`, drive the flipp (rotation) test and the
        energy-save sleep interval, and sleep `self.sleep_time` each pass.
        """
        with tf.Session() as sess:
            '''if not self.Loaded_model:
                LOG.log("Loading modell",self.shared_variables.name)

                # Load model
                self.pnet, self.rnet, self.onet = detect_and_align.create_mtcnn(sess, None)

                model_exp = os.path.expanduser(self.model_path)
                if (os.path.isfile(model_exp)):
                    with gfile.FastGFile(model_exp, 'rb') as f:
                        graph_def = tf.GraphDef()
                        graph_def.ParseFromString(f.read())
                        tf.import_graph_def(graph_def, name='')

                self.Loaded_model = True
                '''
            # Load the three MTCNN stage networks (pnet/rnet/onet) once.
            LOG.log("Loading modell", self.shared_variables.name)

            self.pnet, self.rnet, self.onet = detect_and_align.create_mtcnn(
                sess, None)

            LOG.log("Start detections", self.shared_variables.name)

            # Main loop — runs until another thread clears detection_running.
            while self.shared_variables.detection_running:

                if self.shared_variables.camera_capture.isOpened():
                    self.start_time = datetime.datetime.now()

                    # ret_val, frame = self.shared_variables.camera_capture.read()
                    # The frame is produced by the camera thread; read it from
                    # shared state instead of the capture device.
                    frame = self.shared_variables.frame

                    # During a flipp test, rotate the frame by the current
                    # candidate angle before detecting.
                    if self.do_flipp_test:
                        frame = imutils.rotate(
                            frame, self.flipp_test_degree * self.flipp_test_nr)

                    # Do detection
                    face_patches, padded_bounding_boxes, landmarks, score = detect_and_align.align_image(
                        frame, self.pnet, self.rnet, self.onet)

                    # if found faces
                    if len(face_patches) > 0:

                        self.shared_variables.detection_score = score

                        self.no_face_count = 0

                        # Save frames for the display/tracking consumers
                        self.shared_variables.detection_frame = frame
                        self.shared_variables.tracking_and_detection_frame = frame

                        # Save landmark
                        self.shared_variables.landmarks = landmarks

                        # Convert box from Tensorflow to OpenCV format;
                        # only the first detected face is published.
                        face_box = self.convert_tensorflow_box_to_openCV_box(
                            padded_bounding_boxes[0])

                        # Save boxes
                        self.shared_variables.face_box = face_box
                        self.shared_variables.detection_box = face_box

                        # Do flipp test on detection
                        if self.shared_variables.flipp_test and self.do_flipp_test:
                            # save flipp as success
                            degree = self.shared_variables.flipp_test_degree + self.flipp_test_nr * self.flipp_test_degree

                            # NOTE(review): this does not normalize to [0, 360);
                            # plain `degree % 360` was probably intended — confirm.
                            degree = degree - (degree % 360) * 360

                            self.shared_variables.flipp_test_degree = degree

                            # log frame change
                            LOG.log(
                                "Flipp test successful add degree :" +
                                str(self.flipp_test_nr *
                                    self.flipp_test_degree),
                                self.shared_variables.name)

                            # end flipp test
                            self.do_flipp_test = False
                            self.flipp_test_nr = 1

                        # Wake tracking thread and switch to the fast poll rate
                        if not self.shared_variables.tracking_running:
                            self.sleep_time = self.SHORT_SLEEP
                            self.shared_variables.start_tracking_thread()
                            LOG.log("Start detection",
                                    self.shared_variables.name)

                    else:
                        # No face
                        self.shared_variables.face_found = False

                        # if max face misses has been done, stop tracking and do less detections
                        if self.no_face_count >= self.NO_FACE_MAX and self.shared_variables.tracking_running:

                            # do flipp test
                            if self.shared_variables.flipp_test:

                                # doing flipp test
                                if self.do_flipp_test:
                                    self.flipp_test_nr = self.flipp_test_nr + 1

                                    # flipp test exhausted a full turn without a
                                    # hit: give up, slow down and stop tracking
                                    if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                        self.do_flipp_test = False
                                        self.flipp_test_nr = 1

                                        self.sleep_time = self.LONG_SLEEP
                                        self.shared_variables.tracking_running = False
                                        LOG.log("Initiate energy save",
                                                self.shared_variables.name)

                                else:
                                    self.do_flipp_test = True

                            else:
                                self.sleep_time = self.LONG_SLEEP
                                self.shared_variables.tracking_running = False
                                LOG.log("Initiate energy save",
                                        self.shared_variables.name)

                        else:
                            self.no_face_count = self.no_face_count + 1

                        # Periodically reset the miss counter so the flipp test
                        # can re-trigger later.
                        if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test:
                            self.no_face_count = 0

                self.end_time = datetime.datetime.now()

                # Debug detection time
                if self.shared_variables.debug_detection or self.shared_variables.debug:
                    LOG.log(
                        'Detection time:' +
                        str(self.end_time - self.start_time),
                        self.shared_variables.name)

                time.sleep(self.sleep_time)  # sleep if wanted
示例#7
0
    def run(self):
        """Detection-thread main loop (dlib backend).

        Lazily loads the dlib frontal-face detector and landmark predictor,
        waits for the first camera frame, then loops while
        `shared_variables.detection_running`: run `self.object_detection()`,
        publish results through `self.shared_variables`, drive the flipp
        (rotation) test / energy-save logic, and sleep `self.sleep_time`.
        """
        if not self.Loaded_model:
            LOG.log("Loading modell", self.shared_variables.name)

            # Load model — detector + landmark predictor, once per thread
            self.face_detector = dlib.get_frontal_face_detector()
            self.landmarks_predictor = dlib.shape_predictor(
                self.landmarks_model_path)

            #face_cascade = cv2.CascadeClassifier(face_cascade_path)
            self.Loaded_model = True

        LOG.log("Start detections", self.shared_variables.name)

        # Busy-wait until the camera thread has produced a first frame
        while self.shared_variables.frame is None:
            pass

            # Start Loop
        while self.shared_variables.detection_running:
            if self.shared_variables.camera_capture.isOpened():
                self.start_time = datetime.datetime.now()

                # Frame is produced by the camera thread; only read it here
                frame = self.shared_variables.frame

                # During a flipp test, rotate by the current candidate angle
                if self.do_flipp_test:
                    frame = imutils.rotate(
                        frame, self.flipp_test_degree * self.flipp_test_nr)

                    # Do detection
                success, face_box, landmarks, score = self.object_detection()

                # if found faces
                if success:

                    self.shared_variables.detection_score = score

                    self.no_face_count = 0

                    # Save frames for display/tracking consumers
                    self.shared_variables.detection_frame = frame
                    self.shared_variables.tracking_and_detection_frame = frame

                    # Save landmark
                    self.shared_variables.landmarks = landmarks

                    # Save boxes
                    self.shared_variables.face_box = face_box
                    self.shared_variables.detection_box = face_box

                    # Do flipp test on detection
                    if self.shared_variables.flipp_test and self.do_flipp_test:
                        # save flipp as success
                        degree = self.shared_variables.flipp_test_degree + self.flipp_test_nr * self.flipp_test_degree

                        # NOTE(review): this does not normalize to [0, 360);
                        # plain `degree % 360` was probably intended — confirm.
                        degree = degree - (degree % 360) * 360

                        self.shared_variables.flipp_test_degree = degree

                        # log frame change
                        LOG.log(
                            "Flipp test successful add degree :" +
                            str(self.flipp_test_nr * self.flipp_test_degree),
                            self.shared_variables.name)

                        # end flipp test
                        self.do_flipp_test = False
                        self.flipp_test_nr = 1

                    # Wake tracking thread and switch to the fast poll rate
                    if not self.shared_variables.tracking_running:
                        self.sleep_time = self.SHORT_SLEEP
                        self.shared_variables.start_tracking_thread()
                        LOG.log("Start detection", self.shared_variables.name)

                else:
                    # No face
                    self.shared_variables.face_found = False

                    # if max face misses has been done, stop tracking and do less detections
                    if self.no_face_count >= self.NO_FACE_MAX and self.shared_variables.tracking_running:

                        # do flipp test
                        if self.shared_variables.flipp_test:

                            # doing flipp test
                            if self.do_flipp_test:
                                self.flipp_test_nr = self.flipp_test_nr + 1

                                # flipp test exhausted a full turn without a
                                # hit: give up, slow down and stop tracking
                                if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                    self.do_flipp_test = False
                                    self.flipp_test_nr = 1

                                    self.sleep_time = self.LONG_SLEEP
                                    self.shared_variables.tracking_running = False
                                    LOG.log("Initiate energy save",
                                            self.shared_variables.name)

                            else:
                                self.do_flipp_test = True

                        else:
                            self.sleep_time = self.LONG_SLEEP
                            self.shared_variables.tracking_running = False
                            LOG.log("Initiate energy save",
                                    self.shared_variables.name)

                    else:
                        self.no_face_count = self.no_face_count + 1

                    # Periodically reset the miss counter so the flipp test
                    # can re-trigger later
                    if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test:
                        self.no_face_count = 0

            self.end_time = datetime.datetime.now()

            # Debug detection time
            if self.shared_variables.debug_detection or self.shared_variables.debug:
                LOG.log(
                    'Detection time:' + str(self.end_time - self.start_time),
                    self.shared_variables.name)

            time.sleep(self.sleep_time)  # sleep if wanted
示例#8
0
    def run(self):
        """Detection-thread main loop (OpenCV Haar-cascade backend).

        Loads the frontal-face and facial-feature cascades, then loops while
        `shared_variables.system_running`: detect faces in the shared frame
        for this camera index, publish face + feature boxes through
        `self.shared_variables`, drive the flipp (rotation) test logic, and
        sleep `self.sleep_time` each pass.
        """
        # Load model
        LOG.info("Loading OPENCV model" + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)

        face_cascade = cv2.CascadeClassifier(
            'utils/haarcascade_frontalface_default.xml')
        facial_features_cascade = cv2.CascadeClassifier(
            'utils/haarcascade_facial_features.xml')

        LOG.info("Start opencv detections " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)

        # Main loop — runs until another thread clears system_running
        while self.shared_variables.system_running:

            self.start_time = datetime.datetime.now()

            # Frame for this camera, produced by the camera thread
            frame = self.shared_variables.frame[self.index]

            # During a flipp test, rotate by the current candidate angle
            if self.do_flipp_test:
                frame = imutils.rotate(
                    frame, self.flipp_test_degree * self.flipp_test_nr)

            # Do detection
            if frame is not None:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # First entry is the face box; the rest are feature boxes
                landmarksAndFaces = []

                face_patches = face_cascade.detectMultiScale(gray, 1.3, 5)

                # if found faces
                if len(face_patches) > 0:

                    landmarksAndFaces.append(face_patches[0].tolist())

                    for (x, y, w, h) in face_patches:

                        roi_gray = gray[y:y + h, x:x + w]

                        # Append facial-feature boxes (in full-frame
                        # coordinates) — boxes are used instead of landmarks
                        for (ex, ey, ew,
                             eh) in facial_features_cascade.detectMultiScale(
                                 roi_gray):
                            landmarksAndFaces.append([x + ex, y + ey, ew, eh])

                    self.no_face_count = 0

                    self.shared_variables.face_found[self.index] = True
                    # Save boxes
                    self.shared_variables.face_box[
                        self.index] = landmarksAndFaces
                    #self.shared_variables.detection_box[self.index] = face_box
                    self.shared_variables.set_detection_box(
                        landmarksAndFaces, self.index)

                    # Do flipp test on detection
                    if self.shared_variables.flipp_test[
                            self.index] and self.do_flipp_test:
                        # save flipp as success
                        degree = self.shared_variables.flipp_test_degree[
                            self.
                            index] + self.flipp_test_nr * self.flipp_test_degree

                        # NOTE(review): this does not normalize to [0, 360);
                        # plain `degree % 360` was probably intended — confirm.
                        degree = degree - (degree % 360) * 360

                        self.shared_variables.flipp_test_degree[
                            self.index] = degree

                        # log frame change
                        LOG.log(
                            "Flipp test successful add degree :" +
                            str(self.flipp_test_nr * self.flipp_test_degree),
                            self.shared_variables.name)

                        # end flipp test
                        self.do_flipp_test = False
                        self.flipp_test_nr = 1

                else:
                    # No face
                    self.shared_variables.face_found[self.index] = False

                    # if max face misses has been done, stop tracking and do less detections
                    if self.no_face_count >= self.NO_FACE_MAX:

                        # do flipp test
                        # NOTE(review): elsewhere in this loop flipp_test is
                        # indexed by self.index — here the whole container is
                        # tested, which is always truthy if non-empty; confirm
                        # `flipp_test[self.index]` was intended.
                        if self.shared_variables.flipp_test:

                            # doing flipp test
                            if self.do_flipp_test:
                                self.flipp_test_nr = self.flipp_test_nr + 1

                                # flipp test did not find anything
                                if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                    self.do_flipp_test = False
                                    self.flipp_test_nr = 1

                                    self.sleep_time = self.LONG_SLEEP

                            else:
                                self.do_flipp_test = True

                        else:
                            #self.sleep_time = self.LONG_SLEEP
                            #self.shared_variables.tracking_running = False
                            #LOG.log("Initiate energy save",self.shared_variables.name)
                            pass

                    else:
                        self.no_face_count = self.no_face_count + 1

                    # Periodically reset the miss counter so the flipp test
                    # can re-trigger later
                    if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test[
                            self.index]:
                        self.no_face_count = 0

            self.end_time = datetime.datetime.now()

            # Debug detection time
            if self.shared_variables.debug:
                LOG.debug(
                    'OPENCV Detection time:' +
                    str(self.end_time - self.start_time),
                    self.shared_variables.name)

            time.sleep(self.sleep_time)  # sleep if wanted

        LOG.info("Ending OPENCV detection " + str(self.index),
                 "SYSTEM-" + self.shared_variables.name)
示例#9
0
def main():
    """Entry point: create the shared-state bundle and launch the controller thread."""
    LOG.log("Starting system", "SYSTEM")

    state = shared_variables.Shared_Variables(name="shared_version")
    controller_thread = controller.parse_controller(state)
    controller_thread.start()