Code example #1
def main():
    LOG.log("Starting system", "SYSTEM")
    LOG.log("Start all threads", "SYSTEM")
    #LOG.log("Find all cameras", "SYSTEM")

    #start_instances_for_all_cameras()
    start_instance('CAM_%s' % 0, 0, 'NORMAL')
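These snippets rely on a project-level LOG helper that is not shown in the excerpt. A minimal sketch of what such a tagged logger could look like, assuming only the LOG.log(message, tag) call shape visible in the code (the implementation itself is an assumption):

import datetime


class _Log:
    # Hypothetical stand-in for the project's LOG module; only the
    # log(message, tag) signature is taken from the snippets.
    def log(self, message, tag):
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print('[%s] [%s] %s' % (timestamp, tag, message))


LOG = _Log()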
Code example #2
def start_instances_for_all_cameras():
    # start all cameras
    number_of_cameras = countCameras()

    LOG.log("Found %s cameras" % (number_of_cameras), "SYSTEM")
    for i in range(number_of_cameras):
        start_instance('CAM_%s' % (i), i, 'NORMAL')
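countCameras() is called above but not defined in the excerpt. A common way to implement it with OpenCV is to probe device indices until one fails to open; the sketch below assumes sequentially numbered camera IDs (the function name comes from the call above, the body is an assumption):

import cv2


def countCameras(max_probe=10):
    # Probe device indices 0..max_probe-1 and count how many open;
    # assumes the OS assigns camera IDs sequentially.
    count = 0
    for index in range(max_probe):
        capture = cv2.VideoCapture(index)
        if capture.isOpened():
            count += 1
            capture.release()
        else:
            break
    return count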
Code example #3
def run():
    while True:
        # Make calculations here (placeholder); the loop currently
        # exits after a single pass
        break

    result = (0, 0, 0)

    LOG.log(result, 'HUD_COLOR_RGB')

    task_done()
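The task_done() call suggests this run() is consumed from a queue.Queue-style work queue. A minimal sketch of such wiring, assuming that design (the queue, worker, and threading setup below are all assumptions; only run() and task_done() appear in the excerpt):

import queue
import threading

tasks = queue.Queue()


def worker():
    while True:
        job = tasks.get()      # block until a task arrives
        try:
            job()              # e.g. the run() function above
        finally:
            tasks.task_done()  # mark the task as processed


threading.Thread(target=worker, daemon=True).start()
tasks.put(lambda: None)        # enqueue a task
tasks.join()                   # wait until all queued tasks finish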
Code example #4
def start_instance(instance_name, camera_id, camera_mode='NORMAL'):

    LOG.log("Capturing Camera %s" % camera_id, instance_name)

    # Capture camera
    _camera_capture = cv2.VideoCapture(camera_id)

    # initiate shared variables instance
    _shared_variables = shared_variables.Shared_Variables(
        instance_name, _camera_capture)
    # detection Thread
    _shared_variables.start_detection_thread()

    # tracking Thread
    _shared_variables.start_tracking_thread()

    # show camera thread
    _shared_variables.start_camera_thread(camera_mode)
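start_instance() depends on a shared_variables.Shared_Variables class that bundles the capture handle, shared state, and the worker threads. Below is a minimal skeleton of the shape that class would need to satisfy the calls in these snippets; the constructor arguments and the three start_*_thread methods come from the code above, everything else is an assumption:

import threading


class Shared_Variables:
    def __init__(self, name, camera_capture):
        self.name = name
        self.camera_capture = camera_capture
        self.frame = None             # latest frame, shared across threads
        self.detection_done = False   # set once the first detection finishes
        self.detection_running = True
        self.tracking_running = False

    def start_detection_thread(self):
        threading.Thread(target=self._detection_loop, daemon=True).start()

    def start_tracking_thread(self):
        self.tracking_running = True
        threading.Thread(target=self._tracking_loop, daemon=True).start()

    def start_camera_thread(self, camera_mode):
        threading.Thread(target=self._camera_loop,
                         args=(camera_mode,), daemon=True).start()

    # The loop bodies live in the thread classes shown in the later
    # examples; these are placeholders.
    def _detection_loop(self):
        pass

    def _tracking_loop(self):
        pass

    def _camera_loop(self, camera_mode):
        pass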
Code example #5
    def run(self):
        # wait until the detection thread publishes its first result
        while not self.shared_variables.detection_done:
            pass  # busy-wait on the shared flag

        # initiate tracker
        self.create_custom_tracker()

        # tracking loop
        while self.shared_variables.tracking_running:

            if self.shared_variables.camera_capture.isOpened():
                self.start_time = datetime.datetime.now()

                #ret_val, self.frame = self.shared_variables.camera_capture.read()
                self.frame = self.shared_variables.frame
                self.object_custom_tracking()
                self.end_time = datetime.datetime.now()

                if self.shared_variables.debug or self.shared_variables.debug_tracking:
                    LOG.log("Tracking time : " + str(self.end_time - self.start_time),self.shared_variables.name)
Code example #6
def main():
    LOG.log("Starting system", "SYSTEM")
    LOG.log("Setting up system", "SYSTEM")
    init()  # one-time setup

    LOG.log("System is running", "SYSTEM")
    run()  # run loop
Code example #7
    def run(self):
        # Load model
        LOG.log("Loading modell", self.shared_variables.name)

        face_cascade = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')
        facial_features_cascade = cv2.CascadeClassifier(
            'haarcascade_facial_features.xml')

        LOG.log("Start detections", self.shared_variables.name)

        # Start Loop
        while self.shared_variables.detection_running:

            if self.shared_variables.camera_capture.isOpened():
                self.start_time = datetime.datetime.now()

                # ret_val, frame = self.shared_variables.camera_capture.read()
                frame = self.shared_variables.frame

                if self.do_flipp_test:
                    frame = imutils.rotate(
                        frame, self.flipp_test_degree * self.flipp_test_nr)

                # Do detection
                if frame is not None:
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                    landmarksAndFaces = []

                    face_patches = face_cascade.detectMultiScale(gray, 1.3, 5)

                    for (x, y, w, h) in face_patches:

                        roi_gray = gray[y:y + h, x:x + w]

                        # Use the facial-feature bounding boxes instead of point landmarks
                        for (ex, ey, ew,
                             eh) in facial_features_cascade.detectMultiScale(
                                 roi_gray):
                            landmarksAndFaces.append([x + ex, y + ey, ew, eh])

                        # append the face box itself as well
                        landmarksAndFaces.append([x, y, w, h])

                    # if found faces
                    if len(face_patches) > 0:

                        self.no_face_count = 0

                        self.shared_variables.face_found = True

                        # Save frames
                        self.shared_variables.detection_frame = frame
                        self.shared_variables.tracking_and_detection_frame = frame

                        # Save boxes
                        self.shared_variables.face_box = landmarksAndFaces
                        self.shared_variables.detection_box = landmarksAndFaces

                        # Do flipp test on detection
                        if self.shared_variables.flipp_test and self.do_flipp_test:
                            # save flipp as success
                            degree = self.shared_variables.flipp_test_degree + self.flipp_test_nr * self.flipp_test_degree

                            degree = degree % 360  # wrap into [0, 360)

                            self.shared_variables.flipp_test_degree = degree

                            # log frame change
                            LOG.log(
                                "Flipp test successful add degree :" +
                                str(self.flipp_test_nr *
                                    self.flipp_test_degree),
                                self.shared_variables.name)

                            # end flipp test
                            self.do_flipp_test = False
                            self.flipp_test_nr = 1

                        # Wake tracking thread
                        if not self.shared_variables.tracking_running:
                            self.sleep_time = self.SHORT_SLEEP
                            self.shared_variables.start_tracking_thread()

                    else:
                        # No face
                        self.shared_variables.face_found = False

                        # if max face misses has been done, stop tracking and do less detections
                        if self.no_face_count >= self.NO_FACE_MAX and self.shared_variables.tracking_running:

                            # do flipp test
                            if self.shared_variables.flipp_test:

                                # doing flipp test
                                if self.do_flipp_test:
                                    self.flipp_test_nr = self.flipp_test_nr + 1

                                    # flipp test did not find anything
                                    if self.flipp_test_nr * self.flipp_test_degree >= 360:
                                        self.do_flipp_test = False
                                        self.flipp_test_nr = 1

                                        self.sleep_time = self.LONG_SLEEP
                                        self.shared_variables.tracking_running = False
                                        LOG.log("Initiate energy save",
                                                self.shared_variables.name)

                                else:
                                    self.do_flipp_test = True

                            else:
                                self.sleep_time = self.LONG_SLEEP
                                self.shared_variables.tracking_running = False
                                LOG.log("Initiate energy save",
                                        self.shared_variables.name)

                        else:
                            self.no_face_count = self.no_face_count + 1

                        if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test:
                            self.no_face_count = 0

                self.end_time = datetime.datetime.now()

                # Debug detection time
                if self.shared_variables.debug_detection or self.shared_variables.debug:
                    LOG.log(
                        'Detection time: ' +
                        str(self.end_time - self.start_time),
                        self.shared_variables.name)

                time.sleep(self.sleep_time)  # sleep between loop iterations
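cv2.CascadeClassifier is given bare file names above, so the XML files must sit in the working directory. The stock cascades ship with the opencv-python package and can be loaded via cv2.data.haarcascades; note that haarcascade_frontalface_default.xml is bundled, while haarcascade_facial_features.xml is not a stock OpenCV cascade and has to be supplied with the project:

import os

import cv2

# Load the bundled frontal-face cascade; cv2.data.haarcascades is the
# directory the stock XML files are installed in.
face_cascade = cv2.CascadeClassifier(
    os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml'))

# Not part of the stock set; the relative path here is an assumption.
facial_features_cascade = cv2.CascadeClassifier('haarcascade_facial_features.xml')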
Code example #8
def run():
    with tf.Session() as sess:

        LOG.log("Loading modell","SYSTEM")
        # temporary: expose the model handles as globals
        global pnet
        global rnet
        global onet
        global images_placeholder
        global embeddings
        global phase_train_placeholder
        
        pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)
        
        model_exp = os.path.expanduser(model_path)
        if os.path.isfile(model_exp):
            # print('Model filename: %s' % model_exp)
            with gfile.FastGFile(model_exp, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
            
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")            
    
    
        # set up tensorflow model
        #load_model(model_path)

        LOG.log("Start system","SYSTEM")
        while True:
         
            if cam_cap.isOpened():
                start = time.time()
                # Get current frame
                global frame
                ret, frame = cam_cap.read()


                # Count tick
                global ticks
                ticks = ticks + 1

                # Do detection
                global DETECTION_SLEEP_TICKS
                if DETECTION_SLEEP_TICKS <= ticks:

                    # print("Detection")
                    global face_box
                    global face_found
                    
                    global show_id
                    global show_bb
                    global show_landmarks
    
                    # Do detection
                    face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)

                    # if found faces
                    if len(face_patches) > 0:
                        face_patches = np.stack(face_patches)
                        feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
       
                        embs = sess.run(embeddings, feed_dict=feed_dict)

                        # print('Matches in frame:')
                        for i in range(len(embs)):
                            bb = padded_bounding_boxes[i]

                            if show_bb:
                                cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)

                            if show_landmarks:
                                for j in range(5):
                                    size = 1
                                    top_left = (int(landmarks[i, j]) - size, int(landmarks[i, j + 5]) - size)
                                    bottom_right = (int(landmarks[i, j]) + size, int(landmarks[i, j + 5]) + size)
                                    cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
        
                        # Convert box to OpenCV format
                        face_box = convert_tensorflow_box_to_openCV_box(padded_bounding_boxes[0])
                        # print(face_box)
                        
                        # if running custom tracker this is needed
                        update_custom_tracker()

                        face_found = True

                    else:
                        # No face found in this frame
                        face_found = False

                    # Adjust the detection interval depending on whether a face was found
                    if face_found:
                        ticks = 0
                        global FAST_DETECTION_SLEEP_TICKS
                        DETECTION_SLEEP_TICKS = FAST_DETECTION_SLEEP_TICKS
                    else:
                        # Make less detections if not
                        ticks = 0
                        global SLOW_DETECTION_SLEEP_TICKS
                        DETECTION_SLEEP_TICKS = SLOW_DETECTION_SLEEP_TICKS 
                else:
                    # Do tracking
                    if face_found:
                        object_custom_tracking()

                # print fps
                end = time.time()

                seconds = end - start
                fps = round(1 / seconds, 2) if seconds != 0 else 0.0

                if show_fps:
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(fps), (0, 100), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

                
                # Show the camera feed
                cv2.imshow('Detection GUI', frame)

                # Close the program when 'q' is pressed
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cam_cap.release()
                    cv2.destroyAllWindows()
                    break

                time.sleep(0.2)  # throttle the loop
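convert_tensorflow_box_to_openCV_box() is called above but not defined in the excerpt. The MTCNN alignment step returns boxes as (x1, y1, x2, y2) corners, while the OpenCV tracking API expects (x, y, w, h); a plausible conversion is sketched below (the function name comes from the call above, the body and the box conventions are assumptions):

def convert_tensorflow_box_to_openCV_box(box):
    # (x1, y1, x2, y2) corner coordinates -> (x, y, width, height)
    x1, y1, x2, y2 = box[:4]
    return (int(x1), int(y1), int(x2 - x1), int(y2 - y1))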