Code example #1
def inferencer(results, frameBuffer, model, camera_width, camera_height):

    engine = DetectionEngine(model)

    while True:

        if frameBuffer.empty():
            continue

        # Run inference.
        color_image = frameBuffer.get()
        prepimg = color_image[:, :, ::-1].copy()
        prepimg = Image.fromarray(prepimg)

        tinf = time.perf_counter()
        ans = engine.DetectWithImage(prepimg,
                                     threshold=0.5,
                                     keep_aspect_ratio=True,
                                     relative_coord=False,
                                     top_k=10)
        print(time.perf_counter() - tinf, "sec")
        results.put(ans)
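
The inferencer worker above pulls frames from frameBuffer and pushes detection results to results, so it is meant to be driven from a separate process. A minimal launch sketch, assuming multiprocessing queues and an OpenCV capture loop feeding the buffer (none of this is from the original project):

# Hypothetical driver for the inferencer() worker above.
import multiprocessing as mp

import cv2

if __name__ == '__main__':
    camera_width, camera_height = 640, 480
    results = mp.Queue()
    frameBuffer = mp.Queue(maxsize=10)
    model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'  # placeholder path

    worker = mp.Process(target=inferencer,
                        args=(results, frameBuffer, model,
                              camera_width, camera_height),
                        daemon=True)
    worker.start()

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if not frameBuffer.full():
            frameBuffer.put(frame)
        if not results.empty():
            print(len(results.get()), "objects detected")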
Code example #2
File: imagesearchapp.py Project: mcrlab/Coral
    def __init__(self):

        self.frame = None
        self.thread = None
        self.stopEvent = None
        
        self.camera = cv2.VideoCapture(0)
        self.camera.set(3, WIDTH)
        self.camera.set(4, HEIGHT)
        
        self.engine = DetectionEngine('./mobilenet_ssd_v1_coco_quant_postprocess_edgetpu.tflite')
        self.labels = ReadLabelFile('./coco_labels.txt')
        
        self.root = tki.Tk()
        self.root.bind('<Escape>', lambda e: self.onClose())
        self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
        self.panel = None

        self.stopEvent = threading.Event()
        self.thread = threading.Thread(target=self.videoLoop, args=())
        self.thread.daemon = True
        self.thread.start()
Code example #3
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir,default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=10,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.3,
                        help='class score threshold')
    args = parser.parse_args()

    print("Loading %s with %s labels."%(args.model, args.labels))
    engine = DetectionEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()
    def user_callback(image, svg_canvas):
      nonlocal last_time
      start_time = time.monotonic()
      objs = engine.DetectWithImage(image, threshold=args.threshold,
                                    keep_aspect_ratio=True, relative_coord=True,
                                    top_k=args.top_k)
      end_time = time.monotonic()
      text_lines = [
          'Inference: %.2f ms' %((end_time - start_time) * 1000),
          'FPS: %.2f fps' %(1.0/(end_time - last_time)),
      ]
      print(' '.join(text_lines))
      last_time = end_time
      
      # keep only relevant classes, e.g. person, car, truck, train, stop sign, traffic lights, etc.
      objs = [obj for obj in objs if obj.label_id in [0,1,2,3,5,6,7,9,12]]
      # non max suppression
      objs.sort(key=lambda x: x.score)
      keep = []
      while objs:
          max_score_obj = objs.pop()
          keep.append(max_score_obj)
          tmp = []
          for obj in objs:
              if iou(obj.bounding_box.flatten().tolist(),max_score_obj.bounding_box.flatten().tolist()) < 0.1:
                  tmp.append(obj)
          objs = tmp
          
      generate_svg(svg_canvas, keep, labels, text_lines)

    result = gstreamer.run_pipeline(user_callback)
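
The non-max suppression loop above calls an iou helper that is not shown in this excerpt. A plausible minimal implementation over [xmin, ymin, xmax, ymax] boxes might look like the following (an assumption about the missing helper, not the project's actual code):

# Hypothetical sketch of the iou() helper assumed by the NMS loop above.
def iou(box_a, box_b):
    """Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes."""
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    intersection = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0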
Code example #4
File: CoralMain.py Project: cmgrier/BeachBotsMQP
    def __init__(self):
        """
        Initializations
        """

        print("[INFO] Beginning initialization of Coral Main File")

        # initialize the labels dictionary
        self.labels = {}
        self.labels[0] = "Can"

        # Argument Parser for model path
        ap = argparse.ArgumentParser()
        ap.add_argument("-m", "--model", required=True,
                        help="path to TensorFlow Lite object detection model")
        args = vars(ap.parse_args())

        # load the Google Coral object detection model
        print("[INFO] loading Coral model...")
        self.model = DetectionEngine(args["model"])

        # initialize the video stream and allow the camera sensor to warmup
        self.vs = VideoStream(src=0).start()
        time.sleep(2.0)

        print("Finished Initialization of Model and Video")

        # Socket Variables
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect(('192.168.1.230', 8485))
        self.connection = self.client_socket.makefile('wb')
        self.img_counter = 0

        # Video Variables
        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
        self.h = 480  # DO NOT CHANGE
        self.w = 500  # DO NOT CHANGE
        self.threshold = 0.6
Code example #5
    def __init__(self,
                 detection_filter,
                 model_path,
                 detection_lock,
                 device_path=None):
        """
        Args:
          model_path (str): Path to a TensorFlow Lite (``.tflite``) file.
            This model must be `compiled for the Edge TPU
            <https://coral.withgoogle.com/docs/edgetpu/compiler/>`_; otherwise, it simply executes
            on the host CPU.
          device_path (str): The device path for the Edge TPU this engine should use. This argument
            is needed only when you have multiple Edge TPUs and more inference engines than
            available Edge TPUs. For details, read `how to use multiple Edge TPUs
            <https://coral.withgoogle.com/docs/edgetpu/multiple-edgetpu/>`_.

        Raises:
          ValueError: If the model's output tensor size is not 4.
        """
        _LOGGER.warn('Initializing filtered detection engine')
        DetectionEngine.__init__(self, model_path, device_path)
        self._filter = detection_filter
        self._detection_lock = detection_lock
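
The __init__ above only extends DetectionEngine with a filter and a lock, so construction is the interesting part. A minimal sketch, assuming a class name of FilteredDetectionEngine and a filter that keeps only person detections (both are assumptions; only model_path and device_path are documented above):

# Hypothetical construction sketch; class and filter names are assumptions.
import threading

def person_filter(detections):
    # Keep only COCO "person" detections (label_id 0).
    return [d for d in detections if d.label_id == 0]

engine = FilteredDetectionEngine(
    detection_filter=person_filter,
    model_path='mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite',
    detection_lock=threading.Lock())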
Code example #6
    def __init__(self,
                 appDuration=50,
                 cameraResolution=(304, 304),
                 useVideoPort=True,
                 btDisconMode=False,
                 serialPort='/dev/rfcomm0',
                 mailboxName='abc',
                 minObjectScore=0.35):
        self.cameraResolution = cameraResolution
        self.useVideoPort = useVideoPort
        self.btDisconMode = btDisconMode
        self.serialPort = serialPort
        self.mailboxName = mailboxName
        self.appDuration = appDuration  #seconds to run
        self.minObjectScore = minObjectScore

        modelFile = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
        objectLabelsFile = 'coco_labels.txt'
        print("Reading Model: ", modelFile)
        self.engine = DetectionEngine(modelFile)
        print("Reading object labels: ", objectLabelsFile)
        self.labels = self.readLabelFile(objectLabelsFile)
        print("Minimal object score: ", self.minObjectScore)
Code example #7
def ssd_inferencer(results, frameBuffer, model, device):

    ssd_engine = None
    ssd_engine = DetectionEngine(model, device)
    print("Loaded Graphs!!! (SSD)")

    while True:

        if frameBuffer.empty():
            continue

        # Run inference.
        color_image = frameBuffer.get()
        prepimg_ssd = color_image[:, :, ::-1].copy()
        prepimg_ssd = Image.fromarray(prepimg_ssd)
        tinf = time.perf_counter()
        result_ssd = ssd_engine.DetectWithImage(prepimg_ssd,
                                                threshold=0.5,
                                                keep_aspect_ratio=True,
                                                relative_coord=False,
                                                top_k=10)
        print(time.perf_counter() - tinf, "sec (SSD)")
        results.put(result_ssd)
Code example #8
File: detect.py Project: Tubbz-alt/AAMS
def main():
    default_model_dir = '../models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.4,
                        help='class score threshold')
    args = parser.parse_args()

    # load the label file
    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = DetectionEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()

    def user_callback(image, svg_canvas):
        # get the inference time
        nonlocal last_time
        start_time = time.monotonic()
        # perform the image detection
        objs = engine.detect_with_image(image,
                                        threshold=args.threshold,
                                        keep_aspect_ratio=True,
                                        relative_coord=True,
                                        top_k=args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %.2f fps' % (1.0 / (end_time - last_time)),
        ]
        print(' '.join(text_lines))
        last_time = end_time
        # generates the image for viewing
        generate_svg(svg_canvas, objs, labels, text_lines)

    result = gstreamer.run_pipeline(user_callback)
Code example #9
def run_two_models_one_tpu(classification_model, detection_model, image_name,
                           num_inferences, batch_size):
    """Runs two models ALTERNATIVELY using one Edge TPU.

  It runs classification model `batch_size` times and then switch to run
  detection model `batch_size` time until each model is run `num_inferences`
  times.

  Args:
    classification_model: string, path to classification model
    detection_model: string, path to detection model.
    image_name: string, path to input image.
    num_inferences: int, number of inferences to run for each model.
    batch_size: int, how many inferences to run on one model before switching
      to the other.

  Returns:
    double, wall time it takes to finish the job.
  """
    start_time = time.perf_counter()
    engine_a = ClassificationEngine(classification_model)
    # `engine_b` shares the same Edge TPU as `engine_a`
    engine_b = DetectionEngine(detection_model, engine_a.device_path())
    with open_image(image_name) as image:
        # Resized image for `engine_a`, `engine_b`.
        tensor_a = get_input_tensor(engine_a, image)
        tensor_b = get_input_tensor(engine_b, image)

    num_iterations = (num_inferences + batch_size - 1) // batch_size
    for _ in range(num_iterations):
        # Using `classify_with_input_tensor` and `detect_with_input_tensor` on purpose to
        # exclude image down-scale cost.
        for _ in range(batch_size):
            engine_a.classify_with_input_tensor(tensor_a, top_k=1)
        for _ in range(batch_size):
            engine_b.detect_with_input_tensor(tensor_b, top_k=1)
    return time.perf_counter() - start_time
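
A short invocation sketch for the benchmark above; the model and image file names are placeholders, only the parameters come from the docstring:

# Hypothetical invocation with placeholder file names.
wall_time = run_two_models_one_tpu(
    classification_model='mobilenet_v2_1.0_224_quant_edgetpu.tflite',
    detection_model='mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite',
    image_name='test_image.jpg',
    num_inferences=2000,
    batch_size=10)
print('Wall time: %.2f s' % wall_time)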
Code example #10
    def __init__(self, path):
        '''Initialize ros publisher, ros subscriber'''
        # topic where we publish
        self.font_path = "/home/pi/python/cascadia_font/CascadiaCode-Regular-VTT.ttf"
        self.font = ImageFont.truetype(self.font_path, 15)

        #self.engine = DetectionEngine('/home/pi/customModels/output_tflite_graph_edgetpu-3.tflite')
        #self.engine = DetectionEngine('/home/pi/customModels/output_tflite_ssd_v2_corrected_graph_edgetpu-1000.tflite')
        self.engine = DetectionEngine(path + '/model.tflite')
        #self.engine = DetectionEngine('/home/pi/customModels/exampleRobot/output_tflite_graph_edgetpu.tflite')

        #self.labels = self.ReadLabelFile('/home/pi/customModels/totoro_label.txt')

        #self.engine = DetectionEngine('/home/pi/TPU-MobilenetSSD/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
        #self.labels = self.ReadLabelFile('/home/pi/TPU-MobilenetSSD/coco_labels.txt')
        self.labels = self.ReadLabelFile(path + '/labels.txt')
        # self.labels = self.ReadLabelFile('/home/pi/customModels/exampleRobot/exampleRobot_labels.txt')

        self.image_pub = rospy.Publisher("/output/image_detected/compressed",
                                         CompressedImage,
                                         queue_size=1)
        # self.bridge = CvBridge()
        self.tpu_objects_pub = rospy.Publisher("/tpu_objects",
                                               tpu_objects,
                                               queue_size=1)

        # subscribed Topic
        self.subscriber = rospy.Subscriber("/output/image_raw/compressed",
                                           CompressedImage,
                                           self.callback,
                                           queue_size=1)

        self.velocity_publisher = rospy.Publisher('/cmd_vel',
                                                  Twist,
                                                  queue_size=1)
        self.vel_msg = Twist()
        rospy.init_node('image_feature', anonymous=False)
Code example #11
def make_tflite_face_getter():
    from edgetpu.detection.engine import DetectionEngine
    camera = cv2.VideoCapture(0)
    cameraIndex = 0
    if not camera.isOpened():
        camera = cv2.VideoCapture(1)
        cameraIndex = 1

    width, height = int(camera.get(3)), int(camera.get(4))

    engine = DetectionEngine(
        './models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite')

    def zoomer():
        nonlocal cameraIndex
        nonlocal camera

        if not camera.isOpened():
            camera.release()
            print(
                "[error] couldn't open camera. Aborting and trying new index.")
            cameraIndex += 1
            cameraIndex = cameraIndex % 2
            camera = cv2.VideoCapture(cameraIndex)
            return []

        success, img = camera.read()
        if not success:
            print("[error] Could't read from webcam.")
            return []

        ans = engine.detect_with_image(Image.fromarray(img),
                                       threshold=0.05,
                                       keep_aspect_ratio=False,
                                       relative_coord=False,
                                       top_k=10)

        def result_getter(face):
            x, y, x2, y2 = list(map(int, face.bounding_box.flatten().tolist()))
            w = x2 - x
            h = y2 - y
            return (img[y:y + h, x:x + w], (x, y))

        if ans:
            return list(map(result_getter, ans))
        return []

    return zoomer, width, height
Code example #12
class PreppedQueueProcessor(threading.Thread):
    def __init__(self, cameras, prepped_frame_queue):

        threading.Thread.__init__(self)
        self.cameras = cameras
        self.prepped_frame_queue = prepped_frame_queue

        # Load the edgetpu engine and labels
        self.engine = DetectionEngine(PATH_TO_CKPT)
        self.labels = ReadLabelFile(PATH_TO_LABELS)

    def run(self):
        # process queue...
        while True:
            frame = self.prepped_frame_queue.get()

            # Actual detection.
            objects = self.engine.DetectWithInputTensor(
                frame['frame'], threshold=frame['region_threshold'], top_k=3)
            # print(self.engine.get_inference_time())

            # parse and pass detected objects back to the camera
            parsed_objects = []
            for obj in objects:
                box = obj.bounding_box.flatten().tolist()
                parsed_objects.append({
                    'frame_time':
                    frame['frame_time'],
                    'label_id':
                    obj.label_id,
                    'name':
                    str(self.labels[obj.label_id]),
                    'score':
                    float(obj.score),
                    'xmin':
                    int((box[0] * frame['region_size']) +
                        frame['region_x_offset']),
                    'ymin':
                    int((box[1] * frame['region_size']) +
                        frame['region_y_offset']),
                    'xmax':
                    int((box[2] * frame['region_size']) +
                        frame['region_x_offset']),
                    'ymax':
                    int((box[3] * frame['region_size']) +
                        frame['region_y_offset'])
                })
            self.cameras[frame['camera_name']].add_objects(parsed_objects)
Code example #13
class object_detection:
    MODEL_V1 = 'models/ssd_mobilenet_v1_coco_quant_postprocess_edgetpu.tflite'
    MODEL_V2 = 'models/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite'
    LABELS = 'models/coco_labels.txt'

    def __init__(self,
                 threshold=0.5,
                 num_results=10,
                 model=MODEL_V2,
                 labels=LABELS):
        self.engine = DetectionEngine(model)
        self.model_labels = read_label_file(labels)
        self.objs = None
        self.boxes = None
        self.scores = None
        self.labels = None
        self.threshold = threshold
        self.num_results = num_results

    def set_threshold(self, num):
        self.threshold = num

    def set_max_results(self, num):
        self.num_results = num

    def detect(self, img):
        img = Image.fromarray(img)
        self.objs = self.engine.detect_with_image(img,
                                                  threshold=self.threshold,
                                                  keep_aspect_ratio=True,
                                                  relative_coord=False,
                                                  top_k=self.num_results)
        self.boxes = [obj.bounding_box.flatten().tolist() for obj in self.objs]
        self.scores = [obj.score for obj in self.objs]
        self.labels = [self.model_labels[obj.label_id] for obj in self.objs]
        return self.objs

    def get_bounding_boxes(self):
        return self.boxes

    def get_scores(self):
        return self.scores

    def get_labels(self):
        return self.labels
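
The detect method above wraps the incoming array with Image.fromarray itself, so callers can pass a NumPy image directly. A minimal usage sketch with an OpenCV capture (the BGR-to-RGB conversion is an assumption about how a caller would prepare frames):

# Hypothetical usage sketch.
import cv2

detector = object_detection(threshold=0.6, num_results=5)
cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    detector.detect(rgb)
    for box, score, label in zip(detector.get_bounding_boxes(),
                                 detector.get_scores(),
                                 detector.get_labels()):
        print(label, score, box)
cap.release()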
Code example #14
File: dad_watch.py Project: paulvee/dad-watcher
def init():
    global labels, model, vs, fps

    # set the GPIO mode
    GPIO.setmode(GPIO.BCM)
    if DEBUG:
        GPIO.setwarnings(True)
    else:
        GPIO.setwarnings(False)

    # set GPIO port as output driver for the beeper
    GPIO.setup(BEEPER, GPIO.OUT)

    # the core temp must be at the lowest at startup, so report it.
    logger.info("Core temp is {} degrees C".format(get_cpu_temp()))

    # initialize the labels dictionary
    print("parsing class labels...")
    labels = {}
    # loop over the class labels file
    for row in open("/home/pi/edgetpu_models/coco_labels.txt"):
        # unpack the row and update the labels dictionary
        (classID, label) = row.strip().split(maxsplit=1)
        labels[int(classID)] = label.strip()

    # load the Google Coral tpu object detection model
    print("loading Coral model...")
    model = DetectionEngine(
        "/home/pi/edgetpu_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
    )

    # initialize the video stream and allow the camera sensor to warmup
    print("starting video stream...")
    vs = VideoStream(src=0).start()  # webcam
    #    vs = VideoStream(usePiCamera=True).start()  # Picam
    # let the camera sensor warm-up
    time.sleep(2.0)

    if not DAEMON:
        # start the frames per second throughput estimator
        fps = FPS().start()

    beep()
Code example #15
def init(input_src, faceModel, emotionModel):
    # initiate camera feed
    if input_src == 'csi':
        stream_thread = CsiThread('Csi Thread', gstreamer_pipeline())
    if input_src == 'usb':
        stream_thread = UsbThread('USB Thread', 0)

    stream_thread.start()

    # initialise models
    faceEngine = DetectionEngine(faceModel)
    global emotionList
    emotionList = [
        'angry', 'disgust', 'scared', 'happy', 'sad', 'surprised', 'neutral'
    ]
    emotionNet = load_model(emotionModel, compile=False)

    # initialise tracker
    tracker = DeepSortTracker()
    return stream_thread, faceEngine, emotionNet, tracker
Code example #16
File: detector.py Project: pwrozycki/monitoring
class SynchronizedDetectionEngine(Engine):
    @inject
    def __init__(self, config: ConfigProvider):
        from edgetpu.detection.engine import DetectionEngine
        self._engine = DetectionEngine(config.detector_model_file)
        self._engine_lock = Lock()
        self._pending_processing_start = 0

    def detect(self, img, threshold):
        with self._engine_lock:
            self._pending_processing_start = time.monotonic()
            result = self._engine.DetectWithImage(img,
                                                  threshold=threshold,
                                                  keep_aspect_ratio=True,
                                                  relative_coord=False,
                                                  top_k=1000)
            self._pending_processing_start = 0
            return result

    def get_pending_processing_seconds(self) -> float:
        start = self._pending_processing_start
        return (time.monotonic() - start) if start != 0 else 0
Code example #17
def get_person_fn():
    model = '/Downloads/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
    engine = DetectionEngine(model)

    def inference(frame):
        ans = engine.DetectWithImage(Image.fromarray(frame),
                                     threshold=0.1,
                                     top_k=10)
        ans = filter(lambda x: x.label_id == 0, ans)

        height, width = frame.shape[:2]
        one_column = width // 12
        person = np.zeros((height, one_column * 4, 3), dtype=np.uint8)
        for obj in ans:
            box = obj.bounding_box
            box = box * np.array([width, height])
            box = box.astype(np.int32)
            person = frame[box[0][1]:box[1][1], box[0][0]:box[1][0]]
            person = cv2.resize(person, (one_column * 4, height))
        return np.concatenate((frame, person), axis=1)

    return inference
Code example #18
File: object_detection.py Project: rourke750/frigate
class PreppedQueueProcessor(threading.Thread):
    def __init__(self, cameras, prepped_frame_queue):

        threading.Thread.__init__(self)
        self.cameras = cameras
        self.prepped_frame_queue = prepped_frame_queue

        # Load the edgetpu engine and labels
        self.engine = DetectionEngine(PATH_TO_CKPT)
        self.labels = LABELS

    def run(self):
        # process queue...
        while True:
            frame = self.prepped_frame_queue.get()

            # Actual detection.
            objects = self.engine.DetectWithInputTensor(frame['frame'],
                                                        threshold=0.5,
                                                        top_k=5)
            # print(self.engine.get_inference_time())

            # parse and pass detected objects back to the camera
            parsed_objects = []
            for obj in objects:
                parsed_objects.append({
                    'region_id':
                    frame['region_id'],
                    'frame_time':
                    frame['frame_time'],
                    'name':
                    str(self.labels[obj.label_id]),
                    'score':
                    float(obj.score),
                    'box':
                    obj.bounding_box.flatten().tolist()
                })
            self.cameras[frame['camera_name']].add_objects(parsed_objects)
Code example #19
class EdgeTPUInferencer:

    def __init__(self, model):
        self.engine = DetectionEngine(model)

        self.watch = Stopwatch()

    def inference(self, img):

        self.watch.start()
        initial_h, initial_w, _ = img.shape
        if (initial_h, initial_w) != (300, 300):
            frame = cv2.resize(img, (300, 300))
        else:
            frame = img
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.watch.stop(Stopwatch.MODE_PREPROCESS)

        self.watch.start()
        ans = self.engine.detect_with_input_tensor(frame.flatten(), threshold=0.5, top_k=10)
        self.watch.stop(Stopwatch.MODE_INFER)

        # Display result
        self.watch.start()
        results = []
        for obj in ans:
            box = obj.bounding_box.flatten().tolist()
            bbox = [0] * 4
            bbox[0] = box[0] * initial_w
            bbox[1] = box[1] * initial_h
            bbox[2] = (box[2] - box[0]) * initial_w
            bbox[3] = (box[3] - box[1]) * initial_h

            result = (bbox, obj.label_id + 1, obj.score)
            results.append(result)
        self.watch.stop(Stopwatch.MODE_POSTPROCESS)

        return results
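
A minimal usage sketch for the inferencer above (Stopwatch comes from the original project; the model path and image file are placeholders):

# Hypothetical usage sketch.
import cv2

inferencer = EdgeTPUInferencer('mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
img = cv2.imread('test.jpg')  # any BGR image
for bbox, class_id, score in inferencer.inference(img):
    print(class_id, score, bbox)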
Code example #20
File: facedetect.py Project: sjamthe/hoverboard_ros
def main():

    output = SplitFrames()
    output.pub = rospy.Publisher('/FaceDetect/image_raw/compressed',
                                 CompressedImage,
                                 queue_size=1)
    rospy.init_node('facedetect', anonymous=True)
    rate = rospy.Rate(FRAME_RATE)

    # Initialize engine
    output.engine = DetectionEngine(MODEL)

    with picamera.PiCamera(resolution=RES, framerate=FRAME_RATE) as camera:
        camera.start_preview()
        # Give the camera some warm-up time
        time.sleep(2)
        camera.start_recording(output, format='mjpeg')
        camera.wait_recording(0)
        try:
            rospy.spin()
        except KeyboardInterrupt:
            print("Shutting down")
            camera.stop_recording()
Code example #21
class FaceDetector:
    def __init__(self, model_path: str = './model'):
        self.engine = DetectionEngine(model_path)

    def predict(self, on_img: Image) -> List[List[int]]:
        ans = self.engine.detect_with_image(on_img,
                                            threshold=0.05,
                                            keep_aspect_ratio=False,
                                            relative_coord=False,
                                            top_k=10)
        faces = []

        if ans:
            for obj in ans:
                # if labels:
                #     print(labels[obj.label_id])
                # print('score = ', obj.score)
                faces.append(obj.bounding_box.flatten().tolist())

        return faces

    def check_image_contains_face(self, img: Image):
        predictions = self.predict(img)
        return len(predictions) > 0
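
A short usage sketch for the face detector above; the model path and test image are placeholders:

# Hypothetical usage sketch.
from PIL import Image

detector = FaceDetector('./mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite')
img = Image.open('group_photo.jpg')
if detector.check_image_contains_face(img):
    for x1, y1, x2, y2 in detector.predict(img):
        print('face at', x1, y1, x2, y2)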
Code example #22
def main():
    '''Main function for running head tracking.
    It will initialize the picam and the Coral TPU with a given model (e.g. face recognition).
    Then, it will capture an image, feed it into the Coral TPU, get the coordinates and move the servos
    (pan, tilt) towards the center of the detected box.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', help='Path of the detection model.', required=True)

    args = parser.parse_args()

    image_number = 0

    # Let's create the images directory
    if not exists(IMAGES_DIR):
        makedirs(IMAGES_DIR)

    # Initialize the pan-tilt thing
    reset_pan_tilt()

    # Initialize engine.

    engine = DetectionEngine(args.model)

    # Initialize the camera
    #cam = cv2.VideoCapture(camera)
    camera = PiCamera()
    time.sleep(2)
    camera.resolution = IMAGE_SIZE
    camera.vflip = True
    # Create the in-memory stream
    stream = io.BytesIO()

    debug = DEBUG
    image_center = IMAGE_CENTER

    object_on_sight = False
    timer = None

    print("Capture started")
    while True:
        try:
            #ret, cv2_im = cam.read()
            stream = io.BytesIO() #wipe the contents
            camera.capture(stream, format='jpeg', use_video_port=True)
            stream.seek(0)
            # we need to flip it here!
            pil_im = Image.open(stream)
            #cv2_im = np.array(pil_im)
            #cv2_im = cv2.flip(cv2_im, 1) #Flip horizontally
            #cv2_im = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)

            # Remember. Doc at https://coral.ai/docs/reference/edgetpu.detection.engine/
            ans = engine.DetectWithImage(pil_im, threshold=0.3, keep_aspect_ratio=True,
                                        relative_coord=False, top_k=1)
            if ans:
                #for obj in ans:  # For multiple objects on screen
                obj = ans[0]  # Follow only one object, the first detected object

                object_on_sight = True
                timer = None
                result = show_box_center_and_size(obj.bounding_box)
                moved = center_camera(result, image_center, debug)

                if moved == 0:  # it didn't move, and there is an object on sight
                    image_number = save_picture(pil_im, image_number)

            else:
                #print("No objects detected with the given threshold")
                object_on_sight = False

            if not object_on_sight and not timer:  # if there was an object before, and is no longer on screen
                timer = time.time()  # We start a timer for reseting the angles later

            elif not object_on_sight and timer:
                elapsed_time = time.time() - timer
                # If 5 seconds have passed without activity. More than 8, do nothing
                if elapsed_time > 5 and elapsed_time < 8:
                    reset_pan_tilt()
                    timer = None  # We stop the timer

        except KeyboardInterrupt:
            print("Closing program")
            reset_pan_tilt()
            sys.exit()

        except:
            reset_pan_tilt()
            raise
Code example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='Path of the detection model.',
                        required=True)
    parser.add_argument('--label', help='Path of the labels file.')
    parser.add_argument(
        '--mode',
        help='Mode for the detection: OBJECT_DETECTION or IMAGE_CLASSIFICATION',
        required=True)
    parser.add_argument('--camera',
                        help='Camera source (if multiple available)',
                        type=int,
                        required=False)

    args = parser.parse_args()

    # Initialize engine.

    if args.mode == "OBJECT_DETECTION":
        engine = DetectionEngine(args.model)
    elif args.mode == "IMAGE_CLASSIFICATION":
        engine = ClassificationEngine(args.model)
    else:
        print(
            "Please insert the mode from OBJECT_DETECTION or IMAGE_CLASSIFICATION"
        )
        exit()

    labels = read_label_file(args.label) if args.label else None
    label = None
    camera = args.camera if args.camera else 0

    # Initialize the camera
    #cam = cv2.VideoCapture(camera)
    camera = PiCamera()
    time.sleep(2)
    camera.resolution = (640, 480)
    # Create the in-memory stream
    stream = io.BytesIO()

    # Initialize the timer for fps
    start_time = time.time()
    frame_times = deque(maxlen=40)

    while True:
        #ret, cv2_im = cam.read()
        stream = io.BytesIO()  #wipe the contents
        camera.capture(stream, format='jpeg', use_video_port=True)
        stream.seek(0)
        pil_im = Image.open(stream)
        cv2_im = np.array(pil_im)
        cv2_im = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)

        if args.mode == "OBJECT_DETECTION":
            ans = engine.DetectWithImage(pil_im,
                                         threshold=0.05,
                                         keep_aspect_ratio=True,
                                         relative_coord=False,
                                         top_k=10)
            if ans:
                for obj in ans:
                    if obj.score > 0.4:
                        if labels:
                            label = labels[obj.label_id] + " - {0:.2f}".format(
                                obj.score)
                        draw_rectangles(obj.bounding_box, cv2_im, label=label)
            else:
                draw_text(cv2_im, 'No object detected!')

        else:
            i = 0
            for result in engine.ClassifyWithImage(pil_im, top_k=5):
                if result:
                    label = labels[result[0]]
                    score = result[1]

                    draw_text(cv2_im, label, i)
                    i += 1
                else:
                    draw_text(cv2_im, 'No classification detected!')
        lastInferenceTime = engine.get_inference_time()
        frame_times.append(time.time())
        fps = len(frame_times) / float(frame_times[-1] - frame_times[0] +
                                       0.001)
        draw_text(cv2_im, "{:.1f} / {:.2f}ms".format(fps, lastInferenceTime))
        #print("FPS / Inference time: " + "{:.1f} / {:.2f}ms".format(fps, lastInferenceTime))

        #flipping the image: cv2.flip(cv2_im, 1)

        #cv2_im = cv2.resize(cv2_im, (800, 600))
        cv2.imshow('object detection', cv2_im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            exit()
            break
Code example #24
    def __init__(self, modelPath, device_path):
        self.detector = DetectionEngine(modelPath, device_path)
Code example #25
def main():
    global QUIT
    global AlarmMode  # would be Notify, Audio, or Idle, Idle mode doesn't save detections
    AlarmMode = "Audio"  # will be Email, Audio, or Idle  via MQTT from alarmboneServer
    global CameraToView
    CameraToView = 0
    global UImode
    UImode = 0  # controls if MQTT buffers of processed images from selected camera are sent as topic: ImageBuffer
    global subscribeTopic
    global Nonvif
    global Nrtsp
    global Nmqtt
    global mqttCamOffset
    global mqttFrameDrops
    global inframe
    global Ncameras
    global mqttFrames
    global dbg
    global blobThreshold

    # *** get command line parameters
    # construct the argument parser and parse the arguments for this module
    ap = argparse.ArgumentParser()

    ap.add_argument("-c",
                    "--confidence",
                    type=float,
                    default=0.70,
                    help="detection confidence threshold")
    ap.add_argument("-vc",
                    "--verifyConfidence",
                    type=float,
                    default=0.80,
                    help="detection confidence for verification")
    ap.add_argument("-nvc",
                    "--noVerifyConfidence",
                    type=float,
                    default=.98,
                    help="initial detection confidence to skip verification")
    ap.add_argument(
        "-blob",
        "--blobFilter",
        type=float,
        default=.20,
        help="reject detections that are more than this fraction of the frame")
    ap.add_argument(
        "-dbg",
        "--debug",
        action="store_true",
        help="display images to debug detection verification thresholds")

    # specify text file with list of URLs for camera rtsp streams
    ap.add_argument("-rtsp",
                    "--rtspURLs",
                    default="cameraURL.rtsp",
                    help="path to file containing rtsp camera stream URLs")

    # specify text file with list of URLs cameras http "Onvif" snapshot jpg images
    ap.add_argument("-cam",
                    "--cameraURLs",
                    default="cameraURL.txt",
                    help="path to file containing http camera jpeg image URLs")

    # display mode, mostly for test/debug and setup, general plan would be to run "headless"
    ap.add_argument(
        "-d",
        "--display",
        type=int,
        default=1,
        help="display images on host screen, 0=no display, 1=live display")

    # specify MQTT broker
    ap.add_argument("-mqtt",
                    "--mqttBroker",
                    default="localhost",
                    help="name or IP of COntroller/Viewer MQTT Broker")

    # specify MQTT broker for camera images via MQTT, if not "localhost"
    ap.add_argument("-camMQTT",
                    "--mqttCameraBroker",
                    default="localhost",
                    help="name or IP of MQTTcam/# message broker")
    # number of MQTT cameras published as Topic: MQTTcam/N, subscribed here as Topic: MQTTcam/#, Cams numbered 0 to N-1
    ap.add_argument(
        "-Nmqtt",
        "--NmqttCams",
        type=int,
        default=0,
        help=
        "number of MQTT cameras published as Topic: MQTTcam/N,  Cams numbered 0 to N-1"
    )
    # alternate, specify a list of camera numbers
    ap.add_argument(
        "-camList",
        "--mqttCamList",
        type=int,
        nargs='+',
        help=
        "list of MQTTcam/N subscription topic numbers,  cam/N numbered from 0 to Nmqtt-1."
    )

    # specify display width and height
    ap.add_argument("-dw",
                    "--displayWidth",
                    type=int,
                    default=1920,
                    help="host display Width in pixels, default=1920")
    ap.add_argument("-dh",
                    "--displayHeight",
                    type=int,
                    default=1080,
                    help="host display Height in pixels, default=1080")

    # specify host display width and height of camera image
    ap.add_argument(
        "-iw",
        "--imwinWidth",
        type=int,
        default=640,
        help="camera image host display window Width in pixels, default=640")
    ap.add_argument(
        "-ih",
        "--imwinHeight",
        type=int,
        default=360,
        help="camera image host display window Height in pixels, default=360")

    # enable local save of detections on AI host, useful if node-red notification code is not being used
    ap.add_argument("-ls",
                    "--localSave",
                    action="store_true",
                    help="save detection images on local AI host")
    # specify file path of the location to save detection images on the localhost
    ap.add_argument(
        "-sp",
        "--savePath",
        default="",
        help="path to location for saving detection images, default ~/detect")
    # save all processed images, fills disk quickly, really slows things down, but useful for test/debug
    ap.add_argument(
        "-save",
        "--saveAll",
        action="store_true",
        help=
        "save all images not just detections on host filesystem, for test/debug"
    )

    args = vars(ap.parse_args())

    # set variables from command line arguments or defaults
    confidence = args["confidence"]
    verifyConf = args["verifyConfidence"]
    noVerifyNeeded = args["noVerifyConfidence"]
    blobThreshold = args["blobFilter"]
    dbg = args["debug"]
    MQTTcameraServer = args["mqttCameraBroker"]
    Nmqtt = args["NmqttCams"]
    camList = args["mqttCamList"]
    if camList is not None:
        Nmqtt = len(camList)
    elif Nmqtt > 0:
        camList = []
        for i in range(Nmqtt):
            camList.append(i)
    dispMode = args["display"]
    if dispMode > 1:
        displayMode = 1
    CAMERAS = args["cameraURLs"]
    RTSP = args["rtspURLs"]
    MQTTserver = args[
        "mqttBroker"]  # this is for command and control messages, and detection messages
    displayWidth = args["displayWidth"]
    displayHeight = args["displayHeight"]
    imwinWidth = args["imwinWidth"]
    imwinHeight = args["imwinHeight"]
    savePath = args["savePath"]
    saveAll = args["saveAll"]
    localSave = args["localSave"]
    if saveAll:
        localSave = True

    # *** get Onvif camera URLs
    # cameraURL.txt file can be created by first running the nodejs program (requires node-onvif be installed):
    # nodejs onvif_discover.js
    #
    # This code does not really use any Onvif features; Onvif compatibility is useful to "automate" getting the URLs used to grab snapshots.
    # Any camera that returns a jpeg image from a web request to a static URL should work.
    try:
        CameraURL = [line.rstrip()
                     for line in open(CAMERAS)]  # force file not found
        Nonvif = len(CameraURL)
        print("[INFO] " + str(Nonvif) +
              " http Onvif snapshot threads will be created.")
    except:
        # No Onvif cameras
        print("[INFO] No " + str(CAMERAS) +
              " file.  No Onvif snapshot threads will be created.")
        Nonvif = 0
    Ncameras = Nonvif

    # *** get rtsp URLs
    try:
        rtspURL = [line.rstrip() for line in open(RTSP)]
        Nrtsp = len(rtspURL)
        print("[INFO] " + str(Nrtsp) + " rtsp stream threads will be created.")
    except:
        # no rtsp cameras
        print("[INFO] No " + str(RTSP) +
              " file.  No rtsp stream threads will be created.")
        Nrtsp = 0
    Ncameras += Nrtsp

    # *** setup path to save AI detection images
    if savePath == "":
        detectPath = os.getcwd()
        detectPath = detectPath + "/detect"
        if os.path.exists(detectPath) == False and localSave:
            os.mkdir(detectPath)
    else:
        detectPath = savePath
        if os.path.exists(detectPath) == False:
            print(
                " Path to location to save detection images must exist!  Exiting ..."
            )
            quit()

    # *** allocate queues
    # we simply make one queue for each camera, rtsp stream, and MQTTcamera
    QDEPTH = 2  # small values improve latency
    ##    QDEPTH = 1      # small values improve latency
    print("[INFO] allocating camera and stream image queues...")
    mqttCamOffset = Ncameras
    mqttFrameDrops = 0
    mqttFrames = 0
    Ncameras += Nmqtt  # I generally expect Nmqtt to be zero if Ncameras is not zero at this point, but its not necessary
    if Ncameras == 0:
        print(
            "[INFO] No Cameras, rtsp Streams, or MQTT image inputs specified!  Exiting..."
        )
        quit()
    if Nmqtt > 0:
        print("[INFO] allocating " + str(Nmqtt) + " MQTT image queues...")
##    results = Queue(2*Ncameras)
    results = Queue(int(Ncameras / 2) + 1)
    inframe = list()
    for i in range(Ncameras):
        inframe.append(Queue(QDEPTH))

    # build grey image for mqtt windows
    img = np.zeros((imwinHeight, imwinWidth, 3), np.uint8)
    img[:, :] = (127, 127, 127)
    retv, img_as_jpg = cv2.imencode('.jpg', img,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 50])

    # *** setup display windows if necessary
    # mostly for initial setup and testing, not worth a lot of effort at the moment
    if dispMode > 0:
        if Nonvif > 0:
            print("[INFO] setting up Onvif camera image windows ...")
            for i in range(Nonvif):
                name = str("Live_" + str(i))
                cv2.namedWindow(name,
                                flags=cv2.WINDOW_GUI_NORMAL +
                                cv2.WINDOW_AUTOSIZE)
                cv2.waitKey(1)
        if Nrtsp > 0:
            print("[INFO] setting up rtsp camera image windows ...")
            for i in range(Nrtsp):
                name = str("Live_" + str(i + Nonvif))
                cv2.namedWindow(name,
                                flags=cv2.WINDOW_GUI_NORMAL +
                                cv2.WINDOW_AUTOSIZE)
                cv2.waitKey(1)
        if Nmqtt > 0:
            print("[INFO] setting up MQTT camera image windows ...")
            for i in range(Nmqtt):
                name = str("Live_" + str(i + mqttCamOffset))
                cv2.namedWindow(name,
                                flags=cv2.WINDOW_GUI_NORMAL +
                                cv2.WINDOW_AUTOSIZE)
                cv2.imshow(name, img)
                cv2.waitKey(1)

        # *** move windows into tiled grid
        top = 20
        left = 2
        ##left=1900           ## overrides for my 4K monitors
        ##displayHeight=1900  ## overrides for my 4K monitors
        Xshift = imwinWidth + 3
        Yshift = imwinHeight + 28
        Nrows = int(displayHeight / imwinHeight)
        for i in range(Ncameras):
            name = str("Live_" + str(i))
            col = int(i / Nrows)
            row = i % Nrows
            cv2.moveWindow(name, left + col * Xshift, top + row * Yshift)

    # *** connect to MQTT broker for control/status messages
    print("[INFO] connecting to MQTT " + MQTTserver + " broker...")
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect
    client.will_set(
        "AI/Status", "Python AI has died!", 2, True
    )  # let everyone know we have died, perhaps node-red can restart it
    client.connect(MQTTserver, 1883, 60)
    client.loop_start()

    # *** MQTT send a blank image to the dashboard UI
    print("[INFO] Clearing dashboard ...")
    client.publish("ImageBuffer/!AI has Started.", bytearray(img_as_jpg), 0,
                   False)

    # *** setup and start Coral AI threads
    # Might consider moving this into the thread function.
    ### Setup Coral AI
    # initialize the labels dictionary
    print("[INFO] parsing mobilenet_ssd_v2 coco class labels for Coral TPU...")
    labels = {}
    for row in open("mobilenet_ssd_v2/coco_labels.txt"):
        # unpack the row and update the labels dictionary
        (classID, label) = row.strip().split(maxsplit=1)
        labels[int(classID)] = label.strip()
    print("[INFO] loading Coral mobilenet_ssd_v2_coco model...")
    model = DetectionEngine(
        "mobilenet_ssd_v2/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
    )

    # *** Open second MQTT client thread for MQTTcam/# messages "MQTT cameras"
    # Requires rtsp2mqttPdemand.py mqtt camera source
    if Nmqtt > 0:
        mqttFrameDrops = []
        mqttFrames = []
        mqttCam = list()
        print("[INFO] connecting to " + MQTTcameraServer +
              " broker for MQTT cameras...")
        print("INFO all MQTT cameras will be handled in a single thread.")
        for i in camList:
            mqttFrameDrops.append(0)
            mqttFrames.append(0)
        mqttCam = mqtt.Client(userdata=camList, clean_session=True)
        mqttCam.on_connect = on_mqttCam_connect
        mqttCam.on_message = on_mqttCam
        mqttCam.on_publish = on_publish
        mqttCam.on_disconnect = on_disconnect
        mqttCam.connect(MQTTcameraServer, 1883, 60)
        mqttCam.loop_start()
        time.sleep(0.1)  # force thread dispatch
        for i in camList:
            mqttCam.publish(str("sendOne/" + str(i)), "", 0,
                            False)  # start messages

    # *** start camera reading threads
    o = list()
    if Nonvif > 0:
        print("[INFO] starting " + str(Nonvif) + " Onvif Camera Threads ...")
        for i in range(Nonvif):
            o.append(
                Thread(target=onvif_thread,
                       args=(inframe[i], i, CameraURL[i])))
            o[i].start()
    if Nrtsp > 0:
        global threadLock
        global threadsRunning
        threadLock = Lock()
        threadsRunning = 0
        print("[INFO] starting " + str(Nrtsp) +
              " RTSP Camera Sampling Threads ...")
        for i in range(Nrtsp):
            o.append(
                Thread(target=rtsp_thread,
                       args=(inframe[i + Nonvif], i + Nonvif, rtspURL[i])))
            o[i + Nonvif].start()
        while threadsRunning < Nrtsp:
            time.sleep(0.5)
        print("[INFO] All " + str(Nrtsp) +
              " RTSP Camera Sampling Threads are running.")

    # *** start Coral TPU thread
    print("[INFO] starting Coral TPU AI Thread ...")
    Ct = Thread(target=TPU_thread,
                args=(results, inframe, model, labels, 0, Ncameras,
                      PREPROCESS_DIMS, confidence, noVerifyNeeded, verifyConf))
    Ct.start()

    #*************************************************************************************************************************************
    # *** enter main program loop (main thread)
    # loop over frames from the camera and display results from AI_thread
    excount = 0
    aliveCount = 0
    waitCnt = 0
    prevUImode = UImode
    currentDT = datetime.datetime.now()
    print("[INFO] AI/Status: Python AI running." +
          currentDT.strftime("  %Y-%m-%d %H:%M:%S"))
    client.publish(
        "AI/Status",
        "Python AI running." + currentDT.strftime("  %Y-%m-%d %H:%M:%S"), 2,
        True)
    #start the FPS counter
    print("[INFO] starting the FPS counter ...")
    fps = FPS().start()
    while not QUIT:
        try:
            try:
                (img, cami, personDetected, dt, ai,
                 bp) = results.get(True, 0.100)
            except:
                waitCnt += 1
                img = None
                aliveCount = (
                    aliveCount + 1
                ) % 200  # MQTTcam images stop while Lorex reboots, recovers eventually so keep alive
                if aliveCount == 0:
                    client.publish("AmAlive", "true", 0, False)
                continue
            if img is not None:
                fps.update()  # update the FPS counter
                # setup for file saving
                folder = dt.strftime("%Y-%m-%d")
                filename = dt.strftime("%H_%M_%S.%f")
                filename = filename[:-5] + "_" + ai  #just keep tenths, append AI source
                if localSave:
                    lfolder = str(detectPath + "/" + folder)
                    if os.path.exists(lfolder) == False:
                        os.mkdir(lfolder)
                    if personDetected:
                        outName = str(lfolder + "/" + filename + "_" + "Cam" +
                                      str(cami) + "_AI.jpg")
                    else:  # in case saveAll option
                        outName = str(lfolder + "/" + filename + "_" + "Cam" +
                                      str(cami) + ".jpg")
                    if (personDetected and not AlarmMode.count("Idle")
                        ) or saveAll:  # save detected image
                        cv2.imwrite(outName, img,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 80])
                if personDetected:
                    retv, img_as_jpg = cv2.imencode(
                        '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
                    if retv:
                        outName = str("AIdetection/!detect/" + folder + "/" +
                                      filename + "_" + "Cam" + str(cami) +
                                      ".jpg")
                        outName = outName + "!" + str(bp[0]) + "!" + str(
                            bp[1]) + "!" + str(bp[2]) + "!" + str(
                                bp[3]) + "!" + str(bp[4]) + "!" + str(
                                    bp[5]) + "!" + str(bp[6]) + "!" + str(
                                        bp[7])
                        client.publish(str(outName), bytearray(img_as_jpg), 0,
                                       False)
##                        print(outName)  # log detections
                    else:  # never seen this failure, is this really necessary?
                        print(
                            "[INFO] conversion of np array to jpg in buffer failed!"
                        )
                        img_as_jpg = None
                        continue
                # send image for live display in dashboard
                if ((CameraToView == cami) and
                    (UImode == 1 or (UImode == 2 and personDetected))) or (
                        UImode == 3 and personDetected):
                    if personDetected:
                        topic = str("ImageBuffer/!" + filename + "_" + "Cam" +
                                    str(cami) + "_AI.jpg")
                    else:
                        retv, img_as_jpg = cv2.imencode(
                            '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 40])
                        if retv:
                            topic = str("ImageBuffer/!" + filename + "_" +
                                        "Cam" + str(cami) + ".jpg")
                        else:
                            print(
                                "[INFO] conversion of np array to jpg in buffer failed!"
                            )
                            img_as_jpg = None
                            continue
                    client.publish(str(topic), bytearray(img_as_jpg), 0, False)
                # display the frame to the screen if enabled, in normal usage display is 0 (off)
                if dispMode > 0:
                    name = str("Live_" + str(cami))
                    cv2.imshow(name, cv2.resize(img,
                                                (imwinWidth, imwinHeight)))
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord(
                            "q"
                    ):  # if the `q` key was pressed, break from the loop
                        QUIT = True  # exit main loop
                        continue
                aliveCount = (aliveCount + 1) % 200
                if aliveCount == 0:
                    client.publish("AmAlive", "true", 0, False)
                if prevUImode != UImode:
                    img = np.zeros((imwinHeight, imwinWidth, 3), np.uint8)
                    img[:, :] = (154, 127, 100)
                    retv, img_as_jpg = cv2.imencode(
                        '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 40])
                    client.publish("ImageBuffer/!AI Mode Changed.",
                                   bytearray(img_as_jpg), 0, False)
                    prevUImode = UImode
        # if "ctrl+c" is pressed in the terminal, break from the loop
        except KeyboardInterrupt:
            QUIT = True  # exit main loop
            continue
        except Exception as e:
            currentDT = datetime.datetime.now()
            print(" **** Main Loop Error: " + str(e) +
                  currentDT.strftime(" -- %Y-%m-%d %H:%M:%S.%f"))
            excount = excount + 1
            if excount <= 3:
                continue  # hope for the best!
            else:
                break  # give up! Hope watchdog gets us going again!
    #end of while not QUIT  loop
    #*************************************************************************************************************************************

    # *** Clean up for program exit
    fps.stop()  # stop the FPS counter timer
    currentDT = datetime.datetime.now()
    print("[INFO] Program Exit signal received:" +
          currentDT.strftime("  %Y-%m-%d %H:%M:%S"))
    # display FPS information
    print("*** AI processing approx. FPS: {:.2f} ***".format(fps.fps()))
    print("[INFO] Run elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] Frames processed by AI system: " + str(fps._numFrames))
    print("[INFO] Main looped waited for results: " + str(waitCnt) + " times.")
    currentDT = datetime.datetime.now()
    client.publish(
        "AI/Status",
        "Python AI stopped." + currentDT.strftime("  %Y-%m-%d %H:%M:%S"), 2,
        True)
    print("[INFO] AI/Status: Python AI stopped." +
          currentDT.strftime("  %Y-%m-%d %H:%M:%S"))

    # stop cameras
    if Nmqtt > 0:
        mqttCam.disconnect()
        mqttCam.loop_stop()
        for i in range(Nmqtt):
            print("MQTTcam/" + str(camList[i]) + " has dropped: " +
                  str(mqttFrameDrops[i]) + " frames out of: " +
                  str(mqttFrames[i]))

    # wait for threads to exit
    if Nonvif > 0:
        for i in range(Nonvif):
            o[i].join()
        print("[INFO] All Onvif Camera have exited ...")
    if Nrtsp > 0:
        for i in range(Nrtsp):
            o[i + Nonvif].join()
        print("[INFO] All rtsp Camera have exited ...")
    # stop TPU
    Ct.join()
    print("[INFO] All Coral TPU AI Thread has exited ...")

    # destroy all windows if we are displaying them
    if args["display"] > 0:
        cv2.destroyAllWindows()

    # Send a blank image to the dashboard UI
    print("[INFO] Clearing dashboard ...")
    img = np.zeros((imwinHeight, imwinWidth, 3), np.uint8)
    img[:, :] = (32, 32, 32)
    retv, img_as_jpg = cv2.imencode('.jpg', img,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 50])
    client.publish("ImageBuffer/!AI has Exited.", bytearray(img_as_jpg), 0,
                   False)
    time.sleep(1.0)

    # clean up localhost MQTT
    client.disconnect()  # normal exit, Will message should not be sent.
    currentDT = datetime.datetime.now()
    print("[INFO] Stopping MQTT Threads." +
          currentDT.strftime("  %Y-%m-%d %H:%M:%S"))
    client.loop_stop()  ### Stop MQTT thread

    # bye-bye
    currentDT = datetime.datetime.now()
    print("Program Exit." + currentDT.strftime("  %Y-%m-%d %H:%M:%S"))
    print("")
    print("")
Code example #26
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
            '--model', help='File path of Tflite model.', required=True)
    parser.add_argument(
            '--label', help='File path of label file.', required=True)
    parser.add_argument(
            '--top_k', help="keep top k candidates.", default=3, type=int)
    parser.add_argument(
            '--threshold', help="threshold to filter results.", default=0.5, type=float)
    parser.add_argument(
            '--width', help="Resolution width.", default=640, type=int)
    parser.add_argument(
            '--height', help="Resolution height.", default=480, type=int)
    args = parser.parse_args()

    # Initialize window.
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO)
    cv2.moveWindow(WINDOW_NAME, 100, 200)

    # Initialize engine.
    engine = DetectionEngine(args.model)
    labels = ReadLabelFile(args.label) if args.label else None

    # Generate random colors.
    last_key = max(labels.keys())
    colors = visual.random_colors(last_key)

    elapsed_list = []
    resolution_width = args.width
    resolution_height = args.height
    with picamera.PiCamera() as camera:

        camera.resolution = (resolution_width, resolution_height)
        camera.framerate = 30
        _, height, width, channels = engine.get_input_tensor_shape()  # NHWC: (batch, height, width, channels)
        rawCapture = PiRGBArray(camera)

        # allow the camera to warmup
        time.sleep(0.1)

        try:
            for frame in camera.capture_continuous(rawCapture,
                                                 format='rgb',
                                                 use_video_port=True):
                rawCapture.truncate(0)

                # input_buf = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                image = frame.array
                im = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                input_buf = PIL.Image.fromarray(image)

                # Run inference.
                start_ms = time.time()
                ans = engine.DetectWithImage(input_buf, threshold=args.threshold,
                       keep_aspect_ratio=False, relative_coord=False, top_k=args.top_k)
                # ans = engine.DetectWithInputTensor(input_buf, threshold=0.05,
                #         keep_aspect_ratio=False, relative_coord=False, top_k=10)
                elapsed_ms = time.time() - start_ms

                # Display result.
                if ans:
                    for obj in ans:
                        label_name = 'Unknown'
                        if labels:
                            label_name = labels[obj.label_id]
                        caption = '{0}({1:.2f})'.format(label_name, obj.score)

                        # Draw a rectangle and caption.
                        box = obj.bounding_box.flatten().tolist()
                        visual.draw_rectangle(im, box, colors[obj.label_id])
                        visual.draw_caption(im, box, caption)

                # Calc fps.
                fps = 1 / elapsed_ms
                elapsed_list.append(elapsed_ms)
                avg_text = ""
                if len(elapsed_list) > 100:
                    elapsed_list.pop(0)
                    avg_elapsed_ms = np.mean(elapsed_list)
                    avg_fps = 1 / avg_elapsed_ms
                    avg_text = ' AVG: {0:.2f}ms, {1:.2f}fps'.format(
                        (avg_elapsed_ms * 1000.0), avg_fps)

                # Display fps
                fps_text = '{0:.2f}ms, {1:.2f}fps'.format(
                        (elapsed_ms * 1000.0), fps)
                visual.draw_caption(im, (10, 30), fps_text + avg_text)

                # display
                cv2.imshow(WINDOW_NAME, im)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break

        finally:
            camera.stop_preview()

    # When everything done, release the window
    cv2.destroyAllWindows()
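The visual module used above (random_colors, draw_rectangle, draw_caption) comes from the example's own repository and is not included in this snippet; a minimal stand-in with the same call signatures could look like this sketch.

# Hypothetical stand-in for the project's "visual" helpers used above.
import random
import cv2

def random_colors(n):
    # One BGR color per class id (indices 0..n), so colors[obj.label_id] is valid.
    random.seed(42)
    return [tuple(random.randint(0, 255) for _ in range(3)) for _ in range(n + 1)]

def draw_rectangle(image, box, color, thickness=2):
    # box is [xmin, ymin, xmax, ymax] in pixel coordinates.
    cv2.rectangle(image, (int(box[0]), int(box[1])),
                  (int(box[2]), int(box[3])), color, thickness)

def draw_caption(image, box, caption):
    # Accepts either a bounding box or an (x, y) point, as the calls above do.
    x, y = int(box[0]), int(box[1])
    cv2.putText(image, caption, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                0.6, (255, 255, 255), 1, cv2.LINE_AA)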
Code example #27
0
File: demo.py Project: Kao1126/EdgeTPU-FaceNet
def main():

    load_time = time.time()

    # Initialize engine.
    engine = DetectionEngine(Model_weight)
    labels = None

    # Face recognize engine
    face_engine = ClassificationEngine(FaceNet_weight)
    # read embedding
    class_arr, emb_arr = read_embedding(Embedding_book)

    load_elapsed = time.time() - load_time

    with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:

            cap = cv2.VideoCapture(0)

            while True:
                t1 = cv2.getTickCount()
                print('Load_model: {:.2f} sec'.format(load_elapsed))

                ret, frame = cap.read()

                img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                draw = ImageDraw.Draw(img)

                # Run inference.
                ans = engine.DetectWithImage(img,
                                             threshold=0.05,
                                             keep_aspect_ratio=False,
                                             relative_coord=False,
                                             top_k=10)

                img = numpy.asarray(img)
                # Display result.
                if ans:
                    crop_img = crop_image(ans, frame)

                    if cv2.waitKey(1) == ord('a'):

                        for k in range(0, len(crop_img)):
                            new_class_name = input(
                                'Please input your name of class:')
                            new_save = cv2.cvtColor(crop_img[k],
                                                    cv2.COLOR_BGR2RGB)
                            cv2.imwrite(
                                'pictures/' + str(new_class_name) + '.jpg',
                                new_save)

                        Create_embeddings(face_engine)
                        class_arr, emb_arr = read_embedding(
                            'embedding_book/embeddings.h5')

                    embs = Tpu_FaceRecognize(face_engine, crop_img)

                    face_num = len(ans)
                    face_class = ['Others'] * face_num

                    for i in range(face_num):
                        diff = np.mean(np.square(embs[i] - emb_arr), axis=1)
                        min_diff = min(diff)

                        if min_diff < THRED:

                            index = np.argmin(diff)
                            face_class[i] = class_arr[index]

                    print('Face_class:', face_class)
                    print('Classes:', class_arr)

                    for count, obj in enumerate(ans):
                        print('-----------------------------------------')
                        if labels:
                            print(labels[obj.label_id])
                        print('Score = ', obj.score)
                        box = obj.bounding_box.flatten().tolist()

                        # Draw a rectangle and label
                        cv2.rectangle(img, (int(box[0]), int(box[1])),
                                      (int(box[2]), int(box[3])),
                                      (255, 255, 0), 2)
                        cv2.putText(img, '{}'.format(face_class[count]),
                                    (int(box[0]), int(box[1]) - 5),
                                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1,
                                    cv2.LINE_AA)

                t2 = cv2.getTickCount()
                t = (t2 - t1) / cv2.getTickFrequency()
                fps = 1.0 / t
                cv2.putText(img, 'fps: {:.2f}'.format(fps), (5, 20),
                            cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1,
                            cv2.LINE_AA)

                cv2.putText(img, 'A: Add new class', (5, 450),
                            cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1,
                            cv2.LINE_AA)
                cv2.putText(img, 'Q: Quit', (5, 470), cv2.FONT_HERSHEY_PLAIN,
                            1, (255, 0, 0), 1, cv2.LINE_AA)
                img_ = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

                cv2.imshow('frame', img_)

                if cv2.waitKey(1) == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()
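crop_image, Tpu_FaceRecognize, Create_embeddings, and read_embedding are helpers from the EdgeTPU-FaceNet project and are not part of this snippet. A plausible minimal version of crop_image, assuming it simply returns one BGR crop per detection, is sketched here.

# Hypothetical sketch of crop_image(): one BGR face crop per detection result.
def crop_image(detections, frame):
    crops = []
    height, width = frame.shape[:2]
    for obj in detections:
        x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
        # Clamp to the frame in case the box extends past the edges.
        x0, y0 = max(0, int(x0)), max(0, int(y0))
        x1, y1 = min(width, int(x1)), min(height, int(y1))
        crops.append(frame[y0:y1, x0:x1])
    return crops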
Code example #28
0
File: fever.py Project: vklovo1/fever
def main(_):
    if FLAGS.detect:
        # Initialize ambient sensors.
        ambient = bme680.BME680(i2c_addr=bme680.I2C_ADDR_PRIMARY,
                                i2c_device=SMBus(1))
        # TODO: Tune settings.
        ambient.set_humidity_oversample(bme680.OS_2X)
        ambient.set_pressure_oversample(bme680.OS_4X)
        ambient.set_temperature_oversample(bme680.OS_8X)
        ambient.set_filter(bme680.FILTER_SIZE_3)
        ambient.set_gas_status(bme680.DISABLE_GAS_MEAS)

        # Load the face detection model.
        face_detector = DetectionEngine(FLAGS.face_model)

    # Start the frame processing loop.
    with PureThermal() as camera:

        # Initialize thermal image buffers.
        input_shape = (camera.height(), camera.width())
        raw_buffer = np.zeros(input_shape, dtype=np.int16)
        scaled_buffer = np.zeros(input_shape, dtype=np.uint8)
        if FLAGS.detect:
            rgb_buffer = np.zeros((*input_shape, 3), dtype=np.uint8)
        if FLAGS.visualize:
            window_buffer = np.zeros((WINDOW_HEIGHT, WINDOW_WIDTH, 3),
                                     dtype=np.uint8)
        raw_scale_factor = (FLAGS.max_temperature -
                            FLAGS.min_temperature) // 255
        window_scale_factor_x = WINDOW_WIDTH / camera.width()
        window_scale_factor_y = WINDOW_HEIGHT / camera.height()

        if FLAGS.visualize:
            # Initialize the window.
            cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
            cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)

        while (not FLAGS.visualize
               or cv2.getWindowProperty(WINDOW_NAME, 0) != -1):
            try:
                start_time = time()

                if FLAGS.detect:
                    # Acquire ambient sensor readings.
                    if not ambient.get_sensor_data():
                        logging.warning('Ambient sensor data not ready')
                    ambient_data = ambient.data
                    logging.debug('Ambient temperature: %.f °C' %
                                  ambient_data.temperature)
                    logging.debug('Ambient pressure: %.f hPa' %
                                  ambient_data.pressure)
                    logging.debug('Ambient humidity: %.f %%' %
                                  ambient_data.humidity)

                # Get the latest frame from the thermal camera and copy it.
                with camera.frame_lock():
                    np.copyto(dst=raw_buffer, src=camera.frame())

                # Map the raw temperature data to a normal range before
                # reducing the bit depth and min/max normalizing for better
                # contrast.
                np.clip(
                    (raw_buffer - FLAGS.min_temperature) // raw_scale_factor,
                    0,
                    255,
                    out=scaled_buffer)
                cv2.normalize(src=scaled_buffer,
                              dst=scaled_buffer,
                              alpha=0,
                              beta=255,
                              norm_type=cv2.NORM_MINMAX)

                if FLAGS.detect:
                    # Convert to the expected RGB format.
                    cv2.cvtColor(src=scaled_buffer,
                                 dst=rgb_buffer,
                                 code=cv2.COLOR_GRAY2RGB)

                    # Detect any faces in the frame.
                    faces = face_detector.detect_with_image(
                        Image.fromarray(rgb_buffer),
                        threshold=FLAGS.face_confidence,
                        top_k=FLAGS.max_num_faces,
                        keep_aspect_ratio=True,  # Better quality.
                        relative_coord=False,  # Expect pixel coordinates.
                        resample=Image.BILINEAR)  # Good enough and fast.

                    # TODO: Estimate distance based on face size.

                    # TODO: Model thermal attenuation based on distance and
                    #       ambient temperature, pressure, and humidity.

                    # Find the (highest) temperature of each face.
                    if len(faces) == 1:
                        logging.info('1 person')
                    else:
                        logging.info('%d people' % len(faces))
                    for face in faces:
                        temperature = get_temperature(raw_buffer,
                                                      face.bounding_box)
                        if not temperature:
                            logging.warning('Empty crop')
                            continue
                        logging.info(format_temperature(temperature))

                if FLAGS.visualize:
                    # Apply the colormap.
                    turbo_buffer = TURBO_COLORMAP[scaled_buffer]

                    # Resize for the window.
                    cv2.cvtColor(src=turbo_buffer,
                                 dst=turbo_buffer,
                                 code=cv2.COLOR_RGB2BGR)
                    cv2.resize(src=turbo_buffer,
                               dst=window_buffer,
                               dsize=(WINDOW_WIDTH, WINDOW_HEIGHT),
                               interpolation=cv2.INTER_CUBIC)

                    if FLAGS.detect:
                        # Draw the face bounding boxes and temperature.
                        for face in faces:
                            bbox = face.bounding_box
                            top_left = (int(window_scale_factor_x *
                                            bbox[0, 0]),
                                        int(window_scale_factor_y *
                                            bbox[0, 1]))
                            bottom_right = (int(window_scale_factor_x *
                                                bbox[1, 0]),
                                            int(window_scale_factor_y *
                                                bbox[1, 1]))
                            cv2.rectangle(window_buffer, top_left,
                                          bottom_right, LINE_COLOR,
                                          LINE_THICKNESS)

                            temperature = get_temperature(
                                raw_buffer, face.bounding_box)
                            if not temperature:
                                continue
                            label = format_temperature(temperature,
                                                       add_unit=False)
                            label_size, _ = cv2.getTextSize(
                                label, LABEL_FONT, LABEL_SCALE,
                                LABEL_THICKNESS)
                            label_position = (
                                (top_left[0] + bottom_right[0]) // 2 -
                                label_size[0] // 2,
                                (top_left[1] + bottom_right[1]) // 2 +
                                label_size[1] // 2)
                            cv2.putText(window_buffer, label, label_position,
                                        LABEL_FONT, LABEL_SCALE, LABEL_COLOR,
                                        LABEL_THICKNESS, cv2.LINE_AA)

                    # Draw the frame.
                    cv2.imshow(WINDOW_NAME, window_buffer)
                    cv2.waitKey(1)

                # Calculate timing stats.
                duration = time() - start_time
                logging.debug('Frame took %.f ms (%.2f Hz)' %
                              (duration * 1000, 1 / duration))

            # Stop on SIGINT.
            except KeyboardInterrupt:
                break

    if FLAGS.visualize:
        cv2.destroyAllWindows()
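get_temperature() and format_temperature() are defined elsewhere in fever.py and are not part of this excerpt. A minimal sketch of the former, assuming it simply reports the hottest raw pixel inside the detected face box, might look like this.

# Hypothetical sketch: hottest raw reading inside a detected face box.
# Returns None for an empty crop so the caller can skip it (see "Empty crop" above).
def get_temperature(raw_buffer, bounding_box):
    (x0, y0), (x1, y1) = bounding_box.astype(int)
    crop = raw_buffer[max(0, y0):y1, max(0, x0):x1]
    if crop.size == 0:
        return None
    return int(crop.max())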
Code example #29
0
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Flask app exposing coral USB stick")
    parser.add_argument(
        "--models_directory",
        default=DEFAULT_MODELS_DIRECTORY,
        help="the directory containing the model & labels files",
    )
    parser.add_argument(
        "--model",
        default=DEFAULT_MODEL,
        help="model file",
    )
    parser.add_argument("--labels",
                        default=DEFAULT_LABELS,
                        help="labels file of model")
    parser.add_argument("--port", default=5000, type=int, help="port number")
    args = parser.parse_args()

    global MODEL
    MODEL = args.model
    model_file = args.models_directory + args.model
    labels_file = args.models_directory + args.labels

    engine = DetectionEngine(model_file)
    print("\n Loaded engine with model : {}".format(model_file))

    labels = ReadLabelFile(labels_file)
    app.run(host="0.0.0.0", port=args.port)
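The Flask route handlers themselves are defined above this __main__ block and are not part of this excerpt. A client call sketch is shown below; the endpoint path, form field name, and JSON response format are assumptions, not taken from the source.

# Hypothetical client for the Flask service above; endpoint path and response
# format are guesses, not part of the excerpt.
import requests

with open("test.jpg", "rb") as f:
    response = requests.post("http://localhost:5000/v1/vision/detection",
                             files={"image": f})
print(response.json())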
Code example #30
0
File: detect.py Project: fmiusov/ssd-dag
    logger.info("label dict: {}".format(label_dict))

    # this value is validated by the argument definition
    if args.model_name == Models.tf_lite:
        logger.info('TF Lite Model loading...')
        interpreter = get_tflite_interpreterer(logger, args.model_path)
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        model_input_shape = input_shape = input_details[0]['shape']   # (batch, h, w, channels)
        model_image_dim = (model_input_shape[1], model_input_shape[2])    # model - image dimension
        model_input_dim = (1, model_input_shape[1], model_input_shape[2], 3) # model - batch of images dimensions
        logger.info("Model Input Dimension: {}".format(model_input_dim))
    elif args.model_name == Models.edge_tpu:
        from edgetpu.detection.engine import DetectionEngine
        logger.info('Edge TPU Model loading...')
        engine = DetectionEngine(args.model_path)
        model_image_dim = (300, 300)
        model_input_dim = (1, 300, 300, 3)

    if args.image_dir is not None:
        image_list, image_count = get_image_list(args.image_dir)
        for i in range(image_count):
            image_filepath = image_list[i]
            logger.info("Image: {}".format(image_filepath))

            if args.model_name == Models.tf_lite:
                model_type = 'tf_lite'