Code example #1
0
def main(_argv):
    """Run object detection on a video file and optionally save the annotated result.

    Required flags: ``model`` (SavedModel directory), ``video`` (input path),
    ``labels`` (label-map pbtxt). Optional flags: ``output`` (annotated video
    path), ``threshold`` (minimum detection score).
    """
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("video")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    start_time = time.time()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    start_time = time.time()
    cap = cv2.VideoCapture(FLAGS.video)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = None
    if FLAGS.output:
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*"XVID")
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    while True:
        _, img = cap.read()

        if img is None:
            # End of the video (or a decode failure): stop processing.
            logging.warning("Empty Frame")
            break

        # Model expects a batched image tensor: (1, H, W, 3).
        image_tensor = np.expand_dims(img, axis=0)
        detections = model(image_tensor)

        # Index [0] strips the batch dimension added above.
        boxes = detections["detection_boxes"][0].numpy()
        classes = detections["detection_classes"][0].numpy().astype(np.int32)
        scores = detections["detection_scores"][0].numpy()

        output_image = draw_boxes(
            img.copy(),
            boxes,
            classes,
            scores,
            labels,
            height,
            width,
            min_threshold=FLAGS.threshold,
        )

        output_image = cv2.resize(output_image, (width, height))
        cv2.imshow("Object Detection", output_image)
        if out is not None:
            out.write(output_image)
        # Press "q" to quit early.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Release all resources: the writer was previously leaked, which can
    # drop buffered frames from the output file; windows were never closed.
    cap.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
    end_time = time.time()
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")
Code example #2
0
def main(_argv):
    """Run object detection on an RTSP camera stream, optionally recording it.

    Required flags: ``model`` (SavedModel directory), ``ip`` (camera address),
    ``labels`` (label-map pbtxt). Optional flags: ``port``, ``username``,
    ``password``, ``output`` (annotated video path), ``threshold``.
    """
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("ip")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    stream_url = f"rtsp://{FLAGS.ip}:{FLAGS.port}/h264_ulaw.sdp"
    if FLAGS.username and FLAGS.password:
        stream_url = f"rtsp://{FLAGS.username}:{FLAGS.password}@{FLAGS.ip}:{FLAGS.port}/h264_ulaw.sdp"

    start_time = time.time()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    start_time = time.time()
    cap = VideoStream(cam=stream_url).start()

    width = int(cap.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = None
    if FLAGS.output:
        # BUG fix: `cap` is a VideoStream wrapper, not a cv2.VideoCapture;
        # the underlying capture is `cap.stream` (as used for width/height).
        fps = int(cap.stream.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*"XVID")
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    while True:
        img = cap.read()
        if img is None:
            # A threaded stream can briefly yield no frame (e.g. at startup
            # or on a network hiccup); skip instead of feeding None to the model.
            logging.warning("Empty Frame")
            continue

        # Model expects a batched image tensor: (1, H, W, 3).
        image_tensor = np.expand_dims(img, axis=0)
        detections = model(image_tensor)

        # Index [0] strips the batch dimension added above.
        boxes = detections["detection_boxes"][0].numpy()
        classes = detections["detection_classes"][0].numpy().astype(np.int32)
        scores = detections["detection_scores"][0].numpy()

        output_image = draw_boxes(
            img.copy(),
            boxes,
            classes,
            scores,
            labels,
            height,
            width,
            min_threshold=FLAGS.threshold,
        )
        cv2.imshow("Object Detection", cv2.resize(output_image, (800, 600)))
        if out is not None:
            out.write(output_image)
        # Press "q" to quit.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Release all resources: the writer was previously leaked, which can
    # drop buffered frames from the output file; windows were never closed.
    cap.stop()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
    end_time = time.time()
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")
Code example #3
0
def get_security_cam_arguments(flag: "FLAGS") -> tuple:
    """Parse security-camera arguments from the JSON config file.

    Args:
        flag ("FLAGS"): command line arguments; only ``flag.config`` (path to
            the JSON config file) is read.

    Raises:
        Exception: Error when config file cannot be loaded or a required
            key is missing.

    Returns:
        tuple: (config file args in form of dictionary, labels as dictionary)
    """
    try:
        with open(flag.config, "r") as f:
            cfg = json.load(f)
        args = {}
        cam_args = cfg["camera"]
        net = cam_args["network"]
        if net.get("ip") is not None:
            # Build the stream URL from the template, e.g.
            # "rtsp://<username>:<password>@<ip>:<port>/...".
            stream_url = net["url_format"]
            stream_url = stream_url.replace("<ip>", net["ip"])
            stream_url = stream_url.replace("<port>", net["port"])
            username = net["username"]
            password = net["password"]
            # BUG fix: the old condition
            #   (username and password) or (username != "" and password != "")
            # took the credential branch when both were None (None != "" is
            # True), crashing str.replace() with a non-string argument.
            # Substitute credentials only when both are truthy.
            if username and password:
                stream_url = stream_url.replace("<username>", username)
                stream_url = stream_url.replace("<password>", password)
            else:
                stream_url = stream_url.replace("<username>:<password>@", "")
            args["cam"] = stream_url
        else:
            # No network camera configured: fall back to a local device id.
            args["cam"] = cam_args["id"]
        detector = cfg["detector"]
        args["model"] = detector["detector_model_path"]
        args["fps"] = detector["fps"]
        args["res"] = detector["resolution"]
        args["recording_save_path"] = detector["recording_save_path"]
        args["temp_dir"] = detector["temp_dir"]
        args["neglect_categories"] = detector["neglect_categories"]
        args["min_threshold"] = detector["min_threshold"]
        args["min_recording_time"] = detector["min_recording_time"]
        args["wait_after_message_send"] = cfg["notifier"][
            "wait_after_message_send"]
        args["send_mail_to"] = cfg["notifier"]["send_mail_to"]
        labels = read_label_pbtxt(detector["labels_path"])
        return args, labels
    except Exception as e:
        # Chain the cause so the original traceback is preserved, and build a
        # real message instead of the previous tuple-style Exception args.
        raise Exception(f"An error occurred while loading config: {e}") from e
Code example #4
0
def main(_argv):
    """Run object detection on a single image, display it, and optionally save it.

    Required flags: ``model`` (SavedModel directory), ``image`` (input path),
    ``labels`` (label-map pbtxt). Optional flags: ``output`` (annotated image
    path), ``threshold`` (minimum detection score).
    """
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("image")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    start_time = time.time()
    tf.keras.backend.clear_session()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    image_np = load_image(FLAGS.image)
    # Model expects a batched image tensor: (1, H, W, 3).
    image_tensor = np.expand_dims(image_np, axis=0)
    image_tensor = preprocess_input(image_tensor)
    height, width, _ = image_np.shape
    start_time = time.time()
    detections = model(image_tensor)
    end_time = time.time()

    # Index [0] strips the batch dimension added above.
    boxes = detections["detection_boxes"][0].numpy()
    classes = detections["detection_classes"][0].numpy().astype(np.int32)
    scores = detections["detection_scores"][0].numpy()

    output_image = draw_boxes(
        image_np.copy(),
        boxes,
        classes,
        scores,
        labels,
        height,
        width,
        min_threshold=FLAGS.threshold,
    )

    # Swap channel order for OpenCV (assumes load_image returns RGB — the
    # BGR<->RGB swap is symmetric, so COLOR_BGR2RGB performs it either way).
    output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
    if FLAGS.output:
        # Guarded like the video scripts: only write when an output path is set
        # (cv2.imwrite with a None path would raise).
        cv2.imwrite(FLAGS.output, output_image)
    cv2.imshow("Object Detection", output_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")