Code Example #1
def process(video_in, pipeline, video_out=None, headless=True, inference=True):
    """Run video through pipeline"""
    while video_in.isOpened():
        ok, frame = video_in.read()
        if not ok:
            logger.error('Failed to read frame (end of stream?)')
            break

        # quit
        if not headless:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # run the pipeline on this frame
        tracks = pipeline.forward(frame)
        boxes = [track['box'] for track in tracks]
        boxes = [scale_box(box, frame) for box in boxes]
        logger.debug("boxes: %s", boxes)
        frame = draw_boxes(frame, boxes)

        # inference mode - write data to api
        if inference:
            write_api(tracks)

        # display resulting frame
        if not headless:
            cv2.imshow('Video', frame)

        # Write the frame into output file
        if video_out:
            video_out.write(frame)
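For context, here is one way this `process` function might be driven end to end. This is a sketch under stated assumptions: `Pipeline`, `scale_box`, and `draw_boxes` below are hypothetical stand-ins for helpers the excerpt does not show, and boxes are assumed to be normalized xyxy coordinates.

# Hypothetical driver for process(); Pipeline, scale_box and draw_boxes are
# stand-ins (assuming normalized xyxy boxes), not the project's own helpers.
import cv2
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

class Pipeline:
    def forward(self, frame):
        # a real pipeline would run detection/tracking here
        return []

def scale_box(box, frame):
    # scale a normalized [x1, y1, x2, y2] box to pixel coordinates
    h, w = frame.shape[:2]
    x1, y1, x2, y2 = box
    return [int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)]

def draw_boxes(frame, boxes):
    for x1, y1, x2, y2 in boxes:
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return frame

if __name__ == "__main__":
    video_in = cv2.VideoCapture("input.mp4")
    fps = video_in.get(cv2.CAP_PROP_FPS) or 30
    size = (int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    video_out = cv2.VideoWriter("out.avi", cv2.VideoWriter_fourcc(*"XVID"), fps, size)
    process(video_in, Pipeline(), video_out=video_out, headless=True, inference=False)
    video_in.release()
    video_out.release()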
Code Example #2
def main(_argv):
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("video")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    start_time = time.time()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    start_time = time.time()
    cap = cv2.VideoCapture(FLAGS.video)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = None
    if FLAGS.output:
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*"XVID")
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    while True:
        ok, img = cap.read()
        if not ok or img is None:
            logging.warning("Empty frame: end of stream reached")
            break

        image_tensor = np.expand_dims(img, axis=0)
        detections = model(image_tensor)

        boxes = detections["detection_boxes"][0].numpy()
        classes = detections["detection_classes"][0].numpy().astype(np.int32)
        scores = detections["detection_scores"][0].numpy()

        output_image = draw_boxes(
            img.copy(),
            boxes,
            classes,
            scores,
            labels,
            height,
            width,
            min_threshold=FLAGS.threshold,
        )

        output_image = cv2.resize(output_image, (width, height))
        cv2.imshow("Object Detection", output_image)
        if out:
            out.write(output_image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.release()
    if out:
        out.release()
    cv2.destroyAllWindows()
    end_time = time.time()
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")
Code Example #3
def main(_argv):
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("ip")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    stream_url = f"rtsp://{FLAGS.ip}:{FLAGS.port}/h264_ulaw.sdp"
    if FLAGS.username and FLAGS.password:
        stream_url = f"rtsp://{FLAGS.username}:{FLAGS.password}@{FLAGS.ip}:{FLAGS.port}/h264_ulaw.sdp"

    start_time = time.time()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    start_time = time.time()
    cap = VideoStream(cam=stream_url).start()

    width = int(cap.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = None
    if FLAGS.output:
        fps = int(cap.stream.get(cv2.CAP_PROP_FPS))  # cap is a VideoStream; the capture object is cap.stream
        codec = cv2.VideoWriter_fourcc(*"XVID")
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    while True:
        img = cap.read()
        if img is None:  # stream not ready yet or frame dropped
            continue
        image_tensor = np.expand_dims(img, axis=0)
        detections = model(image_tensor)

        boxes = detections["detection_boxes"][0].numpy()
        classes = detections["detection_classes"][0].numpy().astype(np.int32)
        scores = detections["detection_scores"][0].numpy()

        output_image = draw_boxes(
            img.copy(),
            boxes,
            classes,
            scores,
            labels,
            height,
            width,
            min_threshold=FLAGS.threshold,
        )
        cv2.imshow("Object Detection", cv2.resize(output_image, (800, 600)))
        if out:
            out.write(output_image)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.stop()
    if out:
        out.release()
    cv2.destroyAllWindows()
    end_time = time.time()
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")
Code Example #4
def main(_argv):
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("image")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    start_time = time.time()
    tf.keras.backend.clear_session()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    image_np = load_image(FLAGS.image)
    image_tensor = np.expand_dims(image_np, axis=0)
    image_tensor = preprocess_input(image_tensor)
    height, width, _ = image_np.shape
    start_time = time.time()
    detections = model(image_tensor)
    end_time = time.time()

    boxes = detections["detection_boxes"][0].numpy()
    classes = detections["detection_classes"][0].numpy().astype(np.int32)
    scores = detections["detection_scores"][0].numpy()

    output_image = draw_boxes(
        image_np.copy(),
        boxes,
        classes,
        scores,
        labels,
        height,
        width,
        min_threshold=FLAGS.threshold,
    )

    # load_image returned RGB; OpenCV writes BGR, so swap the channels back
    output_image = cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(FLAGS.output, output_image)
    cv2.imshow("Object Detection", output_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    logging.info(f"Elapsed time: {end_time - start_time:.2f} sec")
Code Example #5
                        "-p",
                        type=str,
                        action='store',
                        help='Path to detector model file')

    args = parser.parse_args()

    # read image
    image = cv2.imread(args.input_filepath)
    # preprocess
    pp_image = cv2.resize(image, (224, 224))
    pp_image = cv2.cvtColor(pp_image, cv2.COLOR_BGR2RGB)
    # detect
    detector = Detector(args.model_path)
    t0 = perf_counter()
    boxes = detector.detect(pp_image)
    logger.info("detect timer: %f" % (perf_counter() - t0))

    # scale boxes back to image size
    def scale_box(box, width, height):
        x, y, w, h = box  # coco format
        return [x * width, y * height, w * width, h * height]

    height, width, _ = image.shape  # numpy shape is (rows, cols, channels)
    boxes = [scale_box(box, width, height) for box in boxes]
    # draw
    boxes = [xywh_to_xyxy(box) for box in boxes]  # convert to xyxy format
    image = draw_boxes(image, boxes, GREEN)
    # save
    cv2.imwrite(args.output_filepath, image)
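The small helpers this example leans on are easy to reconstruct. The stand-ins below assume COCO-style normalized [x, y, w, h] boxes from the detector; they are illustrations, not the project's originals.

# Assumed helpers for the snippet above.
import cv2

GREEN = (0, 255, 0)

def xywh_to_xyxy(box):
    # COCO [x, y, w, h] -> corner [x1, y1, x2, y2]
    x, y, w, h = box
    return [x, y, x + w, y + h]

def draw_boxes(image, boxes, color):
    for x1, y1, x2, y2 in boxes:
        cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
    return image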
Code Example #6
File: predict.py  Project: ttorkar/YOLOdrone
def _main_(args):

    config_path = args.conf
    weights_path = args.weights
    image_path = args.input

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gpu=1)  #One GPU for prediction

    ###############################
    #   Load trained weights
    ###############################

    print('Weights Path: ', weights_path)
    yolo.load_weights(weights_path)

    ###############################
    #   Predict bounding boxes
    ###############################

    if image_path[-4:] in ('.mp4', '.avi'):
        video_out = image_path[:-4] + '_detected' + image_path[-4:]

        video_reader = cv2.VideoCapture(image_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'), 50.0,
                                       (frame_w, frame_h))

        for _ in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            boxes = yolo.predict(image)
            image = draw_boxes(image, boxes, config['model']['labels'])

            video_writer.write(np.uint8(image))

        video_reader.release()
        video_writer.release()
    else:
        image = cv2.imread(image_path)
        boxes = yolo.predict(image)
        image = draw_boxes(image, boxes, config['model']['labels'])

        print(len(boxes), 'boxes found')

        cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], image)
        cv2.imshow('Prediction', image)
        cv2.waitKey(0)
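The snippet receives `args` with `conf`, `weights`, and `input` attributes. A plausible parser that would feed `_main_` looks like this; the flag names are inferred from the attribute accesses above, so the project's actual CLI may differ.

# Hypothetical CLI wiring for _main_; flag names are inferred, not confirmed.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run YOLO prediction')
    parser.add_argument('-c', '--conf', required=True, help='path to config JSON')
    parser.add_argument('-w', '--weights', required=True, help='path to trained weights')
    parser.add_argument('-i', '--input', required=True, help='image or video (.mp4/.avi)')
    _main_(parser.parse_args())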
Code Example #7
File: app.py  Project: tarun-bisht/security-camera
def main(_argv):
    # helper variables
    record = False
    num_categories = 4
    next_active = [0] * num_categories
    record_active = 0
    freq = cv2.getTickFrequency()
    tmp_folder = None
    frame_count = 0

    # parse config file arguments
    args, security_cam_category_index = get_security_cam_arguments(FLAGS)
    min_threshold = args.get("min_threshold")
    fps = args.get("fps")
    wait_time = args.get("wait_after_message_send")
    min_record_time = args.get("min_recording_time")
    temp_path = args.get("temp_dir")
    record_path = args.get("recording_save_path")
    recipients = args.get("send_mail_to")
    neglect_categories = args.get("neglect_categories")
    tf.keras.backend.clear_session()
    model = tf.saved_model.load(args.get("model"))
    logging.info("...model loaded...")

    # collect the ids of categories to ignore (not detect) from the model's label map
    neglect = []
    if neglect_categories:
        for key, value in security_cam_category_index.items():
            if value["name"] in neglect_categories:
                neglect.append(value["id"])

    # create a temp directory to hold frames when an intruder is detected, so a video can be assembled later
    os.makedirs(temp_path, exist_ok=True)

    # initiating stream capture
    cap = VideoStream(resolution=args.get("res"), cam=args.get("cam")).start()

    width = int(cap.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # recover videos whose frames are still in the temp folder because a
    # previous run failed (e.g. a system crash) before the video was written
    recover_videos(temp_path, record_path, fps, resolution=(width, height))

    # initiating mailer
    mailer = SendMail(mail_id=EMAIL_ADDRESS,
                      password=EMAIL_PASSWORD,
                      recipients=recipients)

    # detect intruder in stream
    while True:
        t1 = cv2.getTickCount()
        img = cap.read()
        if img is None:  # stream not ready yet or frame dropped
            continue
        image_tensor = np.expand_dims(img, axis=0)
        detections = model(image_tensor)
        boxes = detections["detection_boxes"][0].numpy()
        classes = detections["detection_classes"][0].numpy().astype(np.int32)
        scores = detections["detection_scores"][0].numpy()

        # draw bounding box in frame
        frame = img.copy()
        frame = draw_boxes(
            frame,
            boxes,
            classes,
            scores,
            security_cam_category_index,
            height,
            width,
            min_threshold=min_threshold,
            put_label=False,
        )

        # when a new category is detected, send a notification mail and start
        # recording; recording continues as long as the category keeps being
        # detected, and a mail is sent for every new category.
        category = int(
            get_category_index(scores, classes, min_threshold=min_threshold))
        if category not in neglect and 0 < category <= num_categories:
            if time.time() > next_active[category - 1]:
                next_active[category - 1] = time.time() + wait_time
                class_name = security_cam_category_index[category]["name"]
                mailer.send_mail(create_mail_msg(class_name),
                                 get_image_bytes(frame))
            if tmp_folder is None:
                tmp_folder = os.path.join(temp_path, f"{secrets.token_hex(8)}")
                os.makedirs(tmp_folder, exist_ok=True)
                frame_count = 0
            record = True
            record_active = time.time() + min_record_time

        # print local time to frame
        local_time = time.strftime("%a %d-%b-%Y %H:%M:%S", time.localtime())
        cv2.putText(
            frame,
            f"{local_time}",
            (10, 25),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.4,
            (0, 255, 255),
            1,
            cv2.LINE_AA,
        )
        # calculate fps and print it to frame
        t2 = cv2.getTickCount()
        t = (t2 - t1) / freq
        cv2.putText(
            frame,
            f"FPS: {1 / t:.2f}".format(),
            (10, 50),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.4,
            (255, 255, 0),
            1,
            cv2.LINE_AA,
        )
        # saving recording frames
        if record and tmp_folder is not None:
            cv2.imwrite(
                os.path.join(tmp_folder,
                             f"{frame_count:08}_{secrets.token_hex(4)}.jpg"),
                frame,
            )
            if time.time() > record_active:
                logging.info("...recording stopped, generating video...")
                record = False
                # create video from frames in a new thread
                SaveVideo(tmp_folder, record_path, fps, (width, height)).save()
                tmp_folder = None
            frame_count += 1

        # if live is enabled, show the UI
        if FLAGS.live:
            cv2.imshow("Intelligent Security Camera",
                       cv2.resize(frame, (800, 600)))
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    cap.stop()
    cv2.destroyAllWindows()
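`get_category_index` is not shown; from its use above it appears to return the class id of the highest-scoring detection over the threshold, with 0 meaning nothing qualified (0 falls outside the `0 < category <= num_categories` check). A hedged guess at that contract:

# A hedged guess at get_category_index; the real helper may differ.
import numpy as np

def get_category_index(scores, classes, min_threshold=0.5):
    keep = scores >= min_threshold
    if not np.any(keep):
        return 0  # sentinel: no detection above the threshold
    best = np.argmax(np.where(keep, scores, -1.0))
    return classes[best]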