def thread2_function(name):
    """Consumer thread: drain frames from the shared queue ``Q`` and run
    YOLO inference on them.

    For every frame processed, 14 queued frames are discarded so the
    consumer keeps pace with the producer while recording.  When bounding
    boxes are enabled the annotated frame is shown in a window; pressing
    'q' prints the average processed-frame FPS and stops the thread.

    Args:
        name: thread identifier, used only for log messages.
    """
    global Q

    logging.info("Thread %s: starting ", name)

    # Build and load the detector from the CLI options.
    detector = yoloDetection(args["yolo"], args["input"], args["confidence"],
                             args["threshold"], args["bbox"])
    detector.prepareModel()

    start = datetime.datetime.now()
    num_frames = 0  # frames actually run through the detector

    while True:
        # Drop 14 frames per processed frame so the queue does not grow
        # unboundedly while recording.
        for _ in range(14):
            Q.get()

        frame = Q.get()

        print("Thread 2: starting ", Q.qsize())

        num_frames += 1
        out = detector.runInference(frame)

        if args["bbox"] is True:
            cv2.imshow('detector', out)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                # Bug fix: the original printed `numF`, a name that was
                # never defined (the counter had been commented out),
                # raising NameError here.  Guard against a zero-length
                # interval as well.
                elapsed = (datetime.datetime.now() - start).total_seconds()
                print("FPS: ",
                      num_frames / elapsed if elapsed > 0 else 0.0)
                break
        else:
            print(out)

    logging.info("Thread %s: finishing", name)
# --- Ejemplo n.º 2 (paste-site scraper artifact: snippet header + vote count "0") ---
                type=float,
                default=0.5,
                help="minimum probability to filter weak detections")
# Non-maxima-suppression threshold used when merging overlapping boxes.
# (Typo fix in the help text: "applyong" -> "applying".)
ap.add_argument("-t",
                "--threshold",
                type=float,
                default=0.3,
                help="threshold when applying non-maxima suppression")
# NOTE(review): argparse stores whatever string is passed to -b/--bbox, and
# any non-empty string is truthy, so `args["bbox"] is True` checks elsewhere
# match only the default.  Confirm intended CLI semantics before switching
# to action="store_true".
ap.add_argument("-b",
                "--bbox",
                required=False,
                default=True,
                help="turn on bounding box")
args = vars(ap.parse_args())

# Build the detector from the CLI options and load the network weights.
detector = yoloDetection(args["yolo"], args["input"], args["confidence"],
                         args["threshold"], args["bbox"])
detector.prepareModel()

# Directory of test images; `images` holds bare filenames relative to imgPath.
imgPath = 'images/'
images = os.listdir(imgPath)

# TODO: iterate over `images`, run detector.runInference on each, and either
# display (when args["bbox"] is True) or print the result.  A draft of this
# loop existed only as commented-out code and was removed.