Example #1
        # grab the estimated position of the object from the tracker
        pos = tracker.get_position()

        # unpack the position object
        startX = int(pos.left())
        startY = int(pos.top())
        endX = int(pos.right())
        endY = int(pos.bottom())

        # add the bounding box coordinates to the rectangles list
        rects.append((startX, startY, endX, endY))

    # check to see if we should look for new detections
    if totalFrames % args["skip_frames"] == 0:
        # detect people
        results = detect_people(frame,
                                net,
                                ln,
                                personIdx=LABELS.index("person"))

        for r in results:
            # unpack the bounding box returned by detect_people
            (startX, startY, endX, endY) = r[1]

            # compute the centroid of the bounding box
            r_cX = int((startX + endX) / 2.0)
            r_cY = int((startY + endY) / 2.0)
            r_c = (r_cX, r_cY)

            # assume a new tracker is needed until the detection
            # matches an object already being tracked
            new_tracker = True
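
Both examples call a detect_people helper that is not shown in the snippets. A minimal sketch of such a helper, assuming a YOLO model loaded through cv2.dnn and a return format of (confidence, bounding box, centroid) tuples, which is inferred from how r[1] is indexed above; the MIN_CONF and NMS_THRESH values are likewise assumptions:

import cv2
import numpy as np

# assumed thresholds; tune for your model
MIN_CONF = 0.3
NMS_THRESH = 0.3

def detect_people(frame, net, ln, personIdx=0):
    # hypothetical sketch of the helper used above: run one YOLO
    # forward pass and keep only "person" detections
    (H, W) = frame.shape[:2]
    results = []

    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)

    boxes, centroids, confidences = [], [], []
    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            if classID == personIdx and confidence > MIN_CONF:
                # YOLO reports the box center plus width/height,
                # scaled relative to the image size
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))

    # suppress weak, overlapping boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CONF, NMS_THRESH)
    if len(idxs) > 0:
        for i in np.array(idxs).flatten():
            (x, y, w, h) = boxes[i]
            # (confidence, (startX, startY, endX, endY), centroid)
            results.append((confidences[i], (x, y, x + w, y + h),
                            centroids[i]))
    return results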
Example #2
    if not ret:
        break

    image = cv2.resize(image, (720, 640))
    # cv2.namedWindow("output", cv2.WINDOW_NORMAL)

    (H, W) = image.shape[:2]
    ln = net.getLayerNames()
    # getUnconnectedOutLayers() returns 1-element arrays in OpenCV < 4.5.4
    # and plain scalars afterwards; flatten() handles both cases
    ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
    blob = cv2.dnn.blobFromImage(image, 
                                1/255.0, 
                                (416, 416), 
                                swapRB=True, 
                                crop=False)

    results = detect_people(image, net, ln,
                            personIdx=LABELS.index("person"))

    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    print("Time taken to predict the image: {:.6f}seconds".format(end-start))
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs and the detections in each
    for output in layerOutputs:
        for detection in output:
            # the first five values are the box geometry and objectness;
            # the remaining values are the per-class scores
            scores = detection[5:]
            classID = np.argmax(scores)
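
The snippet cuts off inside the parsing loop. A conventional continuation, sketched here with assumed confidence (0.5) and NMS (0.3) thresholds, keeps only confident detections, rescales the boxes to image coordinates, and then applies non-maximum suppression:

            confidence = scores[classID]

            # keep only confident detections (0.5 is an assumed threshold)
            if confidence > 0.5:
                # YOLO reports the box center plus width/height relative
                # to the input size; scale back to image coordinates
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # suppress weak, overlapping boxes (thresholds are assumptions)
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)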