Code Example #1
def follow_object(objectID, centroid, totalUp, totalDown):
    # fetch the trackable object for this ID, creating one if needed
    to = trackableObjects.get(objectID, None)
    if to is None:
        to = TrackableObject(objectID, centroid)
    else:
        # the difference between the current y-coordinate and the mean of
        # the previous y-coordinates gives the direction of movement
        y = [c[1] for c in to.centroids]
        direction = centroid[1] - np.mean(y)
        to.centroids.append(centroid)
        if not to.counted:
            # this variant gates counting on the x-position relative to W // 4
            if direction < 0 and centroid[0] < W // 4:
                totalUp += 1
                to.counted = True
            elif direction > 0 and centroid[0] > W // 4:
                totalDown += 1
                to.counted = True
    trackableObjects[objectID] = to
    return totalUp, totalDown
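This and the later examples construct a `TrackableObject` without ever showing its definition. A minimal sketch, consistent with how `.centroids` and `.counted` are used above (the upstream class in some projects also carries extra fields such as `directiontext`), might look like this:

class TrackableObject:
    def __init__(self, objectID, centroid):
        # ID assigned by the centroid tracker
        self.objectID = objectID
        # history of centroids, seeded with the current one
        self.centroids = [centroid]
        # whether this object has already been counted
        self.counted = False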
Code Example #2
    def associate_tracked_objects(self, rects, ct, frame, H, W, writer):
        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():
            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0:
                        self.countExit += 1
                        to.counted = True

                    elif direction > 0:
                        self.countEnter += 1
                        to.counted = True

            self.trackableObjects[objectID] = to

        # display the count of people who entered & exited the store/frame

        info = [
            ("# Exited", self.countExit),
            ("# Entered", self.countEnter),
        ]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (W - 170, (i * 20) + 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        # Bounding boxes on the tracked detections
        for rect in rects:
            (startX, startY, endX, endY) = rect
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255),
                          2)

        if writer is not None:
            writer.write(frame)

        self.totalFrames += 1
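The method above reads several instance attributes that are initialized elsewhere in its class. A hypothetical initializer covering just those attributes (the class name and any extra state are assumptions) could be:

class PeopleCounter:
    # hypothetical owner of associate_tracked_objects; only the attributes
    # referenced by that method are set up here
    def __init__(self):
        self.trackableObjects = {}  # objectID -> TrackableObject
        self.countEnter = 0         # objects that moved downwards
        self.countExit = 0          # objects that moved upwards
        self.totalFrames = 0        # frames processed so far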
Code Example #3
def processVideo(prototxt, model, filepath):
    print("[INFO] Filepath: " + filepath)
    print("[INFO] model: " + model)
    print("[INFO] prototxt: " + prototxt)
    outputPath = "./userapp/output.avi"
    skipframes = 30
    conf = 0.4
    # construct the argument parse and parse the arguments
    # 	ap = argparse.ArgumentParser()
    # 	ap.add_argument(prototxt)
    # 	ap.add_argument(model)
    # 	ap.add_argument(filepath)
    # 	# ap.add_argument("-o", "--output", type=str,
    # 	# 	help="path to optional output video file")
    # 	ap.add_argument("-c", "--confidence", type=float, default=0.4,
    # 		help="minimum probability to filter weak detections")
    # 	ap.add_argument("-s", "--skip-frames", type=int, default=30,
    # 		help="# of skip frames between detections")
    # 	args = vars(ap.parse_args())
    # print("[INFO] Starting2.....")

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(prototxt, model)

    # if a video path was not supplied, grab a reference to the webcam
    # if not args.get("input", False):
    # 	print("[INFO] starting video stream...")
    # 	vs = VideoStream(src=0).start()
    # 	time.sleep(2.0)

    # otherwise, grab a reference to the video file
    # else:
    # 	print("[INFO] opening video file...")
    # 	vs = cv2.VideoCapture(args["input"])
    vs = cv2.VideoCapture(filepath)

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        print("[Info] Filepath is" + filepath)
        frame = vs.read()

        frame = frame[1] if filepath else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video

        if filepath is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if outputPath is not None and writer is None:
            print("[INFO] initializing video writer: " + outputPath)
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(outputPath, fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        print("[INFO] loading model...after vs")
        rects = []
        print("[INFO] loading model...after cv2")

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % skipframes == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > conf:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        # cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    print("[INFO] Total Up Count: " + str(totalUp))

    # release the video file pointer
    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
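Each example hands its bounding boxes to `ct.update(rects)` on a `CentroidTracker`. The real class (apparently the one from the pyimagesearch people-counter code, judging by the `maxDisappeared`/`maxDistance` arguments) also handles registration limits, disappearance and distance gating; the following is only a stripped-down sketch of the interface these snippets rely on:

from collections import OrderedDict

import numpy as np
from scipy.spatial import distance as dist


class SimpleCentroidTracker:
    """Stripped-down stand-in for the CentroidTracker used above: it keeps an
    objectID -> centroid mapping and snaps each known object to its nearest
    new detection. Registration limits, disappearance handling and distance
    gating of the real class are deliberately omitted."""

    def __init__(self):
        self.nextObjectID = 0
        self.objects = OrderedDict()  # objectID -> (cX, cY)

    def update(self, rects):
        # turn (startX, startY, endX, endY) boxes into centroids
        inputCentroids = [((sX + eX) // 2, (sY + eY) // 2)
                          for (sX, sY, eX, eY) in rects]

        # nothing detected: keep the current objects unchanged
        if len(inputCentroids) == 0:
            return self.objects

        # nothing tracked yet: register every detection
        if len(self.objects) == 0:
            for c in inputCentroids:
                self.objects[self.nextObjectID] = c
                self.nextObjectID += 1
            return self.objects

        # otherwise match each existing object to its closest new centroid
        objectIDs = list(self.objects.keys())
        D = dist.cdist(np.array(list(self.objects.values())),
                       np.array(inputCentroids))
        for row, objectID in enumerate(objectIDs):
            self.objects[objectID] = inputCentroids[int(D[row].argmin())]
        return self.objects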
Code Example #4
            # only proceed if the radius meets a minimum size
            if radius > args["radius"]:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255),
                           2)
                cv2.circle(frame, center[-1], 5, (0, 0, 255), -1)
                rects.append((x - radius, y - radius, x + radius, y + radius))
        # use centroidtracker to associate detected objects with objects in previous frame
        objects = ct.update(rects)
        # use trackable object to plot object trail
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid, args["buffer"])
                to.deque.appendleft(centroid)
            else:
                to.centroids.append(centroid)
                to.deque.appendleft(centroid)

            trackableObjects[objectID] = to
            for j in range(1, len(to.deque)):
                if to.deque[j - 1] is None or to.deque[j] is None:
                    continue
                thickness = int(np.sqrt(args["buffer"] / float(j + 1)) * 2.5)
                cv2.line(frame, (to.deque[j - 1][0], to.deque[j - 1][1]),
                         (to.deque[j][0], to.deque[j][1]), (0, 0, 255),
                         thickness)
    # show the frame to our screen
    cv2.imshow("Frame", frame)
Code Example #5
	up_arrowpt = (W // 2, H // 2 - 30)

	center_pt2 = (W // 2, H // 2 + 8)
	down_arrowpt = (W // 2, H // 2 + 30)

	cv2.arrowedLine(frame, center_pt1, up_arrowpt, (0,0,255), 2)
	cv2.arrowedLine(frame, center_pt2, down_arrowpt, (255,0,0), 2)

	objects = ct.update(rects)

	for (objectID, centroid) in objects.items():

		to = trackableObjects.get(objectID, None)

		if to is None:
			to = TrackableObject(objectID, centroid)

		else:

			y = [c[1] for c in to.centroids]
			direction = centroid[1] - np.mean(y)
			to.centroids.append(centroid)

			if not to.counted:
				if direction < 0 and centroid[1] < H // 2:
					totalUp += 1
					to.directiontext = "(up)"
					to.counted = True
              

				elif direction > 0 and centroid[1] > H // 2:
					totalDown += 1
					to.directiontext = "(down)"
					to.counted = True
Code Example #6
File: backbone.py Project: laoserra/serving
def processor(img, h, w):
    global tracker_list
    global max_detection
    global min_detection
    global track_id_list
    global sum_unmatched_dets
    global totalDown
    global totalUp
    global id_counter
    global n_frame
    global horizontal
    global tracker_dict

    # declare either a horizontal or a vertical line for pedestrian counting
    if horizontal:
        loi = h
        coord = 1
    else:
        loi = w
        coord = 0

    n_frame = n_frame + 1
    # get the detections bounding boxes
    # get_localization function runs the tf detection
    z_box_raw = det.get_localization(img)
    z_box = z_box_raw[0]
    output_dict = z_box_raw[1]
    # initiate the tracker list
    x_box = []

    if len(tracker_list) > 0:
        for trk in tracker_list:
            # add tracking boxes from the previous frames
            # this list is updated constantly; unmatched tracks are eventually deleted
            x_box.append(trk.box)

    # call the function
    matched, unmatched_dets, unmatched_trks = assign_detections_to_trackers(
        x_box, z_box, iou_thrd=0.3)
    # calculate the total number of objects that appeared in the footage (roughly),
    # treating each unmatched detection as a new object appearing in the frame

    # matched detections
    # not entering this part from the beginning (matched size is 0 at first run)
    if matched.size > 0:
        for det_idx, trk_idx in matched:
            # extract the boxes of matched detected objects only
            z = z_box[det_idx]
            z = np.expand_dims(z, axis=0).T
            # extract the boxes of matched tracked objects only
            tmp_trk = tracker_list[trk_idx]
            # Apply kalman filter with update part
            tmp_trk.kalman_filter(z)
            # extract bounding boxes XX
            xx = tmp_trk.x_state.T[0].tolist()
            # extract positions only (up, left, down, right)
            xx = [xx[0], xx[2], xx[4], xx[6]]
            # write KF output box to the tracked boxes array
            x_box[trk_idx] = xx
            tmp_trk.box = xx
            # number of detection matches
            tmp_trk.hits += 1

    # unmatched detections
    # the code enters this part first because first detections are unmatched by default (no tracking values)
    if len(unmatched_dets) > 0:
        # loop over unmatched detections
        for idx in unmatched_dets:
            z = z_box[idx]
            z = np.expand_dims(z, axis=0).T
            # call the object "Tracker"
            tmp_trk = tracking_layer.Tracker()  # new tracker
            # create array with the bounding boxes locations and the velocities
            # velocity is set to 0 if the detection is not matched
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T

            # assign the state array for Kalman filter
            tmp_trk.x_state = x
            # predict the tracking boxes and velocities using predict_only
            # function different from KF (no update part)
            tmp_trk.predict_only()
            # write the predicted values into XX variable
            xx = tmp_trk.x_state
            # Transpose the array
            xx = xx.T[0].tolist()
            # extract the locations only (up, left, down, right)
            xx = [xx[0], xx[2], xx[4], xx[6]]
            # list to store the coordinates for a bounding box
            tmp_trk.box = xx
            # assign the ID to the tracked box
            tmp_trk.id = id_counter
            id_counter = id_counter + 1
            # add the tracking box into the tracker list (in the binary form for cv2 draw)
            tracker_list.append(tmp_trk)
            # add the tracking box into the tracker list (in the integer form)
            x_box.append(xx)

    # unmatched tracks
    # these tracks accumulate losses; once no_losses exceeds max_detection they are dropped and no longer drawn
    if len(unmatched_trks) > 0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            # add a number of unmatched tracks (track loss)
            tmp_trk.no_losses += 1
            # function different from KF (no update part)
            # these values are not used for further computations, so they could be deleted
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx = [xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            x_box[trk_idx] = xx

    good_tracker_list = []

    # loop over all tracked list
    for trk in tracker_list:
        # if within a constraint
        if ((trk.hits >= min_detection) and (trk.no_losses <= max_detection)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            #convert boxes to their centroid values
            center = convert_boxes_to_centroids(x_cv2)
            #append object ID as keys and centers as values to the dictionary
            tracker_dict.setdefault(trk.id, []).append(center)
            #draw the box on frame
            img = tracking_utils.draw_box_label(trk.id, img, x_cv2)

    #update tracks every 100 frames
    # if n_frame == 25:
    # with open(PATH_TO_SAVE_TRACKS+'/'+output_tracks_name + '.json', 'w') as fp:
    # json.dump(tracker_dict, fp)
    # n_frame = 0;

    # iterate through the dictionary
    for (objectID, center) in tracker_dict.items():
        # check whether this objectID already has a TrackableObject
        check = trackableObjects.get(objectID, None)

        # if there is no existing trackable object, create one
        if check is None:
            check = TrackableObject(objectID, center)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* 5 centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down') We need its length to be more than 5
            #x = [c[1] for c in check.center]

            if len(center) > 5:
                check.center.append(center)
                mean_value = (center[-5][coord] + center[-4][coord] +
                              center[-3][coord] + center[-2][coord] +
                              center[-1][coord]) / 5
                direction = center[-1][coord] - mean_value

                # check to see if the object has been counted or not
                if not check.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the counting
                    # line AND the mean of the previous values is below the line, count the object.
                    # The mean of the previous values is checked so that only objects
                    # that actually crossed the line are counted.
                    if direction < 0 and center[-1][
                            coord] < loi / 2 and mean_value > loi / 2:
                        totalUp += 1
                        check.counted = True
                    # same logic but for the positive direction
                    elif direction > 0 and center[-1][
                            coord] > loi / 2 and mean_value < loi / 2:
                        totalDown += 1
                        check.counted = True
        # store the trackable object in our dictionary
        trackableObjects[objectID] = check

        # draw the line and put text on the frame
        if horizontal:
            cv2.line(img, (0, int(loi / 2)), (w, int(loi / 2)), (0, 0xFF, 0),
                     5)
        else:
            cv2.line(img, (int(loi / 2), 0), (int(loi / 2), h), (0, 0xFF, 0),
                     5)

        cv2.putText(
            img, 'Up ' + str(totalUp) + '            Down ' + str(totalDown) +
            '             Total ' + str(totalUp + totalDown), (10, 35),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2, cv2.LINE_AA)

    print('Total number of people crossed ' + str(totalUp + totalDown))
    tracker_list = [x for x in tracker_list if x.no_losses <= max_detection]
    return img, output_dict
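`convert_boxes_to_centroids` is not shown in this snippet. A plausible helper, consistent with the `[up, left, down, right]` box layout used above and with how `center[-1][coord]` is indexed later, would be:

def convert_boxes_to_centroids(box):
    # box is [up, left, down, right]; return its (x, y) centre
    up, left, down, right = box
    cX = int((left + right) / 2.0)
    cY = int((up + down) / 2.0)
    return (cX, cY)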
Code Example #7
def Stream():

    st.title("Customer Tracker")
    st.text(
        "This application will track how many customer enter & exit your premise"
    )
    st.markdown("\n", unsafe_allow_html=True)

    camera = st.text_input("Enter Camera/Webcam Path")

    col1, col2 = st.beta_columns(2)
    if col1.button('Start ▶️') and not col2.button("Stop ⏹️"):

        if camera.isnumeric():
            camera = int(camera)
            st.info("Live Streaming")
        elif camera is not None:
            st.error("Please Enter the Correct Camera Path")

        image_placeholder = st.empty()
        #confidenceValue = 0.4
        #frameValue = 30
        # initialize the list of class labels MobileNet SSD was trained to
        # detect

        CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        # load our serialized model from disk
        net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                                       "MobileNetSSD_deploy.caffemodel")

        print("[INFO] Starting the video..")
        vs = cv2.VideoCapture(camera)

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=80, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # initialize the total number of frames processed thus far, along
        # with the total number of objects that have moved either up or down
        totalFrames = 0
        totalDown = 0
        totalUp = 0
        x = []
        empty = []
        empty1 = []

        # start the frames per second throughput estimator and the stream timer
        fps = FPS().start()
        t0 = time.time()

        # loop over frames from the video stream
        while True:
            # grab the next frame; stop when the stream ends or a frame
            # cannot be read
            ret, frame = vs.read()
            if not ret:
                break

            # resize the frame to have a maximum width of 700 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=700)  # Default width = 500
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if totalFrames % 30 == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > 0.4:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 0, 255), 3)
            cv2.putText(frame, "Prediction border", (10, H - ((i * 20) + 200)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            empty.append(totalUp)
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            empty1.append(totalDown)
                            #print(empty1[-1])
                            x = []
                            # compute the sum of total people inside
                            x.append(len(empty1) - len(empty))
                            #print("Total people inside:", x)
                            # if the people limit exceeds over threshold, send an email alert
                            if sum(x) >= config.Threshold:
                                cv2.putText(frame,
                                            "-ALERT: People limit exceeded-",
                                            (10, frame.shape[0] - 80),
                                            cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                            (0, 0, 255), 2)
                                if config.ALERT:
                                    print("[INFO] Sending email alert..")
                                    Mailer().send(config.MAIL)
                                    print("[INFO] Alert sent")

                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4,
                           (255, 255, 255), -1)

            # construct a tuple of information we will be displaying on the frame
            info = [
                ("Exit", totalUp),
                ("Enter", totalDown),
                ("Status", status),
            ]

            info2 = [
                ("Total people inside", x),
            ]

            # Display the output
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 250), 2)

            for (i, (k, v)) in enumerate(info2):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (265, H - ((i * 20) + 60)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

            #Logs.csv
            # Initiate a simple log to save data at end of the day
            # if config.Log:
            #     datetimee = [datetime.datetime.now()]
            #     d = [datetimee, empty1, empty, x]
            #     export_data = zip_longest(*d, fillvalue = '')

            #     with open('Log.csv', 'w', newline='') as myfile:
            #         wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            #         wr.writerow(("End Time", "In", "Out", "Total Inside"))
            #         wr.writerows(export_data)

            #cv2.imshow("Real-Time Monitoring/Analysis Window", frame)
            # show the output frame
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_placeholder.image(frame)
            #key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            #if key == ord("q"):
            #    break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            totalFrames += 1
            fps.update()

            if config.Timer:
                # Automatic timer to stop the live stream. Set to 8 hours (28800s).
                t1 = time.time()
                num_seconds = (t1 - t0)
                if num_seconds > 28800:
                    break

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # # if we are not using a video file, stop the camera video stream
        # if not args.get("input", False):
        # 	vs.stop()
        #
        # # otherwise, release the video file pointer
        # else:
        # 	vs.release()

        # close any open windows
        cv2.destroyAllWindows()
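This example reads `config.Threshold`, `config.ALERT`, `config.MAIL` and `config.Timer` from a settings module that is not shown, and sends alerts through a `Mailer` helper. A hypothetical `config.py` covering just the attributes the code touches (the real project's fields and values will differ) might be:

# config.py -- hypothetical settings matching the attributes read above
Threshold = 10               # alert once this many people are inside
ALERT = False                # whether to send the email alert
MAIL = "owner@example.com"   # recipient passed to Mailer().send(...)
Timer = False                # stop the live stream automatically after 8 hours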
Code Example #8
else:
    for tracker in trackers:
        status = "Tracking"
        tracker.update(rgb)
        pos = tracker.get_position()
        startX = int(pos.left())
        startY = int(pos.top())
        endX = int(pos.right())
        endY = int(pos.bottom())
        rects.append((startX, startY, endX, endY))

# draw a vertical line in the center of the frame -- objects are counted
# as moving left or right when they cross it
cv2.line(frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)
objects = ct.update(rects)
for (objectID, centroid) in objects.items():
    to = trackableObjects.get(objectID, None)
    if to is None:
        to = TrackableObject(objectID, centroid)
    else:
        # direction is based on the x-coordinate here (left/right counting)
        x = [c[0] for c in to.centroids]
        direction = centroid[0] - np.mean(x)
        to.centroids.append(centroid)
        if not to.counted:
            if direction < 0 and centroid[0] < W // 2:
                totalLeft += 1
                to.counted = True
            elif direction > 0 and centroid[0] > W // 2:
                totalRight += 1
                to.counted = True
    trackableObjects[objectID] = to
    text = "ID {}".format(objectID)
    cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
Code Example #9
File: people_counter.py Project: dskaustubh/IBM-Hack
def evaluate(stream=None):
	mixer.init()
	mixer.music.load('support/alert.wav')
	playing = False
	args = dict(
		prototxt='support/model.prototxt',
		model='support/model.caffemodel',
		input=stream,
		output=None,
		confidence=0.4,
		skip_frames=30)

	# initialize the list of class labels MobileNet SSD was trained to
	# detect
	CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
		"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
		"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
		"sofa", "train", "tvmonitor"]

	# load our serialized model from disk
	print("[INFO] loading model...")
	net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

	# if a video path was not supplied, grab a reference to the webcam
	if not args.get("input", False):
		print("[INFO] starting video stream...")
		vs = VideoStream(src=0).start()
		time.sleep(2.0)

	# otherwise, grab a reference to the video file
	else:
		print("[INFO] opening video file...")
		vs = cv2.VideoCapture(args["input"])

	# initialize the video writer (we'll instantiate later if need be)
	writer = None

	# initialize the frame dimensions (we'll set them as soon as we read
	# the first frame from the video)
	W = None
	H = None

	# instantiate our centroid tracker, then initialize a list to store
	# each of our dlib correlation trackers, followed by a dictionary to
	# map each unique object ID to a TrackableObject
	ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
	trackers = []
	trackableObjects = {}

	# initialize the total number of frames processed thus far, along
	# with the total number of objects that have moved either up or down
	totalFrames = 0
	total = 0

	# start the frames per second throughput estimator
	fps = FPS().start()

	# loop over frames from the video stream
	while True:
		# grab the next frame and handle if we are reading from either
		# VideoCapture or VideoStream
		frame = vs.read()
		frame = frame[1] if args.get("input", False) else frame

		# if we are viewing a video and we did not grab a frame then we
		# have reached the end of the video
		if args["input"] is not None and frame is None:
			break

		# resize the frame to have a maximum width of 500 pixels (the
		# less data we have, the faster we can process it), then convert
		# the frame from BGR to RGB for dlib
		frame = imutils.resize(frame, width=500)
		rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

		# if the frame dimensions are empty, set them
		if W is None or H is None:
			(H, W) = frame.shape[:2]

		# if we are supposed to be writing a video to disk, initialize
		# the writer
		if args["output"] is not None and writer is None:
			fourcc = cv2.VideoWriter_fourcc(*"MJPG")
			writer = cv2.VideoWriter(args["output"], fourcc, 30,
				(W, H), True)

		# initialize the current status along with our list of bounding
		# box rectangles returned by either (1) our object detector or
		# (2) the correlation trackers
		status = "Waiting"
		rects = []

		# check to see if we should run a more computationally expensive
		# object detection method to aid our tracker
		if totalFrames % args["skip_frames"] == 0:
			# set the status and initialize our new set of object trackers
			status = "Detecting"
			trackers = []

			# convert the frame to a blob and pass the blob through the
			# network and obtain the detections
			blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
			net.setInput(blob)
			detections = net.forward()

			# loop over the detections
			for i in np.arange(0, detections.shape[2]):
				# extract the confidence (i.e., probability) associated
				# with the prediction
				confidence = detections[0, 0, i, 2]

				# filter out weak detections by requiring a minimum
				# confidence
				if confidence > args["confidence"]:
					# extract the index of the class label from the
					# detections list
					idx = int(detections[0, 0, i, 1])

					# if the class label is not a person, ignore it
					if CLASSES[idx] != "person":
						continue

					# compute the (x, y)-coordinates of the bounding box
					# for the object
					box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
					(startX, startY, endX, endY) = box.astype("int")

					# construct a dlib rectangle object from the bounding
					# box coordinates and then start the dlib correlation
					# tracker
					tracker = dlib.correlation_tracker()
					rect = dlib.rectangle(startX, startY, endX, endY)
					tracker.start_track(rgb, rect)

					# add the tracker to our list of trackers so we can
					# utilize it during skip frames
					trackers.append(tracker)

		# otherwise, we should utilize our object *trackers* rather than
		# object *detectors* to obtain a higher frame processing throughput
		else:
			# loop over the trackers
			for tracker in trackers:
				# set the status of our system to be 'tracking' rather
				# than 'waiting' or 'detecting'
				status = "Tracking"

				# update the tracker and grab the updated position
				tracker.update(rgb)
				pos = tracker.get_position()

				# unpack the position object
				startX = int(pos.left())
				startY = int(pos.top())
				endX = int(pos.right())
				endY = int(pos.bottom())

				# add the bounding box coordinates to the rectangles list
				rects.append((startX, startY, endX, endY))

		# draw a horizontal line in the center of the frame -- once an
		# object crosses this line we will determine whether they were
		# moving 'up' or 'down'
		# cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

		# use the centroid tracker to associate the (1) old object
		# centroids with (2) the newly computed object centroids
		objects = ct.update(rects)

		
		# loop over the tracked objects
		for (objectID, centroid) in objects.items():
			# check to see if a trackable object exists for the current
			# object ID
			to = trackableObjects.get(objectID, None)

			# if there is no existing trackable object, create one
			if to is None:
				to = TrackableObject(objectID, centroid)

			# otherwise, there is a trackable object so we can utilize it
			# to determine direction
			else:
				# the difference between the y-coordinate of the *current*
				# centroid and the mean of *previous* centroids will tell
				# us in which direction the object is moving (negative for
				# 'up' and positive for 'down')
				y = [c[1] for c in to.centroids]
				direction = centroid[1] - np.mean(y)
				to.centroids.append(centroid)

				# check to see if the object has been counted or not
				if not to.counted:
					# if the direction is negative (indicating the object
					# is moving up) AND the centroid is above the center
					# line, count the object
					if direction < 0 and centroid[1] < H // 2:
						total += 1
						to.counted = True

					# if the direction is positive (indicating the object
					# is moving down) AND the centroid is below the
					# center line, count the object
					elif direction > 0 and centroid[1] > H // 2:
						total += 1
						to.counted = True

			# store the trackable object in our dictionary
			trackableObjects[objectID] = to

			# draw both the ID of the object and the centroid of the
			# object on the output frame
			text = "ID {}".format(objectID)
			cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

		# collect the current centroids and flag any pair that is closer
		# than 120 pixels
		corona_danger_count = 0
		current_id_centroid = []
		for (objectID, centroid) in objects.items():
			current_id_centroid.append(centroid)

		t = np.array(current_id_centroid)
		already_played = []
		if t.ndim == 2:
			# pairwise Euclidean distances between all tracked centroids
			D = dist.cdist(current_id_centroid, current_id_centroid, metric="euclidean")
			(row, col) = D.shape
			for i in range(row):
				for j in range(col):
					if i == j:
						continue
					if D[i][j] < 120:
						corona_danger_count += 1
						cv2.circle(frame,
							(current_id_centroid[i][0], current_id_centroid[i][1]),
							8, (0, 0, 255), -1)
						danger = "DANGER"
						# play the alert sound only once per offending pair
						if mixer.music.get_busy() or (i, j) in already_played:
							pass
						else:
							mixer.music.play()
							already_played.append((i, j))
							already_played.append((j, i))
						cv2.putText(frame, danger,
							(current_id_centroid[i][0] - 10, current_id_centroid[i][1] - 10),
							cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 3)

		# construct a tuple of information we will be displaying on the
		# frame
		info = [
			("Distinguishable Objects", total),
			("Voilation Count", corona_danger_count),
			("Status", status),
		]

		# loop over the info tuples and draw them on our frame
		for (i, (k, v)) in enumerate(info):
			text = "{}: {}".format(k, v)
			cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
				cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)

		# check to see if we should write the frame to disk
		if writer is not None:
			writer.write(frame)

		# show the output frame
		cv2.imshow("Social Distancing Enforcer", frame)
		key = cv2.waitKey(1) & 0xFF

		# if the `q` key was pressed, break from the loop
		if key == ord("q"):
			break

		# increment the total number of frames processed thus far and
		# then update the FPS counter
		totalFrames += 1
		fps.update()

	# stop the timer and display FPS information
	fps.stop()
	print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
	print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

	# check to see if we need to release the video writer pointer
	if writer is not None:
		writer.release()

	# if we are not using a video file, stop the camera video stream
	if not args.get("input", False):
		vs.stop()

	# otherwise, release the video file pointer
	else:
		vs.release()

	# close any open windows
	cv2.destroyAllWindows()
Code Example #10
def main():
    global payload

    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--headless',
                        help='run the pygame headlessly',
                        action='store_true')

    parser.add_argument("--color_depth",
                        help="integer number of colors to use to draw temps",
                        type=int)
    parser.add_argument('--max_temp', help='initial max temperature', type=int)
    parser.add_argument(
        '--ambient_offset',
        help='value to offset ambient temperature by to get rolling MAXTEMP',
        type=int)
    parser.add_argument(
        '--ambient_time',
        help='length of ambient temperature collecting intervals in seconds',
        type=int)

    parser.add_argument('--blob_min_threshold',
                        help='blob detection min threshold',
                        type=int)
    parser.add_argument('--blob_max_threshold',
                        help='blob detection max threshold',
                        type=int)

    parser.add_argument('--blob_filterbyarea',
                        help='blob detection filter by area',
                        action='store_true')
    parser.add_argument('--blob_min_area',
                        help='blob detection filter by area min area',
                        type=int)

    parser.add_argument('--blob_filterbycircularity',
                        help='blob detection filter by circularity',
                        action='store_true')
    parser.add_argument(
        '--blob_min_circularity',
        help='blob detection filter by circularity min circularity',
        type=float)

    parser.add_argument('--blob_filterbyconvexity',
                        help='blob detection filter by convexity',
                        action='store_true')
    parser.add_argument(
        '--blob_min_convexity',
        help='blob detection filter by convexity min convexity',
        type=float)

    parser.add_argument('--blob_filterbyinertia',
                        help='blob detection filter by inertia',
                        action='store_true')
    parser.add_argument('--blob_min_inertiaratio',
                        help='blob detection filter by inertia inertia ratio',
                        type=float)

    parser.add_argument(
        '--mysql_send_interval',
        help='length of intervals between attempted mysql insert in seconds',
        type=int)

    args = parser.parse_args()
    print(args)
    i2c_bus = busio.I2C(board.SCL, board.SDA)

    COLOR_DEPTH = args.color_depth
    MAX_TEMP = args.max_temp
    AMBIENT_OFFSET = args.ambient_offset
    AMBIENT_TIME = args.ambient_time

    BLOB_MIN_THRESHOLD = args.blob_min_threshold
    BLOB_MAX_THRESHOLD = args.blob_max_threshold

    BLOB_FILTERBYAREA = args.blob_filterbyarea
    BLOB_MIN_AREA = args.blob_min_area

    BLOB_FILTERBYCIRCULARITY = args.blob_filterbycircularity
    BLOB_MIN_CIRCULARITY = args.blob_min_circularity

    BLOB_FILTERBYCONVEXITY = args.blob_filterbyconvexity
    BLOB_MIN_CONVEXITY = args.blob_min_convexity

    BLOB_FILTERBYINERTIA = args.blob_filterbyinertia
    BLOB_MIN_INERTIARATIO = args.blob_min_inertiaratio

    MYSQL_SEND_INTERVAL = args.mysql_send_interval

    if args.headless:
        os.putenv('SDL_VIDEODRIVER', 'dummy')
    else:
        os.putenv('SDL_FBDEV', '/dev/fb1')

    pygame.init()

    # initialize the sensor
    sensor = adafruit_amg88xx.AMG88XX(i2c_bus)

    points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
    grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]

    # sensor is an 8x8 grid so lets do a square
    height = 240
    width = 240

    # the list of colors we can choose from
    black = Color("black")
    colors = list(black.range_to(Color("white"), COLOR_DEPTH))

    # create the array of colors
    colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
              for c in colors]

    displayPixelWidth = width / 30
    displayPixelHeight = height / 30

    lcd = pygame.display.set_mode((width, height))

    lcd.fill((255, 0, 0))

    pygame.display.update()
    pygame.mouse.set_visible(False)

    lcd.fill((0, 0, 0))
    pygame.display.update()

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    if BLOB_MIN_THRESHOLD:
        params.minThreshold = BLOB_MIN_THRESHOLD
    if BLOB_MAX_THRESHOLD:
        params.maxThreshold = BLOB_MAX_THRESHOLD

    # Filter by Area.
    if BLOB_FILTERBYAREA:
        params.filterByArea = BLOB_FILTERBYAREA
        params.minArea = BLOB_MIN_AREA

    # Filter by Circularity
    if BLOB_FILTERBYCIRCULARITY:
        params.filterByCircularity = BLOB_FILTERBYCIRCULARITY
        params.minCircularity = BLOB_MIN_CIRCULARITY

    # Filter by Convexity
    if BLOB_FILTERBYCONVEXITY:
        params.filterByConvexity = BLOB_FILTERBYCONVEXITY
        params.minConvexity = BLOB_MIN_CONVEXITY

    # Filter by Inertia
    if BLOB_FILTERBYINERTIA:
        params.filterByInertia = BLOB_FILTERBYINERTIA
        params.minInertiaRatio = BLOB_MIN_INERTIARATIO

    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # initialize centroid tracker
    ct = CentroidTracker()

    # a dictionary to map each unique object ID to a TrackableObject
    trackableObjects = {}

    # the total number of objects that have moved either up or down
    total_down = 0
    total_up = 0
    total_down_old = 0
    total_up_old = 0

    # let the sensor initialize
    time.sleep(.1)

    # press key to exit
    screencap = True

    # array to hold mode of last 10 minutes of temperatures
    mode_list = []

    send_thread = threading.Thread(target=send_mysql,
                                   args=(MYSQL_SEND_INTERVAL, ))
    send_thread.start()

    print('sensor started!')

    while (screencap):
        start = time.time()

        # read the pixels
        pixels = []
        for row in sensor.pixels:
            pixels = pixels + row

        payload['a'] = 0
        payload['o'] = 0
        payload['c'] = ct.get_count()

        mode_result = stats.mode([round(p) for p in pixels])
        mode_list.append(int(mode_result[0]))

        # instead of taking the ambient temperature over one frame of data take it over a set amount of time
        MAX_TEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
        pixels = [
            map_value(p, mode_result[0] + 1, MAX_TEMP, 0, COLOR_DEPTH - 1)
            for p in pixels
        ]

        # perform interpolation
        bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')

        # draw everything
        for ix, row in enumerate(bicubic):
            for jx, pixel in enumerate(row):
                try:
                    pygame.draw.rect(
                        lcd, colors[constrain(int(pixel), 0, COLOR_DEPTH - 1)],
                        (displayPixelHeight * ix, displayPixelWidth * jx,
                         displayPixelHeight, displayPixelWidth))
                except Exception:
                    print("Caught drawing error")

        surface = pygame.display.get_surface()
        myfont = pygame.font.SysFont("comicsansms", 25)

        img = pygame.surfarray.array3d(surface)
        img = np.swapaxes(img, 0, 1)

        # Read image
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.bitwise_not(img)

        # Detect blobs.
        keypoints = detector.detect(img)
        img_with_keypoints = cv2.drawKeypoints(
            img, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        pygame.draw.line(lcd, (255, 255, 255), (0, height // 2),
                         (width, height // 2), 2)
        pygame.display.update()

        for i in range(0, len(keypoints)):
            x = keypoints[i].pt[0]
            y = keypoints[i].pt[1]

            # print circle around blobs
            pygame.draw.circle(lcd, (200, 0, 0), (int(x), int(y)),
                               round(keypoints[i].size), 2)

        # update our centroid tracker using the detected centroids
        objects = ct.update(keypoints)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    # the historical centroids must be present in the lower half of the screen
                    if direction < 0 and centroid[
                            1] < height // 2 and count_within_range(
                                y, height // 2, height) > 0:
                        total_up += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    # the historical centroids must be present in the upper half of the screen
                    elif direction > 0 and centroid[
                            1] > height // 2 and count_within_range(
                                y, 0, height // 2) > 0:
                        total_down += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

        # update counter in top left
        textsurface1 = myfont.render("IN: " + str(total_up), False,
                                     (255, 255, 255))
        textsurface2 = myfont.render('OUT: ' + str(total_down), False,
                                     (255, 255, 255))
        lcd.blit(textsurface1, (0, 0))
        lcd.blit(textsurface2, (0, 25))

        total_up_old = total_up
        total_down_old = total_down

        pygame.display.update()

        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                print('terminating...')
                screencap = False
                break

        # for running the save on for a certain amount of time
        # if time.time() - start_time >= 10:
        #    print('terminating...')
        #    screencap = False

        # empty mode_list every AMBIENT_TIME seconds to get the current ambient temperature
        if len(mode_list) > AMBIENT_TIME:
            mode_list = []
        time.sleep(max(1. / 25 - (time.time() - start), 0))

    # Release everything if job is finished
    cv2.destroyAllWindows()
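
Every example in this collection relies on a TrackableObject that simply records an object's centroid history and whether it has already been counted. A minimal sketch, assuming only the attributes (objectID, centroids, counted) that the snippets actually touch:

class TrackableObject:
    def __init__(self, objectID, centroid):
        # store the object ID and start the centroid history with the
        # first centroid observed for this object
        self.objectID = objectID
        self.centroids = [centroid]

        # flag used so the same object is never counted twice
        self.counted = False
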
Code example #11
0
def main():
    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--headless',
                        help='run the pygame headlessly',
                        action='store_true')

    parser.add_argument("--color_depth",
                        help="integer number of colors to use to draw temps",
                        type=int)
    parser.add_argument('--max_temp', help='initial max temperature', type=int)
    parser.add_argument(
        '--ambient_offset',
        help='value to offset ambient temperature by to get rolling MAXTEMP',
        type=int)
    parser.add_argument(
        '--ambient_time',
        help='length of ambient temperature collecting intervals in seconds',
        type=int)

    parser.add_argument('--blob_min_threshold',
                        help='blod detection min threshold',
                        type=int)
    parser.add_argument('--blob_max_threshold',
                        help='blod detection min threshold',
                        type=int)

    parser.add_argument('--blob_filterbyarea',
                        help='blod detection filter by area',
                        action='store_true')
    parser.add_argument('--blob_min_area',
                        help='blod detection filter by area min area',
                        type=int)

    parser.add_argument('--blob_filterbycircularity',
                        help='blod detection filter by circularity',
                        action='store_true')
    parser.add_argument(
        '--blob_min_circularity',
        help='blod detection filter by circularity min circularity',
        type=float)

    parser.add_argument('--blob_filterbyconvexity',
                        help='blod detection filter by convexity',
                        action='store_true')
    parser.add_argument(
        '--blob_min_convexity',
        help='blod detection filter by convexity min convexity',
        type=float)

    parser.add_argument('--blob_filterbyinertia',
                        help='blod detection filter by inertia',
                        action='store_true')
    parser.add_argument('--blob_min_inertiaratio',
                        help='blod detection filter by inertia inertia ratio',
                        type=float)

    args = parser.parse_args()
    print(args)

    COLOR_DEPTH = args.color_depth
    MAX_TEMP = args.max_temp
    AMBIENT_OFFSET = args.ambient_offset
    AMBIENT_TIME = args.ambient_time

    BLOB_MIN_THRESHOLD = args.blob_min_threshold
    BLOB_MAX_THRESHOLD = args.blob_max_threshold

    BLOB_FILTERBYAREA = args.blob_filterbyarea
    BLOB_MIN_AREA = args.blob_min_area

    BLOB_FILTERBYCIRCULARITY = args.blob_filterbycircularity
    BLOB_MIN_CIRCULARITY = args.blob_min_circularity

    BLOB_FILTERBYCONVEXITY = args.blob_filterbyconvexity
    BLOB_MIN_CONVEXITY = args.blob_min_convexity

    BLOB_FILTERBYINERTIA = args.blob_filterbyinertia
    BLOB_MIN_INERTIARATIO = args.blob_min_inertiaratio

    # create data folders if they don't exist
    if not os.path.exists(get_filepath('../img')):
        os.makedirs(get_filepath('../img'))
    if not os.path.exists(get_filepath('../data')):
        os.makedirs(get_filepath('../data'))
    if not os.path.exists(get_filepath('../video')):
        os.makedirs(get_filepath('../video'))

    # empty the images folder
    for filename in os.listdir(get_filepath('../img/')):
        if filename.endswith('.jpeg'):
            os.unlink(get_filepath('../img/') + filename)

    i2c_bus = busio.I2C(board.SCL, board.SDA)

    # For headless pygame
    if args.headless:
        os.putenv('SDL_VIDEODRIVER', 'dummy')
    else:
        os.putenv('SDL_FBDEV', '/dev/fb1')

    pygame.init()

    # initialize the sensor
    sensor = adafruit_amg88xx.AMG88XX(i2c_bus)

    points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
    grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]

    # sensor is an 8x8 grid so lets do a square
    height = 240
    width = 240

    # the list of colors we can choose from
    black = Color("black")
    colors = list(black.range_to(Color("white"), COLOR_DEPTH))

    # create the array of colors
    colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
              for c in colors]

    displayPixelWidth = width / 30
    displayPixelHeight = height / 30

    lcd = pygame.display.set_mode((width, height))

    lcd.fill((255, 0, 0))

    pygame.display.update()
    pygame.mouse.set_visible(False)

    lcd.fill((0, 0, 0))
    pygame.display.update()

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    if BLOB_MIN_THRESHOLD:
        params.minThreshold = BLOB_MIN_THRESHOLD
    if BLOB_MAX_THRESHOLD:
        params.maxThreshold = BLOB_MAX_THRESHOLD

    # Filter by Area.
    if BLOB_FILTERBYAREA:
        params.filterByArea = BLOB_FILTERBYAREA
        params.minArea = BLOB_MIN_AREA

    # Filter by Circularity
    if BLOB_FILTERBYCIRCULARITY:
        params.filterByCircularity = BLOB_FILTERBYCIRCULARITY
        params.minCircularity = BLOB_MIN_CIRCULARITY

    # Filter by Convexity
    if BLOB_FILTERBYCONVEXITY:
        params.filterByConvexity = BLOB_FILTERBYCONVEXITY
        params.minConvexity = BLOB_MIN_CONVEXITY

    # Filter by Inertia
    if BLOB_FILTERBYINERTIA:
        params.filterByInertia = BLOB_FILTERBYINERTIA
        params.minInertiaRatio = BLOB_MIN_INERTIARATIO

    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # initialize centroid tracker
    ct = CentroidTracker()

    # a dictionary to map each unique object ID to a TrackableObject
    trackableObjects = {}

    # the total number of objects that have moved either up or down
    total_down = 0
    total_up = 0
    total_down_old = 0
    total_up_old = 0

    # let the sensor initialize
    time.sleep(.1)

    # press key to exit
    screencap = True

    # json dump
    data = {}
    data['sensor_readings'] = []

    # array to hold mode of last 10 minutes of temperatures
    mode_list = []

    print('sensor started!')

    start_time = time.time()

    while (screencap):
        start = time.time()
        date = datetime.now()
        # read the pixels
        pixels = []
        for row in sensor.pixels:
            pixels = pixels + row

        data['sensor_readings'].append({
            'time': datetime.now().isoformat(),
            'temps': pixels,
            'count': ct.get_count()
        })
        mode_result = stats.mode([round(p) for p in pixels])
        mode_list.append(int(mode_result[0]))

        # instead of taking the ambient temperature over one frame of data take it over a set amount of time
        MAX_TEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
        pixels = [
            map_value(p,
                      np.mean(mode_list) + 1, MAX_TEMP, 0, COLOR_DEPTH - 1)
            for p in pixels
        ]

        # perform interpolation
        bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')

        # draw everything
        for ix, row in enumerate(bicubic):
            for jx, pixel in enumerate(row):
                try:
                    pygame.draw.rect(
                        lcd, colors[constrain(int(pixel), 0, COLOR_DEPTH - 1)],
                        (displayPixelHeight * ix, displayPixelWidth * jx,
                         displayPixelHeight, displayPixelWidth))
                except Exception:
                    print("Caught drawing error")

        surface = pygame.display.get_surface()
        myfont = pygame.font.SysFont("comicsansms", 25)

        # frame saving
        folder = get_filepath('../img/')
        filename = str(date) + '.jpeg'
        #pygame.image.save(surface, folder + filename)

        img = pygame.surfarray.array3d(surface)
        img = np.swapaxes(img, 0, 1)

        # Read image
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_not = cv2.bitwise_not(img)

        # Detect blobs.
        keypoints = detector.detect(img_not)
        img_with_keypoints = cv2.drawKeypoints(
            img, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        pygame.draw.line(lcd, (255, 255, 255), (0, height // 2),
                         (width, height // 2), 2)
        pygame.display.update()

        for i in range(0, len(keypoints)):
            x = keypoints[i].pt[0]
            y = keypoints[i].pt[1]

            # draw a circle around each blob
            pygame.draw.circle(lcd, (200, 0, 0), (int(x), int(y)),
                               round(keypoints[i].size), 2)

        # update our centroid tracker using the detected centroids
        objects = ct.update(keypoints)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[
                            1] < height // 2 and count_within_range(
                                y, height // 2, height) > 0:
                        total_up += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[
                            1] > height // 2 and count_within_range(
                                y, 0, height // 2) > 0:
                        total_down += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

        # update counter in top left
        textsurface1 = myfont.render("IN: " + str(total_up), False,
                                     (255, 255, 255))
        textsurface2 = myfont.render('OUT: ' + str(total_down), False,
                                     (255, 255, 255))
        lcd.blit(textsurface1, (0, 0))
        lcd.blit(textsurface2, (0, 25))

        total_up_old = total_up
        total_down_old = total_down

        pygame.display.update()
        pygame.image.save(surface, folder + filename)

        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                print('terminating...')
                screencap = False
                break

        # for running the save on for a certain amount of time
        # if time.time() - start_time >= 10:
        #    print('terminating...')
        #    screencap = False

        # empty mode_list every AMBIENT_TIME seconds
        if len(mode_list) > AMBIENT_TIME:
            mode_list = []
        time.sleep(max(1. / 25 - (time.time() - start), 0))

    # write raw sensor data to file
    data_index = 0
    while os.path.exists(
            get_filepath('../data/') + 'data%s.json' % data_index):
        data_index += 1
    data_path = str(get_filepath('../data/') + 'data%s.json' % data_index)

    with open(data_path, 'w+') as outfile:
        json.dump(data, outfile, indent=4)

    # stitch the frames together
    dir_path = get_filepath('../img/')
    ext = '.jpeg'

    out_index = 0
    while os.path.exists(
            get_filepath('../video/') + 'output%s.avi' % out_index):
        out_index += 1
    output = str(get_filepath('../video/') + 'output%s.avi' % out_index)

    framerate = 10

    # get files from directory
    images = []
    for f in os.listdir(dir_path):
        if f.endswith(ext):
            images.append(f)

    # sort files
    images = sorted(images,
                    key=lambda x: datetime.strptime(
                        x.split('.j')[0], '%Y-%m-%d %H:%M:%S.%f'))
    # determine width and height from first image
    image_path = os.path.join(dir_path, images[0])
    frame = cv2.imread(image_path)
    if not args.headless:
        cv2.imshow('video', frame)
    height, width, channels = frame.shape

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # Be sure to use lower case
    out = cv2.VideoWriter(output, fourcc, framerate, (width, height))

    for image in images:

        image_path = os.path.join(dir_path, image)
        frame = cv2.imread(image_path)

        out.write(frame)  # Write out frame to video

        if not args.headless:
            cv2.imshow('video', frame)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):  # Hit `q` to exit
                break

    print('video created!')
    # Release everything if job is finished
    out.release()
    cv2.destroyAllWindows()
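
The main loop above also calls a few small helpers (map_value, constrain, count_within_range) that are not included in the snippet. A sketch of what they might look like, inferred purely from how they are used; treat the exact signatures as assumptions:

def map_value(value, in_min, in_max, out_min, out_max):
    # linearly remap a temperature reading into the color-index range
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min


def constrain(value, lower, upper):
    # clamp a value so it stays inside [lower, upper]
    return max(lower, min(value, upper))


def count_within_range(values, lower, upper):
    # count how many historical y-coordinates fall inside [lower, upper)
    return sum(1 for v in values if lower <= v < upper)
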
Code example #12
0
    def detect_temp(self):
        self.face_detector.start()
        res = libuvc.uvc_start_streaming(self.devh, byref(self.ctrl),
                                         PTR_PY_FRAME_CALLBACK, None, 0)
        if res < 0:
            print("uvc_start_streaming failed: {0}".format(res))
        trackableObjects = {}
        # loop over frames from the video stream
        while _THREADING["shouldrun"]:
            detector_ready = self.face_detector.isstarted()
            # grab frame from each camera
            data = q.get(True, 500)
            if data is None or not detector_ready:
                time.sleep(1.0)
                continue
            data = cv2.resize(data[:, :], (400, 300))
            raw_data = data.copy()
            lepton_frame = raw_to_8bit(data)
            lepton_frame = cv2.flip(lepton_frame, 1)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = self.face_detector.objects.copy()
            obj_rects = self.face_detector.obj_rects.copy()

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # compute max temp for ROI
                (startX, startY, endX, endY) = obj_rects[objectID]
                if startX < 0:
                    startX = 0
                if startY < 0:
                    startY = 0
                if endX > 400:
                    endX = 400
                if endY > 300:
                    endY = 300
                max_temp = np.max(raw_data[startY:endY, startX:endX])
                val = ktoc(max_temp)
                temp = "Max {0:.1f} degC".format(val)
                color = (0, 255, 0)
                if self.low_temp < val < self.high_temp:
                    color = (0, 255, 255)
                elif val >= 37:
                    color = (0, 0, 255)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                #cv2.putText(lepton_frame, text, (centroid[0] - 10, centroid[1] - 10),
                #    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
                cv2.circle(lepton_frame, (centroid[0], centroid[1]), 4, color,
                           -1)
                cv2.rectangle(lepton_frame, (startX, startY), (endX, endY),
                              color, 2)
                cv2.putText(lepton_frame, temp, (startX, startY - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 0), 2)

                #Determine if face close enough to temp camera (improve reading accuracy)
                iou = self.bb_intersection_over_union(
                    self.TEMP_BBOX, [startX, startY, endX, endY])
                if iou > 0.5:
                    self.temperature = "{0:.1f}".format(val)

            # show the output frame
            # draw temperature reading box
            lepton_frame = cv2.rectangle(
                lepton_frame, (self.CROP_X, self.CROP_Y),
                (self.CROP_X + self.SIZE, self.CROP_Y + self.SIZE),
                (255, 255, 255), 2)
            lepton_frame = cv2.resize(lepton_frame, (800, 600),
                                      interpolation=cv2.INTER_LINEAR)

            self.thermal_frame = lepton_frame
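
detect_temp() assumes Lepton helpers such as ktoc and raw_to_8bit in the style of the PureThermal/libuvc radiometry examples, where raw values are in centi-Kelvin. A rough sketch under that assumption:

import cv2
import numpy as np


def ktoc(val):
    # convert a raw radiometric value (centi-Kelvin) to degrees Celsius
    return (val - 27315) / 100.0


def raw_to_8bit(data):
    # stretch the 16-bit thermal data to the full range, drop to 8 bits,
    # and expand to a 3-channel image so colored overlays can be drawn on it
    cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
    np.right_shift(data, 8, data)
    return cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)
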
Code example #13
0
def counting_vehicle(input_path, output_path, model_cfg, name_list,
                     model_weight, skip_frame, thresh_prop, horizon):
    '''
    Parameters:
        + input_path:
            . None: take frames from the camera
            . path: take frames from a video file
        + output_path:
            . None: show the result
            . path: save the result into a new video
        + name_list: path to the file listing the class labels
        + model_cfg: model config file
        + model_weight: model weights file
        + skip_frame: number of frames between detection passes
        + thresh_prop: minimum score (probability) required to accept a detected object
        + horizon: y-coordinate of the line the counting is performed on
    Output: None
    '''

    if input_path is None:
        print("Starting streaming")
        cap = cv2.VideoCapture(0)
        time.sleep(2.0)

    else:
        print("Opening " + input_path)
        cap = cv2.VideoCapture(input_path)

    if output_path is not None:
        writer = None
    config = model_cfg
    weight = model_weight
    name   = name_list

    with open(name, 'r') as f:
        # generate all classes of COCO, bicycle idx = 1, car idx = 2 and motorbike idx = 3
        classes = [line.strip() for line in f.readlines()] 
    np.random.seed(11)
    COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

    # Read the model
    net = cv2.dnn.readNet(weight, config)
    # scale factor (roughly 1/255) used to normalize pixels for the 416x416 YOLO input blob
    scale = 0.00392

    W = None
    H = None

    ct = CentroidTracker(maxDisappeared=20, maxDistance=30)
    # trackers = []
    trackableObjects = {}

    totalFrames = 0
    totalDown = 0
    totalUp = 0

    while True:
        
        ret, frame = cap.read()
        # Resize to a width of 500 and scale the height proportionally
        # (smaller frames make the model run faster)
        frame = imutils.resize(frame, width=500)
        # convert to rgb for dlib
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            # take original for scale the output of YOLO
            (H,W) = frame.shape[:2]
        status = "Waiting"
        rects = []
        # reset the frame counter periodically so it does not grow unbounded
        if totalFrames == skip_frame**3:
            totalFrames = 1
        if totalFrames % skip_frame == 0:
            status = "Detecting"
            trackers = []
            blob = cv2.dnn.blobFromImage(frame, scale, (416, 416), (0,0,0), True, crop =False)
            net.setInput(blob)
            outs = net.forward(get_output_layers(net))

            for out in outs:
                for detection in out:
                    scores = detection[5:]
                    # get the highest score to determine its label
                    class_id = np.argmax(scores)
                    # keep only the classes of interest (person, bicycle, car, motorbike, truck)
                    if class_id not in [0, 1, 2, 3, 7]:
                        continue
                    # score of that object; only track it above the threshold
                    confidence = scores[class_id]
                    if confidence > thresh_prop:
                        center_x = int(detection[0] * W)
                        center_y = int(detection[1] * H)
                        w = int(detection[2] * W)
                        h = int(detection[3] * H)
                        # YOLO returns the box center, so convert to the top-left corner
                        x = int(center_x - w / 2)
                        y = int(center_y - h / 2)
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(x, y, x + w, y + h)
                        tracker.start_track(rgb, rect)
                        trackers.append(tracker)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))
        cv2.line(frame, (0, horizon), (W, horizon), (0, 255, 255), 2)
        objects = ct.update(rects)
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True
            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]

        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
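
counting_vehicle() depends on a get_output_layers helper to locate the YOLO output layers. A minimal sketch using the standard OpenCV DNN calls (indices are flattened so both older and newer OpenCV layouts work), followed by a hypothetical invocation with placeholder file names:

import numpy as np


def get_output_layers(net):
    # names of every layer in the network
    layer_names = net.getLayerNames()
    # getUnconnectedOutLayers() returns 1-based indices; flatten so nested
    # (older OpenCV) and scalar (newer OpenCV) results are both handled
    out_idxs = np.array(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[i - 1] for i in out_idxs]


# hypothetical usage -- the paths below are placeholders
counting_vehicle(input_path='traffic.mp4', output_path=None,
                 model_cfg='yolov3.cfg', name_list='coco.names',
                 model_weight='yolov3.weights', skip_frame=30,
                 thresh_prop=0.5, horizon=200)
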
Code example #14
0
def run():
    classes = [c.strip() for c in open('coco.names').readlines()]
    conf_threshold = 0.6  # keep only detections above this confidence
    nmsThreshold = 0.4  # overlap threshold for non-max suppression
    shape = 288
    colors = []
    colors.append([(randint(0, 255), randint(0, 255), randint(0, 255))
                   for i in range(1000)])
    detected_classes = ['cell phone']
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    pts = [deque(maxlen=10) for _ in range(1000)]
    counter = 0
    center = None
    trackers = []
    totalIn = []
    empty = []
    empty1 = []
    trackableObjects = {}
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    (W, H) = (None, None)
    net = yolo_net("yolov3.weights", "yolov3.cfg")
    if config.Thread:
        vid = thread.ThreadingClass(0)
    else:
        vid = cv2.VideoCapture(0)
    while True:
        if config.Thread:
            img = vid.read()
        else:
            _, img = vid.read()
        img = cv2.resize(img, (600, 500))
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = img.shape[:2]
        status = "Waiting"
        rects = []
        if totalFrames % 30 == 0:
            status = "Detecting"
            trackers = []
            outputs = yolo_output(net, img, shape)
            bbox, classIds, confs = yolo_predict(outputs, conf_threshold, H, W)
            indices = cv2.dnn.NMSBoxes(bbox, confs, conf_threshold,
                                       nmsThreshold)
            # NMSBoxes returns nested indices in older OpenCV versions and a
            # flat array in newer ones; flatten so both layouts work
            for i in np.array(indices).flatten():
                if classes[classIds[i]] not in detected_classes: continue
                box = bbox[i]
                color = colors[0][i]
                x, y, w, h = box[0], box[1], box[2], box[3]
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(x, y, x + w, y + h)
                tracker.start_track(rgb, rect)
                trackers.append(tracker)
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))
        cv2.line(img, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        obj = ct.update(rects)
        for (objectID, centroid) in obj.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                oy = [c[1] for c in to.centroids]
                directionY = centroid[1] - np.mean(oy)
                to.centroids.append(centroid)
                if not to.counted:
                    if directionY < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        empty.append(totalUp)
                        to.counted = True

                    elif directionY > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        empty1.append(totalDown)
                        # print(empty1[-1])
                        totalIn = []
                        # compute the sum of total people inside
                        totalIn.append(len(empty1) - len(empty))
                        print("Total people inside:", totalIn)
                        # if the people limit exceeds over threshold, send an email alert
                        if sum(totalIn) >= config.Threshold:
                            cv2.putText(img, "-ALERT: People limit exceeded-",
                                        (10, img.shape[0] - 80),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (0, 0, 255), 2)
                            if config.ALERT:
                                print("[INFO] Sending email alert..")
                                # Mailer().send(config.MAIL)
                                print("[INFO] Alert sent")

                        to.counted = True

            trackableObjects[objectID] = to

            text1 = "ID {}".format(objectID)
            colorID = colors[0][objectID]
            cv2.circle(img, (centroid[0], centroid[1]), 4, colorID, -1)
            # cv2.putText(img, "Direction: {}".format(direction), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            center = (centroid[0], centroid[1])
            pts[objectID].append(center)
            for i in range(1, len(pts[objectID])):
                if pts[objectID][i - 1] is None or pts[objectID][i] is None:
                    continue
                thickness = int(np.sqrt(10 / float(i + 1)) * 2.5)
                cv2.line(img, pts[objectID][i - 1], pts[objectID][i], colorID,
                         thickness)
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]
        info2 = [
            ("Total people inside", totalIn),
        ]

        for (i, (k, v)) in enumerate(info):
            text2 = "{}: {}".format(k, v)
            cv2.putText(img, text2, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        for (i, (k, v)) in enumerate(info2):
            text3 = "{}: {}".format(k, v)
            cv2.putText(img, text3, (265, H - ((i * 20) + 60)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        if config.Log:
            datetimee = [
                datetime.datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
            ]
            d = [datetimee, empty1, empty, totalIn]
            export_data = zip_longest(*d, fillvalue='')
            # zip_longest returns a one-shot iterator, so write it out only once
            with open('Log.csv', 'w', newline='') as myfile:
                wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
                wr.writerow(("End Time", "In", "Out", "Total Inside"))
                wr.writerows(export_data)

        cv2.imshow('Result', img)
        key = cv2.waitKey(1)
        if key == ord('q'): break
        totalFrames += 1
        if config.Timer:
            # Automatic timer to stop the live stream. Set to 8 hours (28800s).
            t1 = time.time()
            num_seconds = (t1 - t0)
            if num_seconds > 28800:
                break
    if not config.Thread:
        vid.release()
    cv2.destroyAllWindows()


def age_gender(vs, dur):
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    rects=[]
    agen = []
    gend = []
    writer = None
    W= None
    H= None
    up = []
    down = []
    skipFrames = rate
    status = ""
    net = cv2.dnn.readNet("deploy.prototxt", "deploy.caffemodel")
    age_net = cv2.dnn.readNetFromCaffe('deploy_age.prototxt', 'age_net.caffemodel')
    gender_net = cv2.dnn.readNetFromCaffe('deploy_gender.prototxt', 'gender_net.caffemodel')
    font = cv2.FONT_HERSHEY_SIMPLEX
    MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
    age_list = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']
    gender_list = ['Male', 'Female']
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]
    frameST = st.empty()
    trackers = []
    trackableObjects = {}


    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)

    widgets = ['Loading: ', progressbar.AnimatedMarker()]
    bar = progressbar.ProgressBar(widgets=widgets).start() 
    
    with st.spinner('Processing...'):
        while True:
            frame = vs.read()
            frame = frame[1]

            if frame is None:
                break

            frame = imutils.resize(frame, width=320)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if W is None or H is None:
                (H, W) = frame.shape[:2]

            status = "Waiting"
            
            rects = []
            
            if totalFrames % skipFrames < skipFrames/5:
                status = "Detecting"
                trackers = []

                face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, 1.1, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 2)

                    face_img = frame[y:y+h, x:x+w].copy()
                    blob = cv2.dnn.blobFromImage(face_img, 1, (227, 227), MODEL_MEAN_VALUES, swapRB=False)

                    gender_net.setInput(blob)
                    gender_preds = gender_net.forward()
                    gender = gender_list[gender_preds[0].argmax()]
                    print("Gender : " + gender)
                    age_net.setInput(blob)
                    age_preds = age_net.forward()
                    age = age_list[age_preds[0].argmax()]
                    print("Age Range: " + age)
                    overlay_text = "%s %s" % (gender, age)
                    cv2.putText(frame, overlay_text, (x, y), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                    agen.append(age)
                    gend.append(gender)
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)

                net.setInput(blob)
                detections = net.forward()

                for i in np.arange(0, detections.shape[2]):

                    confidence = detections[0, 0, i, 2]
                    if confidence > confidence_threshold:
                        idx = int(detections[0, 0, i, 1])

                        if CLASSES[idx] != "person":
                            continue
         

                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
          
                        (startX, startY, endX, endY) = box.astype("int")
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)
                        trackers.append(tracker)
            else:
                for tracker in trackers:
                    status = "Tracking"

                    tracker.update(rgb)
                    pos = tracker.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    rects.append((startX, startY, endX, endY))

            cv2.line(frame, (0, 0), (W, 0), (0, 255, 255), 2)

            objects = ct.update(rects)

            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)


                if to is None:
                    to = TrackableObject(objectID, centroid)


                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    if not to.counted:
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            to.counted = True


                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            to.counted = True

                up.append(totalUp)
                down.append(totalDown)
                trackableObjects[objectID] = to


                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
               
            info = [("Up", totalUp),("Down", totalDown),("Status", status),]


            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            totalFrames += 1
            frameST.image(frame, channels="BGR", caption='output video', use_column_width=True)
            if writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"XVID")
                writer = cv2.VideoWriter("output.avi", fourcc, rate,(frame.shape[1], frame.shape[0]), True)

        return up, down, gend, agen
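
run() above leans on yolo_net, yolo_output and yolo_predict helpers that are not shown. A sketch of what they could look like with OpenCV's Darknet loader, assuming yolo_predict returns pixel-space (x, y, w, h) boxes, class ids and confidences in the form cv2.dnn.NMSBoxes expects:

import cv2
import numpy as np


def yolo_net(weights, cfg):
    # load a Darknet YOLO model through OpenCV's DNN module
    return cv2.dnn.readNetFromDarknet(cfg, weights)


def yolo_output(net, img, shape):
    # build the input blob and run a forward pass through the output layers
    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (shape, shape),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    layer_names = net.getLayerNames()
    out_idxs = np.array(net.getUnconnectedOutLayers()).flatten()
    return net.forward([layer_names[i - 1] for i in out_idxs])


def yolo_predict(outputs, conf_threshold, H, W):
    # convert raw YOLO detections into pixel-space boxes, class ids and scores
    bbox, classIds, confs = [], [], []
    for output in outputs:
        for detection in output:
            scores = detection[5:]
            classId = int(np.argmax(scores))
            confidence = float(scores[classId])
            if confidence > conf_threshold:
                cx, cy = detection[0] * W, detection[1] * H
                w, h = detection[2] * W, detection[3] * H
                bbox.append([int(cx - w / 2), int(cy - h / 2), int(w), int(h)])
                classIds.append(classId)
                confs.append(confidence)
    return bbox, classIds, confs
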
Code example #16
0
    def sm_update(self):
        print("Entered state update")

        cv2.line(self.frame, (0, self.height // 2),
                 (self.width, self.height // 2), (0, 255, 255), 2)

        objects = self.ct.update(self.rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():

            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids tells us
                # in which direction the object is moving
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has already been counted
                if not to.counted:

                    # direction is negative (moving up)
                    # AND centroid is above the centre line, count the object
                    if direction < 0 and centroid[1] < self.height // 2:
                        self.Up += 1
                        to.counted = True

                    # direction is positive (moving down)
                    # AND centroid is below the centre line, count the object
                    elif direction > 0 and centroid[1] > self.height // 2:
                        self.Down += 1
                        to.counted = True
            # store the trackable object
            # trackableObjects[objectID] = to

            # get count of people inside
            self.peopleInside = self.Down - self.Up

            self.trackableObjects[objectID] = to

            text = "ID {}".format(objectID)
            cv2.putText(self.frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(self.frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                       -1)

        # for rect in self.rects:
        #     (startX, startY, endX, endY) = rect
        #     cv2.rectangle(self.frame, (startX, startY), (endX, endY),
        #                   (0, 255, 0), 2)

        info = [
            ("People inside", self.peopleInside),
            ("Tracker", self.selected_tracker),
        ]

        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(self.frame, text, (10, self.height - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        if self.writer is not None:
            self.writer.write(self.frame)

        cv2.imshow("Frame", self.frame)
        key = cv2.waitKey(1) & 0xFF

        if key == 27:  # Escape
            self.processing_videotofinalize_video.emit()

        if key == ord('s'):
            currentDate = datetime.now()
            date = datetime.strftime(currentDate, "%m%d%H%M")
            photoname = os.path.join("results", "captura_" + date + ".jpg")
            print("[SAVING]", photoname)
            cv2.imwrite(photoname, self.frame)

        self.totalFrames += 1
        self.fps.update()

        self.updatetoreading_frames.emit()
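
All of these snippets share a CentroidTracker whose update() associates detections with persistent object IDs (example #11 uses a variant that accepts blob keypoints and exposes get_count()). A heavily simplified sketch of the idea; the pyimagesearch-style tracker it stands in for also performs proper bipartite matching and fuller disappearance bookkeeping:

from collections import OrderedDict

import numpy as np


class CentroidTracker:
    def __init__(self, maxDisappeared=50, maxDistance=50):
        self.nextObjectID = 0
        self.objects = OrderedDict()      # objectID -> centroid
        self.disappeared = OrderedDict()  # objectID -> consecutive missed frames
        self.maxDisappeared = maxDisappeared
        self.maxDistance = maxDistance

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def get_count(self):
        # total number of objects ever registered
        return self.nextObjectID

    def update(self, rects):
        # no detections: age every tracked object and drop stale ones
        if len(rects) == 0:
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    del self.objects[objectID]
                    del self.disappeared[objectID]
            return self.objects

        # compute input centroids from the (startX, startY, endX, endY) boxes
        inputCentroids = np.array(
            [((sx + ex) // 2, (sy + ey) // 2) for (sx, sy, ex, ey) in rects])

        if len(self.objects) == 0:
            for c in inputCentroids:
                self.register(c)
        else:
            # naive greedy association: snap each existing object to the
            # closest new centroid within maxDistance
            for objectID, centroid in list(self.objects.items()):
                dists = np.linalg.norm(inputCentroids - np.array(centroid), axis=1)
                j = int(np.argmin(dists))
                if dists[j] <= self.maxDistance:
                    self.objects[objectID] = inputCentroids[j]
                    self.disappeared[objectID] = 0
        return self.objects
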