def Cov():
    global time
    try:
        from pyimagesearch.centroidtracker import CentroidTracker
        from pyimagesearch.trackableobject import TrackableObject
        from imutils.video import VideoStream
        from imutils.video import FPS
        import numpy as np
        import argparse
        import imutils
        import time
        import dlib
        import cv2
        from datetime import datetime
        import pyodbc
        conn = pyodbc.connect(
            'DRIVER={SQL Server};SERVER=182.156.200.178;DATABASE=python;UID=sa;PWD=elmcindia786@'
        )
        entry = conn.cursor()
        exitt = conn.cursor()
        # construct the argument parse and parse the arguments
        ap = argparse.ArgumentParser()
        """CAMERA IS DEFINED HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"""
        cam = "rtsp://*****:*****@[email protected]/cam/realmonitor?channel=1&subtype=0"

        ap.add_argument("-p",
                        "--prototxt",
                        default="mobilenet_ssd/MobileNetSSD_deploy.prototxt",
                        help="path to Caffe 'deploy' prototxt file")
        ap.add_argument("-m",
                        "--model",
                        default="mobilenet_ssd/MobileNetSSD_deploy.caffemodel",
                        help="path to Caffe pre-trained model")
        ap.add_argument("-i",
                        "--input",
                        type=str,
                        default=cam,
                        help="path to optional input video file")
        ap.add_argument("-o",
                        "--output",
                        type=str,
                        default="output/2.mp4",
                        help="path to optional output video file")
        ap.add_argument("-c",
                        "--confidence",
                        type=float,
                        default=0.4,
                        help="minimum probability to filter weak detections")
        ap.add_argument("-s",
                        "--skip-frames",
                        type=int,
                        default=30,
                        help="# of skip frames between detections")
        args = vars(ap.parse_args())

        # initialize the list of class labels MobileNet SSD was trained to
        # detect
        CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

        # if a video path was not supplied, grab a reference to the webcam
        if not args.get("input", False):
            print("[INFO] starting video stream...")
            vs = VideoStream(src=0).start()
            time.sleep(2.0)

        # otherwise, grab a reference to the video file
        else:
            print("[INFO] opening video file...")
            vs = cv2.VideoCapture(args["input"])

        # initialize the video writer (we'll instantiate later if need be)
        writer = None

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # initialize the total number of frames processed thus far, along
        # with the total number of objects that have moved either up or down
        totalFrames = 0
        totalDown = 0
        totalUp = 0
        Up = []
        Down = []

        # start the frames per second throughput estimator
        fps = FPS().start()

        # loop over frames from the video stream
        while True:
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream
            frame = vs.read()
            frame = frame[1] if args.get("input", False) else frame

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video
            if args["input"] is not None and frame is None:
                break

            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # if we are supposed to be writing a video to disk, initialize
            # the writer
            if args["output"] is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H),
                                         True)

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if totalFrames % args["skip_frames"] == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > args["confidence"]:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            Up.append(totalUp)
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            Down.append(totalDown)
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("Up", totalUp),
                ("Down", totalDown),
                ("Status", status),
            ]
            today = datetime.today()
            if len(Up) > 0:

                print("up:", str(Up))
                print("Log", (str(today)))
                sql = "INSERT INTO [python].[dbo].[peopleCounter] (status, logDateTime, personcount) VALUES ('0','" + str(
                    today) + "','1')"
                entry.execute(sql)
                conn.commit()
                Up.clear()

            if len(Down) > 0:
                print("Down:", str(Down))
                print("Log", (str(today)))
                sql = "INSERT INTO [python].[dbo].[peopleCounter] (status, logDateTime, personcount) VALUES ('1','" + str(
                    today) + "','1')"
                exitt.execute(sql)
                conn.commit()
                Down.clear()

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # check to see if we should write the frame to disk
            # if writer is not None:
            # 	writer.write(frame)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # check to see if we need to release the video writer pointer
        if writer is not None:
            writer.release()
            time.sleep(1)
            Recall()

        # if we are not using a video file, stop the camera video stream
        if not args.get("input", False):
            vs.stop()

        # otherwise, release the video file pointer
        else:
            vs.release()

        # close any open windows
        cv2.destroyAllWindows()
    except Exception as e:
        print(e)
        time.sleep(1)
        print("sd")
        Recall()
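Recall() is called above but never defined in this listing. A minimal sketch of what it presumably does, inferred only from how it is used (pause briefly, then restart the counter), might look like the following; the body is an assumption, not the original implementation.

import time

def Recall():
    # hypothetical restart helper (inferred from the calls above): pause
    # briefly, then re-enter Cov() so the stream and DB connection reopen
    time.sleep(2)
    Cov()

Since Cov() and Recall() call each other, a long-running deployment would eventually exhaust the recursion limit; wrapping Cov() in a simple retry loop would avoid that.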
print("[INFO] warming up camera...")
camera_right = CSICamera(capture_device=0, width=1280, height=720)
camera_left = CSICamera(capture_device=1, width=1280, height=720)
time.sleep(2.0)

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
H = None
W = None
cnt = 0


# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=conf["max_disappear"],
	maxDistance=conf["max_distance"])
trackers = []
trackableObjects = {}

# keep the count of total number of frames
totalFrames = 0

# initialize the log file
logFile = None

# initialize the list of various points used to calculate the avg of
# the vehicle speed
# points = [("A", "B"), ("B", "C"), ("C", "D")]

# start the frames per second throughput estimator
fps = FPS().start()
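The fragment above assumes a conf dictionary with keys such as "max_disappear" and "max_distance" (and a CSICamera class, presumably from the Jetson jetcam package). A minimal sketch of loading those settings from a JSON file, with a hypothetical path and example values, could be:

import json

# hypothetical config file; expected to contain at least
# {"max_disappear": 10, "max_distance": 175}
with open("config/config.json", "r") as f:
    conf = json.load(f)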
Example No. 3
# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
	print("[INFO] starting video stream...")
	vs = VideoStream(src=0).start()
	time.sleep(2.0)

# otherwise, grab a reference to the video file
else:
	print("[INFO] opening video file...")
	vs = cv2.VideoCapture(args["input"])

# initialize the video writer (we'll instantiate later if need be)
writer = None

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}

# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while True:
	# grab the next frame and handle if we are reading from either
Example No. 4
def gen():
	# construct the argument parse and parse the arguments
	now = datetime.datetime.now()
	#if now.hour == 18 and  now.minute==0 :
		#fireInterVal(now.hour)
	#if now.hour == 9 and  now.minute==0 :
		#fireInterVal(now.hour)
	#if now.hour == 12 and  now.minute==0 :
		#fireInterVal(now.hour)
	#if now.hour == 15 and  now.minute==0 :
		#fireInterVal(now.hour)
	
	ap = argparse.ArgumentParser()
	ap.add_argument("-p", "--prototxt", default='mobilenet_ssd/MobileNetSSD_deploy.prototxt',
		help="path to Caffe 'deploy' prototxt file")
	ap.add_argument("-m", "--model", default='mobilenet_ssd/MobileNetSSD_deploy.caffemodel',
		help="path to Caffe pre-trained model")
	ap.add_argument("-i", "--input", type=str,default='videos/example_07.mp4',
		help="path to optional input video file")
	ap.add_argument("-o", "--output", type=str,default='output/webcam_output.avi',
		help="path to optional output video file")
	ap.add_argument("-c", "--confidence", type=float, default=0.4,
		help="minimum probability to filter weak detections")
	ap.add_argument("-s", "--skip-frames", type=int, default=30,
		help="# of skip frames between detections")
	args = vars(ap.parse_args())

	# initialize the list of class labels MobileNet SSD was trained to
	# detect
	CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
		"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
		"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
		"sofa", "train", "tvmonitor"]

	# load our serialized model from disk
	print("[INFO] loading model...")
	net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

	# if a video path was not supplied, grab a reference to the webcam
	if not args.get("input", False):
		print("[INFO] starting video stream...")
		vs = VideoStream(src=0).start()
		time.sleep(2.0)

	# otherwise, grab a reference to the video file
	else:
		print("[INFO] opening video file...")
		vs = cv2.VideoCapture(args["input"])

	# initialize the video writer (we'll instantiate later if need be)
	writer = None

	# initialize the frame dimensions (we'll set them as soon as we read
	# the first frame from the video)
	W = None
	H = None

	# instantiate our centroid tracker, then initialize a list to store
	# each of our dlib correlation trackers, followed by a dictionary to
	# map each unique object ID to a TrackableObject
	ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
	trackers = []
	trackableObjects = {}

	# initialize the total number of frames processed thus far, along
	# with the total number of objects that have moved either up or down
	totalFrames = 0
	totalDown = 0
	totalUp = 0
	previousCount = 0

	# start the frames per second throughput estimator
	fps = FPS().start()	

	# loop over frames from the video stream
	while True:
		# grab the next frame and handle if we are reading from either
		# VideoCapture or VideoStream
		frame = vs.read()
		frame = frame[1] if args.get("input", False) else frame

		# if we are viewing a video and we did not grab a frame then we
		# have reached the end of the video
		if args["input"] is not None and frame is None:
			break

		# resize the frame to have a maximum width of 500 pixels (the
		# less data we have, the faster we can process it), then convert
		# the frame from BGR to RGB for dlib
		frame = imutils.resize(frame, width=500)
		rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

		# if the frame dimensions are empty, set them
		if W is None or H is None:
			(H, W) = frame.shape[:2]

		

		# initialize the current status along with our list of bounding
		# box rectangles returned by either (1) our object detector or
		# (2) the correlation trackers
		status = "Waiting"
		rects = []

		# check to see if we should run a more computationally expensive
		# object detection method to aid our tracker
		if totalFrames % args["skip_frames"] == 0:
			# set the status and initialize our new set of object trackers
			status = "Detecting"
			trackers = []

			# convert the frame to a blob and pass the blob through the
			# network and obtain the detections
			blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
			net.setInput(blob)
			detections = net.forward()

			# loop over the detections
			for i in np.arange(0, detections.shape[2]):
				# extract the confidence (i.e., probability) associated
				# with the prediction
				confidence = detections[0, 0, i, 2]

				# filter out weak detections by requiring a minimum
				# confidence
				if confidence > args["confidence"]:
					# extract the index of the class label from the
					# detections list
					idx = int(detections[0, 0, i, 1])

					# if the class label is not a person, ignore it
					if CLASSES[idx] != "person":
						continue

					# compute the (x, y)-coordinates of the bounding box
					# for the object
					box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
					(startX, startY, endX, endY) = box.astype("int")

					# construct a dlib rectangle object from the bounding
					# box coordinates and then start the dlib correlation
					# tracker
					tracker = dlib.correlation_tracker()
					rect = dlib.rectangle(startX, startY, endX, endY)
					tracker.start_track(rgb, rect)

					# add the tracker to our list of trackers so we can
					# utilize it during skip frames
					trackers.append(tracker)

		# otherwise, we should utilize our object *trackers* rather than
		# object *detectors* to obtain a higher frame processing throughput
		else:
			# loop over the trackers
			for tracker in trackers:
				# set the status of our system to be 'tracking' rather
				# than 'waiting' or 'detecting'
				status = "Tracking"

				# update the tracker and grab the updated position
				tracker.update(rgb)
				pos = tracker.get_position()

				# unpack the position object
				startX = int(pos.left())
				startY = int(pos.top())
				endX = int(pos.right())
				endY = int(pos.bottom())

				# add the bounding box coordinates to the rectangles list
				rects.append((startX, startY, endX, endY))

		# draw a horizontal line in the center of the frame -- once an
		# object crosses this line we will determine whether they were
		# moving 'up' or 'down'
		cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

		# use the centroid tracker to associate the (1) old object
		# centroids with (2) the newly computed object centroids
		objects = ct.update(rects)

		# loop over the tracked objects
		for (objectID, centroid) in objects.items():
			# check to see if a trackable object exists for the current
			# object ID
			to = trackableObjects.get(objectID, None)

			# if there is no existing trackable object, create one
			if to is None:
				to = TrackableObject(objectID, centroid)

			# otherwise, there is a trackable object so we can utilize it
			# to determine direction
			else:
				# the difference between the y-coordinate of the *current*
				# centroid and the mean of *previous* centroids will tell
				# us in which direction the object is moving (negative for
				# 'up' and positive for 'down')
				y = [c[1] for c in to.centroids]
				direction = centroid[1] - np.mean(y)
				to.centroids.append(centroid)

				# check to see if the object has been counted or not
				if not to.counted:
					# if the direction is negative (indicating the object
					# is moving up) AND the centroid is above the center
					# line, count the object
					if direction < 0 and centroid[1] < H // 2:
						totalUp += 1
						to.counted = True

					# if the direction is positive (indicating the object
					# is moving down) AND the centroid is below the
					# center line, count the object
					elif direction > 0 and centroid[1] > H // 2:
						totalDown += 1
						to.counted = True

			# store the trackable object in our dictionary
			trackableObjects[objectID] = to

			# draw both the ID of the object and the centroid of the
			# object on the output frame
			text = "ID {}".format(objectID)
			cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

		# construct a tuple of information we will be displaying on the
		# frame
		info = [
			("Up", totalUp),
			("Down", totalDown),
			("Status", status),
		]

		
		if status == 'Tracking':
			currentCount = totalDown - totalUp
			if currentCount != previousCount:
				fireUpdate(currentCount)
				previousCount = currentCount

		# loop over the info tuples and draw them on our frame
		for (i, (k, v)) in enumerate(info):
			text = "{}: {}".format(k, v)
			cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
				cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
       
		
		
		ret, jpeg = cv2.imencode('.jpg', frame)
		yield (b'--frame\r\n'
			b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
		totalFrames += 1
		fps.update()
	
	fps.stop()
	vs.release()
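gen() yields multipart JPEG chunks delimited by a "frame" boundary, which is the format expected for MJPEG streaming over HTTP. A minimal sketch of serving it from Flask (the route name and app setup are assumptions; gen() and its globals are expected to live in the same module):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # stream the annotated frames produced by gen() as an MJPEG response
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, threaded=True)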
def multiple_detection(vs, num):
    #vs= VideoStream(src=0).start()
    time.sleep(2.0)

    # otherwise, grab a reference to the video file

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

    # resize the frame to have a maximum width of 500 pixels (the
    # less data we have, the faster we can process it), then convert
    # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

    # if we are supposed to be writing a video to disk, initialize
    # the writer
    # if args["output"] is not None and writer is None:
    # 	fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    # 	writer = cv2.VideoWriter(args["output"], fourcc, 30,
    # 		(W, H), True)

    # initialize the current status along with our list of bounding
    # box rectangles returned by either (1) our object detector or
    # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            cam_list[num] = 0
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                # compute the (x, y)-coordinates of the bounding box
                # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

    # otherwise, we should utilize our object *trackers* rather than
    # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to 'Detected' rather
                # than 'Waiting' or 'Detecting'
                status = "Detected"
                cam_list[num] = 1

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

    # draw a horizontal line along the bottom edge of the frame -- once an
    # object crosses this line we will determine whether they were
    # moving 'up' or 'down'
        cv2.line(frame, (0, H), (W, H), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        # else:
        # 	# the difference between the y-coordinate of the *current*
        # 	# centroid and the mean of *previous* centroids will tell
        # 	# us in which direction the object is moving (negative for
        # 	# 'up' and positive for 'down')
        # 	y = [c[1] for c in to.centroids]
        # 	direction = centroid[1] - np.mean(y)
        # 	to.centroids.append(centroid)

        # 	# check to see if the object has been counted or not
        # 	if not to.counted:
        # 		# if the direction is negative (indicating the object
        # 		# is moving up) AND the centroid is above the center
        # 		# line, count the object
        # 		#if direction < 0 and centroid[1] < H // 2:
        # 		totalUp += 1
        # 		to.counted = True

        # 		# if the direction is positive (indicating the object
        # 		# is moving down) AND the centroid is below the
        # 		# center line, count the object
        # 		# elif direction > 0 and centroid[1] > H // 2:
        # 		# 	totalDown += 1
        # 		# 	to.counted = True

        # # store the trackable object in our dictionary
        # trackableObjects[objectID] = to

        # # draw both the ID of the object and the centroid of the
        # # object on the output frame
        # text = "ID {}".format(objectID)
        # cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
        # 	cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    # construct a tuple of information we will be displaying on the
    # frame
    # info = [
    # 	("Countrer", totalUp),
    # 	("Status", status),
    # ]

    # loop over the info tuples and draw them on our frame
    # for (i, k) in enumerate(info):
    # 	# text = "{}: {}".format(k, v)
    # 	# cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
    # 	# 	cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        print("Status:", status)
        # print("counter",i)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

    # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # increment the total number of frames processed thus far and
    # then update the FPS counter
        totalFrames += 1
        fps.update()

# stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

# if we are not using a video file, stop the camera video stream
    if not args.get("input", False):
        vs.stop()

# otherwise, release the video file pointer
    else:
        vs.release()

# close any open windows
    cv2.destroyAllWindows()
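multiple_detection() expects an already-opened stream and a camera index, and records per-camera presence in the module-level cam_list. A hypothetical driver is sketched below, assuming args, net, CLASSES and cam_list are initialized at module level; note that cv2.imshow is not thread-safe, so a production version would hand frames back to the main thread for display.

import threading

# hypothetical camera sources; these would normally be RTSP URLs or device indices
sources = [0, 1]
cam_list = [0] * len(sources)

streams = [cv2.VideoCapture(src) for src in sources]
threads = [threading.Thread(target=multiple_detection, args=(vs, i), daemon=True)
           for i, vs in enumerate(streams)]

for t in threads:
    t.start()
for t in threads:
    t.join()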
Example No. 6
def make_prediction(in_filename, out_filename, confidence_threshold=0.3, skip_frames=200, caffe_prototxt_file=None, model_file=None):
	# initialize the list of class labels MobileNet SSD was trained to
	# detect
	CLASSES = ["aeroplane", "bicycle", "bird", "boat",
		"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
		"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
		"sofa", "train", "tvmonitor"]

	# load our serialized model from disk
	print("[INFO] loading model...")
	# net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
	net = model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)

	# if a video path was not supplied, grab a reference to the webcam
	if not in_filename:
		print("[INFO] starting video stream...")
		vs = VideoStream(src=0).start()
		time.sleep(2.0)

	# otherwise, grab a reference to the video file
	else:
		print("[INFO] opening video file...")
		vs = cv2.VideoCapture(in_filename)

	# initialize the video writer (we'll instantiate later if need be)
	writer = None

	# initialize the frame dimensions (we'll set them as soon as we read
	# the first frame from the video)
	W = None
	H = None

	# instantiate our centroid tracker, then initialize a list to store
	# each of our dlib correlation trackers, followed by a dictionary to
	# map each unique object ID to a TrackableObject
	ct = CentroidTracker(maxDisappeared=10, maxDistance=50)
	trackers = []
	trackableObjects = {}

	# initialize the total number of frames processed thus far, along
	# with the total number of objects that have moved either up or down
	totalFrames = 0
	totalIn = 0
	totalOut = 0

	# start the frames per second throughput estimator
	fps = FPS().start()

	# loop over frames from the video stream
	while True:
		# grab the next frame and handle if we are reading from either
		# VideoCapture or VideoStream
		frame = vs.read()
		frame = frame[1] if in_filename else frame

		# if we are viewing a video and we did not grab a frame then we
		# have reached the end of the video
		if in_filename is not None and frame is None:
			break

		# resize the frame to have a maximum width of 500 pixels (the
		# less data we have, the faster we can process it), then convert
		# the frame from BGR to RGB for dlib
		frame = imutils.resize(frame, width=500)
		rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

		# if the frame dimensions are empty, set them
		if W is None or H is None:
			(H, W) = frame.shape[:2]

		# if we are supposed to be writing a video to disk, initialize
		# the writer
		if out_filename is not None and writer is None:
			fourcc = cv2.VideoWriter_fourcc(*"MJPG")
			writer = cv2.VideoWriter(out_filename, fourcc, 30,
				(W, H), True)

		# initialize the current status along with our list of bounding
		# box rectangles returned by either (1) our object detector or
		# (2) the correlation trackers
		status = "Waiting"
		rects = []

		# check to see if we should run a more computationally expensive
		# object detection method to aid our tracker
		if totalFrames % skip_frames == 0:
			# set the status and initialize our new set of object trackers
			status = "Detecting"
			trackers = []

			# convert the frame to a blob and pass the blob through the
			# network and obtain the detections

			class_IDs, scores, bounding_boxes = net(data.transforms.presets.ssd.transform_test(mx.nd.array(frame), 270)[0])

			# loop over the detections
			for i, (class_ID, score, bounding_box) in enumerate(zip(class_IDs[0], scores[0], bounding_boxes[0])):

				class_ID = int(class_ID[0].asnumpy()[0])
				# extract the confidence (i.e., probability) associated
				# with the prediction
				confidence = score[0].asnumpy()[0]

				# filter out weak detections by requiring a minimum
				# confidence
				if confidence > confidence_threshold:
					# extract the index of the class label from the
					# detections list
					idx = int(class_ID)

					# if the class label is not a person, ignore it
					if CLASSES[idx] != "person":
						continue

					# compute the (x, y)-coordinates of the bounding box
					# for the object
					box = bounding_box.asnumpy()
					(startX, startY, endX, endY) = box.astype("int")

					# construct a dlib rectangle object from the bounding
					# box coordinates and then start the dlib correlation
					# tracker
					tracker = dlib.correlation_tracker()
					rect = dlib.rectangle(startX, startY, endX, endY)
					tracker.start_track(rgb, rect)

					# add the tracker to our list of trackers so we can
					# utilize it during skip frames
					trackers.append(tracker)

		# otherwise, we should utilize our object *trackers* rather than
		# object *detectors* to obtain a higher frame processing throughput
		else:
			# loop over the trackers
			for tracker in trackers:
				# set the status of our system to be 'tracking' rather
				# than 'waiting' or 'detecting'
				status = "Tracking"

				# update the tracker and grab the updated position
				tracker.update(rgb)
				pos = tracker.get_position()

				# unpack the position object
				startX = int(pos.left())
				startY = int(pos.top())
				endX = int(pos.right())
				endY = int(pos.bottom())

				# add the bounding box coordinates to the rectangles list
				rects.append((startX, startY, endX, endY))

		# use the centroid tracker to associate the (1) old object
		# centroids with (2) the newly computed object centroids
		objects = ct.update(rects)

		# loop over the tracked objects
		for (objectID, centroid) in objects.items():
			# check to see if a trackable object exists for the current
			# object ID
			to = trackableObjects.get(objectID, None)

			# if there is no existing trackable object, create one
			if to is None:
				to = TrackableObject(objectID, centroid)

			# otherwise, there is a trackable object so we can utilize it
			# to determine direction
			else:
				# the difference between the y-coordinate of the *current*
				# centroid and the mean of *previous* centroids will tell
				# us in which direction the object is moving (negative for
				# 'up' and positive for 'down')
				y = [c[1] for c in to.centroids]
				x = [c[0] for c in to.centroids]
				direction_y = centroid[1] - np.mean(y)
				direction_x = centroid[0] - np.mean(x)
				to.centroids.append(centroid)

				cur_x = np.mean(x)
				cur_y = np.mean(y)
				x_low, x_high, y_low, y_high = W // 3, 2 * W // 3, H // 4, 3 * H // 4
				
				cv2.line(frame, (x_low, y_low), (x_low, y_high), color=(0, 255, 0))
				cv2.line(frame, (x_high, y_low), (x_high, y_high), color=(0, 255, 0))
				cv2.line(frame, (x_high, y_low), (x_low, y_low), color=(0, 255, 0))
				cv2.line(frame, (x_high, y_high), (x_low, y_high), color=(0, 255, 0))

				# check to see if the object has been counted or not
				if not to.counted:
					# if the direction is negative (indicating the object
					# is moving up) AND the centroid is above the center
					# line, count the object
					delta_pixels = 10
					pred_x, pred_y = cur_x + delta_pixels * np.sign(direction_x), cur_y + delta_pixels * np.sign(direction_y)

					if ((cur_x < x_low or cur_x > x_high) and pred_x >= x_low and pred_x <= x_high) and ((cur_y < y_low or cur_y > y_high) and pred_y >= y_low and pred_y <= y_high):
						totalIn += 1
						to.counted = True
					elif cur_x >= x_low and cur_x <= x_high and cur_y >= y_low or cur_y <= y_high:
						totalOut += 1
						to.counted = True
					elif cur_x >= x_low and cur_x <= x_high and cur_y >= y_low and cur_y <= y_high and (pred_x < x_low or pred_x > x_high or pred_y < y_low or pred_y > y_high):
						totalOut += 1
						to.counted = True

			# store the trackable object in our dictionary
			trackableObjects[objectID] = to

			# draw both the ID of the object and the centroid of the
			# object on the output frame
			text = "ID {}".format(objectID)
			cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

		# construct a tuple of information we will be displaying on the
		# frame
		info = [
			("In", totalIn),
			("Out", totalOut),
			("Status", status),
		]

		# loop over the info tuples and draw them on our frame
		for (i, (k, v)) in enumerate(info):
			text = "{}: {}".format(k, v)
			cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
				cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

		# check to see if we should write the frame to disk
		if writer is not None:
			writer.write(frame)

		# show the output frame
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF

		# if the `q` key was pressed, break from the loop
		if key == ord("q"):
			break

		# increment the total number of frames processed thus far and
		# then update the FPS counter
		totalFrames += 1
		fps.update()

	# stop the timer and display FPS information
	fps.stop()
	print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
	print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

	# check to see if we need to release the video writer pointer
	if writer is not None:
		writer.release()

	# if we are not using a video file, stop the camera video stream
	if not in_filename:
		vs.stop()

	# otherwise, release the video file pointer
	else:
		vs.release()

	# close any open windows
	cv2.destroyAllWindows()
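A hypothetical invocation of make_prediction(), with placeholder file paths:

# process a local video and write the annotated output; paths are placeholders
make_prediction("videos/example_01.mp4", "output/example_01_annotated.avi",
                confidence_threshold=0.4, skip_frames=30)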
Example No. 7
# 2: Load the trained weights into the model.

# TODO: Set the path of the trained weights.
weights_path = 'VGG_VOC0712Plus_SSD_512x512_ft_iter_160000.h5'

model.load_weights(weights_path, by_name=True)

# 3: Compile the model so that Keras won't complain the next time you load it.

adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
confidence_thresh = 0.55
ct = CentroidTracker()

writer = None
orig_images = []  # Store the images here.
input_images = []  # Store resized versions of the images here.

VideoInput = [
    'gambir011', 'gambir014', 'lenteng', 'pulogadung', 'kebonsirih',
    'kananbagus'
]
objects_1 = []
IDCentroid = []
btsMerah = []
btsHijau = []
hijau = 'hijau'
merah = 'merah'
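Assuming the SSD 512 model above was built in 'inference' mode (so model.predict() already returns decoded detections in the form [class, confidence, xmin, ymin, xmax, ymax]), a minimal sketch of running it on one image and filtering by the confidence_thresh defined earlier could be (the image path is a placeholder):

import numpy as np
from keras.preprocessing import image

# load and resize a single image to the 512x512 input the model expects
img = image.load_img('examples/street.jpg', target_size=(512, 512))
input_images = np.expand_dims(image.img_to_array(img), axis=0)

# predict and keep only detections above the confidence threshold
y_pred = model.predict(input_images)
y_pred_thresh = [y_pred[k][y_pred[k, :, 1] > confidence_thresh]
                 for k in range(y_pred.shape[0])]
print(y_pred_thresh[0])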
def main(dataPoints_normal, dataPoints_perspective):
    # start without logging the data
    loggingPrespective = False
    loggingNormal = False
    mouse_capturing = MouseCapture([])
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v",
                    "--video_source",
                    required=False,
                    default='/dev/video0',
                    help="The video to use as a source")
    ap.add_argument("-f",
                    "--framesToEscape",
                    required=False,
                    default=30,
                    help="Capture data every 30 frames")
    ap.add_argument("-p",
                    "--prototxt",
                    required=False,
                    help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-c",
                    "--confidence",
                    type=float,
                    default=0.5,
                    help="minimum probability to filter weak detections")
    ap.add_argument("-m",
                    "--model",
                    required=False,
                    help="path to Caffe pre-trained model")
    args = vars(ap.parse_args())

    # initialize our centroid tracker and frame dimensions
    ct = CentroidTracker()
    (H, W) = (None, None)

    # create file to write the data to
    # if os.path.exists("datasetPoints.csv"):
    #     dataPoints = open("datasetPoints.csv", "w")
    # else:
    #     dataPoints = open("datasetPoints.csv", "w+")

    # load our serialized model from disk
    print("[INFO] loading model...")
    out_size = 1000
    # default value for the source points of the perspective transform
    pts1 = np.float32([[155, 120], [480, 120], [20, 475], [620, 475]])
    pts1 = np.float32([[0, 0], [out_size, 0], [0, out_size],
                       [out_size, out_size]])

    # initialize the video stream and allow the camera sensor to warmup
    print(f"[INFO] starting video stream from source: {args['video_source']}")
    vs = cv2.VideoCapture(args['video_source'])
    print(vs.read())
    framecount = 0

    # Find OpenCV version
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    # With webcam get(CV_CAP_PROP_FPS) does not work.
    # Let's see for ourselves.

    if int(major_ver) < 3:
        fps = vs.get(cv2.cv.CV_CAP_PROP_FPS)
        # FrameRate = cv2.VideoCapture(args['framesToEscape'])
        print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".
              format(fps))
    else:
        fps = vs.get(cv2.CAP_PROP_FPS)
        # FrameRate = cv2.VideoCapture(args['framesToEscape'])
        print(
            "Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(
                fps))

    # create the frame windows
    cv2.namedWindow("Frame")
    # attach the mouse callback event to the window
    cv2.setMouseCallback("Frame", mouse_capturing)
    # store the 4 points that will be perspective transformed in the list
    #time.sleep(2.0)

    # from test import TestKeypointRcnn, torch_tensor_to_img
    from Person_det_track import Pipeline

    # net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
    # net = TestKeypointRcnn(920, out_size)
    net = Pipeline()

    # loop over the frames from the video stream
    while True:
        # read the next frame from the video stream and resize it
        # time.sleep(2.0)
        r, frame = vs.read()
        framecount += 1
        result = frame
        # frame = imutils.resize(frame, width=800)

        for center_position in mouse_capturing.pointList:
            # print(center_position)
            cv2.circle(frame, center_position, 5, (0, 0, 255), -1)

        # # construct a blob from the frame, pass it through the network,
        # # obtain our output predictions, and initialize the list of
        # # bounding box rectangles
        # blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H),
        #                              (104.0, 177.0, 123.0))

        rects = []
        #################################################################
        # When using the rcnn model in test.py
        # predictions, frame = net(frame)
        # frame = torch_tensor_to_img(frame)
        # result = frame.copy()
        # predictions_length = predictions['boxes'].shape[0]
        #################################################################

        #################################################################
        # When using the Person_det_track.py from the
        # Person-Detection-and-Tracking
        new_frame, predictions = net(frame)
        predictions_length = len(predictions)
        #################################################################
        # if the frame dimensions are None, grab them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
            H = int(H / out_size)
            W = int(W / out_size)

        ################################################################
        # this is where the perspective transformation happens; the same
        # homography can also be applied to individual points (see
        # cv2.perspectiveTransform further below)
        ################################################################
        if len(mouse_capturing.pointList) >= 4:
            pts1 = np.float32([[mouse_capturing.pointList[0]],
                               [mouse_capturing.pointList[1]],
                               [mouse_capturing.pointList[2]],
                               [mouse_capturing.pointList[3]]])
            # print("point0:", [pointList[0]], "point1:", [pointList[1]], "point2:", [pointList[2]], "point3:",[pointList[3]])

        pts2 = np.float32([[0, 0], [result.shape[1], 0], [0, result.shape[0]],
                           [result.shape[1], result.shape[0]]])
        # calculate the perspective transformation matrix
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        # apply the transformation to the frame; dsize expects (width, height)
        result = cv2.warpPerspective(frame, matrix,
                                     (result.shape[1], result.shape[0]))

        # loop over the detections
        for i in range(0, predictions_length):
            # filter out weak detections by ensuring the predicted
            # probability is greater than a minimum threshold

            # # If using the pytorch version in test.py
            # score = predictions['scores'][i]

            # # If using the Person-Detection-and-Tracking
            score = 1

            if score > args["confidence"]:
                # compute the (x, y)-coordinates of the bounding box for
                # the object, then update the bounding box rectangles list

                # # If using the pytorch version in test.py
                # box = predictions['boxes'][i].cpu().detach().numpy()

                # # If using the Person-Detection-and-Tracking
                box = np.array(predictions[i].box)
                rects.append(box.astype("int"))

                # draw a bounding box surrounding the object so we can
                # visualize it
                (startX, startY, endX,
                 endY) = (box * np.array([W, H, W, H])).astype("int")
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 255, 0), 2)
        # update our centroid tracker using the computed set of bounding
        # box rectangles
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            # captured data after 30 frame
            if framecount >= fps:
                # print('Frame number', framecount)
                framecount = 0

                if loggingNormal:
                    # print("wrote normal")
                    dataPoints_normal.writerow({
                        'Datetime': datetime.now(),
                        'ObjectID': objectID,
                        'xLocation': centroid[0],
                        'yLocation': centroid[1]
                    })
                    # '{0},{1},{2},{3}\n'.format(datetime.now(), objectID, centroid[0], centroid[1]))

                # map the centroid (original-frame coordinates) through the
                # same homography to get its position in the top-down view
                transformed_points = cv2.perspectiveTransform(
                    centroid.reshape(1, 1, -1).astype(np.float64), matrix)[0, 0]
                # if perspective logging is activated
                if (loggingPrespective):
                    # only log points that fall inside the transformed area
                    if transformed_points[0] > 0 and transformed_points[1] > 0 and \
                       transformed_points[0] < result.shape[0] and transformed_points[1] < result.shape[1]:
                        # print(datetime.now(), ";", objectID, ";",transformed_points[0], ";", transformed_points[1])
                        # print("wrote Perspective")
                        # write to the file
                        dataPoints_perspective.writerow({
                            'Datetime':
                            datetime.now(),
                            'ObjectID':
                            objectID,
                            'xLocation':
                            transformed_points[0],
                            'yLocation':
                            transformed_points[1]
                        })
                        # .write('{0},{1},{2},{3}\n'.format(datetime.now(),objectID,transformed_points[0],transformed_points[1]))
                        # cv2.circle(result, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

                        cv2.circle(result, (int(transformed_points[0]),
                                            int(transformed_points[1])), 3,
                                   (255, 100, 0), 2)

        # show the output frame
        cv2.imshow("Frame", frame)
        cv2.imshow("Result", result)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop

        if key == ord("q"):
            break
        if key == ord("d"):
            mouse_capturing.pointList = []
        if key == ord("r"):
            print("[INFO] Starting to log perspective")
            loggingPrespective = True
        if key == ord("n"):
            print("[INFO] Starting to log normal")
            loggingNormal = True
        if key == ord("s"):
            logging = False

    # do a bit of cleanup
    cv2.destroyAllWindows()
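The point logging above relies on cv2.perspectiveTransform to push a centroid through the same homography used to warp the frame. A small standalone sketch (coordinates chosen arbitrarily):

import cv2
import numpy as np

# source quadrilateral in the camera frame and the destination square
pts1 = np.float32([[155, 120], [480, 120], [20, 475], [620, 475]])
pts2 = np.float32([[0, 0], [1000, 0], [0, 1000], [1000, 1000]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)

# a single centroid in frame coordinates, shaped (1, 1, 2) as required
point = np.array([[[300, 250]]], dtype=np.float32)
warped = cv2.perspectiveTransform(point, matrix)[0, 0]
print(warped)  # the centroid's location in the top-down (bird's-eye) view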
Example No. 9
def main(args, total=0):

    WIDTH = 300
    HEIGHT = 300

    try:
        # direction going in
        in_direction = MoveDirection(args["direction"].lower())
    except Exception:
        raise ValueError(
            "Only 'up', 'down', 'left', or 'right' directions are supported")

    centroid_idx = get_horiz_vert(in_direction).value
    is_visual = args["visual"]

    # mask used to indicate the "in" direction
    if is_visual:
        mask = np.zeros((HEIGHT, WIDTH, 3)).astype('uint8')
        w = WIDTH // 2
        h = HEIGHT // 2

        if in_direction == MoveDirection.LEFT:
            mask[:, :w, 2] = 255
        elif in_direction == MoveDirection.RIGHT:
            mask[:, w:, 2] = 255
        elif in_direction == MoveDirection.DOWN:
            mask[h:, :, 2] = 255
        else:
            mask[:h, :, 2] = 255

    # store or ignore the count we receive in the reset request
    store_count = args['set_count']

    if args["debug"]:
        logging.info("Please attach a debugger to port 5680")
        import ptvsd
        ptvsd.enable_attach(('0.0.0.0', 5680))
        ptvsd.wait_for_attach()
        ptvsd.break_into_debugger()

    if args["id"] is None:
        raise ValueError("id must be specified")

    global running, sess

    # load our serialized model from disk
    logging.info("loading model...")

    detector_type = args["detector"]
    if detector_type == "opencv":
        detector = DetectorCV(args["model"],
                              args["prototxt"],
                              confidence_thresh=args["confidence"])
    elif detector_type == "onnx":
        detector = DetectorOnnx(args["model"])
    else:
        raise ValueError(
            f"Unknown detector: {args['detector']}. Use 'opencv' or 'onnx'")

    is_rtsp = args["input"] is not None and args["input"].startswith("rtsp://")
    source = "prod" if is_rtsp else "test"

    # if a video path was not supplied, grab a reference to the webcam
    logging.info("starting video stream...")
    if not args.get("input", False):
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        if (not is_rtsp and not os.path.exists(args["input"])):
            raise FileNotFoundError(args["input"])

        vs = cv2.VideoCapture(args["input"])
        if is_rtsp:
            vs.set(cv2.CAP_PROP_BUFFERSIZE, 600)

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    centroidTracker = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = total if store_count and in_direction in (
        MoveDirection.DOWN, MoveDirection.RIGHT) else 0
    totalUp = total if store_count and in_direction in (
        MoveDirection.UP, MoveDirection.LEFT) else 0

    # detailed counters of foot traffic
    currentIn = 0
    currentOut = 0

    # report counts from this camera
    messageEvery = args["report_count"]

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if messenger.should_reset() or (args["input"] is not None
                                        and frame is None):
            logging.debug("We are DONE!")
            break

        # resize the frame to a fixed 300x300 (the less data we have,
        # the faster we can process it), then convert the frame from
        # BGR to RGB for dlib
        frame = cv2.resize(frame, (WIDTH, HEIGHT),
                           interpolation=cv2.INTER_LINEAR)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
            # count the object when it's crossing either mid-height or mid-width
            crossingDimension = get_dir_dimension(in_direction, W, H) // 2

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if args["output"] is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []
            detections = detector.detect(frame)

            for startX, startY, endX, endY in detections:
                # construct a dlib rectangle object from the bounding
                # box coordinates and then start the dlib correlation
                # tracker
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (255, 0, 0), 2)

                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # draw the tracked bounding box
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (255, 0, 0), 2)

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        if is_visual:
            if get_horiz_vert(in_direction) == CountDirection.VERTICAL:
                cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
            else:
                cv2.line(frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = centroidTracker.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            trackableObject = trackableObjects.get(objectID, None)

            if trackableObject is None:
                trackableObject = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # where have we seen it last?
                xy = trackableObject.centroids[-1][centroid_idx]
                # see if we need to count it.
                # we count iff the centroid crossed the mid-line since its last known position
                direction = get_trigger_count(xy, centroid[centroid_idx],
                                              crossingDimension)

                trackableObject.centroids.append(centroid)

                # a negative trigger means the object crossed the mid-line
                # moving up/left, so count it as leaving
                if direction < 0:
                    totalUp += 1
                    currentOut += 1

                # a positive trigger means the object crossed the mid-line
                # moving down/right, so count it as entering
                elif direction > 0:
                    totalDown += 1
                    currentIn += 1

            # store the trackable object in our dictionary
            trackableObjects[objectID] = trackableObject

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            if is_visual:
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

        # up or down counting - based on what we have parameterized
        total = totalDown - totalUp

        if in_direction == MoveDirection.UP or in_direction == MoveDirection.LEFT:
            total = -total
            # swap current "in" and "out" counters depending on direction
            currentIn, currentOut = currentOut, currentIn

        messenger.update_count(total)

        if totalFrames % messageEvery == 0:
            # messenger has been initialized with resettableCount
            messenger.send_count()

            # send current "in" and "out" foot traffic and update direction
            messenger.send_traffic_details(currentIn, currentOut, source)

            currentIn = currentOut = 0

        if is_visual:
            # construct a tuple of information we will be displaying on the
            # frame
            up, down = get_cur_direction_names(in_direction)

            info = [
                (up, totalUp),
                (down, totalDown),
                ("Total", total),
                ("Status", status),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        if is_visual:
            img = cv2.addWeighted(frame, 0.8, mask, 0.2, 0)
            # show the output frame
            cv2.imshow("Frame", img)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            # and completely stop running
            if key == ord("q"):
                running = False
                break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    logging.info("elapsed time: {:.2f}".format(fps.elapsed()))
    logging.info("approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream
    if not args.get("input", False):
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()
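
# The crossing logic above relies on project helpers (get_trigger_count,
# get_horiz_vert, get_dir_dimension) defined elsewhere in this repository.
# As a rough illustration of the mid-line trigger only, a helper with the
# same shape as get_trigger_count might look like this sketch (hypothetical;
# the real implementation may differ):
def get_trigger_count_sketch(previous, current, mid_line):
    # the centroid just crossed the mid-line moving down/right
    if previous < mid_line <= current:
        return 1
    # the centroid just crossed the mid-line moving up/left
    if current <= mid_line < previous:
        return -1
    # no crossing since the last known position
    return 0

# example: previous y = 140, current y = 155, mid-line at 150 -> returns 1
print(get_trigger_count_sketch(140, 155, 150))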
Example No. 10
    def run(self):
        # Turkish UI labels: "Left Total", "Right Total", "Status"
        self.signals.changeTitleBox.emit(" Sol Toplam\n"
                                         "Sağ Toplam\n"
                                         "       Durum")
        self.vs = cv2.VideoCapture(self.video_source)
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.model_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        label_map = label_map_util.load_labelmap(self.label_path)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=self.num_classes, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)

        W = None
        H = None
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        totalFrames = 0
        skip_frame = 10

        fps = FPS().start()

        # Operation
        with detection_graph.as_default():
            with tf.Session(graph=detection_graph) as sess:
                while True:
                    ret, self.frame = self.vs.read()
                    if self.frame is None or self.stopped:
                        print("Video stream ended.")
                        break

                    self.frame = imutils.resize(
                        self.frame,
                        width=1000)  # the less data we have, the faster we are
                    rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                    self.frame = rgb

                    if W is None or H is None:
                        (H, W, ch) = self.frame.shape

                    self.status = "Bekliyor"
                    rects = []

                    if totalFrames % skip_frame == 0:
                        self.status = "Saptanıyor"
                        trackers = []

                        frame_expanded = np.expand_dims(self.frame, axis=0)
                        image_tensor = detection_graph.get_tensor_by_name(
                            'image_tensor:0')
                        boxes = detection_graph.get_tensor_by_name(
                            'detection_boxes:0')
                        scores = detection_graph.get_tensor_by_name(
                            'detection_scores:0')
                        classes = detection_graph.get_tensor_by_name(
                            'detection_classes:0')
                        num_detections = detection_graph.get_tensor_by_name(
                            'num_detections:0')

                        (boxes, scores, classes, num_detections) = sess.run(
                            [boxes, scores, classes, num_detections],
                            feed_dict={image_tensor: frame_expanded})

                        ymin = int((boxes[0][0][0] * H))
                        xmin = int((boxes[0][0][1] * W))
                        ymax = int((boxes[0][0][2] * H))
                        xmax = int((boxes[0][0][3] * W))

                        box_area = (xmax - xmin) * (ymax - ymin)
                        total_area = W * H
                        # For eliminating the false positives.
                        if box_area > total_area * 0.5:
                            ymin, xmin, xmax, ymax = (None, None, None, None)

                        if ymin is not None:
                            tracker = dlib.correlation_tracker()
                            rect = dlib.rectangle(xmin, ymin, xmax, ymax)
                            tracker.start_track(rgb, rect)

                            trackers.append(tracker)

                    else:

                        for tracker in trackers:
                            self.status = "Takip Ediliyor"

                            tracker.update(rgb)
                            pos = tracker.get_position()

                            xmin = int(pos.left())
                            ymin = int(pos.top())
                            xmax = int(pos.right())
                            ymax = int(pos.bottom())

                            rects.append((xmin, ymin, xmax, ymax))

                    # cv2.line(self.frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)

                    objects = ct.update(rects)

                    for (objectID, centroid) in objects.items():
                        trackable_obj = trackableObjects.get(objectID, None)

                        if trackable_obj is None:
                            trackable_obj = TrackableObject(objectID, centroid)

                        else:
                            x = [c[0] for c in trackable_obj.centroids]
                            direction = centroid[0] - np.mean(x)
                            trackable_obj.centroids.append(centroid)

                            if not trackable_obj.counted:
                                # if the direction is negative (the object is
                                # moving left) AND the centroid is inside the
                                # left quarter of the frame, count it on the
                                # left
                                if direction < 0 and centroid[0] < int(
                                        W * 0.25):
                                    self.totalLeft += 1
                                    trackable_obj.counted = True
                                # likewise, count objects moving right once
                                # they enter the right quarter of the frame
                                elif direction > 0 and centroid[0] > int(
                                        W * 0.75):
                                    self.totalRight += 1
                                    trackable_obj.counted = True

                        trackableObjects[objectID] = trackable_obj
                        text = "ID {}".format(objectID)

                        cv2.putText(self.frame, text,
                                    (centroid[0] - 10, centroid[1] - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2)
                        cv2.circle(self.frame, (centroid[0], centroid[1]), 4,
                                   (0, 255, 0), -1)

                    self.signals.changeTextBox.emit(
                        f"{self.totalLeft}\n{self.totalRight}\n{self.status}")
                    # End of the loop
                    bytesPerLine = ch * W
                    convertToQtFormat = QImage(rgb.data, W, H, bytesPerLine,
                                               QImage.Format_RGB888)
                    p = convertToQtFormat.scaled(800, 600, Qt.KeepAspectRatio)
                    self.signals.changePixmap.emit(p)

                    totalFrames += 1
                    fps.update()
        #
        self.signals.changeTitleBox.emit("Durum: ")
        # Clear output
        self.signals.changeTextBox.emit("Rapor kaydedildi.")  # "Report saved."
        # Alter button to Start.
        self.signals.changeButton.emit("start_button")
        # Stop FPS count.
        fps.stop()
        # Get total elapsed time.
        self.total_elapsed_time = fps.elapsed()
        # Create report to database.
        self.create_report(self.totalLeft, self.totalRight, fps.elapsed())
        # Finally, set placeholder.
        self.signals.changePixmap.emit(QImage('./Resources/placeholder2.png'))
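
# The direction test in this example compares the current x-coordinate with
# the mean of the x-coordinates seen so far for that object. A tiny
# standalone illustration of the rule (the numbers are made up):
import numpy as np

previous_x = [120, 130, 142, 155]            # centroid history for one object
current_x = 180
direction = current_x - np.mean(previous_x)  # positive -> moving right
print(direction)                             # 43.25, so count it on the right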
Example No. 11
# initialize the video writer
writer = None

#initialize the file writer
file_object = open("cv_detection_results_east.txt", "a+")

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=25, maxDistance=150) #was 25 and 150
trackers = []
trackableObjects = {}
# set the start time from an epoch timestamp (note: assigning to `time` here
# rebinds the name and shadows the `time` module)
local_time = 1564718220
time = datetime.fromtimestamp(local_time)
print('start time:', time)

# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0
temp_in = 0
temp_out = 0
Example No. 12
class detector:
    def __init__(self,
                 prototxt="mobilenet_ssd/MobileNetSSD_deploy.prototxt",
                 model="mobilenet_ssd/MobileNetSSD_deploy.caffemodel",
                 confidence=0.4,
                 skipframes=10):

        print("Initiate detector...")

        self.CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        self.confidence = confidence
        self.net = cv2.dnn.readNetFromCaffe(prototxt, model)

        #self.vs = VideoStream(src = 0, usePiCamera = True).start()
        self.vs = VideoStream(src=0).start()

        self.W = None
        self.H = None

        self.ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        self.trackers = []
        self.trackableObjects = {}

        self.totalFrames = 0
        self.skipframes = skipframes

        self.fps = FPS().start()

    def main(self):

        # counters for objects that crossed the center line in each direction
        totalUp = 0
        totalDown = 0

        while True:

            # VideoStream.read() returns the frame directly (no status tuple)
            frame = self.vs.read()
            frame = imutils.resize(frame, width=250)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if self.W is None or self.H is None:
                (self.H, self.W) = frame.shape[:2]

            status = "Waiting"
            rects = []

            if self.totalFrames % self.skipframes == 0:

                status = "Detecting"
                self.trackers = []

                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()

                for i in np.arange(0, detections.shape[2]):

                    confidence = detections[0, 0, i, 2]

                    if confidence > self.confidence:

                        idx = int(detections[0, 0, i, 1])

                        if self.CLASSES[idx] != "person":
                            continue

                        box = detections[0, 0, i, 3:7] * np.array(
                            [self.W, self.H, self.W, self.H])
                        (startX, startY, endX, endY) = box.astype("int")

                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        self.trackers.append(tracker)

            else:
                for tracker in self.trackers:

                    status = "Tracking"

                    tracker.update(rgb)
                    pos = tracker.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    rects.append((startX, startY, endX, endY))

            cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2),
                     (0, 255, 255), 2)

            objects = self.ct.update(rects)

            for (objectID, centroid) in objects.items():

                to = self.trackableObjects.get(objectID, None)

                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:

                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    if not to.counted:

                        if direction < 0 and centroid[1] < self.H // 2:
                            totalUp += 1
                            to.counted = True

                        elif direction > 0 and centroid[1] > self.H // 2:
                            totalDown += 1
                            to.counted = True

                self.trackableObjects[objectID] = to

                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

            info = [
                ("Up", totalUp),
                ("Down", totalDown),
                ("Status", status),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
            self.fps.update()
        # stop the timer and display FPS information
        self.fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

        self.vs.stop()

        cv2.destroyAllWindows()
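
# A minimal sketch of how this detector class might be driven. It assumes the
# default MobileNet SSD prototxt/caffemodel paths from __init__ exist on disk
# and that a webcam is available.
if __name__ == "__main__":
    d = detector(confidence=0.5, skipframes=10)
    d.main()   # runs until 'q' is pressed in the preview window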
Example No. 13
def tracker():
    rospy.init_node("Centroid_Dist_Publisher", anonymous=True)
    pub = rospy.Publisher("CentroidDistance", CentroidDist, queue_size=10)
    rate = rospy.Rate(30)

    centre_x = 0
    centre_y = 0

    # initialize our centroid tracker and frame dimensions
    ct = CentroidTracker()
    (H, W) = (None, None)

    confidence = 0.7

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe("deploy.prototxt",
                                   "res10_300x300_ssd_iter_140000.caffemodel")

    # initialize the video stream and allow the camera sensor to warmup
    print("[INFO] starting video stream...")
    # resource_name = "/dev/video" + resource
    # resource = int(resource)
    # vs = cv2.VideoCapture('rtsp://192.168.1.254/sjcam.mov')
    # vs = VideoStream(src='rtsp://192.168.1.254/sjcam.mov').start()
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

    while not rospy.is_shutdown():
        # loop over the frames from the video stream
        while True:
            # read the next frame from the video stream and resize it
            # ret, frame = vs.read()
            frame = vs.read()
            frame = imutils.resize(frame, width=400)
            # frame = cv2.resize(frame, (225,400))
            # print(frame.shape)

            # if the frame dimensions are None, grab them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # construct a blob from the frame, pass it through the network,
            # obtain our output predictions, and initialize the list of
            # bounding box rectangles
            blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H),
                                         (104.0, 177.0, 123.0))
            # fps = video.get(cv2.CAP_PROP_FPS)

            net.setInput(blob)
            detections = net.forward()
            rects = []

            # loop over the detections
            for i in range(0, detections.shape[2]):
                # if len(detections[2]) == 1:
                # filter out weak detections by ensuring the predicted
                # probability is greater than a minimum threshold
                if detections[0, 0, i, 2] > confidence:
                    # compute the (x, y)-coordinates of the bounding box for
                    # the object, then update the bounding box rectangles list
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    rects.append(box.astype("int"))

                    # draw a bounding box surrounding the object so we can
                    # visualize it
                    (startX, startY, endX, endY) = box.astype("int")
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)
                    midX = (startX + endX) / 2
                    midY = (startY + endY) / 2

            # update our centroid tracker using the computed set of bounding
            # box rectangles
            objects = ct.update(rects)

            # instantiate the message once so it exists even when no objects
            # are being tracked in this frame
            CDist = CentroidDist()
            c = 0

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)

                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

                # print(centroid[0], centroid[1])
                # print("\n\n")
                centre_x += (200 - centroid[0])
                centre_y += (112 - centroid[1])

                c = c + 1

                # print(200 - centroid[0], 150 - centroid[1])
                # # print("\n")

                # print "Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)

                # if(centroid[0]>220):
                # 	com ="Move Left"
                # elif(centroid[0]<180):
                # 	com ="Move Right"
                # else:
                # 	if centroid[1] > 220:
                # 		com = "Move Down"
                # 	elif centroid[1] < 180:
                # 		com = "Move Up"
                # 	else:
                # 		com = "Centered!"

                object_count = "Count: {}".format(len(objects.items()))

                cv2.putText(frame, object_count, (300, 290),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)

            # show the output frame
            # cv2.putText(frame, fps, (140,20),
            # 		cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            if c != 0:
                CDist.dx = centre_x / c
                CDist.dy = centre_y / c

            else:
                CDist.dx = 0
                CDist.dy = 0

            pub.publish(CDist)

            centre_x = 0
            centre_y = 0

            c = 0

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the ` ` key was pressed, break from the loop
            if key == ord(" "):
                break

            rate.sleep()

    # do a bit of cleanup
    #cv2.destroyAllWindows()
    vs.stop()
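
# A typical rospy entry point for the node above (a sketch; the CentroidDist
# message type and its package import are assumed to exist elsewhere in this
# file):
if __name__ == "__main__":
    try:
        tracker()
    except rospy.ROSInterruptException:
        pass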
Example No. 14
	def main_process(self):
		Base={
			"max_disappear": 30,

			"max_distance": 200,

			"track_object": 4,

			"confidence": 0.4,

			"frame_height": 400,

			"line_point" : 125,

			"display": "true",

			"model_path": "MobileNetSSD_deploy.caffemodel",

			"prototxt_path": "MobileNetSSD_deploy.prototxt",

			"output_path": "output",

			"csv_name": "log.csv"
		}

		CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
			"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
			"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
			"sofa", "train", "tvmonitor"]

		print("[INFO] loading model...")
		net = cv2.dnn.readNetFromCaffe(Base["prototxt_path"],
			Base["model_path"])


		print("[INFO] warming up camera...")
		vs = cv2.VideoCapture(self.filename)

		H = None
		W = None

		ct = CentroidTracker(maxDisappeared=Base["max_disappear"],
			maxDistance=Base["max_distance"])
		trackers = []
		trackableObjects = {}

		totalFrames = 0

		logFile = None

		points = [("A", "B"), ("B", "C"), ("C", "D")]

		fps = FPS().start()

		while True:
			ret, frame  = vs.read()
			ts = datetime.now()
			newDate = ts.strftime("%m-%d-%y")
			minut=ts.minute

			if frame is None:
				break

			frame = imutils.resize(frame, height=Base["frame_height"])
			rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

			if W is None or H is None:
				(H, W) = frame.shape[:2]

			rects = []

			if totalFrames % Base["track_object"] == 0:
				trackers = []

				blob = cv2.dnn.blobFromImage(frame, size=(300, 300),
					ddepth=cv2.CV_8U)
				net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5,
					127.5, 127.5])
				detections = net.forward()

				# loop over the detections
				for i in np.arange(0, detections.shape[2]):
					confidence = detections[0, 0, i, 2]

					if confidence > Base["confidence"]:
						idx = int(detections[0, 0, i, 1])

						if CLASSES[idx] != "car":
							if CLASSES[idx] != "bus":
								if CLASSES[idx] != "motorbike":
									continue

						box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
						(startX, startY, endX, endY) = box.astype("int")

						tracker = dlib.correlation_tracker()
						rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
						tracker.start_track(rgb, rect)
						cv2.rectangle(frame, (startX, startY), (endX, endY), (0,225,0), 4)
						trackers.append(tracker)

			else:
				for tracker in trackers:
					tracker.update(rgb)
					pos = tracker.get_position()

					startX = int(pos.left())
					startY = int(pos.top())
					endX = int(pos.right())
					endY = int(pos.bottom())
					cv2.rectangle(frame, (startX, startY), (endX, endY), (0,225,0), 4)
					rects.append((startX, startY, endX, endY))

			objects = ct.update(rects)

			for (objectID, centroid) in objects.items():
				to = trackableObjects.get(objectID, None)

				if to is None:
					to = TrackableObject(objectID, centroid)

				elif not to.estimated:
					 
					y = [c[1] for c in to.centroids]
					direction = centroid[1] - np.mean(y)
					to.direction = direction
					if(to.direction>0):
						tet = "down"
						cv2.putText(frame, tet, (centroid[0] - 10, centroid[1] - 20)
							, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
						if minut%2==0:
							if not to.belowline:
								if(centroid[1] < self.line_point):
									to.belowline = "F"
								else:
									to.belowline = "T"

							else:
								if(to.belowline == "F" and centroid[1] > self.line_point):
									if not to.savethefile:
										#crop = frame[startX:endX, startY:endY]
										cv2.imwrite('output/violation'+str(self.saveno)+'.jpg', frame)
										to.savethefile = 1
										self.saveno += 1
									cv2.circle(frame, (centroid[0]+10, centroid[1]), 4,
									(0, 0, 255), -1)

						else:
							if to.belowline:
								to.belowline = None
							

					elif(to.direction<0):
						tet = "up"
						cv2.putText(frame, tet, (centroid[0] - 10, centroid[1] - 20)
							, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			
					elif(to.direction==0):
						tet = "stationary"
						cv2.putText(frame, tet, (centroid[0] - 10, centroid[1] - 20)
							, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

				trackableObjects[objectID] = to

				text = "ID {}".format(objectID)
				cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10)
					, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
				cv2.circle(frame, (centroid[0], centroid[1]), 4,
					(0, 255, 0), -1)
				if minut%2==0:
					cv2.line(frame, (0, self.line_point), (2000, self.line_point), (0,0,255), 4)
				else:
					cv2.line(frame, (0, self.line_point), (2000, self.line_point), (0,255,0), 4)

			if Base["display"]=="true":
				cv2.imshow("frame", frame)
				key = cv2.waitKey(1) & 0xFF

				if key == ord("q"):
					break

			
			totalFrames += 1
			fps.update()

		fps.stop()
		print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
		print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

		cv2.destroyAllWindows()
		vs.release()
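
# The violation check above first records on which side of the stop line a
# vehicle was seen ("F" = still above the line, "T" = already below it) and
# flags a violation only when a vehicle that started above the line crosses
# it while the red phase (even minute) is active. A standalone sketch of that
# rule, with made-up numbers:
def is_violation(first_seen_y, current_y, line_point, red_phase):
    started_above = first_seen_y < line_point
    crossed = current_y > line_point
    return red_phase and started_above and crossed

print(is_violation(first_seen_y=100, current_y=140, line_point=125,
                   red_phase=True))    # True  -> save the frame as evidence
print(is_violation(first_seen_y=100, current_y=140, line_point=125,
                   red_phase=False))   # False -> green phase, no violation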
Example No. 15
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=True,
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=True,
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.5,
        help="minimum probability to filter weak detections")
    ap.add_argument("--fps",type=int, default=15,
        help="frame rate")
    ap.add_argument("-a", "--min-area", type=int, default=500,
                    help="minimum area size")
    ap.add_argument("-i","--motion-tracking",type=bool,default=True,
                    help="Enable motion tracking")
    ap.add_argument("-o", "--object-tracking", type=bool, default=True,
                    help="Enable selectable object tracking")
    ap.add_argument("-t","--tracker",type=str,default="kcf",
                    help="Tracker type for object tracking")
    ap.add_argument("-f", "--facial_recog", type=bool, default=True,
                    help="Facial Recognition Tracking")
    args = vars(ap.parse_args())
    
    
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

    if args['facial_recog']:
        (H, W) = (None, None)
        # initialize our centroid tracker and frame dimensions
        ct = CentroidTracker()

        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
        # initialize the video stream and allow the camera sensor to warmup
        
    if args['motion_tracking']:
        firstFrame=None
        
    if args["object_tracking"]:
        initBB = None
        # extract the OpenCV version info
        (major, minor) = cv2.__version__.split(".")[:2]
    
        # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
        # function to create our object tracker
        if int(major) == 3 and int(minor) < 3:
            tracker = cv2.Tracker_create(args["tracker"].upper())
    
        # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
        # appropriate object tracker constructor:
        else:
            OPENCV_OBJECT_TRACKERS = {
                "csrt": cv2.TrackerCSRT_create,
                "kcf": cv2.TrackerKCF_create,
                "boosting": cv2.TrackerBoosting_create,
                "mil": cv2.TrackerMIL_create,
                "tld": cv2.TrackerTLD_create,
                "medianflow": cv2.TrackerMedianFlow_create,
                "mosse": cv2.TrackerMOSSE_create
            }
            tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()

    while True:
        st=time.time()
        #read the next frame from the video stream and resize it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        
        if args['motion_tracking']:
            
            firstFrame=motionTracker(frame.copy(),firstFrame,args["min_area"])
            
        if args['object_tracking']:
            initBB=objectTracker(frame.copy(),initBB,tracker)
        
        if args['facial_recog']:
            W,H = facialRecogTracker(frame.copy(),net,ct,W,H,args["confidence"])

        sleepTime = (1 / args['fps'])-(time.time()-st)
        if sleepTime > 0:
            time.sleep(sleepTime)

        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
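
# Note on the boolean flags above: argparse's type=bool converts any
# non-empty string to True, so "--motion-tracking False" still enables the
# feature. If the CLI were reworked, the conventional pattern is store_true;
# a small sketch:
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("--motion-tracking", action="store_true",
                help="enable motion tracking")
ap.add_argument("--object-tracking", action="store_true",
                help="enable selectable object tracking")
print(vars(ap.parse_args([])))  # both default to False unless the flag is given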
Example No. 16
def video_feed_counter(conf, mode, input, output, url, camera):
    # load the configuration file
    conf = Conf(conf)
    count = 0
    # initialize the MOG foreground background subtractor object
    # mog = cv2.bgsegm.createBackgroundSubtractorMOG()
    mog = cv2.createBackgroundSubtractorMOG2()
    # initialize and define the dilation kernel
    dKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    # initialize the video writer process
    writerProcess = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker and initialize a dictionary to
    # map each unique object ID to a trackable object
    ct = CentroidTracker(conf["max_disappeared"], conf["max_distance"])
    trackableObjects = {}

    # if a video path was not supplied, grab a reference to the webcam
    # if not args.get("input", False):
    # if input:
    # 	print("[INFO] starting video stream...")
    # 	# vs = VideoStream(src=0).start()
    # 	vs = VideoStream(usePiCamera=True).start()
    # 	time.sleep(2.0)

    # otherwise, grab a reference to the video file
    # else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(url, cv2.CAP_FFMPEG)
    # vs = cv2.VideoCapture(args["input"])

    # check if the user wants to use the difference flag feature
    if conf["diff_flag"]:
        # initialize the start counting flag and mouse click callback
        start = False
        cv2.namedWindow("set_points")
        cv2.setMouseCallback("set_points", set_points, [mode])

    # otherwise, the user does not want to use it
    else:
        # set the start flag as true indicating to start traffic counting
        start = True

    # initialize the direction info variable (used to store information
    # such as up/down or left/right vehicle count) and the difference
    # point (used to differentiate between left and right lanes)
    directionInfo = None
    diffPt = None
    fps = FPS().start()
    # print('fbs')
    # loop over frames from the video stream
    while (vs.isOpened()):
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        # frame = vs.read()
        ret, frame = vs.read()  # import image
        # if not ret:
        # 	frame = cv2.VideoCapture(url)
        #     continue
        # if ret:
        #     frame = cv2.VideoCapture(url)
        #     continue

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if input is not None and frame is None:
            break
        #print("frame in while")

        # check if the start flag is set, if so, we will start traffic
        # counting

        if start:
            # if the frame dimensions are empty, grab the frame
            # dimensions, instantiate the direction counter, and set the
            # centroid tracker direction

            if W is None or H is None:
                # start the frames per second throughput estimator
                #fps = FPS().start()
                (H, W) = frame.shape[:2]
                dc = DirectionCounter(mode, W - conf["x_offset"],
                                      H - conf["y_offset"])
                ct.direction = mode

                # check if the difference point is set, if it is, then
                # set it in the centroid tracker object
                if diffPt is not None:
                    ct.diffPt = diffPt

            # begin writing the video to disk if required
            if output is not None and writerProcess is None:
                # set the value of the write flag (used to communicate when
                # to stop the process)
                writeVideo = Value('i', 1)

                # initialize a shared queue to exchange frames, initialize
                # a process, and start the process
                frameQueue = Queue()
                writerProcess = Process(target=write_video,
                                        args=(output, writeVideo, frameQueue,
                                              W, H))
                writerProcess.start()

            # initialize a list to store the bounding box rectangles
            # returned by background subtraction model
            rects = []

            # convert the frame to grayscale image and then blur it
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)

            # apply the MOG background subtraction model which returns
            # a mask
            mask = mog.apply(gray)

            # apply dilation
            dilation = cv2.dilate(mask, dKernel, iterations=2)

            # find contours in the mask
            cnts = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over each contour
            for c in cnts:
                # if the contour area is less than the minimum area
                # required then ignore the object
                if cv2.contourArea(c) < conf["min_area"]:
                    continue

                # get the (x, y)-coordinates of the contour, along with
                # height and width
                (x, y, w, h) = cv2.boundingRect(c)

                # check if the direction is vertical and the vehicle is
                # further away from the line; if so, there is no need to
                # detect it
                if mode == "vertical" and y < conf["limit"]:
                    continue

                # otherwise, check if the direction is horizontal and the
                # vehicle is further away from the line; if so, there is
                # no need to detect it
                elif mode == "horizontal" and x > conf["limit"]:
                    continue

                # add the bounding box coordinates to the rectangles list
                rects.append((x, y, x + w, y + h))

            # check if the direction is vertical
            if mode == "vertical":
                # draw a horizontal line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'up' or 'down'
                cv2.line(frame, (0, H - conf["y_offset"]),
                         (W, H - conf["y_offset"]), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw
                # a line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (diffPt, 0), (diffPt, H), (255, 0, 0), 2)

            # otherwise, the direction is horizontal
            else:
                # draw a vertical line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'left' or 'right'
                # print('ddds')
                cv2.line(frame, (W - conf["x_offset"], 0),
                         (W - conf["x_offset"], H), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw a
                # line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (0, diffPt), (W, diffPt), (255, 0, 0), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the
                # current object ID and initialize the color
                to = trackableObjects.get(objectID, None)
                color = (0, 0, 255)

                # create a new trackable object if needed
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can
                # utilize it to determine direction
                else:
                    # find the direction and update the list of centroids
                    dc.find_direction(to, centroid)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:

                        # find the direction of motion of the vehicles
                        directionInfo = dc.count_object(to, centroid, camera)

                    # otherwise, the object has been counted and set the
                    # color to green indicate it has been counted
                    else:
                        color = (0, 255, 0)

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)

            # extract the traffic counts and write/draw them
            if directionInfo is not None:
                for (i, (k, v)) in enumerate(directionInfo):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # put frame into the shared queue for video writing
            if writerProcess is not None:
                frameQueue.put(frame)

            # show the output frame
            # cv2.imshow("Frame", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # update the FPS counter
            fps.update()

        # otherwise, the user has to select a difference point
        else:
            # show the output frame
            # cv2.imshow("set_points", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `s` key was pressed, start traffic counting
            if key == ord("s"):
                # begin counting and eliminate the informational window
                start = True
                cv2.destroyWindow("set_points")

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # terminate the video writer process
    if writerProcess is not None:
        writeVideo.value = 0
        writerProcess.join()

    # if we are not using a video file, stop the camera video stream
    # if not args.get("input", False):
    # 	vs.stop()

    # otherwise, release the video file pointer
    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
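
# video_feed_counter() yields multipart JPEG chunks, which is the shape a
# Flask MJPEG endpoint expects. A minimal sketch of how it could be wired up
# (the route name and the argument values below are hypothetical):
from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed")
def video_feed():
    gen = video_feed_counter(conf="config/config.json", mode="vertical",
                             input=None, output=None,
                             url="rtsp://example/stream", camera="cam1")
    return Response(gen,
                    mimetype="multipart/x-mixed-replace; boundary=frame")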
Example No. 17
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i",
                    "--input",
                    type=str,
                    help="path to optional input video file")
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    help="path to optional output video file")
    ap.add_argument(
        "-m",
        "--model",
        type=str,
        default="yolov3",
        help="which model to use for detection: ssd_caffe, ssd_tf, yolov3")
    ap.add_argument("-t",
                    "--tracker",
                    type=str,
                    default="cv2",
                    help="which tracker to use: cv2 or dlib")

    args = vars(ap.parse_args())

    if args["model"] == "yolov3":
        model = MLObjectDetectionModelYolov3.Load()
    elif args["model"] == "ssd_caffe":
        model = MLObjectDetectionModelCaffe.Load()
    elif args["model"] == "ssd_tf":
        model = MLObjectDetectionModelTF.Load()

    # if a video path was not supplied, grab a reference to the webcam
    if not args.get("input", False):
        print("[INFO] starting video stream...")
        vs = VideoReader(0)
    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = VideoReader(args["input"])

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    frameWidth = None
    frameHeight = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}
    countMap = defaultdict(lambda: 0)

    # start the frames per second throughput estimator
    fps = FPS().start()

    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle
        frameNDArray = vs.get_frame()

        if frameNDArray is None:
            break

        # resize the frame,
        # then convert the frame from BGR to RGB for dlib
        scale_percent = 60  # percent of original size
        # resize image
        frameNDArray = frameNDArray.resizeToScalePercent(scale_percent)
        rgbFrame = frameNDArray.rgbFrame

        # if the frame dimensions are empty, set them
        if frameWidth is None or frameHeight is None:
            (frameHeight, frameWidth) = frameNDArray.frame_height_width

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if args["output"] is not None and writer is None:
            writer = VideoWriter(args["output"], (frameWidth, frameHeight))

        # initialize the list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        rects = []
        tracker_type = args["tracker"]

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker (skip_frames is
        # expected to be a module-level constant)
        track_classes = ["person"]
        if totalFrames % skip_frames == 0:
            if args["model"] == "yolov3":
                trackers = frameNDArray.detectNewObjectsYolov3(
                    frameWidth, frameHeight, model, countMap, track_classes)
            elif args["model"] == "ssd_caffe":
                trackers = frameNDArray.detectNewObjectsSSDMobileNetCaffe(
                    frameWidth, frameHeight, model, countMap, track_classes)
            elif args["model"] == "ssd_tf":
                trackers = frameNDArray.detectNewObjectsSSDMobileNetTF(
                    frameWidth, frameHeight, model, countMap, track_classes)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # update the tracker and grab the updated position
                tracker.update(rgbFrame)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        frameNDArray.drawHorizontalLineInCenter()

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < frameHeight // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > frameHeight // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frameNDArray.frame, text,
                        (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frameNDArray.frame, (centroid[0], centroid[1]), 4,
                       (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the frame
        info = [("Up", totalUp), ("Down", totalDown)]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frameNDArray.frame, text,
                        (10, frameHeight - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frameNDArray.frame)

        # show the output frame
        cv2.imshow("Frame", frameNDArray.frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
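
The counting rule used above reduces to one comparison: the sign of the difference between the current centroid's y-coordinate and the mean of its previous y-coordinates gives the direction of travel, and the object is only counted once it sits on the correct side of the horizontal mid-line. A minimal, self-contained sketch of that rule (names such as frame_height and the toy values are illustrative, not taken from the example):

import numpy as np

# sketch: direction-of-travel check used by the line-crossing counters
def crossed_line(previous_y, current_y, frame_height):
    """Return 'up', 'down', or None for a single tracked centroid."""
    direction = current_y - np.mean(previous_y)
    line_y = frame_height // 2  # horizontal counting line in the frame center

    if direction < 0 and current_y < line_y:   # moving up, already above the line
        return "up"
    if direction > 0 and current_y > line_y:   # moving down, already below the line
        return "down"
    return None

# toy usage: an object drifting downward across the middle of a 480 px tall frame
print(crossed_line([200, 220, 235], 260, 480))  # -> 'down'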
Example No. 18
class SpeedEstimator:

    # class constructor
    def __init__(self, keys, videoSource):
        # inform the user about framerates and speeds
        print("[INFO] NOTE: When using an input video file, speeds will be " \
            "inaccurate because OpenCV can't throttle FPS according to the " \
            "framerate of the video. This script is for development purposes " \
            "only.")

        # passed values.
        self.keys = keys
        self.conf = json.loads(keys.json_string)

        # initialize the list of class labels MobileNet SSD was trained to detect
        self.CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        # load our serialized model from disk
        print("[INFO] loading model...")
        self.net = cv2.dnn.readNetFromCaffe(self.conf["prototxt_path"],
                                            self.conf["model_path"])

        # initialize the video stream and allow the camera sensor to warmup
        print("[INFO] warming up camera...")
        self.vs = cv2.VideoCapture(videoSource)

        # pause the main executing thread briefly so the camera sensor can warm up.
        time.sleep(1.0)

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        self.H = None
        self.W = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        self.ct = CentroidTracker(maxDisappeared=self.conf["max_disappear"],
                                  maxDistance=self.conf["max_distance"])

        self.trackers = []
        self.trackableObjects = {}

        # keep the count of total number of frames
        self.totalFrames = 0

        # initialize the log file
        self.logFile = None

        # initialize the list of various points used to calculate the avg of the vehicle speed
        self.points = [("A", "B"), ("B", "C"), ("C", "D")]

    # function responsible for starting the frames per second throughput estimator.
    def startFPS(self):
        self.fps = FPS().start()

    # function for creating a log file, if it does not exist.
    def createLogFileIfNotExist(self):
        # if the log file has not been created or opened
        if self.logFile is None:
            # build the log file path and create/open the log file
            self.logPath = os.path.join(self.conf["output_path"],
                                        self.conf["csv_name"])
            self.logFile = open(self.logPath, mode="a")

            # set the file pointer to end of the file
            pos = self.logFile.seek(0, os.SEEK_END)

            # if using an empty log file then
            # write the column headings
            if pos == 0:
                self.logFile.write(
                    "Year,Month,Day,Time,Speed (in MPH),ImageID\n")

    # function for determining the type of algorithm to use in detecting
    # vehicle speed.
    def runComputationallyTaskingAlgoIfBasicAlgoFails(self):
        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % self.conf["track_object"] == 0:
            # initialize our new set of object trackers
            self.trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(self.frame,
                                         size=(300, 300),
                                         ddepth=cv2.CV_8U)
            self.net.setInput(blob,
                              scalefactor=1.0 / 127.5,
                              mean=[127.5, 127.5, 127.5])
            detections = self.net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by ensuring the `confidence`
                # is greater than the minimum confidence
                if confidence > self.conf["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a car, ignore it
                    if self.CLASSES[idx] != "car":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array(
                        [self.W, self.H, self.W, self.H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(self.rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    self.trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            # loop over the trackers
            for tracker in self.trackers:
                # update the tracker and grab the updated position
                tracker.update(self.rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                self.rects.append((startX, startY, endX, endY))

    # static helper that uploads the temporary frame image created for a
    # speeding vehicle, along with its metadata.
    @staticmethod
    def upload_file(tempFile, client, imageID):
        # upload the image to Dropbox and clean up the temporary image
        print("[INFO] uploading {}...".format(imageID))
        path = "/{}.jpg".format(imageID)
        client.files_upload(open(tempFile.path, "rb").read(), path)
        tempFile.cleanup()

    # program loop.
    def programLoop(self):
        while True:
            # grab the next frame from the stream, store the current
            # timestamp, and store the new date
            self.ret, self.frame = self.vs.read()
            self.ts = datetime.now()
            newDate = self.ts.strftime("%m-%d-%y")

            # check if the frame is None, if so, break out of the loop
            if self.frame is None:
                break

            # if the log file has not been created or opened
            self.createLogFileIfNotExist()

            # resize the frame
            self.frame = imutils.resize(self.frame,
                                        width=self.conf["frame_width"])
            self.rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if ((self.W is None) or (self.H is None)):
                (self.H, self.W) = self.frame.shape[:2]
                self.meterPerPixel = self.conf["distance"] / self.W

            # initialize our list of bounding box rectangles returned by
            # either (1) our object detector or (2) the correlation trackers
            self.rects = []

            # run the detector on skip frames, otherwise update the correlation trackers
            self.runComputationallyTaskingAlgoIfBasicAlgoFails()

            # use the centroid tracker to associate old and new object centroids
            objects = self.ct.update(self.rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = self.trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, if there is a trackable object and its speed has
                # not yet been estimated then estimate it
                elif not to.estimated:
                    # check if the direction of the object has been set, if
                    # not, calculate it, and set it
                    if to.direction is None:
                        y = [c[0] for c in to.centroids]
                        direction = centroid[0] - np.mean(y)
                        to.direction = direction

                    # if the direction is positive (indicating the object
                    # is moving from left to right)
                    if to.direction > 0:
                        # check to see if timestamp has been noted for
                        # point A
                        if to.timestamp["A"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["A"]:
                                to.timestamp["A"] = self.ts
                                to.position["A"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point B
                        elif to.timestamp["B"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["B"]:
                                to.timestamp["B"] = self.ts
                                to.position["B"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point C
                        elif to.timestamp["C"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["C"]:
                                to.timestamp["C"] = self.ts
                                to.position["C"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point D
                        elif to.timestamp["D"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp, set the position as the
                            # centroid's x-coordinate, and set the last point
                            # flag as True
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["D"]:
                                to.timestamp["D"] = self.ts
                                to.position["D"] = centroid[0]
                                to.lastPoint = True

                    # if the direction is negative (indicating the object
                    # is moving from right to left)
                    elif to.direction < 0:
                        # check to see if timestamp has been noted for
                        # point D
                        if to.timestamp["D"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["D"]:
                                to.timestamp["D"] = self.ts
                                to.position["D"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point C
                        elif to.timestamp["C"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["C"]:
                                to.timestamp["C"] = self.ts
                                to.position["C"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point B
                        elif to.timestamp["B"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["B"]:
                                to.timestamp["B"] = self.ts
                                to.position["B"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point A
                        elif to.timestamp["A"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp, set the position as the
                            # centroid's x-coordinate, and set the last point
                            # flag as True
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["A"]:
                                to.timestamp["A"] = self.ts
                                to.position["A"] = centroid[0]
                                to.lastPoint = True

                    # check to see if the vehicle is past the last point and
                    # the vehicle's speed has not yet been estimated, if yes,
                    # then calculate the vehicle speed and log it if it's
                    # over the limit
                    if to.lastPoint and not to.estimated:
                        # initialize the list of estimated speeds
                        estimatedSpeeds = []

                        # loop over all the pairs of points and estimate the
                        # vehicle speed
                        for (i, j) in self.points:
                            # calculate the distance in pixels
                            d = to.position[j] - to.position[i]
                            distanceInPixels = abs(d)

                            # check if the distance in pixels is zero, if so,
                            # skip this iteration
                            if distanceInPixels == 0:
                                continue

                            # calculate the time in hours
                            t = to.timestamp[j] - to.timestamp[i]
                            timeInSeconds = abs(t.total_seconds())
                            timeInHours = timeInSeconds / (60 * 60)

                            # calculate distance in kilometers and append the
                            # calculated speed to the list
                            distanceInMeters = distanceInPixels * self.meterPerPixel
                            distanceInKM = distanceInMeters / 1000
                            estimatedSpeeds.append(distanceInKM / timeInHours)

                        # calculate the average speed
                        to.calculate_speed(estimatedSpeeds)

                        # set the object as estimated
                        to.estimated = True
                        print("[INFO] Speed of the vehicle that just passed" \
                            " is: {:.2f} MPH".format(to.speedMPH))

                # store the trackable object in our dictionary
                self.trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(self.frame, text,
                            (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(self.frame, (centroid[0], centroid[1]), 4,
                           (0, 255, 0), -1)

                # check if the object has not been logged
                if not to.logged:
                    # check if the object's speed has been estimated and it
                    # is higher than the speed limit
                    if to.estimated and to.speedMPH > self.conf["speed_limit"]:
                        # set the current year, month, day, and time
                        year = self.ts.strftime("%Y")
                        month = self.ts.strftime("%m")
                        day = self.ts.strftime("%d")
                        time = self.ts.strftime("%H:%M:%S")

                        # check if dropbox is to be used to store the vehicle
                        # image
                        if self.conf["use_dropbox"]:
                            # initialize the image id, and the temporary file
                            imageID = self.ts.strftime("%H%M%S%f")
                            tempFile = TempFile()
                            cv2.imwrite(tempFile.path, self.frame)

                            # create a thread to upload the file to dropbox
                            # and start it
                            t = Thread(target=self.upload_file,
                                       args=(
                                           tempFile,
                                           client,
                                           imageID,
                                       ))
                            t.start()

                            # log the event in the log file
                            info = "{},{},{},{},{},{}\n".format(
                                year, month, day, time, to.speedMPH, imageID)
                            self.logFile.write(info)

                        # otherwise, we are not uploading vehicle images to
                        # dropbox
                        else:
                            # log the event in the log file
                            info = "{},{},{},{},{}\n".format(
                                year, month, day, time, to.speedMPH)
                            self.logFile.write(info)

                        # mark the object as logged
                        to.logged = True

            # if the *display* flag is set, then display the current frame
            # to the screen and record if a user presses a key
            if self.conf["display"]:
                cv2.imshow("frame", self.frame)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key is pressed, break from the loop
                if key == ord("q"):
                    break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
            self.fps.update()

        # stop the timer and display FPS information once the loop ends
        self.fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

    def closeLogFile(self):
        # check if the log file object exists, if it does, then close it
        if self.logFile is not None:
            self.logFile.close()

    # destroying all used resources.
    def destroyUsedResources(self):
        # close any open windows
        cv2.destroyAllWindows()
        # clean up
        print("[INFO] cleaning up...")
        self.vs.release()

    def main(self):
        # starting FPS.
        self.startFPS()

        # running program, loop.
        self.programLoop()

        # closing the log file
        self.closeLogFile()

        # destroying all used resources
        self.destroyUsedResources()
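
The speed estimate in SpeedEstimator is plain unit conversion: the pixel displacement between two measurement columns is scaled to meters with meterPerPixel, divided by the elapsed time between the zone timestamps, and the per-pair speeds are averaged. Below is a hedged standalone sketch of that arithmetic; the averaging and the km/h-to-MPH conversion are assumptions about what TrackableObject.calculate_speed does internally:

from datetime import datetime, timedelta

# sketch: average vehicle speed from per-zone x-positions and timestamps
def average_speed_mph(positions, timestamps, meter_per_pixel,
                      pairs=(("A", "B"), ("B", "C"), ("C", "D"))):
    speeds_kmph = []
    for i, j in pairs:
        pixels = abs(positions[j] - positions[i])
        if pixels == 0:
            continue
        hours = abs((timestamps[j] - timestamps[i]).total_seconds()) / 3600.0
        km = pixels * meter_per_pixel / 1000.0
        speeds_kmph.append(km / hours)
    return (sum(speeds_kmph) / len(speeds_kmph)) * 0.621371  # km/h -> MPH

# toy usage: four zones 100 px apart, crossed 0.5 s apart, 0.05 m per pixel
t0 = datetime.now()
ts = {"A": t0, "B": t0 + timedelta(seconds=0.5),
      "C": t0 + timedelta(seconds=1.0), "D": t0 + timedelta(seconds=1.5)}
xs = {"A": 100, "B": 200, "C": 300, "D": 400}
print("{:.1f} MPH".format(average_speed_mph(xs, ts, 0.05)))  # ~22.4 MPH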
	else:
		output_count += 1
print("[INFO] output directory: ", writer_path)

# initialize the frame dimensions as empty values;
# they will be assigned when the first frame is analyzed, and only then,
# which speeds the program up
width = None
height = None

# initialize the tracking algorithm
# maxDisappeared = the number of frames an object may disappear from the video
# and still be recognized again afterwards
# maxDistance = the maximum distance between the centers of the circles inscribed
# in the vehicle bounding boxes; if the distance is smaller, the same ID is reassigned
car_ct = CentroidTracker()
car_ct.maxDisappeared = 10
person_ct = CentroidTracker()
person_ct.maxDisappeared = 10
truck_ct = CentroidTracker()
truck_ct.maxDisappeared = 10
bike_ct = CentroidTracker()
bike_ct.maxDisappeared = 10
bicycle_ct = CentroidTracker()
bicycle_ct.maxDisappeared = 10
bus_ct = CentroidTracker()
bus_ct.maxDisappeared = 10

# the list of trackers itself
trackers = []
# list of objects being tracked
colors = np.random.uniform(0, 255, size=(len(classes), 3))

#cap=cv2.VideoCapture("/home/giuser/249_248_Source_backup/vehical_count/outpy.avi") #0 for 1st webcam
cap = cv2.VideoCapture()
font = cv2.FONT_HERSHEY_SIMPLEX
starting_time = time.time()
frame_id = 0

# frame_width = int(cap.get(3))
# frame_height = int(cap.get(4))

out_ = cv2.VideoWriter('outpy_testing.avi',
                       cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 1,
                       (1200, 678))

ct = CentroidTracker()
trackers = []
trackableObjects = {}

totalFrames = 0
totalDown = 0
totalUp = 0
x_ = []
empty = []
empty1 = []

#################### In/out threshold logic #####################
OUT = 320  # old 340
IN = 580
middle_cut_X = 575
middle_cut_Y = 150
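
The IN/OUT values above suggest a column-based rule rather than a mid-line rule: an object counts as entering once its centroid moves past the IN x-coordinate and as leaving once it moves back past the OUT x-coordinate. The exact rule applied downstream is not shown here, so the comparison directions in this sketch are assumptions:

# sketch: threshold-based in/out classification (directions assumed)
OUT_X = 320  # assumed exit column
IN_X = 580   # assumed entry column

def classify_crossing(prev_x, curr_x):
    """Return 'in', 'out', or None depending on which column the centroid crossed."""
    if prev_x < IN_X <= curr_x:   # moved left-to-right past the IN column
        return "in"
    if prev_x > OUT_X >= curr_x:  # moved right-to-left past the OUT column
        return "out"
    return None

print(classify_crossing(560, 590))  # -> 'in'
print(classify_crossing(330, 310))  # -> 'out'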
Example No. 21
    else:
        i += 1
print("[INFO] output directory: ", writer_path)

# initialize the frame dimensions as empty values;
# they will be assigned when the first frame is analyzed, and only then,
# which speeds the program up
width = None
height = None

# initialize the tracking algorithm
# maxDisappeared = the number of frames an object may disappear from the video
# and still be recognized again afterwards
# maxDistance = the maximum distance between the centers of the circles inscribed
# in the vehicle bounding boxes; if the distance is smaller, the same ID is reassigned
ct = CentroidTracker()

# the list of trackers itself
trackers = []
# dictionary of the objects being tracked
trackableObjects = {}

# total number of frames in the video
totalFrames = 0

# vehicle counter and a temporary variable
total = 0
temp = None

# status: detecting or tracking
status = None
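
Several of these examples alternate between an expensive "Detecting" pass every N frames and a cheap "Tracking" pass in between. A minimal sketch of that scheduling pattern, independent of any particular detector or tracker:

# sketch: skip-frame scheduling between detection and tracking
def frame_status(frame_index, skip_frames=30):
    """Return the per-frame status used by the counting loops."""
    return "Detecting" if frame_index % skip_frames == 0 else "Tracking"

print([frame_status(i) for i in (0, 1, 29, 30)])
# -> ['Detecting', 'Tracking', 'Tracking', 'Detecting']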
def getRealTimePeopleCount():
    try:
        CLASSES = ["person"]
        # Grab path to current working directory
        CWD_PATH = os.getcwd()
        OUTPUT_DIRECTORY = 'output'
        MODEL_DIRECTORY = 'inference_graph'
        PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_DIRECTORY,
                                    'frozen_inference_graph.pb')
        output = os.path.join(CWD_PATH, OUTPUT_DIRECTORY, 'example_01.avi')
        # input = os.path.join(CWD_PATH, 'videos', 'example_01.avi')
        input = None  # None forces the live RTSP stream below instead of a video file
        defaultConfidence = 0.4
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            sess = tf.Session(graph=detection_graph)
        print("[INFO] loading model...")
        # if a video path was not supplied, grab a reference to the webcam
        if input is None:
            print("[INFO] starting video stream...")
            vs = VideoStream(
                src='rtsp://*****:*****@[email protected]').start()
            time.sleep(2.0)

        # otherwise, grab a reference to the video file
        else:
            print("[INFO] opening video file...")
            vs = cv2.VideoCapture(input)

        # initialize the video writer (we'll instantiate later if need be)
        writer = None

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # initialize the total number of frames processed thus far, along
        # with the total number of objects that have moved either up or down
        totalFrames = 0
        peopleCount = 0
        # start the frames per second throughput estimator
        fps = FPS().start()

        # loop over frames from the video stream
        while True:
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream
            frame = vs.read()
            frame = frame[1] if input is not None else frame

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video
            if input is not None and frame is None:
                break
            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # if the frame dimensions are empty, set them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # if we are supposed to be writing a video to disk, initialize
            # the writer
            if output is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(output, fourcc, 30, (W, H), True)

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if totalFrames % 30 == 0:
                trackers = []
                rows = frame.shape[0]
                cols = frame.shape[1]
                inp = cv2.resize(frame, (300, 300))
                inp = inp[:, :, [2, 1, 0]]  # BGR2RGB
                out = sess.run(
                    [
                        sess.graph.get_tensor_by_name('num_detections:0'),
                        sess.graph.get_tensor_by_name('detection_scores:0'),
                        sess.graph.get_tensor_by_name('detection_boxes:0'),
                        sess.graph.get_tensor_by_name('detection_classes:0')
                    ],
                    feed_dict={
                        'image_tensor:0':
                        inp.reshape(1, inp.shape[0], inp.shape[1], 3)
                    })

                # Visualize detected bounding boxes.
                num_detections = int(out[0][0])
                for i in range(num_detections):
                    classId = int(out[3][0][i])
                    classId -= 1
                    score = float(out[1][0][i])
                    bbox = [float(v) for v in out[2][0][i]]
                    if score > defaultConfidence:
                        startX = int(bbox[1] * cols)
                        startY = int(bbox[0] * rows)
                        endX = int(bbox[3] * cols)
                        endY = int(bbox[2] * rows)

                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not;
                    # note: unlike the up/down examples, this function counts
                    # every tracked person exactly once, regardless of the
                    # direction of movement
                    if not to.counted:
                        peopleCount += 1
                        to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.rectangle(frame, ((centroid[0] - 30, centroid[1] - 40)),
                              ((centroid[0] + 30, centroid[1] + 40)),
                              (0, 255, 0), 1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [("PeopleCount", peopleCount)]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # check to see if we should write the frame to disk
            if writer is not None:
                writer.write(frame)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        outputVideoUrl = "localhost:8000/static/videos/example_01.avi"
        print("PeopleCount", peopleCount)
        print("outputVideoUrl", outputVideoUrl)
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # check to see if we need to release the video writer pointer
        if writer is not None:
            writer.release()

        # if we are not using a video file, stop the camera video stream
        if input is None:
            vs.stop()

        # otherwise, release the video file pointer
        else:
            vs.release()

        # close any open windows
        cv2.destroyAllWindows()
        return (peopleCount, outputVideoUrl)
    except Exception as ex:
        traceback.print_exc()
        raise ex
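
The frozen TensorFlow graph used in getRealTimePeopleCount returns boxes in normalized [ymin, xmin, ymax, xmax] order, which must be scaled to pixel corners before they can be handed to dlib or the centroid tracker. A small standalone sketch of that conversion with confidence filtering (the values are illustrative):

# sketch: convert normalized TF detection boxes to pixel rectangles
def to_pixel_boxes(boxes, scores, rows, cols, min_confidence=0.4):
    rects = []
    for bbox, score in zip(boxes, scores):
        if score <= min_confidence:
            continue
        ymin, xmin, ymax, xmax = bbox
        rects.append((int(xmin * cols), int(ymin * rows),
                      int(xmax * cols), int(ymax * rows)))
    return rects

# one confident detection in a frame with 300 rows and 500 columns
print(to_pixel_boxes([[0.1, 0.2, 0.5, 0.4]], [0.9], rows=300, cols=500))
# -> [(100, 30, 200, 150)]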
Example No. 23
if args["mask"] is not None:
    try:
        x_min, y_min, x_max, y_max = [
            int(item.replace(" ", "")) for item in args["mask"].split(",")
        ]
        observation_mask = [(x_min, y_min), (x_max, y_max)]
    except ValueError:
        print("Invalid mask format!")

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
centroidTracker_max_disappeared = 15
centroidTracker_max_distance = 100
ct = CentroidTracker(maxDisappeared=centroidTracker_max_disappeared,
                     maxDistance=centroidTracker_max_distance,
                     mask=observation_mask)
trackers = []
trackableObjects = {}

# Load the model
#net = cv2.dnn.readNet('models/mobilenet-ssd/FP16/mobilenet-ssd.xml', 'models/mobilenet-ssd/FP16/mobilenet-ssd.bin')
net = cv2.dnn.readNetFromCaffe("models/MobileNetSSD_deploy.prototxt",
                               "models/MobileNetSSD_deploy.caffemodel")

# Specify target device
#net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
    print("[INFO] starting video stream...")
Example No. 24
config = tf.compat.v1.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.8
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)

flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
                    'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('camera', '1', 'camera id')
flags.DEFINE_integer('nframes', 100, 'number of frames to process')
flags.DEFINE_string('output', './output.mp4', 'path to output video')

centroidTracker = CentroidTracker(20)


def main(_argv):
    if FLAGS.tiny:
        yolo = YoloV3Tiny()
    else:
        yolo = YoloV3()

    yolo.load_weights(FLAGS.weights)
    logging.info('weights loaded')

    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')

    times = []
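
The flags.DEFINE_* calls follow the absl-py pattern, so the truncated main(_argv) above would normally be launched through app.run, which parses the command line into FLAGS before main runs. A minimal sketch of that entry point, reusing two of the flag names defined above:

# sketch: minimal absl entry point (standalone; flag names match the example)
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('weights', './checkpoints/yolov3.tf', 'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')

def main(_argv):
    # FLAGS is populated by app.run before main is called
    print('tiny model:', FLAGS.tiny, '| weights:', FLAGS.weights)

if __name__ == '__main__':
    app.run(main)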
Example No. 25
print("[INFO] loading model...")
prototxt = "models/MobileNetSSD_deploy.prototxt"
model = "models/MobileNetSSD_deploy.caffemodel"
net = cv2.dnn.readNetFromCaffe(prototxt, model)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

print("[INFO] starting video stream...")
vs = cv2.VideoCapture(0)
time.sleep(2.0)

ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}

totalFrames = 0
totalDown = 0
totalUp = 0
totalPeople = 0


now = datetime.now().time()
print("now =", now)
hr = now.hour
min = now.minute
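
The snippet above only extracts the current hour and minute; one plausible use, not shown in the truncated example, is gating the counters to a fixed time window, for instance:

# sketch: restrict counting to a time window (hours are illustrative)
from datetime import datetime

def within_counting_hours(start_hour=9, end_hour=18):
    """Return True when the current local time falls inside the counting window."""
    now = datetime.now().time()
    return start_hour <= now.hour < end_hour

print("counting enabled" if within_counting_hours() else "outside counting hours")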

Example No. 26
def capture_loop(age_net, gender_net):
    font = cv2.FONT_HERSHEY_SIMPLEX
    # capture frames from the camera
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalFrames = 0
    totalDown = 0
    totalUp = 0

    W = None
    H = None
    tracker = dlib.correlation_tracker()
    ages = []
    genders = []
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):

        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text
        image = frame.array
        #/usr/local/share/OpenCV/haarcascades/
        face_cascade = cv2.CascadeClassifier(
            '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml'
        )
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        if W is None or H is None:
            (H, W) = image.shape[:2]

        rects = []

        if totalFrames % 5 == 0:
            trackers = []
            faces = face_cascade.detectMultiScale(gray, 1.1, 5)
            print("Found " + str(len(faces)) + " face(s)")
            #Draw a rectangle around every found face
            ages = []
            genders = []
            for (x, y, w, h) in faces:

                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)
                face_img = image[y:y + h, x:x + w].copy()
                blob = cv2.dnn.blobFromImage(face_img,
                                             1, (227, 227),
                                             MODEL_MEAN_VALUES,
                                             swapRB=False)
                # Predict gender
                gender_net.setInput(blob)
                gender_preds = gender_net.forward()
                gender = gender_list[gender_preds[0].argmax()]
                # Predict age
                age_net.setInput(blob)
                age_preds = age_net.forward()
                age = age_list[age_preds[0].argmax()]
                overlay_text = "%s, %s" % (gender, age)
                cv2.putText(image, overlay_text, (x, y), font, 1,
                            (255, 255, 255), 2, cv2.LINE_AA)

                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(x, y, x + w, y + h)
                tracker.start_track(rgb, rect)

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)
                ages.append(age)
                genders.append(gender)

        else:
            # loop over the trackers
            for i in range(len(trackers)):
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                trackers[i].update(rgb)
                pos = trackers[i].get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                cv2.rectangle(image, (startX, startY), (endX, endY),
                              (255, 255, 0), 2)
                overlay_text = "%s, %s" % (genders[i], ages[i])
                cv2.putText(image, overlay_text, (startX, startY), font, 1,
                            (255, 255, 255), 2, cv2.LINE_AA)
                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects, ages, genders, image)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            #cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
            #    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            #cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        cv2.imshow("Image", image)

        key = cv2.waitKey(1) & 0xFF

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        totalFrames += 1
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
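
capture_loop assumes that age_net, gender_net, MODEL_MEAN_VALUES, age_list, and gender_list are defined elsewhere. A hedged sketch of how such globals are typically set up for the widely used Caffe age/gender models; the file names are placeholders, not paths from this project:

# sketch: assumed setup for the age/gender networks used by capture_loop
import cv2

AGE_PROTO, AGE_MODEL = "deploy_age.prototxt", "age_net.caffemodel"              # placeholders
GENDER_PROTO, GENDER_MODEL = "deploy_gender.prototxt", "gender_net.caffemodel"  # placeholders

# mean values and label buckets commonly used with these public models
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
age_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)',
            '(25-32)', '(38-43)', '(48-53)', '(60-100)']
gender_list = ['Male', 'Female']

age_net = cv2.dnn.readNetFromCaffe(AGE_PROTO, AGE_MODEL)
gender_net = cv2.dnn.readNetFromCaffe(GENDER_PROTO, GENDER_MODEL)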
Example No. 27
# CentroidTracker is used below, so import it alongside the other dependencies
# (assuming the same pyimagesearch package layout used elsewhere in this collection)
from pyimagesearch.centroidtracker import CentroidTracker
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,	help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# initialize our centroid tracker and frame dimensions
ct = CentroidTracker()
(H, W) = (None, None)

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the video stream and allow the camera sensor to warmup
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

# loop over the frames from the video stream
while True:
	# read the next frame from the video stream and resize it
	frame = vs.read()
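
The example cuts off right after the first frame read. Below is a hedged sketch of how such a loop usually continues, reusing the names already defined above (ct, net, vs, args, np, imutils, cv2); the blob parameters and drawing details are assumptions, not copied from the original script:

# sketch: plausible continuation of the truncated tracking loop above
while True:
	# read and resize the next frame
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	(H, W) = frame.shape[:2]

	# run the loaded Caffe detector on the current frame
	blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H), (104.0, 177.0, 123.0))
	net.setInput(blob)
	detections = net.forward()

	# keep boxes above the confidence threshold, scaled to pixel coordinates
	rects = []
	for i in range(detections.shape[2]):
		if detections[0, 0, i, 2] > args["confidence"]:
			box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
			rects.append(box.astype("int"))

	# associate detections with existing object IDs and draw them
	objects = ct.update(rects)
	for (objectID, centroid) in objects.items():
		cv2.putText(frame, "ID {}".format(objectID),
			(centroid[0] - 10, centroid[1] - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
		cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

	cv2.imshow("Frame", frame)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break

cv2.destroyAllWindows()
vs.stop()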
Example No. 28
def main_func(args, vs, yolo):
    scheduler = BackgroundScheduler()  # initialize the background scheduler
    # add a scheduled job: call timedTask on an 'interval' trigger,
    # fired every cfg.KAFKA.PUSHINTER seconds
    scheduler.add_job(timedTask, 'interval',
                      seconds=cfg.KAFKA.PUSHINTER)  # push interval in seconds
    # start the scheduler
    scheduler.start()
    writer = None  # video writer; instantiated later if we write output to disk
    W = None
    H = None  # W, H are the frame dimensions
    ct = CentroidTracker(
        maxDisappeared=cfg.CRT.MAXDISAPPEARED,
        maxDistance=cfg.CRT.MAXDISTANCE)  # centroid tracker; objects are deregistered after maxDisappeared missed frames
    trackers = []  # list used to store the dlib correlation trackers
    trackableObjects = {}  # dictionary mapping object IDs to TrackableObjects
    totalFrames = 0  # total number of frames processed so far
    global total_up, total_down  # counters of people moving up/down
    fps = FPS().start()  # frames-per-second estimator for benchmarking
    inference_times = []
    # all initialization done; loop over the incoming frames below
    while True:

        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        st = time.time()
        frame = vs.read()
        frame = frame[1] if cfg.DATA.INPUT else frame
        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if cfg.DATA.INPUT is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(
            frame, width=cfg.FRAME.WIDTH)  # resize the frame; the fewer pixels, the faster the processing
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        et = time.time()
        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []  # bounding boxes from detection or tracking

        if totalFrames % 2 == 0:

            if totalFrames % cfg.FRAME.SKIPFRAMES == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers_a = []  # list of correlation trackers
                st = time.time()
                image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
                boxs, class_names = yolo.detect_image(image)
                et = time.time()
                print('detection take time : ', et - st)
                for box in boxs:
                    box = np.array(box)
                    (minx, miny, maxx, maxy) = box.astype("int")
                    cY = int((miny + maxy) / 2.0)
                    if cY > int(H * cfg.CRT.MINCY) and cY < int(
                            H * cfg.CRT.MAXCY):
                        tracker = dlib.correlation_tracker()  # instantiate a dlib correlation tracker
                        rect = dlib.rectangle(
                            minx, miny, maxx,
                            maxy)  # pass the object's bounding box coordinates to dlib.rectangle and store the result in rect
                        cv2.rectangle(frame, (minx, miny), (maxx, maxy),
                                      (2, 255, 0), 2)
                        rects.append((minx, miny, maxx, maxy))
                        # start tracking
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers_a.append(tracker)

            else:
                st = time.time()
                # loop over the trackers
                for tracker in trackers_a:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (2, 0, 255), 2)
                    rects.append((startX, startY, endX, endY))
                et = time.time()
                tt = et - st

            # draw a horizontal visualization line (pedestrians must cross it to be
            # counted) and then use the centroid tracker to update object centroids
            cv2.line(frame, (int(W * 0), int(H * cfg.FRAME.LINE)),
                     (int(W * 1), int(H * cfg.FRAME.LINE)), (0, 255, 0),
                     2)  # gate-crossing test line

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)
            # in the next step we review the logic that determines whether a person is moving up or down in the frame:
            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    """
                    我们获取给定对象的所有先前质心位置的y坐标值。
                    然后,我们通过获取current-object当前质心位置与current-object所有先前质心位置的平均值之间的差来计算方向。
                    我们之所以这样做是为了确保我们的方向跟踪更加稳定。
                    如果我们仅存储该人的先前质心位置,则我们可能会错误地计算方向。
                    """
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:

                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        """
                        检查方向是否为负(指示对象正在向上移动)以及质心是否在中心线上方。
                        在这种情况下,我们增加 totalUp  。
                        """
                        if to.centroids[0][1] < int(
                                H * cfg.FRAME.LINE) and centroid[1] > int(
                                    H * cfg.FRAME.LINE):
                            total_down += 1
                            to.counted = True
                            to.flag = 'DOWN'
                        elif to.centroids[0][1] > int(
                                H * cfg.FRAME.LINE) and centroid[1] < int(
                                    H * cfg.FRAME.LINE):
                            total_up += 1
                            to.counted = True
                            to.flag = 'UP'
                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # on-screen display
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                if to.counted:
                    cv2.putText(frame, text,
                                (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(frame, (centroid[0], centroid[1]), 4,
                               (0, 0, 255), -1)
                else:
                    cv2.putText(frame, text,
                                (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
                    cv2.circle(frame, (centroid[0], centroid[1]), 4,
                               (0, 0, 255), -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("Up", total_up),
                ("Down", total_down),
                ("Status", status),
            ]
            print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>totalDown", total_down)
            print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>totalUp", total_up)
            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            if cfg.DATA.OUTPUT is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(cfg.DATA.OUTPUT, fourcc, 30, (W, H),
                                         True)

            # write operation
            # check to see if we should write the frame to disk
            if writer is not None:
                writer.write(frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        end_time = time.time()
        inference_times.append(end_time - st)
        totalFrames += 1
        fps.update()
    # stop the timer and display FPS information
    try:
        inference_time = sum(inference_times) / len(inference_times)  # average per-frame inference time
        fps1 = 1.0 / inference_time  # FPS = 1 / average inference time
        print("---------------------------------------------------------")
        print("FPS is ..............{}".format(fps1))
    except Exception as e:
        print(e)
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print('totaldown people:', total_down)
    print('totalup people:', total_up)
    # write the test summary to a text file
    with open(info_txt, 'w') as f:
        f.write("[INFO] elapsed time: " + str("{:.2f}".format(fps.elapsed())) +
                "\n")
        f.write("[INFO] approx. FPS: " + str("{:.2f}".format(fps.fps())) +
                "\n")
        f.write('totaldown people: ' + str(total_down) + "\n")
        f.write('totalup people: ' + str(total_up))
    # release the video capture
    vs.release()
    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()
    # close any open windows
    cv2.destroyAllWindows()
Example No. 29
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--source", required=True,
	help="Source of video stream (webcam/host)")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
	help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
	help="# of skip frames between detections")
ap.add_argument("--ip", required=True,
	help="The IP Address")
args = vars(ap.parse_args())

ipaddress = args["ip"]
url = 'http://'+ipaddress+'/html/cam_pic_new.php?'

ct = CentroidTracker() # instantiate the centroid tracker
(H, W) = (None, None) # frame dimensions, filled in once the first frame is read

# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
	"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
	"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
	"sofa", "train", "tvmonitor"]
#IGNORE all classes except for person
IGNORE = ["background", "aeroplane", "bicycle", "bird", "boat",
	"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
	"dog", "horse", "motorbike", "pottedplant", "sheep",
	"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
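
The listing above stops after building the color table, so the detection loop is not shown. Below is a minimal sketch of how the IGNORE list would typically be applied; the detections, frame, W, and H names are assumed from the surrounding examples and are not part of the original snippet.

# hypothetical filtering loop -- not part of the original example
for i in np.arange(0, detections.shape[2]):
	confidence = detections[0, 0, i, 2]
	if confidence > args["confidence"]:
		idx = int(detections[0, 0, i, 1])
		# skip every class listed in IGNORE so only "person" detections remain
		if CLASSES[idx] in IGNORE:
			continue
		box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
		(startX, startY, endX, endY) = box.astype("int")
		cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)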
Example No. 30
class MyVideoCapture:
    def __init__(self, video_source):
        # Open the video source
        print("[INFO] loading model...")
        self.net = cv2.dnn.readNetFromCaffe(
            "mobilenet_ssd/MobileNetSSD_deploy.prototxt",
            "mobilenet_ssd/MobileNetSSD_deploy.caffemodel")
        print("[INFO] opening video file...")
        #"rtsp://*****:*****@192.168.0.200:554/cam/realmonitor?channel=1&subtype=0"
        self.vs = cv2.VideoCapture(video_source)
        self.W = None
        self.H = None
        self.ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        self.trackers = []
        self.trackableObjects = {}
        self.totalFrames = 0
        self.totalDown = 0
        self.totalUp = 0
        self.fps = FPS().start()
        if not self.vs.isOpened():
            raise ValueError("Unable to open video source", video_source)

    def get_frame(self):
        if self.vs.isOpened():
            ret, frame = self.vs.read()
            if not ret:
                # read failed (end of stream), so bail out early
                return (ret, None)
            frame = imutils.resize(frame, width=900)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.H, self.W = frame.shape[:2]

            status = "Waiting"
            rects = []
            if self.totalFrames % 5 == 0:
                status = "Detecting"

                self.trackers = []
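                # scalefactor 0.007843 is roughly 1/127.5; together with the mean
                # value 127.5 it scales pixel values to approximately [-1, 1]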
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()
                for i in np.arange(0, detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    if confidence > 0.4:
                        idx = int(detections[0, 0, i, 1])
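                        # class index 15 is "person" in the MobileNet SSD label list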
                        if idx == 15:
                            box = detections[0, 0, i, 3:7] * np.array(
                                [self.W, self.H, self.W, self.H])
                            (startX, startY, endX, endY) = box.astype("int")
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 255, 255), 2)
                            #centroid=(int((startX+endX)/2),int((startY+endY)/2))
                            #person detection
                            #if centroid[1]<= self.H-230 and centroid[1]>= self.H-320:
                            tracker = dlib.correlation_tracker()
                            rect = dlib.rectangle(startX, startY, endX, endY)
                            tracker.start_track(rgb, rect)
                            self.trackers.append(tracker)
            else:
                for tracker in self.trackers:
                    status = "Tracking"
                    tracker.update(rgb)
                    pos = tracker.get_position()
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    rects.append((startX, startY, endX, endY))

            #cv2.line(frame, (0, self.H-320), (self.W, self.H-320), (255, 0, 0), 2)
            cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2),
                     (0, 255, 255), 2)
            #cv2.line(frame, (0, self.H-230), (self.W, self.H-230), (255, 0, 0), 2)
            objects = self.ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = self.trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)
                    if not to.counted:
                        # negative direction = moving up in image coordinates
                        if direction < 0 and centroid[1] < self.H // 2:
                            self.totalDown += 1
                            to.counted = True
                        # positive direction = moving down in image coordinates
                        if direction > 0 and centroid[1] > self.H // 2:
                            self.totalUp += 1
                            to.counted = True
                self.trackableObjects[objectID] = to
            self.totalFrames += 1
            info = [
                ("IN", self.totalUp),
                ("Status", status),
            ]
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                #cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            # resize once, after the overlay loop, rather than on every iteration
            frame = cv2.resize(frame, (500, 500),
                               interpolation=cv2.INTER_AREA)
            global count
            global count1
            count1 = self.totalUp
            count = self.totalDown
            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            # the source was never opened, so there is no read result to return
            return (False, None)

    # Release the video source when the object is destroyed
    def __del__(self):
        if self.vs.isOpened():
            self.fps.stop()
            self.vs.release()
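
A minimal usage sketch for MyVideoCapture follows; the video path and the display loop are illustrative assumptions, not part of the original snippet. Note that get_frame() returns an RGB image, so it is converted back to BGR before being shown with OpenCV.

# hypothetical driver loop for MyVideoCapture
if __name__ == "__main__":
    cap = MyVideoCapture("videos/example.mp4")  # assumed sample path
    while True:
        ret, rgb_frame = cap.get_frame()
        if not ret:
            break
        # get_frame() returns RGB; convert back to BGR for cv2.imshow
        cv2.imshow("People Counter", cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()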