Example #1
    def _analyze_agent_movement(
        self,
        timestamp,
        agent_id,
        centroid,
        frame_mid,
    ):
        to = self._trackable_objects.get(agent_id, None)

        if to is None:
            to = TrackableObject(agent_id, centroid)
            self._trackable_objects[agent_id] = to
            return

        # Compute vertical delta
        mean_y = mean([cent[1] for cent in to.centroids])
        direction = centroid[1] - mean_y
        to.centroids.append(centroid)

        if to.counted:
            return

        # Moved up (entered)
        if direction < 0 and centroid[1] < frame_mid:
            self.enter_times.append(timestamp)
            to.counted = True

        # Moved down (left)
        if direction > 0 and centroid[1] > frame_mid:
            self.leave_times.append(timestamp)
            to.counted = True

        self._trackable_objects[agent_id] = to
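
All of the examples create and mutate a TrackableObject with the same three attributes: an ID, a history of centroids, and a counted flag. None of the snippets include the class itself; a minimal sketch consistent with how they use it might look like this (the attribute names come from the call sites, everything else is an assumption):

# Minimal sketch of the TrackableObject the snippets assume.
# Attribute names mirror how the examples access the object; the real
# definition in these projects may differ slightly.
class TrackableObject:
    def __init__(self, objectID, centroid):
        # unique ID handed out by the centroid tracker
        self.objectID = objectID
        # centroid history, used to estimate the direction of motion
        self.centroids = [centroid]
        # set to True once the object has been counted, so it is never
        # counted twice
        self.counted = False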
Example #2
def make_prediction(in_filename, out_filename, confidence_threshold=0.3, skip_frames=200, caffe_prototxt_file=None, model_file=None):
	# the list of PASCAL VOC class labels the SSD model was trained to
	# detect (GluonCV VOC models have no background class, so "person" is index 14)
	CLASSES = ["aeroplane", "bicycle", "bird", "boat",
		"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
		"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
		"sofa", "train", "tvmonitor"]

	# load our serialized model from disk
	print("[INFO] loading model...")
	# net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
	net = model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)

	# if a video path was not supplied, grab a reference to the webcam
	if not in_filename:
		print("[INFO] starting video stream...")
		vs = VideoStream(src=0).start()
		time.sleep(2.0)

	# otherwise, grab a reference to the video file
	else:
		print("[INFO] opening video file...")
		vs = cv2.VideoCapture(in_filename)

	# initialize the video writer (we'll instantiate later if need be)
	writer = None

	# initialize the frame dimensions (we'll set them as soon as we read
	# the first frame from the video)
	W = None
	H = None

	# instantiate our centroid tracker, then initialize a list to store
	# each of our dlib correlation trackers, followed by a dictionary to
	# map each unique object ID to a TrackableObject
	ct = CentroidTracker(maxDisappeared=10, maxDistance=50)
	trackers = []
	trackableObjects = {}

	# initialize the total number of frames processed thus far, along
	# with the total number of objects that have moved either up or down
	totalFrames = 0
	totalIn = 0
	totalOut = 0

	# start the frames per second throughput estimator
	fps = FPS().start()

	# loop over frames from the video stream
	while True:
		# grab the next frame and handle if we are reading from either
		# VideoCapture or VideoStream
		frame = vs.read()
		frame = frame[1] if in_filename else frame

		# if we are viewing a video and we did not grab a frame then we
		# have reached the end of the video
		if in_filename is not None and frame is None:
			break

		# resize the frame to have a maximum width of 500 pixels (the
		# less data we have, the faster we can process it), then convert
		# the frame from BGR to RGB for dlib
		frame = imutils.resize(frame, width=500)
		rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

		# if the frame dimensions are empty, set them
		if W is None or H is None:
			(H, W) = frame.shape[:2]

		# if we are supposed to be writing a video to disk, initialize
		# the writer
		if out_filename is not None and writer is None:
			fourcc = cv2.VideoWriter_fourcc(*"MJPG")
			writer = cv2.VideoWriter(out_filename, fourcc, 30,
				(W, H), True)

		# initialize the current status along with our list of bounding
		# box rectangles returned by either (1) our object detector or
		# (2) the correlation trackers
		status = "Waiting"
		rects = []

		# check to see if we should run a more computationally expensive
		# object detection method to aid our tracker
		if totalFrames % skip_frames == 0:
			# set the status and initialize our new set of object trackers
			status = "Detecting"
			trackers = []

			# run the frame through the GluonCV SSD test transform and the
			# network to obtain the detections (note that the returned boxes
			# are in the coordinate space of the transformed image)

			class_IDs, scores, bounding_boxes = net(data.transforms.presets.ssd.transform_test(mx.nd.array(frame), 270)[0])

			# loop over the detections
			for i, (class_ID, score, bounding_box) in enumerate(zip(class_IDs[0], scores[0], bounding_boxes[0])):

				class_ID = int(class_ID[0].asnumpy()[0])
				# extract the confidence (i.e., probability) associated
				# with the prediction
				confidence = score[0].asnumpy()[0]

				# filter out weak detections by requiring a minimum
				# confidence
				if confidence > confidence_threshold:
					# extract the index of the class label from the
					# detections list
					idx = int(class_ID)

					# if the class label is not a person, ignore it
					if CLASSES[idx] != "person":
						continue

					# compute the (x, y)-coordinates of the bounding box
					# for the object
					box = bounding_box.asnumpy()
					(startX, startY, endX, endY) = box.astype("int")

					# construct a dlib rectangle object from the bounding
					# box coordinates and then start the dlib correlation
					# tracker
					tracker = dlib.correlation_tracker()
					rect = dlib.rectangle(startX, startY, endX, endY)
					tracker.start_track(rgb, rect)

					# add the tracker to our list of trackers so we can
					# utilize it during skip frames
					trackers.append(tracker)

		# otherwise, we should utilize our object *trackers* rather than
		# object *detectors* to obtain a higher frame processing throughput
		else:
			# loop over the trackers
			for tracker in trackers:
				# set the status of our system to be 'tracking' rather
				# than 'waiting' or 'detecting'
				status = "Tracking"

				# update the tracker and grab the updated position
				tracker.update(rgb)
				pos = tracker.get_position()

				# unpack the position object
				startX = int(pos.left())
				startY = int(pos.top())
				endX = int(pos.right())
				endY = int(pos.bottom())

				# add the bounding box coordinates to the rectangles list
				rects.append((startX, startY, endX, endY))

		# use the centroid tracker to associate the (1) old object
		# centroids with (2) the newly computed object centroids
		objects = ct.update(rects)

		# loop over the tracked objects
		for (objectID, centroid) in objects.items():
			# check to see if a trackable object exists for the current
			# object ID
			to = trackableObjects.get(objectID, None)

			# if there is no existing trackable object, create one
			if to is None:
				to = TrackableObject(objectID, centroid)

			# otherwise, there is a trackable object so we can utilize it
			# to determine direction
			else:
				# the difference between the y-coordinate of the *current*
				# centroid and the mean of *previous* centroids will tell
				# us in which direction the object is moving (negative for
				# 'up' and positive for 'down')
				y = [c[1] for c in to.centroids]
				x = [c[0] for c in to.centroids]
				direction_y = centroid[1] - np.mean(y)
				direction_x = centroid[0] - np.mean(x)
				to.centroids.append(centroid)

				cur_x = np.mean(x)
				cur_y = np.mean(y)
				x_low, x_high, y_low, y_high = W // 3, 2 * W // 3, H // 4, 3 * H // 4
				
				cv2.line(frame, (x_low, y_low), (x_low, y_high), color=(0, 255, 0))
				cv2.line(frame, (x_high, y_low), (x_high, y_high), color=(0, 255, 0))
				cv2.line(frame, (x_high, y_low), (x_low, y_low), color=(0, 255, 0))
				cv2.line(frame, (x_high, y_high), (x_low, y_high), color=(0, 255, 0))

				# check to see if the object has been counted or not
				if not to.counted:
					# predict where the centroid is heading by stepping a few
					# pixels further along the current direction of motion
					delta_pixels = 10
					pred_x, pred_y = cur_x + delta_pixels * np.sign(direction_x), cur_y + delta_pixels * np.sign(direction_y)

					# currently outside the central zone but predicted to move
					# into it -> count the object as entering
					if ((cur_x < x_low or cur_x > x_high or cur_y < y_low or cur_y > y_high)
							and x_low <= pred_x <= x_high and y_low <= pred_y <= y_high):
						totalIn += 1
						to.counted = True
					# currently inside the central zone but predicted to move
					# out of it -> count the object as leaving
					elif ((x_low <= cur_x <= x_high and y_low <= cur_y <= y_high)
							and (pred_x < x_low or pred_x > x_high or pred_y < y_low or pred_y > y_high)):
						totalOut += 1
						to.counted = True

			# store the trackable object in our dictionary
			trackableObjects[objectID] = to

			# draw both the ID of the object and the centroid of the
			# object on the output frame
			text = "ID {}".format(objectID)
			cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

		# construct a tuple of information we will be displaying on the
		# frame
		info = [
			("In", totalIn),
			("Out", totalOut),
			("Status", status),
		]

		# loop over the info tuples and draw them on our frame
		for (i, (k, v)) in enumerate(info):
			text = "{}: {}".format(k, v)
			cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
				cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

		# check to see if we should write the frame to disk
		if writer is not None:
			writer.write(frame)

		# show the output frame
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF

		# if the `q` key was pressed, break from the loop
		if key == ord("q"):
			break

		# increment the total number of frames processed thus far and
		# then update the FPS counter
		totalFrames += 1
		fps.update()

	# stop the timer and display FPS information
	fps.stop()
	print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
	print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

	# check to see if we need to release the video writer pointer
	if writer is not None:
		writer.release()

	# if we are not using a video file, stop the camera video stream
	if not in_filename:
		vs.stop()

	# otherwise, release the video file pointer
	else:
		vs.release()

	# close any open windows
	cv2.destroyAllWindows()
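
Every example delegates identity assignment to a CentroidTracker whose update(rects) method takes the current list of bounding boxes and returns a mapping of object ID to centroid. The tracker itself is not shown in any snippet. Below is a condensed sketch of that interface, modeled on the widely used pyimagesearch-style implementation (nearest-centroid matching with maxDisappeared/maxDistance limits); the tracker actually imported by these scripts may differ in detail.

from collections import OrderedDict

import numpy as np
from scipy.spatial import distance as dist


class CentroidTracker:
    def __init__(self, maxDisappeared=50, maxDistance=50):
        self.nextObjectID = 0
        self.objects = OrderedDict()      # objectID -> centroid
        self.disappeared = OrderedDict()  # objectID -> frames since last seen
        self.maxDisappeared = maxDisappeared
        self.maxDistance = maxDistance

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        # no detections: age every tracked object and drop stale ones
        if len(rects) == 0:
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        # centroids of the incoming bounding boxes
        inputCentroids = np.array(
            [((sx + ex) // 2, (sy + ey) // 2) for (sx, sy, ex, ey) in rects])

        # nothing tracked yet: register everything
        if len(self.objects) == 0:
            for c in inputCentroids:
                self.register(c)
            return self.objects

        objectIDs = list(self.objects.keys())
        objectCentroids = list(self.objects.values())

        # greedily match existing objects to the nearest new centroid
        D = dist.cdist(np.array(objectCentroids), inputCentroids)
        rows = D.min(axis=1).argsort()
        cols = D.argmin(axis=1)[rows]

        usedRows, usedCols = set(), set()
        for (row, col) in zip(rows, cols):
            if row in usedRows or col in usedCols:
                continue
            if D[row, col] > self.maxDistance:
                continue
            objectID = objectIDs[row]
            self.objects[objectID] = inputCentroids[col]
            self.disappeared[objectID] = 0
            usedRows.add(row)
            usedCols.add(col)

        # unmatched existing objects age out; unmatched detections register
        for row in set(range(D.shape[0])) - usedRows:
            objectID = objectIDs[row]
            self.disappeared[objectID] += 1
            if self.disappeared[objectID] > self.maxDisappeared:
                self.deregister(objectID)
        for col in set(range(D.shape[1])) - usedCols:
            self.register(inputCentroids[col])

        return self.objects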
Example #3
    def frames():
        global useIPCam
        out, weights, imgsz = \
        'inference/output', 'weights/yolov5s.pt', 640
        if (useIPCam):
            source = 'stream.txt'
        else:
            source = 'traff.mp4'
        device = torch_utils.select_device()
        if os.path.exists(out):
            shutil.rmtree(out)  # delete output folder
        os.makedirs(out)  # make new output folder
        start = time.time()
        elapsed = 0
        # Load model
        google_utils.attempt_download(weights)
        model = torch.load(weights, map_location=device)['model']

        model.to(device).eval()

        # Second-stage classifier
        classify = False
        if classify:
            modelc = torch_utils.load_classifier(name='resnet101',
                                                 n=2)  # initialize
            modelc.load_state_dict(
                torch.load('weights/resnet101.pt',
                           map_location=device)['model'])  # load weights
            modelc.to(device).eval()

        # Half precision
        half = False and device.type != 'cpu'
        print('half = ' + str(half))

        if half:
            model.half()

        # Set Dataloader
        vid_path, vid_writer = None, None
        if (useIPCam):
            dataset = LoadStreams(source, img_size=imgsz)
        else:
            dataset = LoadImages(source, img_size=imgsz)

        names = model.names if hasattr(model, 'names') else model.modules.names
        colors = [[random.randint(0, 255) for _ in range(3)]
                  for _ in range(len(names))]

        # Run inference
        t0 = time.time()
        ct = CentroidTracker()
        listDet = ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck']

        totalDownPerson = 0
        totalDownBicycle = 0
        totalDownCar = 0
        totalDownMotor = 0
        totalDownBus = 0
        totalDownTruck = 0

        totalUpPerson = 0
        totalUpBicycle = 0
        totalUpCar = 0
        totalUpMotor = 0
        totalUpBus = 0
        totalUpTruck = 0
        pub = False
        trackableObjects = {}
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        for path, img, im0s, vid_cap in dataset:
            elapsed = time.time() - start
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)

            # Inference
            t1 = torch_utils.time_synchronized()
            pred = model(img, augment=False)[0]

            # Apply NMS
            pred = non_max_suppression(pred,
                                       0.4,
                                       0.5,
                                       fast=True,
                                       classes=None,
                                       agnostic=False)
            t2 = torch_utils.time_synchronized()

            # Apply Classifier
            if classify:
                pred = apply_classifier(pred, modelc, img, im0s)

            rects = []
            labelObj = []
            yObj = []
            arrCentroid = []
            for i, det in enumerate(pred):  # detections per image

                if (useIPCam):
                    # RTSP/IP camera stream
                    p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                else:
                    p, s, im0 = path, '', im0s
                height, width, channels = im0.shape
                cv2.line(im0, (0, int(height / 1.5)),
                         (int(width), int(height / 1.5)), (0, 0, 0),
                         thickness=1)
                save_path = str(Path(out) / Path(p).name)
                s += '%gx%g ' % img.shape[2:]  # print string
                gn = torch.tensor(im0.shape)[[1, 0, 1,
                                              0]]  #  normalization gain whwh
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                              im0.shape).round()

                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += '%g %s, ' % (n, names[int(c)])  # add to string
                    for *xyxy, conf, cls in det:
                        label = '%s %.2f' % (names[int(cls)], conf)
                        x = xyxy
                        tl = round(0.002 * (im0.shape[0] + im0.shape[1]) / 2) + 1  # line/font thickness
                        c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))

                        label1 = label.split(' ')
                        if label1[0] in listDet:
                            box = (int(x[0]), int(x[1]), int(x[2]), int(x[3]))
                            rects.append(box)
                            labelObj.append(label1[0])
                            cv2.rectangle(im0,
                                          c1,
                                          c2, (0, 0, 0),
                                          thickness=tl,
                                          lineType=cv2.LINE_AA)
                            tf = max(tl - 1, 1)
                            t_size = cv2.getTextSize(label,
                                                     0,
                                                     fontScale=tl / 3,
                                                     thickness=tf)[0]
                            c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                            cv2.rectangle(im0, c1, c2, (0, 100, 0), -1,
                                          cv2.LINE_AA)
                            cv2.putText(im0,
                                        label, (c1[0], c1[1] - 2),
                                        0,
                                        tl / 3, [225, 255, 255],
                                        thickness=tf,
                                        lineType=cv2.LINE_AA)

                detCentroid = Camera.generateCentroid(rects)
                objects = ct.update(rects)

                for (objectID, centroid) in objects.items():
                    arrCentroid.append(centroid[1])
                for (objectID, centroid) in objects.items():
                    #print(idxDict)
                    to = trackableObjects.get(objectID, None)
                    if to is None:
                        to = TrackableObject(objectID, centroid)
                    else:
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        to.centroids.append(centroid)
                        if not to.counted:

                            # moving up: only count inside a narrow band below
                            # the line, otherwise distant vehicles whose boxes
                            # reappear get counted twice
                            if direction < 0 and centroid[
                                    1] < height / 1.5 and centroid[
                                        1] > height / 1.7:
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'person'):
                                    totalUpPerson += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bicycle'):
                                    totalUpBicycle += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'car'):
                                    totalUpCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalUpMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bus'):
                                    totalUpBus += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalUpTruck += 1
                                    to.counted = True

                            elif direction > 0 and centroid[
                                    1] > height / 1.5:  # moving down
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'person'):
                                    totalDownPerson += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bicycle'):
                                    totalDownBicycle += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'car'):
                                    totalDownCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalDownMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bus'):
                                    totalDownBus += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalDownTruck += 1
                                    to.counted = True

                    trackableObjects[objectID] = to

                cv2.putText(im0, 'Down Person : ' + str(totalDownPerson),
                            (int(width * 0.7), int(height * 0.05)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down bicycle : ' + str(totalDownBicycle),
                            (int(width * 0.7), int(height * 0.1)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down car : ' + str(totalDownCar),
                            (int(width * 0.7), int(height * 0.15)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down motorbike : ' + str(totalDownMotor),
                            (int(width * 0.7), int(height * 0.2)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down bus : ' + str(totalDownBus),
                            (int(width * 0.7), int(height * 0.25)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down truck : ' + str(totalDownTruck),
                            (int(width * 0.7), int(height * 0.3)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)

                cv2.putText(im0, 'Up Person : ' + str(totalUpPerson),
                            (int(width * 0.02), int(height * 0.05)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up bicycle : ' + str(totalUpBicycle),
                            (int(width * 0.02), int(height * 0.1)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up car : ' + str(totalUpCar),
                            (int(width * 0.02), int(height * 0.15)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up motorbike : ' + str(totalUpMotor),
                            (int(width * 0.02), int(height * 0.2)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up bus : ' + str(totalUpBus),
                            (int(width * 0.02), int(height * 0.25)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up truck : ' + str(totalUpTruck),
                            (int(width * 0.02), int(height * 0.3)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                #print(elapsed)
                if (elapsed > 60):
                    ObjListku = [
                        'Person', 'Bicycle', 'Car', 'Motorbike', 'Bus', 'Truck'
                    ]
                    objCountUp = []
                    objCountDown = []
                    objCountDown.append(totalDownPerson)
                    objCountDown.append(totalDownBicycle)
                    objCountDown.append(totalDownCar)
                    objCountDown.append(totalDownMotor)
                    objCountDown.append(totalDownBus)
                    objCountDown.append(totalDownTruck)

                    objCountUp.append(totalUpPerson)
                    objCountUp.append(totalUpBicycle)
                    objCountUp.append(totalUpCar)
                    objCountUp.append(totalUpMotor)
                    objCountUp.append(totalUpBus)
                    objCountUp.append(totalUpTruck)

                    date = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')

                    data_set = {
                        "Timestamp": str(date),
                        "dPerson": totalDownPerson,
                        "dBicycle": totalDownBicycle,
                        "dCar": totalDownCar,
                        "dBus": totalDownBus,
                        "dTruck": totalDownTruck,
                        "uPerson": totalUpPerson,
                        "uBicycle": totalUpBicycle,
                        "uCar": totalUpCar,
                        "uBus": totalUpBus,
                        "uTruck": totalUpTruck
                    }
                    MQTT_MSG = json.dumps(data_set)
                    client.publish(MQTT_TOPIC, MQTT_MSG)
                    #db.insert(date,ObjListku,objCountUp,objCountDown)  # insert into the database module

                    # reset the per-interval counters only after the totals
                    # have been published
                    totalDownPerson = 0
                    totalDownBicycle = 0
                    totalDownCar = 0
                    totalDownMotor = 0
                    totalDownBus = 0
                    totalDownTruck = 0

                    totalUpPerson = 0
                    totalUpBicycle = 0
                    totalUpCar = 0
                    totalUpMotor = 0
                    totalUpBus = 0
                    totalUpTruck = 0

                    elapsed = 0
                    start = time.time()
                #time.sleep(00.1)
                if pub == False:
                    proc = subprocess.Popen(
                        'ffmpeg -re -f mjpeg -i http://0.0.0.0:5000/video_feed -f lavfi -i anullsrc -c:v libx264 -g 60 -c:a aac -ar 44100 -ac 2 -f flv rtmp://your-rtmp-server',
                        shell=True)
                    pub = True
            yield cv2.imencode('.jpg', cv2.resize(im0,
                                                  (800, 600)))[1].tobytes()
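
Example #3 also calls a Camera.generateCentroid helper that is not shown. Because the script later does detCentroid.tolist().index(centroid.tolist()), the helper must compute centroids with exactly the same integer arithmetic and in the same order as the centroid tracker, or the lookup will fail. A hypothetical sketch under that assumption (the name comes from the call site; the body is guessed):

import numpy as np


def generateCentroid(rects):
    """Centroids of (startX, startY, endX, endY) boxes, one row per box,
    computed the same way the centroid tracker computes them."""
    centroids = np.zeros((len(rects), 2), dtype="int")
    for i, (startX, startY, endX, endY) in enumerate(rects):
        centroids[i] = (int((startX + endX) / 2.0), int((startY + endY) / 2.0))
    return centroids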
Example #4
    def loop(self):
        W = None
        H = None

        frame = self.vs.read()
        frame = frame[1]

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if self.file is not None and frame is None:
            return

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        self.rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % 30 == 0:
            # set the status and initialize our new set of object trackers
            self.trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > 0.4:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    self.trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in self.trackers:
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                self.rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(rgb, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = self.ct.update(self.rects)

        # loop over the tracked objects

        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        self.totalUp += 1
                        self.total = self.totalDown - self.totalUp
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        self.totalDown += 1
                        self.total = self.totalDown - self.totalUp
                        to.counted = True

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame

            cv2.circle(rgb, (centroid[0], centroid[1] - 20), 4, (0, 255, 0),
                       -1)

        # show the output frame
        self.videoframe = rgb
        self.master.after(25, self.loop)

        self.totalFrames = self.totalFrames + 1
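
Examples #4, #5 and #6 all read the output of the Caffe MobileNet SSD the same way: net.forward() returns a blob of shape (1, 1, N, 7) in which each row is [batch_id, class_id, confidence, x1, y1, x2, y2], with the box coordinates normalized to [0, 1]. A small helper that isolates just that parsing step (function and parameter names are illustrative, not taken from the original scripts):

import numpy as np


def parse_person_detections(detections, W, H, conf_thresh=0.4, person_idx=15):
    """Yield (startX, startY, endX, endY) person boxes from a cv2.dnn
    MobileNet SSD forward() blob of shape (1, 1, N, 7)."""
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence <= conf_thresh:
            continue
        idx = int(detections[0, 0, i, 1])
        # 15 is "person" in the 21-entry VOC label list that starts
        # with "background"
        if idx != person_idx:
            continue
        # coordinates are normalized, so scale back to the frame size
        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
        yield tuple(box.astype("int"))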
Example #5
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()

    index_default = 0
    prototxt_default = ["mobilenet_ssd/MobileNetSSD_deploy.prototxt", "pednet/deploy.prototxt"]
    caffe_default = ["mobilenet_ssd/MobileNetSSD_deploy.caffemodel", "pednet/snapshot_iter_70800.caffemodel"]
    labels_default = ["mobilenet_ssd/MobileNetSSD_deploy_labels.txt", "pednet/class_labels.txt"]

    ap.add_argument("-p", "--prototxt", default=prototxt_default[index_default],
                    help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", default=caffe_default[index_default],
                    help="path to Caffe pre-trained model")
    ap.add_argument("-l", "--labels", default=labels_default[index_default],
                    help="path to Caffe class labels")
    ap.add_argument("-i", "--input", type=str,
        help="path to optional input video file")
    ap.add_argument("-o", "--output", type=str,
        help="path to optional output video file")
    ap.add_argument("-c", "--confidence", type=float, default=0.4,
        help="minimum probability to filter weak detections")
    ap.add_argument("-s", "--skip-frames", type=int, default=30,
        help="# of skip frames between detections")
    args = vars(ap.parse_args())

    # initialize the list of class labels MobileNet SSD was trained to
    # detect

    CLASSES = []
    if index_default == 0:
        CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
        "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
        "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
        "sofa", "train", "tvmonitor"]
    else:
        with open(args["labels"]) as f:
            CLASSES = f.readlines()
        CLASSES = [line.strip() for line in CLASSES]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # if a video path was not supplied, grab a reference to the webcam
    if not args.get("input", False):
        print("[INFO] starting video stream...")
        # vs = VideoStream(src=0).start()
        vs = cv2.VideoCapture(get_jetson_gstreamer_source(), cv2.CAP_GSTREAMER)
        time.sleep(2.0)  # TODO I think this pause is here to give cv2 time to open the camera resource...

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(args["input"])

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    # fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame (both branches above use cv2.VideoCapture, so
        # read() already returns a (ret, frame) pair)
        ret, frame = vs.read()

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if args["output"] is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30,
                (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()
            print(CLASSES)

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    print(CLASSES[idx])
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        # fps.update()

    # stop the timer and display FPS information
    # fps.stop()
    # print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream
    if not args.get("input", False):
        # vs.stop()
        vs.release()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()
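
The counting rule repeated in every example compares the y-coordinate of the current centroid with the mean y-coordinate of the object's previous centroids: a negative difference means the object is moving up, a positive one that it is moving down, and the count is only incremented once the centroid is also on the far side of the reference line. Isolated as a small function for clarity (a sketch; the function name is not from the original code):

import numpy as np


def update_direction_count(to, centroid, line_y):
    """Return 'up', 'down' or None for TrackableObject `to`, given its new
    centroid and the y-coordinate of the counting line."""
    previous_y = [c[1] for c in to.centroids]
    direction = centroid[1] - np.mean(previous_y)  # < 0 moving up, > 0 moving down
    to.centroids.append(centroid)

    if to.counted:
        return None
    if direction < 0 and centroid[1] < line_y:   # moving up, above the line
        to.counted = True
        return "up"
    if direction > 0 and centroid[1] > line_y:   # moving down, below the line
        to.counted = True
        return "down"
    return None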
Example #6
def detect():
    global video_stream, outputFrame, lock, client, configurations

    config = configurations['people_counter']

    classes = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config["prototxt"], config["model"])

    print("[INFO] opening video file...")
    video_stream = cv2.VideoCapture(config["input"])

    W = None
    H = None
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackable_objects = {}

    total_frames = 0
    total_down = 0
    total_up = 0
    total = 0
    new_person = False
    fps = FPS().start()

    while True:

        frame = video_stream.read()
        frame = frame[1] if "input" in config else frame
        if "input" in config and frame is None:
            break
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        status = "Waiting"
        rects = []

        if total_frames % config["skip_frames"] == 0:
            status = "Detecting"
            trackers = []
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > config["confidence"]:
                    idx = int(detections[0, 0, i, 1])
                    if classes[idx] != "person":
                        continue

                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (start_x, start_y, end_x, end_y) = box.astype("int")

                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(start_x, start_y, end_x, end_y)
                    tracker.start_track(rgb, rect)

                    trackers.append(tracker)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                start_x = int(pos.left())
                start_y = int(pos.top())
                end_x = int(pos.right())
                end_y = int(pos.bottom())
                rects.append((start_x, start_y, end_x, end_y))

        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():

            to = trackable_objects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0 and centroid[1] < H // 2:
                        total_up += 1
                        total += 1
                        to.counted = True
                        new_person = True

                    elif direction > 0 and centroid[1] > H // 2:
                        total_down += 1
                        total += 1
                        to.counted = True
                        new_person = True

            trackable_objects[objectID] = to

            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        info = [
            ("Up", total_up),
            ("Down", total_down),
            ("Status", status),
        ]

        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        if new_person:
            message = {"data": {"value": total}}
            client.publish('people', json.dumps(message))
            new_person = False

        # update the frame served to clients on every iteration, not only
        # when a new person has been counted
        with lock:
            outputFrame = frame.copy()

        total_frames += 1
        fps.update()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    video_stream.release()
    def get_frame(self):
        if self.vs.isOpened():
            ret, frame = self.vs.read()
            frame = imutils.resize(frame, width=900)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.H, self.W = frame.shape[:2]

            status = "Waiting"
            rects = []
            if self.totalFrames % 10 == 0:
                status = "Detecting"

                self.trackers = []
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()
                for i in np.arange(0, detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    if confidence > 0.4:
                        idx = int(detections[0, 0, i, 1])
                        if idx == 15:
                            box = detections[0, 0, i, 3:7] * np.array(
                                [self.W, self.H, self.W, self.H])
                            (startX, startY, endX, endY) = box.astype("int")
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 255, 255), 2)
                            centroid = (int(
                                (startX + endX) / 2), int((startY + endY) / 2))
                            if centroid[1] <= self.H - 230 and centroid[
                                    1] >= self.H - 320:
                                tracker = dlib.correlation_tracker()
                                rect = dlib.rectangle(startX, startY, endX,
                                                      endY)
                                tracker.start_track(rgb, rect)
                                self.trackers.append(tracker)
            else:
                for tracker in self.trackers:
                    status = "Tracking"
                    tracker.update(rgb)
                    pos = tracker.get_position()
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    rects.append((startX, startY, endX, endY))

            #cv2.line(frame, (0, self.H-320), (self.W, self.H-320), (255, 0, 0), 2)
            cv2.line(frame, (0, self.H - 290), (self.W, self.H - 290),
                     (0, 255, 255), 2)
            #cv2.line(frame, (0, self.H-230), (self.W, self.H-230), (255, 0, 0), 2)
            objects = self.ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = self.trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)
                    if not to.counted:
                        if direction < 0 and centroid[1] < self.H - 270:
                            self.totalDown += 1
                            to.counted = True
                        if direction > 0 and centroid[1] > self.H - 270:
                            self.totalUp += 1
                            to.counted = True
                self.trackableObjects[objectID] = to
            self.totalFrames += 1
            info = [
                ("IN", self.totalUp),
                ("Status", status),
            ]
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                #cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                frame = cv2.resize(frame, (500, 500),
                                   interpolation=cv2.INTER_AREA)
            global count
            count = self.totalUp
            if ret:
                # Return a boolean success flag and the current frame converted to BGR
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            # the capture is not open, so there is no frame (and no ret) to return
            return (False, None)
    def run(self, confidence_thresh=0.4, skip_frames=30):
        firstframe = self.vs.read()
        if not firstframe[0]:
            logging.error(
                f"Did not get a single frame from input file {self.inputFile}")
            raise ValueError('Input file did not produce a single frame.')
        while True:
            # grab the next frame and handle if we are reading from
            # VideoStream
            frame = self.vs.read()
            frame = frame[1]

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video, restart
            if frame is None:
                self.vs = cv2.VideoCapture(self.inputFile)
                self.currentOccupancy = 25
                continue

            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if self.W is None or self.H is None:
                (self.H, self.W) = frame.shape[:2]

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if self.totalFrames % skip_frames == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > confidence_thresh:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if self.CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array(
                            [self.W, self.H, self.W, self.H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            #cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = self.ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = self.trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < self.H // 2:
                            self.totalOut += 1
                            self.currentOccupancy -= 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > self.H // 2:
                            self.totalIn += 1
                            self.currentOccupancy += 1
                            to.counted = True

                # store the trackable object in our dictionary
                self.trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
            #    text = "ID {}".format(objectID)
            #    cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
            #               cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            #    cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            # construct a tuple of information we will be displaying on the
            # frame
            #info = [
            #    ("Out", self.totalOut),
            #    ("In", self.totalIn),
            #    ("Occupancy", self.currentOccupancy),
            #    ("Status", status),
            #]

            ## loop over the info tuples and draw them on our frame
            #for (i, (k, v)) in enumerate(info):
            #    text = "{}: {}".format(k, v)
            #    cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
            #                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # show the output frame
            #cv2.imshow("Frame", frame)
            #key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            #if key == ord("q"):
            #    break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
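
For reference: every snippet here constructs TrackableObject(objectID, centroid) and then reads its centroids list and counted flag, but the class itself is never shown. A minimal sketch of what these snippets assume (the real helper in each project may carry extra fields):

# Minimal sketch of the TrackableObject helper assumed by the examples;
# only these three attributes are used in the snippets above and below.
class TrackableObject:
    def __init__(self, objectID, centroid):
        self.objectID = objectID      # ID assigned by the centroid tracker
        self.centroids = [centroid]   # history of (x, y) centroid positions
        self.counted = False          # set once the object has been counted
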
Пример #9
0
    def loop_function(self):
        frame = self.ci.vs.read()
        # VideoCapture.read() returns a (grabbed, frame) tuple, so keep only
        # the frame; if a webcam VideoStream is used instead, read() already
        # returns the frame and this unpacking should be skipped
        frame = frame[1]

        # if the video is over, stop processing this frame
        if self.videoInput is not None and frame is None:
            return

        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if self.ci.W is None or self.ci.H is None:
            (self.ci.H, self.ci.W) = frame.shape[:2]
            print(self.ci.H, self.ci.W)

        self.ci.status = "Waiting"
        rects = []

        if self.ci.totalFrames % self.skipFrames == 0:
            self.ci.status = "Detecting"
            self.ci.trackers = []

            blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.ci.W, self.ci.H), 127.5)
            self.ci.net.setInput(blob)
            detections = self.ci.net.forward()

            for i in np.arange(0, detections.shape[2]):

                confidence = detections[0, 0, i, 2]

                if confidence > self.confidence:

                    idx = int(detections[0, 0, i, 1])

                    if self.CLASSES[idx] != "person":
                        continue

                    box = detections[0, 0, i, 3:7] * np.array([self.ci.W, self.ci.H, self.ci.W, self.ci.H])
                    (startX, startY, endX, endY) = box.astype("int")

                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    self.ci.trackers.append(tracker)

        else:
            for tracker in self.ci.trackers:
                self.ci.status = "Tracking"

                tracker.update(rgb)
                pos = tracker.get_position()

                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                rects.append((startX, startY, endX, endY))

        objects = self.ci.ct.update(rects)

        for (objectID, centroid) in objects.items():

            to = self.ci.trackableObjects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                y = [c[1] for c in to.centroids]
                if len(to.centroids) > 1:
                    p1, p2 = to.centroids[-2:]
                    for i in self.lines:
                        if i.complete:
                            check = self.intersect([i.x1, i.y1], [i.x2, i.y2], p1, p2)
                            if check:
                                if i.type == 0:
                                    print([i.x1, i.y1], [i.x2, i.y2], p1, p2, check)
                                    i.itPassed()
                                else:
                                    print("alert")
                                    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                                    start = time.time()
                                    outputPath = "output/" + str(start) + ".avi"
                                    # keep the writer, start time and flag on the
                                    # capture object so they persist across calls
                                    # to loop_function (they are read back below)
                                    self.ci.writer = cv2.VideoWriter(outputPath, fourcc, 30, (self.ci.W, self.ci.H), True)
                                    self.ci.recordStart = start
                                    self.ci.record = True

                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0 and centroid[1] < self.ci.H // 2:
                        self.ci.totalUp += 1
                        to.counted = True

                    elif direction > 0 and centroid[1] > self.ci.H // 2:
                        self.ci.totalDown += 1
                        to.counted = True

            self.ci.trackableObjects[objectID] = to

            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        info = [
            ("Up", self.ci.totalUp),
            ("Down", self.ci.totalDown),
            ("Status", self.ci.status),
        ]

        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, self.ci.H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        cv2.setMouseCallback('Frame', self.draw_shape)
        for i in self.lines:
            if i.complete:
                if i.type == 0:
                    cv2.line(frame, (i.x1, i.y1), (i.x2, i.y2), (255, 0, 0), 2)
                elif i.type == 1:
                    cv2.line(frame, (i.x1, i.y1), (i.x2, i.y2), (255, 255, 0), 2)

        if self.ci.record:
            self.ci.writer.write(frame)
            end = time.time()
            if (end - self.ci.recordStart) > self.recordDuration:
                self.ci.writer.release()
                self.ci.record = False

        self.ci.totalFrames += 1
        return frame
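
loop_function above calls self.intersect([i.x1, i.y1], [i.x2, i.y2], p1, p2) to decide whether a track crossed a user-drawn line, but that helper is not included in the listing. A common way to implement such a test is the counter-clockwise orientation check; the following is only a plausible sketch of what the helper might look like:

def ccw(a, b, c):
    # True if the points a, b, c are ordered counter-clockwise
    return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])

def intersect(a, b, c, d):
    # True if segment AB strictly crosses segment CD (collinear cases ignored)
    return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)
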
def fn2(userName):
    Prototxt = 'C:\\Users\\Kanchi\\PycharmProjects\\demo\\mobilenet_ssd\\MobileNetSSD_deploy.prototxt'
    model = 'C:\\Users\\Kanchi\\PycharmProjects\\demo\\mobilenet_ssd\\MobileNetSSD_deploy.caffemodel'
    Input = 'test\\test8.mp4'
    output = 'output\\output_01.avi'
    Confidence = 0.85
    skip_frames = 2
    info = []
    rightList = []
    queueCount = 0

    # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--prototxt", required=True,
    #                 help="path to Caffe 'deploy' prototxt file")
    # ap.add_argument("-m", "--model", required=True,
    #                 help="path to Caffe pre-trained model")
    # ap.add_argument("-i", "--input", type=str,
    #                 help="path to optional input video file")
    # ap.add_argument("-o", "--output", type=str,
    #                 help="path to optional output video file")
    # ap.add_argument("-c", "--confidence", type=float, default=0.4,
    #                 help="minimum probability to filter weak detections")
    # ap.add_argument("-s", "--skip-frames", type=int, default=30,
    #                 help="# of skip frames between detections")
    # args = vars(ap.parse_args())

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(Prototxt, model)

    # if a video path was not supplied, grab a reference to the webcam
    if not Input:
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(Input)

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    # ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    ct = CentroidTracker(maxDisappeared=20, maxDistance=100)

    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    right = 0
    people = 0
    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:

        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if Input else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if Input is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if output is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % skip_frames == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > Confidence:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()

                    rect = dlib.rectangle(startX, startY, endX, endY)

                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers

            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                if (startX + endX) / 2 > 2 * W // 3:
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)
                    # x1 = int((startX + endX) / 2)
                    # y1 = int((startY + endY) / 2)

                    # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (2 * W // 3, 0), (2 * W // 3, H), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)
        right = 0
        total = 0
        for i in objects.items():
            total += 1
            if i[1][0] > 2 * W // 3:
                right += 1

        people += total

        # loop over the tracked objects

        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                x = [c[0] for c in to.centroids]

                # direction = centroid[0] - np.mean(x)
                to.centroids.append(centroid)
                # print(x)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    # if direction < 0 and centroid[1] < 2*H // 3:
                    # 	totalUp += 1
                    # 	to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    # if centroid[1] < 2*H // 3:
                    # 	totalUp += 1
                    # 	to.counted = True
                    #
                    # if centroid[1] > 2*H // 3:
                    # 	totalDown += 1
                    # 	to.counted = True
                    # if centroid[0] < 2 * H // 3:
                    # 	totalUp += 1
                    # 	to.counted = True

                    if centroid[0] > 2 * W // 3:
                        totalDown += 1
                        to.counted = True

                # print(len(x))

                # print(x[len(x) - 1])
                # print(x[len(x) - 2])
                # if the object moved back across the line (right to left),
                # undo the earlier count; x[-2]/x[-1] are the last two
                # recorded x-coordinates for this track
                if len(x) >= 2 and x[-2] > (2 * W // 3) and x[-1] < (2 * W // 3):
                    print(
                        "------------------------------------------------subtracted"
                    )
                    totalDown -= 1
                    to.counted = True

            #
            # if x1 > 2 * H // 3:
            # 	totalDown += 1
            # to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [("Total", total), ("Right", right), ("Status", status)]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)

        rightCount = info[1][1]

        if rightCount == 0:
            queueCount += 1
            print('queueCount=', queueCount)

        # send an alert e-mail the first time a non-zero queue length is seen,
        # and again whenever the queue length changes
        if rightCount != 0 and (len(rightList) == 0 or rightList[-1] != rightCount):
            rightList.append(rightCount)

            fromaddr = "*****@*****.**"
            toaddr = userName

            msg = MIMEMultipart()
            msg['From'] = fromaddr
            msg['To'] = toaddr
            msg['Subject'] = "QUEUE NEEDS TO BE MANAGED!"
            msg.attach(
                MIMEText(
                    "Over Crowded persons for queue are " + str(rightCount),
                    'plain'))

            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(fromaddr, "Retail123")
            server.sendmail(fromaddr, toaddr, msg.as_string())
            server.quit()

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    queue = people // totalFrames if totalFrames else 0
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] Total Frames: ", totalFrames)
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print("[INFO] Average Queue: ", queue)

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream
    if not Input:
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()

    # info.append(('queue',queue))

    print("rightList====", rightList)

    return queue, queueCount
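
Every example relies on a CentroidTracker constructed with maxDisappeared/maxDistance and updated once per frame with the current bounding boxes, but the tracker implementation is not part of the listing. A short usage sketch of the interface the snippets depend on, assuming the pyimagesearch-style tracker whose update() returns a dict mapping object IDs to centroids (the module path below is an assumption):

from pyimagesearch.centroidtracker import CentroidTracker  # assumed module path

# maxDisappeared: frames an ID may go unseen before it is dropped
# maxDistance: maximum pixel distance for matching an old centroid to a new box
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)

# rects holds (startX, startY, endX, endY) boxes for the current frame
rects = [(120, 80, 180, 220), (300, 90, 360, 230)]
objects = ct.update(rects)  # {objectID: array([cX, cY]), ...}

for objectID, centroid in objects.items():
    print(objectID, centroid)
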
Пример #11
0
def detect_track_count():
    centroidTracker_max_disappeared = int(os.getenv('CENTROID_TRACKER_MAX_DISAPPEARED'))
    centroidTracker_max_distance = int(os.getenv('CENTROID_TRACKER_MAX_DISTANCE'))
    skip_frames = int(os.getenv("SKIP_FRAMES"))

    print("[INFO] warming up camera...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    # initialize the frame dimensions
    H = None
    W = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(centroidTracker_max_disappeared, centroidTracker_max_distance)
    trackers = []
    trackableObjects = {}
    totalFrames = 0
    totalOverall = 0

    while True:
        frame = vs.read()

        if frame is None:
            break

        # resize the frame
        frame = imutils.resize(frame, width=900)
        frame = cv2.flip(frame, 0)
        # NB: the capture delivers BGR frames; this swap (identical to
        # COLOR_BGR2RGB) produces the RGB image that dlib's correlation
        # tracker is started on below, despite the variable name
        bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % skip_frames == 0:
            # initialize our new set of object trackers
            trackers = []
            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, size=(300, 300),
                                         ddepth=cv2.CV_8U)
            net.setInput(blob, scalefactor=1.0 / 127.5, mean=[127.5,
                                                              127.5, 127.5])
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > 0.4:
                    # extract the index of the class label
                    idx = int(detections[0, 0, i, 1])
                    if idx > 2:
                        continue
                    # if the class label is not a car or person, ignore it
                    if CLASSES[idx] == "person" or CLASSES[idx] == "car":
                        # compute the (x, y)-coordinates of the bounding box
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")
                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(bgr, rect)
                        # add the tracker to our list of trackers so we can utilize it during skip frames
                        trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # update the tracker and grab the updated position
                tracker.update(bgr)
                pos = tracker.get_position()
                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))
        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)
            if to is None:
                timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
                to = TrackableObject(objectID, centroid)
                if CLASSES[idx] == "car":
                    objectInfo = [("ID", objectID), ("timestamp", timestamp), ("object", CLASSES[idx])]
                    print(objectInfo)

                if CLASSES[idx] == "person":
                    objectInfo = [("ID", objectID), ("timestamp", timestamp), ("object", CLASSES[idx])]
                    results = detect_and_predict(frame, faceNet, ageNet, genderNet)
                    # loop over the results
                    for r in results:
                        objectInfo = [("ID", objectID), ("timestamp", timestamp), ("object", CLASSES[idx]),
                                      ("gender", r["gender"]), ("age", r["age"])]
                    print(objectInfo)

            else:
                y = [c[1] for c in to.centroids]
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    totalOverall += 1
                    to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

        totalFrames += 1
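
detect_track_count prepares its input blob differently from the other snippets: it builds a raw 8-bit blob and passes the scale factor and mean to setInput, whereas the others fold the normalisation into blobFromImage (0.007843 ≈ 1/127.5). For this MobileNet-SSD the two forms are equivalent; a small self-contained sketch with hypothetical stand-in values:

import cv2
import numpy as np

# hypothetical stand-ins so the snippet runs on its own; real code loads the
# actual Caffe model files and reads frames from a video source
net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                               "MobileNetSSD_deploy.caffemodel")
frame = np.zeros((300, 300, 3), dtype=np.uint8)
(H, W) = frame.shape[:2]

# (a) scale and mean-subtract while building the blob
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
net.setInput(blob)
detections_a = net.forward()

# (b) build a raw 8-bit blob and let setInput apply the same normalisation
blob = cv2.dnn.blobFromImage(frame, size=(300, 300), ddepth=cv2.CV_8U)
net.setInput(blob, scalefactor=1.0 / 127.5, mean=[127.5, 127.5, 127.5])
detections_b = net.forward()
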
Пример #12
0
def people_counter(video_capture, net, ct, classes,
                   output_video, fourcc, total_frames, total_left,
                   total_right, trackable_objects, confidence, skip_frames):
    """ Track people over time
    Args:
           video_capture ([type]): video
           net (cv2.dnn): neural network
           ct ([type]): dlib centroid tracker
           classes ([type]): classes of identification
           output_video ([type]): what the output video is saved under
           fourcc ([type]): video writer
           total_frames (float): complete number of frames
           total_left (float): complete number of lefts
           total_right (float): complete number of rights
           trackable_objects ([type]): list of objects on screen from classes
           confidence (float): minimum for detection, defaults to 0.4
           skip_frames (float): defaults to 30
    Returns:
           fps.fps(): frames per second
    """
    fps = FPS().start()
    # the writer is created once the frame size is known; trackers is
    # initialized here so the tracking branch is safe on the first frames
    writer = None
    trackers = []
    while True:
        ok, frame = video_capture.read()
        if frame is None: # END of Video Loop
            break

        frame = cv2.resize(frame, (640, 480))
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        (height, width) = frame.shape[:2]
        # instantiate the video writer once, on the first frame, instead of
        # re-opening (and truncating) the output file on every frame
        if writer is None:
            writer = cv2.VideoWriter(output_video, fourcc, 30, (width, height), True)
        status = "Waiting"
        rects = []

        videotime = video_capture.get(cv2.CAP_PROP_POS_MSEC) / 1000
        if total_frames % skip_frames == 0:
            trackers = []

            detections, status = get_detections(net, frame, width, height)
            people_list = get_people(detections, classes, width, height, confidence)

            for rect in people_list:
                tracker = dlib.correlation_tracker()
                tracker.start_track(rgb, rect)
                trackers.append(tracker)
        else:
            rects, status = track_people(trackers, rgb)

        write_on_frame_line(frame, width, height)
        objects = ct.update(rects)

        for (object_identification, centroid) in objects.items():
            to = trackable_objects.get(object_identification, None)
            if to is None:
                to = TrackableObject(object_identification, centroid)
            else:
                # the difference between the x-coordinate of the *current*
                # centroid and the mean of *previous* x-coordinates gives the
                # horizontal direction of travel (negative = left, positive = right)
                x = [c[0] for c in to.centroids]
                direction = centroid[0] - np.mean(x)
                to.centroids.append(centroid)
                if not to.counted:
                    if direction < 0 and centroid[0] < width // 2:
                        total_left += 1
                        to.counted = True
                    elif direction > 0 and centroid[0] > width // 2:
                        total_right += 1
                        to.counted = True
            trackable_objects[object_identification] = to
            write_on_frame_object(object_identification, centroid, frame)

        write_on_frame_legend(total_left, total_right, videotime, frame, height)
        writer.write(frame)

        cv2.imshow("People Counter", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        total_frames += 1
        fps.update()

    fps.stop()
    if writer is not None:
        writer.release()
    cv2.destroyAllWindows()

    return fps.fps()
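
people_counter delegates detection and drawing to helpers that are not shown here (get_detections, get_people, track_people and the write_on_frame_* functions). As an illustration only, this is a plausible sketch of get_people, consistent with how the other examples filter MobileNet-SSD detections; the real helper may differ:

import dlib
import numpy as np

def get_people(detections, classes, width, height, confidence):
    # keep only confident "person" detections and return them as dlib
    # rectangles, ready to seed dlib.correlation_tracker.start_track()
    people = []
    for i in np.arange(0, detections.shape[2]):
        score = detections[0, 0, i, 2]
        if score <= confidence:
            continue
        idx = int(detections[0, 0, i, 1])
        if classes[idx] != "person":
            continue
        box = detections[0, 0, i, 3:7] * np.array([width, height, width, height])
        (startX, startY, endX, endY) = box.astype("int")
        people.append(dlib.rectangle(int(startX), int(startY), int(endX), int(endY)))
    return people
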
Пример #13
0
def getRealTimePeopleCount():
    try:
        # Grab path to current working directory
        CWD_PATH = os.getcwd()
        CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
                   "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
                   "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
                   "sofa", "train", "tvmonitor"]
        APP_DIRECTORY = 'detection_app'
        MODEL_DIRECTORY = 'mobilenet_ssd'
        OUTPUT_DIRECTORY = 'output'
        prototxt = os.path.join(CWD_PATH, APP_DIRECTORY, MODEL_DIRECTORY, 'MobileNetSSD_deploy.prototxt')
        model = os.path.join(CWD_PATH, APP_DIRECTORY, MODEL_DIRECTORY, 'MobileNetSSD_deploy.caffemodel')
        output = os.path.join(CWD_PATH, APP_DIRECTORY, OUTPUT_DIRECTORY, 'example_01.avi')
        input = os.path.join(CWD_PATH, APP_DIRECTORY, 'example_01.avi')
        input = None
        defaultConfidence = 0.4

        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(prototxt, model)

        # if a video path was not supplied, grab a reference to the webcam
        if input is None:
            print("[INFO] starting video stream...")
            vs = VideoStream(src='rtsp://*****:*****@[email protected]').start()
            time.sleep(2.0)

        # otherwise, grab a reference to the video file
        else:
            print("[INFO] opening video file...")
            vs = cv2.VideoCapture(input)

        # initialize the video writer (we'll instantiate later if need be)
        writer = None

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # initialize the total number of frames processed thus far, along
        # with the total number of objects that have moved either up or down
        totalFrames = 0
        peopleCount = 0
        # start the frames per second throughput estimator
        fps = FPS().start()

        # loop over frames from the video stream
        while True:
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream
            frame = vs.read()
            frame = frame[1] if input is not None else frame

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video
            if input is not None and frame is None:
                break

            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # if we are supposed to be writing a video to disk, initialize
            # the writer
            if output is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(output, fourcc, 15, (W, H), True)

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            currentStatus = "Waiting"
            rects = []

            # this variant runs the (more expensive) object detector on every
            # frame instead of alternating with correlation tracking
            if True:
                # set the status and initialize our new set of object trackers
                currentStatus = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > defaultConfidence:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        rects.append((startX, startY, endX, endY))
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # there is no correlation-tracking branch in this variant, so the
            # detections above are the only boxes passed to the centroid tracker
            # cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                else:
                    # check to see if the object has been counted or not
                    if not to.counted:
                        # count the object
                        peopleCount += 1
                        to.counted = True
                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                # cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
                cv2.rectangle(frame, ((centroid[0] - 30, centroid[1] - 40)), ((centroid[0] + 30, centroid[1] + 40)),
                              (0, 255, 0), 1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("PeopleCount", peopleCount)
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # check to see if we should write the frame to disk
            if writer is not None:
                writer.write(frame)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        outputVideoUrl = "localhost:8000/static/videos/example_01.avi"
        print("PeopleCount", peopleCount)
        print("outputVideoUrl", outputVideoUrl)
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # check to see if we need to release the video writer pointer
        if writer is not None:
            writer.release()

        # if we are not using a video file, stop the camera video stream
        if input is None:
            vs.stop()

        # otherwise, release the video file pointer
        else:
            vs.release()

        # close any open windows
        cv2.destroyAllWindows()
        return (peopleCount, outputVideoUrl)
    except Exception as ex:
        traceback.print_exc()
        raise ex
Пример #14
0
    def get_predicted_frame(self, frame, frame_number):

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        #frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if self.W is None or self.H is None:
            (self.H, self.W) = frame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if frame_number % self.skip_frames == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            self.trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                         127.5)
            self.net.setInput(blob)
            detections = self.net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > self.confidence:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if self.CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array(
                        [self.W, self.H, self.W, self.H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    self.trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in self.trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a vertical line in the center of the frame -- once an object
        # crosses this line we will determine whether it moved left or right
        #cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2), (0, 255, 255), 2)

        cv2.line(frame, (self.W // 2, 0), (self.W // 2, self.H), (0, 255, 255),
                 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = self.ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids gives the
                # vertical direction of motion; it is computed here, but the
                # counting below uses the x-coordinate line-crossing test instead
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    if (len(to.centroids) >= 2):
                        for c in range(len(to.centroids) - 1):
                            if (to.centroids[c][0] > (self.W // 2)
                                    and centroid[0] < (self.W // 2)):
                                self.totalUp += 1
                                to.counted = True
                                break
                            if (to.centroids[c][0] < (self.W // 2)
                                    and centroid[0] > (self.W // 2)):
                                self.totalDown += 1
                                to.counted = True
                                break
                    # # if the direction is negative (indicating the object
                    # # is moving up) AND the centroid is above the center
                    # # line, count the object
                    # if direction < 0 and centroid[0] < self.W // 2:
                    # 	self.totalUp += 1
                    # 	to.counted = True

                    # # if the direction is positive (indicating the object
                    # # is moving down) AND the centroid is below the
                    # # center line, count the object
                    # elif direction > 0 and centroid[0] > self.W // 2:
                    # 	self.totalDown += 1
                    # 	to.counted = True

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", self.totalUp),
            ("Down", self.totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        cv2.waitKey(25)

        return frame
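
The counting block in get_predicted_frame boils down to one question: did the track cross the vertical line at W // 2 between any earlier frame and now? A compact equivalent of that test, shown only to make the logic explicit:

def crossed_line(previous_centroids, current_centroid, line_x):
    # -1: crossed right-to-left, +1: crossed left-to-right, 0: no crossing yet
    for prev in previous_centroids:
        if prev[0] > line_x and current_centroid[0] < line_x:
            return -1
        if prev[0] < line_x and current_centroid[0] > line_x:
            return +1
    return 0
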
Пример #15
0
def thread_for_capturing_face():
    print("[INFO] Running Thread 1...")
    global net
    global total_faces_detected_locally
    global CLASSES
    global vs
    global ct
    global trackers
    global trackableObjects
    global totalFrames
    global totalDown
    global totalUp
    global totalPeople
    global run_program

    while run_program:
        ret, frame = vs.read()

        frame = cv2.resize(frame, (640, 480), interpolation=cv2.INTER_AREA)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        (H, W) = frame.shape[:2]

        status = "Waiting"
        rects = []

        if totalFrames % 10 == 0:
            status = "Detecting"
            trackers = []

            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > 0.5:
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)
        else:
            # loop over the trackers
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        cv2.line(frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)
        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                y = [c[1] for c in to.centroids]
                x = [c[0] for c in to.centroids]
                direction = centroid[0] - np.mean(x)
                print("Direction of person:", direction)
                print("Current Centroid {} vs. Middle {}".format(centroid[0], H //2))
                to.centroids.append(centroid)
                if not to.counted:
                    if direction < 0 and centroid[0] < H // 2:
                        totalUp += 1
                        totalPeople += 1
                        total_faces_detected_locally -= 1
                        # print(type(totalPeople))
                        to.counted = True
                    elif direction > 0 and centroid[0] > H // 2:
                        totalPeople -= 1
                        totalDown += 1
                        total_faces_detected_locally += 1
                        to.counted = True

            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)


        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Num Of people in the building: ", total_faces_detected_locally),
            ("Status", status),
        ]

        #for (i, (k, v)) in enumerate(info):
            #text = "{}: {}".format(k, v)
            #cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        #cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        # print("{} people entered in today".format(totalPeople))

        totalFrames += 1
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
Пример #16
0
def record(dining_hall, src, output, sf):
    args = {
        "input": src,
        "output": output,
        "confidence": 0.4,
        "skip_frames": sf
    }

    df = pd.DataFrame([], columns=['Location', 'Date', 'People'])

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    execution_path = os.getcwd()
    net = cv2.dnn.readNetFromCaffe(
        os.path.join(execution_path,
                     "mobilenet_ssd/MobileNetSSD_deploy.prototxt"),
        os.path.join(execution_path,
                     "mobilenet_ssd/MobileNetSSD_deploy.caffemodel"))

    # grab a reference to the video file
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(args["input"])

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            now = datetime.datetime.now()
            datetime_current = now.strftime("%Y-%m-%d %H:%M")
            output_path = 'output/{}{}.avi'.format(dining_hall[0],
                                                   datetime_current)
            writer = cv2.VideoWriter(os.path.join(execution_path, output_path),
                                     fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        if totalDown - totalUp > 0:
            now = datetime.datetime.now()
            df = df.append(
                {
                    'Location': dining_hall[0],
                    'Date': now.strftime("%Y-%m-%d %H:%M:%S"),
                    'People': totalDown - totalUp
                },
                ignore_index=True)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # otherwise, release the video file pointer
    vs.release()

    # write results to csv
    df.to_csv('results.csv', mode='a', index=True, header=False)

    # close any open windows
    cv2.destroyAllWindows()
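
Every snippet in this collection leans on a small TrackableObject helper that is not reproduced here. Judging from how it is used above (an objectID, a growing centroids list, and a counted flag), a minimal sketch could look like the following; note that the doStuff method below constructs it as TrackableObject(objectID, className, centroid) and reads to.type, so that project evidently uses an extended variant. This is an assumption about the original class, not a copy of it.

class TrackableObject:
    def __init__(self, objectID, centroid):
        self.objectID = objectID      # ID assigned by the centroid tracker
        self.centroids = [centroid]   # centroid history, used for direction estimates
        self.counted = False          # flipped once the object has been counted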
Example #17
0
    def doStuff(self, frame, origFrame, W, H):
        # the frame from BGR to RGB for dlib
        # frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        if self.origW is None or self.origH is None:
            (self.origW, self.origH) = origFrame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % self.SKIP_FRAMES == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            self.trackers = []

            yoloDetections = self.yoloInference.runInference(
                frame, W, H, self.CONFIDENCE_LIMIT)
            # loop over the detections
            for detection in yoloDetections:
                class_type = detection.classType

                # compute the (x, y)-coordinates of the bounding box
                # for the object
                box = detection.box[0:4] * np.array([1, 1, 1, 1])
                (startX, startY, endX, endY) = box.astype("int")

                # construct a dlib rectangle object from the bounding
                # box coordinates and then start the dlib correlation
                # tracker
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)

                if __myDebug__:
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 0, 0), 1)
                    cv2.putText(frame, class_type, (startX, startY),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

                tracker.start_track(rgb, rect)

                container = TrackerExt(class_type, tracker,
                                       (startX, startY, endX, endY))

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                self.trackers.append(container)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for trackerContainer in self.trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"
                tracker = trackerContainer.tracker

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                trackerContainer.rect = (startX, startY, endX, endY)
                # add the bounding box coordinates to the rectangles list
                rects.append(trackerContainer.rect)

                if __myDebug__:
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 0, 0), 2)
                    cv2.putText(frame, trackerContainer.class_type,
                                (startX, startY), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

        # hand the tracker containers (class name, dlib tracker, rect) to the
        # centroid tracker so it can associate old objects with the new detections
        extractedRects = list(self.trackers)

        objects = self.ct.update(extractedRects)

        # loop over the tracked objects
        for (objectID, centroidTrackerData) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)

            centroid = centroidTrackerData[0]
            className = centroidTrackerData[1]
            rect = centroidTrackerData[2]

            directionX = 0
            directionY = 0

            ratioH = self.origH / H
            ratioW = self.origW / W

            # if there is no existing trackable object, create one
            if to is None:

                rect2 = (rect[0] * ratioW, rect[1] * ratioH, rect[2] * ratioW,
                         rect[3] * ratioH)
                clipped = clipImage(origFrame, rect2)

                if className == 'car' or className == 'truck':
                    details = self.__getObjectDetails__(origFrame,
                                                        rect,
                                                        typeName=className)
                    if details and len(details) > 0:
                        predictions = details["predictions"]
                        try:
                            isPost = next((match for match in predictions
                                           if float(match["probability"]) > 0.7
                                           and match["tagName"] == "Post"),
                                          None)
                        except GeneratorExit:
                            pass
                        if isPost:
                            className = "postcar"
                            messageIoTHub = IoTHubMessage(
                                """{"Name":"Postauto"}""")
                            AppState.HubManager.send_event_to_output(
                                "output2", messageIoTHub, 0)

                self.__saveToBlobStorage(clipped,
                                         id=objectID,
                                         typeName=className)
                fullName = className + "-full"
                clipped = clipImage(origFrame, [0, 0, self.origW, self.origH])
                self.__saveToBlobStorage(clipped,
                                         id=objectID,
                                         typeName=fullName)

                to = TrackableObject(objectID, className, centroid)
                self.__sendToIoTHub__(to, rect, frame)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving
                y = [c[1] for c in to.centroids]
                x = [c[0] for c in to.centroids]

                directionY = centroid[1] - np.mean(y)
                directionX = centroid[0] - np.mean(x)

                if len(to.centroids) >= 300:
                    temp = to.centroids[2:]
                    to.centroids = temp
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    if int(directionY) == 0 and int(directionX) == 0:
                        #self.totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif directionY > 0 and centroid[1] > H // 2:
                        self.totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            directX = int(round(directionX, 1))
            directY = int(round(directionY, 1))
            colorCircle = (20, 250, 130)
            colorArrow = (0, 0, 250)
            colorText = (0, 255, 0)
            text = "{}: {}".format(objectID, to.type)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, colorText, 1)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, colorCircle, -1)
            cv2.arrowedLine(frame, (centroid[0], centroid[1]),
                            (centroid[0] + directX, centroid[1] + directY),
                            colorArrow, 2)
            #if len(to.centroids)>1 :
            #    cv2.polylines(frame, np.int32(to.centroids), True, colorCircle, 1)
        # increment the total number of frames processed thus far and
        # then update the FPS counter
        self.totalFrames += 1
        self.fps.update()
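
The doStuff method above calls a clipImage helper that is not part of the excerpt. Based on how it is invoked (a frame plus an (x1, y1, x2, y2) rectangle, sometimes with float coordinates and sometimes covering the whole frame), a plausible sketch is a bounds-clamped crop; this is an assumption, not the original implementation.

def clipImage(frame, rect):
    """Crop frame to rect = (x1, y1, x2, y2), clamped to the image bounds."""
    h, w = frame.shape[:2]
    x1, y1, x2, y2 = [int(round(v)) for v in rect]
    x1, x2 = max(0, x1), min(w, x2)
    y1, y2 = max(0, y1), min(h, y2)
    if x2 <= x1 or y2 <= y1:
        return None  # degenerate rectangle after clamping
    return frame[y1:y2, x1:x2].copy()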
Example #18
0
    def coreCounter(self):
        # load the model and the video to process
        net = self.loadModel()
        vs = self.loadVideo() #self.loadWebcam()

        # writer will be used as video writer
        writer = None
        #W and H are frame dimensions
        W = None
        H = None
        #Centroid Tracker is created
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}
        # totals used for later reporting
        totalFrames = 0
        totalDown = 0
        totalUp = 0
        record = 0
        #Start fps
        fps = FPS().start()
        
        #loop for video frames
        while True:
            frame = vs.read()
            # VideoCapture.read() returns a (grabbed, frame) tuple; keep the frame
            frame = frame[1]

            #if the video is over break the loop
            if self.videoInput is not None and frame is None:
                break
            
            frame = imutils.resize(frame,width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if W is None or H is None:
                (H, W) = frame.shape[:2]
                print(H,W)

            status = "Waiting"
            rects = []

            if totalFrames % self.skipFrames == 0:
                status = "Detecting"
                trackers = []

                blob = cv2.dnn.blobFromImage(frame, 0.007843,(W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()


                for i in np.arange(0, detections.shape[2]):
                    
                    confidence = detections[0, 0, i, 2]

                    if confidence > self.confidence:
                        
                        idx = int(detections[0, 0, i, 1])

                        if self.CLASSES[idx] != "person":
                            continue

                        box = detections[0, 0, i, 3:7] * np.array([W,H,W,H])
                        (startX, startY, endX, endY) = box.astype("int")

                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        trackers.append(tracker)
            
            else:
                for tracker in trackers:
                    status = "Tracking"

                    tracker.update(rgb)
                    pos = tracker.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    rects.append((startX, startY, endX, endY))




            objects = ct.update(rects)

            for(objectID, centroid) in objects.items():

                to = trackableObjects.get(objectID, None)

                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    y =[c[1] for c in  to.centroids]
                    if len(to.centroids) > 1:
                        p1, p2 = to.centroids[-2:]
                        for i in self.lines:
                            if i.complete:
                                check = self.intersect([i.x1,i.y1],[i.x2,i.y2],p1,p2)
                                if check:
                                    if i.type == 0:
                                        print([i.x1,i.y1],[i.x2,i.y2],p1,p2, check)   
                                        i.itPassed()
                                    else:
                                        print("alert")
                                        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                                        start = time.time()
                                        outputPath = "output/"+str(start)+".avi"
                                        writer = cv2.VideoWriter(outputPath, fourcc,30,(W, H), True)
                                        record = True
                                        

                    
                        
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    if not to.counted:
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            to.counted = True

                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            to.counted = True
                
                trackableObjects[objectID] = to

                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
            


            info = [
                ("Up", totalUp),
                ("Down", totalDown),
                ("Status", status),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i*20)+20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            

            cv2.setMouseCallback('Frame',self.draw_shape)
            for i in self.lines:
                if i.complete:
                    if i.type == 0:
                        cv2.line(frame, (i.x1, i.y1), (i.x2, i.y2), (255, 0, 0), 2)
                        cv2.imshow("Frame", frame)
                    elif i.type == 1:
                        cv2.line(frame, (i.x1, i.y1), (i.x2, i.y2), (255, 255, 0), 2)
                        cv2.imshow("Frame", frame)

                    

            if(record):
                writer.write(frame)
                end = time.time()
                if (end-start) > self.recordDuration:
                    writer.release()
                    record = False

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break
            
            totalFrames += 1
            fps.update()

        fps.stop()

        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        #change if it uses webcam
        vs.release()
        #vs.stop()
        cv2.destroyAllWindows()
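
coreCounter relies on a self.intersect helper (not shown) to decide whether the segment formed by an object's last two centroids crosses a user-drawn line. A common way to implement such a test is the counter-clockwise (CCW) orientation check; the sketch below uses that approach and is an assumption about the missing method, not the project's code.

def ccw(a, b, c):
    # True if the points a, b, c are in counter-clockwise order
    return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])

def intersect(a, b, c, d):
    # True if segment a-b crosses segment c-d (collinear edge cases ignored)
    return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, d) != ccw(a, b, c)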
Example #19
0
def display_instances(image, boxes, masks, ids, names, scores, nFrames,
                      totalDown, totalUp, trackers):
    """
        take the image and detection results and apply the mask, box, and label
    """

    # initialize the current status along with our list of bounding
    # box rectangles returned by either (1) our object detector or
    # (2) the correlation trackers
    status = "Waiting"
    rects = []
    """" - - - - - - - - - -- - - - - - -"""

    H = image.shape[0]
    W = image.shape[1]
    n_instances = boxes.shape[0]
    #print(n_instances)
    if not n_instances:
        print('NO INSTANCES TO DISPLAY')
    else:
        assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
    if nFrames % args["skip_frames"] == 0:
        status = "Detecting"
        trackers = []
    for i in range(n_instances):
        # detecting objects loop
        if not np.any(boxes[i]):
            continue
        label = names[ids[i]]
        if label != "car":  #and label != "person" and label != "motorcycle" and label != "bicycle":
            continue
        if label == "car" and scores[i] > 0.85:
            image, x1, y1, x2, y2 = detections(image, scores[i], class_dict,
                                               masks[:, :, i], label, boxes[i])
        else:
            continue
        # if label == "person" and scores[i] > 0.90:
        #     image, x1, y1, x2, y2 = detections(image, scores[i], class_dict, masks[:, :, i], label, boxes[i])
        # images and masks of image
        #image, x1, y1, x2, y2 = detections(image, scores[i], class_dict, masks[:, :, i], label, boxes[i])
        if nFrames % args["skip_frames"] != 0:
            continue
        # add the bounding box coordinates to the rectangles list
        # rects.append((x1, y1, x2, y2))
        # construct a dlib rectangle object from the bounding
        # box coordinates and then start the dlib correlation
        # tracker
        tracker = dlib.correlation_tracker()
        rect = dlib.rectangle(x1, y1, x2, y2)
        tracker.start_track(image, rect)
        # add the tracker to our list of trackers so we can
        # utilize it during skip frames
        trackers.append(tracker)

    if nFrames % args["skip_frames"] != 0:

        # loop over the trackers
        for tracker in trackers:
            # set the status of our system to be 'tracking' rather
            # than 'waiting' or 'detecting'
            status = "Tracking"
            # update the tracker and grab the updated position
            tracker.update(image)
            pos = tracker.get_position()
            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            # add the bounding box coordinates to the rectangles list
            rects.append((startX, startY, endX, endY))

    # draw a horizontal line in the center of the frame -- once an
    # object crosses this line we will determine whether they were
    # moving 'up' or 'down'
    cv2.line(image, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
    #print(len(rects))
    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)
    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)
        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)
        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)

            # check to see if the object has been counted or not
            if not to.counted:
                # if the direction is negative (indicating the object
                # is moving up) AND the centroid is above the center
                # line, count the object
                if direction < 0 and centroid[1] < H // 2:
                    totalUp += 1
                    to.counted = True

                # if the direction is positive (indicating the object
                # is moving down) AND the centroid is below the
                # center line, count the object
                elif direction > 0 and centroid[1] > H // 2:
                    totalDown += 1
                    to.counted = True

        # store the trackable object in our dictionary
        trackableObjects[objectID] = to
        # draw both the ID of the object and the centroid of the
        # object on the output frame
        text = "ID {}".format(objectID)
        cv2.putText(image, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (4, 31, 26), 2)
        cv2.circle(image, (centroid[0], centroid[1]), 4, (4, 31, 26), -1)
    # construct a tuple of information we will be displaying on the frame
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]

    # loop over the info tuples and draw them on our frame
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(image, text, (10, H - ((i * 20) + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    return image, totalDown, totalUp, trackers
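
display_instances delegates the actual drawing to a detections helper that the excerpt does not include. From its call site it appears to blend the instance mask into the frame, draw the box and label, and return the image together with the box corners. The sketch below works under those assumptions, takes class_dict to be a label-to-BGR-colour mapping, and assumes the usual Mask R-CNN (y1, x1, y2, x2) box ordering; it is illustrative, not the original function.

import cv2
import numpy as np

def detections(image, score, class_dict, mask, label, box, alpha=0.5):
    """Blend one instance mask into the frame and draw its box and label."""
    color = class_dict.get(label, (0, 255, 0))
    y1, x1, y2, x2 = box.astype("int")

    # colour only the masked pixels, then blend with the original frame
    overlay = image.copy()
    overlay[mask > 0] = color
    image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)

    caption = "{} {:.2f}".format(label, score)
    cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
    cv2.putText(image, caption, (x1, max(0, y1 - 5)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
    return image, x1, y1, x2, y2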
Example #20
0
    def run(self, confidence_thresh=0.4, skip_frames=30):
        firstframe = self.vs.read()
        if not firstframe[0]:
            logging.error(
                f"Did not get a single frame from input file {self.inputFile}")
            raise ValueError('Input file did not produce a single frame.')
        while True:
            # grab the next frame and handle if we are reading from
            # VideoStream
            frame = self.vs.read()
            frame = frame[1]

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video, restart
            if frame is None:
                self.vs = cv2.VideoCapture(self.inputFile)
                # also save out the video and delete the writer
                if self.writer is not None:
                    self.writer.release()
                    self.writer = None
                continue
            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them and initialize the hard-coded queue/exit region bounds
            if self.W is None or self.H is None:
                (self.H, self.W) = frame.shape[:2]
                queueXmin = 0
                queueXmax = 5 * self.W // 8
                queueYmin = self.H // 4
                queueYmax = 3 * self.H // 4
                exitXmin = 0
                exitXmax = self.W // 5
                exitYmin = self.H // 4
                exitYmax = 3 * self.H // 4

            # if we're outputting video (for the demo) check that the writer is initialized.
            if self.outputFile is not None and self.writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                self.writer = cv2.VideoWriter(self.outputFile, fourcc, 30,
                                              (self.W, self.H), True)

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if self.totalFrames % skip_frames == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > confidence_thresh:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if self.CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array(
                            [self.W, self.H, self.W, self.H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw box where the queue exists -- once an object enters this
            # area they are "in" the queue, and once they either exit or disappear we will
            # pop the queue and calculate the wait time.
            cv2.rectangle(frame, (queueXmin, queueYmin),
                          (queueXmax, queueYmax), (0, 0, 255), 2)

            # now draw the exit box
            cv2.rectangle(frame, (exitXmin, exitYmin), (exitXmax, exitYmax),
                          (0, 255, 0), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = self.ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = self.trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    # ignore objects that first appear inside the queue region; such detections are likely spurious
                    if queueXmin < centroid[
                            0] < queueXmax and queueYmin < centroid[
                                1] < queueYmax:
                        continue
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the centroid has entered the queue region, record the
                        # frame at which this person joined the queue
                        if queueXmin < centroid[
                                0] < queueXmax and queueYmin < centroid[
                                    1] < queueYmax:
                            self.startFramesQueue.put(self.totalFrames)
                            to.counted = True

                # if the object is in the exit, execute the exit procedure
                if exitXmin < centroid[0] < exitXmax and exitYmin < centroid[1] < exitYmax and \
                        objectID not in self.exitedIDs:
                    start_frame = self.startFramesQueue.get()
                    self.currentWaitTime = round(
                        (self.totalFrames - start_frame) /
                        30.0)  #assume 30 fps like a stream
                    self.exitedIDs.append(objectID)

                # store the trackable object in our dictionary
                self.trackableObjects[objectID] = to

                objColor = (0, 255, 0) if queueXmin < centroid[0] < queueXmax and \
                                          queueYmin < centroid[1] < queueYmax else (255, 0, 0)
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, objColor, 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, objColor, -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("Queue Length", self.startFramesQueue.qsize()),
                ("Last Wait Time (sec)", self.currentWaitTime),
                ("Status", status),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # check to see if we should write the frame to disk
            if self.writer is not None:
                self.writer.write(frame)
            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # try and simulate a real 30fps stream since we're using an input file and getting like 110+ fps
            #time.sleep(1.0/40)

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
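
All of these examples construct a CentroidTracker(maxDisappeared=..., maxDistance=...) and call update(rects), which returns an ordered mapping of object ID to centroid. The original class is not included in the excerpt; the following is a minimal sketch of a tracker with the same interface, assuming greedy nearest-centroid matching (the doStuff variant earlier clearly uses an extended version that also carries class names and rectangles).

from collections import OrderedDict

import numpy as np
from scipy.spatial import distance as dist


class SimpleCentroidTracker:
    """Minimal centroid tracker with an update(rects) interface."""

    def __init__(self, maxDisappeared=40, maxDistance=50):
        self.nextObjectID = 0
        self.objects = OrderedDict()      # objectID -> current centroid
        self.disappeared = OrderedDict()  # objectID -> frames since last seen
        self.maxDisappeared = maxDisappeared
        self.maxDistance = maxDistance

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        # no detections: age every tracked object and drop the stale ones
        if len(rects) == 0:
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        # compute the centroid of every input bounding box
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            inputCentroids[i] = ((startX + endX) // 2, (startY + endY) // 2)

        # nothing tracked yet: register everything
        if len(self.objects) == 0:
            for centroid in inputCentroids:
                self.register(centroid)
            return self.objects

        # greedily match existing objects to the nearest new centroid
        objectIDs = list(self.objects.keys())
        D = dist.cdist(np.array(list(self.objects.values())), inputCentroids)
        rows = D.min(axis=1).argsort()
        cols = D.argmin(axis=1)[rows]

        usedRows, usedCols = set(), set()
        for (row, col) in zip(rows, cols):
            if row in usedRows or col in usedCols or D[row, col] > self.maxDistance:
                continue
            objectID = objectIDs[row]
            self.objects[objectID] = inputCentroids[col]
            self.disappeared[objectID] = 0
            usedRows.add(row)
            usedCols.add(col)

        # unmatched existing objects disappear for one more frame
        for row in set(range(D.shape[0])) - usedRows:
            objectID = objectIDs[row]
            self.disappeared[objectID] += 1
            if self.disappeared[objectID] > self.maxDisappeared:
                self.deregister(objectID)

        # unmatched detections become new objects
        for col in set(range(D.shape[1])) - usedCols:
            self.register(inputCentroids[col])

        return self.objects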
Example #21
0
def detect(save_img):
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(
        ('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

    # Initialize
    set_logging()
    cars = MainController.getLatestVehicleAmount('cars', 'carId')
    motors = MainController.getLatestVehicleAmount('motorcycles',
                                                   'motorcycleId')
    trucks = MainController.getLatestVehicleAmount('trucks', 'truckId')

    totalCarAmount = cars + motors + trucks
    totalCars = cars
    totalTrucks = trucks
    totalMotors = motors
    displayTotalAmount = totalCarAmount
    displayCarAmount = totalCars
    displayTruckAmount = totalTrucks
    displayMotorAmount = totalMotors
    oldCombinedAmount = 0
    combinedAmount = 0
    tempAmount = 0

    # Video = False, Webcam = True
    control = False

    elapsed = 0
    device = select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    start = time.time()
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(
            torch.load('weights/resnet101.pt',
                       map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)

    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    # colors = [[np.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference

    t0 = time.time()
    ct = CentroidTracker()
    listDet = ['car', 'motorcycle', 'truck']

    totalDownCar = 0
    totalDownMotor = 0
    totalDownTruck = 0

    totalUpCar = 0
    totalUpMotor = 0
    totalUpTruck = 0

    trackableObjects = {}

    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img
              ) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        elapsed = time.time() - start
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        rects = []
        labelObj = []
        arrCentroid = []
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                # cv2.resize(im0, (2560, 1440))
            else:
                p, s, im0 = path, '', im0s
                # cv2.resize(im0, (2560, 1440))

            height, width, channels = im0.shape
            cv2.line(im0, (0, int(height / 1.5)),
                     (int(width), int(height / 1.5)), (255, 0, 0),
                     thickness=3)

            # overlay labels are Danish: 'Totale koeretoejer' = total vehicles,
            # 'Bil' = car, 'Motorcykel' = motorcycle, 'Lastbil' = truck
            if not control:
                cv2.putText(im0,
                            'Totale koeretoejer: ' + str(displayTotalAmount),
                            (int(width * 0.02), int(height * 0.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                cv2.putText(im0, 'Bil: ' + str(displayCarAmount),
                            (int(width * 0.02), int(height * 0.55)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
                cv2.putText(im0, 'Motorcykel: ' + str(displayMotorAmount),
                            (int(width * 0.02), int(height * 0.60)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
                cv2.putText(im0, 'Lastbil: ' + str(displayTruckAmount),
                            (int(width * 0.02), int(height * 0.65)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
            else:
                cv2.putText(im0,
                            'Totale koeretoejer: ' + str(displayTotalAmount),
                            (int(width * 0.02), int(height * 0.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

            # cv2.line(im0, (int(width / 1.8), int(height / 1.5)), (int(width), int(height / 1.5)), (255, 127, 0), thickness=3)

            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + (
                '_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1,
                                          0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    label = '%s %.2f' % (names[int(cls)], conf)
                    # print(xyxy)
                    x = xyxy
                    tl = None or round(0.002 * (im0.shape[0] + im0.shape[1]) /
                                       2) + 1  # line/font thickness
                    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
                    label1 = label.split(' ')
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                                gn).view(-1).tolist()  # normalized xywh
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * 5 + '\n') %
                                    (cls, *xywh))  # label format

                    if label1[0] in listDet:
                        cv2.rectangle(im0,
                                      c1,
                                      c2, (0, 0, 0),
                                      thickness=tl,
                                      lineType=cv2.LINE_AA)
                        box = (int(x[0]), int(x[1]), int(x[2]), int(x[3]))
                        rects.append(box)
                        labelObj.append(label1[0])
                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(label,
                                                 0,
                                                 fontScale=tl / 3,
                                                 thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(im0, c1, c2, (0, 100, 0), -1,
                                      cv2.LINE_AA)
                        cv2.putText(im0,
                                    label, (c1[0], c1[1] - 2),
                                    0,
                                    tl / 3, [225, 255, 255],
                                    thickness=tf,
                                    lineType=cv2.LINE_AA)

                detCentroid = generateCentroid(rects)
                objects = ct.update(rects)

                for (objectID, centroid) in objects.items():
                    arrCentroid.append(centroid[1])
                for (objectID, centroid) in objects.items():
                    # print(idxDict)
                    to = trackableObjects.get(objectID, None)
                    if to is None:
                        to = TrackableObject(objectID, centroid)
                    else:
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        to.centroids.append(centroid)
                        if not to.counted:

                            # moving up: the lower bound (height / 1.7) avoids counting a
                            # distant car twice when its bounding box reappears
                            if direction < 0 and centroid[
                                    1] < height / 1.5 and centroid[
                                        1] > height / 1.7:
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'car'):
                                    totalUpCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalUpMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalUpTruck += 1
                                    to.counted = True

                            elif direction > 0 and centroid[
                                    1] > height / 1.5:  # moving down
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'car'):
                                    totalDownCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalDownMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalDownTruck += 1
                                    to.counted = True

                    trackableObjects[objectID] = to

                oldCarAmount = totalCarAmount
                oldTotalCars = totalCars
                oldTotalTrucks = totalTrucks
                oldTotalMotors = totalMotors

                combinedAmount = totalDownCar + totalDownTruck + totalDownMotor + \
                                 totalUpCar + totalUpMotor + totalUpTruck

                totalCars = totalDownCar + totalUpCar
                totalTrucks = totalDownTruck + totalUpTruck
                totalMotors = totalDownMotor + totalUpMotor

                if not oldCombinedAmount == combinedAmount:
                    tempAmount = totalCarAmount + combinedAmount
                    oldCombinedAmount = combinedAmount

                if oldCarAmount < tempAmount:
                    totalCarAmount = tempAmount

                if not oldCarAmount == totalCarAmount:
                    displayTotalAmount += 1

                    if not oldTotalCars == totalCars:
                        dbInsOrUpdCar(totalCars)
                        displayCarAmount += 1

                    if not oldTotalTrucks == totalTrucks:
                        dbInsOrUpdTruck(totalTrucks)
                        displayTruckAmount += 1

                    if not oldTotalMotors == totalMotors:
                        dbInsOrUpdMotorcycle(totalMotors)
                        displayMotorAmount += 1

                if not control:
                    # 'Frakoerende' = outbound traffic, 'Modkoerende' = oncoming traffic
                    cv2.putText(im0, 'Frakoerende: ',
                                (int(width * 0.6), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                    cv2.putText(im0, 'Bil: ' + str(totalUpCar),
                                (int(width * 0.6), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalUpMotor),
                                (int(width * 0.6), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Lastbil: ' + str(totalUpTruck),
                                (int(width * 0.6), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)

                    cv2.putText(im0, 'Modkoerende: ',
                                (int(width * 0.02), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                    cv2.putText(im0, 'Bil: ' + str(totalDownCar),
                                (int(width * 0.02), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalDownMotor),
                                (int(width * 0.02), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Lastbil: ' + str(totalDownTruck),
                                (int(width * 0.02), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                else:
                    cv2.putText(im0, 'Frakoerende: ',
                                (int(width * 0.6), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 4, (50, 255, 255), 3)
                    cv2.putText(im0, 'Bil: ' + str(totalUpCar),
                                (int(width * 0.6), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalUpMotor),
                                (int(width * 0.6), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Lastbil: ' + str(totalUpTruck),
                                (int(width * 0.6), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

                    cv2.putText(im0, 'Modkoerende: ',
                                (int(width * 0.02), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 4, (50, 255, 255), 3)
                    cv2.putText(im0, 'Bil: ' + str(totalDownCar),
                                (int(width * 0.02), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalDownMotor),
                                (int(width * 0.02), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Lastbil: ' + str(totalDownTruck),
                                (int(width * 0.02), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.namedWindow('Main', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('Main', 1920, 1080)
                cv2.imshow("Main", im0)

                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)

                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release(
                            )  # release previous video writer

                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*fourcc), fps,
                            (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % Path(out))

    print('Done. (%.3fs)' % (time.time() - t0))
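The example above only touches the database when a per-class running total actually changes (the oldTotalMotors check near the top of this section). A minimal sketch of that guard, assuming a hypothetical SQLite table vehicle_counts(vclass TEXT PRIMARY KEY, total INTEGER); the real dbInsOrUpdMotorcycle helper is not shown in the snippet:

import sqlite3

def get_conn(path="counts.db"):
    # hypothetical storage; the original uses its own dbInsOrUpd* helpers
    conn = sqlite3.connect(path)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS vehicle_counts (vclass TEXT PRIMARY KEY, total INTEGER)")
    return conn

def db_upsert_count(conn, vclass, total):
    # keep one row per vehicle class and overwrite its running total
    conn.execute(
        "INSERT INTO vehicle_counts (vclass, total) VALUES (?, ?) "
        "ON CONFLICT(vclass) DO UPDATE SET total = excluded.total",
        (vclass, total))
    conn.commit()

def update_if_changed(conn, vclass, old_total, new_total):
    # only touch the database when the counter actually moved
    if old_total != new_total:
        db_upsert_count(conn, vclass, new_total)
    return new_total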
Example #22
0
def demo():

    params = {
        "video": "video.avi",  # Video to run detection upon
        "dataset": "pasacal",  # Dataset on which the network has been trained
        "confidence": 0.5,  # Object Confidence to filter predictions
        "nms_thresh": 0.4,  # NMS Threshold
        "cfgfile": "cfg/yolov3.cfg",  # Config file
        "weightsfile": "yolov3.weights",  # Weightsfile
        "repo":
        416  # Input resolution of the network.  Increase to increase accuracy.  Decrease to increase speed
    }

    confidence = float(params["confidence"])
    nms_thresh = float(params["nms_thresh"])
    start = 0

    CUDA = torch.cuda.is_available()

    num_classes = 80

    bbox_attrs = 5 + num_classes  # box coords + objectness, plus one score per class

    bboxes = []
    xywh = []

    print("Loading network.....")
    model = Darknet(params["cfgfile"])
    model.load_weights(params["weightsfile"])
    print("Network successfully loaded")

    model.net_info["height"] = params["reso"]
    inp_dim = int(model.net_info["height"])
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()

    model.eval()

    videofile = params["video"]
    # activate our centroid tracker
    (H, W) = (None, None)
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # set 0 for debug
    cap = cv2.VideoCapture(0)
    fps = FPS().start()
    rects = []
    status = "Waiting.."

    assert cap.isOpened(), 'Cannot capture source'

    frames = 0
    start = time.time()
    while cap.isOpened():

        ret, frame = cap.read()
        if ret:

            img, orig_im, dim = prep_image(frame, inp_dim)
            im_dim = torch.FloatTensor(dim).repeat(1, 2)

            if CUDA:
                im_dim = im_dim.cuda()
                img = img.cuda()

            with torch.no_grad():
                output = model(Variable(img), CUDA)
            output = write_results(output,
                                   confidence,
                                   num_classes,
                                   nms=True,
                                   nms_conf=nms_thresh)

            if type(output) == int:
                frames += 1
                print("[INFO] no detections in this frame")
                print("FPS of the video is {:5.2f}".format(
                    frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue

            im_dim = im_dim.repeat(output.size(0), 1)
            scaling_factor = torch.min(inp_dim / im_dim, 1)[0].view(-1, 1)

            output[:,
                   [1, 3]] -= (inp_dim -
                               scaling_factor * im_dim[:, 0].view(-1, 1)) / 2
            output[:,
                   [2, 4]] -= (inp_dim -
                               scaling_factor * im_dim[:, 1].view(-1, 1)) / 2

            output[:, 1:5] /= scaling_factor
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            for i in range(output.shape[0]):
                output[i, [1, 3]] = torch.clamp(output[i, [1, 3]], 0.0,
                                                im_dim[i, 0])
                output[i, [2, 4]] = torch.clamp(output[i, [2, 4]], 0.0,
                                                im_dim[i, 1])

            for i in output:
                x0 = i[1].int()
                y0 = i[2].int()
                x1 = i[3].int()
                y1 = i[4].int()
                bbox = (x0, y0, x1, y1)
                bboxes.append(bbox)
                print(bbox)
                w = x1 - x0
                h = y1 - y0
                xywh.append((x0, y0, w, h))
                print(x0, y0, w, h)

                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(x0, y0, x1, y1)
                tracker.start_track(rgb, rect)

                trackers.append(tracker)

            for tracker in trackers:
                # set the status of the system to tracking
                status = "Tracking.."
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()
                # Unpack the position
                x0 = int(pos.left())
                y0 = int(pos.top())
                x1 = int(pos.right())
                y1 = int(pos.bottom())
                # add the bounding box coordinates to the rectangles list
                rects.append((x0, y0, x1, y1))
            # draw the horizontal center line used to decide 'up' vs 'down'
            # (use the frame dimensions, not the last detection's w/h)
            frame_h, frame_w = frame.shape[:2]
            cv2.line(frame, (0, frame_h // 2), (frame_w, frame_h // 2), (0, 255, 255), 2)
            objects = ct.update(rects)
            # Loop through the tracked objects
            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)
                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)
                    if not to.counted:
                        # if the direction is negative
                        # indicating the object is moving up
                        # and the centroid is above the center line
                        # count the object
                        if direction < 0 and centroid[1] < frame_h // 2:
                            totalUp += 1
                            to.counted = True
                        # if the direction is positive
                        # indicating the object is moving down
                        # and centroid is below the center line
                        elif direction > 0 and centroid[1] > frame_h // 2:
                            totalDown += 1
                            to.counted = True

                # store the trackable object in the dictionary
                trackableObjects[objectID] = to

                #draw both the ID of the object and the centroid of the object
                # on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)
                info = [("Up", totalUp), ("Down", totalDown),
                        ("Status", status)]
                for (i, (k, v)) in enumerate(info):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, frame_h - ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 225), 2)

            #return bboxes

            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))

            # write bbox
            list(map(lambda x: write(x, orig_im, classes, colors), output))

            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            fps.update()
            fps.stop()
            print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

            #return xywh

        else:
            break
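demo() maps the network-space boxes back onto the original frame by undoing the letterbox resize: subtract the padding added along the shorter side, then divide by the scale factor and clamp to the frame. A NumPy-only sketch of the same arithmetic (the function name undo_letterbox is illustrative, not part of the snippet):

import numpy as np

def undo_letterbox(boxes_xyxy, inp_dim, orig_w, orig_h):
    # boxes_xyxy: array of (x1, y1, x2, y2) in the inp_dim x inp_dim network space
    boxes = np.asarray(boxes_xyxy, dtype=np.float32).copy()
    scale = min(inp_dim / orig_w, inp_dim / orig_h)       # factor used when letterboxing
    pad_x = (inp_dim - scale * orig_w) / 2                # horizontal padding added
    pad_y = (inp_dim - scale * orig_h) / 2                # vertical padding added
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_x) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_y) / scale
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, orig_w)   # keep boxes inside the frame
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, orig_h)
    return boxes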
def videoDetection():
    global detectVideoName, line, startPoint, endPoint, lineFlag, startCountFlag, odapi, initBB, roi_area, roi_elements, totalDown

    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalFrames = 0
    W = None
    H = None
    roi = 250

    frame_size_w = 500
    frame_size_h = 400

    model_path = 'faster_rcnn_inception_v2/frozen_inference_graph.pb'
    odapi = DetectorAPI(path_to_ckpt=model_path)
    threshold = 0.7

    #cap = cv2.VideoCapture(UtilsIO.SAMPLE_FILE_NAME_2)
    # cap = c4v2.VideoCapture(config.CONFIG_IP_CAM)
    cap = cv2.VideoCapture("videos/" + str(detectVideoName) + ".avi")

    # start the frames per second throughput estimator
    fps = FPS().start()

    while True:
        r, img = cap.read()

        if img is None:
            if int(detectVideoName) > 1:
                os.remove("videos/" + str(detectVideoName - 1) + ".avi")

            detectVideoName += 1
            cap.release()
            cv2.destroyAllWindows()
            videoDetection()
            return

        img = cv2.resize(img, (frame_size_w, frame_size_h))
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cv2.namedWindow('preview')

        cv2.setMouseCallback('preview', on_mouse)
        if lineFlag:
            if startPoint and endPoint:
                try:
                    cv2.line(img, (line[0], line[1]), (line[2], line[3]),
                             (255, 0, 255), 2)
                except:
                    pass

        if (initBB is not None and roi_elements is None) or (
                initBB is not None and initBB != roi_elements.box):
            roi_elements = RoiElements(initBB)

        if initBB is not None:
            roi_area = roi_elements.getRoiArea(img)

        if W is None or H is None:
            (H, W) = img.shape[:2]

        status = "Waiting"
        rects = []

        if totalFrames % 8 == 0:
            status = "Detecting"
            trackers = []
            if roi_area is not None:
                boxes, scores, classes, num = odapi.processFrame(roi_area)

            else:
                boxes, scores, classes, num = odapi.processFrame(img)

            # Visualization of the results of a detection.
            for i in range(len(boxes)):
                # Class 1 represents human
                if classes[i] == 1 and scores[i] > threshold:
                    box = boxes[i]

                    tracker = dlib.correlation_tracker()
                    if roi_elements is not None:

                        left = box[1] + roi_elements.roi_area.coord_right
                        top = box[0] + roi_elements.roi_area.coord_bottom
                        right = box[3] + roi_elements.roi_area.coord_right
                        bottom = box[2] + roi_elements.roi_area.coord_bottom
                        rect = dlib.rectangle(int(left), int(top), int(right),
                                              int(bottom))
                    else:
                        left = box[1]
                        top = box[0]
                        right = box[3]
                        bottom = box[2]
                        rect = dlib.rectangle(int(left), int(top), int(right),
                                              int(bottom))
                    tracker.start_track(rgb, rect)

                    trackers.append(tracker)

        else:
            for tracker in trackers:
                status = "Tracking"

                tracker.update(rgb)
                pos = tracker.get_position()

                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                rects.append((startX, startY, endX, endY))
                cv2.rectangle(img, (startX, startY), (endX, endY), (0, 255, 0),
                              2)

            #        cv2.line(img, (0, roi), (W, roi), (0, 255, 255), 2)

            objects = ct.update(rects)

            if startCountFlag and roi_elements is not None and roi_elements.line is not None:
                dy = roi_elements.calcMeanYDistance()
                dx = roi_elements.calcMeanXDistance()
                angle = roi_elements.calculateAngle()

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():

                if roi_elements is not None and roi_elements.line is not None:
                    if roi_elements.checkCentroidInsideLine(centroid) == False:
                        pass
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    directionY = centroid[1] - np.mean(y)
                    x = [c[0] for c in to.centroids]
                    directionX = centroid[0] - np.mean(x)

                    if not to.counted:

                        if roi_elements is not None and roi_elements.line is not None:

                            if angle > 45 and directionX > 0 and centroid[
                                    0] > dx:
                                totalDown += 1
                                to.counted = True
                                frameNew = img[startY:endY, startX:endX]
                                cv2.imshow("image", frameNew)
                                UtilsIO.saveImages(config.CONFIG_IP_CAM,
                                                   totalDown, frameNew)

                            elif angle < 45 and directionY > 0 and centroid[
                                    1] > dy:
                                totalDown += 1
                                to.counted = True
                                frameNew = img[startY:endY, startX:endX]
                                UtilsIO.saveImages(config.CONFIG_IP_CAM,
                                                   totalDown, frameNew)

                        elif (directionY > 0
                              and centroid[1] > frame_size_h // 2):
                            totalDown += 1
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("Down", totalDown),
                ("Status", status),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(img, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            if roi_elements is not None:
                # roi area
                cv2.rectangle(img, (roi_elements.roi_area.coord_left,
                                    roi_elements.roi_area.coord_top),
                              (roi_elements.roi_area.coord_right,
                               roi_elements.roi_area.coord_bottom),
                              (0, 255, 0), 2)
            if roi_elements is not None and roi_elements.line is not None:
                cv2.line(img, (roi_elements.line[0], roi_elements.line[1]),
                         (roi_elements.line[2], roi_elements.line[3]),
                         (255, 0, 255), 2)

            # show the output frame
            cv2.imshow("preview", img)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            if key == ord("s"):
                initBB = None
                initBB = cv2.selectROI("ROI_FRAME",
                                       img,
                                       fromCenter=False,
                                       showCrosshair=False)
            #            cv2.imshow("tmp", tmp)
            #            cv2.namedWindow('real image')
            #            a = cv.SetMouseCallback('real image', on_mouse, 0)
            #            cv2.imshow('real image', img)

            if key == ord("x"):
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                if lineFlag == False:
                    lineFlag = True
                else:
                    lineFlag = False

            if key == ord("b"):
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                if startCountFlag is False:
                    startCountFlag = True
                    if roi_elements is not None and roi_elements.line is None:
                        roi_elements.setLine(line)
                        totalDown = 0
                else:
                    startCountFlag = False

            # increment the total number of frames processed thus far and
            # then update the FPS counter
        totalFrames += 1
        fps.update()

        # stop the timer and display FPS information
    fps.stop()
    # print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
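videoDetection() picks its comparison axis from the user-drawn line's angle: for steep lines (angle > 45 degrees) it tests horizontal movement past the line, otherwise vertical movement. The internals of calcMeanXDistance, calcMeanYDistance and calculateAngle are not shown, so this sketch assumes they reduce to the line's midpoint and slope angle:

import math
import numpy as np

def crossed_line(line, history, centroid):
    # line = (x1, y1, x2, y2); history = previous (x, y) centroids of this object
    x1, y1, x2, y2 = line
    angle = abs(math.degrees(math.atan2(y2 - y1, x2 - x1)))
    line_x, line_y = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    dir_x = centroid[0] - np.mean([p[0] for p in history])
    dir_y = centroid[1] - np.mean([p[1] for p in history])
    if angle > 45:                               # near-vertical reference line
        return dir_x > 0 and centroid[0] > line_x
    return dir_y > 0 and centroid[1] > line_y    # near-horizontal reference line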
Example #24
0
def contar(lista):
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(
        "mobilenet_ssd/MobileNetSSD_deploy.prototxt",
        "mobilenet_ssd/MobileNetSSD_deploy.caffemodel")
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(f"{lista[0]}")
    writer = None
    W = None
    H = None
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    fps = FPS().start()
    while True:
        frame = vs.read()
        frame = frame[1]
        if f"{lista[0]}" is not None and frame is None:
            break
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if "static/output_02.mp4 " is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter("static/output_02.mp4", fourcc, 30,
                                     (W, H), True)
        status = "Waiting"
        rects = []
        if totalFrames % 30 == 0:
            status = "Detecting"
            trackers = []
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()
            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > 0.4:
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        objects = ct.update(rects)
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                if not to.counted:
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True
            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        if writer is not None:
            writer.write(frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        totalFrames += 1
        fps.update()
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
Example #25
0
    def run(self):
        # load our serialized model from disk
        net = cv2.dnn.readNetFromCaffe(self.prototxt, self.model)
        vs = PiVideoStream().start()
        time.sleep(2.0)

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # start the frames per second throughput estimator
        fps = FPS().start()

        # loop over frames from the video stream
        while True:

            if self.stop_thread:
                break
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream

            frame = vs.read()

            if frame is None:
                break

            # resize the frame to have a maximum width of 300 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=300)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them

            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if self.totalFrames % self.skipFrames == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > self.confidence:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if self.CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < H // 2:
                            self.totalIn += 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > H // 2:
                            self.totalOut += 1
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("In", self.totalIn),
                ("Out", self.totalOut),
                ("Status", status),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # close any open windows
        cv2.destroyAllWindows()
        vs.stop()
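run() feeds each frame to the Caffe MobileNet-SSD with blobFromImage(frame, 0.007843, (W, H), 127.5): pixels are mean-subtracted by 127.5 and scaled by 1/127.5, so the network sees values roughly in [-1, 1]. A sketch of a detection helper built around that call (the function name detect_persons is illustrative, not part of the snippet):

import cv2
import numpy as np

def detect_persons(net, frame, class_names, conf_thresh=0.4):
    # run a Caffe MobileNet-SSD on one frame and return "person" boxes in pixels
    (H, W) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
    net.setInput(blob)
    detections = net.forward()
    boxes = []
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        idx = int(detections[0, 0, i, 1])
        if confidence > conf_thresh and class_names[idx] == "person":
            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            boxes.append(box.astype("int"))
    return boxes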
    def main(self):
        
        totalUp = 0
        totalDown = 0

        try:
            while True:

                (rpiName, frame) = self.imageHub.recv_image()
                self.imageHub.send_reply(b'OK')
                # frame = frame[1] if args.get("input", False) else frame
                if rpiName not in self.lastActive.keys():
                    print("[INFO] receiving data from {}...".format(rpiName))
                self.lastActive[rpiName] = datetime.now()
                
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if self.W is None or self.H is None:
                    (self.H, self.W) = frame.shape[:2]

                status = "Waiting"
                rects = []

                if self.totalFrames % self.skipframes == 0:
                
                    status = "Detecting"
                    self.trackers = []

                    blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H), 127.5)
                    self.net.setInput(blob)
                    detections = self.net.forward()

                    for i in np.arange(0, detections.shape[2]):
                        
                        confidence = detections[0, 0, i, 2]

                        if confidence > self.confidence:
                    
                            idx = int(detections[0, 0, i, 1])

                            if self.CLASSES[idx] != "person":
                                continue

                            box = detections[0, 0, i, 3:7] * np.array([self.W, self.H, self.W, self.H])
                            (startX, startY, endX, endY) = box.astype("int")
                            rects.append((startX, startY, endX, endY))

                            tracker = dlib.correlation_tracker()
                            rect = dlib.rectangle(startX, startY, endX, endY)
                            tracker.start_track(rgb, rect)

                            self.trackers.append(tracker)

                else:
                    for tracker in self.trackers:
                        
                        status = "Tracking"

                        tracker.update(rgb)
                        pos = tracker.get_position()

                        startX = int(pos.left())
                        startY = int(pos.top())
                        endX = int(pos.right())
                        endY = int(pos.bottom())

                        rects.append((startX, startY, endX, endY))

                cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2), (0, 255, 255), 2)

                objects = self.ct.update(rects)

                for (objectID, centroid) in objects.items():
                    
                    to = self.trackableObjects.get(objectID, None)

                    if to is None:
                        to = TrackableObject(objectID, centroid)

                    else:
                        
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        to.centroids.append(centroid)

                        if not to.counted:

                            if direction < 0 and centroid[1] < self.H // 2:
                                totalUp += 1
                                to.counted = True

                            elif direction > 0 and centroid[1] > self.H // 2:
                                totalDown += 1
                                to.counted = True

                    self.trackableObjects[objectID] = to

                    text = "ID {}".format(objectID)
                    cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

                info = [
                    ("Up", totalUp),
                    ("Down", totalDown),
                    ("Status", status),
                ]

                cv2.putText(frame, rpiName, (10, 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                for (i, (k, v)) in enumerate(info):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                
                #self.frameDict[rpiName] = frame

                #montages = build_montages(self.frameDict.values(), (self.W, self.H), (self.mW, self.mH))

                #for (i, montage) in enumerate(montages):
                #    cv2.imshow("Home pet location monitor ({})".format(i),montage)

                # show the output frame
                cv2.imshow("Frame", frame)
                key = cv2.waitKey(1) & 0xFF

                if (datetime.now() - self.lastActiveCheck).seconds > ACTIVE_CHECK_SECONDS:
                # loop over all previously active devices
                    for (rpiName, ts) in list(self.lastActive.items()):
                # remove the RPi from the last active and frame
                # dictionaries if the device hasn't been active recently
                        if (datetime.now() - ts).seconds > ACTIVE_CHECK_SECONDS:
                            print("[INFO] lost connection to {}".format(rpiName))
                            self.lastActive.pop(rpiName)
                            #frameDict.pop(rpiName)

                    # set the last active check time as current time
                    self.lastActiveCheck = datetime.now()

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

                # increment the total number of frames processed thus far and
                # then update the FPS counter
                self.totalFrames += 1
                self.fps.update()
        
        except Exception as e:
            print("[WARN] capture loop stopped: {}".format(e))
        # stop the timer and display FPS information
        self.fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

        cv2.destroyAllWindows()
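main() receives frames from several Raspberry Pi senders over imagezmq and periodically drops senders that have gone quiet. A trimmed sketch of just that bookkeeping (ACTIVE_CHECK_SECONDS is assumed to be a module constant; the detection and counting code is omitted):

from datetime import datetime
import imagezmq

ACTIVE_CHECK_SECONDS = 10          # assumption: mirrors the constant used above
image_hub = imagezmq.ImageHub()    # binds tcp://*:5555 by default
last_active = {}
last_active_check = datetime.now()

while True:
    rpi_name, frame = image_hub.recv_image()
    image_hub.send_reply(b'OK')
    if rpi_name not in last_active:
        print("[INFO] receiving data from {}...".format(rpi_name))
    last_active[rpi_name] = datetime.now()

    # prune senders that have not been heard from recently
    if (datetime.now() - last_active_check).seconds > ACTIVE_CHECK_SECONDS:
        for name, ts in list(last_active.items()):
            if (datetime.now() - ts).seconds > ACTIVE_CHECK_SECONDS:
                print("[INFO] lost connection to {}".format(name))
                last_active.pop(name)
        last_active_check = datetime.now()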
def thread_for_capturing_face():
    print("[INFO] Running Thread 1...")
    global net
    global total_faces_detected_locally
    global CLASSES
    global vs
    global ct
    global trackers
    global trackableObjects
    global totalFrames
    global totalDown
    global totalUp
    global totalPeople
    global centroid_list
    while True:
        ret, frame = vs.read()

        #frame = frame
        frame = cv2.resize(frame, (240, 240), interpolation=cv2.INTER_AREA)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        (H, W) = frame.shape[:2]

        status = "Waiting"
        rects = []

        if totalFrames % 10 == 0:
            status = "Detecting"
            trackers = []

            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > 0.5:
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)
        else:
            # loop over the trackers
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():
            if objectID in moveDict:
                values = moveDict[objectID]
                values.append(centroid[1])
                moveDict[objectID] = values
            else:
                moveDict[objectID] = [centroid[1]]
            #print("[MOVE DICTIONARY]: ", moveDict)
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                #y = [c[1] for c in to.centroids]
                #x = [c[0] for c in to.centroids]
                #direction = centroid[0] - np.mean(x)
                #print("Direction of person:", direction)
                #print("Current Centroids 1: {} 2: {} vs. Middle {}".format(centroid[0], centroid[1], W //2))
                centroid_list.append(centroid[0])
                to.centroids.append(centroid)
                if not to.counted:
                    """
                    final_centroid = centroid_list[-1]
                    beginning_centroid = centroid_list[0]
                    if final_centroid < H // 2:
                        #print("[SRINI]: ", len(centroid_list))
                        #print("[MILAN]: ", (final_centroid - beginning_centroid))
                        if len(centroid_list) > 100 and final_centroid != beginning_centroid:
                            total_faces_detected_locally += 1
                            #print("[SRINI]: Number of people in =", total_faces_detected_locally)
                            to.counted = True
                            centroid_list.clear()
                    elif final_centroid > H // 2:
                        if len(centroid_list) > 100 and final_centroid != beginning_centroid:
                            total_faces_detected_locally -= 1
                            print("[SRINI]: Number of people in =", total_faces_detected_locally)
                            to.counted = True
                            centroid_list.clear()

                    """
                    print("CENTROID 1: ", centroid[1])
                    for keyName in moveDict:
                        keyVals = moveDict[keyName]
                        # for i in range(len(keyVals)):
                        # keyVals[i] = keyVals[i].item()
                        if "Counted" in keyVals:
                            pass
                        elif (keyVals[0] < W // 2) and (keyVals[-1] > W // 2):
                            totalUp += 1
                            totalPeople += 1
                            total_faces_detected_locally -= 1
                            values = moveDict[keyName]
                            values.append("Counted")
                            moveDict[keyName] = values
                            to.counted = True
                        elif (keyVals[0] > W // 2) and (keyVals[-1] < W // 2):
                            totalPeople -= 1
                            totalDown += 1
                            total_faces_detected_locally += 1
                            values = moveDict[keyName]
                            values.append("Counted")
                            moveDict[keyName] = values
                            to.counted = True

            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        totalFrames += 1
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
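thread_for_capturing_face() keeps a per-ID position history in moveDict and counts a crossing when the first and last recorded samples fall on opposite sides of the midline, appending a 'Counted' sentinel so an ID is only tallied once. A compact sketch of that bookkeeping (names here are illustrative):

def update_crossings(move_history, object_id, position, midline):
    # move_history: dict id -> list of positions (plus a "Counted" sentinel once tallied)
    # returns +1, -1, or 0 depending on whether this ID just crossed the midline
    values = move_history.setdefault(object_id, [])
    values.append(position)
    if "Counted" in values:
        return 0
    first, last = values[0], values[-1]
    if first < midline < last:        # moved across the line in one direction
        values.append("Counted")
        return 1
    if last < midline < first:        # moved across in the other direction
        values.append("Counted")
        return -1
    return 0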
Example #28
0
    def read_video(self):
        fgbg = cv2.createBackgroundSubtractorMOG2(history=120, varThreshold=10, detectShadows=True)
        cap = cv2.VideoCapture(self.vid_name)

        c=0
        while c<5:
            cap.read()
            c+=1
        dis = 0

        prev_time = time()
        last_time = 0
        count_no_frame=0
        count = 0
        reinitialize = 0
        pflag = False
        i=0

        wt = 1  # 100
        pause = False
        first_frame = False
        W = None
        H = None
        ct = CentroidTracker(maxDisappeared=1, maxDistance=1200)
        trackableObjects = {}
        totalDown = 0
        totalUp = 0

        fp_count=0
        skip_frames=115
        #skip_frames=75


        #out = cv2.VideoWriter('./output.avi', -1, 20.0, (640,480))

        #size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        #fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # 'x264' doesn't work
        #out = cv2.VideoWriter('./001_output.mp4',fourcc, 29.0, size, False)


        file_count=0
        print("Starting infinite loop and continue to read frames")
        while True:
            # print("hello")
            #print("fil=",file_count%1000)
            if(0 == file_count%1000):
                #print("check file")
                #lines
                print("Check if we have data to post")
                file_fd=open('post_data_file.txt','r')
                lines=file_fd.readlines()
                #print("lines=",lines)
                for line in lines:
                    #print("line=",line)
                    headers = {'content-type': "application/json",'cache-control': "no-cache"}
                    url = "http://app.gizmosmart.io/iot/1.3/public/peopleCounting/token/51987536541"
                    try:
                        arr = line
                        response = requests.request("POST", url, data=arr, headers=headers)
                        #print("in file post=",response.text)
                    except Exception as e:
                        #print ("Post Error: ",str(e)) 
                        print("Failed to post data: %s",str(e))
                        #self.post_data_file.write(arr+"\n")
                        #continue
                #print("truncate the file")
                #file_fd.truncate(0)
                with open('post_data_file.txt','a+') as fil:
                    fil.truncate(0)
                file_count=0

                        
            file_count+=1
            key = cv2.waitKey(wt) & 0xFF
            if (key == ord('p')):
                pause = not pause
            if (key == ord('s')):
                wt = int(wt * 2)
            if (key == ord('f')):
                wt = int(wt / 2)
            if(key == ord('q')):
                break

            if (pause):
                continue
            ret, frame = cap.read()

            #out.write(frame)

            if not ret:
                print("ret is null, could not open camera")
                tm.sleep(1)
                #continue

            fp_count +=1

            #if fp_count<skip_frames:
            #    continue
            #if(fp_count%3):
            #    continue

            if(self.cam_rotat):
                frame=self.rotateImage(frame, 90)

            #cv2.imshow("framee",frame)
            #continue


            if ret is True:
                # Show the filtered image

                h, w, c = frame.shape
                #print("shape=",w,h,c)
                fram = cv2.resize(frame, (int(w / 2), int(h / 2)))
                if self.draw:
                    cv2.imshow("original fram", fram)

            if not ret:
                count += 1
                print("ret is zero,count=",count)
                        #iscamopened=cap.isOpened()
                        #print("cap opened=",iscamopened)
                        #if(not iscamopened):
                        #    break
                if count >= 5:
                    print("frame reached max unread frame 5")
                            #reinitialize camera
                    print("release the camera",self.camera_id)
                    cap.release()
                    reinitialize += 1
                    if reinitialize <= 4000:
                        #cam_url=gen_session.get_cam_url(self.camera_id)
                        cam_url=self.vid_name
                        print(reinitialize," attempt to reopen camera",cam_url)
                        cap = cv2.VideoCapture(cam_url)
                        if(cap.isOpened()):
                            count=0
                            reinitialize = 0
                            continue
                        else:
                            print("camera could not be opned")
                                    #count=0
                            continue
                            #break
                    else:
                        print("reached to max 4000 reinitialized camera attempt")
                        break
                else:
                    continue

            cy = 350
            cx = 50
            ch = 380
            cw = 200


            cx = self.ref_line_min_x
            cw = self.ref_line_max_x - cx
            cy=self.ref_line_min_y
            ch=self.ref_line_max_y-cy



            #cy = 100
            #cx = 260
            #ch = 180
            #cw = 200

            # cy = 150
            # cx=200
            # ch=250
            # cw=220
            crop_img = fram[cy:cy + ch, cx:cx + cw]
            if W is None or H is None:
                (H, W) = crop_img.shape[:2]
            if not first_frame:
                #backSubtractor = BackGroundSubtractor(0.007, self.denoise(crop_img))
                backSubtractor = BackGroundSubtractor(0.007, self.denoise(crop_img))
                run = True
                first_frame = True
                continue

            # cv2.imshow("crop_img",crop_img)

            crop_img_dn = self.denoise(crop_img)
            # # cv2.imshow('input',denoise(fram))
            # cv2.imshow('input', crop_img_dn)
            #
            # # bgsimg=bgsobj.get_fg_image(denoise_img)
            # # cv2.imshow("bgsimg",bgsimg)
            foreGround = backSubtractor.getForeground(crop_img_dn)

            # ret, foreGroundb = cv2.threshold(foreGround, 35, 255, 0)
            foreGroundb = cv2.cvtColor(foreGround, cv2.COLOR_BGR2GRAY)
            # Apply thresholding on the background and display the resulting mask
            #ret, mask = cv2.threshold(foreGroundb, 38, 255, cv2.THRESH_BINARY)
            ret, mask = cv2.threshold(foreGroundb, 25, 255, cv2.THRESH_BINARY)
            cv2.imshow("mask",mask)
            h, w = mask.shape

            mask = self.line_erode(mask)
            f_contours = self.find_contours_img(mask)
            self.draw_contours(crop_img.copy(), f_contours)

            #if self.show_fgbg == 1:
            #    cv2.imshow('mask', mask)

            cv2.line(crop_img, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            rects = []
            count += 1
            status = "Waiting"

            for c in f_contours:
                box_colour = (0, 255, 0)
                (x, y, w, h) = cv2.boundingRect(c)
                (startX, startY, endX, endY) = (x, y, x + w, y + h)
                rects.append((startX, startY, endX, endY))
                cv2.rectangle(crop_img, (startX, startY), (int(endX), int(endY)), box_colour, 2)

            objects = ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    # for c in to.centroids:
                    #	print("c====",c)
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    dr_last_point = None
                    centroid_len = len(to.centroids)
                    if (centroid_len > 80):
                        del to.centroids[:(centroid_len - 80)]
                    for cc in to.centroids:
                        # print("cc=",cc)
                        cx = cc[0]
                        cy = cc[1]
                        if dr_last_point is None:
                            dr_last_point = (cx, cy)
                            continue
                        # cx=cc[0]
                        # cy=cc[1]
                        # cv2.circle(crop_img, (cx,cy), 1, (0,0,255), thickness=2, lineType=8, shift=1)
                        cv2.line(crop_img, (dr_last_point), (cx, cy), (0, 255, 0), thickness=1, lineType=8)
                        dr_last_point = (cx, cy)

                    if not to.counted:
                        # print("centroid[1]=", centroid[1])
                        # print("H/2=", H / 2)
                        if direction < 0 and centroid[1] < H // 2:
                            up_dir_dist = H / 2 - centroid[1]
                            # print(H/4, " up_dir_dist=", up_dir_dist)
                            if (up_dir_dist > 0 and up_dir_dist < H / 4):
                            #if (up_dir_dist > 0 and up_dir_dist < H / 6):
                                # print("caputred up")
                                totalUp += 1
                                # count_in += 1
                                to.counted = True
                        elif direction > 0 and centroid[1] > H // 2:
                            down_dir_dist = centroid[1] - H / 2
                            # print(H/4, " down_dir_dist=", down_dir_dist)
                            if (down_dir_dist > 0 and down_dir_dist < H / 4):
                            #if (down_dir_dist > 0 and down_dir_dist < H / 6):
                                # print("caputured down")
                                totalDown += 1
                                # count_out += 1
                                to.counted = True

                trackableObjects[objectID] = to
                text = "ID {} {}".format(objectID, to.counted)

                cv2.putText(crop_img, text, (centroid[0] - 10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 1)
                cv2.circle(crop_img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

                # print("to=", to)

            info = [
                #("Up", totalUp),
                #("Down", totalDown),
                ("IN", totalUp),
                ("OUT", totalDown),
                #("Status", status),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                # cv2.putText(crop_img, text, (10, H - ((i * 20) + 20)),
                # 			cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                cv2.putText(crop_img, text, (10, i * 20 + 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)

            if self.draw:
                cv2.imshow("pc roi", crop_img)
            key = cv2.waitKey(1) & 0xFF

            # post the accumulated IN/OUT counts to the server roughly every 10 seconds
            cur_time = time() - prev_time

            if cur_time > 10 and cur_time - last_time > 10:
                last_time = cur_time
                cur_machine_time = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
                count_IN = totalUp
                count_OUT = totalDown

                arr = self.json_prepare(self.camera_id, str(count_IN), str(count_OUT),
                                        cur_machine_time)
                print("arr=", arr)

                # legacy endpoint: http://sales.gizmohelp.com/mcrm/2.1-test/people_counting
                # expected payload shape:
                # {"cameraId": 550113, "data": [{"countIn": "1", "countOut": "2",
                #                                "date": "2018-11-13", "time": "03:00:01"}]}
                url = "http://app.gizmosmart.ios/iot/1.3/public/peopleCounting/token/51987536541"
                headers = {'content-type': "application/json", 'cache-control': "no-cache"}

                try:
                    print("post count status:")
                    response = requests.request("POST", url, data=arr, headers=headers)
                    print("response=", response.text)
                except Exception as e:
                    print("Post Error:", str(e))
                    # keep the payload locally so it can be re-sent later
                    self.post_data_file.write(arr + "\n")
                    continue

                # reset the per-interval counters after a successful post
                totalUp = 0
                totalDown = 0

        print("Processing Complete")
        print("Entrances: ", self.EntranceCounter)
        print(self.camera_id," Exits:", self.ExitCounter) 
        cap.release()
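
The block above batches the IN/OUT counters and posts them to the server roughly every 10 seconds, writing the payload to a local file when the request fails so it can be re-sent later. A minimal standalone sketch of that pattern follows; the endpoint URL, payload shape, and file name here are placeholders, not the real service:

import json
import time
import requests

POST_URL = "http://example.invalid/peopleCounting/token/XXXX"   # placeholder endpoint
HEADERS = {"content-type": "application/json", "cache-control": "no-cache"}

def flush_counts(camera_id, count_in, count_out, fallback_path="unsent_counts.log"):
    # POST the accumulated counts; append the payload to a local file on failure
    payload = {
        "cameraId": camera_id,
        "data": [{
            "countIn": str(count_in),
            "countOut": str(count_out),
            "date": time.strftime("%Y-%m-%d"),
            "time": time.strftime("%H:%M:%S"),
        }],
    }
    try:
        resp = requests.post(POST_URL, data=json.dumps(payload), headers=HEADERS, timeout=5)
        print("response=", resp.text)
    except requests.RequestException as e:
        print("Post Error:", e)
        with open(fallback_path, "a") as f:
            f.write(json.dumps(payload) + "\n")

In the main loop this would be called once the 10-second window elapses, after which totalUp and totalDown are reset to zero.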
Example #29
0
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)

            # check to see if the object has been counted or not
            if not to.counted:
                # if the direction is negative (indicating the object
                # is moving up) AND the centroid is above the center
                # line, count the object
                if direction < 0 and centroid[1] < H // 2:
                    totalUp += 1
                    to.counted = True
                    send_to_server(1)

                # if the direction is positive (indicating the object
                # is moving down) AND the centroid is below the
                # center line, count the object
                elif direction > 0 and centroid[1] > H // 2:
                    totalDown += 1
                    to.counted = True
                    send_to_server(-1)

        # store the trackable object in our dictionary
        trackableObjects[objectID] = to

        # draw both the ID of the object and the centroid of the
        # object on the output frame
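
The counting in this fragment hinges on the sign of centroid[1] minus the mean of the object's previous y-coordinates: negative means the object is drifting upward, positive means downward, and the object is only counted once its centroid has also crossed the horizontal mid-line. A small self-contained illustration of that rule (function and variable names here are illustrative, not from the original source):

from statistics import mean

def crossing_direction(history_y, current_y, mid_y):
    # history_y: previous y-coordinates of the object's centroid
    # current_y: y-coordinate of the current centroid
    # mid_y:     y of the horizontal counting line (frame height // 2)
    direction = current_y - mean(history_y)   # negative => moving up
    if direction < 0 and current_y < mid_y:
        return "up"
    if direction > 0 and current_y > mid_y:
        return "down"
    return None

# an object with centroid history [240, 230, 221] now at y=200 in a
# 480-pixel-high frame has crossed the line moving up:
# crossing_direction([240, 230, 221], 200, 240) -> "up"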
Example #30
0
def on_message(client, userdata, msg):
    # read image as base64 from MQTT
    # print(msg.topic+" "+str(msg.payload))
    global net, writer, ct, trackers, trackableObjects, totalFrames, \
        totalDown, totalUp, fps, counter, delta
    # sys.stdout.flush()
    # sys.stdout.write("[INFO] receive frame %d\n" % counter)
    sys.stdout.write("#")
    counter += 1
    jpg_as_text = msg.payload
    frame = decodeb64(jpg_as_text)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    (H, W) = frame.shape[:2]
    if args["output"] is not None and writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)
    status = "Waiting"
    rects = []
    if totalFrames % args["skip_frames"] == 0:
        status = "Detecting"
        trackers = []
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
        net.setInput(blob)
        detections = net.forward()
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > args["confidence"]:
                idx = int(detections[0, 0, i, 1])
                if CLASSES[idx] != "person":
                    continue
                box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                (startX, startY, endX, endY) = box.astype("int")
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
                trackers.append(tracker)
    else:
        for tracker in trackers:
            status = "Tracking"
            tracker.update(rgb)
            pos = tracker.get_position()
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            rects.append((startX, startY, endX, endY))
    cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
    objects = ct.update(rects)
    coordinatex = []
    coordinatey = []
    for (objectID, centroid) in objects.items():
        to = trackableObjects.get(objectID, None)
        if to is None:
            to = TrackableObject(objectID, centroid)
        else:
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)
            if not to.counted:
                if direction < 0 and centroid[1] < H // 2:
                    # moving up -> user leaving, so decrement delta
                    delta -= 1
                    totalUp += 1
                    to.counted = True
                elif direction > 0 and centroid[1] > H // 2:
                    # moving down -> user entering, so increment delta
                    delta += 1
                    totalDown += 1
                    to.counted = True
        trackableObjects[objectID] = to
        text = "ID {}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        coordinatex.append(centroid[0])
        coordinatey.append(centroid[1])
    # draw a line between any two centroids closer than a y-scaled threshold (simple proximity check)
    for j in range(len(coordinatex)):
        for k in range(j + 1, len(coordinatex)):
            distance = math.sqrt(
                pow(coordinatex[j] - coordinatex[k], 2) +
                pow(coordinatey[j] - coordinatey[k], 2))
            if distance < (0.1 * coordinatey[j] + 40):
                cv2.line(frame, (coordinatex[j], coordinatey[j]),
                         (coordinatex[k], coordinatey[k]), (255, 0, 0), 3)
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    if writer is not None:
        writer.write(frame)
    totalFrames += 1
    fps.update()
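
The on_message handler above is meant to be driven by an MQTT client loop that delivers one base64-encoded frame per message. A minimal subscription sketch using paho-mqtt; the broker address and topic are placeholders and would need to match whatever publisher is feeding the frames:

import paho.mqtt.client as mqtt

BROKER = "localhost"        # placeholder broker address
TOPIC = "camera/frames"     # placeholder topic carrying base64-encoded JPEG frames

client = mqtt.Client()
client.on_message = on_message              # the handler defined above
client.connect(BROKER, 1883, keepalive=60)
client.subscribe(TOPIC)
client.loop_forever()                       # blocks; on_message runs for every received frame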