Code Example #1
def distanciamiento(frame):
    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))

    # initialize the set of indexes that violate the minimum social
    # distance
    violate = set()

    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the
        # Euclidean distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])
        D = dist.cdist(centroids, centroids, metric="euclidean")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                # check to see if the distance between any two
                # centroid pairs is less than the configured number
                # of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of
                    # the centroid pairs
                    violate.add(i)
                    violate.add(j)

    # loop over the results
    for (i, (prob, bbox, centroid)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then
        # initialize the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        color = (0, 255, 0)

        # if the index pair exists within the violation set, then
        # update the color
        if i in violate:
            color = (0, 0, 255)

        # draw (1) a bounding box around the person and (2) the
        # centroid coordinates of the person,
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        cv2.circle(frame, (cX, cY), 5, color, 1)

    # draw the total number of social distancing violations on the
    # output frame
    text = "Violaziones de distanciamiento: {}".format(len(violate))
    cv2.putText(frame, text, (10, frame.shape[0] - 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)

    return frame
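
Note: every example on this page calls a detect_people(frame, net, ln, personIdx=...) helper that is defined elsewhere in each project and not shown here. The following is a hedged sketch of what such a helper typically looks like in these YOLO-based detectors: one forward pass, keep only confident "person" detections, apply non-maxima suppression, and return (confidence, bounding_box, centroid) tuples. The threshold values and parameter names are illustrative assumptions.

# Hedged sketch of the detect_people() helper assumed by the examples on
# this page; min_conf and nms_thresh are assumed defaults, not project values.
import cv2
import numpy as np

def detect_people(frame, net, ln, personIdx=0, min_conf=0.3, nms_thresh=0.3):
    (H, W) = frame.shape[:2]
    results = []

    # build a blob and run a single YOLO forward pass over the output layers
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)

    boxes, centroids, confidences = [], [], []
    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # keep only confident "person" detections
            if classID == personIdx and confidence > min_conf:
                # YOLO returns normalized center-x, center-y, width, height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                centroids.append((int(centerX), int(centerY)))
                confidences.append(float(confidence))

    # apply non-maxima suppression, then pack (confidence, bbox, centroid)
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, min_conf, nms_thresh)
    if len(idxs) > 0:
        for i in np.array(idxs).flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            results.append((confidences[i], (x, y, x + w, y + h), centroids[i]))
    return results
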
Code Example #2
def gen1():
    while True:
        (grabbed, frame) = vc.read()
        if not grabbed:
            break
        frame = imutils.resize(frame, width=700)
        results = detect_people(frame,
                                net,
                                ln,
                                personIdx=LABELS.index("person"))
        violate = set()
        if len(results) >= 2:
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric="euclidean")
            # loop over the upper triangular of the distance matrix
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    if D[i, j] < config.MIN_DISTANCE:
                        violate.add(i)
                        violate.add(j)
        # loop over the results
        for (i, (prob, bbox, centroid)) in enumerate(results):
            (startX, startY, endX, endY) = bbox
            (cX, cY) = centroid
            color = (0, 255, 0)
            if i in violate:
                color = (0, 0, 255)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            cv2.circle(frame, (cX, cY), 5, color, 1)
        text = "Social Distancing Violations: {}".format(len(violate))
        cv2.putText(frame, text, (10, frame.shape[0] - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
        cv2.imwrite("1.jpg", frame)
        (flag, encodedImage) = cv2.imencode(".jpg", frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) +
               b'\r\n')
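
gen1() above is an MJPEG generator: each frame is JPEG-encoded and yielded as a multipart chunk. It is presumably consumed by a Flask route that streams it with the multipart/x-mixed-replace mimetype; a minimal usage sketch follows (the app object and route name are assumptions, not part of the original code).

# Hedged usage sketch: serving the gen1() generator as a live MJPEG stream.
from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed")
def video_feed():
    # each yielded chunk already carries its own --frame boundary and
    # Content-Type header, so the route only needs the multipart mimetype
    return Response(gen1(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")
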
vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
writer = None

# loop over the frames from the video stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break

    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))

    # initialize the set of indexes that violate the minimum social
    # distance
    violate = set()
    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the
        # Euclidean distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])
        D = dist.cdist(centroids, centroids, metric="euclidean")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                # check to see if the distance between any two
                # centroid pairs is less than the configured number
                # of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of
                    # the centroid pairs
                    violate.add(i)
                    violate.add(j)

def use_webcam(out='',display=True):
    # load the COCO class labels our YOLO model was trained on
    labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
    LABELS = open(labelsPath).read().strip().split("\n")

    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
    configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])
    print(weightsPath)
    print(configPath)
    # load our YOLO object detector trained on COCO dataset (80 classes)
    print("[INFO] loading YOLO from disk...")
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

    # check if we are going to use GPU
    if config.USE_GPU:
        # set CUDA as the preferable backend and target
        print("[INFO] setting preferable backend and target to CUDA...")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

    # determine only the *output* layer names that we need from YOLO
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # initialize the video stream and pointer to output video file
    print("[INFO] accessing video stream...")
    vs = cv2.VideoCapture(0)
    writer = None

    # loop over the frames from the video stream
    while True:
        # read the next frame from the file
        (grabbed, frame) = vs.read()

        # if the frame was not grabbed, then we have reached the end
        # of the stream
        if not grabbed:
            break
        
        # resize the frame and then detect people (and only people) in it
        frame = imutils.resize(frame, width=700)
        results = detect_people(frame, net, ln,
            personIdx=LABELS.index("person"))

        # initialize the set of indexes that violate the minimum social
        # distance
        violate = set()

        # ensure there are *at least* two people detections (required in
        # order to compute our pairwise distance maps)
        if len(results) >= 2:
            # extract all centroids from the results and compute the
            # Euclidean distances between all pairs of the centroids
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric="euclidean")

            # loop over the upper triangular of the distance matrix
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    # check to see if the distance between any two
                    # centroid pairs is less than the configured number
                    # of pixels
                    if D[i, j] < config.MIN_DISTANCE:
                        # update our violation set with the indexes of
                        # the centroid pairs
                        violate.add(i)
                        violate.add(j)

        # loop over the results
        for (i, (prob, bbox, centroid)) in enumerate(results):
            # extract the bounding box and centroid coordinates, then
            # initialize the color of the annotation
            (startX, startY, endX, endY) = bbox
            (cX, cY) = centroid
            color = (0, 255, 0)

            # if the index pair exists within the violation set, then
            # update the color
            if i in violate:
                color = (0, 0, 255)

            # draw (1) a bounding box around the person and (2) the
            # centroid coordinates of the person,
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            cv2.circle(frame, (cX, cY), 5, color, 1)

        # draw the total number of social distancing violations on the
        # output frame
        text = "Social Distancing Violations: {}".format(len(violate))
        cv2.putText(frame, text, (10, frame.shape[0] - 25),
            cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)

        # check to see if the output frame should be displayed to our
        # screen
        if display > 0:
            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1)

            # if the `esc` key was pressed, break from the loop
            if key == 27:
                break

        # if an output video file path has been supplied and the video
        # writer has not been initialized, do so now
        if out != "" and writer is None:
            # initialize our video writer
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(out, fourcc, 25,
                (frame.shape[1], frame.shape[0]), True)
                
        # if the video writer is not None, write the frame to the output
        # video file
        if writer is not None:
            writer.write(frame)

    if writer:
        writer.release()
    vs.release()
    cv2.destroyAllWindows()
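
A note on the recurring line ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]: the i[0] indexing assumes getUnconnectedOutLayers() returns nested indices, which holds for older OpenCV 4 builds; newer releases (roughly 4.5.4 onward) return a flat array and the lookup raises an IndexError. A hedged, version-tolerant variant:

# Hedged sketch: output-layer lookup that copes with both the nested and the
# flat return shape of getUnconnectedOutLayers(), depending on OpenCV version.
ln = net.getLayerNames()
try:
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
except (IndexError, TypeError):
    ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
# on OpenCV >= 3.4.2, net.getUnconnectedOutLayersNames() sidesteps the issue
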
Code Example #5
def video_detection(net, video_input, video_output):
    # determine only the *output* layer names that we need
    # from YOLOv3
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # initialize the video stream and pointer to the output video file
    print("...Accessing video stream...")
    vs = cv2.VideoCapture(video_input)
    writer = None

    # initialize a frame number for putting text
    frame_number = 0

    # loop over the frames from the video stream
    while True:
        # read the next frame from the file
        (grabbed, frame) = vs.read()

        # if grabbed is False, then we have reached the end
        # of the stream
        if not grabbed:
            break

        # resize the frame and then detect people (only people) in it
        frame = imutils.resize(frame, width=700)
        results = detect_people(frame,
                                net,
                                ln,
                                personIdx=LABELS.index("person"))

        # use the average height of people as the minimum distance required
        # between people; call calc_heights to compute the average height of
        # the people in the frame
        ave_height_pixels = calc_heights(results)

        # initialize the set of indexes that violate
        # the minimum social distance (i.e., ave_height_pixels)
        violate = set()

        # initialize the set of tuples for neighbors that violate minimum
        # social distance (i.e., ave_height_pixels)
        neighbors = set()

        # ensure there are *at least* two people detections (required in
        # order to compute our pairwise distance maps)
        if len(results) >= 2:
            # extract all centroids from the results and compute
            # the Euclidean distance between all pairs of the centroids
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric='euclidean')

            # loop over the upper triangular of the distance matrix
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    # check to see if the distance between any two
                    # centroid pairs is less than the average height
                    if D[i, j] < ave_height_pixels:
                        # update violation set with the indexes of
                        # the centroid pairs
                        violate.add(i)
                        violate.add(j)

                        # update the neighbors set
                        if (i, j) not in neighbors and (j, i) not in neighbors:
                            neighbors.add((i, j))

        # draw bounding box using draw_boxes function
        draw_boxes(frame, results, violate)

        # initialize a list to store the neighbor distances
        # and clear it every 10th frame
        if frame_number % 10 == 0:
            neighbor_distances = list()

        # use draw_line_print_distances to draw lines between violating neighbors
        neighbor_distances = draw_line_print_distances(frame, results,
                                                       neighbors, frame_number,
                                                       neighbor_distances,
                                                       ave_height_pixels)

        # draw the total number of social distancing violations on the
        # output frame
        text = "Social Distancing Violations: {}".format(len(violate))
        cv2.putText(frame, text, (10, frame.shape[0] - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)

        # if an output video file path has been supplied and the video
        # writer has not been initialized, do so now
        if video_output != "" and writer is None:
            # initialize our video writer
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(video_output, fourcc, 25,
                                     (frame.shape[1], frame.shape[0]), True)

        # if the video writer is not None, write the frame to the output
        # video file
        if writer is not None:
            writer.write(frame)

        # counter for frame number
        frame_number += 1
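
Code Example #5 swaps the fixed config.MIN_DISTANCE threshold for the average detected person height via a calc_heights() helper that is not shown. Given that each result carries its bounding box as (startX, startY, endX, endY), a hedged sketch of such a helper:

# Hedged sketch of the calc_heights() helper assumed by Code Example #5:
# average bounding-box height, in pixels, across all detected people.
def calc_heights(results):
    if not results:
        return 0
    heights = [endY - startY
               for (_, (startX, startY, endX, endY), _) in results]
    return sum(heights) / len(heights)
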
Code Example #6
File: front.py  Project: sayansinha2019/FMSD
	def peopletracker(self):
		ap = argparse.ArgumentParser()
		ap.add_argument("-i", "--input", type=str, default="test1.mp4",
			help="path to (optional) input video file")
		ap.add_argument("-o", "--output", type=str, default="",
			help="path to (optional) output video file")
		ap.add_argument("-d", "--display", type=int, default=1,
			help="whether or not output frame should be displayed")
		args = vars(ap.parse_args())

		ct = CentroidTracker()
		(H, W) = (None, None)

		labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
		LABELS = open(labelsPath).read().strip().split("\n")

		weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
		configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

		print("[INFO] loading YOLO from disk...")
		net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

		if config.USE_GPU:
			# set CUDA as the preferable backend and target
			print("[INFO] setting preferable backend and target to CUDA...")
			net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
			net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

		ln = net.getLayerNames()
		ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

		print("[INFO] accessing video stream...")
		vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
		writer = None

		count=0

		while True:
			# read the next frame from the file
			(grabbed, frame) = vs.read()

			# if the frame was not grabbed, then we have reached the end
			# of the stream
			if not grabbed:
				break

			
			frame = imutils.resize(frame, width=700)
			results = detect_people(frame, net, ln,
				personIdx=LABELS.index("person"))

			
			
			rects = []

			for (i, (prob, bbox, centroid)) in enumerate(results):
				# extract the bounding box and centroid coordinates, then
				# initialize the color of the annotation
				(startX, startY, endX, endY) = bbox
				#(cX, cY) = centroid
				color = (0, 255, 0)
				rects.append(bbox)
				
				# draw (1) a bounding box around the person and (2) the
				# centroid coordinates of the person,
				cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

			objects = ct.update(rects)

			for (objectID, centroid) in objects.items():
				# draw both the ID of the object and the centroid of the
				# object on the output frame
				text = "ID {}".format(objectID)
				cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
					cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
				cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

			if args["display"] > 0:
				# show the output frame
				cv2.imshow("Frame", frame)
				key = cv2.waitKey(1) & 0xFF

				# if the `q` key was pressed, break from the loop
				if key == ord("q"):
					break

			if args["output"] != "" and writer is None:
				# initialize our video writer
				fourcc = cv2.VideoWriter_fourcc(*"MJPG")
				writer = cv2.VideoWriter(args["output"], fourcc, 25,
					(frame.shape[1], frame.shape[0]), True)

			if writer is not None:
				writer.write(frame)
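
peopletracker() above relies on a CentroidTracker class imported elsewhere in front.py (presumably a pyimagesearch-style tracker). The contract the loop depends on is roughly the following; the import path and sample boxes are assumptions:

# Hedged interface sketch for the CentroidTracker used in Code Example #6.
from pyimagesearch.centroidtracker import CentroidTracker

ct = CentroidTracker()

# update() takes the current frame's (startX, startY, endX, endY) boxes and
# returns an OrderedDict mapping a stable objectID to its centroid, so the
# same person keeps the same ID across frames.
rects = [(50, 60, 150, 260), (300, 80, 380, 290)]  # illustrative boxes
objects = ct.update(rects)
for (objectID, centroid) in objects.items():
    print("ID {} is at ({}, {})".format(objectID, centroid[0], centroid[1]))
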
Code Example #7
File: front.py  Project: sayansinha2019/FMSD
	def SocialDist(self):
		ap = argparse.ArgumentParser()
		ap.add_argument("-i", "--input", type=str, default="test.mp4",
				help="path to (optional) input video file")
		ap.add_argument("-o", "--output", type=str, default="",
			help="path to (optional) output video file")
		ap.add_argument("-d", "--display", type=int, default=1,
			help="whether or not output frame should be displayed")
		args = vars(ap.parse_args())
		labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
		LABELS = open(labelsPath).read().strip().split("\n")

		weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
		configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

		print("[INFO] loading YOLO from disk...")
		net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
		if config.USE_GPU:

			print("[INFO] setting preferable backend and target to CUDA...")
			net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
			net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

		ln = net.getLayerNames()
		ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

		print("[INFO] accessing video stream...")
		vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
		writer = None

		while True:
			(grabbed, frame) = vs.read()
			if not grabbed:
				break
			frame = imutils.resize(frame, width=700)
			results = detect_people(frame, net, ln,
				personIdx=LABELS.index("person"))	
			violate = set()

			if len(results) >= 2:
				centroids = np.array([r[2] for r in results])
				D = dist.cdist(centroids, centroids, metric="euclidean")

				for i in range(0, D.shape[0]):
					for j in range(i + 1, D.shape[1]):
						if D[i, j] < config.MIN_DISTANCE:
							violate.add(i)
							violate.add(j)

			for (i, (prob, bbox, centroid)) in enumerate(results):
				(startX, startY, endX, endY) = bbox
				(cX, cY) = centroid
				color = (0, 255, 0)


				if i in violate:
					color = (0, 0, 255)

				cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
				cv2.circle(frame, (cX, cY), 5, color, 1)

			text = "Social Distancing Violations: {}".format(len(violate))
			cv2.putText(frame, text, (10, frame.shape[0] - 25),
				cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)	

			if args["display"] > 0:
				cv2.imshow("Frame", frame)
				key = cv2.waitKey(1) & 0xFF

				if key == ord("q"):
					break

			if args["output"] != "" and writer is None:
				fourcc = cv2.VideoWriter_fourcc(*"MJPG")
				writer = cv2.VideoWriter(args["output"], fourcc, 25,
						(frame.shape[1], frame.shape[0]), True)	

			if writer is not None:
				writer.write(frame)
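
All of these examples read config.MODEL_PATH, config.MIN_DISTANCE and config.USE_GPU from a small config module that is not shown. A hedged sketch of what it typically contains (the values are illustrative assumptions):

# Hedged sketch of the config module referenced throughout these examples;
# only the attributes used on this page are listed, and the values are
# illustrative assumptions.
MODEL_PATH = "yolo-coco"   # folder holding coco.names, yolov3.cfg, yolov3.weights
MIN_DISTANCE = 50          # minimum safe distance between centroids, in pixels
USE_GPU = False            # set True to route inference through CUDA
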
Code Example #8
def gen_social_distancing(video_index):
    #global current_violations_sd   
    #global average_violations_sd  
    global violations_in_past 
    while True:

        analytics.current_violations_sd = 0
        analytics.average_violations_sd = 0
        violations_in_past = []
        #for i in violations_in_past:
        #    i = 0
        args = {"input": video_config.SOCIAL_DISTANCE_INPUT[video_index], "output": video_config.SOCIAL_DISTANCE_OUTPUT, "display": 1}
        #args["input"] = "store.mp4"
        #args["output"] = "store_out.avi"
        #args["display"] = 1
        # load the COCO class labels our YOLO model was trained on
        labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
        LABELS = open(labelsPath).read().strip().split("\n")

        # derive the paths to the YOLO weights and model configuration
        weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
        configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

        # load our YOLO object detector trained on COCO dataset (80 classes)
        print("[INFO] loading YOLO from disk...")
        net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

        # check if we are going to use GPU
        if config.USE_GPU:
            # set CUDA as the preferable backend and target
            print("[INFO] setting preferable backend and target to CUDA...")
            net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

        # determine only the *output* layer names that we need from YOLO
        ln = net.getLayerNames()
        ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

        # initialize the video stream and pointer to output video file
        print("[INFO] accessing video stream...")
        #vs = cv2.VideoCapture(args["input"] if args["input"] else 0) # sami
        vs = cv2.VideoCapture(args["input"])
        # request a lower capture frame rate (not all backends honor this)
        vs.set(cv2.CAP_PROP_FPS, 15)
        writer = None
        # loop over the frames from the video stream
        counter = 0
        counter20 = 0
        total_violations = 0 
        while True:
            # read the next frame from the file
            counter = counter + 1
            if(counter > 10000):
                counter = 1
                
            (grabbed, frame) = vs.read()

            #if(counter % 5 != 0):
            #    continue
            counter20 += 1
            # if the frame was not grabbed, then we have reached the end
            # of the stream
            if not grabbed:
                break

            # resize the frame and then detect people (and only people) in it
            frame = imutils.resize(frame, width=450)
            # frame = np.dstack([frame, frame, frame])
            results = detect_people(frame, net, ln,
                personIdx=LABELS.index("person"))

            # initialize the set of indexes that violate the minimum social
            # distance
            violate = set()

            # ensure there are *at least* two people detections (required in
            # order to compute our pairwise distance maps)
            if len(results) >= 2:
                # extract all centroids from the results and compute the
                # Euclidean distances between all pairs of the centroids
                centroids = np.array([r[2] for r in results])
                D = dist.cdist(centroids, centroids, metric="euclidean")
                # loop over the upper triangular of the distance matrix
                for i in range(0, D.shape[0]):
                    for j in range(i + 1, D.shape[1]):
                        # check to see if the distance between any two
                        # centroid pairs is less than the configured number
                        # of pixels
                        if D[i, j] < config.MIN_DISTANCE:
                            # update our violation set with the indexes of
                            # the centroid pairs
                            violate.add(i)
                            violate.add(j)
                            #detected = True



            # loop over the results
            for (i, (prob, bbox, centroid)) in enumerate(results):
                # extract the bounding box and centroid coordinates, then
                # initialize the color of the annotation
                (startX, startY, endX, endY) = bbox
                (cX, cY) = centroid
                color = (0, 255, 0)

                # if the index pair exists within the violation set, then
                # update the color
                if i in violate:
                    color = (0, 0, 255)

                # draw (1) a bounding box around the person and (2) the
                # centroid coordinates of the person,
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
                cv2.circle(frame, (cX, cY), 5, color, 1)

            # draw the total number of social distancing violations on the
            # output frame
            analytics.current_violations_sd = len(violate)
            total_violations += analytics.current_violations_sd
            analytics.average_violations_sd = (total_violations)/float(counter20)
            text = "Social Distancing Violations: {}".format(len(violate))
            cv2.putText(frame, text, (10, frame.shape[0] - 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
            
            #yield(frame.tobytes())
            #frame = frame.tobytes()

            #yield(b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame.tobytes() + b'\r\n')

            # check to see if the output frame should be displayed to our
            # screen
            
            if args["display"] > 10:
                # show the output frame
                cv2.imshow("Frame", frame)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

            # if an output video file path has been supplied and the video
            # writer has not been initialized, do so now
            if args["output"] != "" and writer is None:
                # initialize our video writer
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(args["output"], fourcc, 25,
                    (frame.shape[1], frame.shape[0]), True)

            # if the video writer is not None, write the frame to the output
            # video file
            if writer is not None:
                writer.write(frame)
            frame = cv2.imencode('.jpg', frame)[1].tobytes()
            yield(b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
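
gen_social_distancing() above accumulates per-frame and running-average violation counts on a shared analytics object defined elsewhere in the project. A hedged sketch of the holder it assumes, limited to the two attributes used above:

# Hedged sketch of the analytics holder used by Code Example #8; the real
# project presumably defines this elsewhere as a module or shared object.
class Analytics:
    def __init__(self):
        self.current_violations_sd = 0    # violations in the latest frame
        self.average_violations_sd = 0.0  # running mean over processed frames

analytics = Analytics()
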
Code Example #9
def infinite_infer_run():
    # load our YOLO object detector trained on COCO dataset (80 classes)
    print("[INFO] loading YOLO from disk...")
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    # set up local display
    local_display = LocalDisplay('480p')
    local_display.start()
    # check if we are going to use GPU
    if config.USE_GPU:
        # set CUDA as the preferable backend and target
        print("[INFO] setting preferable backend and target to CUDA...")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

    # determine only the *output* layer names that we need from YOLO
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # initialize the video stream and pointer to output video file
    print("[INFO] accessing video stream...")

    writer = None

    # loop over the frames from the video stream
    while True:
        # read the next frame from the file
        ret, frame = awscam.getLastFrame()
        # resize the frame and then detect people (and only people) in it
        frame = imutils.resize(frame, width=700)
        results = detect_people(frame,
                                net,
                                ln,
                                personIdx=LABELS.index("person"))

        # initialize the set of indexes that violate the minimum social
        # distance
        violate = set()
        # ensure there are *at least* two people detections (required in
        # order to compute our pairwise distance maps)
        if len(results) >= 2:
            # extract all centroids from the results and compute the
            # Euclidean distances between all pairs of the centroids
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric="euclidean")

            # loop over the upper triangular of the distance matrix
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    # check to see if the distance between any two
                    # centroid pairs is less than the configured number
                    # of pixels
                    if D[i, j] < config.MIN_DISTANCE:
                        # update our violation set with the indexes of
                        # the centroid pairs
                        violate.add(i)
                        violate.add(j)

            for (i, (prob, bbox, centroid)) in enumerate(results):
                # extract the bounding box and centroid coordinates, then
                # initialize the color of the annotation
                (startX, startY, endX, endY) = bbox
                (cX, cY) = centroid
                color = (0, 255, 0)

                # if the index pair exists within the violation set, then
                # update the color
                if i in violate:
                    color = (0, 0, 255)

                # draw (1) a bounding box around the person and (2) the
                # centroid coordinates of the person,
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
                cv2.circle(frame, (cX, cY), 5, color, 1)

            # draw the total number of social distancing violations on the
            # output frame
            text = "Social Distancing Violations: {}".format(len(violate))
            cv2.putText(frame, text, (10, frame.shape[0] - 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
            print('draw frame!')
            local_display.set_frame_data(frame)

        # draw the total number of social distancing violations on the
        # output frame
        # local_display.set_frame_data(frame)
        msg = '{ "violation":' + str(len(violate)) + '}'
        client.publish(topic=iotTopic, payload=msg)
    threading.Timer(15, infinite_infer_run).start()
        total_frames[index] += 1
        ret[index], frame[index] = video.read()
        if total_frames[index] % 10:
            continue
        if not ret[index]:
            flag = 1
            break

        #frame[index] = imutils.resize(frame[index],width = 700)

        if frame[index] is None:
            print('[i] ==> Done.......!!!')
        mask_detection = detect_mask(frame[index])

        results = detect_people(frame[index],
                                net,
                                ln,
                                personIdx=LABELS.index('person'))

        #Considering each person in the frame.
        """for (i, (prob, bbox, centroid)) in enumerate(results):
            res = detect_mask(ROI)
            mask_detection.append(res)"""

        accuracy = [0] * len(results)
        label = ['None'] * len(results)
        new_label = []

        person_centroids = np.array([r[2] for r in results])
        face_centroids = np.array([r[2] for r in mask_detection])
        #print(mask_detection)
        for (acc, box, (fcX, fcY), lab) in mask_detection: