class Video():
    def __init__(self):
        self.vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])


    def captureRawFrame(self):
        """
        capture frame and reverse RBG BGR and return opencv image
        """
        rawFrame = self.vs.read()
        rawFrame = imutils.resize(rawFrame, width=640)
        self.raw_img = rawFrame
        #return rawFrame

    def convertFrame(self):
        """
        converts frame to format suitable for QtGui
        """
        try:
            self.currentFrame = cv2.cvtColor(self.raw_img, cv2.COLOR_BGR2RGB)
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                               width,
                               height,
                               QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            #self.previousFrame = self.currentFrame
            img = img.scaledToHeight(480)
            img = img.scaledToWidth(360)
            return img
        except Exception:
            return None
class recordVideo():
    def __init__(self):
        # initialize the video stream and allow the camera
        # sensor to warmup
        self.vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])
        
        self.writer = None
        (h, w) = (None, None)
        
    def captureRawFrame(self):
        """
        capture frame and reverse RBG BGR and return opencv image, and also record the video
        """
        rawFrame = self.vs.read()
        rawFrame = imutils.resize(rawFrame, width=640)
        self.raw_img = rawFrame
        #return rawFrame

    def initRecord(self):
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            #(h, w) = self.raw_img.shape[:2]
            self.writer = cv2.VideoWriter('./demoVideo/' + str(int(time.time())) + '.avi',
                                          cv2.VideoWriter_fourcc(*"XVID"), 15,
                                          (640, 480), True)
    def record(self):
        # write the output frame to file
        self.writer.write(self.raw_img)

    def convertFrame(self):
        """
        converts frame to format suitable for QtGui
        """
        try:
            self.currentFrame = cv2.cvtColor(self.raw_img, cv2.COLOR_BGR2RGB)
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                               width,
                               height,
                               QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            #self.previousFrame = self.currentFrame
            img = img.scaledToHeight(480)
            img = img.scaledToWidth(360)
            return img
        except Exception:
            return None
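A minimal usage sketch for the recordVideo class above (not from the original source; it assumes the same imports the classes rely on — time, cv2, numpy, imutils, imutils.video.VideoStream, Qt's QtGui — and that a ./demoVideo/ directory already exists for the output file):

if __name__ == "__main__":
    rec = recordVideo()
    rec.initRecord()                      # opens ./demoVideo/<timestamp>.avi
    try:
        for _ in range(150):              # roughly ten seconds at 15 fps
            rec.captureRawFrame()         # grab and resize the latest frame
            rec.record()                  # append it to the video file
            # inside a Qt application, rec.convertFrame() would supply the
            # QPixmap to display on a QLabel
            time.sleep(1.0 / 15)
    finally:
        rec.writer.release()
        rec.vs.stop()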
Example #3
def main():
	global frame, key
	# initialize the camera and grab a reference to the raw camera capture
	wdth = int(math.floor(360))
	hgth = int(math.floor(800))
	camera = VideoStream(usePiCamera=True,resolution=(wdth,hgth)).start()
	time.sleep(2.0)
	fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	writer = None
	(h,w) = (None, None)
	# setup the mouse callback
	cv2.startWindowThread()
	cv2.namedWindow("Detection")
	cv2.setMouseCallback("Detection",mouseOn)
	# keep looping over the frames
	#for frame2 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	while True:
		frame = camera.read()
		frame = cv2.transpose(frame)
		frame = cv2.flip(frame,1)
		timestamp = datetime.datetime.now()
		ts = timestamp.strftime("%d/%m/%Y %H:%M:%S")
		cv2.putText(frame,ts,(10,frame.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.35,(0,255,0),1)
		if writer is None:
			(h,w) = frame.shape[:2]
			writer = cv2.VideoWriter("/media/usb/test_" + timestamp.strftime("%d_%m_%Y_%H%M") + ".avi", fourcc,5,(w,h), True)
		writer.write(frame)
		cv2.imshow("Detection", frame);
		#cv2.setMouseCallback("Detection",mouseOn)
		#key = cv2.waitKey(10) & 0xFF
		# if the 'q' key is pressed, stop the loop
		if key == ord("q"): #cv2.EVENT_LBUTTONDOWN: #ord("q"):
	#		cv2.destroyAllWindows()
	#		camera.stop()
			break
	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	camera.stop()
Example #4
class TEVideoHandler:
	def __init__(self):
		self.FRAME_WIDTH = conf.DEF_FRAME_WIDTH
		self.FRAME_HEIGHT = conf.DEF_FRAME_HEIGHT

		# devices
		self.video_file = None
		self.camera = None
		self.picamera = None
		
	def set_frame_size(self, w, h):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Frame size needs to be set before initialization")

		self.FRAME_WIDTH = w
		self.FRAME_HEIGHT = h

	def initialize_with_file(self, filename):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.video_file = cv2.VideoCapture(filename)

	def initialize_with_configured_cam(self):
		cam_selector = {
			conf.CameraType.PYCAMERA:			lambda: self.initialize_with_pycamera(),
			conf.CameraType.PYCAMERA_ROBUST:	lambda: self.initialize_with_pycamera2(),
			conf.CameraType.WEBCAM:				lambda: self.initialize_with_webcam(),
		}

		cam_selector[conf.CAMERA_TYPE]()


	def initialize_with_pycamera(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.camera = VideoStream(usePiCamera=True).start()
		time.sleep(2.0)

	# Uses the picamera library directly so that the auto-control features can be disabled
	def initialize_with_pycamera2(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.picamera = PiCamera()
		self.picamera.resolution = (self.FRAME_WIDTH, self.FRAME_HEIGHT)
		self.picamera.framerate = 30
		self.rawCapture = PiRGBArray(self.picamera, size=(self.FRAME_WIDTH, self.FRAME_HEIGHT))

		time.sleep(0.1)

		self.picamera.shutter_speed = self.picamera.exposure_speed
		self.picamera.exposure_mode = 'off'
		g = self.picamera.awb_gains
		self.picamera.awb_mode = 'off'
		self.picamera.awb_gains = g
		self.stream = self.picamera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)


	# Tested with an external webcam, but not checked with a MacBook's built-in webcam
	def initialize_with_webcam(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.camera = VideoStream().start()
		time.sleep(2.0)

	# Read a frame
	# Return: frame (pixel array)
	# Note: if a frame is not grabbed (for a video file), raise an exception
	def read(self):
		frame = None
		if self.video_file is not None:
			(grabbed, frame) = self.video_file.read()
			if not grabbed:
				raise TEInvalidFrameException()
		elif self.camera is not None:
			frame = self.camera.read()
		elif self.picamera is not None:
			data = next(self.stream)
			frame = data.array
			self.rawCapture.truncate(0)

		# If still null frame,
		if frame is None:
			raise TEInvalidFrameException()

		# resize the frame
		frame = imutils.resize(frame, width=self.FRAME_WIDTH)

		return frame

	def release(self):
		if self.video_file is not None:
			self.video_file.release()
		elif self.camera is not None:
			# Pycamera may not have the release function
			if hasattr(self.camera, 'release'): 
				self.camera.release()
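A minimal usage sketch for TEVideoHandler (not part of the original source; conf, TEVideoException, and TEInvalidFrameException come from the surrounding project, and "clip.avi" is a placeholder file name):

handler = TEVideoHandler()
handler.initialize_with_configured_cam()      # or handler.initialize_with_file("clip.avi")
try:
	while True:
		frame = handler.read()                # raises TEInvalidFrameException when a file runs out
		cv2.imshow("TEVideoHandler", frame)
		if cv2.waitKey(1) & 0xFF == ord("q"):
			break
except TEInvalidFrameException:
	pass
finally:
	handler.release()
	cv2.destroyAllWindows()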
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 600 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=600)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # prediction
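        # (hedged continuation, not in the original excerpt: the confidence
        # threshold and the drawing/cleanup below follow the same pattern as
        # the later examples on this page)
        confidence = detections[0, 0, i, 2]

        # filter out weak detections (the 0.5 threshold is an assumption)
        if confidence < 0.5:
            continue

        # compute the (x, y)-coordinates of the bounding box and draw it
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)

    # show the output frame and stop on the 'q' key
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()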
Example #6
import time
import cv2
import imutils
from imutils.video import VideoStream


from matcher import Matcher

matcher = Matcher([("fau-logo", "./templates/fau-logo.png"),
                   ("first-logo", "./templates/first-logo.jpg"),
                   ("nextera-logo", "./templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "./templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=8)

cam = VideoStream(usePiCamera=False).start()

cnt = 0
while True:
    img = cam.read()
    cv2.imshow("Pic", img)
    print(matcher.match(img))
    key = cv2.waitKey(10)
    if key == ord('q'):
        break

cam.stop()
cv2.destroyAllWindows()
Example #7
def main():
    # Definition of color tracking
    #lower = [170, 240, 170]
    #upper = [255, 255, 215]

    #    lower = [0, 0, 254]
    #    upper = [255, 10, 255]

    lower = [0, 0, 0]
    upper = [255, 110, 255]

    kernel = np.ones((3, 3), np.uint8)

    args = get_arguments()
    state = True
    x = 0
    y_up = 0
    y_down = 0

    if args['webcam'] != -1:
        camera = cv2.VideoCapture(0)
    elif args['picamera'] != -1:
        camera = VideoStream(usePiCamera=args['picamera'] > 0).start()
        time.sleep(2.0)
    elif args['query']:
        # load the query image, compute the ratio of the old height to the new height, clone it, and resize it
        image = cv2.imread(args['query'])


#        image = imutils.resize(image, height=320)

    while state:
        if args['webcam'] != -1:
            ret, image = camera.read()

            if not ret:
                break
        elif args['picamera'] != -1:
            image = camera.read()

        cv2.imshow("Image", image)
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        height, width, color = image.shape
        print(height, width, color)
        # create NumPy arrays from the boundaries
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        thresh = cv2.inRange(hsv, lower, upper)
        bitwise = cv2.bitwise_and(image, image, mask=thresh)

        opening = cv2.morphologyEx(bitwise, cv2.MORPH_OPEN, kernel)
        output = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
        cv2.imshow("images", output)

        for r in range(0, height - 1):
            for c in range(0, width - 1):
                if (output[r][c][0] >= 10):
                    x = c
                    y_up = r
                    break

            if x != 0:
                break

        for r in range(0, height - 1):
            for c in range(x - 10, x + 10):
                if (output[r][c][0] >= 10):
                    y_down = r

        print(x, y_up, y_down, y_up - y_down)
        cv2.circle(output, (int(x), int(y_up)), 10, (0, 0, 255), 2)
        cv2.circle(output, (int(x), int(y_down)), 5, (0, 255, 255), 2)
        cv2.imshow("result", output)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
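    # (hedged cleanup, not in the original excerpt) release whichever camera was opened
    if args['webcam'] != -1:
        camera.release()
    elif args['picamera'] != -1:
        camera.stop()
    cv2.destroyAllWindows()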
Example #8
def Camera():
    def detect_and_predict_mask(frame, faceNet, maskNet):
        # grab the dimensions of the frame and then construct a blob
        # from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        faceNet.setInput(blob)
        detections = faceNet.forward()

        # initialize our list of faces, their corresponding locations,
        # and the list of predictions from our face mask network
        faces = []
        locs = []
        preds = []

        # loop over the detections
        for i in range(0, detections.shape[2]):

            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # ensure the bounding boxes fall within the dimensions of
                # the frame
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                # extract the face ROI, convert it from BGR to RGB channel
                # ordering, resize it to 224x224, and preprocess it

                face = frame[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)

                # add the face and bounding boxes to their respective
                # lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

        # only make predictions if at least one face was detected
        if len(faces) > 0:
            # for faster inference we'll make batch predictions on *all*
            # faces at the same time rather than one-by-one predictions
            # in the above `for` loop
            faces = np.array(faces, dtype="float32")
            preds = maskNet.predict(faces, batch_size=32)

        # return a 2-tuple of the face locations and their corresponding
        # predictions
        return (locs, preds)

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-f",
                    "--face",
                    type=str,
                    default="face_detector",
                    help="path to face detector model directory")
    ap.add_argument("-m",
                    "--model",
                    type=str,
                    default="mask_detector.model",
                    help="path to trained face mask detector model")
    ap.add_argument("-c",
                    "--confidence",
                    type=float,
                    default=0.5,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
    weightsPath = os.path.sep.join(
        [args["face"], "res10_300x300_ssd_iter_140000.caffemodel"])
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    maskNet = load_model(args["model"])

    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # detect faces in the frame and determine if they are wearing a
        # face mask or not
        (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

        # loop over the detected face locations and their corresponding
        # predictions
        for (box, pred) in zip(locs, preds):
            # unpack the bounding box and predictions
            (startX, startY, endX, endY) = box
            (mask, withoutMask) = pred

            # determine the class label and color we'll use to draw
            # the bounding box and text
            label = "Mask" if mask > withoutMask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

            # display the label and bounding box rectangle on the output
            # frame
            cv2.putText(frame, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
redUpper1 = (10, 255, 255)
redLower2 = (160, 100, 100)
redUpper2 = (179, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
(dX, dY) = (0, 0)

video_stream = VideoStream(usePiCamera=False, resolution=(640,480), framerate=32).start()
time.sleep(2)

# keep looping
while True:
	# grab the current frame
	# image = video_stream.read()
	frame = video_stream.read()

	# resize the frame, blur it, and convert it to the HSV
	# color space
	# frame = imutils.resize(image, width=400)
	blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	# construct a mask for the color "red" (split across two HSV
	# ranges), then perform a series of dilations and erosions to
	# remove any small blobs left in the mask
	mask1 = cv2.inRange(hsv, redLower1, redUpper1)
	mask2 = cv2.inRange(hsv, redLower2, redUpper2)
	mask = mask1 + mask2
	mask = cv2.erode(mask, None, iterations=2)
	mask = cv2.dilate(mask, None, iterations=2)
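	# (hedged continuation, not in the original excerpt) find the largest
	# red blob in the cleaned-up mask and mark it, following the usual
	# ball-tracking pattern; the minimum-radius check is an assumption
	cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	if len(cnts) > 0:
		c = max(cnts, key=cv2.contourArea)
		((x, y), radius) = cv2.minEnclosingCircle(c)
		if radius > 10:
			cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)

	# show the frame and stop on the 'q' key
	cv2.imshow("Frame", frame)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break

# do a bit of cleanup
video_stream.stop()
cv2.destroyAllWindows()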
Example #10
class VideoCamera(object):
    def __init__(self, prototxt, model, confidence=0.2):
        self.prototxt = prototxt
        self.model = model
        self.confidence = confidence
        print("[INFO] loading model...")
        self.net = cv2.dnn.readNetFromCaffe(self.prototxt, self.model)

        # initialize the video stream, allow the camera sensor to warm up,
        # and initialize the FPS counter
        print("[INFO] starting video stream...")
        self.vs = VideoStream(src=0).start()

        self.CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]
        self.COLORS = np.random.uniform(0, 255, size=(len(self.CLASSES), 3))

        self.fps = FPS().start()

    def __del__(self):
        self.vs.stop()
        self.fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

    def get_frame(self):
        frame = self.vs.read()
        frame = imutils.resize(frame, width=400)

        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and
        # predictions
        self.net.setInput(blob)
        detections = self.net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > self.confidence:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(self.CLASSES[idx],
                                             confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              self.COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.COLORS[idx], 2)

        # update the FPS counter
        self.fps.update()
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
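get_frame() returning raw JPEG bytes suggests this class is meant to feed an MJPEG HTTP stream; a minimal Flask sketch of that wiring (Flask itself, the route name, and the MobileNetSSD_deploy.* file names are assumptions, not part of the original example):

from flask import Flask, Response

app = Flask(__name__)
# the model file names below are placeholders for whatever Caffe prototxt/weights are used
camera = VideoCamera("MobileNetSSD_deploy.prototxt", "MobileNetSSD_deploy.caffemodel")

def generate(cam):
    # yield a multipart MJPEG stream, one JPEG frame per part
    while True:
        frame = cam.get_frame()
        yield (b"--frame\r\n"
               b"Content-Type: image/jpeg\r\n\r\n" + frame + b"\r\n")

@app.route("/video_feed")
def video_feed():
    return Response(generate(camera),
                    mimetype="multipart/x-mixed-replace; boundary=frame")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)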
def mainvideosavecam():
    j=1
    dict={}
    previds=[-1]
    current_directory = os.getcwd()
    final_directory = os.path.join(current_directory, r'frames_cam')
    if not os.path.exists(final_directory):
        os.makedirs(final_directory)
    prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
    weightsPath = os.path.sep.join(["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"])
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
    maskNet = load_model(os.path.sep.join(["face_detector", "mask_detector2.h5"]))
    vs = VideoStream(0).start()
    time.sleep(2.0)
    while True:
        rects = []
        frame = vs.read()
        #frame = imutils.resize(frame, width=1080)
        new = detect_and_predict_mask(frame, faceNet, maskNet)
        now = datetime.now()
        date_string = now.strftime("%d/%m/%Y")
        time_string = now.strftime("%H:%M:%S")
        for box in new:
            (startX, startY, endX, endY) = box
            #mask = pred[0][0]
            #withoutmask = pred[0][1]        
            #label = "Mask" if mask > withoutmask else "No mask"
            #color = (0, 255, 0) if label == "Mask" else (0, 0, 255)        
            #label = "{}: {:.2f}%".format(label, max(mask, withoutmask) * 100)
            #cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            #cv2.rectangle(frame, (startX, startY), (endX, endY), (0,255,255), 2)
            
            
            rects.append(box)
            
        boundingboxes = np.array(rects)
        boundingboxes = boundingboxes.astype(int)
        rects = non_max_suppression_fast(boundingboxes, 0.3)
        predictions=[]
        faces=[]
        ids=[]
        objects = tracker.update(rects)
        
        for (objectId, bbox) in objects.items():
            ids.append(objectId)
            
        for (objectId, bbox) in objects.items():
            
            try:
                x1, y1, x2, y2 = bbox
                x1 = int(x1)
                y1 = int(y1)
                x2 = int(x2)
                y2 = int(y2)
                #print(previds)
                #print(len(objects.items()))
                face = frame[y1:y2, x1:x2]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)
                face = np.expand_dims(face, axis=0)
                #faces.append(face)
                pred = maskNet.predict(face)
                #print(predictions)
                mask = pred[0][0]
                withoutmask = pred[0][1]        
                label = "Mask" if mask > withoutmask else "No mask"
                color = (0, 255, 0) if label == "Mask" else (0, 0, 255)        
                label = "{}: {:.2f}%".format(label, max(mask, withoutmask) * 100)
                cv2.putText(frame, label, (x1, y1 - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
                text = "ID: {}".format(objectId)
                cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
                
                for id1 in ids:
                    k=0
                    l=0
                    for id2 in previds:
                        if id1 == id2:    
                            break
                        elif id1 in previds:
                            k=1
                        elif id1>id2 and k==0:
                            #print(id1)
                            cv2.imwrite(os.path.join(final_directory,str(id1)+".jpg"), frame[startY:endY, startX:endX])
                            if j==1:
                                dict.update({'Number':[j], 'Date':date_string, 'Time':time_string, 'Label':label})
                                df = pd.DataFrame(dict)
                                df.to_csv(r'recordfile_videocam.csv', index=False)
                            else:
                                dict.update({'Number':[j], 'Date':date_string, 'Time':time_string, 'Label':label})
                                df = pd.DataFrame(dict)
                                df.to_csv(r'recordfile_videocam.csv',mode='a', header=False,index=False)
                            j+=1                        
                            previds.append(id1)
                        elif id2 in ids:
                            #print("yes")
                            l=1
                        elif l==0:
                            #print("yes")
                            previds.remove(id2)
                            
                    for id1 in ids:
                        l=0
                        for id2 in previds:
                            if id2 in ids:
                                #print("yes")
                                l=1
                            elif l==0:
                                #print("yes")
                                previds.remove(id2)
                                
            except Exception:
                continue                        
        cv2.imshow("Frame", frame)     
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            cv2.destroyAllWindows()
            vs.stop()
            break
    cv2.destroyAllWindows()
    vs.stop()
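non_max_suppression_fast and tracker above come from elsewhere in the project; a hedged sketch of the usual fast NMS helper (the widely used Malisiewicz-style implementation, with numpy imported as np; the 0.3 overlap threshold is supplied by the caller):

def non_max_suppression_fast(boxes, overlapThresh):
    # nothing to suppress
    if len(boxes) == 0:
        return []
    boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    pick = []
    while len(idxs) > 0:
        # pick the box with the largest bottom-right y still in the list
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # overlap of the remaining boxes with the picked box
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        # drop the picked box and everything overlapping it too much
        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
    return boxes[pick].astype("int")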


        


#pcount= centroidtracker.peoplecount

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while True:
	# grab the next frame and handle if we are reading from either
	# VideoCapture or VideoStream
	frame = vs.read()  # vs wraps either a VideoCapture (file input) or a VideoStream (webcam)
	frame = frame[1] if args.get("input", False) else frame  # VideoCapture.read() returns a (grabbed, frame) tuple
	pc()

	# if we are viewing a video and we did not grab a frame then we
	# have reached the end of the video
	if args["input"] is not None and frame is None:
		break

	# resize the frame to have a maximum width of 500 pixels (the
	# less data we have, the faster we can process it), then convert
	# the frame from BGR to RGB for dlib
	frame = imutils.resize(frame, width=500)
	rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

	# if the frame dimensions are empty, set them
Example #13
class CoralMain:
    def __init__(self):
        """
        Initializations
        """

        print("[INFO] Beginning initialization of Coral Main File")

        # initialize the labels dictionary
        self.labels = {}
        self.labels[0] = "Can"

        # Argument Parser for model path
        ap = argparse.ArgumentParser()
        ap.add_argument("-m", "--model", required=True,
                        help="path to TensorFlow Lite object detection model")
        args = vars(ap.parse_args())

        # load the Google Coral object detection model
        print("[INFO] loading Coral model...")
        self.model = DetectionEngine(args["model"])

        # initialize the video stream and allow the camera sensor to warmup
        self.vs = VideoStream(src=0).start()
        time.sleep(2.0)

        print("Finished Initialization of Model and Video")

        # Socket Variables
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect(('192.168.1.230', 8485))
        self.connection = self.client_socket.makefile('wb')
        self.img_counter = 0

        # Video Variables
        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
        self.h = 480  # DO NOT CHANGE
        self.w = 500  # DO NOT CHANGE
        self.threshold = 0.6

    def main_process(self):
        """
        Main Process
        :return: void
        """

        # loop over the frames from the video stream
        while True:
            # grab the frame from the threaded video stream and resize it
            frame = self.vs.read()

            # to have a maximum width of 500 pixels
            frame = imutils.resize(frame, width=500)
            orig = frame.copy()

            # prepare the frame for object detection by converting (1) it
            # from BGR to RGB channel ordering and then (2) from a NumPy
            # array to PIL image format
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # make predictions on the input frame
            frame = Image.fromarray(frame)
            results = self.model.detect_with_image(frame, threshold=self.threshold, keep_aspect_ratio=True,
                                                   relative_coord=False)
            # Variable Init
            largest_box = -999
            largest_area = -999
            centroid = None

            # loop over the results
            for r in results:
                # extract the bounding box and predicted class label
                box = r.bounding_box.flatten().astype("int")
                (startX, startY, endX, endY) = box
                area = (endY - startY) * (endX - startX)

                # If this area is larger than any area before it make it the largest area
                if area > largest_area:
                    largest_area = area
                    centroid = (int(startX + (endX - startX) / 2), int(startY + (endY - startY) / 2))
                    largest_box = (endX - startX) * (endY - startY)

                label = self.labels[r.label_id]
                # draw the bounding box and label on the image
                cv2.rectangle(orig, (startX, startY), (endX, endY),
                              (0, 255, 0), 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                text = "{}: {:.2f}%".format(label, r.score * 100)
                cv2.putText(orig, text, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Draw the centroid of the largest detected object (if any) and the frame centre
            if centroid is not None:
                cv2.circle(orig, centroid, 4, (0, 255, 0), 2)
            else:
                centroid = (-99, -99)
            cv2.circle(orig, (int(self.w / 2), int(self.h / 2) - 20), 4, (0, 0, 255), 2)

            # Socket Connection
            self.socket_con(orig, centroid, largest_box)

            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

        # do a bit of cleanup
        cv2.destroyAllWindows()
        self.vs.stop()

    def socket_con(self, frame, centroid, largest_box_size):
        """
        Socket Connection
        :param frame: the image frame
        :param centroid: a tuple of the centroid
        :param largest_box_size: the area of the largest detection
        :return: void
        """

        # Encode the image and pickle the frame and centroid into an array
        result, frame = cv2.imencode('.jpg', frame, self.encode_param)
        data = pickle.dumps((frame, centroid, largest_box_size), 0)
        size = len(data)

        # Send the image and data over the socket connection
        print("{}: {}".format(self.img_counter, size))
        self.client_socket.sendall(struct.pack(">L", size) + data)
        self.img_counter += 1
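socket_con() sends a 4-byte big-endian length prefix followed by a pickled (JPEG buffer, centroid, box size) tuple; a hedged sketch of a matching receiver on the 8485 endpoint (assumed, not part of the original source):

import pickle
import socket
import struct

import cv2

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("0.0.0.0", 8485))
server.listen(1)
conn, _ = server.accept()

data = b""
payload_size = struct.calcsize(">L")
while True:
    # read until the 4-byte length prefix is complete
    while len(data) < payload_size:
        data += conn.recv(4096)
    msg_size = struct.unpack(">L", data[:payload_size])[0]
    data = data[payload_size:]
    # read until the whole pickled payload has arrived
    while len(data) < msg_size:
        data += conn.recv(4096)
    payload, data = data[:msg_size], data[msg_size:]
    jpeg, centroid, largest_box = pickle.loads(payload)
    frame = cv2.imdecode(jpeg, cv2.IMREAD_COLOR)   # jpeg is the numpy buffer from imencode
    cv2.imshow("Receiver", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break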
            face = preprocess_input(face)

            roi.append(face)
            location.append((x_start, y_start, x_end, y_end))

    if len(roi) > 0:

        roi = np.array(roi, dtype='float32')
        prediction = trained_model.predict(roi, batch_size=32)

    return (location, prediction)


while True:

    frame = video.read()

    frame = imutils.resize(frame, width=400)

    (location, prediction) = detection_and_prediction(frame, weightpath,
                                                      protopath)

    for (box, pred) in zip(location, prediction):

        (x_start, y_start, x_end, y_end) = box
        (mask, withoutmask) = pred

        if mask > withoutmask:
            label = 'Mask'
            color = (0, 255, 0)
Example #15
def calibration(usnm, mode):

    # define three constants: one eye aspect ratio threshold to indicate a
    # blink, and a lower and upper limit on the number of consecutive
    # frames the eye must stay below that threshold

    name = usnm + "_" + mode

    EYE_AR_THRESH = 0.19
    EYE_AR_CONSEC_FRAMES_MIN = 3
    EYE_AR_CONSEC_FRAMES_MAX = 32
    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")

    vs = VideoStream(src=0)
    vs.start()

    time.sleep(1.0)
    clsd = 0
    frameno = 0
    blink_data = []
    start = time.time()
    # loop over frames from the video stream
    while True:

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        # channels)
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = e_a_r(leftEye)
            rightEAR = e_a_r(rightEye)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            #blink_data.append([frameno, ear])
            #frameno +=1

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1
                blink_data.append([frameno, ear, leftEAR, rightEAR, 0])
                frameno += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of frames,
                # then increment the total number of blinks

                blink_data.append([frameno, ear, leftEAR, rightEAR, 0])
                if TOTAL == 0:
                    if COUNTER >= EYE_AR_CONSEC_FRAMES_MIN and COUNTER <= EYE_AR_CONSEC_FRAMES_MAX:
                        TOTAL = 1

                        for i in range(frameno - COUNTER, frameno, 1):
                            blink_data[i][-1] = 1
                        print("\n bLINK AT :", (frameno - COUNTER), " for:",
                              COUNTER, " frames\n")
                        # initialize a counter for the inter-blink interval after detecting the first blink
                        IBT = 0

                elif TOTAL > 0:

                    if COUNTER >= EYE_AR_CONSEC_FRAMES_MIN and COUNTER <= EYE_AR_CONSEC_FRAMES_MAX and IBT >= 2:
                        TOTAL += 1

                        for i in range(frameno - COUNTER, frameno, 1):
                            blink_data[i][-1] = 1
                        print("\n bLINK AT :", (frameno - COUNTER), " for:",
                              COUNTER, " frames\n")
                    elif COUNTER < EYE_AR_CONSEC_FRAMES_MIN and COUNTER > 0:
                        clsd = COUNTER
                        frm = (frameno) - COUNTER
                        blink_data[frameno - COUNTER][-1] = 0
                        print(" Not blink: {} , from frame: {}".format(
                            clsd, frm))
                    elif COUNTER == 0:
                        IBT += 1
                    else:
                        clsd = COUNTER
                        frm = (frameno) - COUNTER
                        for i in range((frameno - COUNTER), frameno, 1):
                            blink_data[(frameno) - i][-1] = 0
                        print(" Not blink: {} , from frame: {}".format(
                            clsd, frm))

                frameno += 1
                # reset the eye frame counter
                COUNTER = 0
            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

            if mode == 't1':
                cv2.putText(frame, " Blink type 1 Press 'q' after blinking",
                            (1, 310), cv2.FONT_HERSHEY_SIMPLEX, 0.3,
                            (0, 0, 255), 2)
            elif mode == 't2':
                cv2.putText(frame, " Blink type 2 Press 'q' after blinking ",
                            (1, 310), cv2.FONT_HERSHEY_SIMPLEX, 0.3,
                            (0, 0, 255), 2)

            cv2.putText(frame, "  sufficient number of times (at least 15) ",
                        (1, 330), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255),
                        2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            name = name + '.pickle'
            pickle.dump(blink_data, open(name, "wb"))
            end = time.time()
            #print("\n Total frames captured :", frameno-1," in:",end - start ," seconds\n")
            break

    # do a bit of cleanup
    vs.stop()

    cv2.destroyAllWindows()

    svmtrnr(usnm, mode)
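e_a_r() and svmtrnr() above are defined elsewhere in the project; a hedged sketch of the usual eye-aspect-ratio helper the loop appears to rely on (six (x, y) landmarks per eye, as produced by face_utils.shape_to_np):

from scipy.spatial import distance as dist

def e_a_r(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # eye aspect ratio: drops towards zero as the eye closes
    return (A + B) / (2.0 * C)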
Example #16
def call_cam():
    from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
    from tensorflow.keras.preprocessing.image import img_to_array
    from tensorflow.keras.models import load_model
    from imutils.video import VideoStream
    import numpy as np
    import imutils
    import time
    import cv2
    import os
    import sys
    from tkinter import messagebox
    import threading
    def detect_and_predict_mask(frame, faceNet, maskNet):
        # grab the dimensions of the frame and then construct a blob
        # from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
                                     (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        faceNet.setInput(blob)
        detections = faceNet.forward()
        # print(detections.shape)

        # initialize our list of faces, their corresponding locations,
        # and the list of predictions from our face mask network
        faces = []
        locs = []
        preds = []

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > 0.5:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # ensure the bounding boxes fall within the dimensions of
                # the frame
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                # extract the face ROI, convert it from BGR to RGB channel
                # ordering, resize it to 224x224, and preprocess it
                face = frame[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)

                # add the face and bounding boxes to their respective
                # lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

        # only make predictions if at least one face was detected
        if len(faces) > 0:
            # for faster inference we'll make batch predictions on *all*
            # faces at the same time rather than one-by-one predictions
            # in the above `for` loop
            faces = np.array(faces, dtype="float32")
            preds = maskNet.predict(faces, batch_size=32)

        # return a 2-tuple of the face locations and their corresponding
        # locations
        return (locs, preds)

    # load our serialized face detector model from disk
    prototxtPath = r"D:\Face-Mask-Detection-master\Face-Mask-Detection-master\face_detector\deploy.prototxt"
    weightsPath = r"D:\Face-Mask-Detection-master\Face-Mask-Detection-master\face_detector\res10_300x300_ssd_iter_140000.caffemodel"
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    maskNet = load_model(r"D:\Face-Mask-Detection-master\Face-Mask-Detection-master\mask_detector.model")

    # initialize the video stream
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()

    # loop over the frames from the video stream

    t1 = time.time() # this t1 is used to calculate 4 seconds since the camera was ON
    mask_list = [] # this empty list will be used to calculate the average of Wearing Mask
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # detect faces in the frame and determine if they are wearing a
        # face mask or not
        (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

        # loop over the detected face locations and their corresponding
        # predictions
        for (box, pred) in zip(locs, preds):
            # unpack the bounding box and predictions
            (startX, startY, endX, endY) = box
            (mask, withoutMask) = pred

            # determine the class label and color we'll use to draw
            # the bounding box and text
            label = "Mask" if mask > withoutMask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)




            mask_list.append(mask)

            if time.time() - t1 > 4:  # when 4 seconds have passed

                mask_avg = sum(mask_list) / len(mask_list)

                if mask_avg * 100 > 90:  # when the average of wearing a mask is > 90%

                    messagebox.showinfo('Welcome', 'Welcome to the building, Thank you for wearing your mask')
                    exit()  # after welcoming the employee, close the program

                else:
                    messagebox.showinfo('WEAR IT!', 'Wear your MASK !!')
                    exit()  # after alerting the employee, close the program so they can try again after wearing the mask


            # display the label and bounding box rectangle on the output
            # frame

            cv2.putText(frame, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #17
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net
    cap = None
    if args.input == 'cam':
        input_stream = 0
        cap = VideoStream().start()
    elif args.input == 'picam':
        cap = VideoStream(usePiCamera=True, resolution=(640, 480)).start()
        time.sleep(2)
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        cap = VideoStream(src=input_stream).start()
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = False
    render_time = 0
    frame = cap.read()
    starttime = 0
    while frame is not None:
        if is_async_mode:
            next_frame = cap.read()
            original = next_frame.copy()
        else:
            frame = cap.read()
            original = frame.copy()

        initial_h, initial_w, depth = frame.shape
        # Main sync point:
        # in the truly async mode we start the NEXT infer request while waiting for the CURRENT one to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id,
                                 inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id,
                                 inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            objlist = []
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    class_id = int(obj[1])
                    # Draw box and label\class_id
                    color = (min((7 - class_id) * 12.5,
                                 255), min((7 - class_id) * 7,
                                           255), min((7 - class_id) * 5, 255))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(
                        class_id)
                    cv2.putText(
                        frame,
                        det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                        (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color,
                        1)
                    objlist.append(det_label)
            if len(objlist) > 0:
                if (time.time() - starttime) > 10:
                    starttime = time.time()
                    timestamp = '{:%y-%m-%d_H_%M_%S}'.format(
                        datetime.datetime.now())
                    items = ['spoon', 'knife', 'fork', 'plate', 'cup', 'food']

                    outname = ""
                    for item in items:
                        if item in objlist:
                            outname = item
                            break
                    filename = outname + '_' + timestamp + '.jpg'
                    cv2.imwrite(filename, original)

            # Draw performance stats
            inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        #
        render_start = time.time()
        cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if key == 27:
            break
        if key == 9:  # Tab toggles between sync and async mode
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format(
                "async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()
    del exec_net
    del plugin
Example #18
class DualCamera():
    def __init__(self):
        self.root = Tk()
        self.root.wm_title("Dual Cam")

        self.saving = False
        self.frames = []
        self.overlay = False

        self.image_left = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(np.zeros((256, 256)))))
        self.image_panel_left = Label(self.root, image = self.image_left)
        self.image_panel_left.grid(row = 0, column = 0, columnspan=2)

        self.image_right = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(256 * np.random.rand(256, 256))))
        self.image_panel_right = Label(self.root, image = self.image_right)
        self.image_panel_right.grid(row = 0, column = 2, columnspan=2)

        self.save_button = Button(width = 10, height = 2, text = 'Save', command=self.save)
        self.save_button.grid(row = 1, column = 0)

        self.calibrate_button = Button(width = 10, height = 2, text = 'Calibrate', command=self.calibrate)
        self.calibrate_button.grid(row = 1, column = 1)

        self.close_button = Button(width = 10, height = 2, text = 'Close', command=self.quit)
        self.close_button.grid(row = 1, column = 3)

        self.overlay_button = Button(width=10, height=2, text='Overlay', command=self.toggle_overlay)
        self.overlay_button.grid(row = 1, column = 2)

        self.bias_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.bias)
        self.bias_slider.grid(row = 2, column = 1, columnspan=3)
        self.bias_label = Label(self.root, text="Bias current")
        self.bias_label.grid(row=2, column=0)

        self.clock_slider = Scale(self.root, from_=0, to=63, length=400, orient=HORIZONTAL, command=self.clock)
        self.clock_slider.grid(row = 3, column = 1, columnspan=3)
        self.clock_label = Label(self.root, text="Clock speed")
        self.clock_label.grid(row=3, column=0)

        self.cm_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.cm)
        self.cm_slider.grid(row = 4, column = 1, columnspan=3)
        self.cm_label = Label(self.root, text="CM current")
        self.cm_label.grid(row=4, column=0)

        # set default positions
        self.cm_slider.set(0x0C)
        self.clock_slider.set(0x15)
        self.bias_slider.set(0x05)


        # initialize visible camera
        self.vs = VideoStream(usePiCamera=True).start()

        # thread for reading from the sensor hardware into an image queue
        self.ir_images = Queue.LifoQueue()
        self.ir_commands = Queue.Queue()
        self.ir_calibrate = threading.Event()
        self.ir_stop = threading.Event()
        self.raw_ir_images = Queue.LifoQueue()

        self.capture_thread = threading.Thread(
                        target=ir_capture,
                        name="capture_thread",
                        args=[self.ir_images, self.ir_calibrate, self.ir_stop, self.ir_commands, self.raw_ir_images]
                        )

        self.capture_thread.start()

        self.ticktock()
        self.root.mainloop()

    def ticktock(self):
        # grab an image from the camera
        frame = self.vs.read()
        changed = False

        if frame is not None:
            frame = imutils.resize(frame, height=240)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.last_vis_frame = frame
            image = Image.fromarray(frame)

            if not self.overlay:
                self.image_right = ImageTk.PhotoImage(image=image)
                self.image_panel_right.configure(image=self.image_right);

            changed = True

        if not self.ir_images.empty():
            ir_frame = self.ir_images.get()

            if not self.ir_images.empty():
                with self.ir_images.mutex:
                    self.ir_images.queue = []

            ir_image = imutils.resize(ir_frame, height=240, inter=cv2.INTER_LINEAR)
            ir_image = np.dstack((ir_image, ir_image, ir_image))
            ir_image = cv2.LUT(ir_image, colormap).astype('uint8')
           
            self.last_ir_frame = ir_image
            self.image_left = ImageTk.PhotoImage(image=Image.fromarray(ir_image))
            self.image_panel_left.configure(image=self.image_left)
            changed = True

        if changed and self.overlay:
            overlay_image = np.zeros_like(self.last_vis_frame)

            overlay_image[:,:,2] = 0.125 * self.last_vis_frame[:,:,0] + 0.25 * self.last_vis_frame[:,:,1] + 0.125 * self.last_vis_frame[:,:,2]
            converted_frame = cv2.cvtColor(self.last_ir_frame, cv2.COLOR_RGB2HSV)
            overlay_image[:,40:280,2] += 0.5 * converted_frame[:,:,2]
            overlay_image[:,40:280,1] = converted_frame[:,:,1]
            overlay_image[:,40:280,0] = converted_frame[:,:,0]

            overlay_image = cv2.cvtColor(overlay_image, cv2.COLOR_HSV2RGB)

            self.image_right = ImageTk.PhotoImage(image=Image.fromarray(overlay_image))
            self.image_panel_right.configure(image=self.image_right)

        if self.saving:
            if not self.raw_ir_images.empty():
                ir_frame = self.raw_ir_images.get()

                if not self.raw_ir_images.empty():
                    with self.raw_ir_images.mutex:
                        self.raw_ir_images.queue = []
            else:
                ir_frame = None

            self.frames.append((frame, ir_frame))

        if not self.ir_calibrate.isSet():
            self.calibrate_button.configure(text = 'Calibrate', command=self.calibrate, state="normal")

        self.root.after(100, self.ticktock)

    def quit(self):
        self.vs.stop()
        self.ir_stop.set()
        self.root.quit()

    def save(self):
        self.save_button.configure(text = 'Stop Saving', command=self.stop_save)
        self.saving = True
        self.frames = []

    def stop_save(self):
        self.save_button.configure(text = 'Save', command=self.save)
        now = time.strftime("%Y-%m-%dT%H:%M:%S")
        pickle.dump(self.frames, open(now + ".p", "wb"))
        self.frames = []

    def calibrate(self):
        self.calibrate_button.configure(text = 'Calibrating...', state="disabled")
        self.ir_calibrate.set()

    def cm(self, val):
        val = int(val)
        self.ir_commands.put(('cm', val))

    def bias(self, val):
        val = int(val)
        self.ir_commands.put(('bias', val))

    def clock(self, val):
        val = int(val)
        self.ir_commands.put(('clock', val))

    def toggle_overlay(self):
        if self.overlay:
            self.overlay = False
            self.overlay_button.configure(text = 'Overlay')
        else:
            self.overlay = True
            self.overlay_button.configure(text = 'No Overlay')
Example #19
    def facial_recognise(self):
        # construct the argument parser and parse the arguments
        ap = argparse.ArgumentParser()
        ap.add_argument("-e",
                        "--encodings",
                        default="encodings.pickle",
                        help="path to serialized db of facial encodings")
        ap.add_argument("-r",
                        "--resolution",
                        type=int,
                        default=240,
                        help="Resolution of the video feed")
        ap.add_argument(
            "-d",
            "--detection-method",
            type=str,
            default="hog",
            help="face detection model to use: either `hog` or `cnn`")
        args = vars(ap.parse_args())

        # load the known faces and embeddings
        print("[INFO] loading encodings...")
        data = pickle.loads(open(args["encodings"], "rb").read())

        # initialize the video stream and then allow the camera sensor to warm up
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

        # loop over frames from the video file stream
        while True:
            # grab the frame from the threaded video stream
            frame = vs.read()

            # convert the input frame from BGR to RGB, then resize it to the
            # requested width (to speed up processing)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            rgb = imutils.resize(rgb, width=args["resolution"])

            # detect the (x, y)-coordinates of the bounding boxes
            # corresponding to each face in the input frame, then compute
            # the facial embeddings for each face
            boxes = face_recognition.face_locations(
                rgb, model=args["detection_method"])
            encodings = face_recognition.face_encodings(rgb, boxes)
            names = []

            # loop over the facial embeddings
            for encoding in encodings:
                # attempt to match each face in the input image to our known
                # encodings
                matches = face_recognition.compare_faces(
                    data["encodings"], encoding)
                name = "Unknown"

                # check to see if we have found a match
                if True in matches:
                    # find the indexes of all matched faces then initialize a
                    # dictionary to count the total number of times each face
                    # was matched
                    matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                    counts = {}

                    # loop over the matched indexes and maintain a count for
                    # each recognized face
                    for i in matchedIdxs:
                        name = data["names"][i]
                        counts[name] = counts.get(name, 0) + 1

                    # determine the recognized face with the largest number
                    # of votes (note: in the event of an unlikely tie Python
                    # will select the first entry in the dictionary)
                    name = max(counts, key=counts.get)

                # update the list of names
                names.append(name)

            # loop over the recognized faces
            email = None
            for name in names:
                # print the identified person to the console
                print("Person found: {}".format(name))
                email = name
                # Set a flag to sleep the cam for fixed time
                break

            return email
        # do a bit of cleanup
        vs.stop()
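# --- Added sketch (not part of the example above) ---
# The voting step commented above can be tried in isolation: matches is a
# boolean list aligned with the known names, and the recognised name is the
# one with the most True votes. The data below is made up for illustration.
matches = [True, False, True, True, False]
known_names = ["alice", "bob", "alice", "alice", "bob"]

name = "Unknown"
if True in matches:
    matchedIdxs = [i for (i, b) in enumerate(matches) if b]
    counts = {}
    for i in matchedIdxs:
        counts[known_names[i]] = counts.get(known_names[i], 0) + 1
    # ties resolve to whichever key max() encounters first
    name = max(counts, key=counts.get)

print(name)   # -> "alice"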
Example #20
y_offset2 = 250
x_offset2 = 300
y_offset3 = 250
x_offset3 = 0
y_offset4 = 700
x_offset4 = 50

imageCanvas = np.zeros((height, width, channels), np.uint8)

Stream0 = VideoStream(3).start()
Stream1 = VideoStream(1).start()
Stream2 = VideoStream(2).start()
Stream3 = VideoStream(4).start()
time.sleep(2.0)

streamL = Stream0.read()
streamF = Stream1.read()
streamR = Stream2.read()
streamB = Stream3.read()

streamL = imutils.resize(streamL, width=400)
streamF = imutils.resize(streamF, width=400)
streamR = imutils.resize(streamR, width=400)
streamB = imutils.resize(streamB, width=400)

screenCntL = find_points(streamL)
screenCntF = find_points(streamF)
screenCntR = find_points(streamR)
screenCntB = find_points(streamB)

ML = four_point_transform(streamL, screenCntL.reshape(4, 2))
# Set initial frame size.
frameSize = (1020, 720)

# Setup video stream
vs = VideoStream(src=0,
                 usePiCamera=PiCamera,
                 resolution=frameSize,
                 framerate=32).start()

# Allow camera to setup.
time.sleep(2.0)
i = 0

while 1:
    # Read the video stream
    img = vs.read()
    # Convert frame into grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find faces in frame
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    if len(faces) == 0:
        # render a message on frame with no face detected
        cv2.putText(img, "NO FACE DETECTED", (340, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4, cv2.LINE_AA)
    else:
        # render a message on frame with face detected
        cv2.putText(img, "FACE DETECTED", (340, 40), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 255, 0), 4, cv2.LINE_AA)
        for (x, y, w, h) in faces:
            # Draw a rectangle around every detected face
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
print("[INFO] starting cameras...")
leftStream = VideoStream(src=1).start()
#rightStream = VideoStream(usePiCamera=True).start()
rightStream = VideoStream(src=0).start()
time.sleep(2.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
motion = BasicMotionDetector(minArea=500)
total = 0

# loop over frames from the video streams
while True:
	# grab the frames from their respective video streams
	left = leftStream.read()
	right = rightStream.read()

	# resize the frames
	left = imutils.resize(left, width=400)
	right = imutils.resize(right, width=400)

	# stitch the frames together to form the panorama
	# IMPORTANT: you might have to change this line of code
	# depending on how your cameras are oriented; frames
	# should be supplied in left-to-right order
	result = stitcher.stitch([left, right])

	# no homography could be computed
	if result is None:
		print("[INFO] homography could not be computed")
Example #23
        except:
            write_to_log("Log.txt", objCnt)


def reset_cnt():
    global objCnt
    objCnt = 0
    return objCnt


default_scheduler.every(args["f"]).seconds.do(check)
default_scheduler.every(args["f"]).seconds.do(reset_cnt)
print("Starting...")

while True:
    frame = camera.read()  # read camera

    if frame is None:
        print('Camera read failed. Is it in use, or is the src index incorrect?')
        break

    frame = imutils.resize(frame, width=400)  # resize frame
    height = np.size(frame, 0)  # calculates the height of frame
    width = np.size(frame, 1)  # calculates the width of frame
    blurred = cv2.GaussianBlur(frame, (21, 21), 0)  # blurring image before hsv applied (less noise)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)  # creating hsv from blurred frame and converting the bgr to hsv

    mask = cv2.inRange(hsv, np.array(args["a"]), np.array(args["b"]))  # keep only pixels whose HSV values fall between the lower and upper bounds
    mask = cv2.dilate(mask, None, iterations=2)  # dilation "thickens" the white regions of the mask
    mask = cv2.erode(mask, None, iterations=2)  # erosion "thins" them again; the dilate/erode pair removes small specks of noise
    res = cv2.bitwise_and(frame, frame, mask=mask)  # keep only the frame pixels that fall inside the mask
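# --- Added sketch (not part of the example above) ---
# The blur -> HSV -> inRange -> dilate/erode -> bitwise_and pipeline commented
# above, run on a synthetic image with arbitrary red HSV bounds, so it can be
# tested without a camera or command-line arguments.
import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype="uint8")
img[30:70, 30:70] = (0, 0, 255)                      # a red square (BGR)

blurred = cv2.GaussianBlur(img, (21, 21), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([0, 100, 100], np.uint8),
                   np.array([10, 255, 255], np.uint8))
mask = cv2.dilate(mask, None, iterations=2)          # thicken the kept region
mask = cv2.erode(mask, None, iterations=2)           # thin it back, removing specks
res = cv2.bitwise_and(img, img, mask=mask)
print(cv2.countNonZero(mask), "pixels kept by the mask")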
Example #24
    def recognise(self):
        """
        Starts Video stream and checks detected face againist encoded data
        to recognise user through face.
        :return: The reognised user's username
        :rtype: str
        """
        data = pickle.loads(open("encodings.pickle", "rb").read())
        print("starting video stream...")

        # initialize the video stream and allow the camera sensor to warm up
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

        # grab the frame from the threaded video stream
        frame = vs.read()

        # convert the input frame from BGR to RGB, then resize it to a
        # width of 240px (to speed up processing)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        rgb = imutils.resize(rgb, width=240)

        # detect the (x, y)-coordinates of the bounding boxes
        # corresponding to each face in the input frame, then compute
        # the facial embeddings for each face
        boxes = face_recognition.face_locations(rgb, model="hog")
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []

        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(
                data["encodings"], encoding)
            name = "Unknown"

            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}

                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie Python
                # will select the first entry in the dictionary)
                name = max(counts, key=counts.get)

                names.append(name)

        for name in names:
            vs.stop()
            return(name)

        vs.stop()
        return ""
        totalDown = 0
        totalUp = 0
        countlogU = 0
        countlogD = 0
        outFrames = totalFrames
        fracao = 4

        # Start the frames-per-second estimator.
        fps = FPS().start()

        # Loop over the frames from the video file stream.
        while True:

            # Grab the next frame and handle whether we are reading from
            # VideoCapture or VideoStream.
            frame = stream.read()
            frame = frame[1] if configure.get("input", False) else frame

            # If the frame was not grabbed, then we have reached the end of
            # the stream.
            if configure["input"] is not None and frame is None:
                break

            # Grab references to the input image tensor and the boxes tensor.
            # NOTE: these references let us access their associated values
            # after passing the image through the network.
            imageTensor = model.get_tensor_by_name("image_tensor:0")
            boxesTensor = model.get_tensor_by_name("detection_boxes:0")

            # For each bounding box we would like to know the score
Example #26
def lancerapp(x):
    def eye_aspect_ratio(eye):
        # compute the euclidean distances between the two sets of
        # vertical eye landmarks (x, y)-coordinates
        A = dist.euclidean(eye[1], eye[5])
        B = dist.euclidean(eye[2], eye[4])

        # compute the euclidean distance between the horizontal
        # eye landmark (x, y)-coordinates
        C = dist.euclidean(eye[0], eye[3])

        # compute the eye aspect ratio
        ear = (A + B) / (2.0 * C)

        # return the eye aspect ratio
        return ear

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()

    ap.add_argument("-w",
                    "--webcam",
                    type=int,
                    default=0,
                    help="index of webcam on system")
    args = vars(ap.parse_args())

    # define two constants, one for the eye aspect ratio to indicate a
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold to set off the alarm.
    # In a situation that calls for extra vigilance, the second constant
    # can be made smaller.
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = x

    #EYE_AR_CONSEC_FRAMES = 48

    # initialize the frame counter
    COUNTER = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("68 face landmarks.dat")

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=args["webcam"]).start()
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        # channels)
        # Convert to grayscale to make contrasts easier to detect
        frame = vs.read()
        frame = imutils.resize(frame, width=700)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (182, 196, 46), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (182, 196, 46), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

                # if the eyes were closed for a sufficient number of frames,
                # then sound the alarm
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    # if the alarm is not on, turn it on

                    # draw an alarm on the frame
                    cv2.putText(frame, "Signes de somnolence!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (54, 29, 231),
                                2)
    #Lancer une autre application !

            # otherwise, the eye aspect ratio is not below the blink
            # threshold, so reset the counter and alarm
            else:
                COUNTER = 0

            # draw the computed eye aspect ratio on the frame to help
            # with debugging and setting the correct eye aspect ratio
            # thresholds and frame counters
            cv2.putText(frame, "Rapport d'ouverture: {:.2f}".format(ear),
                        (400, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (182, 196, 46), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
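# --- Added sketch (not part of the example above) ---
# A worked example of the eye aspect ratio used above, on made-up landmark
# coordinates: EAR = (||p2-p6|| + ||p3-p5||) / (2 * ||p1-p4||). It drops toward
# zero as the eye closes, which is why frames with EAR < EYE_AR_THRESH (0.3)
# are counted toward the drowsiness alarm.
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)

open_eye = [(0, 0), (1, 1), (2, 1), (3, 0), (2, -1), (1, -1)]
closed_eye = [(0, 0), (1, 0.2), (2, 0.2), (3, 0), (2, -0.2), (1, -0.2)]
print(eye_aspect_ratio(open_eye))     # ~0.67, above the 0.3 threshold
print(eye_aspect_ratio(closed_eye))   # ~0.13, below the threshold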
Example #27
    face_net = cv2.dnn.readNet(proto_txt_path, weights_path)

    # load the mask detector model
    mask_net = load_model("mask_detector.model")

    # initialize the video stream
    logger.info("starting video stream...")
    #rtsp_url = "rtsp://name:password/video"
    # vs = VideoStream(rtsp_url).start()
    # used to grab the stream from an IP camera
    vs = VideoStream(src=0).start()

    # loop while the video stream is running
    while True:
        # grab a frame from the stream and resize it to a maximum width of 400 pixels
        frame = imutils.resize(vs.read(), width=400)

        # detect faces in the frame and predict whether each one wears a mask
        try:
            locs, preds = detect_and_predict_mask(frame, face_net,
                                                  mask_net)  # can crash here
            for box, pred in zip(locs, preds):
                # unpack the bounding box and the prediction
                startX, startY, endX, endY = box
                mask, withoutMask = pred

                # choose the label and color used to draw the bounding box
                label, color = ("In mask", (
                    0, 255, 0)) if mask > withoutMask else ("Without mask",
                                                            (0, 0, 255))
Example #28
import cv2
import numpy as np
from imutils.video import VideoStream

cam = VideoStream(src=0).start()

while (True):
    img = cam.read()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    red_lower = np.array([136, 87, 111], np.uint8)
    red_upper = np.array([180, 255, 255], np.uint8)

    blue_lower = np.array([99, 115, 150], np.uint8)
    blue_upper = np.array([110, 255, 255], np.uint8)

    yellow_lower = np.array([22, 60, 200], np.uint8)
    yellow_upper = np.array([60, 255, 255], np.uint8)

    red = cv2.inRange(hsv, red_lower, red_upper)
    blue = cv2.inRange(hsv, blue_lower, blue_upper)
    yellow = cv2.inRange(hsv, yellow_lower, yellow_upper)

    kernal = np.ones((5, 5), "uint8")
    red = cv2.dilate(red, kernal)
    res = cv2.bitwise_and(img, img, mask=red)

    blue = cv2.dilate(blue, kernal)
    res1 = cv2.bitwise_and(img, img, mask=blue)

    yellow = cv2.dilate(yellow, kernal)
class SpeedDetector:
    def __init__(self):
        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        self.H = None
        self.W = None
        self.video_stream = None
        self.net = None
        self.current_time_stamp = None
        self.frame = None
        self.rgb = None
        self.meter_per_pixel = None

        # Load Model
        self.load_model()
        # Initialize the camera.
        self.initialize_camera()

        # start the frames per second throughput estimator
        self.fps = FPS().start()
        self.centroid_object_creator = CentroidObjectCreator()

    def load_model(self):
        """
        Load our serialized model from disk
        """
        logger().info("Loading model name:{}, proto_text:{}.".format(
            MODEL_NAME, PROTO_TEXT_FILE))
        self.net = cv2.dnn.readNetFromCaffe(PROTO_TEXT_FILE, MODEL_NAME)
        # Set the target to the MOVIDIUS NCS stick connected via USB
        # Prerequisite: https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_raspbian.html
        logger().info(
            "Setting MOVIDIUS NCS stick connected via USB as the target to run the model."
        )
        self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    def initialize_camera(self):
        """
        Initialize the video stream and allow the camera sensor to warmup.
        """
        logger().info(
            "Warming up Raspberry PI camera connected via the PCB slot.")
        self.video_stream = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)
        # vs = VideoStream(src=0).start()

    def grab_next_frame(self):
        """
        1. Grab the next frame from the stream.
        2. store the current timestamp, and store the new date.
        """
        self.frame = self.video_stream.read()
        if self.frame is None:
            return

        self.current_time_stamp = datetime.now()
        # resize the frame
        self.frame = imutils.resize(self.frame, width=FRAME_WIDTH_IN_PIXELS)
        self.rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

    def set_frame_dimensions(self):
        """
        If the frame dimensions are empty, set them.
        """
        # if the frame dimensions are empty, set them
        if not self.W or not self.H:
            (self.H, self.W) = self.frame.shape[:2]
            self.meter_per_pixel = DISTANCE_OF_CAMERA_FROM_ROAD / self.W

    def loop_over_streams(self):
        while True:
            self.grab_next_frame()
            # check if the frame is None, if so, break out of the loop
            if self.frame is None:
                break
            self.set_frame_dimensions()
            objects = self.centroid_object_creator.create_centroid_tracker_object(
                self.H, self.W, self.rgb, self.net, self.frame)
            for speed_tracked_object, objectID, centroid in SpeedTrackerHandler.yield_a_speed_tracker_object(
                    objects):
                SpeedTrackerHandler.compute_speed(self.frame,
                                                  speed_tracked_object,
                                                  objectID, centroid,
                                                  self.current_time_stamp,
                                                  self.meter_per_pixel)
                SpeedValidator.validate_speed(speed_tracked_object,
                                              self.current_time_stamp,
                                              self.frame)

            # if the *display* flag is set, then display the current frame
            # to the screen and record if a user presses a key
            if OPEN_DISPLAY:
                cv2.imshow("Car_car_speed_detector_frame", self.frame)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key is pressed, break from the loop
                if key == ord("q"):
                    break

            # Update the FPS counter
            self.fps.update()

    def clean_up(self):
        # stop the timer and display FPS information
        self.fps.stop()
        logger().info("elapsed time: {:.2f}".format(self.fps.elapsed()))
        logger().info("approx. FPS: {:.2f}".format(self.fps.fps()))

        #Close the log file.
        SpeedValidator.close_log_file()

        # close any open windows
        cv2.destroyAllWindows()

        # clean up
        logger().info("cleaning up...")
        self.video_stream.stop()

    def perform_speed_detection(self):
        # loop_over_streams() already loops until the stream ends or 'q'
        # is pressed, so clean_up() runs once it returns
        self.loop_over_streams()
        self.clean_up()
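# --- Added sketch (not part of the example above) ---
# SpeedDetector stores meter_per_pixel = DISTANCE_OF_CAMERA_FROM_ROAD / W and
# hands it to SpeedTrackerHandler.compute_speed(), which is not shown in this
# snippet. The rough sketch below (made-up constants) shows how such a scale
# plus two timestamps could turn a pixel displacement into a speed estimate.
from datetime import datetime, timedelta

DISTANCE_OF_CAMERA_FROM_ROAD = 6.0       # meters, assumed for illustration
FRAME_WIDTH_IN_PIXELS = 400
meter_per_pixel = DISTANCE_OF_CAMERA_FROM_ROAD / FRAME_WIDTH_IN_PIXELS

x_start, x_end = 120, 280                # centroid x-positions at two timestamps
t_start = datetime.now()
t_end = t_start + timedelta(seconds=0.8)

meters = abs(x_end - x_start) * meter_per_pixel
seconds = (t_end - t_start).total_seconds()
print("estimated speed: {:.1f} km/h".format(meters / seconds * 3.6))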
	vs = VideoStream(src=2).start() 

# using prerecorded video
else:
	vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

firstLoop = True # check if this is the first loop
prevCenter = [0, 0] # value for previous center location
croppedFrame = [0, 0, 0, 0] # y1, y2, x1, x2 for cropping the frame


while True:
	frame = vs.read() # read current frame

	# if we are reading from a video file, read() returns a (grabbed, frame) tuple
	frame = frame[1] if args.get("video", False) else frame

	# if this is the end of the video
	if frame is None:
		break

	# resize the frame, blur it, and convert to the HSV
	frame = imutils.resize(frame, width=600)
	blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

	# run ROI on first frame and crop frame
	if firstLoop:
Example #31
sl = 'Saturation Low'
vh = 'Value High'
vl = 'Value Low'
wnd = 'Colorbars'

cv2.namedWindow(wnd, flags=cv2.WINDOW_AUTOSIZE)

cv2.createTrackbar(hl, wnd, 0, 179, nothing)
cv2.createTrackbar(hh, wnd, 0, 179, nothing)
cv2.createTrackbar(sl, wnd, 0, 255, nothing)
cv2.createTrackbar(sh, wnd, 0, 255, nothing)
cv2.createTrackbar(vl, wnd, 0, 255, nothing)
cv2.createTrackbar(vh, wnd, 0, 255, nothing)

while True:
    frame = cap.read()
    frame = cv2.GaussianBlur(frame, (5, 5), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    hul = cv2.getTrackbarPos(hl, wnd)
    huh = cv2.getTrackbarPos(hh, wnd)
    sal = cv2.getTrackbarPos(sl, wnd)
    sah = cv2.getTrackbarPos(sh, wnd)
    val = cv2.getTrackbarPos(vl, wnd)
    vah = cv2.getTrackbarPos(vh, wnd)

    hsvlow = np.array([hul, sal, val])
    hsvhigh = np.array([huh, sah, vah])

    mask = cv2.inRange(hsv, hsvlow, hsvhigh)
    screen = cv2.bitwise_and(frame, frame, mask=mask)
green_upper = (80, 255, 150)
pts = deque(maxlen=args["buffer"])

camera = VideoStream(usePiCamera=args["picamera"] > 0).start()
#go to target
theta_total="1/011/2/020/3/155/4/080"
print(theta_total)
print('going to target')
ser.write(theta_total)
#time.sleep(2)
ser.write(theta_total)
#time.sleep(8)

time.sleep(2.0)
while(1):
        frame = camera.read()
        frame=imutils.resize(frame, width=400)
        hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask=cv2.inRange(hsv, green_lower, green_upper)
        cv2.imshow('OrigFrame',frame)
        #cv2.imshow('Original Mask',mask)
        mask=cv2.erode(mask, None, iterations=3)
        #cv2.imshow('Erode Mask',mask)
        mask=cv2.dilate(mask, None, iterations=3)
        cv2.imshow('Dilate Mask',mask)
        #contours
        cnts=cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        #time.sleep(10)
        center=None
        if len(cnts)>0:
Example #33
def run():

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p",
                    "--prototxt",
                    required=False,
                    help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m",
                    "--model",
                    required=True,
                    help="path to Caffe pre-trained model")
    ap.add_argument("-i",
                    "--input",
                    type=str,
                    help="path to optional input video file")
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    help="path to optional output video file")
    # confidence default 0.4
    ap.add_argument("-c",
                    "--confidence",
                    type=float,
                    default=0.4,
                    help="minimum probability to filter weak detections")
    ap.add_argument("-s",
                    "--skip-frames",
                    type=int,
                    default=30,
                    help="# of skip frames between detections")
    args = vars(ap.parse_args())

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # if a video path was not supplied, grab a reference to the ip camera
    if not args.get("input", False):
        print("[INFO] Starting the live stream..")
        #vs = cv2.VideoCapture(cv2.CAP_V4L2)
        vs = VideoStream(cv2.CAP_V4L2).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] Starting the video..")
        vs = cv2.VideoCapture(args["input"])

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    x = []
    empty = []
    empty1 = []

    # start the frames per second throughput estimator
    fps = FPS().start()

    if config.Thread:
        vs = thread.ThreadingClass(cv2.CAP_V4L2)

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=400)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if args["output"] is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (153, 204, 102), 3)
        cv2.putText(frame, "-Prediction border - Entrance-",
                    (10, H - ((i * 20) + 200)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 0, 0), 1)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        empty.append(totalUp)
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        empty1.append(totalDown)
                        #print(empty1[-1])
                        x = []
                        # compute the sum of total people inside
                        x.append(len(empty1) - len(empty))
                        #print("Total people inside:", x)
                        # if the people limit exceeds over threshold, send an email alert
                        if sum(x) >= 2:
                            cv2.putText(frame,
                                        "-ALERT: People limit exceeded-",
                                        (10, frame.shape[0] - 80),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (0, 0, 255), 2)

                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255),
                       -1)

        # construct a tuple of information we will be displaying on the frame
        # info = [
        # ("exit", totalUp),
        # ("Enter", totalDown),
        # ("Status", status),
        # ]

        info2 = [
            ("Total People: ", x),
        ]

        # Display the output
        # for (i, (k, v)) in enumerate(info):
        #    text = "{}: {}".format(k, v)
        #    cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)

        for (i, (k, v)) in enumerate(info2):
            text = "{}: {}".format(k, v)
            # cv2.putText(frame, text, (265, H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2) #Red
            cv2.putText(frame, text, (15, H - ((i * 20) + 50)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)  #Red

        # Initiate a simple log to save data at end of the day
        if config.Log:
            datetimee = [datetime.datetime.now()]
            d = [datetimee, empty1, empty, x]
            export_data = zip_longest(*d, fillvalue='')

            with open('Log.csv', 'w', newline='') as myfile:
                wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
                wr.writerow(("End Time", "In", "Out", "Total Inside"))
                wr.writerows(export_data)

        # show the output frame
        cv2.imshow("Real-Time Monitoring/Analysis Window", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"): break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

        if config.Timer:
            # Automatic timer to stop the live stream. Set to 8 hours (28800s).
            t1 = time.time()
            num_seconds = (t1 - t0)
            if num_seconds > 28800: break

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # # if we are not using a video file, stop the camera video stream
    # if not args.get("input", False):
    #    vs.stop()
    #
    # # otherwise, release the video file pointer
    # else:
    #    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
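# --- Added sketch (not part of the example above) ---
# The up/down counting rule used above, isolated on synthetic centroids
# (hypothetical values): direction is the current y minus the mean of the
# previous y-positions, and an object is counted once it has both the right
# sign of direction and has crossed the H // 2 line.
import numpy as np

H = 400
previous_centroids = [(200, 320), (201, 300), (199, 280)]   # (x, y) history
current_centroid = (200, 150)

y_history = [c[1] for c in previous_centroids]
direction = current_centroid[1] - np.mean(y_history)

if direction < 0 and current_centroid[1] < H // 2:
    print("counted as moving up")        # fires for this example
elif direction > 0 and current_centroid[1] > H // 2:
    print("counted as moving down")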
Example #34
    def viewCameraPi(self):

        #text = 'This is a message from app to inform that app start running now!'
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone1,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone2,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        print("START SCRIPT AND MAJOR WARNING!")
        statusSMS = 'delivered'
        if((statusSMS != 'failed') and (statusSMS != 'undelivered')):
            #camera = PiCamera()
            #camera.resolution = ( 640, 480)
            #camera.framerate = 32

            #rawCapture = PiRGBArray(camera, size=( 640, 480))
            #self.sumMSE = self.sumSSIM = self.avgMSE = self.avgSSIM = 0
            self.tempHour = datetime.datetime.now().hour
            self.tempMinute = datetime.datetime.now().minute
            self.warmup = 0

            vs = VideoStream(usePiCamera=1,resolution=(640,480)).start()
            time.sleep(1.2)

            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = None
            (h, w) = (None, None)
            zeros = None

            print ("view camera")

            while True:
                self.warmup+=1
                if(self.warmup >=5):
                    frame = vs.read()
                    if writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frame.shape[:2]
                        writer = cv2.VideoWriter('exampleTH3.avi', fourcc, 20,
                            (w, h), True)

                    writer.write(frame)

                    self.curImage = frame
                    self.getDefImagePerHours()
                    #write xml
                    print("process frame thu {}".format(self.warmup-4))
                    self.writeXML()

                    # stop the program after 5 minutes:
                    if(datetime.datetime.now().minute - self.tempMinute >5):
                        self.final()
                        break


            # for frame in camera.capture_continuous( rawCapture, format("bgr"), use_video_port = True):
            #
            #     self.curImage = frame.array
            #     frame = vs.read()
            #     self.warmup +=1
            #     if(self.warmup >=5):
            #         self.getDefImagePerHours()
            #         # self.getDefImagePerTenMinutes()
            #
            #         #write xml
            #         print("process frame thu {}".format(self.warmup-4))
            #         self.writeXML()
            #
            #         #warning
            #         # self.warning()
            #
            #         #tat chuong trinh sau 5 phut:
            #         if(datetime.datetime.now().minute - self.tempMinute >5):
            #             self.final()
            #             break
            #     #neu la 16h, script se tu tat
            #     # tempBreak = datetime.datetime.now().hour
            #     # if(( tempBreak == 0) or (tempBreak == 6) or (tempBreak == 18) or (tempBreak == 12)):
            #     #     if(datetime.datetime.now().minute == 0):
            #     #         if((datetime.datetime.now().second >= 0) and (datetime.datetime.now().second <=3)):
            #     #             self.final()
            #     #             self.__init__(tempBreak)
            #
            #     # show frame
            #     # cv2.imshow("image", self.curImage)
            #     # key = cv2.waitKey(1) & 0xFF
            #
            #     #renew
            #     rawCapture.truncate(0)
            #
            #     #press 'q' to stop, press any key to continue
            #     # if(key == ord("q")):
            #     #     break
            #call function final
            vs.stop()
            writer.release()
            self.final()


        else:
            print("Sending the message failed, so the app will not run. Sorry for the inconvenience!")
# OPEN THE OUTPUT FILE FOR WRITING
file = open('data.txt', 'w')
file.write("n		X		Y\n")
n = 0

# MAIN LOOP
while True:
    # CAPTURE A FRAME
    if from_vfile:
        (grabbed, frame) = video_file.read()

        # IF NO FRAMES REMAIN, THE VIDEO HAS ENDED
        if not grabbed:
            break
    else:
        frame = camera.read()

    frame = imutils.resize(frame, width=600)
    if record_video:
        if video_writer is None:  # CREATE THE WRITER INTERFACE
            (h, w) = frame.shape[:2]
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            video_writer = cv2.VideoWriter(args["record_video"], fourcc, 20,
                                           (w, h), True)

        if not record_track:
            video_writer.write(frame)

    # THRESHOLDING
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, greenLower, greenUpper)
Example #36
fourcc = cv2.VideoWriter_fourcc(*args["codec"])
out = None
(h, w) = (None, None)
zeros = None
A = None
wait_time = None
record = False
WindowName = args["windowname"]

n=0
print("Initialize streaming")
while True:
    t0 = time.time()

    #grab the frame from the video stream and resize it to have a max width
    gray = vs.read()
    # frame = imutils.resize(frame, args["width"])
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow(WindowName, gray)

    if record == True:
        output = np.zeros((h, w),dtype="uint8")
        output[0:h, 0:w] = gray
        #output = np.zeros((h, w, 3),dtype="uint8")
        #output[0:h, 0:w] = frame #grayscale movie
        out.write(output)
        cv2.imshow(WindowName + "_RECORDING", output)
        n+=1

        if (n % 100) == 0:
            print("The number of frames collected: {0} frames".format(n))
        minSize=(MIN_SIZE, MIN_SIZE),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    if len(faces)>0:
        print ("Found {0} faces!".format(len(faces)))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    return frame

while True:
    # Capture frame-by-frame
    _,frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if ENABLE_FACE_DETECT:
        resultFrame = faceDetect(gray)
        # Display the resulting frame
        cv2.imshow('Video', resultFrame)

    if ENABLE_FPS:
        print("fps:",t.fps())

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
Example #38
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
	vs = VideoStream(usePiCamera=1).start()
	time.sleep(2.0)	

# otherwise, grab a reference to the video file
else:
	vs = cv2.VideoCapture(args["video"])

# keep looping
while True:
	# grab the current frame
	if args.get("video"):
		(grabbed, frame) = vs.read()
	else:
		frame = vs.read()

	# if we are viewing a video and we did not grab a frame,
	# then we have reached the end of the video
	if args.get("video") and not grabbed:
		break

	# resize the frame, blur it, and convert it to the HSV
	# color space
	frame = imutils.resize(frame, width=600)
	# blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	# construct a mask for the color "green", then perform
Example #39
import cv2
import sys
import time
from imutils.video import VideoStream
import imutils

#setting up the PiCamera and its variables
video_stream = VideoStream(usePiCamera=True, resolution=(480,320), framerate=25).start()
time.sleep(2)

x = 0


# capture frames from the camera using the piCamera library
while True:
    print(x)

    # Capture frame-by-frame
    image = video_stream.read()

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    cv2.imshow("Frame", gray)
    cv2.imwrite("test-images\frame{}.jpg".format(x), gray)
    key = cv2.waitKey(1) & 0xFF
    x = x + 1
    
    # break out of the loop when 'q' is pressed
    if key == ord('q'):
        break
Example #40
def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.prototxt.txt",
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.caffemodel",
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.6,
        help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # initialize the list of objects MobileNet SSD was trained to detect
    # and assign a randomly chosen contour color to each class
    CLASSES = ["arriere-plan", "avion", "velo", "oiseau", "bateau",
        "bouteille", "autobus", "voiture", "chat", "chaise", "vache", "table",
        "chien", "cheval", "moto", "personne", "plante", "mouton",
        "sofa", "train", "moniteur"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    pygame.mixer.init()

    # load the model files from the storage directory
    print(" ...loading the model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the Pi camera, wait 2s for it to warm up,
    # and initialize the FPS counter
    print("...starting the PiCamera...")
    vs = VideoStream(usePiCamera=True, resolution=(1600, 1200)).start()
    time.sleep(2.0)
    #fps = FPS().start()

    # main video stream loop
    while True:
        # grab the video stream and resize it so that at most
        # 800 pixels wide are displayed
        frame = vs.read()
        frame = imutils.resize(frame, width=800)

        # grab the frame dimensions and convert the frame to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)

        # run the detection and prediction
        net.setInput(blob)
        detections = net.forward()

        # detection loop
        list_objects = []
        for i in np.arange(0, detections.shape[2]):
            # compute the probability of the detected object from the prediction
            confidence = detections[0, 0, i, 2]
            
            # discard weak detections below the minimum confidence
            if confidence > args["confidence"]:
                # extract the index of the detected object's class
                # and compute the coordinates of the detection box
                idx = int(detections[0, 0, i, 1])
                #box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                #(startX, startY, endX, endY) = box.astype("int")

                # draw the contour around the detected object and
                # overlay the prediction for the detected object
                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                #y = startY - 15 if startY - 15 > 15 else startY + 15
                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                
                # save the detected image
                #cv2.imwrite("detection.png", frame)
                obj = CLASSES[idx]
                if obj not in list_objects:
                    list_objects.append(CLASSES[idx])
        
        # display the video stream in a window
        #cv2.imshow("Frame", frame)
        #key = cv2.waitKey(1) & 0xFF  # line needed for the frame to be displayed

        # Pronounce the objects seen
        print(list_objects)
        for anobject in list_objects:
            path_to_sound = "/home/pi/Kenobi/recognition/vocabulary/" + anobject + ".ogg"
            if os.path.isfile(path_to_sound):
                pygame.mixer.music.load(path_to_sound)
                pygame.mixer.music.play()
                # Play until end of music file
                while pygame.mixer.music.get_busy() == True:
                    pygame.time.Clock().tick(10)

        # the q key breaks out of the main loop
        #if key == ord("q"):
        #   break

        # update the FPS counter
        #fps.update()

    # stop the counter and print the information to the console
    #fps.stop()
    #print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    #print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Example #41
class yawn_detection:
    def __init__(self, shape_predictor=None, alarm=None, webcam=None):

        #shape_predictor = self.shape_predictor
        #alarm = self.alarm
        #webcam = self.webcam

        self.shape_predictor = 'shape_predictor_68_face_landmarks.dat'
        self.alarm = 'alarm.wav'
        self.webcam = 'http://10.24.201.216:4747/mjpegfeed?640x480'

        # define two constants, one for the mouth aspect ratio to indicate a
        # yawn and then a second constant for the number of consecutive
        # frames the mouth aspect ratio must cross the threshold to set
        # off the alarm
        self.MOUTH_AR_THRESH = 0.65
        self.MOUTH_AR_CONSEC_FRAMES = 15

        # initialize the frame counter as well as a boolean used to
        # indicate if the alarm is going off
        self.COUNTER = 0
        self.ALARM_ON = False

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.shape_predictor)

        self.STATUS = False

        # grab the indexes of the facial landmarks for the mouth
        (self.Start, self.End) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

    def mouth_aspect_ratio(self, mouth):
        # vertical distances between upper and lower mouth landmarks
        A = dist.euclidean(mouth[1], mouth[5])
        B = dist.euclidean(mouth[2], mouth[4])

        # horizontal distance between the mouth corners
        C = dist.euclidean(mouth[0], mouth[3])

        # aspect ratio: average vertical opening over horizontal width
        self.ratio = (A + B) / (2.0 * C)
        return self.ratio

    def sound_alarm(self, path):
        # play an alarm sound
        playsound.playsound(path)

    def vs_loop(self):
        # start the video stream thread
        print("[INFO] starting video stream thread...")
        self.vs = VideoStream(src=self.webcam).start()

        #print(self.vs.isOpened())
        time.sleep(1.0)

        while True:
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale
            # channels)
            frame = self.vs.read()
            frame = imutils.resize(frame, width=450)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the grayscale frame
            self.rects = self.detector(gray, 0)

            # loop over the face detections
            for rect in self.rects:
                # determine the facial landmarks for the face region, then
                # convert the facial landmark (x, y)-coordinates to a NumPy
                # array
                shape = self.predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                mouth = shape[self.Start:self.End]
                MAR = self.mouth_aspect_ratio(mouth)

                mouthHull = cv2.convexHull(mouth)
                # draw the mouth hull: contour index -1 (all contours),
                # green colour (0, 255, 0), line thickness 1
                cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)

                if MAR < self.MOUTH_AR_THRESH:
                    self.COUNTER += 1

                    # if the condition has held for a sufficient number
                    # of frames, raise the drowsiness alert
                    if self.COUNTER >= self.MOUTH_AR_CONSEC_FRAMES:
                        self.STATUS = True
                        # draw an alarm on the frame
                        cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
                                    2)
                    else:
                        self.STATUS = False
                else:
                    self.COUNTER = 0
                    self.ALARM_ON = False

                cv2.putText(frame, "MAR: {:.2f}".format(MAR), (300, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
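# A minimal usage sketch for the yawn_detection class above (assumes the
# dlib landmark file, alarm sound and webcam URL set in __init__ exist):
if __name__ == "__main__":
    detector = yawn_detection()
    detector.vs_loop()  # blocks until the 'q' key is pressed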
Example #42
0
import sys
sys.path.append('/usr/local/lib')
import socket
import time
from imutils.video import VideoStream
from imagezmq import ImageSender

sender = ImageSender(connect_to='tcp://127.0.0.1:5555')
cam_name = socket.gethostname()
cam = VideoStream(src=0).start()
time.sleep(2.0)
while True:  # send images as stream until Ctrl-C
    image = cam.read()
    sender.send_image(cam_name, image)
"""test_2_rpi_send_images.py -- send PiCamera image stream.

A Raspberry Pi test program that uses imagezmq to send image frames from the
PiCamera continuously to a receiving program on a Mac that will display the
images as a video stream.

This program requires that the image receiving program be running first. Brief
test instructions are in that program: test_2_mac_receive_images.py.
"""

import sys

import socket
import time
import cv2
from imutils.video import VideoStream
import imagezmq

# use either of the formats below to specify the address of the display computer
# sender = imagezmq.ImageSender(connect_to='tcp://jeff-macbook:5555')
sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')

rpi_name = socket.gethostname()  # send RPi hostname with each image
picam = VideoStream(usePiCamera=True).start()
time.sleep(2.0)  # allow camera sensor to warm up
while True:  # send images as stream until Ctrl-C
    image = picam.read()
    sender.send_image(rpi_name, image)
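# The sending loop above blocks until the receiver replies, so the receiving
# program must be started first. A minimal sketch of such a receiver using
# imagezmq's ImageHub (not the actual test_2_mac_receive_images.py):
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()  # listens on tcp://*:5555 by default
while True:  # show streamed images until Ctrl-C
    rpi_name, image = image_hub.recv_image()
    cv2.imshow(rpi_name, image)  # one window per sending hostname
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')  # REP reply unblocks the sender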
Example #44
0
# otherwise, grab a reference to the video file
else:
	video_file = cv2.VideoCapture(args["video"])

# keep looping
while True:
	# grab the current frame
	if from_vfile:
		(grabbed, frame) = video_file.read()

		# if we are viewing a video and we did not grab a frame,
		# then we have reached the end of the video
		if not grabbed:
			break
	else:
		frame = camera.read()


	# resize the frame, blur it, and convert it to the HSV
	# color space
	frame = imutils.resize(frame, width=600)

	# record video
	if record_video:
		if video_writer is None:  # lazily initialize the video writer
			(h, w) = frame.shape[:2]
			fourcc = cv2.VideoWriter_fourcc(*'MJPG')
			# Note: the extension of the filename must be avi
			video_writer = cv2.VideoWriter(args["record_video"], fourcc, 20, (w,h), True)
			#zeros = np.zeros((h,w), dtype="uint8")
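# The snippet above stops right after the writer is created. A minimal,
# self-contained sketch of the usual write/release pattern with
# cv2.VideoWriter (camera index and output file name are placeholders):
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = None
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    if writer is None:
        # lazily create the writer once the frame size is known
        (h, w) = frame.shape[:2]
        writer = cv2.VideoWriter('output.avi', fourcc, 20, (w, h), True)
    writer.write(frame)  # append the frame to the output file
    cv2.imshow("recording", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
if writer is not None:
    writer.release()  # flush and close the .avi container
cap.release()
cv2.destroyAllWindows()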
Example #45
0
class Vision(object):
    def __init__(self, use_pi_cam=False):
        self.camera = VideoStream(usePiCamera=use_pi_cam)
        self.bounds_file = open("./scripts/colors.txt", "r")
        self.color_info = ast.literal_eval(self.bounds_file.readline())
        self.colors = len(self.color_info)
        self.camera.start()
        time.sleep(2)

    def find_color_in_frame(self,
                            signature,
                            frame,
                            show_feed,
                            threshold=50,
                            show_max=True):
        start = time.time()
        result = []
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)

        # hard-coded HSV bounds (roughly green); the per-signature bounds
        # from colors.txt are left commented out below
        lower = np.array([40, 70, 70])
        upper = np.array([80, 200, 200])

        # lower = np.fromstring(self.color_info[signature]['bounds'][0][1:-1], dtype=int, sep=' ')
        # upper = np.fromstring(self.color_info[signature]['bounds'][1][1:-1], dtype=int, sep=' ')
        blurred = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(blurred, lower, upper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        if show_feed:
            cv2.imshow(self.color_info[signature]['color'], mask)
        if len(contours) > 0:
            if show_max:
                contours = [max(contours, key=cv2.contourArea)]
            for c in contours:
                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                if cv2.contourArea(box) > threshold:
                    cv2.drawContours(frame, [box], 0, (0, 0, 255), 3)
                    font = cv2.QT_FONT_NORMAL
                    cv2.putText(frame, self.color_info[signature]['color'],
                                (box[0][0], box[0][1]), font, 0.5,
                                (255, 255, 255), 1, cv2.LINE_AA)
                    M = cv2.moments(box)
                    cx = int(M['m10'] / M['m00'])
                    cy = int(M['m01'] / M['m00'])
                    result.append({
                        'area': cv2.contourArea(box),
                        'x': cx,
                        'y': cy,
                        'color': self.color_info[signature]['color'],
                        'time': time.time() - start
                    })

        return frame, result

    def get_color(self, signature, show_feed=False):
        frame = self.camera.read()
        frame = imutils.resize(frame, width=400)
        frame, result = self.find_color_in_frame(signature, frame, show_feed)
        if show_feed:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1)  #
            if key == 27:
                self.camera.stop()
                exit()
        return result

    def get_colors(self, threshold, show_feed=False):
        frame = self.camera.read()
        frame = imutils.resize(frame, width=400)
        result = []
        for x in range(0, self.colors):
            if self.color_info[x]:
                frame, cont = self.find_color_in_frame(x,
                                                       frame=frame,
                                                       show_feed=show_feed,
                                                       threshold=threshold)
                result.append(cont)
        if show_feed:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1)
            if key == 27:
                # self.camera.stop()
                exit()
        return result
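# A minimal usage sketch for the Vision class above (assumes the
# ./scripts/colors.txt bounds file exists and a camera is attached):
if __name__ == "__main__":
    vision = Vision(use_pi_cam=False)
    while True:
        hits = vision.get_color(0, show_feed=True)  # first colour signature
        if hits:
            print(hits)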
Example #46
0
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
 
# initialize the FourCC, video writer, dimensions of the frame, and
# zeros array
fourcc = cv2.VideoWriter_fourcc(*args["codec"])  # cv2.cv.FOURCC in OpenCV 2.x
writer = None
(h, w) = (None, None)
zeros = None

# loop over frames from the video stream
while True:
    # grab the frame from the video stream and resize it to have a
    # maximum width of 300 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=300)
 
    # check if the writer is None
    if writer is None:
        # store the image dimensions, initialize the video writer,
        # and construct the zeros array
        (h, w) = frame.shape[:2]
        writer = cv2.VideoWriter(args["output"], fourcc, args["fps"], (w, h), True)
        zeros = np.zeros((h, w), dtype="uint8")

    # break the image into its RGB components, then construct the
    # RGB representation of each frame individually
    (B, G, R) = cv2.split(frame)
    #R = cv2.merge([zeros, zeros, R])
    #G = cv2.merge([zeros, G, zeros])
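    # --- hedged sketch of the usual continuation (not part of the original
    # snippet): rebuild 3-channel images from each plane using `zeros`, tile
    # them with the original frame into a 2x2 montage, and write it out:
    #
    #     R = cv2.merge([zeros, zeros, R])
    #     G = cv2.merge([zeros, G, zeros])
    #     B = cv2.merge([B, zeros, zeros])
    #     output = np.zeros((h * 2, w * 2, 3), dtype="uint8")
    #     output[0:h, 0:w] = frame
    #     output[0:h, w:w * 2] = R
    #     output[h:h * 2, w:w * 2] = G
    #     output[h:h * 2, 0:w] = B
    #     writer.write(output)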
Example #47
0
import datetime
import time
import cv2
import imutils
from imutils.video import VideoStream

helmet_cascade = cv2.CascadeClassifier('LBPCascade_helmet.xml')
#motorcycle_cascade = cv2.CascadeClassifier('LBPcascade_motorcycle.xml')

print("[INFO] starting cameras...")
picam = VideoStream(usePiCamera=True).start()
webcam = VideoStream(src=0).start()
time.sleep(2.0)
print(helmet_cascade)
time.sleep(5)
while True:
    frames = []

    #for stream in picam:
    frame = webcam.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #locs = motion.update(gray)
    helmet = helmet_cascade.detectMultiScale(gray, 1.3, 5)

    #if total<32:
    #frames.append(gray)
    #continue

    frames.append(gray)

    timestamp = datetime.datetime.now()
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%Sp")

    for (frame, name) in zip(frames, ("Picamera",)):  # note the one-element tuple