Example #1
0
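# Assumed imports for this example (not shown in the listed snippet),
# inferred from the calls it makes.
import datetime
import time

import cv2
from imutils.video import VideoStream

# `mouseOn` and the global `key` are referenced but never defined in this
# example. A minimal sketch of what they might look like (an assumption, not
# the original author's code): clicking the "Detection" window flags a quit,
# which the main loop checks.
key = 0

def mouseOn(event, x, y, flags, param):
	global key
	if event == cv2.EVENT_LBUTTONDOWN:
		key = ord("q")
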
def main():
	global frame, key
	# initialize the camera and grab a reference to the raw camera capture
	wdth = 360
	hgth = 800
	camera = VideoStream(usePiCamera=True,resolution=(wdth,hgth)).start()
	time.sleep(2.0)
	fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	writer = None
	(h,w) = (None, None)
	# setup the mouse callback
	cv2.startWindowThread()
	cv2.namedWindow("Detection")
	cv2.setMouseCallback("Detection",mouseOn)
	# keep looping over the frames
	#for frame2 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	while True:
		frame = camera.read()
		frame = cv2.transpose(frame)
		frame = cv2.flip(frame,1)
		timestamp = datetime.datetime.now()
		ts = timestamp.strftime("%d/%m/%Y %H:%M:%S")
		cv2.putText(frame,ts,(10,frame.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.35,(0,255,0),1)
		if writer is None:
			(h,w) = frame.shape[:2]
			writer = cv2.VideoWriter("/media/usb/test_" + timestamp.strftime("%d_%m_%Y_%H%M") + ".avi", fourcc,5,(w,h), True)
		writer.write(frame)
		cv2.imshow("Detection", frame);
		# `key` is set by the mouseOn callback; stop the loop once it reads 'q'
		if key == ord("q"):
			break
	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	camera.stop()
Example #2
0
			(minX, maxX) = (min(minX, x), max(maxX, x + w))
			(minY, maxY) = (min(minY, y), max(maxY, y + h))

		# draw the bounding box
		cv2.rectangle(result, (minX, minY), (maxX, maxY),
			(0, 0, 255), 3)

	# increment the total number of frames read and draw the 
	# timestamp on the image
	total += 1
	timestamp = datetime.datetime.now()
	ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
	cv2.putText(result, ts, (10, result.shape[0] - 10),
		cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

	# show the output images
	cv2.imshow("Result", result)
	cv2.imshow("Left Frame", left)
	cv2.imshow("Right Frame", right)
	key = cv2.waitKey(1) & 0xFF

	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break

# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
leftStream.stop()
rightStream.stop()
Example #3
0
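# Assumed imports for this example (not shown in the listed snippet),
# inferred from the calls it makes.
import argparse
import os
import time

import cv2
import imutils
import numpy as np
import pygame
from imutils.video import VideoStream
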
def main():
    # build the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.prototxt.txt",
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.caffemodel",
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.6,
        help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # initialize the list of object classes the MobileNet SSD was trained on
    # (kept in French because they name the .ogg vocabulary files played below)
    # and assign a random bounding-box color to each class
    CLASSES = ["arriere-plan", "avion", "velo", "oiseau", "bateau",
        "bouteille", "autobus", "voiture", "chat", "chaise", "vache", "table",
        "chien", "cheval", "moto", "personne", "plante", "mouton",
        "sofa", "train", "moniteur"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    pygame.mixer.init()

    # load the model files from the storage directory
    print(" ...loading the model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the Pi camera, wait 2s for the sensor to warm up,
    # and initialize the FPS counter
    print("...starting the Picamera...")
    vs = VideoStream(usePiCamera=True, resolution=(1600, 1200)).start()
    time.sleep(2.0)
    #fps = FPS().start()

    # main loop over the video stream
    while True:
        # grab the frame from the video stream and resize it
        # to a maximum width of 800 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=800)

        # grab the frame dimensions and convert the frame to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)

        # pass the blob through the network to obtain the detections and predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        list_objects = []
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (probability) associated with the prediction
            confidence = detections[0, 0, i, 2]
            
            # filter out weak detections below the minimum probability
            if confidence > args["confidence"]:
                # extract the index of the detected class
                # and compute the coordinates of the detection box
                idx = int(detections[0, 0, i, 1])
                #box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                #(startX, startY, endX, endY) = box.astype("int")

                # draw the bounding box around the detected object
                # and label it with the prediction
                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                #y = startY - 15 if startY - 15 > 15 else startY + 15
                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                
                # save the frame with the detection
                #cv2.imwrite("detection.png", frame)
                obj = CLASSES[idx]
                if obj not in list_objects:
                    list_objects.append(CLASSES[idx])
        
        # display the video stream in a window
        #cv2.imshow("Frame", frame)
        #key = cv2.waitKey(1) & 0xFF  # ligne necessaire pour l'affichage dans la frame

        # Pronounce the objects seen
        print(list_objects)
        for anobject in list_objects:
            path_to_sound = "/home/pi/Kenobi/recognition/vocabulary/" + anobject + ".ogg"
            if os.path.isfile(path_to_sound):
                pygame.mixer.music.load(path_to_sound)
                pygame.mixer.music.play()
                # Play until end of music file
                while pygame.mixer.music.get_busy() == True:
                    pygame.time.Clock().tick(10)

        # the q key breaks out of the main loop
        #if key == ord("q"):
        #   break

        # update the FPS counter
        #fps.update()

    # stop the counter and print the information to the console
    #fps.stop()
    #print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    #print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Example #4
0
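# Assumed imports for this example (not shown in the listed snippet). The
# widget names and the capital-Q Queue module suggest Python 2; ir_capture and
# colormap are project-specific objects defined elsewhere in the original
# source, so they are not sketched here.
import pickle
import threading
import time
import Queue

import cv2
import imutils
import numpy as np
from PIL import Image, ImageTk
from Tkinter import Tk, Label, Button, Scale, HORIZONTAL
from imutils.video import VideoStream
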
class DualCamera():
    def __init__(self):
        self.root = Tk()
        self.root.wm_title("Dual Cam")

        self.saving = False
        self.frames = []
        self.overlay = False

        self.image_left = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(np.zeros((256, 256)))))
        self.image_panel_left = Label(self.root, image = self.image_left)
        self.image_panel_left.grid(row = 0, column = 0, columnspan=2)

        self.image_right = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(256 * np.random.rand(256, 256))))
        self.image_panel_right = Label(self.root, image = self.image_right)
        self.image_panel_right.grid(row = 0, column = 2, columnspan=2)

        self.save_button = Button(width = 10, height = 2, text = 'Save', command=self.save)
        self.save_button.grid(row = 1, column = 0)

        self.calibrate_button = Button(width = 10, height = 2, text = 'Calibrate', command=self.calibrate)
        self.calibrate_button.grid(row = 1, column = 1)

        self.close_button = Button(width = 10, height = 2, text = 'Close', command=self.quit)
        self.close_button.grid(row = 1, column = 3)

        self.overlay_button = Button(width=10, height=2, text='Overlay', command=self.toggle_overlay)
        self.overlay_button.grid(row = 1, column = 2)

        self.bias_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.bias)
        self.bias_slider.grid(row = 2, column = 1, columnspan=3)
        self.bias_label = Label(self.root, text="Bias current")
        self.bias_label.grid(row=2, column=0)

        self.clock_slider = Scale(self.root, from_=0, to=63, length=400, orient=HORIZONTAL, command=self.clock)
        self.clock_slider.grid(row = 3, column = 1, columnspan=3)
        self.clock_label = Label(self.root, text="Clock speed")
        self.clock_label.grid(row=3, column=0)

        self.cm_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.cm)
        self.cm_slider.grid(row = 4, column = 1, columnspan=3)
        self.cm_label = Label(self.root, text="CM current")
        self.cm_label.grid(row=4, column=0)

        # set default positions
        self.cm_slider.set(0x0C)
        self.clock_slider.set(0x15)
        self.bias_slider.set(0x05)


        # initialize visible camera
        self.vs = VideoStream(usePiCamera=True).start()

        # thread for reading from sensor hardware into an image queue
        self.ir_images = Queue.LifoQueue()
        self.ir_commands = Queue.Queue()
        self.ir_calibrate = threading.Event()
        self.ir_stop = threading.Event()
        self.raw_ir_images = Queue.LifoQueue()

        self.capture_thread = threading.Thread(
                        target=ir_capture,
                        name="capture_thread",
                        args=[self.ir_images, self.ir_calibrate, self.ir_stop, self.ir_commands, self.raw_ir_images]
                        )

        self.capture_thread.start()

        self.ticktock()
        self.root.mainloop()

    def ticktock(self):
        # grab an image from the camera
        frame = self.vs.read()
        changed = False

        if frame is not None:
            frame = imutils.resize(frame, height=240)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.last_vis_frame = frame
            image = Image.fromarray(frame)

            if not self.overlay:
                self.image_right = ImageTk.PhotoImage(image=image)
                self.image_panel_right.configure(image=self.image_right)

            changed = True

        if not self.ir_images.empty():
            ir_frame = self.ir_images.get()

            if not self.ir_images.empty():
                with self.ir_images.mutex:
                    self.ir_images.queue = []

            ir_image = imutils.resize(ir_frame, height=240, inter=cv2.INTER_LINEAR)
            ir_image = np.dstack((ir_image, ir_image, ir_image))
            ir_image = cv2.LUT(ir_image, colormap).astype('uint8')
           
            self.last_ir_frame = ir_image
            self.image_left = ImageTk.PhotoImage(image=Image.fromarray(ir_image))
            self.image_panel_left.configure(image=self.image_left)
            changed = True

        if changed and self.overlay:
            overlay_image = np.zeros_like(self.last_vis_frame)

            overlay_image[:,:,2] = 0.125 * self.last_vis_frame[:,:,0] + 0.25 * self.last_vis_frame[:,:,1] + 0.125 * self.last_vis_frame[:,:,2]
            converted_frame = cv2.cvtColor(self.last_ir_frame, cv2.COLOR_RGB2HSV)
            overlay_image[:,40:280,2] += 0.5 * converted_frame[:,:,2]
            overlay_image[:,40:280,1] = converted_frame[:,:,1]
            overlay_image[:,40:280,0] = converted_frame[:,:,0]

            overlay_image = cv2.cvtColor(overlay_image, cv2.COLOR_HSV2RGB)

            self.image_right = ImageTk.PhotoImage(image=Image.fromarray(overlay_image))
            self.image_panel_right.configure(image=self.image_right)

        if self.saving:
            if not self.raw_ir_images.empty():
                ir_frame = self.raw_ir_images.get()

                if not self.raw_ir_images.empty():
                    with self.raw_ir_images.mutex:
                        self.raw_ir_images.queue = []
            else:
                ir_frame = None

            self.frames.append((frame, ir_frame))

        if not self.ir_calibrate.isSet():
            self.calibrate_button.configure(text = 'Calibrate', command=self.calibrate, state="normal")

        self.root.after(100, self.ticktock)

    def quit(self):
        self.vs.stop()
        self.ir_stop.set()
        self.root.quit()

    def save(self):
        self.save_button.configure(text = 'Stop Saving', command=self.stop_save)
        self.saving = True
        self.frames = []

    def stop_save(self):
        self.save_button.configure(text = 'Save', command=self.save)
        now = time.strftime("%Y-%m-%dT%H:%M:%S")
        pickle.dump(self.frames, open(now + ".p", "wb"))
        self.frames = []

    def calibrate(self):
        self.calibrate_button.configure(text = 'Calibrating...', state="disabled")
        self.ir_calibrate.set()

    def cm(self, val):
        val = int(val)
        self.ir_commands.put(('cm', val))

    def bias(self, val):
        val = int(val)
        self.ir_commands.put(('bias', val))

    def clock(self, val):
        val = int(val)
        self.ir_commands.put(('clock', val))

    def toggle_overlay(self):
        if self.overlay:
            self.overlay = False
            self.overlay_button.configure(text = 'Overlay')
        else:
            self.overlay = True
            self.overlay_button.configure(text = 'No Overlay')
Example #5
0
    def viewCameraPi(self):

        #text = 'This is a message from app to inform that app start running now!'
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone1,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone2,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        print("START SCRIPT AND MAJOR WARNING!")
        statusSMS = 'delivered'
        if((statusSMS != 'failed') and (statusSMS != 'undelivered')):
            #camera = PiCamera()
            #camera.resolution = ( 640, 480)
            #camera.framerate = 32

            #rawCapture = PiRGBArray(camera, size=( 640, 480))
            #self.sumMSE = self.sumSSIM = self.avgMSE = self.avgSSIM = 0
            self.tempHour = datetime.datetime.now().hour
            self.tempMinute = datetime.datetime.now().minute
            self.warmup = 0

            vs = VideoStream(usePiCamera=1,resolution=(640,480)).start()
            time.sleep(1.2)

            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = None
            (h, w) = (None, None)
            zeros = None

            print ("view camera")

            while True:
                self.warmup+=1
                if(self.warmup >=5):
                    frame = vs.read()
                    if writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frame.shape[:2]
                        writer = cv2.VideoWriter('exampleTH3.avi', fourcc, 20,
                            (w, h), True)

                    writer.write(frame)

                    self.curImage = frame
                    self.getDefImagePerHours()
                    #write xml
                    print("process frame thu {}".format(self.warmup-4))
                    self.writeXML()

                    # stop the program after 5 minutes:
                    if(datetime.datetime.now().minute - self.tempMinute >5):
                        self.final()
                        break


            # for frame in camera.capture_continuous( rawCapture, format("bgr"), use_video_port = True):
            #
            #     self.curImage = frame.array
            #     frame = vs.read()
            #     self.warmup +=1
            #     if(self.warmup >=5):
            #         self.getDefImagePerHours()
            #         # self.getDefImagePerTenMinutes()
            #
            #         #write xml
            #         print("process frame thu {}".format(self.warmup-4))
            #         self.writeXML()
            #
            #         #warning
            #         # self.warning()
            #
            #         # stop the program after 5 minutes:
            #         if(datetime.datetime.now().minute - self.tempMinute >5):
            #             self.final()
            #             break
            #     # if it is 16:00, the script will stop itself
            #     # tempBreak = datetime.datetime.now().hour
            #     # if(( tempBreak == 0) or (tempBreak == 6) or (tempBreak == 18) or (tempBreak == 12)):
            #     #     if(datetime.datetime.now().minute == 0):
            #     #         if((datetime.datetime.now().second >= 0) and (datetime.datetime.now().second <=3)):
            #     #             self.final()
            #     #             self.__init__(tempBreak)
            #
            #     # show frame
            #     # cv2.imshow("image", self.curImage)
            #     # key = cv2.waitKey(1) & 0xFF
            #
            #     #renew
            #     rawCapture.truncate(0)
            #
            #     #press 'q' to stop, press any key to continue
            #     # if(key == ord("q")):
            #     #     break
            #call function final
            vs.stop()
            writer.release()
            self.final()


        else :
            print("send message failed. So App will not run. Sorry for the inconvenience!!")
Example #6
0
        # find contours in the mask
        cnts=cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        #time.sleep(10)
        center=None
        if len(cnts)>0:
                cnt=cnts[0]
                rect = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(rect)
                box=np.int0(box)
                x1=(box[3][0]-(box[0][0]))
                y1=(box[3][1]-(box[0][1]))
                print 'x1', x1
                print 'y1', y1
                
                if ((abs(x1)>70)or (abs(y1)>70)):
                        #time.sleep(5)
                        cv2.imshow('OrigFrame',frame)
                        execfile("InvKin_Control.py")
                        print "picking up"
                cv2.drawContours(frame,[box],0,(0,0,255),2)                
        
        key=cv2.waitKey(1)& 0xFF
        if key==ord("q"):
               break


camera.stop()
cv2.destroyAllWindows()
          
Example #7
0
import time
import cv2
import imutils
from imutils.video import VideoStream


from matcher import Matcher

matcher = Matcher([("fau-logo", "./templates/fau-logo.png"),
                   ("first-logo", "./templates/first-logo.jpg"),
                   ("nextera-logo", "./templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "./templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=8)

cam = VideoStream(usePiCamera=False).start()

cnt = 0
while True:
    img = cam.read()
    cv2.imshow("Pic", img)
    print(matcher.match(img))
    key = cv2.waitKey(10)
    if key == ord('q'):
        break

cam.stop()
cv2.destroyAllWindows()
Example #8
0
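# Assumed imports, globals and helpers for this example (not shown in the
# listed snippet). detect_red and average_distance are project helpers that
# are not reproduced here; Color, Point and set_region are sketched below as
# plausible minimal versions, so treat them as guesses rather than the
# original project's code.
import time

import cv2
import numpy as np
from imutils.video import VideoStream

# module-level state shared with other parts of the original project
red_detected = False
red = False
line_detected = False
last_position = 0


class Color:
    """A named HSV range (assumed minimal helper)."""
    def __init__(self, name, lower, upper):
        self.name = name
        self.lower = np.array(lower)
        self.upper = np.array(upper)


class Point:
    """A 2D point (assumed minimal helper)."""
    def __init__(self, x, y):
        self.x = x
        self.y = y


def set_region(img, vertices):
    # Keep only the pixels inside the polygon given by `vertices`
    # (the usual fillPoly + bitwise_and masking pattern; assumed, not original).
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, (255, 255, 255))
    return cv2.bitwise_and(img, mask)
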
def line_detection(shared_object):
    """
    Detects the line in a special set region, calculates the average center of the line in the screen and detects red
    :param shared_object: Thread notifier
    :return: None
    """
    # Get view of picamera and do a small warmup of 0.3s
    cap = VideoStream(src=0, usePiCamera=True, resolution=(320, 240)).start()
    time.sleep(0.3)

    # Get width and height of the frame and make vertices for a triangle shaped region
    sample = cap.read()
    height, width, channel = sample.shape

    vertices = [
        (20, height),
        (width / 2, height / 2 + 20),
        (width - 20, height - 20),
    ]

    while not shared_object.has_to_stop():
        # Get current frame from picamera and make a cropped image with the vertices above with set_region
        img = cap.read()
        img = cv2.flip(img, -1)
        img_cropped = set_region(img, np.array([vertices], np.int32))

        # Add blur to the cropped image
        blur = cv2.GaussianBlur(img_cropped, (9, 9), 0)

        # Generate and set a mask for a range of black (color of the line) to the cropped image
        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        black = Color("Black", [0, 0, 0], [180, 92, 90])
        mask = cv2.inRange(hsv, black.lower, black.upper)

        # Checks if the color red is detected and calls function detect_red with the img
        global red_detected, red
        if detect_red(img, hsv):
            red_detected = True
        else:
            if red_detected:
                red = True
            red_detected = False

        # Set variables and get lines with Houghlines function on the mask of black
        theta = np.pi / 180
        threshold = 30
        min_line_length = 10
        max_line_gap = 40

        lines = cv2.HoughLinesP(mask, 1, theta, threshold, np.array([]),
                                min_line_length, max_line_gap)

        # Set line color to blue and clone the image to draw the lines on
        line_color = (255, 0, 0)
        img_clone = img.copy()

        global line_detected

        if lines is not None:
            for line in lines:
                for x1, y1, x2, y2 in line:
                    # Make two points with the pixels of the line and draw the line on the cloned image
                    p1 = Point(x1, y1)
                    p2 = Point(x2, y2)
                    cv2.line(img_clone, (p1.x, p1.y), (p2.x, p2.y), line_color,
                             5)
            # Calculate average distance with average_distance in percentages to the left and right
            left, right = average_distance(lines, width)
            global last_position
            last_position = round(left)
            line_detected = True
        else:
            line_detected = False

        # cv2.imshow('camservice-lijn', img_clone)

        # If q is pressed, break while loop
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.stop()
    cv2.destroyAllWindows()
Example #9
0
            elif event.key == pg.K_c:
                print("Going Backward-right")
                Backright(speed)

            elif event.key == pg.K_e:
                print("Doing a Circle-right")
                Circleright(speed)

            elif event.key == pg.K_q:
                print("Doing a Circle-left")
                Circleleft(speed)

            elif event.key == pg.K_l:
                print("Exiting!")
                GPIO.cleanup()
                webcam.stop()
                cv.destroyAllWindows()
                pg.quit()

        elif event.type == pg.QUIT:
            print("Exiting!")
            GPIO.cleanup()
            webcam.stop()
            cv.destroyAllWindows()
            pg.quit()
            break

        elif event.type == pg.KEYUP:
            Stop()
            print("Stop")
Example #10
0
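# Assumed imports and module-level globals for this example (not shown in the
# listed snippet). FaceDetector is assumed to come from the `faced` package;
# helpers and cluster_faces are project-specific modules from the original
# source. The initial values below are assumptions.
import calendar
import time

import cv2
import dlib
import imutils
from imutils.video import VideoStream
from faced import FaceDetector

import cluster_faces
import helpers

font = cv2.FONT_HERSHEY_SIMPLEX
success = True
totalInferenceDuration = 0.0
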
def run(mode, localPath):
    global font
    global success
    global totalInferenceDuration
    print("CUDE usage status : " + str(dlib.DLIB_USE_CUDA))
    #faced
    face_detector = FaceDetector()
    startTS = time.time()
    """ Load models """
    predictor_path = "assets/shape_predictor_5_face_landmarks.dat"
    face_rec_model_path = "assets/dlib_face_recognition_resnet_model_v1.dat"
    facerec = dlib.face_recognition_model_v1(face_rec_model_path)
    sp = dlib.shape_predictor(predictor_path)
    """ Check local/stream availability """
    if (mode == "stream"):
        # initialize the video stream and allow the cammera sensor to warmup
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)
        # imutils' VideoStream has no get(); read one frame to obtain the dimensions
        frame = vs.read()
        (h, w) = frame.shape[:2]
    elif (mode == "local"):
        vidcap = cv2.VideoCapture(localPath)
        success, frame = vidcap.read()
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        frameCtr = 0
        w = int(vidcap.get(3))
        h = int(vidcap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))

    while success:
        processStartTs = time.time()
        """ Acquire the next frame """
        if (mode == "stream"):
            frame = vs.read()

        elif (mode == "local"):
            success, frame = vidcap.read()

            frameCtr += 1
        """ grab the frame from the threaded video stream and resize it
		 to have a maximum width of 400 pixels """
        try:
            frame = imutils.resize(frame, width=400)
        except AttributeError:
            continue
        try:
            rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        except:
            break
        inferenceStartTs = time.time()
        #faced (a thresh argument can be added, 0.85 by default)
        bboxes = face_detector.predict(rgb_img)
        inferenceEndTs = time.time()
        totalInferenceDuration += inferenceEndTs - inferenceStartTs

        helpers.min_clusters = len(bboxes)
        if (mode == "stream"):
            timestamp = calendar.timegm(time.gmtime())
        elif (mode == "local"):
            timestamp = float(frameCtr / fps)

        for x, y, w, h, p in bboxes:
            top = int(y + h / 2)
            left = int(x - w / 2)
            bottom = int(y - h / 2)
            right = int(x + w / 2)
            cv2.rectangle(frame, (left, bottom), (right, top), (99, 44, 255),
                          1)
            cv2.putText(frame, str(p), (left, top + 5), font, 0.2,
                        (255, 255, 255), 1, cv2.LINE_AA)
            shape = sp(frame, dlib.rectangle(left, bottom, right, top))
            # Compute the 128D vector that describes the face in img identified by
            face_descriptor = facerec.compute_face_descriptor(frame, shape)
            bestIndex = cluster_faces.match(face_descriptor)
            if (bestIndex >= 0):
                cv2.putText(frame,
                            str(helpers.unique_persons[bestIndex]["uuid"]),
                            (left, top + 10), font, 0.2, (0, 255, 255), 1,
                            cv2.LINE_AA)
                data = [{
                    "uuid": helpers.unique_persons[bestIndex]["uuid"],
                    "timestamp": timestamp
                }]
                helpers.individual_stats.extend(data)
            else:
                cv2.putText(frame, "Learning...", (left, top + 10), font, 0.2,
                            (0, 255, 255), 1, cv2.LINE_AA)
                data = [{
                    "label": 0,
                    "timestamp": timestamp,
                    "encoding": face_descriptor
                }]
                helpers.candidate_persons.extend(data)

        try:
            frame = imutils.resize(frame, width=720)
        except AttributeError:
            continue

        cv2.putText(frame,
                    "FPS : " + str(int(1 / (time.time() - processStartTs))),
                    (20, 30), font, 1, (0, 255, 0), 3, cv2.LINE_AA, False)
        out.write(frame)

        #cv2.imshow("Frame", frame)
        if (len(helpers.candidate_persons) >=
            (helpers.MIN_FACES_PER_CLUSTER * helpers.min_clusters)):
            cluster_faces.cluster()
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    if (mode == "stream"):
        vs.stop()
    endTS = time.time()
    out.release()
    print("Total number of unique faces = ", len(helpers.unique_persons))
    print("Total duration")
    print(endTS - startTS)
    print("Total inference duration")
    print(totalInferenceDuration)
Example #11
0
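# Assumed imports for this example (not shown in the listed snippet).
# ShowDistance() and main(), called further down, are defined elsewhere in the
# original project and are not reproduced here.
import argparse
import math
import time

import cv2
import imutils
import numpy as np
import pyttsx3
from imutils.video import VideoStream, FPS
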
def objectdetection(objname):
    engine = pyttsx3.init()

    engine.setProperty('rate', 150)

    ap = argparse.ArgumentParser()

    ap.add_argument('-p',
                    '--prototxt',
                    required=True,
                    help="path to Caffe 'deploy' prototxt file")

    ap.add_argument('-m',
                    '--model',
                    required=True,
                    help='path to Caffe pre-trained model')

    ap.add_argument('-c',
                    '--confidence',
                    type=float,
                    default=0.2,
                    help='minimum probability to filter weak detections')

    args = vars(ap.parse_args())

    CLASSES = [
        'background',
        'aeroplane',
        'bicycle',
        'bird',
        'boat',
        'bottle',
        'bus',
        'car',
        'cat',
        'chair',
        'cow',
        'diningtable',
        'dog',
        'horse',
        'motorbike',
        'person',
        'pottedplant',
        'sheep',
        'sofa',
        'train',
        'tvmonitor',
    ]

    COLORS = np.random.uniform(0, 0xFF, size=(len(CLASSES), 3))

    print('[INFO] loading model...')

    net = cv2.dnn.readNetFromCaffe(args['prototxt'], args['model'])

    print('Model information obtained')

    print('[INFO] starting video stream...')

    vs = VideoStream(src=0).start()

    time.sleep(2.0)

    fps = FPS().start()

    frame_width = 600

    user_x = frame_width / 2

    user_y = frame_width

    while True:

        frame = vs.read()

        frame = imutils.resize(frame, width=frame_width)
        frame = imutils.rotate(frame, angle=180)

        (h, w) = frame.shape[:2]

        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        net.setInput(blob)

        detections = net.forward()

        fl = False

        for i in np.arange(0, detections.shape[2]):

            confidence = detections[0, 0, i, 2]

            if confidence > args['confidence']:

                idx = int(detections[0, 0, i, 1])

                if objname in CLASSES[idx]:

                    fl = True

                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])

                    (startX, startY, endX, endY) = box.astype('int')

                    label = '{}: {:.2f}%'.format(CLASSES[idx],
                                                 confidence * 100)

                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  COLORS[idx], 2)

                    y = (startY - 15 if startY - 15 > 15 else startY + 15)

                    cv2.putText(
                        frame,
                        label,
                        (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        COLORS[idx],
                        2,
                    )

                    centerX = (endX + startX) / 2

                    centerY = (endY + startY) / 2

                    cv2.line(frame, (int(user_x), int(user_y)),
                             (int(centerX), int(centerY)), (0xFF, 0, 0), 7)

                    dir = 0

                    if centerX > user_x:

                        dir = 1

                        dirr = 'right'
                        print(dirr)
                        engine.say(dirr)

                        engine.runAndWait()

                    elif centerX < user_x:

                        dir = -1

                        deg = math.degrees(
                            math.atan(
                                abs(centerX - user_x) / abs(centerY - user_y)))

                        middle = centerY - user_y

                        deg1 = centerX - user_x

                        dirr = 'left'
                        print(dirr)
                        engine.say(dirr)

                        engine.runAndWait()

                        #print("LR = " + str(deg1))
                        #print("ceta = " + str(deg))
                        """if deg1 > 50:

                            dirr = 'right'
                            print(dirr)
                            engine.say(dirr)

                            engine.runAndWait()
"""

                        if deg1 < 50 and deg1 > -50:

                            dirr = 'forward'
                            print(dirr)
                            engine.say(dirr)

                            engine.runAndWait()
                            distance = ShowDistance()
                            engine.say(distance)
                            engine.runAndWait()
                        """if deg1 < -50:

                            dirr = 'left'
                            print(dirr)
                            engine.say(dirr)

                            engine.runAndWait()
"""

        cv2.imshow('Frame', frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
            fps.update()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    vs.stop()
    main()
Example #12
0
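    # This fragment assumes the usual motion-detection preamble that is not
    # shown here: a grayscale, blurred frame is differenced against a reference
    # frame, thresholded and dilated, and contours are extracted from the
    # result. A sketch of those assumed earlier steps (not from the original
    # listing):
    #
    #   frameDelta = cv2.absdiff(firstFrame, gray)
    #   thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    #   thresh = cv2.dilate(thresh, None, iterations=2)
    #   cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    #                           cv2.CHAIN_APPROX_SIMPLE)
    #   cnts = imutils.grab_contours(cnts)
    #   text = "Unoccupied"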
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"
    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame,
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
Example #13
0
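# Assumed imports for this example (not shown in the listed snippet),
# inferred from the calls it makes.
import pickle
import time

import cv2
import face_recognition
import imutils
import pygame
from gtts import gTTS
from imutils.video import VideoStream, FPS
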
def cameraInput():
    # load the known faces and embeddings along with OpenCV's Haar
    # cascade for face detection
    print("[INFO] loading encodings + face detector...")
    data = pickle.loads(open("/home/pi/Desktop/pi-face-recognition/ourencodings.pickle", "rb").read())
    detector = cv2.CascadeClassifier("/home/pi/Desktop/pi-face-recognition/haarcascade_frontalface_default.xml")

    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    # vs = VideoStream(src=0).start()
    vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)

    # start the FPS counter
    fps = FPS().start()
    count = 0
    start_time = 0

    check = False

    #############################################
    ## -- NEED TO CHANGE VIDEO->CAMERA
    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to 500px (to speedup processing)
        frame = vs.read()
        frame = imutils.resize(frame, width=500)

        # convert the input frame from (1) BGR to grayscale (for face
        # detection) and (2) from BGR to RGB (for face recognition)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # detect faces in the grayscale frame
        rects = detector.detectMultiScale(gray, scaleFactor=1.1,
                                          minNeighbors=5, minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

        # OpenCV returns bounding box coordinates in (x, y, w, h) order
        # but we need them in (top, right, bottom, left) order, so we
        # need to do a bit of reordering
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []

        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"],
                                                     encoding)
            name = "Unknown"

            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}

                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie Python
                # will select first entry in the dictionary)
                name = max(counts, key=counts.get)

                if (name != "Unknown"):
                    print("person")
                    print(count)
                    if (count == 0):
                        start_time = time.time()
                        count = count + 1
                    pre_name = name
                    between = (time.time() - start_time) * 1000 / 60
                    if (between >= 2):
                        #########
                        print('TTS start')
                        tts = gTTS(name)
                        tts.save('obj.mp3')
                        print('TTS ok')

                        print('pygame start')
                        pygame.init()
                        pygame.mixer.init()
                        obj_out = pygame.mixer.music.load('obj.mp3')
                        pygame.mixer.music.play()
                        # pygame.event.wait()
                        print('pygame ok')
                        ############
                        check = True
                        break

                        count = 0
                        start_time = 0

            # update the list of names
            names.append(name)

        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image
            cv2.rectangle(frame, (left, top), (right, bottom),
                          (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.75, (0, 255, 0), 2)

        # display the image to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #14
0
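    # This fragment assumes context that is not shown here: it runs inside a
    # `while True:` loop, `frameBGR` is a (typically blurred) frame read from
    # the camera, and the lowHue/lowSat/lowVal and highHue/highSat/highVal
    # values come from trackbars or constants defined earlier. A sketch of the
    # assumed preamble (not from the original listing):
    #
    #   frame = capture.read()
    #   frameBGR = cv2.GaussianBlur(frame, (7, 7), 0)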
    frameHSV = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
    
    # HSV values to define a colour range we want to create a mask from.
    colorLow = np.array([lowHue,lowSat,lowVal])
    colorHigh = np.array([highHue,highSat,highVal])
    mask = cv2.inRange(frameHSV, colorLow, colorHigh)
    # Show the first mask
    cv2.imshow('mask-plain', mask)

    # Cleanup the mask with Morphological Transformation functions
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    # Show morphological transformation mask
    cv2.imshow('mask', mask)
    
    # Put mask over top of the original image.
    result = cv2.bitwise_and(frame, frame, mask = mask)

    # Show final output image
    cv2.imshow('colorTest', result)
    
    # listen for the exit key (q) to exit the main loop
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

# close program
cv2.destroyAllWindows()
capture.stop()
Example #15
0
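        # This fragment assumes the standard blink-detection context that is
        # not shown here: EYE_AR_THRESH and EYE_AR_CONSEC_FRAMES constants, the
        # COUNTER and TOTAL counters, and `ear` computed each frame as the mean
        # eye aspect ratio of both eyes, with the preceding branch assumed to be:
        #
        #   if ear < EYE_AR_THRESH:
        #       COUNTER += 1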
        # otherwise, the eye aspect ratio is not below the blink
        # threshold
        else:
            # if the eyes were closed for a sufficient number of frames,
            # then increment the total number of blinks
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1

            # reset the eye frame counter
            COUNTER = 0

        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
Example #16
0
    #G = cv2.merge([zeros, G, zeros])
    #B = cv2.merge([B, zeros, zeros])
 
    # construct the final output frame, storing the original frame
    # at the top-left, the red channel in the top-right, the green
    # channel in the bottom-right, and the blue channel in the
    # bottom-left
    output = np.zeros((h, w, 3), dtype="uint8")
    output[0:h, 0:w] = frame
    #output[0:h, w:w * 2] = R
    #output[h:h * 2, w:w * 2] = G
    #output[h:h * 2, 0:w] = B
 
    # write the output frame to file
    writer.write(output)

    # show the frames
    cv2.imshow("Frame", frame)
    cv2.imshow("Output", output)
    key = cv2.waitKey(1) & 0xFF
 
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
 
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
writer.release()
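
# Assumed imports for the barcode_scanner function below (not shown in the
# listed snippet), inferred from the calls it makes.
import time

import cv2
import imutils
import zxing
from PIL import Image
from imutils.video import VideoStream
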
def barcode_scanner(csv_file):
    print("[INFO] Starting video stream ....")
    vs = VideoStream(src=0).start()
    #vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)

    csv = open(csv_file, "a")
    found = set()
    reader = zxing.BarCodeReader()

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        #barcodes = pyzbar.decode(frame)

        #f = BytesIO()

        im = Image.fromarray(frame)

        im.save("./tmp/a.jpeg")
        barcodes = reader.decode("./tmp/a.jpeg")

        #im.save(f, "JPEG")
        #barcodes = reader.decode(f.read())
        #f.close()

        if barcodes:
            print(barcodes)

        if barcodes and (barcodes.format == "CODE_39"
                         or barcodes.format == "QR_CODE"):
            print(dir(barcodes))
            print("raw: {}".format(barcodes.raw))
            print("type: {}".format(barcodes.type))
            print("format: {}".format(barcodes.format))
            print("parse: {}".format(barcodes.parse))
            print("parsed: {}".format(barcodes.parsed))
            print("points: {}".format(barcodes.points))
            print(barcodes)

            barcode_data = barcodes.raw
            barcode_type = barcodes.format

            (x1, y1) = (int(barcodes.points[0][0]), int(barcodes.points[0][1]))
            (x2, y2) = (int(barcodes.points[1][0]), int(barcodes.points[1][1]))
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)

            text = "{} ({})".format(barcode_data, barcode_type)
            cv2.putText(frame, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255), 2)

            #for barcode in barcodes:
            #    barcode_data = barcode.data.decode("utf-8")
            #    barcode_type = barcode.type

            #    (x, y, w, h) = barcode.rect
            #    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            #
            #    text = "{} ({})".format(barcode_data, barcode_type)
            #    cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

            #    if barcode_data not in found:
            #        csv.write("{},{},{}\n".format(datetime.datetime.now(), barcode_data, barcode_type))
            #        csv.flush()
            #        found.add(barcode_data)

        cv2.imshow("Barcode Scanner", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    print("[INFO] Cleaning up....")
    csv.close()
    cv2.destroyAllWindows()
    vs.stop()