def picamera_ndvi(preview=False, resolution=(640, 480), framerate=60):
    stream = PiVideoStream(resolution=resolution, framerate=framerate).start()
    time.sleep(2)  # allow the camera sensor to warm up
    print('Video stream started.')
    directory = 'capture_' + str(get_time_ms()) + '/'
    # loop over the frames from the video stream indefinitely
    while True:
        # grab the frame from the threaded video stream
        frame = stream.read()
        if frame is not None:
            b, g, r = cv2.split(frame)
            # get NDVI from the RGB image
            ndvi = vegevision.get_ndvi(b, r)
            # scale to [0, 255] *before* casting; casting the float NDVI to
            # uint8 first would truncate every value to 0 or 1
            ndvi_colorized = apply_custom_colormap(
                (255 * ndvi).astype(np.uint8),
                cmap=vegevision.load_cmap('NDVI_VGYRM-lut.csv'))
            # show the frame
            if preview:
                cv2.imshow("Video Input with NDVI", ndvi_colorized)
                print('Displaying NDVI...')
            else:
                save_image(ndvi, directory=directory)
        # if the `q` key was pressed, break from the loop
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    stream.stop()

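# NDVI is the normalized difference of a near-infrared and a visible channel.
# A minimal sketch of what `vegevision.get_ndvi` is assumed to compute above
# (the argument order and the library's exact behavior are assumptions):
import numpy as np

def get_ndvi(vis, nir):
    """Return (nir - vis) / (nir + vis) as a float array in [-1, 1]."""
    vis = vis.astype(np.float64)
    nir = nir.astype(np.float64)
    denom = nir + vis
    denom[denom == 0] = 1.0  # avoid division by zero on black pixels
    return (nir - vis) / denom
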
def __init__(self, src=0, usePiCamera=False, resolution=(370, 290), framerate=32):
    if usePiCamera:
        from pivideostream import PiVideoStream
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    else:
        self.stream = WebcamVideoStream(src=src)

def __init__(self, src=0, usePiCamera=False, resolution=(320, 240), framerate=32):
    # check to see if the picamera module should be used
    if usePiCamera:
        # only import the picamera packages if we are explicitly told
        # to do so -- this helps remove the requirement of
        # `picamera[array]` from desktops or laptops that still want
        # to use the `imutils` package
        from pivideostream import PiVideoStream

        # initialize the picamera stream and allow the camera
        # sensor to warm up
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)

def __init__(self, src=0, usePiCamera=False, resolution=(320, 240), framerate=60):
    if usePiCamera:
        from pivideostream import PiVideoStream
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    else:
        self.stream = WebcamVideoStream(src=src)

def start(self):
    return self.stream.start()

def update(self):
    self.stream.update()

def read(self):
    return self.stream.read()

def stop(self):
    self.stream.stop()

def __init__(self, src=0, isPiCamera=False, resolution=(320, 240), framerate=32):
    if isPiCamera:
        from pivideostream import PiVideoStream
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    else:
        from usbvideostream import usbVideoStream
        self.stream = usbVideoStream(src, resolution=resolution)

class VideoCamera(object):
    def __init__(self, flip=False, fps=10, res=(160, 128)):
        self.vs = PiVideoStream(resolution=res, framerate=fps).start()
        self.flip = flip
        print("cam init")
        time.sleep(2.0)  # allow the camera sensor to warm up

    def __del__(self):
        print("cam del")
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, png = cv2.imencode('.png', frame)
        return png.tobytes()

class VideoStream:
    def __init__(self, src=0, isPiCamera=False, resolution=(320, 240), framerate=32):
        if isPiCamera:
            from pivideostream import PiVideoStream
            self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
        else:
            from usbvideostream import usbVideoStream
            self.stream = usbVideoStream(src, resolution=resolution)

    def start(self):
        return self.stream.start()

    def update(self):
        self.stream.update()

    def read(self):
        return self.stream.read()

    def stop(self):
        self.stream.stop()

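# A minimal usage sketch for the VideoStream wrapper above. It assumes the
# local `pivideostream`/`usbvideostream` modules are importable and that the
# underlying stream yields BGR frames, as in the other snippets here.
import time
import cv2

vs = VideoStream(isPiCamera=True, resolution=(320, 240), framerate=32)
vs.start()
time.sleep(2.0)  # let the camera sensor warm up before reading frames
while True:
    frame = vs.read()
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
vs.stop()
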
def __init__(self, src=0, usePiCamera=False, resolution=(720, 480), framerate=32):
    if usePiCamera:
        # only import this package if using the PiCamera
        from pivideostream import PiVideoStream
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    # otherwise, we are using OpenCV, so initialize the webcam stream
    else:
        self.stream = WebcamVideoStream(src=src)

def __init__(self, src=0, usePiCamera=False, resolution=(320, 240), framerate=32):
    # should the PiCamera module be used?
    if usePiCamera:
        # required setup
        from pivideostream import PiVideoStream
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    # otherwise fall back to the webcam
    else:
        self.stream = WebcamVideoStream(src=src)

def __init__(self, src=0, usePiCamera=False, resolution=(320, 240), framerate=32):
    # check to see if the picamera module should be used
    if usePiCamera:
        # only import the picamera packages if we are explicitly told
        # to do so -- this helps remove the requirement of
        # `picamera[array]`
        from pivideostream import PiVideoStream

        # initialize the picamera stream and allow the camera
        # sensor to warm up
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    # otherwise, we are using OpenCV, so initialize the webcam stream
    else:
        self.stream = WebcamVideoStream(src=src)

def __init__(self, src=0, usePiCamera=False, resolution=(320, 240), framerate=32):
    # check to see if the picamera module should be used
    if usePiCamera:
        from pivideostream import PiVideoStream
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
    else:
        self.stream = WebcamVideoStream(src=src)

def start(self):
    # start the threaded video stream
    return self.stream.start()

def update(self):
    # grab the next frame from the stream
    self.stream.update()

def read(self):
    # return the current frame
    return self.stream.read()

def stop(self):
    # stop the thread and release any resources
    self.stream.stop()

class VideoStream:
    def __init__(self, src=0, usePiCamera=False, resolution=(320, 240),
                 framerate=32, **kwargs):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages if we are explicitly told
            # to do so -- this helps remove the requirement of
            # `picamera[array]` from desktops or laptops that still want
            # to use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warm up
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate, **kwargs)
        # otherwise, we are using OpenCV, so initialize the webcam stream
        else:
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()

def main():
    vs = PiVideoStream()
    vs.start()
    time.sleep(2.0)  # camera warm-up
    vs.consistent()

    setup_trackbars(range_filter)
    cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
    cv2.namedWindow("Thresh", cv2.WINDOW_NORMAL)

    while True:
        image = vs.read()
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(range_filter)
        thresh = cv2.inRange(frame_to_thresh,
                             (v1_min, v2_min, v3_min),
                             (v1_max, v2_max, v3_max))
        cv2.imshow("Original", image)
        cv2.imshow("Thresh", thresh)
        # compare key codes with ==, not identity
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

def main(argv):
    # todo: crop xy threshold, pipette, mask, contours
    use_gui = 0
    simulate = 0
    try:
        opts, args = getopt.getopt(sys.argv[1:], "g:s:", ["gui=", "sim="])
    except getopt.GetoptError as err:
        usage()
        sys.exit(2)
    #print("opts: ", opts)
    #print("args: ", sys.argv[1:])
    if len(opts) < 1:
        use_gui = 0
        simulate = 0
    for o, a in opts:
        if o == "-g":
            use_gui = int(a)
        elif o == "-s":
            simulate = int(a)

    t_gui = Thread(target=guiThread)
    if use_gui > 0:
        t_gui.start()
    else:
        load_settings()
        #print("set guiCommands nogui:", guiCommands)
        guiCommands['previewRaw'] = False

    if simulate == 0:
        import RPi.GPIO as GPIO
        from pivideostream import PiVideoStream
        #vs = PiVideoStream((1296, 736)).start()
        vs = PiVideoStream((1920, 1088)).start()
        time.sleep(2)

    runVideo = True
    imageCounter = 0
    #print("picSavePath: ", picSavePath)
    while runVideo == True:
        #print("mainloop")
        runVideo = guiCommands['runVideo']
        if simulate == 0:
            grabbedFrame = vs.readCropped(guiCommands['cropleft'],
                                          guiCommands['croptop'],
                                          guiCommands['cropright'],
                                          guiCommands['cropbottom'])
        else:
            grabbedFrame = cv2.imread("images/image_001.png", -1)
            #runVideo = False

        if guiCommands['takePic'] == True and simulate == 0:
            filename = "images/image_" + str(imageCounter).zfill(3) + ".png"
            cv2.imwrite(filename, grabbedFrame)
            imageCounter += 1
            guiCommands['takePic'] = False

        Live = ContourOperations()
        frame_hsv = cv2.cvtColor(grabbedFrame, cv2.COLOR_BGR2HSV)
        lower_red = np.array([guiCommands['lowerhue'], guiCommands['lowersat'], guiCommands['lowerval']])
        upper_red = np.array([guiCommands['upperhue'], guiCommands['uppersat'], guiCommands['upperval']])
        mask = cv2.inRange(frame_hsv, lower_red, upper_red)
        filtered = cv2.bitwise_and(grabbedFrame, grabbedFrame, mask=mask)
        blue, green, red = cv2.split(grabbedFrame)
        background = cv2.imread("background/image_000.png", -1)
        #bluebg, greenbg, redbg = cv2.split(grabbedFrame)
        #redOnly = Live.computeRedMinusGB(grabbedFrame)
        #red_Threshold = Live.computeThreshold(redOnly, guiCommands['threshold'])
        #redThresContour = red_Threshold.copy()
        #contour_ext = Live.get_selected_contour(mask, 0)

        if guiCommands['previewRaw'] == True:
            cv2.imshow('All', grabbedFrame)
            cv2.imshow('Mask', mask)
            cv2.imshow('Filtered', filtered)
            #cv2.imshow('All-BG', cv2.subtract(grabbedFrame, background))
            #Live.showContour(grabbedFrame, contour_ext)
            #cv2.imshow('red', red_Threshold)
            #cv2.imshow('RedOnly', redOnly)
            '''
            red = Live.showRed(False)
            #print("Type grabbedFrame", grabbedFrame.dtype())
            green = Live.showGreen(False)
            blue = Live.showBlue(False)
            #cv2.imshow('Red-Green', red - green)
            #cv2.imshow('Red-Blue', red - blue)
            #cv2.imshow('Red-Blue-Green', red - blue - green)
            Live.showPixelValue(cv2.subtract(cv2.subtract(red, blue), green), 60, 95, 'r-b-g')
            Live.showPixelValue(red - blue, 60, 95, 'r-b')
            Live.showPixelValue(red, 60, 95, 'r')
            Live.showPixelValue(red - green, 60, 95, 'r-g')
            red[95][60] = red[95][60] - 65
            Live.showPixelValue(red, 60, 95, 'r new')
            Live.showPixelValue(red - green, 60, 95, 'r-g new')
            '''
        else:
            cv2.destroyAllWindows()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            runVideo = False
            break

""" Lecture et affichage de la video """ from pivideostream import PiVideoStream from imutils.video import FPS # Pour les mesures de framerate import cv2 import time import numpy as np # Nom de la fenetre d'affichage window_name = 'preview' # Creation du thread de lecture + setup vs=PiVideoStream() vs.camera.video_stabilization = True # Demarrage du flux video + warmup de la camera vs.start() time.sleep(2.0) # Creation de la fenetre d'affichage cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) fps = FPS().start() while True : frame = vs.read() fps.update() cv2.imshow(window_name, frame) key = cv2.waitKey(1) & 0xFF
def main():
    pwm = PWM(0x40)
    pwm.setPWMFreq(100)
    vs = PiVideoStream(resolution=(640, 480)).start()
    time.sleep(1.0)  # camera warm-up

    frame = vs.read()
    prvs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame)
    hsv[..., 1] = 255

    mode = 0
    speed = 0
    steer = 0
    while True:
        frame = vs.read()
        #frame = rotate_image(frame)
        next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if mode == 3:
            # dense optical flow; passing None lets OpenCV allocate the flow array
            flow = cv2.calcOpticalFlowFarneback(prvs, next, None,
                                                0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        cv2.putText(frame, "Speed: {}, Steer: {}".format(speed, steer),
                    (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 0, 0))
        if mode == 1:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if mode == 2:
            frame = cv2.Canny(frame, 20, 100)
        if mode == 3:
            cv2.imshow("Frame", bgr)
        else:
            cv2.imshow("Frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        if key == ord("f"):
            mode = 3
        if key == ord("e"):
            mode = 2
        if key == ord("g"):
            mode = 1
        if key == ord("c"):
            mode = 0
        if key == ord("l"):
            pwm.setPWM(0, 0, 500)
        if key == ord("r"):
            pwm.setPWM(0, 0, 300)
        if key == 81:  # left arrow
            if steer > -1:
                steer = steer - 0.1
        if key == 83:  # right arrow
            if steer < 1:
                steer = steer + 0.1
        if key == 82:  # up arrow
            if speed < 1:
                speed = speed + 0.1
        if key == 84:  # down arrow
            if speed > -1:
                speed = speed - 0.1
        if key == ord("s"):
            speed = 0
            steer = 0

        pwm.setPWM(0, 0, 500 + int(speed * 100))
        pwm.setPWM(2, 0, 670 - int(steer * 100))
        prvs = next

    cv2.destroyAllWindows()
    vs.stop()

# import the necessary packages
from pivideostream import PiVideoStream
import datetime
import imutils
import time
import cv2
import numpy as np

death = 0.0
temp = 0
counter = 0
cond = False

# initialize the video stream and allow the camera sensor to warm up
vs = PiVideoStream(resolution=(320, 240), framerate=32)
vs.start()
time.sleep(2.0)

# initialize the start time after warm-up
start_time = time.time()
while True:
    elapsed_time = time.time() - start_time
    elap = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
    onset = time.time()
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

configs.set("span_l",span_l) configs.set("span_s",span_s) # def update_command(, center) if flag_display: cv2.namedWindow("plots") cv2.createTrackbar("H","plots",span_h,255,update_trackbars) cv2.createTrackbar("L","plots",span_l,255,update_trackbars) cv2.createTrackbar("S","plots",span_s,255,update_trackbars) # initialize and configure camera module if(flag_raspberry>0): print("Starting Raspberry Pi Camera module Thread") vs = PiVideoStream().start() else: print "Start OpenCV Video Capture module" vs = cv2.VideoCapture(0) time.sleep(2.0) try: while True: # store fps timer_list =[] timer_list.append(["FPS",int(cv2.getTickFrequency()/(cv2.getTickCount() - loop_timer_start))]) loop_timer_start = cv2.getTickCount() finger_list_filtered = [] finger_num = 0 contour_solidity = 0 center = (0,0)
img1 = cv2.imread(fn1, 0)

"""
camera = PiCamera()
#camera.resolution = (640, 480)
#camera.framerate = 32
rawCapture = PiRGBArray(camera)
#rawCapture = io.BytesIO()
time.sleep(0.1)
"""

# initialize the picamera stream and allow the camera
# sensor to warm up
stream = PiVideoStream(resolution=(640, 480), framerate=32)
vs = stream.start()
time.sleep(2.0)

img1 = resize(img1, preferred_dimensions=(640, 480))
kp = detector.detect(img1, None)
kp, desc = compute.compute(img1, kp)

count = 0
start_time = timer()
frame = vs.read()

import cv2     # OpenCV library
import time    # timing functions
import serial  # serial-port communication
from pivideostream import PiVideoStream

# Start communication over the serial port
ser = serial.Serial('/dev/ttyACM0', 9600)

# Display window for the robot's "view"
cv2.namedWindow('preview', cv2.WINDOW_NORMAL)

# HSV thresholds for green
greenLower = (90, 70, 90)
greenUpper = (105, 255, 255)

# Start the video stream on a separate thread
vs = PiVideoStream()
vs.start()
# Give the camera time to warm up
time.sleep(2)
vs.consistent()
print("Starting")

# Main loop
while True:
    frame = vs.read()
    # Convert the frame to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Build a mask for the defined color
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=4)

from constantes import *

# Load the model
model = load_model(sys.argv[1])
print("Model loaded")

# Init the motors and the HAT
i2c = busio.I2C(board.SCL, board.SDA)
hat = adafruit_pca9685.PCA9685(i2c)
kit = ServoKit(channels=16)
hat.frequency = 50
kit.servo[0].angle = 100
kit.servo[1].angle = 100

vs = PiVideoStream().start()
time.sleep(2.0)  # camera warm-up

from PIL import Image
frame = vs.read()
img = Image.fromarray(frame)
img.save("test.png")

was_direction = 0

# Starting loop
print("Ready! Press CTRL+C to START/STOP:")
try:
    while True:

from flask import Flask, render_template, Response, request, jsonify
from flask_basicauth import BasicAuth
from pivideostream import PiVideoStream
from picamera import PiCamera
from picamera.array import PiRGBArray
import numpy as np
import cv2
import time
from servo_manager import ServoManager
from motor_manager import MotorManager
#import urllib.request, urllib.parse

try:
    # video stream
    vs = PiVideoStream().start()
    # vertical-axis aiming servo
    servo = ServoManager(11)
    servo.setAngleInDegrees(90)
    # horizontal-axis aiming motor
    motor = MotorManager(16, 18, 22)
    # update the public ip on the main site for easy access
    #data = {}
    #data['new'] = 'remote_password'
    #url_values = urllib.parse.urlencode(data)
    #url = 'remote page for receiving ip update'
    #full_url = url + '?' + url_values
    #data = urllib.request.urlopen(full_url)

class VideoCamera(object):
    def __init__(self, resolution=(320, 240), framerate=32):
        self.conf = json.load(open("conf.json"))
        self.lt = LocalTime("Baltimore")
        self.avg = None
        self.avg_count = 0
        self.motionCounter = 0
        self.motion_frames = []
        self.x = 0
        self.y = 0
        self.w = 0
        self.h = 0
        self.contour_area = 0
        self.vs = PiVideoStream(resolution, framerate).start()
        time.sleep(self.conf["camera_warmup_time"])

    def hflip(self, hflip=True):
        self.vs.hflip(hflip)

    def vflip(self, vflip=True):
        self.vs.vflip(vflip)

    def rotation(self, angle=0):
        self.vs.rotation(angle)

    def exposure_mode(self, exposure_mode="auto"):
        self.vs.exposure_mode(exposure_mode)

    def iso(self, iso=0):
        self.vs.iso(iso)

    def shutter_speed(self, speed):
        self.vs.shutter_speed(speed)

    def change_framerate(self, framerate=32):
        self.vs.stop(stop_camera=False)
        time.sleep(self.conf["camera_cooldown_time"])
        self.vs.camera.framerate = framerate
        self.vs.shutter_speed(0)
        self.vs.start()
        time.sleep(self.conf["camera_warmup_time"])
        self.avg_count = 0

    def __del__(self):
        self.vs.stop(stop_camera=True)

    def get_frame(self):
        frame = self.vs.read().copy()
        framerate = self.vs.camera.framerate
        # draw the text and timestamp on the frame
        timestamp = self.lt.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        cv2.putText(
            frame,
            "Motion on: {}; FPS: {}; Contour area: {}".format(
                self.avg_count == self.conf["camera_adjustment_frames"],
                framerate, self.contour_area),
            (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        if self.w > 0:
            cv2.rectangle(frame, (self.x, self.y),
                          (self.x + self.w, self.y + self.h), (0, 255, 0), 2)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self):
        frame = self.vs.read().copy()
        timestamp = self.lt.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        found_obj = False

        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # keep (re)building the running average during the adjustment frames
        if self.avg is None or self.avg_count < self.conf["camera_adjustment_frames"]:
            self.avg = gray.copy().astype("float")
            self.avg_count += 1
            if self.avg_count == self.conf["camera_adjustment_frames"]:
                print("[INFO] motion detector live...")
            return (None, False)

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on the thresholded image
        thresh = cv2.threshold(frameDelta, self.conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            ca = cv2.contourArea(c)
            self.contour_area = ca
            if ca < self.conf["min_area"]:
                continue
            # compute the bounding box for the contour, draw it on the
            # frame, and update found_obj
            (self.x, self.y, self.w, self.h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (self.x, self.y),
                          (self.x + self.w, self.y + self.h), (0, 255, 0), 2)
            found_obj = True

        # check to see if the room is occupied
        if found_obj:
            print("[INFO] found object!")
            # increment the motion counter
            self.motionCounter += 1
            cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
            self.motion_frames.append(frame)
            # check to see if the number of frames with consistent motion
            # is high enough
            if self.motionCounter >= self.conf["min_motion_frames"]:
                print("[INFO] occupied!")
                self.motionCounter = 0
                vis = np.concatenate(self.motion_frames, axis=0)
                return (vis, found_obj)
            return (None, False)
        # otherwise, the room is not occupied
        else:
            (self.x, self.y, self.w, self.h) = (0, 0, 0, 0)
            self.contour_area = 0
            self.motionCounter = 0
            self.motion_frames = []
            return (None, False)

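# A minimal polling sketch for the motion-detecting VideoCamera above. The
# (frame, found) return contract comes from get_object(); the file naming
# and the poll interval are illustrative.
import time
import cv2

cam = VideoCamera(resolution=(320, 240), framerate=32)
while True:
    vis, found = cam.get_object()
    if found and vis is not None:
        # vis is the vertical stack of motion frames; save it for review
        cv2.imwrite("motion_{}.jpg".format(int(time.time())), vis)
    time.sleep(0.1)
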
#!/usr/bin/env python
# coding=utf-8
import cv2
import time
#from settings import *
#from picamera.array import PiRGBArray
#from picamera import PiCamera
from pivideostream import PiVideoStream

vs = PiVideoStream((1920, 1088)).start()
time.sleep(1)
#time.sleep(3)

runVideo = True
#vs.SetParam()
while runVideo == True:
    #for i in range(0, 3):
    frame = vs.readCropped(20, 36, 44, 15)
    #frame = vs.read()
    #print("shape:", frame.shape[:2])
    cv2.imshow('Test', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        runVideo = False
        break
    time.sleep(0.1)
    #runVideo = False

class VisionSystem:
    """Threaded vision system: grabs frames, detects shapes and feeds the
    results to the state machine."""

    def __init__(self, q1, q2):
        self.queue_MAIN_2_VS = q1
        self.queue_VS_2_STM = q2
        self.resolution = (320, 240)
        self.video_stream = PiVideoStream(self.resolution, 60)
        self.settings = {
            'disp': False,
            'dispThresh': False,
            'dispContours': False,
            'dispApproxContours': False,
            'dispVertices': False,
            'dispNames': False,
            'dispCenters': False,
            'dispTHEcenter': False,
            'erodeValue': 0,
            'lowerThresh': 40,
            'working': True,
            'autoMode': False,
            'dispGoal': True
        }
        self.prevStateDisp = self.settings['disp']
        self.prevStateDispThresh = self.settings['dispThresh']
        self.objs = []
        self.classLogger = logging.getLogger('droneNav.VisionSys')
        self.working = True
        self.t = Thread(target=self.update, args=())
        self.t.daemon = True

    def start(self):
        self.classLogger.debug('Starting vision system.')
        self.video_stream.start()
        time.sleep(2)
        self.working = True
        self.t.start()
        return

    def stop(self):
        self.working = False
        self.t.join()
        return

    def update(self):
        while 1:
            if self.working is False:
                break

            if self.queue_MAIN_2_VS.empty():
                pass
            if not self.queue_MAIN_2_VS.empty():
                self.settings = self.queue_MAIN_2_VS.get()
                self.queue_MAIN_2_VS.task_done()

            frame = self.video_stream.read()
            frame_processed = self.process_frame(frame, self.settings)
            self.detect_shapes(frame, frame_processed)

            if self.settings['disp'] is False and self.prevStateDisp is False:
                pass
            if self.settings['disp'] is True and self.prevStateDisp is False:
                cv2.namedWindow('Frame')
                key = cv2.waitKey(1) & 0xFF
                # cv2.startWindowThread()
            elif self.settings['disp'] is True and self.prevStateDisp is True:
                key = cv2.waitKey(1) & 0xFF
                cv2.imshow('Frame', frame)
            elif self.settings['disp'] is False and self.prevStateDisp is True:
                cv2.destroyWindow('Frame')

            if self.settings['dispThresh'] is False and self.prevStateDispThresh is False:
                pass
            if self.settings['dispThresh'] is True and self.prevStateDispThresh is False:
                cv2.namedWindow('Processed')
                key = cv2.waitKey(1) & 0xFF
                # cv2.startWindowThread()
            elif self.settings['dispThresh'] is True and self.prevStateDispThresh is True:
                key = cv2.waitKey(1) & 0xFF
                cv2.imshow('Processed', frame_processed)
            elif self.settings['dispThresh'] is False and self.prevStateDispThresh is True:
                cv2.destroyWindow('Processed')

            if self.settings['dispThresh'] or self.settings['disp']:
                if key == 27:
                    self.video_stream.stop()

            self.prevStateDisp = self.settings['disp']
            self.prevStateDispThresh = self.settings['dispThresh']

            # send objects to the state machine
            self.queue_VS_2_STM.put(self.objs)

        cv2.destroyAllWindows()
        self.video_stream.stop()
        self.classLogger.debug('Ending vision system.')

    def process_frame(self, fr, setts):
        """
        Takes a frame and processes it based on the settings.
        """
        # frame = imutils.resize(frame, width=600)
        # fr = cv2.flip(fr, 0)
        # frame = cv2.copyMakeBorder(frame, 3, 3, 3, 3,
        #                            cv2.BORDER_CONSTANT,
        #                            value=(255, 255, 255))
        frameGray = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        frameBlurred = cv2.GaussianBlur(frameGray, (7, 7), 0)
        frameThresh = cv2.threshold(frameBlurred, setts['lowerThresh'], 255,
                                    cv2.THRESH_BINARY_INV)[1]
        frameThresh = cv2.erode(frameThresh, None,
                                iterations=setts['erodeValue'])
        frameThresh = cv2.dilate(frameThresh, None,
                                 iterations=setts['erodeValue'])
        frameThresh = cv2.copyMakeBorder(frameThresh, 3, 3, 3, 3,
                                         cv2.BORDER_CONSTANT,
                                         value=(0, 0, 0))
        frameFinal = frameThresh
        return frameFinal

    def draw_cntrs_features(self, fr, setts, obj):
        """
        Takes the frame, the settings and an object, and draws its features
        (contours, names, vertices, centers) on the frame.
        """
        if setts['dispContours']:
            cv2.drawContours(fr, [obj['contour']], -1, (255, 255, 0), 1)
        if setts['dispApproxContours']:
            cv2.drawContours(fr, [obj['approx_cnt']], -1, (0, 255, 0), 1)
        if setts['dispNames']:
            cv2.putText(fr, obj['shape'] + str(obj['approx_cnt_area']),
                        obj['center'], cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1)
        if setts['dispVertices']:
            for i in range(0, len(obj['verts'])):
                cv2.circle(fr, tuple(obj['verts'][i]), 4, (255, 100, 100), 1)
        if setts['dispCenters']:
            cv2.circle(fr, (obj['center']), 2, (50, 255, 50), 1)

    def detect_shapes(self, frameOriginal, frameProcessed):
        """
        This function simplifies each contour, identifies the shape by name,
        unpacks the vertices and computes the area, then stores a dictionary
        with all of this data per detected object in self.objs.
        """
        # #####################################################################
        # FIND CONTOURS
        # #####################################################################
        cnts = cv2.findContours(frameProcessed.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # #####################################################################
        # ANALYZE CONTOURS
        # #####################################################################
        # clear the list
        self.objs = []
        for index, c in enumerate(cnts):
            verts = []
            vrt = []

            # #################################################################
            # SIMPLIFY CONTOUR
            # #################################################################
            perimeter = cv2.arcLength(c, True)
            approx_cnt = cv2.approxPolyDP(c, 0.04 * perimeter, True)

            # #################################################################
            # GET CONTOUR AREA
            # #################################################################
            approx_cnt_area = cv2.contourArea(approx_cnt)

            # #################################################################
            # GETTING THE VERTICES COORDINATES
            # #################################################################
            # iterate over the vertices (each needs an additional [0])
            for i in range(0, len(approx_cnt)):
                vrt = []
                for j in range(0, 2):
                    vrt.append(int(approx_cnt[i][0][j]))
                verts.append(vrt)

            # #################################################################
            # NAMING THE OBJECT
            # #################################################################
            # if the shape is a triangle, it will have 3 vertices
            if len(approx_cnt) == 3:
                shape = "triangle"
            # if the shape has 4 vertices, it is either a square or
            # a rectangle
            elif len(approx_cnt) == 4:
                # compute the bounding box of the contour and use the
                # bounding box to compute the aspect ratio
                (x, y, w, h) = cv2.boundingRect(approx_cnt)
                ar = w / float(h)
                # a square will have an aspect ratio that is approximately
                # equal to one; otherwise, the shape is a rectangle
                shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
            # if the shape is a pentagon, it will have 5 vertices
            elif len(approx_cnt) == 5:
                shape = "pentagon"
            # otherwise, we assume the shape is a circle
            else:
                shape = "circle"

            # #################################################################
            # COMPUTING CENTER
            # #################################################################
            M = cv2.moments(approx_cnt)
            try:
                approx_cnt_X = int(M['m10'] / M['m00'])
                approx_cnt_Y = int(M['m01'] / M['m00'])
            except ZeroDivisionError:
                approx_cnt_X = 0
                approx_cnt_Y = 0

            obj = {
                'shape': shape,
                'verts': verts,
                'approx_cnt': approx_cnt,
                'approx_cnt_area': approx_cnt_area,
                'contour': c,
                'center': (approx_cnt_X, approx_cnt_Y)
            }
            self.objs.append(obj)
            c = c.astype('float')
            c = c.astype('int')
            self.draw_cntrs_features(frameOriginal, self.settings,
                                     self.objs[index])

        # integer division keeps the pixel coordinates valid for OpenCV
        if self.settings['dispTHEcenter']:
            cv2.circle(frameOriginal,
                       (self.resolution[0] // 2, self.resolution[1] // 2),
                       2, (50, 50, 255), 1)
        if self.settings['dispGoal'] and bool(self.objs):
            cv2.line(frameOriginal,
                     (self.resolution[0] // 2, self.resolution[1] // 2),
                     self.objs[0]['center'], (255, 0, 0), 2)

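# A minimal wiring sketch for the VisionSystem above, run in the same module
# as the class. queue.Queue provides the task_done() semantics update()
# relies on; the consumer loop is illustrative.
import logging
from queue import Queue

logging.basicConfig(level=logging.DEBUG)
queue_main_to_vs = Queue()
queue_vs_to_stm = Queue()

vision = VisionSystem(queue_main_to_vs, queue_vs_to_stm)
vision.start()
try:
    while True:
        objs = queue_vs_to_stm.get()  # list of detected shape dictionaries
        for obj in objs:
            print(obj['shape'], obj['center'])
except KeyboardInterrupt:
    vision.stop()
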
# import necessary packages
from pivideostream import PiVideoStream
import cv2
import imutils
import numpy as np
import datetime
import time

# initialize variables
microsleep = False  # boolean to store the subject state, default False
counter = 0         # counts instances of microsleep

# initialize the video stream specs, allow time for the camera sensor to warm up
vs = PiVideoStream(resolution=(320, 240), framerate=32)
vs.start()
time.sleep(2.0)

# initialize the start time after warm-up
start_time = time.time()
while True:
    frame = vs.read()                         # start parsing through frames
    frame = imutils.resize(frame, width=400)  # resize each individual frame
    onset = time.time()                       # declare the time of EC onset
    roi = frame[100:300, 100:250]             # crop to the region of interest
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)  # convert the BGR frame to grayscale
    # create a binary (BW) mask of the grayscale image
    ret, thresh_img = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    # perform adaptive histogram equalization
    clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(4, 4))

help="whether or not the Raspberry Pi camera should be used") args = vars(ap.parse_args()) kernel = numpy.ones((5,5), numpy.uint8) #frameNum = 0 # TODO: find out how to determine unique frames loops = 0 key = 0 centerX = 0 centerY = 0 angleToTarget = 0 display = 0 utils.hsvWrite(30,90,120,255,120,255) #Write Networktable values Green #utils.hsvWrite(80,120,80,120,190,255) #Write Networktable values Blue #utils.hsvWrite(130,120,80,200,190,255) #Write Networktable values Red if (args["picamera"] > 0): cap = PiVideoStream().start() else: cap = WebcamVideoStream().start() time.sleep(2.0) distanceTarget = -1 target = -1 centerX = 0 centerY = 0 r1x1 = -1 r1x2 = -1 r2x1 = -1 r2x2 = -1 while True: image = cap.read() #Capture frame #imageCopy = image
def read_settings():
    with open("settings.json", "r") as settings_file:
        settings_json = settings_file.read()
    return json.loads(settings_json)


def write_settings(settings):
    settings_json = json.dumps(settings)
    with open("settings.json", "w") as settings_file:
        settings_file.write(settings_json)


settings = read_settings()

# -----------------------------------------------------------------------------
# Camera
# -----------------------------------------------------------------------------
vs = PiVideoStream(resolution=(1280, 720), framerate=60).start()


def set_camera_settings():
    # Default settings
    vs.camera.brightness = 50
    vs.camera.contrast = 0
    vs.camera.awb_mode = "off"
    vs.camera.awb_gains = (1, 1)
    vs.camera.drc_strength = "off"
    vs.camera.exposure_mode = "off"
    # Customized settings
    vs.camera.shutter_speed = settings["shutter_speed"]


set_camera_settings()


def get_raw_readings():
    result = {}

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', help='File path of Tflite model.', required=True)
    parser.add_argument(
        '--dims', help='Model input dimension', required=True)
    args = parser.parse_args()

    # Set all input params equal to the input dimensions expected by the model
    mdl_dims = int(args.dims)  # dims must be a factor of 32 for the picamera resolution to work

    # Set the max number of objects you want to detect per frame
    max_obj = 10

    engine = edgetpu.detection.engine.DetectionEngine(args.model)

    pygame.init()
    pygame.display.set_caption('Face Detection')
    screen = pygame.display.set_mode((mdl_dims, mdl_dims),
                                     pygame.DOUBLEBUF | pygame.HWSURFACE)
    pygame.font.init()
    fnt_sz = 18
    myfont = pygame.font.SysFont('Arial', fnt_sz)

    camera = picamera.PiCamera()
    strm_thread = PiVideoStream().start()
    strm_thread.rgbCapture = bytearray(strm_thread.camera.resolution[0] *
                                       strm_thread.camera.resolution[1] * 3)
    # Set the camera resolution equal to the model dims
    camera.resolution = (mdl_dims, mdl_dims)
    #rgb = bytearray(camera.resolution[0] * camera.resolution[1] * 3)
    camera.framerate = 30

    _, width, height, channels = engine.get_input_tensor_shape()
    x1, x2, x3, x4, x5 = 0, 50, 50, 0, 0
    y1, y2, y3, y4, y5 = 50, 50, 0, 0, 50
    z = 5
    last_tm = time.time()
    i = 0

    exitFlag = True
    while exitFlag:
        for event in pygame.event.get():
            keys = pygame.key.get_pressed()
            if keys[pygame.K_ESCAPE] == 1:
                exitFlag = False

        with picamera.array.PiRGBArray(camera, size=(mdl_dims, mdl_dims)) as stream:
            #stream = io.BytesIO()
            start_ms = time.time()
            camera.capture(stream, use_video_port=True, format='rgb')
            elapsed_ms = time.time() - start_ms
            stream.seek(0)
            stream.readinto(strm_thread.rgbCapture)
            #stream.truncate()  # needed??
            img = pygame.image.frombuffer(
                strm_thread.rgbCapture[0:(camera.resolution[0] * camera.resolution[1] * 3)],
                camera.resolution, 'RGB')
            input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
            # Inference
            results = engine.DetectWithInputTensor(input, top_k=max_obj)
            stream.close()

        if img:
            screen.blit(img, (0, 0))
        if results:
            num_obj = 0
            for obj in results:
                num_obj = num_obj + 1
            for obj in results:
                bbox = obj.bounding_box.flatten().tolist()
                score = round(obj.score, 2)
                x1 = round(bbox[0] * mdl_dims)
                y1 = round(bbox[1] * mdl_dims)
                x2 = round(bbox[2] * mdl_dims)
                y2 = round(bbox[3] * mdl_dims)
                rect_width = x2 - x1
                rect_height = y2 - y1
                class_score = "%.2f" % (score)
                ms = "(%d) %s%.2fms" % (num_obj, "faces detected in ", elapsed_ms * 1000)
                fnt_class_score = myfont.render(class_score, True, (0, 0, 255))
                fnt_class_score_width = fnt_class_score.get_rect().width
                screen.blit(fnt_class_score, (x1, y1 - fnt_sz))
                fnt_ms = myfont.render(ms, True, (255, 255, 255))
                fnt_ms_width = fnt_ms.get_rect().width
                screen.blit(fnt_ms, ((mdl_dims / 2) - (fnt_ms_width / 2), 0))
                bbox_rect = pygame.draw.rect(screen, (0, 0, 255),
                                             (x1, y1, rect_width, rect_height), 2)
                #pygame.display.update(bbox_rect)
        else:
            elapsed_ms = time.time() - start_ms
            ms = "%s %.2fms" % ("No faces detected in", elapsed_ms * 1000)
            fnt_ms = myfont.render(ms, True, (255, 0, 0))
            fnt_ms_width = fnt_ms.get_rect().width
            screen.blit(fnt_ms, ((mdl_dims / 2) - (fnt_ms_width / 2), 0))
        pygame.display.update()

    pygame.display.quit()

def main():
    # create queues in shared memory so each process can access them
    mainQueue = Queue(maxsize=70)
    xyDoneQueue = Queue(maxsize=70)
    hudDoneQueue = Queue(maxsize=70)
    resizeDoneQueue = Queue(maxsize=70)

    # start the VideoStream process
    vs = PiVideoStream(mainQueue, file1)
    time.sleep(1)  # allow the pi camera to "warm up"
    # resolution/framerate/awb_mode are passed here because passing them to
    # the PiVideoStream instantiation causes the pi camera to be accessed by
    # two different processes, which breaks it
    vsP1 = Process(target=vs.update, name='videoStreamProcess',
                   args=(resolution, framerate, awb_mode))
    time.sleep(0.5)  # put some frames into mainQueue to stop other processes from throwing errors
    vsP1.daemon = True
    vsP1.start()

    # start the ColorTracker process
    tracker = ColorTracker(mainQueue, xyDoneQueue, file2)  # pass the shared queues
    trackerP1 = Process(target=tracker.update, name='trackerProcess', args=())
    trackerP1.daemon = True
    trackerP1.start()

    # start the Hud process
    hud = Hud(resolution, xyDoneQueue, hudDoneQueue, file3, hud_done)
    hudP1 = Process(target=hud.draw, name='hudProcess', args=())
    hudP1.daemon = True
    hudP1.start()

    # start ResizeFrame process 1
    resize = ResizeFrame(hudDoneQueue, resizeDoneQueue, file4, hud_done,
                         instance_name='resizer1')
    resizeP1 = Process(target=resize.resize, name='resizeProcess', args=())
    resizeP1.daemon = True
    resizeP1.start()

    # start ResizeFrame process 2
    resize2 = ResizeFrame(hudDoneQueue, resizeDoneQueue, file4B, hud_done,
                          instance_name='resizer2')
    resizeP2 = Process(target=resize2.resize, name='resizeProcess2', args=())
    resizeP2.daemon = True
    resizeP2.start()

    # start the DisplayFrame process
    display = DisplayFrame(resizeDoneQueue, file5, done)
    displayP1 = Process(target=display.show, name='displayProcess', args=())
    displayP1.daemon = True
    displayP1.start()

    while not done.is_set():
        continue
    else:
        print('Terminating processes')
        file1.close()
        file2.close()
        file3.close()
        file4.close()
        file4B.close()
        vsP1.terminate()
        trackerP1.terminate()
        hudP1.terminate()
        resizeP1.terminate()
        resizeP2.terminate()
        displayP1.terminate()

# DEBUG values
SET_GUI = False           # show the GUI or not
DEBUG_MOTORSPEED = False  # write motor speed commands to the console or not
DEBUG_TIMING = False      # write how long each processing step takes to the console or not
DEBUG_CIRCLEPOS = True    # write the detected circle position to the console or not

# Initialize the motor object
motor = mw.MyMotor("/dev/ttyACM0", 115200)
motor.pwm = 50

# initialize the camera
width = 320
height = 240
camera = PiVideoStream((width, height), 30).start()
counter = FPS()
counter.start()
# allow the camera to warm up before capturing frames
time.sleep(0.5)

# detection variables
posX = None      # X position
posX_prev = 0    # X position in the previous iteration
posY = None      # Y position
posX_exp_filter_coeff = 0.8  # how much the current measurement changes the position, in [0, 1]:
                             # current = alpha * measurement + (1 - alpha) * previous
radius = None    # circle radius
radius_prev = 0  # previous circle radius
rad_exp_filter_coeff = 0.8   # how much the current measurement changes the radius, in [0, 1]:
                             # current = alpha * measurement + (1 - alpha) * previous
speed = 0        # speed to send to the motor controller
angleCorr = 0    # the difference between the two tracks, so the robot turns

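# The two *_exp_filter_coeff values above describe simple exponential
# smoothing. A small sketch of that update rule; the helper name and the
# variables in the usage comments are illustrative, not from the original code.
def exp_filter(measurement, previous, alpha=0.8):
    """Blend the new measurement with the previous estimate."""
    return alpha * measurement + (1 - alpha) * previous

# e.g. smoothing the detected circle position and radius once per frame:
# posX = exp_filter(detected_x, posX_prev, posX_exp_filter_coeff)
# radius = exp_filter(detected_r, radius_prev, rad_exp_filter_coeff)
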
import requests
import json
import cv2
from pivideostream import PiVideoStream
import numpy as np
import base64

addr = 'http://192.168.0.6:5000'
test_url = addr + '/api/test'

content_type = 'image/jpeg'
headers = {'content-type': content_type}

video_capture = PiVideoStream().start()
while True:
    try:
        frame = video_capture.read()
        #print(frame)
        _, img_encoded = cv2.imencode('.jpg', frame)
        response = requests.post(test_url, data=img_encoded.tobytes(),
                                 headers=headers)
        str_response = json.loads(response.text)
        arr = np.frombuffer(base64.b64decode(str_response['message']['py/b64']),
                            dtype=np.uint8)
        img = cv2.imdecode(arr, -1)
        print(img)
        cv2.imshow('frame', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):