def picamera_ndvi(preview=False, resolution=(640, 480), framerate=60):
    stream = PiVideoStream(resolution=resolution, framerate=framerate).start()
    time.sleep(2)
    print('Video stream started.')
    directory = 'capture_' + str(get_time_ms()) + '/'
    # loop over the frames from the video stream indefinitely
    while True:
        # grab the frame from the threaded video stream
        frame = stream.read()
        if frame is not None:
            b, g, r = cv2.split(frame)
            # get NDVI from the RGB image
            ndvi = vegevision.get_ndvi(b, r)
            # scale NDVI to 0-255 *before* casting to uint8, then colorize
            ndvi_colorized = apply_custom_colormap(
                (255 * ndvi).astype(np.uint8),
                cmap=vegevision.load_cmap('NDVI_VGYRM-lut.csv'))
            # show the frame
            if preview:
                cv2.imshow("Video Input with NDVI", ndvi_colorized)
                print('Displaying NDVI...')
            else:
                save_image(ndvi, directory=directory)
        # if the `q` key was pressed, break from the loop
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    stream.stop()
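# For reference, a minimal sketch of what `vegevision.get_ndvi` is assumed to
# compute: the normalized difference vegetation index (NIR - VIS) / (NIR + VIS).
# On a NoIR camera with a blue filter, the blue channel approximates NIR and
# the red channel the visible band. The function below is an illustration, not
# the actual vegevision API.
import numpy as np

def get_ndvi_sketch(nir, vis, eps=1e-6):
    # work in float to avoid uint8 overflow and division by zero
    nir = nir.astype(np.float64)
    vis = vis.astype(np.float64)
    return (nir - vis) / (nir + vis + eps)  # values in [-1, 1]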
class VideoStream:
    def __init__(self, src=0, isPiCamera=False, resolution=(320, 240),
                 framerate=32):
        if isPiCamera:
            from pivideostream import PiVideoStream
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)
        else:
            from usbvideostream import usbVideoStream
            self.stream = usbVideoStream(src, resolution=resolution)

    def start(self):
        return self.stream.start()

    def update(self):
        self.stream.update()

    def read(self):
        return self.stream.read()

    def stop(self):
        self.stream.stop()
class VideoStream:
    def __init__(self, src=0, usePiCamera=False, resolution=(370, 290),
                 framerate=32):
        if usePiCamera:
            from pivideostream import PiVideoStream
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)
        else:
            # note: the original assigned to `self.stream()`, which is a
            # syntax error; the webcam stream must be assigned directly
            from webcamvideostream import WebcamVideoStream
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        return self.stream.start()

    def update(self):
        self.stream.update()

    def read(self):
        return self.stream.read()

    def stop(self):
        self.stream.stop()
class VideoCamera(object):
    def __init__(self, flip=False, fps=10, res=(160, 128)):
        self.vs = PiVideoStream(resolution=res, framerate=fps).start()
        self.flip = flip
        print("cam init")
        time.sleep(2.0)

    def __del__(self):
        print("cam del")
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, png = cv2.imencode('.png', frame)
        return png.tobytes()
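# Since get_frame() returns encoded PNG bytes, this class is typically served
# over HTTP. A minimal sketch using Flask; the route name, app setup, and
# multipart framing here are assumptions, not part of the original code.
from flask import Flask, Response

app = Flask(__name__)
camera = VideoCamera(flip=True)

def gen(cam):
    # yield one multipart chunk per frame (MJPEG-style streaming, PNG payload)
    while True:
        frame = cam.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/png\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)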
class VideoStream:
    def __init__(self, src=0, usePiCamera=False, resolution=(320, 240),
                 framerate=32, **kwargs):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages if we are explicitly told to
            # do so -- this removes the `picamera[array]` requirement from
            # desktops or laptops that still want to use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera sensor to
            # warm up
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate, **kwargs)

        # otherwise, we are using OpenCV so initialize the webcam stream
        # (this branch was missing in the original snippet)
        else:
            from webcamvideostream import WebcamVideoStream
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
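# A short usage sketch for the VideoStream wrapper above: the same read loop
# works whether the frames come from a Pi camera or a USB webcam.
import time
import cv2

vs = VideoStream(usePiCamera=True, resolution=(320, 240), framerate=32).start()
time.sleep(2.0)  # let the camera sensor warm up
while True:
    frame = vs.read()
    if frame is None:
        continue
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
vs.stop()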
        elif preds[0] == 1:
            print('soft left')
            kit.servo[0].angle = SOFT_LEFT
            was_direction = 1
        elif preds[0] == 0:
            print('left')
            kit.servo[0].angle = DIR_LEFT
            was_direction = 1
        elif preds[0] == 3:
            print('soft right')
            kit.servo[0].angle = SOFT_RIGHT
            was_direction = 1
        elif preds[0] == 4:
            print('right')
            kit.servo[0].angle = DIR_RIGHT
            was_direction = 1
        time.sleep(0.1)
except KeyboardInterrupt:
    pass

# stop the machine
kit.servo[0].angle = 100
kit.servo[1].angle = 100
vs.stop()
print("Stop")
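# The elif chain above can be collapsed into a lookup table. A sketch, assuming
# the same class indices and servo-angle constants as the snippet:
STEERING = {
    0: ('left', DIR_LEFT),
    1: ('soft left', SOFT_LEFT),
    3: ('soft right', SOFT_RIGHT),
    4: ('right', DIR_RIGHT),
}

label_angle = STEERING.get(preds[0])
if label_angle is not None:
    label, angle = label_angle
    print(label)
    kit.servo[0].angle = angle
    was_direction = 1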
print("distance to target "+str(distanceTarget)) angleToTarget = math.atan((centerX-160)/317.5)*(180/math.pi) #angleToTarget returns angle to target in degrees print("angle to target "+str(angleToTarget)) center = (centerX,centerY) cv2.circle(image,(int(centerX),int(centerY)),int(abs(cX-centerX)),(0,255,255),2) cv2.circle(image,center,5,(0,255,255),-1) target = 1 else: target = -1 cv2.putText(image, shape+" "+str(shape), (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) utils.targetWrite(target,centerX,centerY,angleToTarget,loops,distanceTarget) if display == 1: #Draw display if turned on cv2.imshow("Frame", image) #Display a screen with outputs cv2.imshow("HSV,Blur,Thresh", thresh) #Display a screen with outputs key = cv2.waitKey(1) & 0xFF #Wait for keypress if there is a display if key == ord("q"):# if the `q` key was pressed, break from the loop break r1x1 = -1 r1x2 = -1 r1y1 = -1 r1y2 = -1 r2x1 = -1 r2x2 = -1 r2y1 = -1 r2y2 = -1 cv2.imwrite("test.jpg", image) cv2.destroyAllWindows() cap.stop()
window_name = 'preview'

# create the reader thread + set up the camera
vs = PiVideoStream()
vs.camera.video_stabilization = True

# start the video stream + let the camera warm up
vs.start()
time.sleep(2.0)

# create the display window
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

fps = FPS().start()
while True:
    frame = vs.read()
    fps.update()
    cv2.imshow(window_name, frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

fps.stop()
print("Elapsed time: {:.2f}".format(fps.elapsed()))
print("Approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()
def main():
    pwm = PWM(0x40)
    pwm.setPWMFreq(100)
    vs = PiVideoStream(resolution=(640, 480)).start()
    time.sleep(1.0)

    frame = vs.read()
    prvs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame)
    hsv[..., 1] = 255

    mode = 0
    speed = 0
    steer = 0
    while True:
        frame = vs.read()
        # frame = rotate_image(frame)
        next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if mode == 3:
            # OpenCV 3+ requires the `flow` argument; pass None to allocate it
            flow = cv2.calcOpticalFlowFarneback(prvs, next, None,
                                                0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.putText(frame, "Speed: {}, Steer: {}".format(speed, steer),
                    (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 0, 0))
        if mode == 1:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if mode == 2:
            frame = cv2.Canny(frame, 20, 100)
        if mode == 3:
            cv2.imshow("Frame", bgr)
        else:
            cv2.imshow("Frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        if key == ord("f"):
            mode = 3
        if key == ord("e"):
            mode = 2
        if key == ord("g"):
            mode = 1
        if key == ord("c"):
            mode = 0
        if key == ord("l"):
            pwm.setPWM(0, 0, 500)
        if key == ord("r"):
            pwm.setPWM(0, 0, 300)
        if key == 81:  # left
            if steer > -1:
                steer = steer - 0.1
        if key == 83:  # right
            if steer < 1:
                steer = steer + 0.1
        if key == 82:  # up
            if speed < 1:
                speed = speed + 0.1
        if key == 84:  # down
            if speed > -1:  # the original checked `steer` here; speed is intended
                speed = speed - 0.1
        if key == ord("s"):
            speed = 0
            steer = 0
        pwm.setPWM(0, 0, 500 + int(speed * 100))
        pwm.setPWM(2, 0, 670 - int(steer * 100))
        prvs = next

    cv2.destroyAllWindows()
    vs.stop()
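# The mode-3 branch above is the standard Farneback dense-flow visualization:
# flow angle becomes hue, flow magnitude becomes value. A self-contained sketch
# of the same idea as a reusable helper (parameter values are the OpenCV
# tutorial defaults):
import cv2
import numpy as np

def flow_to_bgr(prev_gray, next_gray):
    flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros(prev_gray.shape + (3,), dtype=np.uint8)
    hsv[..., 0] = ang * 180 / np.pi / 2          # hue encodes direction
    hsv[..., 1] = 255                            # full saturation
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)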
class VideoCamera(object):
    def __init__(self, resolution=(320, 240), framerate=32):
        self.conf = json.load(open("conf.json"))
        self.lt = LocalTime("Baltimore")
        self.avg = None
        self.avg_count = 0
        self.motionCounter = 0
        self.motion_frames = []
        self.x = 0
        self.y = 0
        self.w = 0
        self.h = 0
        self.contour_area = 0
        self.vs = PiVideoStream(resolution, framerate).start()
        time.sleep(self.conf["camera_warmup_time"])

    def hflip(self, hflip=True):
        self.vs.hflip(hflip)

    def vflip(self, vflip=True):
        self.vs.vflip(vflip)

    def rotation(self, angle=0):
        self.vs.rotation(angle)

    def exposure_mode(self, exposure_mode="auto"):
        self.vs.exposure_mode(exposure_mode)

    def iso(self, iso=0):
        self.vs.iso(iso)

    def shutter_speed(self, speed):
        self.vs.shutter_speed(speed)

    def change_framerate(self, framerate=32):
        self.vs.stop(stop_camera=False)
        time.sleep(self.conf["camera_cooldown_time"])
        self.vs.camera.framerate = framerate
        self.vs.shutter_speed(0)
        self.vs.start()
        time.sleep(self.conf["camera_warmup_time"])
        self.avg_count = 0

    def __del__(self):
        self.vs.stop(stop_camera=True)

    def get_frame(self):
        frame = self.vs.read().copy()
        framerate = self.vs.camera.framerate

        # draw the text and timestamp on the frame
        timestamp = self.lt.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        cv2.putText(
            frame, "Motion on: {}; FPS: {}; Contour area: {}".format(
                self.avg_count == self.conf["camera_adjustment_frames"],
                framerate, self.contour_area),
            (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        if self.w > 0:
            cv2.rectangle(frame, (self.x, self.y),
                          (self.x + self.w, self.y + self.h), (0, 255, 0), 2)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self):
        frame = self.vs.read().copy()
        timestamp = self.lt.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        found_obj = False

        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # keep accumulating the background model during the warmup frames
        if self.avg is None or self.avg_count < self.conf[
                "camera_adjustment_frames"]:
            self.avg = gray.copy().astype("float")
            self.avg_count += 1
            if self.avg_count == self.conf["camera_adjustment_frames"]:
                print("[INFO] motion detector live...")
            return (None, False)

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on the thresholded image
        thresh = cv2.threshold(frameDelta, self.conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            ca = cv2.contourArea(c)
            self.contour_area = ca
            if ca < self.conf["min_area"]:
                continue

            # compute the bounding box for the contour, draw it on the
            # frame, and update found_obj
            (self.x, self.y, self.w, self.h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (self.x, self.y),
                          (self.x + self.w, self.y + self.h), (0, 255, 0), 2)
            found_obj = True

        # check to see if the room is occupied
        if found_obj:
            print("[INFO] found object!")
            # increment the motion counter
            self.motionCounter += 1
            cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
            self.motion_frames.append(frame)

            # check to see if the number of frames with consistent motion
            # is high enough
            if self.motionCounter >= self.conf["min_motion_frames"]:
                print("[INFO] occupied!")
                self.motionCounter = 0
                vis = np.concatenate(self.motion_frames, axis=0)
                return (vis, found_obj)
            return (None, False)

        # otherwise, the room is not occupied
        else:
            (self.x, self.y, self.w, self.h) = (0, 0, 0, 0)
            self.contour_area = 0
            self.motionCounter = 0
            self.motion_frames = []
            return (None, False)
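# A minimal sketch of how this VideoCamera might be polled; the surrounding
# loop and the output filename are assumptions based on the class above:
import time
import cv2

camera = VideoCamera(resolution=(320, 240), framerate=32)
while True:
    vis, found = camera.get_object()
    if found and vis is not None:
        # `vis` stacks the consecutive motion frames vertically
        cv2.imwrite("motion_{}.jpg".format(int(time.time())), vis)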
class VisionSystem:
    """Threaded vision system: reads frames, detects shapes, and reports
    the detected objects to the state machine via a queue."""

    def __init__(self, q1, q2):
        self.queue_MAIN_2_VS = q1
        self.queue_VS_2_STM = q2
        self.resolution = (320, 240)
        self.video_stream = PiVideoStream(self.resolution, 60)
        self.settings = {
            'disp': False,
            'dispThresh': False,
            'dispContours': False,
            'dispApproxContours': False,
            'dispVertices': False,
            'dispNames': False,
            'dispCenters': False,
            'dispTHEcenter': False,
            'erodeValue': 0,
            'lowerThresh': 40,
            'working': True,
            'autoMode': False,
            'dispGoal': True
        }
        self.prevStateDisp = self.settings['disp']
        self.prevStateDispThresh = self.settings['dispThresh']
        self.objs = []
        self.classLogger = logging.getLogger('droneNav.VisionSys')
        self.working = True
        self.t = Thread(target=self.update, args=())
        self.t.daemon = True

    def start(self):
        self.classLogger.debug('Starting vision system.')
        self.video_stream.start()
        time.sleep(2)
        self.working = True
        self.t.start()
        return

    def stop(self):
        self.working = False
        self.t.join()
        return

    def update(self):
        while 1:
            if self.working is False:
                break

            if self.queue_MAIN_2_VS.empty():
                pass
            if not self.queue_MAIN_2_VS.empty():
                self.settings = self.queue_MAIN_2_VS.get()
                self.queue_MAIN_2_VS.task_done()

            frame = self.video_stream.read()
            frame_processed = self.process_frame(frame, self.settings)
            self.detect_shapes(frame, frame_processed)

            if self.settings['disp'] is False and self.prevStateDisp is False:
                pass
            if self.settings['disp'] is True and self.prevStateDisp is False:
                cv2.namedWindow('Frame')
                key = cv2.waitKey(1) & 0xFF
                # cv2.startWindowThread()
            elif self.settings['disp'] is True and self.prevStateDisp is True:
                key = cv2.waitKey(1) & 0xFF
                cv2.imshow('Frame', frame)
            elif self.settings['disp'] is False and self.prevStateDisp is True:
                cv2.destroyWindow('Frame')

            if self.settings[
                    'dispThresh'] is False and self.prevStateDispThresh is False:
                pass
            if self.settings[
                    'dispThresh'] is True and self.prevStateDispThresh is False:
                cv2.namedWindow('Processed')
                key = cv2.waitKey(1) & 0xFF
                # cv2.startWindowThread()
            elif self.settings[
                    'dispThresh'] is True and self.prevStateDispThresh is True:
                key = cv2.waitKey(1) & 0xFF
                cv2.imshow('Processed', frame_processed)
            elif self.settings[
                    'dispThresh'] is False and self.prevStateDispThresh is True:
                cv2.destroyWindow('Processed')

            if self.settings['dispThresh'] or self.settings['disp']:
                if key == 27:
                    self.video_stream.stop()

            self.prevStateDisp = self.settings['disp']
            self.prevStateDispThresh = self.settings['dispThresh']

            # send objects to state machine
            self.queue_VS_2_STM.put(self.objs)

        cv2.destroyAllWindows()
        self.video_stream.stop()
        self.classLogger.debug('Ending vision system.')

    def process_frame(self, fr, setts):
        """
        Takes a frame and processes it based on the settings.
        """
        # frame = imutils.resize(frame, width=600)
        # fr = cv2.flip(fr, 0)
        # frame = cv2.copyMakeBorder(frame, 3, 3, 3, 3,
        #                            cv2.BORDER_CONSTANT,
        #                            value=(255, 255, 255))
        frameGray = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        frameBlurred = cv2.GaussianBlur(frameGray, (7, 7), 0)
        frameThresh = cv2.threshold(frameBlurred, setts['lowerThresh'], 255,
                                    cv2.THRESH_BINARY_INV)[1]
        frameThresh = cv2.erode(frameThresh, None,
                                iterations=setts['erodeValue'])
        frameThresh = cv2.dilate(frameThresh, None,
                                 iterations=setts['erodeValue'])
        frameThresh = cv2.copyMakeBorder(frameThresh, 3, 3, 3, 3,
                                         cv2.BORDER_CONSTANT,
                                         value=(0, 0, 0))
        frameFinal = frameThresh
        return frameFinal

    def draw_cntrs_features(self, fr, setts, obj):
        """
        Takes frame, settings and an object, and draws the object's features
        (contours, names, vertices, centers) on the frame.
        """
        if setts['dispContours']:
            cv2.drawContours(fr, [obj['contour']], -1, (255, 255, 0), 1)
        if setts['dispApproxContours']:
            cv2.drawContours(fr, [obj['approx_cnt']], -1, (0, 255, 0), 1)
        if setts['dispNames']:
            cv2.putText(fr, obj['shape'] + str(obj['approx_cnt_area']),
                        obj['center'], cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1)
        if setts['dispVertices']:
            for i in range(0, len(obj['verts'])):
                cv2.circle(fr, tuple(obj['verts'][i]), 4, (255, 100, 100), 1)
        if setts['dispCenters']:
            cv2.circle(fr, (obj['center']), 2, (50, 255, 50), 1)

    def detect_shapes(self, frameOriginal, frameProcessed):
        """
        Simplifies each contour, identifies the shape by name, unpacks the
        vertices and computes the area, then stores a dictionary with all of
        this data for every detected object in self.objs.

        :param frameOriginal: Frame to draw the detected features on.
        :param frameProcessed: Thresholded frame to find contours in.
        """
        # #####################################################################
        # FIND CONTOURS
        # #####################################################################
        cnts = cv2.findContours(frameProcessed.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # #####################################################################
        # ANALYZE CONTOURS
        # #####################################################################
        # clear the object list
        self.objs = []

        for index, c in enumerate(cnts):
            verts = []
            vrt = []

            # #################################################################
            # SIMPLIFY CONTOUR
            # #################################################################
            perimeter = cv2.arcLength(c, True)
            approx_cnt = cv2.approxPolyDP(c, 0.04 * perimeter, True)

            # #################################################################
            # GET CONTOUR AREA
            # #################################################################
            approx_cnt_area = cv2.contourArea(approx_cnt)

            # #################################################################
            # GET THE VERTEX COORDINATES
            # #################################################################
            # iterate over vertices (each needs an additional [0])
            for i in range(0, len(approx_cnt)):
                vrt = []
                for j in range(0, 2):
                    vrt.append(int(approx_cnt[i][0][j]))
                verts.append(vrt)

            # #################################################################
            # NAME THE OBJECT
            # #################################################################
            # if the shape is a triangle, it will have 3 vertices
            if len(approx_cnt) == 3:
                shape = "triangle"
            # if the shape has 4 vertices, it is either a square or
            # a rectangle
            elif len(approx_cnt) == 4:
                # compute the bounding box of the contour and use the
                # bounding box to compute the aspect ratio
                (x, y, w, h) = cv2.boundingRect(approx_cnt)
                ar = w / float(h)
                # a square will have an aspect ratio that is approximately
                # equal to one, otherwise, the shape is a rectangle
                shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
            # if the shape is a pentagon, it will have 5 vertices
            elif len(approx_cnt) == 5:
                shape = "pentagon"
            # otherwise, we assume the shape is a circle
            else:
                shape = "circle"

            # #################################################################
            # COMPUTE THE CENTER
            # #################################################################
            M = cv2.moments(approx_cnt)
            try:
                approx_cnt_X = int(M['m10'] / M['m00'])
                approx_cnt_Y = int(M['m01'] / M['m00'])
            except ZeroDivisionError:
                approx_cnt_X = 0
                approx_cnt_Y = 0

            obj = {
                'shape': shape,
                'verts': verts,
                'approx_cnt': approx_cnt,
                'approx_cnt_area': approx_cnt_area,
                'contour': c,
                'center': (approx_cnt_X, approx_cnt_Y)
            }

            self.objs.append(obj)
            self.draw_cntrs_features(frameOriginal, self.settings,
                                     self.objs[index])

        if self.settings['dispTHEcenter']:
            # use integer division so the coordinates are valid pixel indices
            cv2.circle(frameOriginal,
                       (self.resolution[0] // 2, self.resolution[1] // 2),
                       2, (50, 50, 255), 1)
        if self.settings['dispGoal'] and bool(self.objs):
            cv2.line(frameOriginal,
                     (self.resolution[0] // 2, self.resolution[1] // 2),
                     self.objs[0]['center'], (255, 0, 0), 2)
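# A minimal sketch of how VisionSystem might be wired up. The code above calls
# task_done() on the inbound queue and put() on the outbound one, so plain
# queue.Queue works; the consumer loop below is an assumption, not part of the
# original code:
import queue

settings_q = queue.Queue()  # MAIN -> vision system (settings updates)
objects_q = queue.Queue()   # vision system -> state machine (detected objects)

vision = VisionSystem(settings_q, objects_q)
vision.start()
try:
    while True:
        objs = objects_q.get()  # list of shape dictionaries, one per contour
        for o in objs:
            print(o['shape'], o['center'], o['approx_cnt_area'])
finally:
    vision.stop()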