def transform2birdseye(image):
    # Transform the image to a birdseye view
    P = Perspective()
    birdseye = P.to_birdseye(image)
    # cv2.imshow('Birdseye view', birdseye)
    # print('birdseye shape: {}'.format(birdseye.shape))
    return birdseye
def get_perspective_score(text):
    p = Perspective(API_KEY)
    try:
        comment = p.score(text, tests=["TOXICITY"])
        score = comment["TOXICITY"].score
    except Exception:  # fall back to 0 if the API call or attribute lookup fails
        score = 0
    return score
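# Hedged sketch (not part of the snippet above): the Perspective wrapper used in
# get_perspective_score presumably calls Google's Comment Analyzer REST endpoint.
# A minimal direct request against the public v1alpha1 API; the helper name and
# error handling are illustrative assumptions, only the endpoint and payload
# shape come from the public API documentation.
import requests

def get_toxicity_score_rest(text, api_key):
    """Return the TOXICITY summary score for `text`, or 0 on any failure."""
    url = ('https://commentanalyzer.googleapis.com/v1alpha1/'
           'comments:analyze?key=' + api_key)
    payload = {
        'comment': {'text': text},
        'requestedAttributes': {'TOXICITY': {}},
    }
    try:
        response = requests.post(url, json=payload, timeout=10)
        response.raise_for_status()
        return response.json()['attributeScores']['TOXICITY']['summaryScore']['value']
    except Exception:
        return 0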
def _final_frame(self, im, fill, lcurv, rcurv, offset,
                 font=cv.FONT_HERSHEY_DUPLEX, scale_font=1,
                 color_font=(255, 0, 0), show=False):
    # Warp the lane fill back to the camera perspective and blend it over the frame
    fill = prsp.warp(fill, inverse=True)
    out = im.copy()
    out = cv.addWeighted(out, 0.7, fill, 0.5, 0)

    # Annotate curvature and offset
    xtxt = 50
    lcurv_text = 'Left curvature={0:.01f}m'.format(lcurv)
    rcurv_text = 'Right curvature={0:.01f}m'.format(rcurv)
    offset_text = 'Offset={0:.02f}m'.format(offset)
    out = cv.putText(out, lcurv_text, (xtxt, 30), font, scale_font, color_font)
    out = cv.putText(out, rcurv_text, (xtxt, 60), font, scale_font, color_font)
    out = cv.putText(out, offset_text, (xtxt, 90), font, scale_font, color_font)

    if show:
        show_images(im, out, 'origin', 'lanes', 'Lanes detected')
    return out
def _get_initial_perspective(self, *methods):
    """ Return the initial perspective. """
    methods = [
        # If a default perspective was specified then we prefer that over
        # any other perspective.
        self._get_default_perspective,

        # If there was no default perspective then try the perspective that
        # was active the last time the application was run.
        self._get_previous_perspective,

        # If there was no previous perspective, then try the first one that
        # we know about.
        self._get_first_perspective,
    ]

    for method in methods:
        perspective = method()
        if perspective is not None:
            break

    # If we have no known perspectives, make a new blank one up.
    else:
        logger.warn('no known perspectives - creating a new one')
        perspective = Perspective()

    return perspective
def draw(self, display):
    from perspective import Perspective
    size = Perspective.perceived_size(self)
    if size < 3:
        size = 3

    # draw pulse
    if self.tick % 128 >= 64:
        draw.circle(display.CANVAS,
                    self.color.mix(self.color.BLACK, ((self.tick % 128)) ** 2 / (128 ** 2)),
                    Perspective.window(self),
                    size + int((self.tick % 128) / 32))
        draw.circle(display.CANVAS,
                    self.color.mix(self.color.BLACK, (((self.tick + 64) % 128)) ** 2 / (128 ** 2)),
                    Perspective.window(self),
                    size + int(((self.tick + 64) % 128) / 32))
    else:
        draw.circle(display.CANVAS,
                    self.color.mix(self.color.BLACK, (((self.tick + 64) % 128) ** 2) / (128 ** 2)),
                    Perspective.window(self),
                    size + int(((self.tick + 64) % 128) / 32))
        draw.circle(display.CANVAS,
                    self.color.mix(self.color.BLACK, ((self.tick % 128)) ** 2 / (128 ** 2)),
                    Perspective.window(self),
                    size + int((self.tick % 128) / 32))

    # draw itself
    draw.circle(display.CANVAS, self.color.get(),
                Perspective.window(self), Perspective.perceived_size(self))
def __init__(self):
    super(MainWindow, self).__init__()
    self.setWindowTitle(App_Name)
    self._menubar = QtGui.QMenuBar(self)
    self._actions = {}
    self._handlers = {}
    self.buildActions()
    self.buildMenus()
    self.setMenuBar(self._menubar)
    self.perspective = Perspective(self)
def test(filename, n=0, show=False):
    import importlib
    importlib.reload(mask)
    print(persp._perspective)

    im_ = plt.imread(filename)
    im = clb.undistort(im_)

    if n == -1:
        return persp.warp(im, show=show)
    elif n == 0:
        im2 = persp.warp(im, show=show)
        color_mask.hls_channel(im2, channel='h', thresh=(100, 255), show=show)
        color_mask.hls_channel(im2, channel='l', thresh=(200, 255), show=show)
        color_mask.hls_channel(im2, channel='s', thresh=(100, 255), show=show)
    elif n == 1:
        im_eq = equalize_hist(im, show=show)
        im2 = persp.warp(im_eq, show=show)
        color_mask.hls_channel(im2, channel='h', thresh=(0, 100), show=show)
        color_mask.hls_channel(im2, channel='l', thresh=(200, 255), show=show)
        color_mask.hls_channel(im2, channel='s', thresh=(100, 255), show=show)
    elif n == 2:
        im2 = persp.warp(im, show=show)
        color_mask.rgb_channel(im2, channel='r', thresh=(220, 255), show=show)
        color_mask.rgb_channel(im2, channel='g', thresh=(210, 255), show=show)
        color_mask.rgb_channel(im2, channel='b', thresh=(0, 100), show=show)
    elif n == 3:
        im2 = persp.warp(im, show=show)
        color_mask.hls_channel(im2, channel='h', thresh=(0, 90), show=show)
        color_mask.hls_channel(im2, channel='l', thresh=(200, 255), show=show)
        color_mask.hls_channel(im2, channel='s', thresh=(110, 255), show=show)
    elif n == 4:
        im2 = persp.warp(im, show=show)
        return custom_mask.special_mask(im2, show=show)
class PerspectiveTests(unittest.TestCase):
    p = Perspective(api_key)

    def test_comment(self):
        for x in range(10):
            test = random.choice(allowed)
            comment = self.p.score(generate_text(), tests=test, languages=["EN"])
            self.assertTrue(0 <= comment[test].score <= 1)
            for attr in comment[test].spans:
                self.assertTrue(0 <= attr.score <= 1)
                self.assertTrue(str(attr) in comment.text)
def awakeFromNib(self):
    self.center = (50.0, 50.0)
    self.radius = 10.0
    self.color = NSColor.redColor()
    # self.perspective.setdst((0.0, 0.0), (400, 0.0), (0.0, 400), (400, 400))
    self.perspective.setdst((0.0, 400), (400, 400), (0.0, 0.0), (400, 0.0))

    # preparation for allowing mouse control
    from Quartz import CoreGraphics
    self.CG = CG = CoreGraphics
    # displayWidth = CG.CGMainDisplayID
    displayWidth = CG.CGDisplayPixelsWide(CG.CGMainDisplayID())
    displayHeight = CG.CGDisplayPixelsHigh(CG.CGMainDisplayID())
    print(displayWidth, displayHeight)

    dp = Perspective()
    dp.setdst((0, 0), (displayWidth, 0), (0, displayHeight), (displayWidth, displayHeight))
    self.display_perspective = dp

    # calibrate the wiimote to our perspectives
    self.calib = Calibratinator()
    self.calib.perspective = self.perspective
    self.calib.display_perspective = self.display_perspective
    self.calib.calibrated = self.calibrated
def get_perspective_by_id(self, id):
    """ Return the perspective with the specified Id.

    Return None if no such perspective exists.
    """
    for perspective in self.perspectives:
        if perspective.id == id:
            break
    else:
        if id == Perspective.DEFAULT_ID:
            perspective = Perspective()
        else:
            perspective = None

    return perspective
def process(self, frame, show=False):
    im = clb.undistort(frame)
    warped = prsp.warp(im)
    masked = custom_mask.apply(warped, show=show)
    lp, rp = self._find_lane_points(masked, num_strips=10, radius=70, show=show)

    height = masked.shape[0]
    width = masked.shape[1]
    llane = Lane(lp, end=height - 1, num=height)
    rlane = Lane(rp, end=height - 1, num=height)

    offset = Lane.offset(llane, rlane, y=masked.shape[0] - 1, frame_width=width)
    lcurv = llane.curvature(height // 2)
    rcurv = rlane.curvature(height // 2)
    fill = Lane.fill_lanes(warped, llane, rlane, show=show)

    return self._final_frame(im, fill, lcurv, rcurv, offset, show=show)
# asteroid = OrbitalBody(position, speed, mass, RADIUS)
CelestialCluster.init()
distancia_corpos = []

from display import Display
os.environ['SDL_VIDEO_WINDOW_POS'] = "0, 0"

# --MAIN LOOP--
while not SysComm.quit:
    current_time = gtime.current()

    # update system interface
    SysComm.update()

    # update things
    CelestialCluster.update()
    Perspective.update()

    # draw things
    Display.update()

    gtime.update()
    elapsed_time = gtime.current() - current_time
    if elapsed_time < 16:
        gtime.sleep(16 - elapsed_time)

# print(distancia_corpos)
# grafico(ticks, distancia_corpos, gtime.RESOLUTION)
# distancia_corpos.append(distancia_2_planetas(celestial_cluster.cluster, color.EARTH, color.SUN))
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import savgol_filter, argrelextrema
import cv2
import glob

from camera import Camera
from perspective import Perspective
from lane_detector import LaneDetector

# Camera and perspective instances
camera = Camera(1280, 720)
perspective = Perspective()

# Calibrate camera
camera = Camera(1280, 720)
perspective = Perspective()
results = camera.calibrate(glob.glob("CALIBRATION/*.jpg"), (9, 6), True)
for result in results:
    file_index = result["filename"][-5]
    cv2.imwrite("OUTPUT/calibration/1-corners%s.jpg" % file_index, result["img"])
    cv2.imwrite("OUTPUT/calibration/2-corners-undistorted-%s.jpg" % file_index,
                camera.undistort(result["img"]))

# HSV color mask
color_mask = (
    ((20, 50, 75), (110, 255, 255)),   # yellow lines
    ((0, 0, 220), (255, 255, 255)),    # white lines
)
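# Hedged usage sketch: the (lower, upper) HSV pairs in color_mask above are the
# kind of bounds typically fed to cv2.inRange. The helper below is illustrative,
# not part of the original pipeline; it reuses the cv2/np imports above and
# assumes the input image is RGB (as returned by plt.imread on JPEGs).
def apply_color_mask(rgb_image, bounds):
    """Combine binary masks for each (lower, upper) HSV bound pair."""
    hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
    combined = np.zeros(hsv.shape[:2], dtype=np.uint8)
    for lower, upper in bounds:
        mask = cv2.inRange(hsv, np.array(lower, np.uint8), np.array(upper, np.uint8))
        combined = cv2.bitwise_or(combined, mask)
    return combined

# e.g. lane_pixels = apply_color_mask(plt.imread("CALIBRATION/example.jpg"), color_mask)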
from camera_calibration import Camera
from perspective import RegionOfInterest, Perspective
from util import images_in_directory, write_images_to_directory
from binary_image import Lane, identify_lane_pixels
import cv2
import numpy as np
from moviepy.editor import VideoFileClip

camera = Camera(images_in_directory('camera_cal'))
region_of_interest = RegionOfInterest((1280, 720))
perspective = Perspective(region_of_interest)


class Pipeline:
    def __init__(self):
        self.previous_polynomial_left = []
        self.previous_polynomial_right = []
        self.previous_curves = []

    def pipeline(self, image):
        undistorted_image = camera.undistort(image)
        bird_view_image = perspective.bird_view(undistorted_image)
        left_lane, right_lane = self.find_lanes(bird_view_image)

        bird_view_lane_image = np.zeros_like(bird_view_image).astype(np.uint8)
        left_lane.draw_fill_on_image(bird_view_lane_image, other_lane=right_lane)
        left_lane.draw_identified_pixels_on_image(bird_view_lane_image, [255, 0, 0])
        right_lane.draw_identified_pixels_on_image(bird_view_lane_image, [0, 0, 255])
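# Hedged usage sketch: VideoFileClip is imported above, so the pipeline is
# presumably driven per-frame like this; the file names are illustrative,
# not taken from the source.
# pipeline = Pipeline()
# clip = VideoFileClip('project_video.mp4')
# processed = clip.fl_image(pipeline.pipeline)
# processed.write_videofile('output_video.mp4', audio=False)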
def read_signal(signal):
    if signal.type == gsignal.SCROLLUP or signal.type == gsignal.SCROLLDOWN:
        Perspective.read_signal(signal)
def __init__(self):
    assert clb.initialized()
    assert prsp.initialized()
    self.left_line = None
    self.right_line = None
class LaneDetector: def __init__(self, src, dst, n_images=1, calibration=None, line_segments=LINE_SEGMENTS, offset=0): self.n_images = n_images self.cam_calibration = calibration self.line_segments = line_segments self.image_offset = offset self.left_line = None self.right_line = None self.center_poly = None self.curvature = 0.0 self.offset = 0.0 self.perspective_src = src self.perspective_dst = dst self.perspective = Perspective(src, dst) self.dists = [] @staticmethod def _acceptable_lanes(left, right): if len(left[0]) < 3 or len(right[0]) < 3: return False else: new_left = Line(y=left[0], x=left[1]) new_right = Line(y=right[0], x=right[1]) return acceptable_lanes(new_left, new_right) def _check_lines(self, left_x, left_y, right_x, right_y): left_found, right_found = False, False if self._acceptable_lanes((left_x, left_y), (right_x, right_y)): left_found, right_found = True, True elif self.left_line and self.right_line: if self._acceptable_lanes((left_x, left_y), (self.left_line.ys, self.left_line.xs)): left_found = True if self._acceptable_lanes( (right_x, right_y), (self.right_line.ys, self.right_line.xs)): right_found = True return left_found, right_found def _draw_info(self, image): font = cv2.FONT_HERSHEY_SIMPLEX text_curvature = 'Curvature: {}'.format(self.curvature) cv2.putText(image, text_curvature, (50, 50), font, 1, (255, 255, 255), 2) text_position = '{}m {} of center'.format( abs(self.offset), 'left' if self.offset < 0 else 'right') cv2.putText(image, text_position, (50, 100), font, 1, (255, 255, 255), 2) def _draw_overlay(self, image): overlay = np.zeros([*image.shape]) mask = np.zeros([image.shape[0], image.shape[1]]) lane_area = calculate_lane_area((self.left_line, self.right_line), image.shape[0], 20) mask = cv2.fillPoly(mask, np.int32([lane_area]), 1) mask = self.perspective.inverse_transform(mask) overlay[mask == 1] = (255, 128, 0) selection = (overlay != 0) image[selection] = image[selection] * 0.3 + overlay[selection] * 0.7 mask[:] = 0 mask = draw_polynomial(mask, self.center_poly, 20, 255, 5, True, ARROW_TIP_LENGTH) mask = self.perspective.inverse_transform(mask) image[mask == 255] = (255, 75, 2) mask[:] = 0 mask = draw_polynomial(mask, self.left_line.best_fit_poly, 5, 255) mask = draw_polynomial(mask, self.right_line.best_fit_poly, 5, 255) mask = self.perspective.inverse_transform(mask) image[mask == 255] = (255, 200, 2) def _process_history(self, image, left_found, right_found, left_x, left_y, right_x, right_y): if self.left_line and self.right_line: left_x, left_y = lane_detection_history( image, self.left_line.best_fit_poly, self.line_segments) right_x, right_y = lane_detection_history( image, self.right_line.best_fit_poly, self.line_segments) left_found, right_found = self._check_lines( left_x, left_y, right_x, right_y) return left_found, right_found, left_x, left_y, right_x, right_y def _process_histogram(self, image, left_found, right_found, left_x, left_y, right_x, right_y): if not left_found: left_x, left_y = lane_detection_histogram( image, self.line_segments, (self.image_offset, image.shape[1] // 2), h_window=HISTOGRAM_WINDOW) left_x, left_y = remove_outliers(left_x, left_y) if not right_found: right_x, right_y = lane_detection_histogram( image, self.line_segments, (image.shape[1] // 2, image.shape[1] - self.image_offset), h_window=HISTOGRAM_WINDOW) right_x, right_y = remove_outliers(right_x, right_y) if not left_found or not right_found: left_found, right_found = self._check_lines( left_x, left_y, right_x, right_y) return left_found, right_found, left_x, 
left_y, right_x, right_y def _draw(self, image, original_image): if self.left_line and self.right_line: self.dists.append( self.left_line.get_best_fit_distance(self.right_line)) self.center_poly = (self.left_line.best_fit_poly + self.right_line.best_fit_poly) / 2 self.curvature = curvature(self.center_poly) self.offset = ( image.shape[1] / 2 - self.center_poly(POLYNOMIAL_COEFFICIENT)) * 3.7 / 700 self._draw_overlay(original_image) self._draw_info(original_image) def _update_lane_left(self, found, x, y): if found: if self.left_line: self.left_line.update(y=x, x=y) else: self.left_line = Line(self.n_images, y, x) def _update_lane_right(self, found, x, y): if found: if self.right_line: self.right_line.update(y=x, x=y) else: self.right_line = Line(self.n_images, y, x) def process_image(self, image): original_image = np.copy(image) image = self.cam_calibration.undistort(image) image = lane_mask(image, VERTICAL_OFFSET) image = self.perspective.transform(image) left_found = right_found = False left_x = left_y = right_x = right_y = [] left_found, right_found, left_x, left_y, right_x, right_y = \ self._process_history(image, left_found, right_found, left_x, left_y, right_x, right_y) left_found, right_found, left_x, left_y, right_x, right_y = \ self._process_histogram(image, left_found, right_found, left_x, left_y, right_x, right_y) self._update_lane_left(left_found, left_x, left_y) self._update_lane_right(right_found, right_x, right_y) self._draw(image, original_image) return original_image
class WhiteBoard: running = False mouseclicked = False calibrating = False KEEPLIGHTTIME = 3 #will not consider if we lose the light for x frames CLICKMAXTIME = 5 #longest that we will consider as a click for touchpad mode CLICKMINTIME = 3 # will not move the pointer for the first x frames after click to help a singleclic with shaky hand MAXTIMEBETWEENCLICKDRAG = 5 #... def __init__(self): self.mouse = MouseControl() self.XRES, self.YRES = self.mouse.get_screen_resolution() self.XRES, self.YRES = float(self.XRES), float(self.YRES) self.ABCD = [(0, 0), (self.XRES - 5, 0), (0, self.YRES - 5), (self.XRES - 5, self.YRES - 5)] self.perspective = Perspective() self.perspective.setdst((0.0, 0.0), (self.XRES, 0.0), (0.0, self.YRES), (self.XRES, self.YRES)) self.cursor = Cursor(self) self.Options = Options() def printOut(self, strList, newLine=True): for all in strList: print all, #if newLine: print "" print "" def GetBattery(self): try: bat = self.wiimote.WiimoteState.Battery except AttributeError: return None if bat: return bat * 100 / 200 else: return None def Connect(self): self.wiimote = Wiimote() self.OnConnecting() if sys.platform != 'win32': self.printOut(["Press 1 and 2 on wiimote"]) #try: self.wiimote.Connect() #except: # self.OnNoWiimoteFound() # raise SystemExit self.OnConnected() self.wiimote.SetLEDs(True, False, False, False) self.wiimote.SetReportType(self.wiimote.InputReport.IRAccel, True) def IsConnected(self): try: cn = self.wiimote.running except: return False return cn def IsRunning(self): return self.running def OnRun(self): #to be overloaded pass def OnConnected(self): #to be overloaded self.printOut(["Battery: ", self.GetBattery(), "%"]) def OnConnecting(self): #to be overloaded pass def OnDisconnect(self): #to be overloaded pass def OnStop(self): #to be overloaded pass def OnCalibrated(self): #to be overloaded pass def OnCalibrating(self): pass def OnNoWiimoteFound(self): print "No Wiimote or no Bluetooth found..." pass def OnOptionsChanged(self): pass def Run(self): self.running = True keep_light = self.KEEPLIGHTTIME x, y = (0, 0) first_x, first_y = (0, 0) light_state = False timer = 0 self.OnRun() while self.running: if self.wiimote.WiimoteState.ButtonState.A: self.Stop() continue time.sleep(0.030) if self.wiimote.WiimoteState.IRState.Found1: if not light_state: #if the light appears light_state = True first_x, first_y = map( int, self.perspective.warp( self.wiimote.WiimoteState.IRState.RawX1, self.wiimote.WiimoteState.IRState.RawY1)) x, y = first_x, first_y timer = 0 #reset timer self.cursor.update(light_state, True, (first_x, first_y), (x, y), timer) else: #if the light is still lit x, y = map( int, self.perspective.warp( self.wiimote.WiimoteState.IRState.RawX1, self.wiimote.WiimoteState.IRState.RawY1)) timer += 1 keep_light = self.KEEPLIGHTTIME #keep it high while light is seen. 
self.cursor.update(light_state, False, (first_x, first_y), (x, y), timer) else: if keep_light and timer > self.KEEPLIGHTTIME: keep_light -= 1 #keep_light will not prevent cut-off if the light goes out quick else: if light_state: light_state = False self.cursor.update(light_state, True, (first_x, first_y), (x, y), timer) timer = self.KEEPLIGHTTIME - keep_light #this is the true delay since the light has gone off, not 0 else: timer += 1 self.cursor.update(light_state, False, (first_x, first_y), (x, y), timer) def Stop(self): self.running = False self.OnStop() def Disconnect(self): if self.IsRunning: self.Stop() self.wiimote.Disconnect() self.OnDisconnect() if self.calibrating: raise SystemExit del self.wiimote def Calibrate(self): self.Stop() self.calibrating = True self.OnCalibrating() self.printOut(["Point to the top left corner of the screen... "], False) self.ABCD[0] = self.WaitForLight() self.printOut(["OK"]) self.WaitNoLight() self.printOut(["Point to the top right corner of the screen... "], False) self.ABCD[1] = self.WaitForLight() self.printOut(["OK"]) self.WaitNoLight() self.printOut(["Point to the bottom left corner of the screen... "], False) self.ABCD[2] = self.WaitForLight() self.printOut(["OK"]) self.WaitNoLight() self.printOut(["Point to the bottom right corner of the screen... "], False) self.ABCD[3] = self.WaitForLight() self.printOut(["OK"]) self.WaitNoLight() x0, y0 = self.ABCD[0] x1, y1 = self.ABCD[1] x2, y2 = self.ABCD[2] x3, y3 = self.ABCD[3] self.perspective.setsrc((x0, y0), (x1, y1), (x2, y2), (x3, y3)) self.calibrating = False self.printOut(["Calibration complete!"]) self.OnCalibrated() def WaitForLight(self): dot = False while not dot: checkdot = self.wiimote.WiimoteState.IRState.RawX1, self.wiimote.WiimoteState.IRState.RawY1 if self.wiimote.WiimoteState.IRState.Found1: dot = checkdot return dot time.sleep(0.030) def WaitNoLight(self): time.sleep(0.5) while 1: time.sleep(0.030) if self.wiimote.WiimoteState.IRState.Found1 == False: return
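# Hedged sketch of the mapping Calibrate() configures above: setsrc() receives the
# four corners pointed at with the IR pen and setdst() the four screen corners, i.e.
# a classic four-point homography. The original Perspective class is homegrown; the
# equivalent computation with OpenCV (cv2 here is an assumption, not used by the
# WhiteBoard code) looks like this:
import cv2
import numpy as np

def warp_point(src_corners, dst_corners, point):
    """Map one (x, y) point through the homography defined by four point pairs."""
    matrix = cv2.getPerspectiveTransform(np.float32(src_corners), np.float32(dst_corners))
    mapped = cv2.perspectiveTransform(np.float32([[point]]), matrix)
    return tuple(mapped[0, 0])

# e.g. warp_point(ir_corners, [(0, 0), (1280, 0), (0, 800), (1280, 800)], (512, 384))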
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
plt.imshow(result)
plt.plot(left_fit_x, plot_y, color='yellow')
plt.plot(right_fit_x, plot_y, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.axis("off")

# Save visualization
if save_image:
    plt.savefig('./output_images/vis_area.png', bbox_inches='tight')
plt.show()


if __name__ == '__main__':
    polyfit = Polyfit()
    perspective = Perspective()

    image_file = 'test_images/test2.jpg'
    image = mpimg.imread(image_file)
    undistorted = undistort(image)

    # Create binary outputs
    abs_thresh, mag_thresh, dir_thresh, hls_thresh, hsv_thresh, combined_output = \
        combined_threshold(undistorted)
    plt.imshow(combined_output, cmap='gray')

    warped = perspective.warp(combined_output)
    plt.imshow(warped, cmap='gray')

    # Find lanes
    left_fit, right_fit, vars = polyfit.poly_fit_skip(warped)
    left_curve_rad, right_curve_rad = polyfit.curvature()
    position = polyfit.vehicle_position(warped)
    print('curvature: {:.2f}m'.format((left_curve_rad + right_curve_rad) / 2))
class LaneLines: def __init__(self, camera_matrix, distortion_coefficients, buffer_size = 50, img_size = (1280,720)): self.camera_matrix = camera_matrix self.distortion_coefficients = distortion_coefficients src = np.float32( [[(img_size[0] / 2) - 75, img_size[1] / 2 + 100], [((img_size[0] / 6) - 10), img_size[1]], [(img_size[0] * 5 / 6) + 60, img_size[1]], [(img_size[0] / 2 + 75), img_size[1] / 2 + 100]]) dst = np.float32( [[(img_size[0] / 4), 0], [(img_size[0] / 4), img_size[1]], [(img_size[0] * 3 / 4), img_size[1]], [(img_size[0] * 3 / 4), 0]]) self.perspective = Perspective(src,dst) self.search_engine = HistogramSearch( window_width = 200, window_height = 80, min_pixels_to_recenter=50, visualisation = None) self.left = Lane(buffer_size) self.right = Lane(buffer_size) def draw(self, img): preprocessed_img = self.preprocess(img) left, right = self.find_lanes(preprocessed_img) result = img if left.best_fit is not None and right.best_fit is not None: area = self.lane_area(preprocessed_img, left, right) area = cv2.warpPerspective(area, self.perspective.matrix, (img.shape[1], img.shape[0]), flags=cv2.WARP_INVERSE_MAP) result = cv2.addWeighted(img, 1, area, 0.3, 0) ls = LaneStatistics(preprocessed_img, left) rs = LaneStatistics(preprocessed_img, right) radius = "radius of curvature {0:.0f} m" \ .format((ls.radius_of_curvature_in_m + rs.radius_of_curvature_in_m)/2) offset = "offset {0:.2f} m" \ .format(np.abs(ls.distance_to_vehicle_center_in_m - rs.distance_to_vehicle_center_in_m)) cv2.putText(result, radius, (50, 65), cv2.FONT_HERSHEY_PLAIN, 3,(255,255,255),2); cv2.putText(result, offset, (50, 135), cv2.FONT_HERSHEY_PLAIN, 3,(255,255,255),2); return result def find_lanes(self, preprocessed_img): l_detection, r_detection = self.search_engine.find_lane_polynomials( preprocessed_img, self.left.best_fit, self.right.best_fit) if l_detection is None or r_detection is None or not l_detection.trustful() or not r_detection.trustful(): l_detection, r_detection = self.search_engine.find_lane_polynomials(preprocessed_img) self.left.update(l_detection) self.right.update(r_detection) return (self.left, self.right) def lane_area(self, warped, left_lane, right_lane): warp_zero = np.zeros_like(warped).astype(np.uint8) color_warp = np.dstack((warp_zero, warp_zero, warp_zero)) arguments_y = np.linspace(0, warped.shape[0]-1, warped.shape[0] ) left_values_x = left_lane.best_fit[0]*arguments_y**2 + left_lane.best_fit[1]*arguments_y + left_lane.best_fit[2] right_values_x = right_lane.best_fit[0]*arguments_y**2 + right_lane.best_fit[1]*arguments_y + right_lane.best_fit[2] # Recast the x and y points into usable format for cv2.fillPoly() pts_left = np.array([np.transpose(np.vstack([left_values_x, arguments_y]))]) pts_right = np.array([np.flipud(np.transpose(np.vstack([right_values_x, arguments_y])))]) pts = np.hstack((pts_left, pts_right)) # Draw the lane onto the warped blank image cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0)) return color_warp def preprocess(self, img): img = self.undistort(img) img = self.blur(img) img = self.threshold(img) img = self.warp(img) return img def warp(self, img): return self.perspective.warp(img) def blur(self, img, kernel_size = 5): return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def undistort(self, img): return cv2.undistort(img, self.camera_matrix, self.distortion_coefficients, None, self.camera_matrix) def threshold(self, img): img_hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS).astype(np.float) img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(np.float) img_luv 
= cv2.cvtColor(img, cv2.COLOR_BGR2LUV).astype(np.float) threshold = Threshold() w_binary = threshold.white(img_luv, min_max= (180, 255)) l_x = threshold.gradient_magnitude_x(img_luv[:,:,0], sobel_kernel=5, min_max = (15,255)) l_d = threshold.gradient_direction_xy(img_luv[:,:,0], sobel_kernel=3, min_max = (0.7, 1.3)) white = np.zeros_like(w_binary) white[(img[:,:,0] >= 200) & (img[:,:,1] >= 200) & (img[:,:,2] >= 200)] = 1 r_x = threshold.gradient_magnitude_x(white, sobel_kernel=5, min_max = (10,255)) l_binary = threshold.lightness(img_hls, min_max = (210, 255)) g_m = threshold.gradient_magnitude_x(l_binary, sobel_kernel=31, min_max = (100, 255)) white_lane = np.zeros_like(w_binary) white_lane [(r_x == 1) | ((l_d == 1) & (l_x == 1) & (w_binary == 1)) | (g_m == 1)] = 1 y_binary = threshold.blue_yellow(img_lab, min_max= (145, 255)) y_m = threshold.gradient_magnitude_xy(img_lab[:,:,2], sobel_kernel=3, min_max = (50, 255)) s_binary = threshold.saturation(img_hls, min_max = (10,255)) s_x = threshold.gradient_magnitude_x(img_hls[:,:,2], sobel_kernel=31, min_max = (100,255)) s_d = threshold.gradient_direction_xy(img_hls[:,:,2], sobel_kernel=31, min_max = (0.6, 1.2)) yellow_line = np.zeros_like(y_binary) yellow_line[(y_binary == 1) | (y_m == 1) | ((s_d == 1) & (s_x == 1) & (s_binary == 1))] = 1 result = np.zeros_like(w_binary) result[(yellow_line == 1) | (white_lane == 1)] = 1 return result
def process_frame(frame): """ Process a single image or a frame from a video to find the lane lines This is the main entry point for this code. It can be used in debug mode (see gEnv above) to process a single image, but is designed to operate with moviepy to process videos. Args: frame (image): The input image Returns: result (image): The output image with the lane and lines overlaid, along with curvature and car position. """ global gEnv gEnv['frame_count'] += 1 # keep track of frames to get ratio of good ones # Correct for camera distortion corrected = correct_image(frame) # normalize brightness and contrast image = apply_CLAHE(corrected) # Transform it to birdseye image = transform2birdseye(image) if gEnv['debug']: cv2.imshow('Birdseye', image) # Enhance the image, and if necessary find the starts of the lines enhanced, gEnv['prev_line_ends'] = find_line_starts(image, gEnv) if gEnv['debug']: cv2.imshow('Enhanced', enhanced) # walk up the lines to find a series of points left_Xpts, right_Xpts, Ypts, data_good = walk_lines(enhanced, gEnv) # Fit a 2nd order function for each line left_coef = np.polyfit(Ypts, left_Xpts, 2) right_coef = np.polyfit(Ypts, right_Xpts, 2) # Get new lines from the fitted curve Yarr = np.array(Ypts) left_newXpts = (left_coef[0] * Yarr**2) + (left_coef[1] * Yarr) + left_coef[2] right_newXpts = (right_coef[0] * Yarr**2) + (right_coef[1] * Yarr) + right_coef[2] # was the walk data good enough to keep? max_bad_frames = 25 # don't reset on the first bad frame, wait a second if data_good: gEnv['bad_frame_count'] = 0 gEnv[ 'left_prev_coef'] = left_coef # only save coef if data was actually good, regardless of count gEnv['right_prev_coef'] = right_coef gEnv['prev_line_ends'] = [ left_newXpts[len(left_newXpts) - 1], right_newXpts[len(right_newXpts) - 1] ] gEnv['good_frames'] += 1 else: gEnv['bad_frame_count'] += 1 if gEnv['bad_frame_count'] > max_bad_frames: gEnv['prev_data_good'] = False else: gEnv['prev_data_good'] = True # Create an image to draw the lines on warp_zero = np.zeros_like(image).astype(np.uint8) # Form polylines and polygon left_newpts = [z for z in zip(left_newXpts, Ypts)] left_newpts = [np.array(left_newpts, dtype=np.int32)] right_newpts = [z for z in zip(right_newXpts, Ypts)] right_newpts = [np.array(right_newpts, dtype=np.int32)] poly_pts = np.vstack((left_newpts[0], right_newpts[0][::-1])) cv2.fillPoly(warp_zero, np.int_([poly_pts]), (0, 255, 0)) # Draw the polylines and polygon cv2.fillPoly(warp_zero, np.int_([poly_pts]), (0, 255, 0)) cv2.polylines(warp_zero, np.int_([left_newpts]), False, (255, 0, 0), thickness=30) cv2.polylines(warp_zero, np.int_([right_newpts]), False, (0, 0, 255), thickness=30) # Where are we in the lane? 
car_center = frame.shape[1] / 2 # camera is centered in the car left_lineX = left_newXpts[0] right_lineX = right_newXpts[0] lane_center = np.mean([left_lineX, right_lineX]) birdseye_laneWpx = 906 # pixel width of lane in birdseye (points are in birdseye space) meters_px = 3.6576 / birdseye_laneWpx # Assumming that the lane is 12 feet wide (3.6576 meters) offcenter_Px = car_center - lane_center offcenter_Meters = offcenter_Px * meters_px if offcenter_Meters > 0: position_msg = 'Car is {:.2f} meters to the right of center'.format( abs(offcenter_Meters)) else: position_msg = 'Car is {:.2f} meters to the left of center'.format( abs(offcenter_Meters)) # Define conversions in x and y from pixels space to meters y_eval = np.max(Ypts) ym_per_pix = 30 / 720 # meters per pixel in y dimension xm_per_pix = meters_px # meters per pixel in x dimension left_fit_cr = np.polyfit(Yarr * ym_per_pix, np.asarray(left_Xpts) * xm_per_pix, 2) right_fit_cr = np.polyfit(Yarr * ym_per_pix, np.asarray(right_Xpts) * xm_per_pix, 2) left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval + left_fit_cr[1])**2)** 1.5) / np.absolute(2 * left_fit_cr[0]) right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval + right_fit_cr[1])**2) **1.5) / np.absolute(2 * right_fit_cr[0]) # Now our radius of curvature is in meters curvature_msg = 'Curvatures: {:.2f} meters left line, {:.2f} meters right line'.format( left_curverad, right_curverad) # Transform the drawn image back to normal from birdseye P = Perspective() normal = P.back2normal(warp_zero) # Overlay the drawn image on the corrected camera image result = cv2.addWeighted(corrected, 1, normal, 0.7, 0) cv2.putText(result, position_msg, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 0), 2) cv2.putText(result, curvature_msg, (10, 50), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 0), 2) if data_good: cv2.putText(result, 'Good Data', (10, 80), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2) else: cv2.putText(result, 'Bad Data', (10, 80), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2) return result
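# Hedged reference for the curvature computation above: for a second-order fit
# x = A*y**2 + B*y + C, the radius of curvature evaluated at y is
#     R(y) = (1 + (2*A*y + B)**2)**1.5 / abs(2*A)
# which is exactly what left_curverad / right_curverad compute after rescaling the
# polynomial fits into meters with xm_per_pix and ym_per_pix.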
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table_experiments as dt
from flask_caching import Cache
import numpy as np
import os
import pandas as pd
import plotly.graph_objs as go
import time
import json

from perspective import Perspective
from twitter import Twitter

perspective_key = os.environ.get('PERSPECTIVE_KEY')
perspective_client = Perspective(perspective_key)

twitter_consumer_key = os.environ.get('TWITTER_KEY')
twitter_consumer_secret = os.environ.get('TWITTER_SECRET')
twitter_client = Twitter(twitter_consumer_key, twitter_consumer_secret)

app = dash.Dash('harassment dashboard')
server = app.server

CACHE_CONFIG = {
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': os.environ.get('REDIS_URL', 'localhost:6379')
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)

app.css.append_css(
class AreaDeResguardo(): """ Includes some extra methods util for automovil track """ def __init__(self, imagenDeInicializacion, maximoPuntos, poligonoDePartida, poligonoDeLlegada=[[0, 0]]): # El poligono self.imagenAuxiliarEnGrises = cv2.cvtColor(imagenDeInicializacion, cv2.COLOR_BGR2GRAY) [self.height, self.width] = imagenDeInicializacion.shape[:2] #print('AQUIIIIIIIIIIIIIIII:',self.width,self.height) if poligonoDePartida == None: #print('No se utilizara mascara') self.mascara = None self.regionDeInteres = np.array([[0, 0], [0, 240], [320, 240], [320, 0]]) else: self.regionDeInteres = np.array(poligonoDePartida) self.mascara = np.zeros_like(self.imagenAuxiliarEnGrises) cv2.fillPoly(self.mascara, [self.regionDeInteres], 255) #print('Mascara creada con exito') self.poligonoDeLlegada = np.array(poligonoDeLlegada) # Auxiliares self.feature_params = dict(maxCorners=maximoPuntos, qualityLevel=0.3, minDistance=7, blockSize=7) self.lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) self.color = np.random.randint( 0, 255, (50, 3)) # Se empleara un color nuevo por infraccion self.zonaPartida = Perspective(self.regionDeInteres) self.imagenAuxiliarZonaPartida = self.zonaPartida.generarRegionNormalizada( imagenDeInicializacion) self.flujoEnLaPartida = FlowDetector(180) self.flujoEnLaPartida.inicializarClase(self.imagenAuxiliarZonaPartida) def obtenerPuntosAseguirDe(self, CVImagen): self.imagenAuxiliarEnGrises = cv2.cvtColor(CVImagen, cv2.COLOR_BGR2GRAY) # Aqui deberiamos agregar un filtro que retire los puntos de afuera del ROI caracteristicasASeguir = self.obtenerUltimosPuntosASeguir() return caracteristicasASeguir def obtenerUltimosPuntosASeguir(self): caracteristicasASeguir = cv2.goodFeaturesToTrack( self.imagenAuxiliarEnGrises, mask=self.mascara, **self.feature_params) try: puntosExtraidos = caracteristicasASeguir.ravel().reshape( caracteristicasASeguir.ravel().shape[0] // 2, 2) #print('PUNTOS CREADOS: ',caracteristicasASeguir) except: print('NO OBTUVE PUNTOS CARACTERISTICOS') #for punto in puntosExtraidos: # cv2.circle(self.imagenAuxiliarEnGrises, tuple(punto), 1, 0, -1) #cv2.imshow('PuntosEncontradosVisual',self.imagenAuxiliarEnGrises) return np.array([caracteristicasASeguir]) def encontrarObjeto(self, imagenActual, caracteristicasASeguir): imagenActualEnGris = cv2.cvtColor(imagenActual, cv2.COLOR_BGR2GRAY) nuevaUbicacion, activo, err = cv2.calcOpticalFlowPyrLK( self.imagenAuxiliarEnGrises, imagenActualEnGris, caracteristicasASeguir, None, **self.lk_params) return np.array([nuevaUbicacion]), activo, err def encontrarObjetoYPaso(self, imagenActual, caracteristicasASeguir): nuevaUbicacion, activo, err = self.encontrarObjeto( imagenActual, caracteristicasASeguir) self.imagenAuxiliarEnGrises = imagenActualEnGris return nuevaUbicacion, activo, err def calcularFlujoTotalEnFrame(self, CVImagen): self.imagenAuxiliarZonaPartida = self.zonaPartida.generarRegionNormalizada( CVImagen) total_flow, modulo, lines = self.flujoEnLaPartida.procesarNuevoFrame( self.imagenAuxiliarZonaPartida ) # Obtengo vector y parametros de flujo momentumAutomovil, vectorSuave, vectorRuidoso, velocidad, indiceFrameActual = self.flujoEnLaPartida.procesarFlujoEnTiempoReal( total_flow) # Proyecto y filtroFiltro return vectorRuidoso, velocidad, -indiceFrameActual def calcularFlujoTotalEnFrameYPaso(self, CVImagen): vectorRuidoso, velocidad, indiceFrameActual = self.calcularFlujoTotalEnFrame( CVImagen) self.imagenAuxiliarEnGrises = cv2.cvtColor(CVImagen, 
cv2.COLOR_BGR2GRAY) ###### WARNING, DOUBLE CVT TRANSFORM ALERT ###### return vectorRuidoso, velocidad, indiceFrameActual def encontrarObjetosYCalcularFlujo(self, imagenActual, infraccionesListOfDict): numeroDeInfracciones = 0 listoParaTomarFoto = False for infraction in infraccionesListOfDict: #print(infraction['estado'],' :\t',infraction['desplazamiento'].shape,infraction['frameInicial'],' a ',infraction['frameFinal'],'\t',infraction['name']) numeroDeInfracciones += 1 # Para cada infraccion tomo el ultimo elemento lo proceso con la imagen actual y lo appendo al mismo lugar if infraction['estado'] == 'Candidato': nuevoVector, act, err = self.encontrarObjeto( imagenActual, infraction['desplazamiento'][-1]) nuevoVectorCompleto = np.append(infraction['desplazamiento'], nuevoVector, axis=0) infraction['desplazamiento'] = nuevoVectorCompleto # Si el nuevo vector esta dentro del poligono de llegada (numero mayor o igual a cero se declara como confirmado) numeroDePuntos = len(nuevoVector[0]) puntosFueraFrame = 0 puntosFueraPoligonoInicial = 0 for pattern in nuevoVector[0]: #print(pattern) x, y = pattern.ravel() #print(pattern) #print((x,y)) #print(cv2.pointPolygonTest(self.poligonoDeLlegada,(x,y),True)) if cv2.pointPolygonTest(self.poligonoDeLlegada, (x, y), True) >= 0: nuevoItem = {'estado': 'Confirmando'} infraction.update(nuevoItem) if cv2.pointPolygonTest(self.regionDeInteres, (x, y), True) < 0: puntosFueraPoligonoInicial += 1 if self.estaFueraDeFrame((x, y)): puntosFueraFrame += 1 #if self.estaFueraDeFrame((x,y)): # nuevoItem = {'estado':'Viro Fuera'} # infraction.update(nuevoItem) #print('Puntos adentro del frame: ',numeroDePuntos) if nuevoVectorCompleto.shape[0] > 250: nuevoItem = {'estado': 'Timeout'} infraction.update(nuevoItem) if puntosFueraFrame >= numeroDePuntos: nuevoItem = {'estado': 'Giro'} infraction.update(nuevoItem) if puntosFueraPoligonoInicial >= numeroDePuntos - 2: listoParaTomarFoto = True #for i in range(numeroDeInfracciones): #sys.stdout.write("\033[F") # pass vectorRuidoso, velocidad, indiceFrameActual = self.calcularFlujoTotalEnFrame( imagenActual) imagenActualEnGris = cv2.cvtColor(imagenActual, cv2.COLOR_BGR2GRAY) if listoParaTomarFoto == True: indiceFrameActual = -10 return vectorRuidoso, velocidad, indiceFrameActual def encontrarObjetosYCalcularFlujoYPaso(self, imagenActual, infraccionesListOfDict): vectorRuidoso, velocidad, indiceFrameActual = self.encontrarObjetosYCalcularFlujo( imagenActual, infraccionesListOfDict) self.imagenAuxiliarEnGrises = cv2.cvtColor(imagenActual, cv2.COLOR_BGR2GRAY) # Si los puntos estan en el lugar optimo para una infraccion se manda -10 return vectorRuidoso, velocidad, indiceFrameActual def estaFueraDeFrame(self, tupleVector): x = tupleVector[0] y = tupleVector[1] if (x < 0) | (y < 0) | (x > self.width) | (y > self.height): return True else: return False
class Pipeline: # constructor def __init__(self): self.mtx_file_path = 'camera_matrices/mtx.npz' self.camera_matrices = None self.thresholding_pipeline = Thresholding() self.perspective = Perspective() self.load_camera_matrices() self.line_search = LineSearch() # Perform the full image processing pipeline one by one def run_pipeline(self, image): self.calculate_calib_matrices() dist = self.undistort_image(image) thresh_image = self.thresholding_pipeline.process_image(dist) pers_image = self.perspective.warp_image(thresh_image) centroids, output = self.line_search.search(pers_image) left_fit, right_fit, result = self.line_search.fit_polynomial( pers_image, centroids, self.perspective.pers_Minv, dist) # print undistorted image # plt.figure() # plt.imshow(dist) # plt.waitforbuttonpress() # plt.figure() plt.imshow(pers_image, cmap='gray') plt.waitforbuttonpress() plt.imshow(result, cmap='gray') plt.waitforbuttonpress() return result def calculate_calib_matrices(self, calib_folder='../data/camera_cal', draw=False): # check if we have the matrices if self.camera_matrices is not None: return # termination criteria TODO: check what happens if not used nx, ny = 9, 6 criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((nx * ny, 3), np.float32) objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d point in real world space imgpoints = [] # 2d points in image plane. # get all calibration images calib_images = glob.glob(calib_folder + '/*') for fname in calib_images: img = cv2.imread(fname) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find the corners ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None) # if found, add objpoints and image points if ret == True: objpoints.append(objp) imgpoints.append(corners) # Draw and display the corners if draw: cv2.drawChessboardCorners(img, (9, 6), corners, ret) cv2.imshow('img', img) cv2.waitKey(0) # now calculate the matrices for calibration ret, mtx, dist, revcs, tvecs = cv2.calibrateCamera( objpoints, imgpoints, gray.shape[::-1], None, None) # save these for using when distortion happens np.savez(self.mtx_file_path, mtx=mtx, dist=dist) self.camera_matrices = (mtx, dist) def load_camera_matrices(self): if os.path.isfile(self.mtx_file_path): npzfile = np.load(self.mtx_file_path) mtx, dist = npzfile['mtx'], npzfile['dist'] self.camera_matrices = (mtx, dist) def undistort_image(self, image): mtx, dist = self.camera_matrices h, w = image.shape[:2] newcameramtx, roi = cv2.getOptimalNewCameraMatrix( mtx, dist, (w, h), 1, (w, h)) # undistort dst = cv2.undistort(image, mtx, dist, None, newcameramtx) x, y, w, h = roi dst = dst[y:y + h, x:x + w] return dst
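# Hedged usage sketch for the Pipeline class above; the test image path is
# illustrative, not taken from the source.
# import matplotlib.image as mpimg
# pipeline = Pipeline()
# annotated = pipeline.run_pipeline(mpimg.imread('test_images/test1.jpg'))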