from pyscopelib import *
from sys import stdout

from fps import FPS

fps = FPS(60)

f = Frame()
f.add_shape(Line(28160, 21760, 10240, 25600))
f.add_shape(Line(19968, 24064, 19968, 43519))
f.add_shape(Line(8704, 31744, 30720, 31744))
f.add_shape(Line(37119, 23039, 53759, 23039))
f.add_shape(Line(53759, 23039, 53759, 31488))
f.add_shape(Line(53759, 31488, 37119, 31488))
f.add_shape(Line(37119, 31488, 37119, 23039))
f.add_shape(Line(37119, 25600, 53759, 25600))
f.add_shape(Line(45567, 31488, 45567, 43519))
f.add_shape(Line(34559, 37119, 56319, 37119))

while True:
    stdout.buffer.write(f.get_pcm())
    fps.sleep()
def __init__(self):
    # self.cam = cv2.VideoCapture(0)
    # self.cam.set(3, 320)
    # self.cam.set(4, 240)
    self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
    self.fps = FPS().start()
    ret, self.frame = self.cam.read()
    self.suspend_tracking = SuspendTracking(teta=3)
    self.height, self.width = self.frame.shape[:2]
    self.kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    self.kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    cv2.namedWindow('camshift')
    self.obj_select = RectSelector('camshift', self.onmouse)
    radius = 3
    n_point = 8 * radius
    self.lbpDesc = LBP(n_point, radius)
    self.HSV_CHANNELS = (
        (24, [0, 180], "hue"),  # Hue
        (8, [0, 256], "sat"),   # Saturation
        (8, [0, 256], "val")    # Value
    )
    self.show_backproj = False
    self.track_window = None
    self.histHSV = []
    self.track_box = None
def __init__(self, video_src):
    # Raspberry Pi IP
    #self.server_address = 'rtmp://localhost/dji/stream.h264'
    #self.server_address = 'rtmp://127.0.0.1:1935/dji'
    self.server_address = 'http://192.168.40.146:8000/stream.mjpg'
    #self.server_address = 'rtsp://:192.168.40.118/1'
    #self.server_address = 0
    #self.server_address = 'udp://@:8000 --demux=h264'
    #self.cam = video.create_capture(self.server_address)
    self.cam = WebcamVideoStream(self.server_address).start()
    ret, self.frame = self.cam.read()
    self.fish_cali = fish_calibration(self.frame)
    self.drag_start = None
    self.list_camshift = []
    self.show_backproj = False
    self.newcamshift = None
    self.selection = None
    self.lock = False
    self.mdp = MyUdp()
    #self.count = 0
    self.light = self.get_light()
    self.swicht = False
    #self.list_camshift.append(self.get_car('red.jpg', 0))
    #self.list_camshift.append(self.get_car('green.jpg', 1))
    self.fps = FPS().start()
    # Wi-Fi module IP
    self.mdp.client_address = ('192.168.40.31', 8899)
    cv2.namedWindow('TUCanshift')
    cv2.setMouseCallback('TUCanshift', self.onmouse)
def run(self):
    """
    run() must be called by the user to start, draw and refresh
    everything on the screen.
    """
    # Initialize the window's FPS limiter.
    self.fps = FPS(1.0 / self.fps_limit)

    # Loop until the window should close.
    while not glfw.window_should_close(self.window):
        # Set the current glfw context to self.window.
        glfw.make_context_current(self.window)

        # Set the background color.
        glClearColor(self.background[0], self.background[1],
                     self.background[2], self.background[3])

        # Update/refresh the window.
        glfw.poll_events()
        glClear(GL_COLOR_BUFFER_BIT)

        # Check the fps counter to see if things should be drawn to the screen.
        if self.fps.tick(glfw.get_time()):
            self.on_run()
            glfw.swap_buffers(self.window)

    glfw.terminate()
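The run() loop above only draws when FPS.tick() says a frame is due; that class is not shown in this collection. A minimal sketch of a tick-style limiter consistent with the usage above (constructor takes the frame interval in seconds, tick(now) returns True when enough time has passed) could be:

class FPS:
    """Hypothetical frame limiter matching the tick() usage above."""

    def __init__(self, interval):
        self.interval = interval  # seconds between frames, e.g. 1.0 / 60
        self._last = 0.0

    def tick(self, now):
        # Accept a frame only if the interval has elapsed since the last one.
        if now - self._last >= self.interval:
            self._last = now
            return True
        return False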
class VideoStreamer(object):
    def __init__(self):
        self.fps = FPS().start()

    def get_fps(self):
        self.fps.stop()
        fps = self.fps.fps()
        self.fps.start()
        return fps

    def get_frame(self):
        raise NotImplementedError("Choose a video streamer from the available ones "
                                  "e.g., CV2VideoStreamer or ROSVideoStreamer")
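get_fps() above stops the counter, reads the rate, and restarts it. Most snippets in this collection appear to assume the imutils-style FPS counter (start/stop/update/elapsed/fps plus a _numFrames attribute); a minimal sketch of that counter, for reference:

import datetime

class FPS:
    def __init__(self):
        self._start = None   # start timestamp
        self._end = None     # stop timestamp
        self._numFrames = 0  # frames counted between start() and stop()

    def start(self):
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        self._numFrames += 1

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()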
def from_stream():
    fps = FPS().start()
    cam = WebcamVideoStream().start()
    max_frames = 50
    i = 0
    while True:
        frame = cam.read()
        if i > max_frames:
            fps.stop()
            print(fps.elapsed())
            print(fps.fps())
            break
        i += 1
        testcone(frame, stream=True)
        fps.update()
        cv2.imshow('', frame)
        cv2.waitKey(1)
def threaded_video_test():
    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")
    vs = WebcamVideoStream(src=SRC).start()
    fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of VID_WIDTH pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed on our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
def __init__(self):
    # self.cam = cv2.VideoCapture(0)
    # self.cam.set(3, 320)
    # self.cam.set(4, 240)
    self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
    self.fps = FPS().start()
    ret, self.frame = self.cam.read()
    self.conf = {
        'ColorFrameNum': 7,
        'LBPFrameNum': 7,
        'MaxFrameDiffClr': 15,
        'MaxLBPFrameUpdate': 30,
        'L_Weight': 0.3,
        'A_Weight': 0.7,
        'B_Weight': 0.7
    }
    self.ColorCheck = AdaptiveThreshold(teta=3, max_lost_cnt=1)
    self.LBPCheck = AdaptiveThreshold(teta=2, max_lost_cnt=1)
    self.ColorDistance = LABDistance()
    self.LBPDistance = LocalBinaryPatterns(
        numPoints=8, radius=2,
        update_prev_hist=self.conf['MaxLBPFrameUpdate'])
    self.isLost = False
    self.isLBPLost = False
    self.height, self.width = self.frame.shape[:2]
    self.kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    cv2.namedWindow('camshift')
    self.obj_select = RectSelector('camshift', self.onmouse)
    self.LAB_CHANNELS = (
        (24, [0, 256], "light"),  # L
        (24, [0, 256], "a"),      # a
        (24, [0, 256], "b")       # b
    )
    self.show_backproj = False
    self.track_window = None
    self.histLAB = []
    self.track_box = None
def blocking_video_test():
    # grab a pointer to the video stream and initialize the FPS counter
    print("[INFO] sampling frames from webcam...")
    stream = cv2.VideoCapture(SRC)
    fps = FPS().start()

    # loop over some frames
    while fps._numFrames < 1000:
        # grab the frame from the stream and resize it to have a maximum
        # width of VID_WIDTH pixels
        (grabbed, frame) = stream.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed on our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
class PyForce:
    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode(RESOLUTION)
        pygame.display.set_caption(CAPTION)
        self.clock = pygame.time.Clock()
        self.sfx = Sfx()
        self.rand = Random()
        self.battlefield = BattleField(self.screen, self.sfx, self.rand)
        if FPS_ENABLED:
            self.fps = FPS(self.screen, self.clock)
        self.status = 'game'
        if DEBUG:
            print('init : main')
            import sys
            print(sys.version[:6])

    def run(self):
        while self.status != 'quit':
            # check quit
            for event in pygame.event.get():
                if event.type == QUIT:
                    self.status = 'quit'
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        self.status = 'quit'
            # select status
            if self.status == 'intro':
                self.intro()
            elif self.status == 'game':
                self.game()
            elif self.status == 'win':
                img = pygame.image.load('res/img/end.png').convert()
                self.screen.blit(img, (0, 0))
                pygame.display.update()
                self.clock.tick(FPS_LIMIT)
            else:
                if DEBUG:
                    print('user prompt quit')

    def game(self):
        self.battlefield.update()
        if self.battlefield.status == 'win':
            self.status = 'win'
        if FPS_ENABLED:
            self.fps.update()
def __init__(self, log=False):
    if str(log) == "True":
        dir_path = os.getcwd() + "/"
        file_name = time.strftime("%Y.%m.%d.%H.%M.%S") + ".log"
        log = dir_path + file_name
    self.log = log
    self.mtr = BigBoy()
    self.imu = IMU()
    self.fps = FPS(1.0)
    self.t0 = time.time()
    self.steps = 5
    self.max_steps = 60
    self.pos = 0
    self.threshold = 1.5
    self.tilt_constant = -25.0
def __init__(self):
    self.env = Environment()
    self.bot = Robot()
    self.fps = FPS()
    self.start = [18, 1]
    self.targetCoverage = 100
    self.timeAvailable = 360
    self.startTime = -1.0
    self.goalVisited = False
    self.executeEndSequence = False
    self.endSequence = []
    self.endSequenceIndex = 1
    self.realignASAP = False
    self.forwardcount = 0
def __init__(self, pace=None, **kwargs):
    super().__init__(**kwargs)
    # self._stop_event: set it to stop the thread
    self._stop_event = Event()
    self.fps = FPS()
    if pace:
        self.pacer = Pacer(pace)
    else:
        self.pacer = None
class GUIFaceFinder:
    fps = FPS()
    state = IdleState()
    resized = False

    @staticmethod
    def css_roi_for_frame_shape(frame_shape):
        w_percent = 0.30
        h_percent = 0.15
        frame_h, frame_w, _ = frame_shape
        roi_top = int(frame_h * h_percent)
        roi_right = int(frame_w * (1 - w_percent))
        roi_bottom = int(frame_h * (1 - h_percent))
        roi_left = int(frame_w * w_percent)
        return (roi_top, roi_right, roi_bottom, roi_left)

    def __update_state(self, people, percent):
        if percent < 0.8:
            if self.state.__class__ != IdleState:
                self.state = IdleState()
        else:
            in_the_frame = [p for p in people if p.in_the_frame]
            if len(in_the_frame) == 0:
                if percent > 0.8:
                    if self.state.__class__ != SearchingState:
                        self.state = SearchingState()
            elif in_the_frame[0].id is None:
                if self.state.__class__ != NoMatchState:
                    self.state = NoMatchState()
            else:
                if self.state.__class__ != MatchState:
                    self.state = MatchState()

    def draw(self, frame, face, people, driver_license_face, percent):
        if not self.resized:
            (roi_top, _roi_right, roi_bottom, _roi_left) = \
                GUIFaceFinder.css_roi_for_frame_shape(frame.shape)
            padding = 32
            banners_height = roi_top - padding
            rects_height = roi_bottom - roi_top
            IdleState.banner = imutils.resize(IdleState.banner, height=banners_height)
            SearchingState.banner = imutils.resize(SearchingState.banner, height=banners_height)
            MatchState.banner = imutils.resize(MatchState.banner, height=banners_height)
            NoMatchState.banner = imutils.resize(NoMatchState.banner, height=banners_height)
            SearchingState.rects = imutils.resize(driver_license_face, height=rects_height)
            MatchState.rects = imutils.resize(MatchState.rects, height=rects_height)
            NoMatchState.rects = imutils.resize(NoMatchState.rects, height=rects_height)
            self.resized = True
        self.__update_state(people, percent)
        self.fps.draw(frame)
        TimeAverageRenderer.draw_for_percentage(frame, percent)
        self.state.update(people)
        self.state.draw(frame, face)
class ALPRCamera:
    def __init__(self, camera, db_service, alpr_config, alpr_run_time, gui):
        self.stopped = False
        self.camera_name = camera.name
        self.cam = videoStream(src=camera.url)
        self.guiFPS = FPS()
        self.gui = gui
        self.detection_boxes = []
        for search_box in camera.aoi_list:
            for search_box_name in search_box:
                new_box = detectionBox(camera.name, search_box_name,
                                       search_box[search_box_name], self.cam,
                                       alpr_config, alpr_run_time, db_service)
                self.detection_boxes.append(new_box)

    def get_frame(self):
        frame = self.cam.read()
        self.guiFPS.update()
        for box in self.detection_boxes:
            frame = box.draw(frame)
        return frame

    def is_alive(self):
        return not self.stopped

    def start(self):
        Thread(target=self.run, args=()).start()
        return self

    def stop(self):
        self.stopped = True

    def run(self):
        self.cam.start()
        self.guiFPS.start_timer()
        for box in self.detection_boxes:
            box.start()

        # main loop
        while not self.stopped:
            continue

        for box in self.detection_boxes:
            box.stop()
        self.guiFPS.stop()
        if self.gui:
            print(self.camera_name + " gui", self.guiFPS.fps())
        self.cam.stop()
        cv2.destroyAllWindows()
        return
def run(self):
    try:
        self.fps = FPS().start()
        for f in self.stream:
            if self.isInterruptionRequested():
                self.finished.emit()
                return
            self.rawCapture.seek(0)
            self.image = f.array  # grab the frame from the stream
##            # Crop
##            if (self.cropRect[2] > self.cropRect[0]) and (self.cropRect[3] > self.cropRect[1]):
##                self.frame.emit(self.image[self.cropRect[0]:self.cropRect[2], self.cropRect[1]:self.cropRect[3]])
            # Emit resized frame for speed
            self.frame.emit(
                cv2.resize(self.image, self.display_frame_size[:2]))
            self.fps.update()
    except Exception as err:
        self.postMessage.emit("{}: error; type: {}, args: {}".format(
            self.__class__.__name__, type(err), err.args))
def __init__(self, path, config):
    QWidget.__init__(self)
    self.path = path
    self.config = config
    self.setWindowTitle('AR4maps')
    self.move(0, 0)
    self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
    self.setup_ui()
    self.markerImg = cv.imread(self.path + self.config['target'])
    # cv.imshow("target", targetImg)
    self._cam = Camera().start()
    self._track = Tracking(self.markerImg)
    self._rendering = Rendering(self.markerImg, self.config['coords'])
    self._fps = FPS()
    self.setup_render()
class CV2VideoStreamer(VideoStreamer):
    def __init__(self, resource=None):
        super(CV2VideoStreamer, self).__init__()
        if resource is None:
            print("You must give an argument to open a video stream.")
            print("  It can be a number as video device, e.g.: 0 would be /dev/video0")
            print("  It can be a url of a stream, e.g.: rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
            print("  It can be a video file, e.g.: samples/moon.avi")
            print("  It can be a class generating images, e.g.: TimeStampVideo")
            exit(0)

        # If given a number, interpret it as a video device
        if isinstance(resource, int) or len(resource) < 3:
            self.resource_name = "/dev/video" + str(resource)
            resource = int(resource)
            self.vidfile = False
        else:
            self.resource_name = str(resource)
            self.vidfile = True
        print("Trying to open resource: " + self.resource_name)

        if isinstance(resource, VideoSource):
            self.cap = resource
        else:
            self.cap = cv2.VideoCapture(resource)

        if not self.cap.isOpened():
            print("Error opening resource: " + str(resource))
            exit(0)

        self.fps = FPS().start()

    def __del__(self):
        self.cap.release()

    def get_frame(self):
        success, image = self.cap.read()
        # TODO (fabawi): resizing the image takes some time. make it multi-threaded
        # image = imutils.resize(image, width=VID_WIDTH)
        ret, jpeg = cv2.imencode('.jpg', image)
        jpeg_bytes = jpeg.tobytes()
        self.fps.update()
        return jpeg_bytes
class World:
    def __init__(self, fps):
        self.display = pygame.display
        self.options = Options()
        self._create_screen()
        pygame.display.set_caption("Shaolin Fury - beta version")
        pygame.mouse.set_visible(False)
        self.quit = False
        self.fps = FPS(fps)
        self.font = Font()
        self.change_state(Game(self, DATADIR, CONFIGDIR))

    def _create_screen(self):
        """Creates the game's main window."""
        if self.options.fullscreen:
            flags = pygame.FULLSCREEN
        else:
            flags = 0
        if self.options.reduce_mode:
            size = (320, 240)
        else:
            size = (640, 480)
        self.screen = pygame.display.set_mode(size, flags)

    def change_state(self, new_state):
        self.state = new_state

    def loop(self):
        while not self.quit:
            n = self.fps.update()
            for t in range(n):
                e = pygame.event.poll()
                if e:
                    if e.type == pygame.QUIT:
                        self.quit = True
                    else:
                        if e.type == pygame.KEYDOWN:
                            if e.key == pygame.K_F3:
                                pygame.display.toggle_fullscreen()
                            elif e.key == pygame.K_q:
                                self.quit = True
            self.state.update()
            if n > 0:
                self.state.draw()
def __init__(self, video_src):
    # Raspberry Pi IP
    self.mdp = MyUdp()
    #self.server_address = 'http://%s:8000/stream.mjpg' % MyUdp.get_piIP('raspberrypi')
    self.server_address = 'http://192.168.56.240:8000/stream.mjpg'
    #self.server_address = 'rtmp://127.0.0.1/live/stream'
    #self.server_address = 'rtmp://127.0.0.1:1935/dji'
    #self.server_address = 'http://192.168.56.146:8000/stream.mjpg'
    #self.server_address = 'http://192.168.191.3:8000/stream.mjpg'
    #self.server_address = 'rtsp://:192.168.40.118/1'
    #self.server_address = 0
    #self.server_address = 'udp://@:8000 --demux=h264'
    #self.cam = video.create_capture(self.server_address)
    self.cam = WebcamVideoStream(self.server_address).start()
    ret, self.frame = self.cam.read()
    self.fish_cali = fish_calibration(self.frame)
    self.drag_start = None
    self.list_camshift = []
    self.show_backproj = False
    self.newcamshift = None
    self.selection = None
    self.lock = False
    #self.count = 0
    self.light = self.get_light()
    self.swicht = False
    #self.list_camshift.append(self.get_car('red.jpg', 0))
    #self.list_camshift.append(self.get_car('yellow.jpg', 1))
    # H, S
    self.BACKGROUND_PARAM = App.calc_HS(cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV))
    self.fps = FPS().start()
    # Wi-Fi module IP
    #self.mdp.client_address = ('192.168.56.31', 8899)
    # new car
    self.mdp.client_address = ('192.168.56.207', 8899)
    cv2.namedWindow('TUCanshift')
    cv2.setMouseCallback('TUCanshift', self.onmouse)
def run_main():
    size = width, height = 1280, 720
    speed = [2, 2]
    black = 0, 0, 0
    screen = pygame.display.set_mode(size)
    sf = StarField(size)
    fps = FPS()
    while 1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        screen.fill(black)
        sf.do_things(screen)
        fps.show_fps(screen)
        pygame.display.flip()
def __init__(self, camera_name, name, area, video_stream_reference, config, runtime, db_reference):
    self.camera_name = camera_name
    self.name = name
    self.area = area  # bounding box for the search
    self.stream = video_stream_reference  # reference to the video feed
    self.old_detected_rect = []
    # threads cannot share an alpr object; each needs its own
    self.alpr = Alpr("us", config, runtime)
    self.alpr.set_top_n(1)
    self.fps = FPS().start_timer()
    self.stopped = False
    # stores license plate objects
    self.license_plate_list = []
    self.licensePlateService = licensePlateService(self, db_reference).start()
def run(self):
    try:
        self.fps = FPS().start()
        for f in self.captureStream:
            if self.isInterruptionRequested():
                break
            self.rawCapture.seek(0)
            img = f.array  # grab the frame from the stream
            self.frame.emit(img)  # cv2.resize(img, self.frameSize[:2]))
            self.fps.update()
##            # Grab jpeg from an mpeg video stream
##            self.videoStream.seek(0)
##            buf = self.videoStream.read()
##            if buf.startswith(b'\xff\xd8'):
##                # jpeg magic number is detected
##                flag = cv2.IMREAD_GRAYSCALE if self.monochrome else cv2.IMREAD_COLOR
##                img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), flag)
##                self.frame.emit(img)
##                self.fps.update()
##                self.videoStream.truncate(0)
    except Exception as err:
        self.postMessage.emit("{}: error; type: {}, args: {}".format(
            self.__class__.__name__, type(err), err.args))
    finally:
        self.fps.stop()
        img = np.zeros(shape=(self.frameSize[1], self.frameSize[0]), dtype=np.uint8)
        cv2.putText(img, 'Camera suspended',
                    (int(self.frameSize[0] / 2) - 150, int(self.frameSize[1] / 2)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255), 1)
        for i in range(5):
            wait_ms(100)
            self.frame.emit(img)
        msg = "{}: info; finished, approx. processing speed: {:.2f} fps".format(
            self.__class__.__name__, self.fps.fps())
        self.postMessage.emit(msg)
        self.finished.emit()
def test01():
    """Can we instantiate?"""
    fps = FPS().start()
    pacer = Pacer(DESIRED_FPS).start()
    while fps.n_frames < N_TEST_FRAMES:
        print(datetime.datetime.now())
        fps.update()
        pacer.update()
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print("[INFO] n_frames: %i" % fps.n_frames)
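test01() paces its loop with a Pacer, which is not shown in this collection. A minimal sketch consistent with the Pacer(DESIRED_FPS).start() / update() calls above (hypothetical implementation, not the project's own) could be:

import time

class Pacer:
    """Sleeps in update() to hold a loop at the requested rate."""

    def __init__(self, desired_fps):
        self.period = 1.0 / desired_fps
        self._next = None

    def start(self):
        self._next = time.time() + self.period
        return self

    def update(self):
        # Sleep off whatever time remains in this frame's budget.
        delay = self._next - time.time()
        if delay > 0:
            time.sleep(delay)
        self._next += self.period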
def run(self):
    try:
        self.fps = FPS().start()
        for f in self.stream:
            if self.isInterruptionRequested():
                self.signals.finished.emit()
                return
##            self.rawCapture.truncate(0)  # Deprecated: clear the stream in preparation for the next frame
            self.rawCapture.seek(0)
            self.frame = f.array  # grab the frame from the stream
            self.signals.ready.emit()
            self.signals.result.emit(
                cv2.resize(self.frame, self.image_size))  # resize to speed up processing
            self.fps.update()
    except Exception as err:
        traceback.print_exc()
        self.signals.error.emit(
            (type(err), err.args, traceback.format_exc()))
class VideoPlayer:
    def __init__(self, source=0, dest=None):
        self._source = source
        self._dest = dest
        self._frame = None
        self._playing = False
        self._fps = FPS()

    def start(self):
        self._cap = cv2.VideoCapture(self._source)
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
        if self._dest is not None:
            width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
            height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
            fps = int(self._cap.get(cv2.CAP_PROP_FPS))
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            self._out = cv2.VideoWriter(self._dest, fourcc, fps, (width, height))
        self._playing = True
        self._fps.start()
        while self._playing:
            self.read_frame()
            self.process_frame()
            self.write_frame()
        self._fps.stop()
        print(self._fps.fps())

    def stop(self):
        self._playing = False
        self._cap.release()
        if self._dest is not None:  # only release the writer if one was created
            self._out.release()
        cv2.destroyAllWindows()

    def read_frame(self):
        ret, frame = self._cap.read()
        self._frame = frame
        self._fps.update()

    def process_frame(self):
        pass

    def write_frame(self):
        self.show_frame()
        if self._dest is not None:
            self.save_frame()

    def show_frame(self):
        cv2.imshow('Video', self._frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            self.stop()

    def save_frame(self):
        self._out.write(self._frame)
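A short usage sketch for the class above (the output path is illustrative): play device 0, optionally record to an MP4, and press 'q' in the video window to stop.

if __name__ == '__main__':
    player = VideoPlayer(source=0, dest='out.mp4')  # dest=None to skip recording
    player.start()  # blocks until stop() is triggered by the 'q' key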
def load():
    random.seed()
    pygame.init()
    pygame.display.set_caption('Unnamed')
    G.screen_w = SCREEN_W
    G.screen_h = SCREEN_H
    G.screen_mode = SCREEN_MODE
    G.screen = pygame.display.set_mode([G.screen_w, G.screen_h], G.screen_mode)
    G.screen.fill(BG_COLOR)
    pygame.display.flip()
    G.background = G.screen.copy()
    G.dirty_rects = []
    G.fps = FPS()
def begin(self):
    self.enemy = Entity(
        Vec2D(Xudia.renderer.width - 10, Xudia.renderer.height / 2),
        Xudia.Graphic('<<==\n|===\n<<=='),
        None
    )
    hitbox = Hitbox(self.enemy, 4, 3)
    self.enemy.hitbox = hitbox
    self.add_entity(self.enemy)
    self.add_entity(Ship(Xudia.renderer.width / 2, Xudia.renderer.height / 2))
    self.add_entity(FPS())
    Xudia.input.add_listener('q', Xudia.engine.stop)
class WebcamVideoStream:
    def __init__(self, src=0, resolution=(320, 240), framerate=32):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.fps = FPS()
        if self.stream and self.stream.isOpened():
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
            self.stream.set(cv2.CAP_PROP_FPS, framerate)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        self.fps.start()
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return
            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
            self.fps.update()

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.fps.stop()
        self.stopped = True
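Typical use of the threaded reader above: start it, poll read() from the processing loop (it returns the latest grabbed frame without blocking), then stop(). A brief sketch:

import time

vs = WebcamVideoStream(src=0, resolution=(640, 480)).start()
time.sleep(1.0)  # give the camera a moment to warm up
for _ in range(100):
    frame = vs.read()  # latest frame; may repeat if the loop outpaces the camera
vs.stop()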
# DEBUG values
SET_GUI = False           # Show the GUI or not
DEBUG_MOTORSPEED = False  # Write motor speed commands to the console or not
DEBUG_TIMING = False      # Write how long each processing step takes to the console or not
DEBUG_CIRCLEPOS = True    # Write the detected circle position to the console or not

# Initialize the motor object
motor = mw.MyMotor("/dev/ttyACM0", 115200)
motor.pwm = 50

# initialize the camera
width = 320
height = 240
camera = PiVideoStream((width, height), 30).start()
counter = FPS()
counter.start()
# allow the camera to warm up before capturing frames
time.sleep(0.5)

# detection variables
posX = None      # X position
posX_prev = 0    # X position in the previous iteration
posY = None      # Y position
posX_exp_filter_coeff = 0.8  # How much the current measurement changes the position, in [0, 1]:
                             # current = alpha * measurement + (1 - alpha) * previous
radius = None    # Circle radius
radius_prev = 0  # Previous circle radius
rad_exp_filter_coeff = 0.8   # How much the current measurement changes the radius, in [0, 1]:
                             # current = alpha * measurement + (1 - alpha) * previous
speed = 0        # Speed to send to the motor controller
angleCorr = 0    # The difference between the two tracks, so the robot turns
roi = None       # Part of the image where we expect to find the ball
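The two filter coefficients above implement simple exponential smoothing of the detected position and radius. A minimal sketch of that update (the helper name is ours, not from the source):

def exp_filter(measurement, previous, alpha):
    # current = alpha * measurement + (1 - alpha) * previous
    return alpha * measurement + (1 - alpha) * previous

# e.g. posX = exp_filter(detected_x, posX_prev, posX_exp_filter_coeff)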
import argparse

import pygame

from screen import Screen
from fps import FPS
from tas import TAS
from emulator import Emulator

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Dumps NES header information')
    parser.add_argument('pathname', help='specify the NES file to open')
    parser.add_argument('--tas', dest='tas', help='specify a TAS file to run', default=None)
    parser.add_argument('--scale', dest='scale', help='specify the screen scale', default=1)
    args = parser.parse_args()

    emulator = Emulator(args.pathname)
    screen = Screen(args.scale)
    fps = FPS()

    if args.tas:
        target_fps = 1000.0
        tas = TAS(args.tas)
    else:
        target_fps = 60.0
        tas = None

    keep_going = True
    while keep_going:
        try:
            # Get button presses
            if tas is None:
                pressed = pygame.key.get_pressed()
                controller = emulator.controllers[0]
                if pressed[pygame.K_RETURN]:
def stream(tracker, camera=0, server=0):
    """
    @brief Captures video and runs tracking and moves robot accordingly
    @param tracker The BallTracker object to be used
    @param camera The camera number (0 is default) for getting frame data
        camera=1 is generally the first webcam plugged in
    """

    ######## GENERAL PARAMETER SETUP ########
    MOVE_DIST_THRESH = 20  # distance at which robot will stop moving
    SOL_DIST_THRESH = 150  # distance at which solenoid fires
    PACKET_DELAY = 1       # number of frames between sending data packets to pi
    OBJECT_RADIUS = 13     # opencv radius for circle detection
    AXIS_SAFETY_PERCENT = 0.05  # robot stops if within this % dist of axis edges

    packet_cnt = 0
    tracker.radius = OBJECT_RADIUS

    ######## SERVER SETUP ########
    motorcontroller_setup = False
    if server:
        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Obtain server address by going to network settings and getting eth ip
        server_address = ('169.254.171.10', 10000)  # CHANGE THIS
        #server_address = ('localhost', 10000)  # for local testing
        print('starting up on %s port %s' % server_address)
        sock.bind(server_address)
        sock.listen(1)
        connection, client_address = None, None
        while True:
            # Wait for a connection
            print('waiting for a connection')
            connection, client_address = sock.accept()
            break

    ######## CV SETUP ########
    # create video capture object
    #cap = cv2.VideoCapture(camera)
    cap = WebcamVideoStream(camera).start()  # WEBCAM
    #cap = cv2.VideoCapture('../media/goalie-test.mov')
    #cap = cv2.VideoCapture('../media/bounce.mp4')
    cv2.namedWindow(tracker.window_name)

    # create trajectory planner object
    # value of bounce determines # of bounces. 0 is default (no bounces)
    # bounce not currently working correctly
    planner = TrajectoryPlanner(frames=4, bounce=0)

    # create FPS object for frame rate tracking
    fps_timer = FPS(num_frames=20)

    while True:
        # start fps timer
        fps_timer.start_iteration()

        ######## CAPTURE AND PROCESS FRAME ########
        ret, frame = True, cap.read()  # WEBCAM
        #ret, frame = cap.read()  # for non-webcam testing
        if ret is False:
            print('Frame not read')
            exit()

        # resize to 640x480, flip and blur
        frame, img_hsv = tracker.setup_frame(frame=frame, w=640, h=480,
                                             scale=1, blur_window=15)

        ######## TRACK OBJECTS ########
        # use the HSV image to get Circle objects for robot and objects.
        # object_list is list of Circle objects found.
        # robot is single Circle for the robot position
        # robot_markers is 2-elem list of Circle objects for robot markers
        object_list = tracker.find_circles(img_hsv.copy(), tracker.track_colors,
                                           tracker.num_objects)
        robot, robot_markers = tracker.find_robot_system(img_hsv)
        walls = tracker.get_rails(img_hsv, robot_markers, colors.Yellow)
        planner.walls = walls

        # Get the line/distances between the robot markers
        # robot_axis is Line object between the robot axis markers
        # points is list of Point objects of closest intersection w/ robot axis
        # distances is a list of distances of each point to the robot axis
        robot_axis = utils.line_between_circles(robot_markers)
        points, distances = utils.distance_from_line(object_list, robot_axis)
        planner.robot_axis = robot_axis

        ######## TRAJECTORY PLANNING ########
        # get closest object and associated point, generate trajectory
        closest_obj_index = utils.min_index(distances)  # index of min value
        closest_line = None
        closest_pt = None
        if closest_obj_index is not None:
            closest_obj = object_list[closest_obj_index]
            closest_pt = points[closest_obj_index]
            closest_line = utils.get_line(closest_obj, closest_pt)  # only for viewing
            planner.add_point(closest_obj)

        # Get trajectory - list of elements for bounces, and final line traj
        # Last line intersects with robot axis
        traj_list = planner.get_trajectory_list(colors.Cyan)
        traj = planner.traj

        ######## SEND DATA TO CLIENT ########
        if packet_cnt != PACKET_DELAY:
            packet_cnt = packet_cnt + 1
        else:
            packet_cnt = 0  # reset packet counter

            # error checking to ensure will run properly
            if len(robot_markers) != 2 or robot is None or closest_pt is None:
                pass
            elif server:
                try:
                    if motorcontroller_setup is False:
                        # send S packet for motorcontroller setup
                        motorcontroller_setup = True
                        ######## SETUP MOTORCONTROLLER ########
                        axis_pt1 = robot_markers[0].to_pt_string()
                        axis_pt2 = robot_markers[1].to_pt_string()
                        data = 'SM ' + axis_pt1 + ' ' + axis_pt2 + ' ' + robot.to_pt_string()
                        print(data)
                        connection.sendall(data.encode())  # sockets send bytes in Python 3
                    # setup is done, send packet with movement data
                    else:
                        obj_robot_dist = utils.get_pt2pt_dist(robot, closest_obj)
                        print('dist: ' + str(obj_robot_dist))  # USE FOR CALIBRATION

                        ######## SOLENOID ACTIVATION CODE ########
                        # check if solenoid should fire
                        if obj_robot_dist <= SOL_DIST_THRESH:
                            # fire solenoid, dont move
                            print('activate solenoid!')
                            # TODO SEND SOLENOID ACTIVATE

                        ######## MOTOR CONTROL ########
                        # get safety parameters
                        rob_ax1_dist = utils.get_pt2pt_dist(robot_markers[0], robot)
                        rob_ax2_dist = utils.get_pt2pt_dist(robot_markers[1], robot)
                        axis_length = utils.get_pt2pt_dist(robot_markers[0],
                                                           robot_markers[1])

                        # ensure within safe bounds of motion relative to axis
                        # if rob_ax1_dist/axis_length <= AXIS_SAFETY_PERCENT or \
                        #         rob_ax2_dist/axis_length <= AXIS_SAFETY_PERCENT:
                        #     # in danger zone, kill motor movement
                        #     print('INVALID ROBOT LOCATION: stopping motor')
                        #     data = 'KM'
                        #     connection.sendall(data.encode())

                        # if in danger zone near axis edge, move towards other edge
                        if rob_ax1_dist / axis_length <= AXIS_SAFETY_PERCENT:
                            print('INVALID ROBOT LOCATION')
                            data = 'MM ' + robot.to_pt_string() + ' ' + \
                                robot_markers[1].to_pt_string()
                        elif rob_ax2_dist / axis_length <= AXIS_SAFETY_PERCENT:
                            print('INVALID ROBOT LOCATION')
                            data = 'MM ' + robot.to_pt_string() + ' ' + \
                                robot_markers[0].to_pt_string()
                        # check if robot should stop moving
                        elif obj_robot_dist <= MOVE_DIST_THRESH:  # obj close to robot
                            # Send stop command, obj is close enough to motor to hit
                            data = 'KM'
                            print(data)
                            connection.sendall(data.encode())
                        # Movement code
                        else:  # far enough so robot should move
                            #### FOR TRAJECTORY ESTIMATION ####
                            # if planner.traj is not None:
                            #     axis_intersect = shapes.Point(planner.traj.x2, planner.traj.y2)
                            #     # Clamp the point to send to the robot axis
                            #     traj_axis_pt = utils.clamp_point_to_line(
                            #         axis_intersect, robot_axis)
                            #     data = 'D ' + robot.to_pt_string() + ' ' + traj_axis_pt.to_string()
                            #     connection.sendall(data.encode())

                            #### FOR CLOSEST POINT ON AXIS ####
                            if closest_pt is not None and robot is not None:
                                # if try to move more than length of axis, stop instead
                                if utils.get_pt2pt_dist(robot, closest_pt) > axis_length:
                                    print('TRYING TO MOVE OUT OF RANGE')
                                    data = 'KM'
                                    print(data)
                                    connection.sendall(data.encode())
                                else:
                                    data = 'MM ' + robot.to_pt_string() + ' ' + \
                                        closest_pt.to_string()
                                    print(data)
                                    connection.sendall(data.encode())
                except IOError:
                    pass  # don't send anything

        ######## ANNOTATE FRAME FOR VISUALIZATION ########
        frame = gfx.draw_lines(img=frame, line_list=walls)
        frame = gfx.draw_robot_axis(img=frame, line=robot_axis)  # draw axis line
        frame = gfx.draw_robot(frame, robot)  # draw robot
        frame = gfx.draw_robot_markers(frame, robot_markers)  # draw markers
        frame = gfx.draw_circles(frame, object_list)  # draw objects
        # eventually won't need to print this one
        frame = gfx.draw_line(img=frame, line=closest_line)  # closest obj->axis
        # draw full set of trajectories, including bounces
        #frame = gfx.draw_lines(img=frame, line_list=traj_list)
        #frame = gfx.draw_line(img=frame, line=traj)  # for no bounces
        frame = gfx.draw_point(img=frame, pt=closest_pt)
        #frame = gfx.draw_line(frame, planner.debug_line)  # for debug

        ######## FPS COUNTER ########
        fps_timer.get_fps()
        fps_timer.display(frame)

        ######## DISPLAY FRAME ON SCREEN ########
        cv2.imshow(tracker.window_name, frame)
        # quit by pressing q
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # release capture
    cap.stop()  # WEBCAM
    #cap.release()  # for testing w/o webcam
    cv2.destroyAllWindows()
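The wire protocol implied above is a small set of space-separated ASCII commands, reconstructed here from the calls in this function (not from a spec):

# 'SM <axis_pt1> <axis_pt2> <robot_pt>'  -- motorcontroller setup
# 'MM <robot_pt> <target_pt>'            -- move the robot toward the target point
# 'KM'                                   -- kill/stop motor movement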
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=50000,
                help="# of frames to loop over for FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
                help="Whether or not frames should be displayed")
args = vars(ap.parse_args())

# create a *threaded* video stream, allow the camera sensor to warm up,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = ColorSepPVStream().start()
time.sleep(2.0)
fps = FPS().start()

ser = serial.Serial(
    #port='/dev/ttyACM0',
    port='/dev/ttyAMA0',
    baudrate=57600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1
)

# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
    # grab the frame from the threaded video stream and resize it
pass  #print("waiting for frame queue")
print("import timeout")

queue_lock = Lock()
frame_queue = Queue()
stream = Process(target=import_image, args=(frame_queue, queue_lock))
time.sleep(1)
stream.start()
frames = 0

# loop over some frames
fps = FPS()
fps.start()
timeOut = 0
while frames < 300:
    #print("Main Thread Queue - > ", frame_queue.qsize())
    with queue_lock:
        if not frame_queue.empty():
            timeOut = 0
            frame = frame_queue.get()
            sal = MPSaliency(frame)
            sal_map = sal.get_saliency_map()
            #sal_frame = (sal_map * 255).round().astype(np.uint8)