def __init__(self):
    self.cam = camera.Camera()
    self.all_objects = pygame.sprite.Group()
    tiled_map = load_pygame("./resources/deepy_tilemap.tmx")
    self.tiles = pygame.sprite.Group()
    for i in range(tiled_map.width):
        for j in range(tiled_map.height):
            sur = tiled_map.get_tile_image(i, j, 0)
            if sur is not None:
                sprite = pygame.sprite.Sprite()
                sprite.surf = sur
                sprite.rect = pygame.Rect(i * 8, j * 8, 8, 8)
                self.tiles.add(sprite)
    p = tiled_map.get_object_by_name("Player")
    self.Player = objects.Submarino(self.tiles, pygame.Vector2(p.x, p.y))
    self.all_objects.add(self.Player)
    self.vignette = pygame.image.load("./resources/vig.png")
    self.dialog = dialog.Dialog("Olá capitão", "bom dia", "teste")
def __init__(self):
    # stiffness in Nm/rad: [20,50,15,25]
    self.settings_r = hr.MekaArmSettings(
        stiffness_list=[0.1939, 0.6713, 0.748, 0.7272, 0.8])
    # self.settings_r = hr.MekaArmSettings(stiffness_list=[0.2,1.0,1.0,0.4,0.8])
    self.settings_stiff = hr.MekaArmSettings(
        stiffness_list=[0.8, 1.0, 1.0, 1.0, 0.8])
    self.firenze = hr.M3HrlRobot(connect=True,
                                 right_arm_settings=self.settings_stiff)
    self.hok = hs.Hokuyo('utm', 0, flip=True)
    self.thok = ths.tilt_hokuyo('/dev/robot/servo0', 5, self.hok,
                                l1=0., l2=-0.055)
    self.cam = camera.Camera('mekabotUTM')
    self.set_camera_settings()
    self.fit_circle_lock = RLock()
    threading.Thread.__init__(self)
def record(camera_config):
    # target_dir = config.TRAIN_TARGET_DIR
    target_dir = "{}/{}/{}/".format(TRAIN_TARGET_DIR,
                                    camera_config['person'],
                                    camera_config['label'])
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
        # os.makedirs(target_dir)
    folders = os.listdir(target_dir)
    target_dir = "{}{}".format(target_dir, 'batch_{}/'.format(len(folders)))
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
        # os.makedirs(target_dir, exist_ok=True)
    target_paths = [
        "{}img_{}.jpg".format(target_dir, i)
        for i in range(camera_config.get('duration') *
                       camera_config.get('framerate', FRAMERATE))
    ]
    # exit()
    pi_camera = camera.Camera(camera_config)
    pi_camera.capture_sequence(target_paths)
def display_params(self, iar):
    if not self.vo:
        matrix = []  # Matrix will be in row,column order
        section = ""
        in_proj = 0
        for l in iar.data.split('\n'):
            if len(l) > 0 and l[0] == '[':
                section = l.strip('[]')
            ws = l.split()
            if ws != []:
                if section == "right camera" and ws[0].isalpha():
                    in_proj = (ws[0] == 'proj')
                elif in_proj:
                    matrix.append([float(s) for s in l.split()])
        Fx = matrix[0][0]
        Fy = matrix[1][1]
        Cx = matrix[0][2]
        Cy = matrix[1][2]
        Tx = -matrix[0][3] / Fx
        self.params = (Fx, Fy, Tx, Cx, Cx, Cy)
        cam = camera.Camera(self.params)
        self.vo = VisualOdometer(cam)
        self.started = None
def __init__(self, user, args, cfg):
    wx.Frame.__init__(self, None, style=wx.CAPTION | wx.CLOSE_BOX)
    self.user = user
    self.cfg = cfg
    self.args = args
    self.timer = None
    self.pre_on_timer = 0
    self.camera = camera.Camera()
    self.res = None
    self.foodxml = None
    self.inited = False
    self.EndTime = None
    self.frame = 0
    self.dir = 0
    self.power = 0
    self.sendMoveTime = 0
    self.ballx = 0
    self.bally = 0
    self.show_line = True
    self.init()
def __init__(self, gl_format=None):
    if gl_format is None:
        # using opengl 3.3 core profile
        gformat = QGLFormat()
        gformat.setVersion(3, 3)
        gformat.setProfile(QGLFormat.CoreProfile)
    super(GLWindow, self).__init__(gformat)
    self.__timer = QElapsedTimer()
    self.__timer.start()
    self.camera = camera.Camera(0.0, 0.0, 3.0)
    self.__lastX = 400
    self.__lastY = 300
    self.__firstMouse = True
    self.__deltaTime = 0.0
    self.__lastTime = 0.0
    self.blinn = False
    # set this to False if camera rotation should only be active
    # while a mouse button is pressed
    self.setMouseTracking(True)
def __init__(self, gl_format=None):
    if gl_format is None:
        # using opengl 3.3 core profile
        gformat = QGLFormat()
        gformat.setVersion(3, 3)
        gformat.setProfile(QGLFormat.CoreProfile)
    super(GLWindow, self).__init__(gformat)
    self.__timer = QElapsedTimer()
    self.__timer.start()
    self.camera = camera.Camera(0.0, 0.0, 3.0)
    self.__lastX = self.width() / 2.0
    self.__lastY = self.height() / 2.0
    self.__firstMouse = True
    self.lightPos = np.array([1.2, 1.0, 2.0], np.float32)
    self.__deltaTime = 0.0
    self.__lastTime = 0.0
    # set this to False if camera rotation should only be active
    # while a mouse button is pressed
    self.setMouseTracking(True)
def __init__(self, screen_size, gravity=(0.0, -9.0), ppm=100.0,
             renderer='pygame'):
    """ Init the world with boundaries and gravity, and init colors.

        Parameters:
          screen_size .. (w, h) -- screen size in pixels [int]
          gravity ...... (x, y) in m/s^2 [float] default: (0.0, -9.0)
          ppm .......... pixels per meter [float] default: 100.0
          renderer ..... which drawing method to use (str) default: 'pygame'

        Return: class Elements()
    """
    self.set_screenSize(screen_size)
    self.set_drawingMethod(renderer)

    # Create Subclasses
    self.add = add_objects.Add(self)
    self.callbacks = callbacks.CallbackHandler(self)
    self.camera = camera.Camera(self)

    # Gravity + Bodies will sleep on outside
    self.gravity = gravity
    self.doSleep = True

    # Create the World
    self.world = box2d.b2World(self.gravity, self.doSleep)
    bodyDef = box2d.b2BodyDef()
    self.world.groundBody = self.world.CreateBody(bodyDef)

    # Init Colors
    self.init_colors()

    # Set Pixels per Meter
    self.ppm = ppm
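The docstring above already spells out the constructor's parameters; for orientation, a minimal usage sketch follows. The `elements` module path is an assumption taken from the docstring's "Return: class Elements()", and the `Step()` call assumes the standard pybox2d `b2World` API.

# Minimal usage sketch (assumptions: module name `elements`, pybox2d b2World.Step()).
import elements

world = elements.Elements(screen_size=(640, 480),  # pixels
                          gravity=(0.0, -9.0),     # m/s^2
                          ppm=100.0,               # pixels per meter
                          renderer='pygame')
world.world.Step(1.0 / 60.0, 10, 8)  # advance the underlying b2World one frame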
def do_demo(filename='demodata.npy', cam=None, look_at=None, sealevel=0):
    screen = pygame.display.set_mode(RESOLUTION, pygame.DOUBLEBUF)
    scene = m.Map(filename, sealevel)
    if cam is None:
        cam = c.Camera(position=np.array([
            scene.positions[:, 0].mean(),
            -2 * np.ceil(scene.positions[:, 2].max()) - 9.0,
            2 * np.ceil(scene.positions[:, 2].max()) + 6.0
        ]), screen=c.Screen(resolution=np.array(RESOLUTION)))
    shader = s.Shader(cam)
    if look_at is not None:
        cam.look_at_point(look_at)
    # pixels = cam.get_screen_coordinates(scene.positions)
    colours = shader.apply_lighting(scene.positions, scene.normals,
                                    scene.colours.copy())
    patches, depth = cam.get_screen_coordinates(scene.patches)
    order = np.argsort(-((scene.positions - cam.position)**2).mean(-1))
    for n in order:
        # Use this to render point cloud in uniform colour:
        # screen.set_at(np.round(pixels[n]).astype(np.int), (204, 0, 0))
        # Use this to render point cloud in shaded colours (not pretty):
        # screen.set_at(np.round(pixels[n]).astype(np.int), colours[n, :])
        # Use this to render patches:
        pygame.draw.polygon(screen, colours[n], patches[n])
    pygame.display.flip()
    return scene, cam
def __init__(self, w, h):
    self.G = 6.67384 * (10**-11)
    self.bodies = []
    self.camera = camera.Camera(w, h)
    self.w = w
    self.h = h
    self.FPS = 100
    self.update_count = 0
    self.updates_per_render = 15

    # Renderer object
    self.renderer = renderer.Renderer(w, h)

    # dt is the timestep in seconds
    self.dt = IntPointer(30000.0 * self.FPS)

    # Stack of mouse objects. The first item in the list should be
    # prioritised last
    self.mouse_objects_stack = [self.camera]

    # List of infolabels that exist
    self.infolabels = []

    # Add all gui elements
    self.add_gui()

    self.body_being_created = None
    self.editing_body = False

    # The current mouse object that is being clicked
    self.grabbed_mouse_object = None

    # Stores the options flags (e.g. render_tracers, render_planet_labels)
    self.flags = (flags_file.RENDER_TRACERS + flags_file.RENDER_PLANET_LABELS +
                  flags_file.SHADOWS + flags_file.REALISTIC)

    self.planet_shadow = pygame.image.load("../res/shadow.png")
def load_merton_data(data_path='./MertonCollege', show_detail=False):
    """
    Load the "Merton College I" data from
    https://www.robots.ox.ac.uk/~vgg/data/mview/

    Args:
        data_path (str): path to the directory the data was extracted to;
            assumed to contain the image, 2D and 3D subdirectories.

    Returns:
        corr: for each of the 717 3D points, the index of the corresponding
            2D point listed in 2D/001.corners.
    """
    # load the images
    im1 = ip.imread(os.path.join(data_path, 'image/001.jpg'))
    im2 = ip.imread(os.path.join(data_path, 'image/002.jpg'))
    if show_detail:
        ip.show_img(im1)
        ip.show_img(im2)

    # load the 2D points of each view into a list
    points2D = []
    for i in range(3):
        file_path = os.path.join(data_path, '2D/{:0=3}.corners'.format(i + 1))
        points2D.append(np.loadtxt(file_path).T)

    # load the 3D points
    points3D = np.loadtxt(os.path.join(data_path, '3D/p3d')).T

    # load the correspondences
    corr = np.genfromtxt(os.path.join(data_path, '2D/nview-corners'),
                         dtype='int', missing_values='*')

    # load the camera parameters into Camera objects
    P = []
    for i in range(3):
        istrct_p = np.loadtxt(os.path.join(data_path, '2D/{:0=3}.P'.format(i + 1)))
        P.append(camera.Camera(istrct_p))
    return im1, im2, points2D, points3D, corr, P
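A short, hypothetical usage sketch for this loader; it assumes `Camera.project()` accepts 4xN homogeneous points, matching the `cam1.project(X)` calls in the later snippets of this collection.

# Hypothetical usage sketch; Camera.project() on homogeneous points is an
# assumption based on the other examples here, not on this loader itself.
import numpy as np

im1, im2, points2D, points3D, corr, P = load_merton_data()
X = np.vstack((points3D, np.ones(points3D.shape[1])))  # 3xN -> 4xN homogeneous
x0 = P[0].project(X)  # reproject every 3D point into view 001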
def main():
    profile_list = [
        AnimalProfile("0782B18622", "Jim Kirk", 0, 0, session_save_path),
        AnimalProfile("0782B182D6", "Yuri Gagarin", 0, 0, session_save_path),
        AnimalProfile("0782B17DE9", "Elon Musk", 0, 0, session_save_path),
        AnimalProfile("0782B18A1E", "Buzz Aldrin", 0, 0, session_save_path),
        AnimalProfile("5643564457", "Captain Picard", 0, 0, session_save_path)
    ]

    # Initializing servo, camera, RFID reader and session controller.
    servo_1 = servo.Servo(servo_PWM_BCM_pin_number)
    camera_1 = camera.Camera(fourcc, camera_index, camera_fps, camera_res)
    RFID_1 = RFID_Reader(serial_interface_path, baudrate,
                         RFID_proximity_BCM_pin_number)
    session_controller = SessionController(profile_list, serial, servo_1,
                                           camera_1, RFID_1)

    # Main loop until I implement a GUI or something
    while True:
        RFID_code = session_controller.RFID_reader.listenForRFID()
        profile = session_controller.searchForProfile(RFID_code)
        session_controller.startSession(profile)
def setUI(self):
    w = QWidget()
    f = QFormLayout()

    btn = QPushButton("Başlat")
    btn.clicked.connect(self.st)
    f.addRow(btn)

    self.cb = QComboBox()
    self.cb.addItems(self.colors.keys())
    self.cb.currentIndexChanged.connect(self.set_color)
    f.addRow(self.cb)

    self.sp = QSpinBox()
    self.sp.setMaximum(5000)
    self.sp.valueChanged.connect(self.set_timer)
    f.addRow(self.sp)

    self.cam = c.Camera(self)
    f.addRow(self.cam)

    w.setLayout(f)
    self.setCentralWidget(w)
def __init__(self, a_world):
    # super(MainWnd,self).__init__()
    Window.__init__(self)

    # init GL options
    glEnable(GL_CULL_FACE)
    glFrontFace(GL_CCW)
    glCullFace(GL_BACK)
    glPolygonMode(GL_FRONT, GL_FILL)
    glPolygonMode(GL_BACK, GL_LINE)
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)

    # create the world
    self.the_world = a_world

    # create the camera
    self.the_camera = camera.Camera()
    self.the_camera.x = 400
    self.the_camera.y = -350
    self.the_camera.z = 80
def __init__(self, detection_callback, detection_fps=2.0,
             resolution: (int, int) = (1280, 720), camera_id: int = 0):
    self._camera = camera.Camera(resolution, camera_id)
    time_start = timeit.default_timer()
    print("Initializing Darknet")
    self._fps = detection_fps
    self._run = True
    self._frame_lock = threading.Lock()
    self._detection_callback = detection_callback
    self.output_resolution = (1280, 720)

    # The latest detected frame
    self._latest_frame: numpy.ndarray = None
    self.paused = True

    # Darknet variables
    self._darknet_resolution = (0, 0)
    self._darknet_metaMain = None
    self._darknet_netMain = None
    self._darknet_image = None
    self._initialize_darknet()

    self._thread = None
    print(f"Darknet initialized in {timeit.default_timer() - time_start:.2f} seconds")
def fisheye_test():
    cam = camera.Camera()
    cam.load('test/camera_fisheye.yaml')
    if not os.path.exists('test/out'):
        os.mkdir('test/out')
    img = cv2.imread('test/opencv_distorted.jpg')
    assert isinstance(img, np.ndarray)
    img_und = cam.undistort_image(img)
    cv2.imwrite('test/out/fisheye_undistorted.png', img_und)

    img_points_und = cam.undistort(img_points)
    print 'Undistorted'
    print img_points_und

    for point in img_points.T.astype(int):
        cv2.circle(img, tuple(point), 3, (0, 255, 0), -1)
    cv2.imwrite('test/out/fisheye_test_points.jpg', img)

    for point in img_points_und.T.astype(int):
        cv2.circle(img_und, tuple(point), 3, (0, 255, 0), -1)
    cv2.imwrite('test/out/fisheye_test_points_und.jpg', img_und)

    img_points_dist = cam.distort(img_points_und)
    print 'Distorted back:'
    print img_points_dist
    print 'Undistortion / distortion errors:'
    print np.linalg.norm(img_points - img_points_dist, axis=0)

    image_coords = cam.world_to_image(np.array([[0., 0., 0.]]).T)
    print image_coords
    print cam.image_to_world(image_coords, 0.)
def __init__(self, **args):
    super().__init__(**args)

    # create world
    self.world = world.World()

    # create shader
    self.shader = shader.Shader("vert.glsl", "frag.glsl")
    self.shader_sampler_location = self.shader.find_uniform(
        b"texture_array_sampler")
    self.shader.use()

    # pyglet stuff
    pyglet.clock.schedule_interval(
        self.update, 1.0 / 10000)  # set the update interval as small as possible
    self.mouse_captured = False

    # camera stuff
    self.camera = camera.Camera(self.shader, self.width, self.height)
def main(dummy=False, debug=False):
    # setup camera
    CAPTURE_WIDTH = 32
    CAPTURE_HEIGHT = 32
    if dummy:
        import dummy_camera
        cam = dummy_camera.DummyCamera(CAPTURE_WIDTH, CAPTURE_HEIGHT,
                                       debug=debug)
    else:
        import camera
        cam = camera.Camera(CAPTURE_WIDTH, CAPTURE_HEIGHT, debug=debug)
    cam.setup()

    # setup drink detector
    dd = drink_detect.DrinkDetector(debug=debug)
    dd.setup()

    while True:
        img = cam.captureImg()
        # logger.debug(img)
        answer = dd.detect(img)
        logger.debug(answer)
        time.sleep(1)
def main():
    labels = load_labels()
    interpreter = make_interpreter()
    interpreter.allocate_tensors()
    cam = camera.Camera()
    stream = cam.get_stream()
    print("you can press Q button to terminate the process!")
    while True:
        image = load_image(stream)
        scale = detect.set_input(
            interpreter, image.size,
            lambda size: image.resize(size, Image.ANTIALIAS))
        interpreter.invoke()
        objs = detect.get_output(interpreter, 0.4, scale)
        draw_objects(ImageDraw.Draw(image), objs, labels)
        cv.imshow("Debug", np.asarray(image))
        if cv.waitKey(10) & 0xFF == ord('q'):
            break
    cam.terminate()
def __init__(self):
    self.window = tk.Tk()
    # title() must be called, not assigned, to set the window title
    self.window.title("luci biceps rep counter")

    self.counters = [1, 1]
    self.rep_counter = 0

    self.extended = False
    self.contracted = False
    self.last_prediction = 0

    self.model = model.Model()
    self.counting_enabled = False

    self.camera = camera.Camera()
    self.init_gui()

    self.delay = 15
    self.update()

    self.window.attributes("-topmost", True)
    self.window.mainloop()
rotationInOneSecond = 0.79  # 1.13826
weightMean = 0
visitedLM = [False, False]
turn_counter = 0

# Allocate space for world map
world = np.zeros((500, 500, 3), dtype=np.uint8)

# Draw map
draw_world(est_pose, particles, world)

print "Opening and initializing camera"

#cam = camera.Camera(0, 'macbookpro')
#cam = camera.Camera(0, 'frindo')
cam = camera.Camera(0, 'arlo')

while True:
    #if visitedLM[0] and visitedLM[1]:
    #    # VICTORY
    #    arlo.go_diff(80, 79, 1, 0)
    #    sleep(2)
    #    arlo.stop()
    #    while cv2.waitKey(15) != ord('q'):
    #        continue
    #    break

    # Move the robot according to user input (for testing)
    action = cv2.waitKey(15)
    if action == ord('w'):  # Forward
        velocity += 4.0
            if modals[m].running:
                modals[m].update()
                if modals[m].detected_events:
                    print "DETECTED IN ", m
                    for e in modals[m].detected_events:
                        print "EVENT ", e
                        self.socket.socketIO.emit('detected', {'event': e})
                    modals[m].detected_events = []


if __name__ == '__main__':
    creds_path = os.path.abspath(os.path.join(os.pardir, 'data/config.json'))
    json_data = open(creds_path)
    data = json.load(json_data)
    json_data.close()

    modals = {'audio': audio.Audio(), 'camera': camera.Camera(data['aws'])}
    socket = Socket(modals)
    monitor = Monitor(modals, socket)

    path = os.path.abspath(os.path.join(os.pardir, 'uploads/test.wav'))
    modals['audio'].speak('hello my name is noodle')
    #modals['audio'].play(path)
    modals['camera'].photo("hitest")
    #path = os.path.abspath(os.path.join(os.pardir, 'uploads/test.wav'))
    #os.open(path)
    #print 'opened ', path
    socket.start()
    monitor.start()
X = X[:, infront]

# 3D plot
from mpl_toolkits.mplot3d import axes3d

fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(-X[0], X[1], X[2], 'k.')
ax.axis('off')

# plot the projection of X
import camera

# project 3D points
cam1 = camera.Camera(P1)
cam2 = camera.Camera(P2[ind])
x1p = cam1.project(X)
x2p = cam2.project(X)

fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(x1p[0], x1p[1], x1p[2], 'k.')
ax.axis('off')

fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(x2p[0], x2p[1], x2p[2], 'k.')
ax.axis('off')

# reverse K normalization
def main():
    # create render window
    window = grapher.Grapher()

    # create cube models
    render_models = list()
    render_models.append(cube.Cube())
    render_models.append(cube.Cube())
    render_models.append(cube.Cube())
    render_models.append(cube.Cube())
    render_models[0].set_pos(5, 0, 10)
    render_models[1].set_pos(-5, 0, 10)
    render_models[2].set_pos(5, -4, 13)
    render_models[3].set_pos(-5, -7, 10)

    # create walls
    render_models.append(wall.Wall())
    render_models[4].set_pos(0, -8, 25)
    render_models[4].scale = 8
    render_models.append(wall.Wall())
    render_models[5].set_pos(16, -8, 9)
    render_models[5].set_rot(0, 90, 0)
    render_models[5].scale = 8

    # create camera
    cam = camera.Camera()

    # temp
    r = 0
    s = 0.2

    # main loop
    try:
        while True:
            # clear grapher
            window.clear()

            # render models in render_models list
            for rm in render_models:
                # set global model-space stuff here
                rm.set_rot_delta(r, r, r)

                # transform model and get transformed coordinate pairs
                coords = rm.process(cam, fov)

                # render segments from coordinate pair list
                for i in coords:
                    # gather depth coordinates
                    sz = i[0][2]
                    ez = i[1][2]

                    # if segment is completely behind player, skip rendering
                    if sz < 0 and ez < 0:
                        continue

                    # gather transformed coordinates
                    startx = i[0][0]
                    starty = i[0][1]
                    endx = i[1][0]
                    endy = i[1][1]

                    # if segment is partially behind player, clip segment to
                    # camera's field of view
                    if sz < 0 or ez < 0:
                        # ffffuuuuuuu
                        pass

                    # draw segment
                    window.draw_line(startx, starty, endx, endy)

            # draw debug text
            coords = "Player: (%f, %f, %f)" % (cam.pos_x, cam.pos_y, cam.pos_z)
            pang = "Player angle: %f" % cam.angle
            options = "Options: ROT: %d, SPD: %d" % (r, s)
            window.console_out("3D Engine test - HalfBurntToast")
            window.console_out(coords)
            window.console_out(pang)
            window.console_out(options)

            # update grapher
            window.update()

            # get command input
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_r:
                        r = int(not r)
                    if event.key == pygame.K_p:
                        cam.reset()
                        print("Reset")

            # get movement input
            keys = pygame.key.get_pressed()
            if keys[pygame.K_w]:
                ang = numpy.deg2rad(numpy.abs(360 - cam.angle))
                cam.pos_z -= numpy.cos(ang) * s
                cam.pos_x -= numpy.sin(ang) * s
            if keys[pygame.K_s]:
                ang = numpy.deg2rad(numpy.abs(360 - cam.angle))
                cam.pos_z += numpy.cos(ang) * s
                cam.pos_x += numpy.sin(ang) * s
            if keys[pygame.K_a]:
                ang = numpy.deg2rad(numpy.abs(360 - (cam.angle - 90)))
                cam.pos_z += numpy.cos(ang) * s
                cam.pos_x += numpy.sin(ang) * s
            if keys[pygame.K_d]:
                ang = numpy.deg2rad(numpy.abs(360 - (cam.angle - 90)))
                cam.pos_z -= numpy.cos(ang) * s
                cam.pos_x -= numpy.sin(ang) * s
            if keys[pygame.K_e]:
                cam.angle = (cam.angle - 0.6) % 360
            if keys[pygame.K_q]:
                cam.angle = (cam.angle + 0.6) % 360

            # if not using any input handling, uncomment the line below
            #pygame.event.pump()

            # temp delay. TODO: replace with FPS regulator
            pygame.time.delay(10)
    except KeyboardInterrupt:
        pygame.quit()
velocity = 0.0          # cm/sec
angular_velocity = 0.0  # radians/sec

# Initialize the robot (XXX: You do this)

# Allocate space for world map
world = np.zeros((500, 500, 3), dtype=np.uint8)

# Draw map
draw_world(est_pose, particles, world)

print "Opening and initializing camera"

if DEMO:
    #cam = camera.Camera(0, 'macbookpro')
    cam = camera.Camera(0, 'frindo')


def gaussian_distribution(x, mu, sigma):
    delta = x - mu
    Q = 2 * sigma * sigma
    denote = np.sqrt(Q * np.pi)
    return (np.exp(-(delta * delta) / Q)) / denote


def checkpath(offroad, drivetime):
    global est_pose
    global bubble
    travel = forward_time_2_cm(drivetime)
    travel_frags = travel * 3.0
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 26 16:33:45 2016

@author: user
"""

from PIL import Image
from numpy import *
import sys
sys.path.append('../ch4/')
import camera

# load some images
im1 = array(Image.open('../pcv_data/data/Merton1/images/001.jpg'))
im2 = array(Image.open('../pcv_data/data/Merton1/images/002.jpg'))

# load 2D points for each view to a list
points2D = [loadtxt('../pcv_data/data/Merton1/2D/00' + str(i + 1) + '.corners').T
            for i in range(3)]

# load 3D points
points3D = loadtxt('../pcv_data/data/Merton1/3D/p3d').T

# load correspondences
corr = genfromtxt('../pcv_data/data/Merton1/2D/nview-corners',
                  dtype='int')  # ,missing='*'

# load cameras to a list of Camera objects
P = [camera.Camera(loadtxt('../pcv_data/data/Merton1/2D/00' + str(i + 1) + '.P'))
     for i in range(3)]
import camera
import cv2

cam = camera.Camera("rtsp://*****:*****@192.168.0.40/h264Preview_01_main")
cam.get_frame()

while True:
    frame = cam.get_frame(0.65)
    cv2.imshow("Feed", frame)
    key = cv2.waitKey(1)
    if key == 13:  # 13 is the Enter key
        break

cv2.destroyAllWindows()
cam.end()
import camera

print('testing camera.get_rect')
with open('testdata/get_rect.in', 'r') as f:
    test_count = int(f.readline())
    for i in range(test_count):
        tokens = f.readline().split()
        lat, lon = float(tokens[0]), float(tokens[1])
        zoom = min(int(tokens[2]), camera.MAX_ZOOM_LEVEL)
        width, height = int(tokens[3]), int(tokens[4])
        cam = camera.Camera(lat, lon, zoom, (width, height))
        result = cam.get_rect()
        res_str = 'min_lat={}, min_lon={}, max_lat={}, max_lon={}'.format(
            result.min_lat, result.min_lon, result.max_lat, result.max_lon)
        exp_str = 'min_lat={}, min_lon={}, max_lat={}, max_lon={}'.format(
            tokens[5], tokens[6], tokens[7], tokens[8])
        print('For latitude={}, longitude={}, zoom={}, dimensions={}'.format(
            lat, lon, zoom, (width, height)))
        print('got {}.\nA reasonable result would be {}\n'.format(
            res_str, exp_str))
def do_brand_demo(filename='zdata.npy', sealevel=0.0, steps=42, fps=30,
                  save_fig=False, frames=None):
    import string
    fps_clock = pygame.time.Clock()
    screen = pygame.display.set_mode(RESOLUTION, pygame.DOUBLEBUF)
    scene = m.Map(filename, sealevel, True)
    look_at = np.array([63.5, 63.5, 6.0])
    cam = c.Camera(position=np.array([54.5, 63.5, 6.0]),
                   screen=c.Screen(resolution=np.array(RESOLUTION)))
    shader = s.Shader(cam)
    fighter = o.FireFighter()
    fighter.position = np.array([63.5, 63.5, 6.0])
    house = o.House()
    house.position = np.array([58.5, 67, 2.23])
    house.yaw = np.pi / 8
    angles = np.linspace(0, 2 * np.pi, steps + 1)
    R = np.linalg.norm(cam.position[:2] - look_at[:2])
    for N, a in enumerate(angles):
        screen.fill((0, 0, 0))
        cam.position = np.array(
            [np.sin(a) * R + 63.5, np.cos(a) * R + 63.5, 6.0])
        cam.look_at_point(look_at)
        colours = shader.apply_lighting(scene.positions, scene.normals,
                                        scene.colours.copy())
        patches, depth = cam.get_screen_coordinates(scene.patches)
        order = np.argsort(-((scene.positions - cam.position)**2).mean(-1))
        for n in order:
            # parenthesization fixed to match the two loops below
            if (depth[n].mean() > 0 and (patches[n] >= 0).any() and
                    (patches[n, :, 0] < RESOLUTION[0]).any() and
                    (patches[n, :, 1] < RESOLUTION[1]).any()):
                pygame.draw.polygon(screen, colours[n], patches[n])
        fighter.yaw = a
        fighter.pitch = a
        fighter.roll = a
        colours = shader.apply_lighting(fighter.positions, fighter.normals,
                                        fighter.colours.copy())
        patches, depth = cam.get_screen_coordinates(fighter.patches)
        order = np.argsort(-((fighter.positions - cam.position)**2).mean(-1))
        for n in order:
            if (colours[n, 3] and depth[n].mean() > 0 and
                    (patches[n] >= 0).any() and
                    (patches[n, :, 0] < RESOLUTION[0]).any() and
                    (patches[n, :, 1] < RESOLUTION[1]).any()):
                pygame.draw.polygon(screen, colours[n], patches[n])
        colours = shader.apply_lighting(house.positions, house.normals,
                                        house.colours.copy())
        patches, depth = cam.get_screen_coordinates(house.patches)
        order = np.argsort(-((house.positions - cam.position)**2).mean(-1))
        for n in order:
            if (colours[n, 3] and depth[n].mean() > 0 and
                    (patches[n] >= 0).any() and
                    (patches[n, :, 0] < RESOLUTION[0]).any() and
                    (patches[n, :, 1] < RESOLUTION[1]).any()):
                pygame.draw.polygon(screen, colours[n], patches[n])
        pygame.display.flip()
        if save_fig:
            pygame.image.save(screen,
                              'out/{0}.png'.format(string.zfill(str(N), 2)))
        the_tick = fps_clock.tick(fps)
        print "Frame update time: {0} ms".format(the_tick)
        if frames is not None:
            frames.append(the_tick / 1000.0)
def detect(self, frame, target_points):
    quality_level = 0.01
    min_distance = 10.0
    return VO.harris(frame.rawdata, frame.size[0], frame.size[1], 1000,
                     quality_level, min_distance)


for topic, msg, t in rosrecord.logplayer(filename):
    if rospy.is_shutdown():
        break
    if topic.endswith("videre/cal_params") and not cam:
        print msg.data
        cam = camera.VidereCamera(msg.data)
        (Fx, Fy, Tx, Clx, Crx, Cy) = cam.params
        Tx /= (7.30 / 7.12)
        cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))
        vos = [
            VisualOdometer(cam, feature_detector=FeatureDetectorFast(),
                           descriptor_scheme=DescriptorSchemeSAD(), sba=None),
            VisualOdometer(cam, feature_detector=FeatureDetectorMine(),
                           inlier_error_threshold=1.0,
                           descriptor_scheme=DescriptorSchemeSAD(),
                           sba=(3, 10, 10)),
            #VisualOdometer(cam, feature_detector=FeatureDetectorFast(), descriptor_scheme=DescriptorSchemeSAD(), sba=(3, 8, 10)),
            #VisualOdometer(cam, feature_detector=FeatureDetectorFast(), descriptor_scheme=DescriptorSchemeSAD()),
            #VisualOdometer(cam, feature_detector=FeatureDetectorFast(), descriptor_scheme=DescriptorSchemeSAD(), scavenge=True),
            #VisualOdometer(cam, feature_detector=FeatureDetectorFast(), descriptor_scheme=DescriptorSchemeSAD(), scavenge=True, inlier_thresh=100),
            #VisualOdometer(cam, feature_detector=FeatureDetectorHarris(), descriptor_scheme=DescriptorSchemeSAD()),
            #VisualOdometer(cam, feature_detector=FeatureDetectorFast(), descriptor_scheme=DescriptorSchemeSAD()),
            #VisualOdometer(cam, feature_detector=FeatureDetector4x4(FeatureDetectorFast), descriptor_scheme=DescriptorSchemeSAD()),
        ]
        vo_x = [[] for i in vos]