def __init__(self, beta=0, axis=Up, pos=Zero, beta_f=0, axis_f=Up, length=3,
             velocity=100, up=Up, fwd=Fwd, left=Left, sx=Sx, sy=Sy, near=Near,
             farfov=Far, eye=vector(0, 0, 500), camtype='standard'):
    """Build a worm: load its mesh parts, pose the nose, and attach cameras.

    beta/axis/pos/beta_f/axis_f define the initial placement of the nose
    segment; length is the number of body segments and velocity the crawl
    speed. sx/sy/near/farfov/eye/camtype parameterize the two cameras.
    """
    # Mesh data for the three kinds of worm pieces.
    self.nose_data = wf_data('worm_head_tail.dat')
    self.middle_data = wf_data('worm_middle_segment.dat')
    self.elbow_data = wf_data('worm_elbow.dat')
    # Place the head/tail mesh at the requested pose.
    self.nose = position(self.nose_data, beta, axis, pos, beta_f, axis_f, up, fwd, left)
    self.nose.rotate_by_Up(128)  # NOTE(review): 128 looks like a degrees value -- confirm units
    self.nose.setworld()
    # The Orientation axis of our worm; this is important for figuring out
    # translations and rotations!
    #self.Up = up
    #self.Fwd = fwd
    #self.Left = left
    self.length = length
    self.velocity = velocity
    self.flyVelocity = 10  # speed used by the free-flying camera mode
    #self.flyUp = up
    #self.flyFwd = fwd
    #self.flyLeft = left
    # This is the camera that will follow our worm!
    # Note that the camera position needs appropriate coordinates! based
    # on the initial position of the nose.
    cam_beta = beta
    cam_axis = axis
    cam_pos = pos
    cam_beta_f = beta_f
    cam_axis_f = axis_f
    self.noseCamera = camera(sx, sy, near, farfov, cam_beta, cam_axis, cam_pos, cam_beta_f, cam_axis_f, camtype, fwd, left, up, eye)
    self.flyCamera = camera(sx, sy, near, farfov, cam_beta, cam_axis, cam_pos, cam_beta_f, cam_axis_f, camtype, fwd, left, up, eye)
    # Camera 0 follows the nose; camera 1 is the free "fly" camera.
    self.cameraList = [self.noseCamera, self.flyCamera]
    self.curCamera = 0
    self.camera = self.cameraList[self.curCamera]
    # This is where we'll keep the segments of our worm.
    self.segments = []
def main(self):
    """Create the GLFW window, load assets, and run the render loop."""
    glfw.init()
    self.window = glfw.create_window(1024, 768, "Car racer", None, None)
    glfw.make_context_current(self.window)
    glfw.swap_interval(1)  # vsync
    glfw.set_key_callback(self.window, self.onkey)
    glfw.set_framebuffer_size_callback(self.window, self.onresize)
    self.car = model('models/Chevrolet_Camaro_SS_Low.obj')
    self.car.scale = vec3(car_size)
    # self.car = model('cube')
    # self.car.scale = vec3(0.08,0.01,0.18)
    self.envbox = envbox()
    self.road = road(car_speed, lanes, arc_len, lane_width, max_y)
    self.gen_car_states()
    # Fixed chase camera slightly above and behind the origin.
    self.cam = camera()
    self.cam.position = vec3(0, 3, 0)
    self.cam.target = vec3(0, 0, -5)
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    # Main loop: exit when the window closes or self.done is set (e.g. by onkey).
    # NOTE(review): no swap_buffers visible here -- presumably done inside
    # __draw_frame; confirm.
    while not glfw.window_should_close(self.window) and not self.done:
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.__draw_frame()
        glfw.poll_events()
def tilt():
    """Read both tilt sensors, judge posture, and drive the warning LED.

    Every 10th consecutive bad-posture reading prints a warning and
    triggers the camera.
    """
    global cnt, flag2, dis
    reading1 = GPIO.input(tilt1)
    reading2 = GPIO.input(tilt2)
    chk = calc(reading1, reading2, dis)
    # Only react once the session flag has been raised.
    if flag2 != True:
        return
    if chk != 1:
        # Posture is fine: reflect the current status on the LED.
        ledctl(chk)
        return
    ledctl(1)
    cnt += 1
    if cnt % 10 == 0:
        print("빨간불")
        camera()
def __init__(self, file):
    """Create an empty scene and populate it from *file* (a JSON scene
    description); with no file, fall back to a default camera."""
    self.objects, self.materials, self.textures = [], [], []
    self.camera = None
    if file == "":
        # Default camera: at the origin, looking down -Z, 90 degree FOV,
        # aspect ratio taken from the global render config.
        self.camera = camera(vector(0, 0, 0), vector(0, 0, -1),
                             vector(0, 1, 0), 90, YA.WIDTH / YA.HEIGHT)
    else:
        self.loadscene(file)
def tilt():
    """Poll the two tilt sensors, classify posture, and control the LEDs;
    every 10th consecutive bad reading warns and takes a camera shot."""
    global cnt
    global flag2  # set after the humidity change +1, i.e. once seated for a while
    global dis
    t1 = GPIO.input(tilt1)  # tilt sensor 1 reading (True at 90 degrees or more)
    t2 = GPIO.input(tilt2)  # tilt sensor 2 reading
    # Judge posture (neck / back bend) from the tilt and distance readings.
    chk, chk2 = calc(t1, t2, dis)
    print(chk, chk2)
    if flag2 == True:  # after humidity change +1 (a while after sitting down)
        # NOTE(review): 23 appears to be the "bad posture" code returned by
        # calc() -- confirm against its implementation.
        if chk == 23 and chk2 == 23:
            ledctl(chk, chk2)
            cnt += 1
            if cnt % 10 == 0:
                print("빨간불")  # "red light" warning (runtime string kept as-is)
                camera()
        else:
            ledctl(chk, chk2)
def __init__(self, map):
    """Initialize the play state: clear all input flags, then bind the
    player, map, tiles and camera onto this state instead of leaving
    them floating in the 'global' realm."""
    # Every input / camera-scroll flag starts cleared.
    for flag in ('UP', 'DOWN', 'LEFT', 'RIGHT', 'RUNNING', 'CLICK',
                 'camleft', 'camright', 'scrollup', 'scrolldown'):
        setattr(self, flag, False)
    self.player = Tittle()
    # NOTE: level id '2' is hard-coded; the *map* argument is not used here.
    self.map = maploader.map('2')
    self.TILES, self.tiles = self.map.build()
    self.camera = camera(complex_camera, self.map.width(), self.map.height())
def new(self):
    """Set up a fresh game: sprite groups, walls and player from the ASCII
    map, and a camera sized to the map."""
    self.all_sprites = pg.sprite.Group()
    self.walls = pg.sprite.Group()
    # Walk the map grid: '1' spawns a wall, 'P' places the player.
    for row_idx, row_str in enumerate(self.map.data):
        for col_idx, cell in enumerate(row_str):
            if cell == '1':
                Wall(self, col_idx, row_idx)
            elif cell == 'P':
                self.player = Player(self, col_idx, row_idx)
    self.camera = camera(self.map.width, self.map.height)
def main(filename: str = 'output', output_res: tuple = (200, 100), num_samples=100):
    """Render the random-spheres scene and save it as <filename>.png.

    output_res is (width, height) in pixels; *num_samples* jittered rays
    are traced per pixel for antialiasing.
    """
    # Cleanup vs. original: dropped the no-op `num_samples = num_samples`,
    # the unused `R` constant, and the commented-out scene experiments.
    nx, ny = output_res
    output = np.zeros((nx, ny, 3))
    hit_object_list = random_scene()
    # Camera overlooking the scene: 20 degree vertical FOV with slight
    # depth of field (aperture 0.1, focal plane 10 units away).
    lookfrom = vec3(13, 2, 3)
    lookat = vec3(0, 0, 0)
    dist_to_focus = 10
    aperture = 0.1
    cam = camera(lookfrom, lookat, vec3(0, 1, 0), 20, float(nx) / float(ny), aperture, dist_to_focus)
    with tqdm(total=ny * nx) as pbar:
        for j in range(ny - 1, -1, -1):  # top scanline first
            for i in range(0, nx):
                col = vec3(0, 0, 0)
                for s in range(0, num_samples):
                    # Jitter the sample inside the pixel.
                    u = float(i + random.random()) / float(nx)
                    v = float(j + random.random()) / float(ny)
                    r = cam.get_ray(u, v)
                    col += color(r, hit_object_list, 0)
                pbar.update(1)
                col /= float(num_samples)
                # Gamma-2 correction, then scale to 8-bit.
                col = vec3(math.sqrt(col[0]), math.sqrt(col[1]), math.sqrt(col[2]))
                ir = int(255.99 * col.x0)
                ig = int(255.99 * col.x1)
                ib = int(255.99 * col.x2)
                output[i, j, :] = np.array([ir, ig, ib])
    plt.imsave(filename + ".png", np.rot90(output).astype(np.uint8))
def run(self):
    """Poll the motion sensor forever; on detection run the RFID check
    and then capture an image with the camera."""
    while True:
        if GPIO.input(7):  # motion/PIR sensor on pin 7
            print("Intruder detected")
            mob.join()  # wait for the notification worker to finish
            rfidob = rfid()
            rfidob.start()
            rfidob.join()
            camob = camera()
            # BUG FIX: the original evaluated `camob.start` without calling
            # it, so the camera thread never actually ran.
            camob.start()
            time.sleep(2)
        else:
            print("No intruders")
            time.sleep(1)
def main(filename: str, output_res: tuple):
    """Render the random-spheres scene and write it as a plain-text PPM
    (P3) file named <filename>.ppm.

    output_res is (width, height); 256 jittered rays per pixel.
    """
    nx, ny = output_res[0], output_res[1]
    num_samples = 256
    hit_object_list = random_scene()
    # Camera overlooking the scene: 20 degree FOV, slight depth of field.
    lookfrom = vec3(13, 2, 3)
    lookat = vec3(0, 0, 0)
    dist_to_focus = 10
    aperture = 0.1
    cam = camera(lookfrom, lookat, vec3(0, 1, 0), 20, float(nx) / float(ny), aperture, dist_to_focus)
    # `with` guarantees the file is closed even if rendering raises.
    with open(filename + '.ppm', 'w') as f:
        f.write("P3\n" + str(nx) + " " + str(ny) + "\n255\n")
        with tqdm(total=ny * nx) as pbar:
            # BUG FIX: the original looped `range(ny - 1, 0, -1)`, stopping
            # before j == 0 and emitting one scanline fewer than the PPM
            # header declares (ny rows), producing a malformed file.
            for j in range(ny - 1, -1, -1):
                for i in range(0, nx):
                    col = vec3(0, 0, 0)
                    for s in range(0, num_samples):
                        # Jitter the sample inside the pixel.
                        u = float(i + random.random()) / float(nx)
                        v = float(j + random.random()) / float(ny)
                        r = cam.get_ray(u, v)
                        col += color(r, hit_object_list, 0)
                    pbar.update(1)
                    col /= float(num_samples)
                    # Gamma-2 correction, then scale to 0..255.
                    col = vec3(math.sqrt(col[0]), math.sqrt(col[1]), math.sqrt(col[2]))
                    ir = int(255.99 * col.x0)
                    ig = int(255.99 * col.x1)
                    ib = int(255.99 * col.x2)
                    f.write(str(ir) + " " + str(ig) + " " + str(ib) + "\n")
def __init__(self):
    """Wire up the tank hardware (wheels, sensors, camera, lights) and
    build the state machine and gamepad controller."""
    pca.startup()
    self._gpmacaddress = ''  # e.g. 'E4:17:D8:2C:08:68'
    self._buttonpressed = set()  # A set used to hold button pressed states, used for debounce detection and button combos.
    self._left = wheel(pca, tank._LS, tank._LF, tank._LB, tank._SPEEDOL)
    self._left.name = 'lw'
    self._right = wheel(pca, tank._RS, tank._RF, tank._RB, tank._SPEEDOR)
    self._right.name = 'rw'
    self._lights = 0.0
    self._onestick = False
    self._sides = sides(tank._LSIDE, tank._RSIDE)  # Left/Right Pin #s for trigger/echo.
    self._front = vl53.create()
    self._camera = camera(pca, tank._CAMERAPAN, tank._CAMERATILT)
    # Initialize the states.
    self._states = {}
    self._states[tank._CONNECT] = state.create(u=self._connectUD)
    self._states[tank._HUMAN] = state.create(u=self._humanUD, i=self._humanIN)
    # BUG FIX: the original had a '.' instead of ',' between the `i` and `e`
    # keyword arguments (`i = self._cameraIN. e = ...`), a SyntaxError.
    self._states[tank._CAMERACONTROL] = state.create(u=self._cameraUD, i=self._cameraIN, e=self._cameraEND)
    self._states[tank._TURNING] = state.create()
    self._states[tank._MOVEFWD] = state.create(s=self._movefwdST, u=self._movefwdUD)
    self._curstate = tank._CONNECT
    self._strobe = strobe(tank._STROBERED, tank._STROBEBLUE)
    self._lightbank = lightbank(tank._LIGHTBANK)
    self._counter = 0
    self.togglelights()
    onestick.adjustpoints(tank._DZ)  # Set point to minimum value during interpretation.
    self._controller = gamepad(tank._MACADDRESS, self._buttonaction)
    # Probe for a keyboard; headless/unprivileged setups raise here.
    try:
        bres = keyboard.is_pressed('q')
        self._haskeyboard = True
    except Exception:  # narrowed from a bare except
        self._haskeyboard = False
    self._area = area(tank._MAXSIDE, tank._MAXFRONT)
    self._running = True
def loadscene(self, file):
    """Parse the JSON scene description in *file*, populating the scene's
    camera, objects, materials and textures.

    Materials must be defined before the objects that reference them;
    otherwise the program prints an error and exits.
    """
    with open(file, 'r') as f:
        data = json.load(f)
    # Dispatch each entry by its 'type' field.
    for e in data:
        if (e['type'] == "camera"):
            o = vector(*tuple(e['origin']))
            to = vector(*tuple(e['to']))
            up = vector(*tuple(e['up']))
            fov = e['fov']
            ratio = float(YA.WIDTH / YA.HEIGHT)
            self.camera = camera(o, to, up, fov, ratio)
        if (e['type'] == "sphere"):
            p = vector(*tuple(e['position']))
            r = e['radius']
            # Spheres reference materials by id; the material must already
            # have been parsed.
            idmaterial = e['material']
            material = self.getmaterial(idmaterial)
            if (material == None):
                print(
                    "\n\nERROR [Material \"%s\" not found]: Please, check material id and define it *before* objects in .json file"
                    % (idmaterial))
                quit()
            # Optional per-object animation (currently only translation).
            if ("animation" in e):
                anim = animation()
                if ('translate' in e['animation']):
                    anim.translate = vector(
                        *tuple(e['animation']['translate']))
            else:
                anim = None
            obj = sphere(p, r, material, anim)
            self.objects.append(obj)
        if (e['type'] == "material"):
            idmaterial = e['id']
            texture = None
            if (e.get('texture') != None):
                texture = self.gettexture(e['texture'])
            if (e['subtype'] == "lambertian"):
                albedo = color(tuple(e['albedo']))
                material = lambertian(idmaterial, albedo, texture)
            if (e['subtype'] == "metal"):
                albedo = color(tuple(e['albedo']))
                fuzzy = e['fuzzy']
                material = metal(idmaterial, albedo, fuzzy, texture)
            if (e['subtype'] == "glass"):
                ior = e['ior']
                material = glass(idmaterial, ior, texture)
            self.materials.append(material)
        if (e['type'] == "texture"):
            idtexture = e['id']
            if (e['subtype'] == "checker"):
                col1 = color(tuple(e['color1']))
                col2 = color(tuple(e['color2']))
                size = e['size']
                texture = checker(idtexture, col1, col2, size)
            self.textures.append(texture)
    # Here we check if there is an object with animation so we use
    # the frame number in the config file, otherwise its value will be 1
    found = 0
    for o in self.objects:
        if (o.animation != None):
            found = 1
    if (found == 0):
        YA.FRAMES = 1
def capture_video(self):
    """Open the camera and start polling frames at 25 fps via a QTimer."""
    self.camera = camera()
    self.timerCamera = QtCore.QTimer()
    self.timerCamera.timeout.connect(self.capture_frame)
    # 1000 ms / 25 frames -> one frame every 40 ms.
    self.timerCamera.start(1000 * 1 / 25)
def main(filename: str = 'output', output_res: tuple = (200, 100), num_samples=100):
    """Render the Cornell-box-with-triangle scene and save <filename>.png.

    output_res is (width, height); *num_samples* rays per pixel are drawn
    from a seeded 2D sampler so renders are reproducible.
    """
    nx = output_res[0]
    ny = output_res[1]
    num_samples = num_samples
    output = np.zeros((nx, ny, 3)).tolist()
    #f.write("P3\n" + str(nx) + " " + str(ny) + "\n255\n");
    hit_object_list = []
    R = math.cos(math.pi / 4)
    # Alternative scenes kept for quick switching:
    #hit_object_list = random_scene()
    #hit_object_list = two_spheres()
    #hit_object_list = two_perlin_spheres()
    #hit_object_list = earth_sphere()
    #hit_object_list = simple_light()
    #hit_object_list = cornell_box()
    #hit_object_list = cornell_smoke()
    #hit_object_list = final_scene()
    hit_object_list = cornell_triangle()
    # Camera presets for the alternative scenes:
    #lookfrom = vec3(13,2,3)
    #lookat = vec3(0,0,0)
    #lookfrom = vec3(13,2,10)
    #lookat = vec3(0,2,0)
    # dist_to_focus = 10
    # aperture = 0.1
    #vfov = 20
    # Standard Cornell-box camera: pinhole (aperture 0), 40 degree FOV.
    lookfrom = vec3(278, 278, -800)
    #lookfrom = vec3(478,278,-600)
    lookat = vec3(278, 278, 0)
    dist_to_focus = 10
    aperture = 0
    vfov = 40
    seed = 123
    sampler = uniform_sampler_2D(seed)
    #sampler = stratified_sampler_2D(num_samples,seed)
    # Shutter open over t in [0.0, 1.0] (motion-blur support).
    cam = camera(lookfrom, lookat, vec3(0, 1, 0), vfov, float(nx) / float(ny), aperture, dist_to_focus, 0.0, 1.0)
    with tqdm(total=ny * nx) as pbar:
        for j in range(ny - 1, -1, -1):
            for i in range(0, nx):
                col = vec3(0, 0, 0)
                # One batch of (u, v) offsets per pixel from the sampler.
                samples = sampler.generate_n_samples_uv(num_samples)
                for s in samples:
                    u, v = s
                    #print(" i: {} u:{} j: {}, v: {} ".format(i,u,j,v))
                    s = (i + u) / nx
                    t = (j + v) / ny
                    r = cam.get_ray(s, t)
                    col += color(r, hit_object_list, 0)
                col /= float(num_samples)
                #col = vec3(math.sqrt(col[0]),math.sqrt(col[1]),math.sqrt(col[2]))
                # Gamma-2 correction, scaled to 8-bit and clamped.
                ir = 255.99 * math.sqrt(col[0])
                ig = 255.99 * math.sqrt(col[1])
                ib = 255.99 * math.sqrt(col[2])
                ir = (max(0, min(ir, 255)))
                ig = (max(0, min(ig, 255)))
                ib = (max(0, min(ib, 255)))
                output[i][j] = [ir, ig, ib]
                pbar.update(1)
    #f.write(str(ir) + " " + str(ig) + " " + str(ib) + "\n");
    plt.imsave(filename + ".png", np.rot90(np.array(output)).astype(np.uint8))
from camera import *

# Build the FaceSet from the stored face tokens, stream the RTSP camera
# against it, then tear down the set and the temporary CSV files.
outer_id = 'yfacesy'
csvfilepath = "./faces/face_token.csv"
create_faceset(outer_id, csvfilepath)
print("初始化人脸数据集\n")
# RTSP URL (credentials redacted in the source).
cam_addr = 'rtsp://*****:*****@192.168.1.100//Streaming/Channels/1'
outer_id = 'yfacesy'  # reassigned with the same value as above
camera(cam_addr, outer_id)
# Clean up the CSVs produced during the run.
# NOTE(review): `os` is not imported in this chunk -- presumably provided by
# `from camera import *`; confirm.
os.remove("./faces/face_token.csv")
os.remove('./faces/body_rect.csv')
result = delete_set(outer_id)
print("\n\n\nfaceset " + result['outer_id'] + " 已删除")
# First-run flow: if no saved config exists, show the Tk login GUI and build
# all the device controllers; otherwise log in with the stored credentials.
# (Python 2 syntax.)
homepath = '/home/pi/'
print homepath
print os.path.exists(homepath + '/autopi.config')
if not os.path.exists(homepath + '/autopi.config'):
    print 'no user info'
    root = Tk()
    root.wm_title('AutoPi Login')
    app = registerGUI(root, web)
    root.mainloop()  # blocks until the login window is closed
    print 'Web'
    print web
    pi = raspberryPi(web)
    # NOTE(review): these rebind the class names (light, alarm, ...) to
    # instances, so the constructors cannot be called a second time.
    light = light()
    cam = camera()
    alarm = alarm()
    blind = blinds()
    reg = register(web, pi, light, cam, alarm, blind)
else:
    config = ConfigParser.ConfigParser()
    config.read(homepath + '/autopi.config')
    username = config.get('LoginInfo', 'username')
    password = config.get('LoginInfo', 'password')
    print 'working'
    web.setUsername(username)
    web.setPassword(password)
    web.setAuth()
    pi = raspberryPi(web)
    if not pi.response:
        err.setLoginError()
def main(filename: str = 'output', output_res: tuple = (200, 100), num_samples=100):
    """Render the "final scene" and save it as <filename>.png.

    output_res is (width, height); *num_samples* random rays are traced
    per pixel for antialiasing.
    """
    nx = output_res[0]
    ny = output_res[1]
    num_samples = num_samples
    output = np.zeros((nx, ny, 3)).tolist()
    #f.write("P3\n" + str(nx) + " " + str(ny) + "\n255\n");
    hit_object_list = []
    R = math.cos(math.pi / 4)
    # Alternative scenes kept for quick switching:
    #hit_object_list = random_scene()
    #hit_object_list = two_spheres()
    #hit_object_list = two_perlin_spheres()
    #hit_object_list = earth_sphere()
    #hit_object_list = simple_light()
    #hit_object_list = cornell_box()
    #hit_object_list = cornell_smoke()
    hit_object_list = final_scene()
    # Camera presets for the alternative scenes:
    #lookfrom = vec3(13,2,3)
    #lookat = vec3(0,0,0)
    #lookfrom = vec3(13,2,10)
    #lookat = vec3(0,2,0)
    # dist_to_focus = 10
    # aperture = 0.1
    #vfov = 20
    #lookfrom = vec3(278,278,-800)
    # Pinhole camera (aperture 0), 40 degree FOV, shutter open over [0, 1].
    lookfrom = vec3(478, 278, -600)
    lookat = vec3(278, 278, 0)
    dist_to_focus = 10
    aperture = 0
    vfov = 40
    cam = camera(lookfrom, lookat, vec3(0, 1, 0), vfov, float(nx) / float(ny), aperture, dist_to_focus, 0.0, 1.0)
    with tqdm(total=ny * nx) as pbar:
        for j in range(ny - 1, -1, -1):
            for i in range(0, nx):
                col = vec3(0, 0, 0)
                for s in range(0, num_samples):
                    # Jittered sample within the pixel.
                    u = float(i + random.random()) / float(nx)
                    v = float(j + random.random()) / float(ny)
                    r = cam.get_ray(u, v)
                    #p = r(2.0)
                    col += color(r, hit_object_list, 0)
                col /= float(num_samples)
                #col = vec3(math.sqrt(col[0]),math.sqrt(col[1]),math.sqrt(col[2]))
                # Gamma-2 correction, scaled to 8-bit and clamped.
                ir = 255.99 * math.sqrt(col[0])
                ig = 255.99 * math.sqrt(col[1])
                ib = 255.99 * math.sqrt(col[2])
                ir = (max(0, min(ir, 255)))
                ig = (max(0, min(ig, 255)))
                ib = (max(0, min(ib, 255)))
                output[i][j] = [ir, ig, ib]
                pbar.update(1)
    #f.write(str(ir) + " " + str(ig) + " " + str(ib) + "\n");
    plt.imsave(filename + ".png", np.rot90(np.array(output)).astype(np.uint8))
def main():
    """Game entry point: build the world from the TMX map, spawn the player
    and the enemy, set up the camera, and run the 60-fps render loop.
    (Python 2 syntax.)"""
    pygame.init()
    clock = pygame.time.Clock()
    running = True
    root = Tkinter.Tk()
    TILE_WIDTH = 64
    TILE_HEIGHT = 64
    # Use the desktop resolution for the fullscreen window.
    SCREEN_WIDTH, SCREEN_HEIGHT = (root.winfo_screenwidth(), root.winfo_screenheight())
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.FULLSCREEN)
    pygame.display.set_caption("S for Sabotage")
    #commandHandler = CommandHandler(fatguy)
    world_map = TileMapParser().parse_decode("fase3.tmx")
    world_map.load(ImageLoaderPygame())
    ww = world_map.pixel_width;
    wh = world_map.pixel_height;
    w0 = world(ww, wh, SCREEN_WIDTH, SCREEN_HEIGHT, TILE_WIDTH, TILE_HEIGHT)
    # Object layers from the map: walkable ground, blockers, the enemy's
    # patrol area, and the two pickups.
    ground_objects = get_walk_objects(world_map)
    nonwalk_objects = get_nonwalk_objects(world_map)
    walk_enemy = get_walk_enemy(world_map, 0)
    pliers = get_activate_objects(world_map, 'pliers')
    c4 = get_activate_objects(world_map, 'c4')
    activate_list = []
    activate_list.append(pliers)
    activate_list.append(c4)
    enemies = []
    # Snap spawn positions to the tile grid (integer division in Python 2).
    player_x, player_y = get_player(world_map, "player")
    player_x = (player_x / TILE_WIDTH) * TILE_WIDTH;
    player_y = (player_y / TILE_HEIGHT) * TILE_HEIGHT;
    enemy_x, enemy_y = get_player(world_map, "enemies")
    enemy_x = (enemy_x / TILE_WIDTH) * TILE_WIDTH;
    enemy_y = (enemy_y / TILE_HEIGHT) * TILE_HEIGHT;
    print 'ENEMY: ' + str(enemy_x) + ' ' + str(enemy_y)
    print 'PLAYER: ' + str(player_x) + ' ' + str(player_y)
    img_s = pygame.image.load(os.path.join('', 'art', 's.png'))
    img_enemy = pygame.image.load(os.path.join('', 'art', 'police.png'))
    h0 = hud(['pliers', 'c4'])
    s = Character(w0, "s", img_s, player_x, player_y, nonwalk_objects, activate_list, h0)
    enemy = Enemy(w0, 'enemy', img_enemy, enemy_x, enemy_y, walk_enemy, 0)
    # Center the camera on the player, clamped to the world bounds.
    npx = player_x + TILE_WIDTH
    npy = player_y + TILE_HEIGHT
    camera_x = npx - SCREEN_WIDTH / 2
    if camera_x < 0:
        camera_x = 0
    elif (camera_x + SCREEN_WIDTH) > ww:
        camera_x -= (camera_x + SCREEN_WIDTH) - ww
    camera_y = npy - SCREEN_HEIGHT / 2
    if camera_y < 0:
        camera_y = 0
    elif (camera_y + SCREEN_HEIGHT) > wh:
        camera_y -= (camera_y + SCREEN_HEIGHT) - wh
    print str(camera_x) + ' ' + str(camera_y)
    c0 = camera(camera_x, camera_y, w0, s)
    inputHandler = input_handler(w0, c0, s)
    print str(ww) + ' ' + str(wh)
    #key_timeout = -1
    #create sprites groups for collision detection
    #playerGroup = pygame.sprite.RenderUpdates()
    #playerGroup.add(fatguy)
    #objectGroup = pygame.sprite.Group()
    #enemyGroup = pygame.sprite.Group()
    #sceneGroup = pygame.sprite.Group()
    #pygame.key.set_repeat(REPEAT_DELAY, REPEAT_DELAY)
    world_surface = world_map.get_surface()
    while running:
        clock.tick(60)  # cap at 60 fps
        # Blit the visible window of the pre-rendered world, then the sprites.
        screen.blit(world_surface.subsurface(pygame.Rect(c0.x, c0.y, SCREEN_WIDTH, SCREEN_HEIGHT)), (0, 0))
        screen.blit(s.image, s.draw_pos(c0.x, c0.y))
        screen.blit(enemy.image, enemy.draw_pos(c0.x, c0.y))
        # Line-of-sight check: the enemy "sees" the player inside its cone.
        if enemy.sees(s.get_pos(), 30, 120, 240):
            print 'TE VEJO!'
            print (s.get_pos())
            #pygame.draw.line(screen, (225,0,0), s.draw_pos(c0.x, c0.y), enemy.draw_pos(c0.x, c0.y), 10)
        enemy.draw_sees(screen, c0.x, c0.y, 15, 128,256)
        h0.paint(w0, c0)
        #fatguy.update(pygame.time.get_ticks(), SCREEN_WIDTH, SCREEN_HEIGHT, cam_speed)
        #obj, col_type = fatguy.collides_with_objects(ground_objects)
        #if col_type == 1:
        #    fatguy.put_on_ground_running(obj[1])
        #elif col_type == 2:
        #    running = False
        #obj, col_type = fatguy.collides_with_objects(killer_objects)
        #if col_type != -1:
        #    running = False
        running = inputHandler.handle()
        # pygame.display.update()
        pygame.display.flip()
def main(args):
    """Pipeline sections 2-3: sample a 3D face from the morphable-model
    prior, rotate/translate it, project it with a pinhole camera, and save
    the intermediate .obj / .png artifacts.

    *args* carries file paths, latent sizes, transform parameters, and an
    optional `up_to` early-exit stage ("3D", "rotate", "project").
    """
    # Get data file
    dt = h5py.File(args.prior, 'r')
    # Extract data
    face_basis = get_face_basis(dt, args.size_id, args.size_exp)
    # SECTION 2
    print("\nSection 2...")
    # Sample alpha and delta
    print("\tSampling latent variables")
    if args.latent is None:
        # Fresh uniform latents in [-1, 1).
        alpha = np.random.uniform(-1, 1, args.size_id).astype(np.float32)
        delta = np.random.uniform(-1, 1, args.size_exp).astype(np.float32)
    else:
        # Reuse previously optimized latents (pickled torch tensors).
        with open(args.latent, "rb") as f:
            (alpha, delta), _ = pickle.load(f)
        alpha, delta = alpha.detach().numpy(), delta.detach().numpy()
    # Generate face from respective alpha and delta
    print("\tGenerating face 3D point-cloud")
    face_3D = face_basis(alpha, delta)
    # Save object for later visualization
    print("\tSaving face data")
    save_obj(
        args.face_3D_file,
        face_3D,
        face_basis.color,
        face_basis.mesh,
    )
    print("\tSaved to ", args.face_3D_file)
    if args.up_to is not None and args.up_to == "3D":
        return
    # SECTION 3
    print("\nSection 3...")
    print("Rotating face")
    # Transform face
    print("\tTransforming face with omega: ", args.omega, " and t: ", args.t)
    face_transform = FaceTransform()
    face_wt = face_transform(face_3D, args.omega, args.t)
    print("\tSaving rotated face data")
    save_obj(args.face_wt_file, face_wt, face_basis.color, face_basis.mesh)
    print("\tSaved to ", args.face_wt_file)
    if args.up_to is not None and args.up_to == "rotate":
        return
    print("Applying camera projection")
    # Init camera
    print("\tInitializing camera with FOV: ", args.fov, " aspect ratio: ", args.aratio, " near-far clips: ", args.near_far)
    # Note: this local deliberately shadows the Camera class's lowercase
    # factory-style usage below.
    camera = Camera(args.fov, args.aratio, args.near_far)
    # Generate image from face
    print("\tGenerating uv image")
    face_uv = camera(face_wt)
    print("\tNormalizing uv image (z coordinate)")
    uv_normalizer = UVNormalizer()
    face_uv_n = uv_normalizer(face_uv)
    # Extracting landmark points
    print("\tExtracting landmark pointsi from", args.landmarks)
    lmks = get_landmarks(args.landmarks)
    # Keep only the (x, y) columns of the landmark vertices.
    face_lmks = face_uv_n[lmks, :2]
    # Generate image
    plt.scatter(face_lmks[:, 0], face_lmks[:, 1])
    plt.axis('equal')
    plt.savefig(args.face_uv_file + ".png", dpi=900)
    print("\tSaved to ", args.face_uv_file + ".png")
    save_obj(args.face_uv_file + ".obj", face_uv_n, face_basis.color, face_basis.mesh)
    print("\tSaved to ", args.face_uv_file + ".obj")
    if args.up_to is not None and args.up_to == "project":
        return
# First-run flow (duplicate variant of the AutoPi startup script): show the
# Tk login GUI when no saved config exists, otherwise log in with stored
# credentials. (Python 2 syntax.)
homepath = '/home/pi/'
print homepath
print os.path.exists(homepath + '/autopi.config')
if not os.path.exists(homepath + '/autopi.config'):
    print 'no user info'
    root = Tk()
    root.wm_title('AutoPi Login')
    app = registerGUI(root,web)
    root.mainloop()  # blocks until the login window is closed
    print 'Web'
    print web
    pi = raspberryPi(web)
    # NOTE(review): these rebind the class names (light, alarm, ...) to
    # instances, so the constructors cannot be called a second time.
    light = light()
    cam = camera()
    alarm = alarm()
    blind = blinds()
    reg = register(web,pi,light,cam,alarm,blind)
else:
    config = ConfigParser.ConfigParser()
    config.read(homepath+'/autopi.config')
    username = config.get('LoginInfo','username')
    password = config.get('LoginInfo','password')
    print 'working'
    web.setUsername(username)
    web.setPassword(password)
    web.setAuth()
    pi = raspberryPi(web)
    if not pi.response:
        err.setLoginError()
# to setup ram disk # in /etc/fstab add line "tmpfs /var/tmp tmpfs nodev,nosuid,size=50m 0 0" # sudo mount -a # initialize network socket HOST = '' PORT = 5800 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) soc.bind((HOST, PORT)) soc.listen(5) conn, addr = soc.accept() print("Got connection from", addr) # initialize the video stream stream = camera().start() time.sleep(2.0) fps = FPS().start() # start cv loop while True: # grab image from stream image = stream.read() image = imutils.resize(image, 320, 240) img = cv2.cvtColor(image, cv2.cv.CV_BGR2HSV) # do the stuff GREEN_MIN = np.array([50, 21, 156]) GREEN_MAX = np.array([91, 181, 255]) mask = cv2.inRange(img, GREEN_MIN, GREEN_MAX) contours0, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
sock1.bind(server_address1) sock1.listen(1) while 1: print "WAITING FOR CONNECTION" connection1,client_address1 = sock1.accept() try: print "connection from",client_address1 while 1: data = connection1.recv(20) print "RECIEVED %s" %data data = data.split() if data[0] == "$start": p = camera() print "STARTING" #gtk.main() elif data[0] == "$stop": #start.pause() try: #print p.pid os.kill(p.pid+1,signal.SIGTERM) #p.kill() print "PROCESS ENDED" except: print >>sys.stderr, "ERROR OCCURRED WHILE KILLING PROCESS" else: print >>sys.stderr, "UNRECOGNISED DATA",client_address1 connection1.close()
def main():
    """Sonic-style platformer loop: keyboard input, sensor-line collision,
    camera scrolling and drawing, capped at 60 fps."""
    fps = 60;
    size = (320*4 ,224*4)
    pg.init()
    screen = pg.display.set_mode(size)
    spin_start = False
    jump_lock = False
    background = pg.image.load('tiles.png')
    #flag for the main game-loop
    #subject to change when running cycles get introduced
    clock = pg.time.Clock()
    player = sonic('jump_',Zone(640, 448).sonic_hitbox()[0:2],1,4,'right')
    all_sprites = pg.sprite.Group()
    all_sprites.add(player)
    pg.init()
    zone = Zone(player.rect.x, player.rect.y)
    right_lock = False;
    left_lock = False;
    spinmode =False
    # Populate the level geometry (64px tiles and 45-degree ramps).
    tile(Zone(640, 448).screen)
    # block(Zone(640, 448).screen)
    ramp(Zone(640, 448).screen)
    X_pos = 1000
    floor = 8009
    Camera = camera(1280, 896)
    Engine = engine()
    while True:
        Camera.update(Engine.gsp)
        # World-to-screen positions for every tile/ramp, offset by the camera.
        tiles = [ (n[1] * 64 - Camera.offset()[0], n[0] * 64 - Camera.offset()[1]) for n in tile.floor_tiles]
        # blocks = [ (n[1] * 64 - Camera.offset()[0], n[0] * 64 - Camera.offset()[1]) for n in block.block_tiles]
        ramps = [ (n[1] * 64 - Camera.offset()[0] , n[0] * 64 - Camera.offset()[1]) for n in ramp.ramp_tiles]
        # Six collision probes rebuilt each frame around the player.
        sensor_lines = {'bottom_left': sensor_A(640, 448, player),
                        'bottom_right' : sensor_B(640, 448, player),
                        'mid_left' : sensor_C(640,448,player),
                        'mid_right' : sensor_D(640, 448, player),
                        'top_left': sensor_E(640,448,player),
                        'top_right' : sensor_F(640, 448, player)}
        button = None
        dt = clock.tick(fps)/1000
        Engine.direction = player.direction_returner
        '''
        Loop that checks first
        '''
        keys = pg.key.get_pressed()
        if keys[pg.K_a] and not keys[pg.K_s]:
            if not left_lock:
                Engine.movement = 'left'
        elif keys[pg.K_d] and not keys[pg.K_s]:
            if not right_lock:
                Engine.movement = 'right'
        else:
            Engine.movement = 'None'
        '''note implement A variable jump system that requires holding the button and a lot more effort'''
        if keys[pg.K_w]:
            button = 'look_up'
        # No key held at all releases the jump lock; any key cancels spinmode.
        if keys.count(1) == 0:
            jump_lock = False
        else:
            spinmode = False
        if keys[pg.K_SPACE] and Engine.onGround == True and not keys[pg.K_s] and jump_lock == False:
            Engine.movement = 'jump'
        if keys[pg.K_s] and Engine.onGround == True:
            if player.velocity.x != 0:
                Engine.movement = 'roll'
            else:
                button = 'look_down'
                jump_lock = True
        for event in pg.event.get():
            if event.type == pg.KEYDOWN and event.key == pg.K_SPACE:
                # smoke trails
                spin_start = True
                Engine.movement = 'spin_charge'
            if event.type == pg.KEYUP:
                if spin_start and event.key == pg.K_s and Engine.onGround == True and jump_lock == True:
                    Engine.movement = 'spin_dash'
                    spinmode = True
                    spin_start = False
                    Engine.spin_rev = 0
        # NOTE(review): a second event.get() here drains events the loop
        # above may have needed that same frame -- confirm this is intended.
        for event in pg.event.get():
            if event.type == pg.QUIT:
                pg.quit()
        if spin_start:
            button = 'spin'
        sensor_lines['mid_left'].floor_mode()
        sensor_lines['mid_right'].floor_mode()
        print(f'movement: {Engine.gsp}')
        # Propagate engine speeds to the camera and the player sprite.
        Camera.velocity.x, Camera.velocity.y = Engine.gsp, Engine.ysp
        player.velocity.x , player.velocity.y = Engine.gsp, Engine.ysp
        '''
        collision detection using sensor lines
        '''
        if pushing_left(sensor_lines,tiles,Camera,Engine) != None:
            button = pushing_left(sensor_lines, tiles, Camera, Engine)
        if pushing_right(sensor_lines, tiles, Camera, Engine) != None:
            button = pushing_right(sensor_lines,tiles,Camera, Engine)
        fall = falling(sensor_lines, tiles,player,Engine,ramps,zone.screen,ramp,Camera.offset(),Camera)
        if fall != None:
            floor = fall['floor']
            sonic.angle = fall['angle']
            # Engine.angle = fall['angle']
            # Engine.xsp = Engine.gsp * math.cos(fall['angle'])
            # Engine.ysp = Engine.gsp * - math.sin(fall['angle'])
        '''
        animation
        '''
        screen.fill((0, 0, 0))
        # screen.blit(background,(0,0))
        [screen.blit(pg.image.load('tile.png'), n) for n in tiles]
        [screen.blit(pg.image.load('slope_45.png'), n ) for n in ramps]
        screen.blit(pg.image.load('active_tile.png'),( floor,X_pos))
        player.refresh(dt,button,spinmode)
        all_sprites.draw(screen)
        [sensor_lines.get(n).draw(screen) for n in sensor_lines]
        pg.display.update()
import numpy as np
import pandas as pd
from skimage import io, color, img_as_ubyte
from skimage.feature import greycomatrix, greycoprops
from sklearn.metrics.cluster import entropy
import skimage
import os
from Features import *
from camera import *

# Interactive tool: capture images for a new class via the camera, then
# regenerate the feature CSV. (Prompts and strings are Portuguese.)
print(" Gerarando nova classe!")
controler = input (" Deseja adicionar uma nova classe?: y/n ")
if controler == 'y':
    nome= input("Insira o nome da nova classe: ")
    # Capture and save images under the chosen class name.
    nova_classe= camera(nome)
    nova_classe.save_img()
    # Rebuild the feature CSV from all stored images.
    features = Features()
    features.gerar_csv()
import random
from pygame import locals
import logging
from matrix import *
from camera import *
from test import *
from objects import *

logging.basicConfig(filename='kocka.log', level=logging.DEBUG)

# NOTE(review): `pygame` itself is used below but only `pygame.locals` is
# imported here -- presumably one of the star imports provides it; confirm.
pygame.init()
screenwidth = 600
screenheight = 600
screen = pygame.display.set_mode((screenwidth, screenheight))
# Camera with a 0.035 scale factor.
kam = camera(0.035) #meter


def display():
    """Project every object through the camera and draw its wireframe edges."""
    diplaying = kam.toDisplay(objects)
    for i in range(len(diplaying)):
        # Each `line` is a pair of vertex indices into the projected points.
        for line in objects[i].lines:
            pygame.draw.line(screen, (0, 20, 100), diplaying[i][line[0]],
                             diplaying[i][line[1]], 2)
    #for line in objects[i].lines[:6]:
    #    for line2 in objects[i].lines[6:]:  #display sides
    #        pygame.draw.polygon(screen, (100, 20, 100), [diplaying[i][line[0]], diplaying[i][line[1]], diplaying[i][line2[0]], diplaying[i][line2[1]]], 0)


# NOTE(review): main() is truncated in this excerpt; its body continues
# beyond the visible chunk.
def main():
    run = True
import cv2
from camera import *
import time

# Benchmark: grab frames from the webcam until Ctrl-C, then report the
# achieved frames-per-second.
cam1 = camera(200)  # NOTE(review): 200 is presumably a device id/config value -- confirm
#cam2 = camera(1)
#picam = piCamera()
time.sleep(2)  # give the camera time to warm up
start = time.time()
try:
    while (True):
        frame1, _ = cam1.PicTime(True)
        #frame2, _ = cam2.PicTime(False)
        #frame3, _ = picam.PicTime(True)
        #cv2.imshow('frame1',frame1)
        #cv2.imshow('frame2',frame2)
        #cv2.imshow('frame2',frame3)
except KeyboardInterrupt:
    # Ctrl-C ends the run; cam1.i is the frame counter kept by the camera.
    finish = time.time()
    print('\nWebcam%i Captured %d frames at %.2ffps' %
          (cam1.camera_id, cam1.i, cam1.i / (finish - start)))
    print('Time:{}'.format(finish - start))
    #print('\nPiCam Captured %d frames at %.2ffps' % (
    #picam.i,
    #picam.i / (finish - start)))