def save_ocoloenv_images(retina, random_rate, retry, datasets, dirname, suffix):
    for content_id, content in enumerate(make_contents(), 1):
        sys.stderr.write('generating images (content_id == {})...'.format(content_id))
        sys.stderr.flush()
        env = Environment(content, on_buffer_width=128, retina=retina)
        for datatype, n in datasets:
            prefix = '{}/{}/{}/'.format(dirname, datatype, content_id)
            subprocess.call(['mkdir', '-p', prefix])
            for i in range(n):
                filename = '{}{}{}'.format(prefix, i, suffix)
                save_screen_images(env, random_rate, retry, filename)
        env.close()
        sys.stderr.write('done\n')
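# A minimal usage sketch for save_ocoloenv_images, assuming it is run from the
# generator script that defines make_contents and save_screen_images. The
# dataset sizes, directory name, and suffix below are illustrative values, not
# taken from the original code.
if __name__ == '__main__':
    datasets = [('train', 1000), ('test', 100)]
    save_ocoloenv_images(retina=False, random_rate=0.5, retry=10,
                         datasets=datasets, dirname='images', suffix='.png')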
def evaluate(self, agent):
    print("content:{} difficulty:{} start".format(self.content_id, self.difficulty))

    content_class_name = content_class_names[self.content_id - 1]
    content_class = globals()[content_class_name]
    if self.difficulty >= 0:
        content = content_class(difficulty=self.difficulty)
    else:
        content = content_class()

    env = Environment(content)
    obs = env.reset()

    reward = 0
    done = False
    task_reward = 0
    results = []
    switch_count = 0
    switch_correct = 0

    for i in range(self.duration):
        image, angle = obs['screen'], obs['angle']

        # Choose action by the agent's decision
        action = agent(image, angle, reward, done)

        task = agent.environment.current_task
        if task is not None and task != 'fixation':
            switch_count += 1
            if task == task_names[self.content_id - 1]:
                switch_correct += 1

        # Forward the environment one step
        obs, reward, done, info = env.step(action)

        if 'result' in info:
            result = TrialResult(self.content_id, self.difficulty, reward, info)
            results.append(result)
        task_reward += reward
        assert done is not True

    print("content:{} difficulty:{} end, reward={}".format(
        self.content_id, self.difficulty, task_reward))
    # Guard against division by zero when no task switch was observed
    if switch_count > 0:
        print('switch acc: {}'.format(float(switch_correct) / switch_count))
    return results, task_reward
def calc_fps(self, content_type, with_agent):
    content = self.get_content(content_type)

    if with_agent:
        agent = Agent(retina=Retina(), lip=LIP(), vc=VC(), pfc=PFC(),
                      fef=FEF(), bg=BG(), sc=SC(), hp=HP(), cb=CB())

    env = Environment(content)
    obs = env.reset()

    reward = 0
    done = False
    step_size = 1000
    step = 0

    start = time.time()
    for i in range(step_size):
        if with_agent:
            image, angle = obs['screen'], obs['angle']
            # The agent's output is discarded; only its compute cost is
            # included in the timing.
            dummy_action = agent(image, angle, reward, done)
        dh = np.random.uniform(low=-0.05, high=0.05)
        dv = np.random.uniform(low=-0.05, high=0.05)
        action = np.array([dh, dv])
        obs, reward, done, _ = env.step(action)
        step += 1
        if done:
            obs = env.reset()

    elapsed_time = time.time() - start
    fps = step_size / elapsed_time
    return fps
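# A standalone throughput check mirroring the loop inside calc_fps without the
# enclosing benchmark class; a sketch, assuming PointToTargetContent and
# Environment are importable as in the snippets above.
import time
import numpy as np

env = Environment(PointToTargetContent())
env.reset()
start = time.time()
for _ in range(1000):
    # Random saccade action, same range as calc_fps uses
    action = np.random.uniform(low=-0.05, high=0.05, size=2)
    obs, reward, done, _ = env.step(action)
    if done:
        env.reset()
print('fps: {:.1f}'.format(1000 / (time.time() - start)))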
def evaluate(self, agent):
    print("content:{} difficulty:{} start".format(self.content_id, self.difficulty))

    content_class_name = content_class_names[self.content_id - 1]
    content_class = globals()[content_class_name]
    if self.difficulty >= 0:
        content = content_class(difficulty=self.difficulty)
    else:
        content = content_class()

    env = Environment(content)
    obs = env.reset()

    reward = 0
    done = False
    task_reward = 0
    results = []

    for i in range(self.duration):
        image, angle = obs['screen'], obs['angle']

        # Choose action by the agent's decision
        action = agent(image, angle, reward, done)

        # Forward the environment one step
        obs, reward, done, info = env.step(action)

        if 'result' in info:
            result = TrialResult(self.content_id, self.difficulty, reward, info)
            results.append(result)
        task_reward += reward
        assert done is not True

    print("content:{} difficulty:{} end, reward={}".format(
        self.content_id, self.difficulty, task_reward))
    return results, task_reward
def check_offscreen():
    content = PointToTargetContent()
    env = Environment(content)

    frame_size = 10
    for i in range(frame_size):
        dh = np.random.uniform(low=-0.02, high=0.02)
        dv = np.random.uniform(low=-0.02, high=0.02)
        action = np.array([dh, dv])
        obs, reward, done, info = env.step(action)
        image = obs['screen']
        save_img(image)
        if done:
            print("Episode terminated")
            obs = env.reset()
def __init__(self, content, display_size): pygame.init() self.surface = pygame.display.set_mode(display_size, 0, 24) pygame.display.set_caption('oculomotor') self.retina = Retina() self.lip = LIP() self.vc = VC() self.pfc = PFC() self.fef = FEF() #self.bg = BG() # Train mode self.bg = BG(train=False, backprop=False) # Test mode self.bg.agent.load("./results/exp20_after") # Test mode self.sc = SC() self.hp = HP() self.cb = CB() self.agent = Agent(retina=self.retina, lip=self.lip, vc=self.vc, pfc=self.pfc, fef=self.fef, bg=self.bg, sc=self.sc, hp=self.hp, cb=self.cb) self.env = Environment(content) obs = self.env.reset() self.last_image = obs['screen'] self.last_angle = obs['angle'] self.last_reward = 0 self.last_done = False self.episode_reward = 0 self.font = pygame.font.Font(None, 20) self.display_size = display_size
help="Flag to debug execute step by step with one key press", type=bool, default=False) args = parser.parse_args() if args.content == Contents.POINT_TO_TARGET: content = PointToTargetContent() elif args.content == Contents.CHANGE_DETECTION: content = ChangeDetectionContent() elif args.content == Contents.ODD_ONE_OUT: content = OddOneOutContent() elif args.content == Contents.VISUAL_SEARCH: content = VisualSearchContent() elif args.content == Contents.MULTIPLE_OBJECT_TRACKING: content = MultipleObjectTrackingContent() elif args.content == Contents.RANDOM_DOT_MOTION_DISCRIMINATION: content = RandomDotMotionDiscriminationContent() else: print("Unknown argument") sys.exit(1) env = Environment(content) env.render() # env.window is created here handler = KeyHandler(env, args.step_debug) pyglet.app.run() env.close()
    content = OddOneOutContent()
elif args.content == Contents.VISUAL_SEARCH:
    content = VisualSearchContent()
elif args.content == Contents.MULTIPLE_OBJECT_TRACKING:
    content = MultipleObjectTrackingContent()
elif args.content == Contents.RANDOM_DOT_MOTION_DISCRIMINATION:
    content = RandomDotMotionDiscriminationContent()
elif args.content == Contents.RED_CURSOR:
    content = None
else:
    print("Unknown argument")
    sys.exit(1)

if content:
    env = Environment(content,
                      on_buffer_width=128,
                      skip_red_cursor=args.skip_red_cursor,
                      retina=args.retina,
                      saliency=args.saliency,
                      diff=args.diff)
else:
    env = RedCursorEnvironment(None,
                               on_buffer_width=128,
                               retina=args.retina,
                               saliency=args.saliency,
                               diff=args.diff)

env.render()  # env.window is created here

handler = KeyHandler(env, args.step_debug)
pyglet.app.run()
env.close()
class Inspector(object):
    def __init__(self, content, display_size):
        pygame.init()
        self.surface = pygame.display.set_mode(display_size, 0, 24)
        pygame.display.set_caption('oculomotor')

        self.retina = Retina()
        self.lip = LIP()
        self.vc = VC()
        self.pfc = PFC()
        self.fef = FEF()
        self.bg = BG()
        self.sc = SC()
        self.hp = HP()
        self.cb = CB()

        self.agent = Agent(
            retina=self.retina,
            lip=self.lip,
            vc=self.vc,
            pfc=self.pfc,
            fef=self.fef,
            bg=self.bg,
            sc=self.sc,
            hp=self.hp,
            cb=self.cb
        )

        self.env = Environment(content)

        self.pfc.load_model('data/pfc_task_detection.pth')
        #self.bg.load_model('data/bg_rl.pth')

        obs = self.env.reset()

        self.last_image = obs['screen']
        self.last_angle = obs['angle']
        self.last_reward = 0
        self.last_done = False

        self.episode_reward = 0
        self.font = pygame.font.Font(None, 20)
        self.display_size = display_size

    def update(self):
        self.surface.fill(BLACK)
        done = self.process()
        pygame.display.update()
        return done

    def draw_text(self, str, left, top, color=WHITE):
        text = self.font.render(str, True, color, BLACK)
        text_rect = text.get_rect()
        text_rect.left = left
        text_rect.top = top
        self.surface.blit(text, text_rect)

    def draw_center_text(self, str, center_x, top):
        text = self.font.render(str, True, WHITE, BLACK)
        text_rect = text.get_rect()
        text_rect.centerx = center_x
        text_rect.top = top
        self.surface.blit(text, text_rect)

    def show_rgb_256_image(self, data, left, top, label):
        self.show_image((data * 1.0).astype(np.uint8), left, top, label)

    def show_gray_1_image(self, data, left, top, label):
        data = np.clip(data * 255.0, 0.0, 255.0)
        data = data.astype(np.uint8)
        data = np.stack([data for _ in range(3)], axis=2)
        self.show_image(data, left, top, label)

    def show_optical_flow(self, optical_flow, left, top, label):
        # Show optical flow with an HSV color image
        image = self.get_optical_flow_hsv(optical_flow)

        # Draw optical flow direction with lines
        step = 16
        h, w = optical_flow.shape[:2]
        y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(
            2, -1).astype(int)
        fx, fy = optical_flow[y, x].T * 10
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        cv2.polylines(image, lines, 0, (0, 255, 0))
        for (x1, y1), (x2, y2) in lines:
            cv2.circle(image, (x1, y1), 1, (0, 255, 0), -1)
        self.show_image(image, left, top, label)

    def get_optical_flow_hsv(self, optical_flow):
        h, w = optical_flow.shape[:2]
        fx, fy = optical_flow[:, :, 0], optical_flow[:, :, 1]
        ang = np.arctan2(fy, fx) + np.pi
        v = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((h, w, 3), np.uint8)
        hsv[..., 0] = ang * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = np.minimum(v * 4, 255)
        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        return image

    def show_image(self, data, left, top, label):
        image = pygame.image.frombuffer(data, (128, 128), 'RGB')
        self.surface.blit(image, (left, top))
        self.draw_center_text(label, 128 / 2 + left, top + 128 + 8)
        pygame.draw.rect(self.surface, DARK_GRAY, Rect(left, top, 128, 128), 1)

    def show_reward(self):
        self.draw_text(
            "TASK: {}, REWARD: {}, PHASE: {}, PFC_STEPS: {}, ENV_STEPS: {}".format(
                self.pfc.task, int(self.episode_reward), self.pfc.phase,
                self.pfc.steps, self.env.content.step_count),
            8, 434 + 48)

    def show_fef_data_bars(self, fef_data):
        fef_data_len = len(fef_data)
        bottom = 256 + 16
        pygame.draw.line(self.surface, DARK_GRAY, (8, bottom - 100),
                         (3 * fef_data_len + 8, bottom - 100), 1)
        for i, data in enumerate(fef_data):
            likelihood = data[0]
            left = 8 + 3 * i
            top = bottom - 100 * likelihood
            pygame.draw.line(self.surface, WHITE, (left, top), (left, bottom), 1)
        self.draw_center_text("likelihoods",
                              (8 + 3 * fef_data_len) // 4, bottom + 8)

    def show_fef_data_grid(self, fef_data):
        grid_division = int(math.sqrt(len(fef_data) // 4))
        grid_width = 128 // grid_division

        likelihoods0 = []
        likelihoods1 = []
        likelihoods2 = []
        likelihoods3 = []
        data_len = len(fef_data) // 4
        for i in range(data_len):
            likelihoods0.append(fef_data[i][0])
            likelihoods1.append(fef_data[i + data_len][0])
            likelihoods2.append(fef_data[i + data_len * 2][0])
            likelihoods3.append(fef_data[i + data_len * 3][0])

        if self.pfc.task in [6]:
            self.show_grid(likelihoods1, 0, grid_division, grid_width,
                           8 + 128 * 0, 330, "direction acc")
        if self.pfc.task in [1, 4, 5]:
            self.show_grid(likelihoods0, 0, grid_division, grid_width,
                           8 + 128 * 1, 330, "template acc")
        if self.pfc.task in [2]:
            self.show_grid(likelihoods2, 0, grid_division, grid_width,
                           8 + 128 * 2, 330, "change acc")
        if self.pfc.task in [1, 3, 4, 5]:
            self.show_grid(likelihoods3, 0, grid_division, grid_width,
                           8 + 128 * 3, 330, "search acc")

    def show_grid(self, data, offset, grid_division, grid_width, left, top, label):
        index = 0
        for ix in range(grid_division):
            x = grid_width * ix
            for iy in range(grid_division):
                y = grid_width * iy
                likelihood = data[index]
                c = int(likelihood * 255.0)
                color = (c, c, c)
                pygame.draw.rect(self.surface, color,
                                 Rect(left + x, top + y, grid_width, grid_width))
                index += 1
        pygame.draw.rect(self.surface, DARK_GRAY, Rect(left, top, 128, 128), 1)
        self.draw_center_text(label, 128 / 2 + left, top + 128 + 8)

    def process(self):
        action = self.agent(self.last_image, self.last_angle,
                            self.last_reward, self.last_done)
        obs, reward, done, _ = self.env.step(action)

        self.episode_reward += reward
        if done:
            obs = self.env.reset()
            self.episode_reward = 0

        image = obs['screen']
        angle = obs['angle']

        self.show_rgb_256_image(image, 128 * 0 + 8, 128 * 0 + 8, 'input')

        if self.retina.last_retina_image is not None:
            self.show_rgb_256_image(self.retina.last_retina_image,
                                    128 * 1 + 8, 128 * 0 + 8, 'retina')
        if self.pfc.last_memory_image is not None:
            self.show_rgb_256_image(self.pfc.last_memory_image,
                                    128 * 2 + 8, 128 * 0 + 8, 'working memory')
        if self.pfc.last_saliency_map is not None:
            self.show_gray_1_image(self.pfc.last_saliency_map,
                                   128 * 3 + 8, 128 * 0 + 8, 'saliency map')
        if self.pfc.task in [6] and self.lip.last_optical_flow is not None:
            self.show_optical_flow(self.lip.last_optical_flow,
                                   128 * 0 + 8, 128 * 1 + 8 + 32, "optical flow")
        if self.pfc.task in [2] and self.pfc.last_change_map is not None:
            self.show_rgb_256_image(self.pfc.last_change_map,
                                    128 * 2 + 8, 128 * 1 + 8 + 32, 'change map')
        if self.pfc.task in [1, 3, 4, 5] and self.pfc.last_search_map is not None:
            self.show_gray_1_image(self.pfc.last_search_map,
                                   128 * 3 + 8, 128 * 1 + 8 + 32, 'search map')
        #if self.lip.last_small_e_match_map is not None:
        #    self.show_gray_1_image(self.lip.last_small_e_match_map, 128 * 3 + 8, 8, 'match small E')
        #if self.lip.last_magenta_t_match_map is not None:
        #    self.show_gray_1_image(self.lip.last_magenta_t_match_map, 128 * 3 + 8, 8, 'match magenta T')
        if self.sc.last_fef_data is not None:
            #self.show_fef_data_bars(self.sc.last_fef_data)
            self.show_fef_data_grid(self.sc.last_fef_data)
        #if self.hp.map_image is not None:
        #    self.show_rgb_256_image(self.hp.map_image, 128 * 3 + 8, 300, 'allocentric map')
        self.show_reward()

        self.last_image = image
        self.last_angle = angle
        self.last_reward = reward
        self.last_done = done
        return done

    def get_frame(self):
        data = self.surface.get_buffer().raw
        image = np.fromstring(data, dtype=np.uint8)
        image = image.reshape((self.display_size[1], self.display_size[0], 3))
        return image
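# A minimal driver loop for the Inspector above; a sketch, assuming a
# PointToTargetContent task and an illustrative window size. update() draws
# one frame and returns the episode's done flag.
inspector = Inspector(PointToTargetContent(), display_size=(640, 480))
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    inspector.update()
pygame.quit()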
class DataGenerator():
    def __init__(self, content, retina=False):
        self.CAMERA_INITIAL_ANGLE_V = deg2rad(10.0)
        # TODO: allow other environments to be specified as well
        self.content = content
        self.env = Environment(self.content)
        self.egocentric_images = None
        self.allocentric_images = None
        self.retina = Retina() if retina else None

    # TODO: check that this method also works with other environments
    def generate_egocentric_images(self, episode=5, length=400, inplace=True):
        self.env.reset()
        # egocentric_images.shape: (episode, length, height, width)
        egocentric_images = []
        for _ in tqdm(range(episode)):
            self.env.reset()
            # Action needed to transition out of the initial state
            action = np.array([0, -self.CAMERA_INITIAL_ANGLE_V])
            obs, reward, done, _ = self.env.step(action)
            if self.retina is not None:
                obs['screen'] = self.retina(obs['screen'])
            images = []
            for _ in range(length):
                dh = np.random.uniform(low=-0.02, high=0.02)
                dv = np.random.uniform(low=-0.02, high=0.02)
                action = np.array([dh, dv])
                obs, reward, done, _ = self.env.step(action)
                if self.retina is not None:
                    obs['screen'] = self.retina(obs['screen'])
                if reward != 0:
                    # If the task happened to succeed and the episode ended,
                    # forcibly restart it
                    self.env.reset()
                    action = np.array([0, -self.CAMERA_INITIAL_ANGLE_V])
                    obs, reward, done, _ = self.env.step(action)
                    if self.retina is not None:
                        obs['screen'] = self.retina(obs['screen'])
                image = obs['screen'].copy()
                images.append(image)
            egocentric_images.append(images)
        egocentric_images = np.array(egocentric_images).reshape(
            (-1, 128, 128, 3))
        if inplace:
            self.egocentric_images = egocentric_images
        return egocentric_images

    def save_egocentric_images(self, dirname='images', prefix='egocentric_images'):
        dirname = str(Path(dirname).joinpath('env'))
        os.makedirs(dirname, exist_ok=True)
        now = datetime.datetime.now()
        filename = prefix + '{:%Y%m%d}'.format(now) + '.npy'
        path = Path(dirname).joinpath(filename)
        if self.egocentric_images is not None:
            np.save(path, self.egocentric_images)
        return path

    # TODO: check that this method also works with other environments
    def generate_allocentric_images(self, scene=5, inplace=True):
        self.env.reset()
        # allocentric_images.shape: (scene, height, width)
        allocentric_images = []
        for _ in tqdm(range(scene)):
            self.env.reset()
            # Action needed to transition out of the initial state
            action = np.array([0, -self.CAMERA_INITIAL_ANGLE_V])
            obs, reward, done, _ = self.env.step(action)
            images = obs['screen'].copy()
            allocentric_images.append(images)
        allocentric_images = np.array(allocentric_images)
        if inplace:
            self.allocentric_images = allocentric_images
        return allocentric_images

    def save_allocentric_images(self, dirname='images', prefix='allocentric_images'):
        dirname = str(Path(dirname).joinpath('env'))
        os.makedirs(dirname, exist_ok=True)
        now = datetime.datetime.now()
        filename = prefix + '{:%Y%m%d}'.format(now) + '.npy'
        path = Path(dirname).joinpath(filename)
        if self.allocentric_images is not None:
            np.save(path, self.allocentric_images)
        return path

    def reset_egocentric_images(self):
        self.egocentric_images = []

    def reset_allocentric_images(self):
        self.allocentric_images = []
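# A minimal usage sketch for DataGenerator; the content choice and output
# directory are illustrative, not taken from the original code.
generator = DataGenerator(PointToTargetContent(), retina=True)
generator.generate_egocentric_images(episode=5, length=400)
ego_path = generator.save_egocentric_images(dirname='images')
generator.generate_allocentric_images(scene=5)
allo_path = generator.save_allocentric_images(dirname='images')
print('saved: {} and {}'.format(ego_path, allo_path))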
class Inspector(object):
    def __init__(self, content, display_size, model_name='1006-1538806261.ckpt',
                 use_ppo_models=False):
        pygame.init()
        self.surface = pygame.display.set_mode(display_size, 0, 24)
        pygame.display.set_caption('oculomotor')

        self.retina = Retina()
        self.lip = LIP()
        self.vc = VC()
        self.pfc = PFC()
        self.fef = FEF()
        self.bg = BG(model_name=model_name, use_saved_models=use_ppo_models)
        self.sc = SC()
        self.hp = HP()
        self.cb = CB()
        self.step = 0
        self.episode = 0

        self.agent = Agent(retina=self.retina, lip=self.lip, vc=self.vc,
                           pfc=self.pfc, fef=self.fef, bg=self.bg,
                           sc=self.sc, hp=self.hp, cb=self.cb)

        self.env = Environment(content)
        obs = self.env.reset()

        self.last_image = obs['screen']
        self.last_angle = obs['angle']
        self.last_reward = 0
        self.last_done = False

        self.episode_reward = 0
        self.font = pygame.font.Font(None, 20)
        self.display_size = display_size

    def update(self):
        self.surface.fill(BLACK)
        done = self.process()
        pygame.display.update()
        return done

    def draw_text(self, str, left, top, color=WHITE):
        text = self.font.render(str, True, color, BLACK)
        text_rect = text.get_rect()
        text_rect.left = left
        text_rect.top = top
        self.surface.blit(text, text_rect)

    def draw_center_text(self, str, center_x, top):
        text = self.font.render(str, True, WHITE, BLACK)
        text_rect = text.get_rect()
        text_rect.centerx = center_x
        text_rect.top = top
        self.surface.blit(text, text_rect)

    def show_original_image(self, image):
        image_ = image * 1.0
        data = image_.astype(np.uint8)
        self.show_image(data, 8, 8, "input")

    def show_retina_image(self, image):
        image_ = image * 1.0
        data = image_.astype(np.uint8)
        self.show_image(data, 128 + 8, 8, "retina")

    def show_saliency_map(self, saliency_map):
        saliency_map_ = np.clip(saliency_map * 255.0, 0.0, 255.0)
        data = saliency_map_.astype(np.uint8)
        data = np.stack([data for _ in range(3)], axis=2)
        self.show_image(data, 128 * 2 + 8, 8, "saliency")

    def show_optical_flow(self, optical_flow):
        # Show optical flow with an HSV color image
        image = self.get_optical_flow_hsv(optical_flow)

        # Draw optical flow direction with lines
        step = 16
        h, w = optical_flow.shape[:2]
        y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1).astype(int)
        fx, fy = optical_flow[y, x].T
        lines = np.vstack([x, y, x + fx * 5, y + fy * 5]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        for i, ((x1, y1), (x2, y2)) in enumerate(lines):
            if (not (17 < i < 22)) and (not (25 < i < 30)) and \
               (not (33 < i < 38)) and (not (41 < i < 46)):
                continue
            line = np.vstack([x1, y1, x2, y2]).T.reshape(-1, 2, 2)
            line = np.int32(line + 0.5)
            cv2.polylines(image, line, 0, (0, 255, 0))
        self.show_image(image, 128 * 3 + 8, 8, "opt_flow")

    def show_map_image(self, map_image):
        # Show the allocentric map image in the hippocampal formation.
        self.show_image(map_image, 128 * 3 + 8, 300, "allocentric map")

    def get_optical_flow_hsv(self, optical_flow):
        h, w = optical_flow.shape[:2]
        fx, fy = optical_flow[:, :, 0], optical_flow[:, :, 1]
        ang = np.arctan2(fy, fx) + np.pi
        v = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((h, w, 3), np.uint8)
        hsv[..., 0] = ang * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = np.minimum(v * 4, 255)
        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        return image

    def show_image(self, data, left, top, label, color='RGB'):
        image = pygame.image.frombuffer(data, (128, 128), color)
        self.surface.blit(image, (left, top))
        self.draw_center_text(label, 128 / 2 + left, top + 128 + 8)
        pygame.draw.rect(self.surface, DARK_GRAY, Rect(left, top, 128, 128), 1)

    def show_reward(self):
        self.draw_text("REWARD: {}".format(int(self.episode_reward)),
                       128 * 3 + 24, 128 + 48)

    def show_bg_data_bars(self, bg_data):
        bg_data_len = len(bg_data)
        bottom = 512 + 16
        pygame.draw.line(self.surface, DARK_GRAY, (8, bottom - 100),
                         (3 * bg_data_len + 8, bottom - 100), 1)
        for i, threshold in enumerate(bg_data):
            left = 8 + 3 * i
            top = bottom - 30 * threshold
            pygame.draw.line(self.surface, WHITE, (left, top), (left, bottom), 1)
        self.draw_center_text("thresholds", (8 + 3 * bg_data_len) // 2, bottom + 8)

    def show_fef_data_bars(self, fef_data):
        fef_data = list(fef_data[:64]) + list(fef_data[128:])
        fef_data_len = len(fef_data)
        bottom = 256 + 16
        pygame.draw.line(self.surface, DARK_GRAY, (8, bottom - 100),
                         (3 * fef_data_len + 8, bottom - 100), 1)
        for i, data in enumerate(fef_data):
            likelihood = data[0]
            left = 8 + 3 * i
            top = bottom - 100 * likelihood
            pygame.draw.line(self.surface, WHITE, (left, top), (left, bottom), 1)
        self.draw_center_text("likelihoods", (8 + 3 * fef_data_len) // 2, bottom + 8)

    def show_fef_data_grid(self, fef_data):
        grid_division = 8
        grid_width = 128 // grid_division

        likelihoods0 = []
        likelihoods1 = []
        likelihoods2 = []
        data_len = len(fef_data) // 3
        for i in range(data_len):
            likelihoods0.append(fef_data[i][0])
            likelihoods1.append(fef_data[i + data_len][0])
            likelihoods2.append(fef_data[i + data_len * 2][0])

        self.show_grid(likelihoods0, 0, grid_division, grid_width,
                       8, 300, "saliency acc")
        self.show_grid(likelihoods1, 0, grid_division, grid_width,
                       8 + 128, 300, "cursor acc")
        self.show_grid(likelihoods2, 0, grid_division, grid_width,
                       8 + 128 * 2, 300, "error acc")

    def show_vae_reconstruction_grid(self, vae_data):
        data_len = len(vae_data)
        width = 128.0
        bottom = 650
        left = 8 - width
        for i, (key, image) in enumerate(vae_data.items()):
            image = np.array(image) * 255.0
            image = np.array(image, dtype=np.uint8)
            image = cv2.resize(image, (128, 128))
            image = np.ascontiguousarray(image, dtype=np.uint8)
            left += width
            if i == 4:
                left = 8
                bottom = 800
            self.show_image(image, left, bottom, "vae:" + key)

    def show_current_task(self, task):
        self.draw_text("PFC task: {}".format(task), 24, 950)

    def show_vae_errors(self, pixel_errors):
        data_len = len(pixel_errors)
        width = 128.0
        bottom = 980
        left = 8 - width
        for i, (key, error) in enumerate(pixel_errors.items()):
            error = np.reshape(error, list(error.shape) + [1]) * 255.0
            error = np.tile(error, [1, 1, 3])
            error = np.array(error, dtype=np.uint8)
            error = cv2.resize(error, (128, 128))
            error = np.ascontiguousarray(error, dtype=np.uint8)
            left += width
            if i == 4:
                left = 8
                bottom = 1140
            self.show_image(error, left, bottom, "error:" + key)

    def show_grid(self, data, offset, grid_division, grid_width, left, top, label):
        index = 0
        for ix in range(grid_division):
            x = grid_width * ix
            for iy in range(grid_division):
                y = grid_width * iy
                likelihood = data[index]
                c = int(likelihood * 255.0)
                color = (c, c, c)
                pygame.draw.rect(
                    self.surface, color,
                    Rect(left + x, top + y, grid_width, grid_width))
                index += 1
        pygame.draw.rect(self.surface, DARK_GRAY, Rect(left, top, 128, 128), 1)
        self.draw_center_text(label, 128 / 2 + left, top + 128 + 8)

    def process(self):
        self.step += 1
        action = self.agent(self.last_image, self.last_angle,
                            self.last_reward, self.last_done)
        obs, reward, done, _ = self.env.step(action)

        self.episode_reward += reward
        # TODO: remove this
        done = done or self.step % 180 == 0
        if done:
            obs = self.env.reset()
            print("\033[93m {}step, {}episode reward={} \033[0m".format(
                self.step, self.episode, self.episode_reward))
            self.episode += 1
            self.episode_reward = 0

        image = obs['screen']
        angle = obs['angle']

        self.show_reward()
        self.show_original_image(image)

        if self.retina.last_retina_image is not None:
            self.show_retina_image(self.retina.last_retina_image)
        if self.lip.last_saliency_map is not None:
            self.show_saliency_map(self.lip.last_saliency_map)
        if self.lip.last_optical_flow is not None:
            self.show_optical_flow(self.lip.last_optical_flow)
        if self.sc.last_fef_data is not None:
            self.show_fef_data_bars(self.sc.last_fef_data)
            self.show_fef_data_grid(self.sc.last_fef_data)
        if self.hp.map_image is not None:
            self.show_map_image(self.hp.map_image)
        if self.bg.last_bg_data is not None:
            self.show_bg_data_bars(self.bg.last_bg_data)
        if self.vc.last_vae_reconstruction is not None:
            self.show_vae_reconstruction_grid(self.vc.last_vae_reconstruction)
        if self.pfc.last_current_task is not None:
            self.show_current_task(self.pfc.last_current_task)
        if self.vc.last_vae_top_errors is not None:
            self.show_vae_errors(self.vc.last_vae_top_errors)

        self.last_image = image
        self.last_angle = angle
        self.last_reward = reward
        self.last_done = done
        return done

    def get_frame(self):
        data = self.surface.get_buffer().raw
        image = np.fromstring(data, dtype=np.uint8)
        image = image.reshape((self.display_size[1], self.display_size[0], 3))
        return image
def train(content, step_size, logger):
    retina = Retina()
    lip = LIP()
    vc = VC()
    pfc = PFC()
    fef = FEF()
    bg = BG(init_weight_path="./data/bg.pth")
    sc = SC()
    hp = HP()
    cb = CB()

    agent = Agent(retina=retina, lip=lip, vc=vc, pfc=pfc, fef=fef,
                  bg=bg, sc=sc, hp=hp, cb=cb)

    env = Environment(content)

    # If your training code is written inside the BG module, add model load
    # code here, like:
    #
    #   bg.load_model("model.pkl")
    #
    # When running with Docker, the directory under 'oculomotor/' is a volume
    # shared with the host, so you can load/save the model data anywhere under
    # the 'oculomotor/' dir.

    obs = env.reset()
    reward = 0
    done = False

    episode_reward = 0
    episode_count = 0
    step = 0

    # Add the initial reward log
    logger.log("episode_reward", episode_reward, episode_count)

    for i in range(step_size):
        image, angle = obs['screen'], obs['angle']

        # Choose action by the agent's decision
        action = agent(image, angle, reward, done)

        # Forward the environment one step
        obs, reward, done, _ = env.step(action)

        episode_reward += reward
        step += 1
        if step % EPI_THRESHOLD == 0:
            done = True
        if done:
            obs = env.reset()
            print("episode reward={}".format(episode_reward))

            # Store log for the tensorboard graph
            episode_count += 1
            logger.log("episode_reward", episode_reward, episode_count)
            episode_reward = 0
            step = 0

    # Please add model save code as you like, e.g.:
    #
    #   if i % 10 == 0:
    #       bg.save_model("model.pkl")

    print("training finished")
    logger.close()
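# A hypothetical invocation of train(); the Logger name and its constructor
# argument are assumptions for illustration (the real project supplies its own
# tensorboard logger exposing the log() and close() methods used above).
content = PointToTargetContent()
logger = Logger('log/bg_train')  # hypothetical logger construction
train(content, step_size=100000, logger=logger)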
class Inspector(object):
    def __init__(self, content, display_size):
        pygame.init()
        self.surface = pygame.display.set_mode(display_size, 0, 24)
        pygame.display.set_caption('oculomotor')

        self.retina = Retina()
        self.lip = LIP()
        self.vc = VC()
        self.pfc = PFC()
        self.fef = FEF()
        #self.bg = BG()  # Train mode
        self.bg = BG(train=False, backprop=False)  # Test mode
        self.bg.agent.load("./results/exp20_after")  # Test mode
        self.sc = SC()
        self.hp = HP()
        self.cb = CB()

        self.agent = Agent(retina=self.retina, lip=self.lip, vc=self.vc,
                           pfc=self.pfc, fef=self.fef, bg=self.bg,
                           sc=self.sc, hp=self.hp, cb=self.cb)

        self.env = Environment(content)
        obs = self.env.reset()

        self.last_image = obs['screen']
        self.last_angle = obs['angle']
        self.last_reward = 0
        self.last_done = False

        self.episode_reward = 0
        self.font = pygame.font.Font(None, 20)
        self.display_size = display_size

    def update(self):
        self.surface.fill(BLACK)
        done = self.process()
        pygame.display.update()
        return done

    def draw_text(self, str, left, top, color=WHITE):
        text = self.font.render(str, True, color, BLACK)
        text_rect = text.get_rect()
        text_rect.left = left
        text_rect.top = top
        self.surface.blit(text, text_rect)

    def draw_center_text(self, str, center_x, top):
        text = self.font.render(str, True, WHITE, BLACK)
        text_rect = text.get_rect()
        text_rect.centerx = center_x
        text_rect.top = top
        self.surface.blit(text, text_rect)

    def show_original_image(self, image):
        image_ = image * 1.0
        data = image_.astype(np.uint8)
        self.show_image(data, 8, 8, "input")

    def show_retina_image(self, image):
        image_ = image * 1.0
        data = image_.astype(np.uint8)
        self.show_image(data, 128 + 8, 8, "retina")

    def show_saliency_map(self, saliency_map):
        saliency_map_ = np.clip(saliency_map * 255.0, 0.0, 255.0)
        data = saliency_map_.astype(np.uint8)
        data = np.stack([data for _ in range(3)], axis=2)
        self.show_image(data, 128 * 2 + 8, 8, "saliency")

    def show_action_map(self, action_map):
        action_map_std = (action_map - np.min(action_map)) / (
            np.max(action_map) - np.min(action_map))
        data = action_map_std
        self.show_grid(data, 0, 8, 16, 8, 380, "action")

    def show_thres_map(self, thresholds):
        # Avoid division by zero when all thresholds are equal
        denom = np.max(thresholds[:64]) - np.min(thresholds[:64])
        if denom <= 0:
            denom = 1e-10
        sal_std = (thresholds[:64] - np.min(thresholds[:64])) / denom
        #cur_std = (thresholds[64:] - np.min(thresholds[64:])) / (np.max(thresholds[64:]) - np.min(thresholds[64:]))
        self.show_grid(sal_std, 0, 8, 16, 128 + 8, 200, "saliency baseline")
        #self.show_grid(cur_std, 0, 8, 16, 128 * 3 + 8, 200, "cursor thres")

    def show_optical_flow(self, optical_flow):
        # Show optical flow with an HSV color image
        image = self.get_optical_flow_hsv(optical_flow)

        # Draw optical flow direction with lines
        step = 16
        h, w = optical_flow.shape[:2]
        y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1).astype(int)
        fx, fy = optical_flow[y, x].T
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        cv2.polylines(image, lines, 0, (0, 255, 0))
        for (x1, y1), (x2, y2) in lines:
            cv2.circle(image, (x1, y1), 1, (0, 255, 0), -1)
        self.show_image(image, 128 * 3 + 8, 8, "opt_flow")

    def show_map_image(self, map_image):
        # Show the allocentric map image in the hippocampal formation.
        self.show_image(map_image, 128 * 3 + 8, 300, "allocentric map")

    def get_optical_flow_hsv(self, optical_flow):
        h, w = optical_flow.shape[:2]
        fx, fy = optical_flow[:, :, 0], optical_flow[:, :, 1]
        ang = np.arctan2(fy, fx) + np.pi
        v = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((h, w, 3), np.uint8)
        hsv[..., 0] = ang * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = np.minimum(v * 4, 255)
        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        return image

    def show_image(self, data, left, top, label):
        image = pygame.image.frombuffer(data, (128, 128), 'RGB')
        self.surface.blit(image, (left, top))
        self.draw_center_text(label, 128 / 2 + left, top + 128 + 8)
        pygame.draw.rect(self.surface, DARK_GRAY, Rect(left, top, 128, 128), 1)

    def show_reward(self):
        self.draw_text("REWARD: {}".format(int(self.episode_reward)),
                       128 * 3 + 24, 380)
        #print(self.bg.reward)
        self.draw_text("PHASE: {}".format(int(self.pfc.phase)),
                       128 * 3 + 24, 400)

    def show_fef_data_bars(self, fef_data):
        fef_data_len = len(fef_data)
        bottom = 256 + 16
        pygame.draw.line(self.surface, DARK_GRAY, (8, bottom - 100),
                         (3 * fef_data_len + 8, bottom - 100), 1)
        for i, data in enumerate(fef_data):
            likelihood = data[0]
            left = 8 + 3 * i
            top = bottom - 100 * likelihood
            pygame.draw.line(self.surface, WHITE, (left, top), (left, bottom), 1)
        self.draw_center_text("likelihoods", (8 + 3 * fef_data_len) // 2, bottom + 8)

    def show_fef_data_grid(self, fef_data):
        grid_division = int(math.sqrt(len(fef_data)))
        grid_width = 128 // grid_division

        likelihoods0 = []
        #likelihoods1 = []
        data_len = len(fef_data)
        for i in range(data_len):
            likelihoods0.append(fef_data[i][0])
            #likelihoods1.append(fef_data[i + data_len][0])

        self.show_grid(likelihoods0, 0, grid_division, grid_width,
                       8, 200, "saliency acc")
        #self.show_grid(likelihoods1, 0, grid_division, grid_width, 8 + 128 * 2,
        #               200, "cursor acc")

    def show_grid(self, data, offset, grid_division, grid_width, left, top, label):
        index = 0
        for ix in range(grid_division):
            x = grid_width * ix
            for iy in range(grid_division):
                y = grid_width * iy
                likelihood = data[index]
                c = int(likelihood * 255.0)
                color = (c, c, c)
                pygame.draw.rect(
                    self.surface, color,
                    Rect(left + x, top + y, grid_width, grid_width))
                index += 1
        pygame.draw.rect(self.surface, DARK_GRAY, Rect(left, top, 128, 128), 1)
        self.draw_center_text(label, 128 / 2 + left, top + 128 + 8)

    def process(self):
        action = self.agent(self.last_image, self.last_angle,
                            self.last_reward, self.last_done)
        obs, reward, done, _ = self.env.step(action)

        self.episode_reward += reward
        if done:
            obs = self.env.reset()
            self.episode_reward = 0

        image = obs['screen']
        angle = obs['angle']

        self.show_reward()
        self.show_original_image(image)

        if self.retina.last_retina_image is not None:
            self.show_retina_image(self.retina.last_retina_image)
        if self.lip.last_saliency_map is not None:
            self.show_saliency_map(self.lip.last_saliency_map)
        if self.sc.last_sc_data is not None:
            self.show_action_map(self.sc.last_sc_data)
        if self.lip.last_optical_flow is not None:
            self.show_optical_flow(self.lip.last_optical_flow)
        if self.sc.last_fef_data is not None:
            #self.show_fef_data_bars(self.sc.last_fef_data)
            self.show_fef_data_grid(self.sc.last_fef_data)
        if self.sc.baseline is not None:
            #self.show_fef_data_bars(self.sc.last_fef_data)
            self.show_thres_map(self.sc.baseline)
        #if self.hp.map_image is not None:
        #    self.show_map_image(self.hp.map_image)

        self.last_image = image
        self.last_angle = angle
        self.last_reward = reward
        self.last_done = done
        return done

    def get_frame(self):
        data = self.surface.get_buffer().raw
        image = np.fromstring(data, dtype=np.uint8)
        image = image.reshape((self.display_size[1], self.display_size[0], 3))
        return image
def train(content, step_size, logger, model_name, use_ppo_models):
    retina = Retina()
    lip = LIP()
    vc = VC()
    pfc = PFC()
    fef = FEF()
    bg = BG(model_name, use_saved_models=use_ppo_models)
    sc = SC()
    hp = HP()
    cb = CB()

    agent = Agent(
        retina=retina,
        lip=lip,
        vc=vc,
        pfc=pfc,
        fef=fef,
        bg=bg,
        sc=sc,
        hp=hp,
        cb=cb
    )

    env = Environment(content)

    # If your training code is written inside the BG module, add model load
    # code here, like:
    #
    #   bg.load_model("model.pkl")
    #
    # When running with Docker, the directory under 'oculomotor/' is a volume
    # shared with the host, so you can load/save the model data anywhere under
    # the 'oculomotor/' dir.

    obs = env.reset()
    reward = 0
    done = False

    episode_reward = 0
    episode_count = 0

    # Add the initial reward log
    logger.log("episode_reward", episode_reward, episode_count)

    for i in range(step_size):
        image, angle = obs['screen'], obs['angle']

        # Choose action by the agent's decision
        action = agent(image, angle, reward, done)

        # Forward the environment one step
        obs, reward, done, _ = env.step(action)

        episode_reward += reward

        # TODO: remove this
        done = done or i % 180 == 0
        if done:
            obs = env.reset()
            print("\033[93m {} step, {} episode reward={} \033[0m".format(
                i, episode_count, episode_reward))

            # Store log for the tensorboard graph
            episode_count += 1
            logger.log("episode_reward", episode_reward, episode_count)
            episode_reward = 0

        # Please add model save code as you like.
        if i % 5000 == 0:
            bg.save_model()

    print("training finished")
    logger.close()
def train(content, step_size, logger, log_path):
    starttime = time.time()

    retina = Retina()
    lip = LIP()
    vc = VC()
    pfc = PFC()
    fef = FEF()
    bg = BG(log_path=log_path)
    sc = SC()
    hp = HP()
    cb = CB()

    agent = Agent(retina=retina, lip=lip, vc=vc, pfc=pfc, fef=fef,
                  bg=bg, sc=sc, hp=hp, cb=cb)

    env = Environment(content)

    # If your training code is written inside the BG module, add model load
    # code here, like:
    #
    #   bg.load_model("model.pkl")
    #
    # When running with Docker, the directory under 'oculomotor/' is a volume
    # shared with the host, so you can load/save the model data anywhere under
    # the 'oculomotor/' dir.

    obs = env.reset()
    reward = 0
    done = False

    episode_reward = 0
    episode_count = 0

    # Add the initial reward log
    logger.log("episode_reward", episode_reward, episode_count)

    #step_size = 10800
    while True:
        for i in range(step_size):
            image, angle = obs['screen'], obs['angle']

            # Choose action by the agent's decision
            action = agent(image, angle, reward, done)

            # Forward the environment one step
            obs, reward, done, _ = env.step(action)

            episode_reward += reward
            if done:
                print("episode count={}".format(episode_count))
                obs = env.reset()
                print("episode reward={}".format(episode_reward))

                # Store log for the tensorboard graph
                episode_count += 1
                logger.log("episode_reward", episode_reward, episode_count)
                episode_reward = 0

                # Please add model save code as you like.
                bg.save_model(str(episode_count) + "model")

        # Log and checkpoint once per step_size chunk as well
        episode_count += 1
        logger.log("episode_reward", episode_reward, episode_count)
        episode_reward = 0
        bg.save_model(str(episode_count) + "model")

    # Note: the while-loop above never exits, so this code is unreachable.
    print("training finished")
    logger.close()

    endtime = time.time()
    print('whole time:', endtime - starttime)