def _draw_status_pane(self, pane):
    '''Draw the one-line status bar: mode, layer:unit, backprop state, boost
    settings, FPS, and any transient extra message.

    pane: Pane whose .data buffer is cleared and drawn into.
    '''
    pane.data[:] = to_255(self.settings.window_background)
    defaults = {'face': getattr(cv2, self.settings.caffevis_status_face),
                'fsize': self.settings.caffevis_status_fsize,
                'clr': to_255(self.settings.caffevis_status_clr),
                'thick': self.settings.caffevis_status_thick}
    loc = self.settings.caffevis_status_loc[::-1]   # Reverse to OpenCV c,r order

    # Build the status text with Python 2 "print >>file" chevron syntax;
    # trailing commas suppress the newline so fields join on one line.
    status = StringIO.StringIO()
    fps = self.proc_thread.approx_fps()
    # Hold the lock while reading the many state fields so they are mutually
    # consistent (and so extra_msg can be consumed atomically).
    with self.state.lock:
        print >>status, 'pattern' if self.state.pattern_mode else ('back' if self.state.layers_show_back else 'fwd'),
        print >>status, '%s:%d |' % (self.state.layer, self.state.selected_unit),
        if not self.state.back_enabled:
            print >>status, 'Back: off',
        else:
            print >>status, 'Back: %s' % ('deconv' if self.state.back_mode == 'deconv' else 'bprop'),
            print >>status, '(from %s_%d, disp %s)' % (self.state.backprop_layer,
                                                       self.state.backprop_unit,
                                                       self.state.back_filt_mode),
        print >>status, '|',
        print >>status, 'Boost: %g/%g' % (self.state.layer_boost_indiv, self.state.layer_boost_gamma)
        if fps > 0:
            print >>status, '| FPS: %.01f' % fps
        if self.state.extra_msg:
            print >>status, '|', self.state.extra_msg
            # One-shot message: clear after showing it once.
            self.state.extra_msg = ''

    strings = [FormattedString(line, defaults) for line in status.getvalue().split('\n')]
    cv2_typeset_text(pane.data, strings, loc,
                     line_spacing = self.settings.caffevis_status_line_spacing)
def _draw_prob_labels_pane(self, pane):
    '''Adds text label annotation atop the given pane: the top-5 predicted
    classes with their probabilities, colored by confidence.'''
    if not self.labels or not self.state.show_label_predictions or not self.settings.caffevis_prob_layer:
        return

    defaults = {'face': getattr(cv2, self.settings.caffevis_class_face),
                'fsize': self.settings.caffevis_class_fsize,
                'clr': to_255(self.settings.caffevis_class_clr_0),
                'thick': self.settings.caffevis_class_thick}
    loc = self.settings.caffevis_class_loc[::-1]    # Reverse to OpenCV c,r order
    color_lo = to_255(self.settings.caffevis_class_clr_0)
    color_hi = to_255(self.settings.caffevis_class_clr_1)

    probs_flat = self.net.blobs[self.settings.caffevis_prob_layer].data.flatten()
    # Indices of the five largest probabilities, highest first.
    top_5 = probs_flat.argsort()[-1:-6:-1]

    strings = []
    for idx in top_5:
        prob = probs_flat[idx]
        fs = FormattedString('%.2f %s' % (prob, self.labels[idx]), defaults)
        # Blend linearly between the two configured colors by probability,
        # clamping each channel into [0, 255].
        fs.clr = tuple(max(0, min(255, color_hi[ch] * prob + color_lo[ch] * (1 - prob)))
                       for ch in range(3))
        strings.append([fs])    # each typeset line holds just this one string

    cv2_typeset_text(pane.data, strings, loc,
                     line_spacing = self.settings.caffevis_class_line_spacing)
def _draw_control_pane(self, pane):
    '''Render the list of layer names, styling the cursor layer, the selected
    layer, and (when frozen) the backprop source layer.'''
    pane.data[:] = to_255(self.settings.window_background)

    with self.state.lock:
        layer_idx = self.state.layer_idx

    loc = self.settings.caffevis_control_loc[::-1]    # Reverse to OpenCV c,r order

    defaults = {'face': getattr(cv2, self.settings.caffevis_control_face),
                'fsize': self.settings.caffevis_control_fsize,
                'clr': to_255(self.settings.caffevis_control_clr),
                'thick': self.settings.caffevis_control_thick}

    strings = []
    for ii, print_name in enumerate(self.layer_print_names):
        fs = FormattedString(print_name, defaults)
        this_layer = self.state._layers[ii]
        frozen_bp_here = (self.state.backprop_selection_frozen
                          and this_layer == self.state.backprop_layer)
        if frozen_bp_here:
            # This layer is frozen as the backprop source.
            fs.clr = to_255(self.settings.caffevis_control_clr_bp)
            fs.thick = self.settings.caffevis_control_thick_bp
        if this_layer == self.state.layer:
            if self.state.cursor_area == 'top':
                # Cursor is on the layer list: cursor styling wins.
                fs.clr = to_255(self.settings.caffevis_control_clr_cursor)
                fs.thick = self.settings.caffevis_control_thick_cursor
            elif not frozen_bp_here:
                # Selected but cursor elsewhere; keep bp styling if frozen here.
                fs.clr = to_255(self.settings.caffevis_control_clr_selected)
                fs.thick = self.settings.caffevis_control_thick_selected
        strings.append(fs)

    cv2_typeset_text(pane.data, strings, loc,
                     line_spacing = self.settings.caffevis_control_line_spacing,
                     wrap = True)
def draw_help(self):
    '''Overlay the help screen: dim the help buffers, typeset the base key
    bindings, then let each registered app append its own help section.'''
    # Darken the current frame so the help text stands out.
    self.help_buffer[:] *= .7
    self.help_pane.data *= .7

    defaults = {'face': getattr(cv2, self.settings.caffevis_help_face),
                'fsize': self.settings.caffevis_help_fsize,
                'clr': to_255(self.settings.caffevis_help_clr),
                'thick': self.settings.caffevis_help_thick}
    loc = self.settings.caffevis_help_loc[::-1]    # Reverse to OpenCV c,r order

    lines = [
        [FormattedString('~ ~ ~ Deep Visualization Toolbox ~ ~ ~', defaults,
                         align='center', width=self.help_pane.j_size)],
        [FormattedString('', defaults)],
        [FormattedString('Base keys', defaults)],
    ]

    base_tags = ('help_mode', 'freeze_cam', 'toggle_input_mode',
                 'static_file_increment', 'static_file_decrement',
                 'stretch_mode', 'quit')
    for tag in base_tags:
        key_strings, help_string = self.bindings.get_key_help(tag)
        label = '%10s:' % (','.join(key_strings))
        lines.append([FormattedString(label, defaults, width=120, align='right'),
                      FormattedString(help_string, defaults)])

    locy = cv2_typeset_text(self.help_pane.data, lines, loc,
                            line_spacing=self.settings.caffevis_help_line_spacing)

    # Each app stacks its own help below the base keys.
    for app_name, app in self.apps.iteritems():
        locy = app.draw_help(self.help_pane, locy)
def draw_help(self):
    """Dim the on-screen buffers and draw the global help overlay.

    Lists the base key bindings first, then delegates to every registered
    app so each can append its own key help below.
    """
    self.help_buffer[:] *= 0.7
    self.help_pane.data *= 0.7

    defaults = {
        "face": getattr(cv2, self.settings.caffevis_help_face),
        "fsize": self.settings.caffevis_help_fsize,
        "clr": to_255(self.settings.caffevis_help_clr),
        "thick": self.settings.caffevis_help_thick,
    }
    loc = self.settings.caffevis_help_loc[::-1]  # Reverse to OpenCV c,r order

    title = FormattedString("~ ~ ~ Deep Visualization Toolbox ~ ~ ~", defaults,
                            align="center", width=self.help_pane.j_size)
    lines = [[title],
             [FormattedString("", defaults)],
             [FormattedString("Base keys", defaults)]]

    for tag in ("help_mode", "freeze_cam", "toggle_input_mode",
                "static_file_increment", "static_file_decrement",
                "stretch_mode", "quit"):
        key_strings, help_string = self.bindings.get_key_help(tag)
        label = "%10s:" % (",".join(key_strings))
        lines.append([FormattedString(label, defaults, width=120, align="right"),
                      FormattedString(help_string, defaults)])

    locy = cv2_typeset_text(self.help_pane.data, lines, loc,
                            line_spacing=self.settings.caffevis_help_line_spacing)

    # Stack each app's help section beneath ours.
    for app_name, app in self.apps.iteritems():
        locy = app.draw_help(self.help_pane, locy)
def draw_help(self, help_pane, locy):
    '''Typeset this app's key help into help_pane starting at row locy.

    help_pane: Pane to draw into.
    locy:      Row at which to start drawing.
    Returns the next free row, so callers can stack further sections.
    '''
    defaults = {'face': getattr(cv2, self.settings.help_face),
                'fsize': self.settings.help_fsize,
                'clr': to_255(self.settings.help_clr),
                'thick': self.settings.help_thick}
    loc_base = self.settings.help_loc[::-1]    # Reverse to OpenCV c,r order
    locx = loc_base[0]

    lines = [[FormattedString('', defaults)],
             [FormattedString('Caffevis keys', defaults)]]

    # Key lists for the four directions, normal then fast variants.
    nav_keys = [self.bindings.get_key_help(tag)[0]
                for tag in ('sel_left', 'sel_right', 'sel_up', 'sel_down')]
    nav_keys_fast = [self.bindings.get_key_help(tag)[0]
                     for tag in ('sel_left_fast', 'sel_right_fast',
                                 'sel_up_fast', 'sel_down_fast')]

    keys_nav_0 = ','.join(kk[0] for kk in nav_keys)
    keys_nav_1 = ''
    if all(len(kk) > 1 for kk in nav_keys):
        # Every direction has an alternate binding; mention those too.
        keys_nav_1 = ' or ' + ','.join(kk[1] for kk in nav_keys)
    keys_nav_f = ','.join(kk[0] for kk in nav_keys_fast)
    nav_string = 'Navigate with %s%s. Use %s to move faster.' % (keys_nav_0, keys_nav_1, keys_nav_f)
    lines.append([FormattedString('', defaults, width=120, align='right'),
                  FormattedString(nav_string, defaults)])

    for tag in ('sel_layer_left', 'sel_layer_right', 'zoom_mode', 'pattern_mode',
                'ez_back_mode_loop', 'freeze_back_unit', 'show_back', 'back_mode',
                'back_filt_mode', 'boost_gamma', 'boost_individual', 'reset_state'):
        key_strings, help_string = self.bindings.get_key_help(tag)
        label = '%10s:' % (','.join(key_strings))
        lines.append([FormattedString(label, defaults, width=120, align='right'),
                      FormattedString(help_string, defaults)])

    locy = cv2_typeset_text(help_pane.data, lines, (locx, locy),
                            line_spacing = self.settings.help_line_spacing)
    return locy
def draw_help(self):
    '''Render the help overlay: fade the current frame, print the base key
    bindings, then append each app's own help section.'''
    # Fade both buffers toward black. The assignment form (rather than *=)
    # stores the scaled float result back through the buffer's dtype.
    self.help_buffer[:] = self.help_buffer[:] * .7
    self.help_pane.data[:] = self.help_pane.data[:] * .7

    loc = self.settings.help_loc[::-1]    # Reverse to OpenCV c,r order
    defaults = self.help_pane_defaults

    lines = [
        [FormattedString('~ ~ ~ Deep Visualization Toolbox ~ ~ ~', defaults,
                         align='center', width=self.help_pane.j_size)],
        [FormattedString('', defaults)],
        [FormattedString('Base keys', defaults)],
    ]
    for tag in ('help_mode', 'freeze_cam', 'toggle_input_mode',
                'static_file_increment', 'static_file_decrement',
                'stretch_mode', 'quit'):
        key_strings, help_string = self.bindings.get_key_help(tag)
        label = '%10s:' % (','.join(key_strings))
        lines.append([FormattedString(label, defaults, width=120, align='right'),
                      FormattedString(help_string, defaults)])

    locy = cv2_typeset_text(self.help_pane.data, lines, loc,
                            line_spacing = self.settings.help_line_spacing)
    for app_name, app in self.apps.iteritems():
        locy = app.draw_help(self.help_pane, locy)
def run_loop(self):
    '''Main UI loop: pump heartbeats, handle keys, fetch camera/static frames,
    let apps process and draw, and blit the composed buffer to the window.

    Runs until self.quit is set, then shuts down the input thread and apps.
    '''
    self.quit = False
    # Setup
    self.init_window()
    #cap = cv2.VideoCapture(self.settings.capture_device)

    # Background thread that owns the camera / static-file input.
    self.input_updater = InputImageFetcher(self.settings)
    self.input_updater.bind_camera()
    self.input_updater.start()

    heartbeat_functions = [self.input_updater.heartbeat]
    for app_name, app in self.apps.iteritems():
        print 'Starting app:', app_name
        app.start()
        heartbeat_functions.extend(app.get_heartbeats())

    # Loop bookkeeping. The large initial values force the "enough iterations
    # since keypress/redraw" conditions to be true on the first pass.
    ii = 0
    since_keypress = 999
    since_redraw = 999
    since_imshow = 0
    last_render = time.time() - 999
    latest_frame_idx = None
    latest_frame_data = None
    frame_for_apps = None
    redraw_needed = True    # Force redraw the first time
    imshow_needed = True
    while not self.quit:
        # Call any heartbeats
        for heartbeat in heartbeat_functions:
            #print 'Heartbeat: calling', heartbeat
            heartbeat()
        # Handle key presses
        keys = []
        # Collect key presses (multiple if len(range)>1)
        for cc in range(1):
            with WithTimer('LiveVis:waitKey', quiet = self.debug_level < 2):
                # waitKey doubles as the main-loop sleep.
                key = cv2.waitKey(self.settings.main_loop_sleep_ms)
            if key == -1:
                break
            else:
                keys.append(key)
                #print 'Got key:', key

        now = time.time()
        #print 'Since last:', now - last_render

        # skip_imshow is currently always False; the commented logic below
        # was an experiment in rate-limiting renders.
        skip_imshow = False
        #if now - last_render > .05 and since_imshow < 1:
        #    skip_imshow = True

        if skip_imshow:
            since_imshow += 1
        else:
            since_imshow = 0
            last_render = now

        #print '  Number of keys:', len(keys)
        for key in keys:
            since_keypress = 0
            #print 'Got Key:', key
            # Keys flow: pre-apps handler -> each app -> post-apps handler.
            key,do_redraw = self.handle_key_pre_apps(key)
            redraw_needed |= do_redraw
            imshow_needed |= do_redraw
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:handle_key' % app_name, quiet = self.debug_level < 1):
                    key = app.handle_key(key, self.panes)
            key = self.handle_key_post_apps(key)
            if self.quit:
                break
        for app_name, app in self.apps.iteritems():
            redraw_needed |= app.redraw_needed()

        # Grab latest frame from input_updater thread
        fr_idx,fr_data = self.input_updater.get_frame()
        is_new_frame = (fr_idx != latest_frame_idx and fr_data is not None)
        if is_new_frame:
            latest_frame_idx = fr_idx
            latest_frame_data = fr_data
            frame_for_apps = fr_data

        if is_new_frame:
            with WithTimer('LiveVis.display_frame', quiet = self.debug_level < 1):
                self.display_frame(latest_frame_data)
            imshow_needed = True

        # Feed the newest frame to apps, but pause processing briefly while
        # the user is actively typing (keypress_pause_handle_iterations).
        do_handle_input = (ii == 0 or
                           since_keypress >= self.settings.keypress_pause_handle_iterations)
        if frame_for_apps is not None and do_handle_input:
            # Pass frame to apps for processing
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:handle_input' % app_name, quiet = self.debug_level < 1):
                    app.handle_input(latest_frame_data, self.panes)
            frame_for_apps = None

        # Tell each app to draw
        do_redraw = (redraw_needed and
                     (since_keypress >= self.settings.keypress_pause_redraw_iterations or
                      since_redraw >= self.settings.redraw_at_least_every))
        if redraw_needed and do_redraw:
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:draw' % app_name, quiet = self.debug_level < 1):
                    imshow_needed |= app.draw(self.panes)
            redraw_needed = False
            since_redraw = 0

        # Render buffer
        if imshow_needed:
            # Only redraw pane debug if display will be updated
            if hasattr(self.settings, 'debug_window_panes') and self.settings.debug_window_panes:
                for pane_name,pane in self.panes.iteritems():
                    print pane_name, pane
                    pane.data[:] = pane.data * .5
                    line = [FormattedString('%s |' % pane_name, self.debug_pane_defaults),
                            FormattedString('pos: %d,%d |' % (pane.i_begin, pane.j_begin), self.debug_pane_defaults),
                            FormattedString('shape: %d,%d' % (pane.i_size, pane.j_size), self.debug_pane_defaults)]
                    cv2_typeset_text(pane.data, line, (5,20), line_spacing = 5, wrap = True)
                    # One-pixel colored border around each pane for debugging.
                    pane.data[:1,:] = pane_debug_clr
                    pane.data[-1:,:] = pane_debug_clr
                    pane.data[:,:1] = pane_debug_clr
                    pane.data[:,-1:] = pane_debug_clr
            with WithTimer('LiveVis:imshow', quiet = self.debug_level < 1):
                if self.help_mode:
                    # Copy main buffer to help buffer
                    self.help_buffer[:] = self.window_buffer[:]
                    self.draw_help()
                    cv2_imshow_rgb(self.window_name, self.help_buffer)
                else:
                    cv2_imshow_rgb(self.window_name, self.window_buffer)
            imshow_needed = False

        ii += 1
        since_keypress += 1
        since_redraw += 1
        if ii % 2 == 0 and self.settings.print_dots:
            sys.stdout.write('.')
            sys.stdout.flush()
        # Extra sleep just for debugging. In production all main loop sleep should be in cv2.waitKey.
        #time.sleep(2)

    # Shutdown: stop the input thread, release the camera, quit the apps.
    print '\n\nTrying to exit run_loop...'
    self.input_updater.quit = True
    self.input_updater.join(.01 + float(self.settings.input_updater_sleep_after_read_frame) * 5)
    if self.input_updater.is_alive():
        raise Exception('Could not join self.input_updater thread')
    else:
        self.input_updater.free_camera()
    for app_name, app in self.apps.iteritems():
        print 'Quitting app:', app_name
        app.quit()
    print 'Input thread joined and apps quit; exiting run_loop.'
def _draw_layer_pane(self, pane):
    '''Returns the data shown in highres format, b01c order.

    Draws the current layer's units into `pane`: either activations/diffs
    from the network, or (in pattern mode) precomputed per-unit images
    loaded from disk. Also overlays the class label when hovering a
    labeled layer's unit.
    '''
    if self.state.layers_show_back:
        layer_dat_3D = self.net.blobs[self.state.layer].diff[0]
    else:
        layer_dat_3D = self.net.blobs[self.state.layer].data[0]
    # Promote FC layers with shape (n) to have shape (n,1,1)
    if len(layer_dat_3D.shape) == 1:
        layer_dat_3D = layer_dat_3D[:,np.newaxis,np.newaxis]
    n_tiles = layer_dat_3D.shape[0]
    tile_rows,tile_cols = self.net_layer_info[self.state.layer]['tiles_rc']

    display_3D_highres = None
    if self.state.pattern_mode:
        # Show desired patterns loaded from disk
        load_layer = self.state.layer
        # Some layers remap to another layer's saved jpgvis images.
        if self.settings.caffevis_jpgvis_remap and self.state.layer in self.settings.caffevis_jpgvis_remap:
            load_layer = self.settings.caffevis_jpgvis_remap[self.state.layer]
        if self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers:
            jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
                                    'regularized_opt', load_layer, 'whole_layer.jpg')

            # Get highres version
            #cache_before = str(self.img_cache)
            display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None)
            #else:
            #    display_3D_highres = None

            if display_3D_highres is None:
                try:
                    with WithTimer('CaffeVisApp:load_sprite_image', quiet = self.debug_level < 1):
                        display_3D_highres = load_square_sprite_image(jpg_path, n_sprites = n_tiles)
                except IOError:
                    # File does not exist, so just display disabled.
                    pass
                else:
                    # Loaded successfully: remember it for next time.
                    self.img_cache.set((jpg_path, 'whole'), display_3D_highres)
            #cache_after = str(self.img_cache)
            #print 'Cache was / is:\n  %s\n  %s' % (cache_before, cache_after)

        if display_3D_highres is not None:
            # Get lowres version, maybe. Assume we want at least one pixel for selection border.
            row_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2)))
            col_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2)))
            ds = max(row_downsamp_factor, col_downsamp_factor)
            if ds > 1:
                #print 'Downsampling by', ds
                display_3D = display_3D_highres[:,::ds,::ds,:]
            else:
                display_3D = display_3D_highres
        else:
            display_3D = layer_dat_3D * 0   # nothing to show
    else:
        # Show data from network (activations or diffs)
        if self.state.layers_show_back:
            back_what_to_disp = self.get_back_what_to_disp()
            if back_what_to_disp == 'disabled':
                layer_dat_3D_normalized = np.tile(self.settings.window_background, layer_dat_3D.shape + (1,))
            elif back_what_to_disp == 'stale':
                layer_dat_3D_normalized = np.tile(self.settings.stale_background, layer_dat_3D.shape + (1,))
            else:
                # Negative values shown red, positive green.
                layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D,
                                                                boost_indiv = self.state.layer_boost_indiv,
                                                                boost_gamma = self.state.layer_boost_gamma,
                                                                neg_pos_colors = ((1,0,0), (0,1,0)))
        else:
            layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D,
                                                            boost_indiv = self.state.layer_boost_indiv,
                                                            boost_gamma = self.state.layer_boost_gamma)
        #print ' ===layer_dat_3D_normalized.shape', layer_dat_3D_normalized.shape, 'layer_dat_3D_normalized dtype', layer_dat_3D_normalized.dtype, 'range', layer_dat_3D_normalized.min(), layer_dat_3D_normalized.max()
        display_3D = layer_dat_3D_normalized

    # Convert to float if necessary:
    display_3D = ensure_float01(display_3D)
    # Upsample gray -> color if necessary
    # e.g. (1000,32,32) -> (1000,32,32,3)
    if len(display_3D.shape) == 3:
        display_3D = display_3D[:,:,:,np.newaxis]
    if display_3D.shape[3] == 1:
        display_3D = np.tile(display_3D, (1, 1, 1, 3))
    # Upsample unit length tiles to give a more sane tile / highlight ratio
    # e.g. (1000,1,1,3) -> (1000,3,3,3)
    if display_3D.shape[1] == 1:
        display_3D = np.tile(display_3D, (1, 3, 3, 1))

    if self.state.layers_show_back and not self.state.pattern_mode:
        padval = self.settings.caffevis_layer_clr_back_background
    else:
        padval = self.settings.window_background

    # Highlight the hovered unit and (if frozen) the backprop unit.
    highlights = [None] * n_tiles
    with self.state.lock:
        if self.state.cursor_area == 'bottom':
            highlights[self.state.selected_unit] = self.settings.caffevis_layer_clr_cursor  # in [0,1] range
        if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer:
            highlights[self.state.backprop_unit] = self.settings.caffevis_layer_clr_back_sel  # in [0,1] range

    _, display_2D = tile_images_make_tiles(display_3D, hw = (tile_rows,tile_cols),
                                           padval = padval, highlights = highlights)

    if display_3D_highres is None:
        display_3D_highres = display_3D

    # Display pane based on layers_pane_zoom_mode
    state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode
    assert state_layers_pane_zoom_mode in (0,1,2)
    if state_layers_pane_zoom_mode == 0:
        # Mode 0: normal display (activations or patterns)
        display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape)
    elif state_layers_pane_zoom_mode == 1:
        # Mode 1: zoomed selection
        unit_data = display_3D_highres[self.state.selected_unit]
        display_2D_resize = ensure_uint255_and_resize_to_fit(unit_data, pane.data.shape)
    else:
        # Mode 2: zoomed backprop pane
        display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) * 0

    pane.data[:] = to_255(self.settings.window_background)
    pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize

    if self.settings.caffevis_label_layers and self.state.layer in self.settings.caffevis_label_layers and self.labels and self.state.cursor_area == 'bottom':
        # Display label annotation atop layers pane (e.g. for fc8/prob)
        defaults = {'face': getattr(cv2, self.settings.caffevis_label_face),
                    'fsize': self.settings.caffevis_label_fsize,
                    'clr': to_255(self.settings.caffevis_label_clr),
                    'thick': self.settings.caffevis_label_thick}
        loc_base = self.settings.caffevis_label_loc[::-1]    # Reverse to OpenCV c,r order
        lines = [FormattedString(self.labels[self.state.selected_unit], defaults)]
        cv2_typeset_text(pane.data, lines, loc_base)

    return display_3D_highres
def _draw_layer_pane(self, pane):
    '''Returns the data shown in highres format, b01c order.

    Draws the current layer's units into `pane`: either activations/diffs
    from the network, or (in pattern mode) precomputed per-unit images
    loaded from disk. Also overlays the class label when hovering a
    labeled layer's unit.
    '''
    if self.state.layers_show_back:
        layer_dat_3D = self.net.blobs[self.state.layer].diff[0]
    else:
        layer_dat_3D = self.net.blobs[self.state.layer].data[0]
    # Promote FC layers with shape (n) to have shape (n,1,1)
    if len(layer_dat_3D.shape) == 1:
        layer_dat_3D = layer_dat_3D[:, np.newaxis, np.newaxis]
    n_tiles = layer_dat_3D.shape[0]
    tile_rows, tile_cols = self.net_layer_info[self.state.layer]['tiles_rc']

    display_3D_highres = None
    if self.state.pattern_mode:
        # Show desired patterns loaded from disk
        load_layer = self.state.layer
        # Some layers remap to another layer's saved jpgvis images.
        if self.settings.caffevis_jpgvis_remap and self.state.layer in self.settings.caffevis_jpgvis_remap:
            load_layer = self.settings.caffevis_jpgvis_remap[self.state.layer]
        if self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers:
            jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
                                    'regularized_opt', load_layer, 'whole_layer.jpg')

            # Get highres version
            #cache_before = str(self.img_cache)
            display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None)
            #else:
            #    display_3D_highres = None

            if display_3D_highres is None:
                try:
                    with WithTimer('CaffeVisApp:load_sprite_image', quiet=self.debug_level < 1):
                        display_3D_highres = load_square_sprite_image(jpg_path, n_sprites=n_tiles)
                except IOError:
                    # File does not exist, so just display disabled.
                    pass
                else:
                    # Loaded successfully: remember it for next time.
                    self.img_cache.set((jpg_path, 'whole'), display_3D_highres)
            #cache_after = str(self.img_cache)
            #print 'Cache was / is:\n  %s\n  %s' % (cache_before, cache_after)

        if display_3D_highres is not None:
            # Get lowres version, maybe. Assume we want at least one pixel for selection border.
            row_downsamp_factor = int(
                np.ceil(
                    float(display_3D_highres.shape[1]) /
                    (pane.data.shape[0] / tile_rows - 2)))
            col_downsamp_factor = int(
                np.ceil(
                    float(display_3D_highres.shape[2]) /
                    (pane.data.shape[1] / tile_cols - 2)))
            ds = max(row_downsamp_factor, col_downsamp_factor)
            if ds > 1:
                #print 'Downsampling by', ds
                display_3D = display_3D_highres[:, ::ds, ::ds, :]
            else:
                display_3D = display_3D_highres
        else:
            display_3D = layer_dat_3D * 0    # nothing to show
    else:
        # Show data from network (activations or diffs)
        if self.state.layers_show_back:
            back_what_to_disp = self.get_back_what_to_disp()
            if back_what_to_disp == 'disabled':
                layer_dat_3D_normalized = np.tile(
                    self.settings.window_background, layer_dat_3D.shape + (1, ))
            elif back_what_to_disp == 'stale':
                layer_dat_3D_normalized = np.tile(
                    self.settings.stale_background, layer_dat_3D.shape + (1, ))
            else:
                # Negative values shown red, positive green.
                layer_dat_3D_normalized = tile_images_normalize(
                    layer_dat_3D,
                    boost_indiv=self.state.layer_boost_indiv,
                    boost_gamma=self.state.layer_boost_gamma,
                    neg_pos_colors=((1, 0, 0), (0, 1, 0)))
        else:
            layer_dat_3D_normalized = tile_images_normalize(
                layer_dat_3D,
                boost_indiv=self.state.layer_boost_indiv,
                boost_gamma=self.state.layer_boost_gamma)
        #print ' ===layer_dat_3D_normalized.shape', layer_dat_3D_normalized.shape, 'layer_dat_3D_normalized dtype', layer_dat_3D_normalized.dtype, 'range', layer_dat_3D_normalized.min(), layer_dat_3D_normalized.max()
        display_3D = layer_dat_3D_normalized

    # Convert to float if necessary:
    display_3D = ensure_float01(display_3D)
    # Upsample gray -> color if necessary
    # e.g. (1000,32,32) -> (1000,32,32,3)
    if len(display_3D.shape) == 3:
        display_3D = display_3D[:, :, :, np.newaxis]
    if display_3D.shape[3] == 1:
        display_3D = np.tile(display_3D, (1, 1, 1, 3))
    # Upsample unit length tiles to give a more sane tile / highlight ratio
    # e.g. (1000,1,1,3) -> (1000,3,3,3)
    if display_3D.shape[1] == 1:
        display_3D = np.tile(display_3D, (1, 3, 3, 1))

    if self.state.layers_show_back and not self.state.pattern_mode:
        padval = self.settings.caffevis_layer_clr_back_background
    else:
        padval = self.settings.window_background

    # Highlight the hovered unit and (if frozen) the backprop unit.
    highlights = [None] * n_tiles
    with self.state.lock:
        if self.state.cursor_area == 'bottom':
            highlights[
                self.state.
                selected_unit] = self.settings.caffevis_layer_clr_cursor    # in [0,1] range
        if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer:
            highlights[
                self.state.
                backprop_unit] = self.settings.caffevis_layer_clr_back_sel    # in [0,1] range

    _, display_2D = tile_images_make_tiles(display_3D,
                                           hw=(tile_rows, tile_cols),
                                           padval=padval,
                                           highlights=highlights)

    if display_3D_highres is None:
        display_3D_highres = display_3D

    # Display pane based on layers_pane_zoom_mode
    state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode
    assert state_layers_pane_zoom_mode in (0, 1, 2)
    if state_layers_pane_zoom_mode == 0:
        # Mode 0: normal display (activations or patterns)
        display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape)
    elif state_layers_pane_zoom_mode == 1:
        # Mode 1: zoomed selection
        unit_data = display_3D_highres[self.state.selected_unit]
        display_2D_resize = ensure_uint255_and_resize_to_fit(unit_data, pane.data.shape)
    else:
        # Mode 2: zoomed backprop pane
        display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) * 0

    pane.data[:] = to_255(self.settings.window_background)
    pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize

    if self.settings.caffevis_label_layers and self.state.layer in self.settings.caffevis_label_layers and self.labels and self.state.cursor_area == 'bottom':
        # Display label annotation atop layers pane (e.g. for fc8/prob)
        defaults = {
            'face': getattr(cv2, self.settings.caffevis_label_face),
            'fsize': self.settings.caffevis_label_fsize,
            'clr': to_255(self.settings.caffevis_label_clr),
            'thick': self.settings.caffevis_label_thick
        }
        loc_base = self.settings.caffevis_label_loc[::-1]    # Reverse to OpenCV c,r order
        lines = [FormattedString(self.labels[self.state.selected_unit], defaults)]
        cv2_typeset_text(pane.data, lines, loc_base)

    return display_3D_highres
def run_loop(self):
    '''Main UI loop variant: pump heartbeats, handle keys, fetch frames, let
    apps process and draw, and blit the composed buffer to the window.

    NOTE(review): this version threads a `count` through the render branch
    and breaks out of the loop after the second non-help imshow — looks like
    a one-off capture/debug hack rather than production behavior; confirm
    before relying on it.
    '''
    self.quit = False
    # Setup
    self.init_window()
    #cap = cv2.VideoCapture(self.settings.capture_device)

    # Background thread that owns the camera / static-file input.
    self.input_updater = InputImageFetcher(self.settings)
    self.input_updater.bind_camera()
    self.input_updater.start()

    heartbeat_functions = [self.input_updater.heartbeat]
    for app_name, app in self.apps.iteritems():
        print 'Starting app:', app_name
        app.start()
        heartbeat_functions.extend(app.get_heartbeats())

    # Loop bookkeeping. Large initial values force the "enough iterations
    # since keypress/redraw" conditions to be true on the first pass.
    ii = 0
    since_keypress = 999
    since_redraw = 999
    since_imshow = 0
    last_render = time.time() - 999
    latest_frame_idx = None
    latest_frame_data = None
    frame_for_apps = None
    redraw_needed = True    # Force redraw the first time
    imshow_needed = True
    count = 0
    while not self.quit:
        # Call any heartbeats
        for heartbeat in heartbeat_functions:
            #print 'Heartbeat: calling', heartbeat
            heartbeat()
        # Handle key presses
        keys = []
        # Collect key presses (multiple if len(range)>1)
        for cc in range(1):
            with WithTimer('LiveVis:waitKey', quiet=self.debug_level < 2):
                # waitKey doubles as the main-loop sleep.
                key = cv2.waitKey(self.settings.main_loop_sleep_ms)
            if key == -1:
                break
            else:
                keys.append(key)
                #print 'Got key:', key

        now = time.time()
        #print 'Since last:', now - last_render

        # skip_imshow is currently always False; the commented logic below
        # was an experiment in rate-limiting renders.
        skip_imshow = False
        #if now - last_render > .05 and since_imshow < 1:
        #    skip_imshow = True

        if skip_imshow:
            since_imshow += 1
        else:
            since_imshow = 0
            last_render = now

        #print '  Number of keys:', len(keys)
        for key in keys:
            since_keypress = 0
            #print 'Got Key:', key
            # Keys flow: pre-apps handler -> each app -> post-apps handler.
            key, do_redraw = self.handle_key_pre_apps(key)
            redraw_needed |= do_redraw
            imshow_needed |= do_redraw
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:handle_key' % app_name, quiet=self.debug_level < 1):
                    key = app.handle_key(key, self.panes)
            key = self.handle_key_post_apps(key)
            if self.quit:
                break
        for app_name, app in self.apps.iteritems():
            redraw_needed |= app.redraw_needed()

        # Grab latest frame from input_updater thread
        fr_idx, fr_data = self.input_updater.get_frame()
        is_new_frame = (fr_idx != latest_frame_idx and fr_data is not None)
        if is_new_frame:
            latest_frame_idx = fr_idx
            latest_frame_data = fr_data
            frame_for_apps = fr_data

        if is_new_frame:
            with WithTimer('LiveVis.display_frame', quiet=self.debug_level < 1):
                self.display_frame(latest_frame_data)
            imshow_needed = True

        # Feed the newest frame to apps, pausing briefly while the user types.
        do_handle_input = (ii == 0 or
                           since_keypress >= self.settings.keypress_pause_handle_iterations)
        if frame_for_apps is not None and do_handle_input:
            # Pass frame to apps for processing
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:handle_input' % app_name, quiet=self.debug_level < 1):
                    app.handle_input(latest_frame_data, self.panes)
            frame_for_apps = None

        # Tell each app to draw
        do_redraw = (redraw_needed and
                     (since_keypress >= self.settings.keypress_pause_redraw_iterations or
                      since_redraw >= self.settings.redraw_at_least_every))
        if redraw_needed and do_redraw:
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:draw' % app_name, quiet=self.debug_level < 1):
                    imshow_needed |= app.draw(self.panes)
            redraw_needed = False
            since_redraw = 0

        # Render buffer
        if imshow_needed:
            # Only redraw pane debug if display will be updated
            if hasattr(self.settings, 'debug_window_panes') and self.settings.debug_window_panes:
                for pane_name, pane in self.panes.iteritems():
                    print pane_name, pane
                    pane.data[:] = pane.data * .5
                    line = [
                        FormattedString('%s |' % pane_name, self.debug_pane_defaults),
                        FormattedString('pos: %d,%d |' % (pane.i_begin, pane.j_begin), self.debug_pane_defaults),
                        FormattedString('shape: %d,%d' % (pane.i_size, pane.j_size), self.debug_pane_defaults)
                    ]
                    cv2_typeset_text(pane.data, line, (5, 20), line_spacing=5, wrap=True)
                    # One-pixel colored border around each pane for debugging.
                    pane.data[:1, :] = pane_debug_clr
                    pane.data[-1:, :] = pane_debug_clr
                    pane.data[:, :1] = pane_debug_clr
                    pane.data[:, -1:] = pane_debug_clr
            with WithTimer('LiveVis:imshow', quiet=self.debug_level < 1):
                if self.help_mode:
                    # Copy main buffer to help buffer
                    self.help_buffer[:] = self.window_buffer[:]
                    self.draw_help()
                    cv2_imshow_rgb(self.window_name, self.help_buffer, True)
                else:
                    # NOTE(review): presumably the extra bool arg toggles
                    # saving/flushing in cv2_imshow_rgb; the break below
                    # exits run_loop after the second render — confirm.
                    if count == 0:
                        cv2_imshow_rgb(self.window_name, self.window_buffer, False)
                    if count == 1:
                        cv2_imshow_rgb(self.window_name, self.window_buffer, True)
                        break
                    count += 1
            imshow_needed = False

        ii += 1
        since_keypress += 1
        since_redraw += 1
        if ii % 2 == 0 and self.settings.print_dots:
            sys.stdout.write('.')
            sys.stdout.flush()
        # Extra sleep just for debugging. In production all main loop sleep should be in cv2.waitKey.
        #time.sleep(2)

    # Shutdown: stop the input thread, release the camera, quit the apps.
    print '\n\nTrying to exit run_loop...'
    self.input_updater.quit = True
    self.input_updater.join(
        .01 + float(self.settings.input_updater_sleep_after_read_frame) * 5)
    if self.input_updater.is_alive():
        raise Exception('Could not join self.input_updater thread')
    else:
        self.input_updater.free_camera()
    for app_name, app in self.apps.iteritems():
        print 'Quitting app:', app_name
        app.quit()
    print 'Input thread joined and apps quit; exiting run_loop.'
def get_image_from_files(settings, unit_folder_path, should_crop_to_corner, resize_shape, first_only, captions=None, values=None):
    '''Build a square mosaic ("mega image") from the image files matching a glob.

    settings:              settings object; reads caffevis_clear_negative_activations
                           and the caffevis_score_* caption-style attributes.
    unit_folder_path:      glob pattern selecting the unit image files.
    should_crop_to_corner: if True, crop each loaded image via crop_to_corner(img, 2).
    resize_shape:          (height, width) of the returned uint8 RGB mosaic.
    first_only:            if True, use only the first (sorted) matching file,
                           filling the whole mosaic.
    captions:              optional per-image caption strings; drawn only when
                           there is exactly one caption per image.
    values:                optional per-image activation values; images whose
                           value is below float_info.epsilon are blanked when
                           settings.caffevis_clear_negative_activations is set.

    Returns a (resize_shape[0], resize_shape[1], 3) uint8 array; all-black on
    no matches or on any load/processing failure.

    Fixes vs. previous version: mutable default args replaced with None
    sentinels; bare `except:` narrowed to `except Exception` (no longer
    swallows KeyboardInterrupt/SystemExit) and the error is now reported;
    `np.math.ceil/sqrt` (removed in modern NumPy) replaced with np ufuncs;
    `/` replaced with `//` for explicit integer division (same result under
    Python 2, correct under Python 3); dead trailing `pass`/`return` removed.
    '''
    captions = [] if captions is None else captions
    values = [] if values is None else values
    # Black placeholder, also the early-exit / failure result.
    mega_image = np.zeros((resize_shape[0], resize_shape[1], 3), dtype=np.uint8)
    try:
        # List unit images in deterministic order.
        unit_images_path = sorted(glob.glob(unit_folder_path))
        if not unit_images_path:
            return mega_image
        if first_only:
            unit_images_path = [unit_images_path[0]]

        # Load all images.
        unit_images = [caffe_load_image(unit_image_path, color=True, as_uint=True)
                       for unit_image_path in unit_images_path]

        if settings.caffevis_clear_negative_activations and values:
            # Blank images whose activation value is effectively zero/negative.
            for i in range(len(values)):
                if values[i] < float_info.epsilon:
                    unit_images[i] *= 0

        if should_crop_to_corner:
            unit_images = [crop_to_corner(img, 2) for img in unit_images]

        num_images = len(unit_images)
        # Smallest square grid that holds all images.
        images_per_axis = int(np.ceil(np.sqrt(num_images)))
        padding_pixel = 1
        if first_only:
            single_resized_image_shape = (resize_shape[0] - 2 * padding_pixel,
                                          resize_shape[1] - 2 * padding_pixel)
        else:
            single_resized_image_shape = (resize_shape[0] // images_per_axis - 2 * padding_pixel,
                                          resize_shape[1] // images_per_axis - 2 * padding_pixel)
        unit_images = [ensure_uint255_and_resize_without_fit(unit_image, single_resized_image_shape)
                       for unit_image in unit_images]

        # Build mega image, adding captions only if we have exactly one per image.
        should_add_caption = (len(captions) == num_images)
        defaults = {'face': settings.caffevis_score_face,
                    'fsize': settings.caffevis_score_fsize,
                    'clr': to_255(settings.caffevis_score_clr),
                    'thick': settings.caffevis_score_thick}
        for i in range(num_images):
            if should_add_caption:
                loc = settings.caffevis_score_loc[::-1]    # Reverse to OpenCV c,r order
                fs = FormattedString(captions[i], defaults)
                cv2_typeset_text(unit_images[i], [[fs]], loc)
            cell_row = i // images_per_axis
            cell_col = i % images_per_axis
            height_start = 1 + cell_row * (single_resized_image_shape[0] + 2 * padding_pixel)
            height_end = height_start + single_resized_image_shape[0]
            width_start = 1 + cell_col * (single_resized_image_shape[1] + 2 * padding_pixel)
            width_end = width_start + single_resized_image_shape[1]
            mega_image[height_start:height_end, width_start:width_end, :] = unit_images[i]
        return mega_image
    except Exception as err:
        # Best-effort: report the failure and fall back to a black placeholder.
        print('\nAttempted to load files from %s but failed. ' % unit_folder_path)
        print('Error: %s' % err)
        return np.zeros((resize_shape[0], resize_shape[1], 3), dtype=np.uint8)