def DEP__update_frame(self, frame):
    if frame is not None:
        # Display frame in 'input' pane; resize (stretch) to pane size.
        # cv2.resize expects (cols, rows) order.
        with WithTimer('InputImageFetcher.update_frame: resize'):
            frame_disp = cv2.resize(frame[:], self.panes['input'].data.shape[:2][::-1])
        #print ' frame dtype', frame.dtype
        #print ' frame_disp dtype', frame_disp.dtype
        self.panes['input'].data[:] = frame_disp
        print 'InputImageFetcher: updated pane. Redrawing...'
        with WithTimer('InputImageFetcher.update_frame: render_caller'):
            self.render_caller.call()
        print 'InputImageFetcher: redrawing done.'
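# Minimal sketch of the (cols, rows) gotcha handled above: cv2.resize takes its
# size argument as (width, height), the reverse of a numpy shape's (rows, cols).
# The array sizes below are arbitrary example values.
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)        # (rows, cols, channels)
pane = np.zeros((300, 400, 3), dtype=np.uint8)         # destination buffer
frame_disp = cv2.resize(frame, pane.shape[:2][::-1])   # pass (cols, rows) = (400, 300)
assert frame_disp.shape[:2] == pane.shape[:2]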
def caffe_load_image(filename, color=True, as_uint=False):
    '''
    Copied from Caffe to simplify potential import problems.
    Load an image, converting from grayscale or alpha as needed.

    Take
    filename: string
    color: flag for color format. True (default) loads as RGB, while False
        loads as intensity (if the image is already grayscale).

    Give
    image: an image with type np.float32 in range [0, 1] of size
        (H x W x 3) in RGB or of size (H x W x 1) in grayscale.
    '''
    with WithTimer('imread', quiet=True):
        if as_uint:
            img = skimage.io.imread(filename)
        else:
            img = skimage.img_as_float(skimage.io.imread(filename)).astype(np.float32)
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
        if color:
            img = np.tile(img, (1, 1, 3))
    elif img.shape[2] == 4:
        img = img[:, :, :3]
    return img
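# Usage sketch for caffe_load_image: regardless of the source file's format,
# the default call yields float32 in [0, 1] with a 3-channel trailing axis.
# The path below is a hypothetical example.
img = caffe_load_image('examples/images/cat.jpg')
assert img.dtype == np.float32
assert img.ndim == 3 and img.shape[2] == 3
assert 0.0 <= img.min() and img.max() <= 1.0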
def on_mouse_click(self, event, x, y, flags, param):
    '''Handle all button presses.'''
    if event == cv2.EVENT_LBUTTONUP:
        for app_name, app in iter(self.apps.items()):
            with WithTimer('%s:on_mouse_click' % app_name, quiet=self.debug_level < 1):
                key = app.handle_mouse_left_click(x, y, flags, param, self.panes)
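# Registration sketch: OpenCV delivers mouse events to a callback bound to a
# named window via cv2.setMouseCallback. The window name and `livevis` instance
# below are placeholders, not the toolbox's exact wiring.
import cv2

cv2.namedWindow('main')
cv2.setMouseCallback('main', livevis.on_mouse_click)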
def run(self):
    print 'CaffeProcThread.run called'
    frame = None

    import caffe
    # Set the mode to CPU or GPU. Note: in the latest Caffe
    # versions, there is one Caffe object *per thread*, so the
    # mode must be set per thread! Here we set the mode for the
    # CaffeProcThread thread; it is also set in the main thread.
    if self.mode_gpu:
        caffe.set_mode_gpu()
        print 'CaffeVisApp mode (in CaffeProcThread): GPU'
    else:
        caffe.set_mode_cpu()
        print 'CaffeVisApp mode (in CaffeProcThread): CPU'

    while not self.is_timed_out():
        with self.state.lock:
            if self.state.quit:
                #print 'CaffeProcThread.run: quit is True'
                #print self.state.quit
                break

            #print 'CaffeProcThread.run: caffe_net_state is:', self.state.caffe_net_state
            #print 'CaffeProcThread.run loop: next_frame: %s, caffe_net_state: %s, back_enabled: %s' % (
            #    'None' if self.state.next_frame is None else 'Avail',
            #    self.state.caffe_net_state,
            #    self.state.back_enabled)

            frame = None
            run_fwd = False
            run_back = False
            if self.state.caffe_net_state == 'free' and time.time() - self.state.last_key_at > self.pause_after_keys:
                frame = self.state.next_frame
                self.state.next_frame = None
                back_enabled = self.state.back_enabled
                back_mode = self.state.back_mode
                back_stale = self.state.back_stale
                backprop_layer_def = self.state.get_current_backprop_layer_definition()
                backprop_unit = self.state.backprop_unit

                # Forward should be run for every new frame
                run_fwd = (frame is not None)
                # Backward should be run if back_enabled and (there was a new frame
                # OR back is stale (new backprop layer/unit selected))
                run_back = (back_enabled and (run_fwd or back_stale))
                self.state.caffe_net_state = 'proc' if (run_fwd or run_back) else 'free'

        #print 'run_fwd,run_back =', run_fwd, run_back

        if run_fwd:
            #print 'TIMING:, processing frame'
            self.frames_processed_fwd += 1

            if self.settings.is_siamese and ((type(frame), len(frame)) == (tuple, 2)):
                im_small = self.state.convert_image_pair_to_network_input_format(
                    self.settings, frame, self.input_dims)
            else:
                im_small = resize_without_fit(frame, self.input_dims)

            with WithTimer('CaffeProcThread:forward', quiet=self.debug_level < 1):
                net_preproc_forward(self.settings, self.net, im_small, self.input_dims)

        if run_back:
            if back_mode == BackpropMode.GRAD:
                with WithTimer('CaffeProcThread:backward', quiet=self.debug_level < 1):
                    self.state.backward_from_layer(self.net, backprop_layer_def, backprop_unit)
            elif back_mode == BackpropMode.DECONV_ZF:
                with WithTimer('CaffeProcThread:deconv', quiet=self.debug_level < 1):
                    self.state.deconv_from_layer(self.net, backprop_layer_def, backprop_unit,
                                                 'Zeiler & Fergus')
            elif back_mode == BackpropMode.DECONV_GB:
                with WithTimer('CaffeProcThread:deconv', quiet=self.debug_level < 1):
                    self.state.deconv_from_layer(self.net, backprop_layer_def, backprop_unit,
                                                 'Guided Backprop')

            with self.state.lock:
                self.state.back_stale = False

        if run_fwd or run_back:
            with self.state.lock:
                self.state.caffe_net_state = 'free'
                self.state.drawing_stale = True
            now = time.time()
            if self.last_process_finished_at:
                self.last_process_elapsed = now - self.last_process_finished_at
            self.last_process_finished_at = now
        else:
            time.sleep(self.loop_sleep)

    print 'CaffeProcThread.run: finished'
    print 'CaffeProcThread.run: processed %d frames fwd, %d frames back' % (
        self.frames_processed_fwd, self.frames_processed_back)
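# Sketch of the lock-guarded handoff this loop depends on. Class and function
# names here are placeholders, not the toolbox's actual types: the UI thread
# deposits a frame under the lock, the worker claims it, flips the shared flag
# to 'proc', and does the heavy work outside the lock.
import threading
import time

class SharedState(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.next_frame = None
        self.net_state = 'free'
        self.quit = False

def worker(state, process_fn):
    while True:
        frame = None
        with state.lock:
            if state.quit:
                break
            if state.net_state == 'free':
                frame, state.next_frame = state.next_frame, None
                if frame is not None:
                    state.net_state = 'proc'
        if frame is not None:
            process_fn(frame)              # forward/backward pass, no lock held
            with state.lock:
                state.net_state = 'free'
        else:
            time.sleep(0.01)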
def run(self): print 'KerasProcThread.run called' while not self.is_timed_out(): with self.state.lock: if self.state.quit: if self.debug_level == 3: print 'KerasProcThread.run: quit is: {}'.format( self.state.quit) break if self.debug_level == 3: print 'KerasProcThread.run: keras_net_state is: {}'.format( self.state.keras_net_state) print 'KerasProcThread.run loop: next_frame: {}, keras_net_state: {}, back_enabled: {}'.format( 'None' if self.state.next_frame is None else 'Avail', self.state.keras_net_state, self.state.back_enabled) frame = None run_fwd = False run_back = False if self.state.keras_net_state == 'free' and time.time( ) - self.state.last_key_at > self.pause_after_keys: frame = self.state.next_frame self.state.next_frame = None back_enabled = self.state.back_enabled back_mode = self.state.back_mode back_stale = self.state.back_stale # state_layer = self.state.layer # selected_unit = self.state.selected_unit backprop_layer = self.state.backprop_layer backprop_unit = self.state.backprop_unit # Forward should be run for every new frame run_fwd = (frame is not None) # Backward should be run if back_enabled and (there was a new frame OR back is stale (new backprop layer/unit selected)) run_back = (back_enabled and (run_fwd or back_stale)) self.state.keras_net_state = 'proc' if ( run_fwd or run_back) else 'free' if self.debug_level == 3: print 'run_fwd = {}, run_back = {}'.format(run_fwd, run_back) if run_fwd: if self.debug_level == 3: print 'TIMING:, processing frame' self.frames_processed_fwd += 1 with WithTimer('KerasProcThread:forward', quiet=self.debug_level < 1): with self.graph.as_default(): start_time = timeit.default_timer() if len(frame.shape) == 2: frame = expand_dims(frame, axis=0) outputs = [layer.output for layer in self.net.layers ] # all layer outputs functor = K.function( [self.net.input, K.learning_phase()], outputs) # evaluation function layer_outs = functor([frame, 0.]) self.net.intermediate_predictions = layer_outs elapsed = timeit.default_timer() - start_time print('self.net.predict function ran for', elapsed) if self.debug_level == 3: print('KerasProcThread:forward self.net.predict:', self.net.intermediate_predictions[-1][0]) if run_back: diffs = self.net.blobs[backprop_layer].diff * 0 diffs[0][backprop_unit] = self.net.blobs[backprop_layer].data[ 0, backprop_unit] assert back_mode in ('grad', 'deconv') if back_mode == 'grad': with WithTimer('KerasProcThread:backward', quiet=self.debug_level < 1): if self.debug_level == 3: print '**** Doing backprop with {} diffs in [{},{}]'.format( backprop_layer, diffs.min(), diffs.max()) try: self.net.backward_from_layer(backprop_layer, diffs, zero_higher=True) except AttributeError: print 'ERROR: required bindings (backward_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox' raise else: with WithTimer('KerasProcThread:deconv', quiet=self.debug_level < 1): if self.debug_level == 3: print '**** Doing deconv with {} diffs in [{},{}]'.format( backprop_layer, diffs.min(), diffs.max()) try: self.net.deconv_from_layer(backprop_layer, diffs, zero_higher=True) except AttributeError: print 'ERROR: required bindings (deconv_from_layer) not found! 
Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox' raise with self.state.lock: self.state.back_stale = False if run_fwd or run_back: with self.state.lock: self.state.keras_net_state = 'free' self.state.drawing_stale = True now = time.time() if self.last_process_finished_at: self.last_process_elapsed = now - self.last_process_finished_at self.last_process_finished_at = now else: time.sleep(self.loop_sleep) print 'KerasProcThread.run: finished' print 'KerasProcThread.run: processed {} frames fwd, {} frames back'.format( self.frames_processed_fwd, self.frames_processed_back)
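# Sketch of the intermediate-output fetch used in the forward pass above,
# assuming the TF1-era keras.backend API this thread targets. The toy model
# and input batch below are placeholders.
from keras.models import Sequential
from keras.layers import Dense
from keras import backend as K
import numpy as np

model = Sequential([Dense(8, input_shape=(4,)), Dense(2)])
batch = np.zeros((1, 4), dtype=np.float32)
outputs = [layer.output for layer in model.layers]             # all layer outputs
functor = K.function([model.input, K.learning_phase()], outputs)
layer_outs = functor([batch, 0.])                              # 0. = test phase (dropout off)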
def run_loop(self): self.quit = False # Setup self.init_window() #cap = cv2.VideoCapture(self.settings.capture_device) self.input_updater = InputImageFetcher(self.settings) self.input_updater.bind_camera() self.input_updater.start() heartbeat_functions = [self.input_updater.heartbeat] for app_name, app in self.apps.iteritems(): print 'Starting app:', app_name app.start() heartbeat_functions.extend(app.get_heartbeats()) ii = 0 since_keypress = 999 since_redraw = 999 since_imshow = 0 last_render = time.time() - 999 latest_frame_idx = None latest_frame_data = None frame_for_apps = None redraw_needed = True # Force redraw the first time imshow_needed = True while not self.quit: # Call any heartbeats for heartbeat in heartbeat_functions: #print 'Heartbeat: calling', heartbeat heartbeat() #print 'run_loop: sleeping .5...' #time.sleep(.5) #print 'run_loop: continuing' # Handle key presses #time.sleep(.2) keys = [] # Collect key presses, up to 10 for cc in range(1): with WithTimer('LiveVis:waitKey', quiet=self.debug_level < 2): key = cv2.waitKey(self.settings.main_loop_sleep_ms) if key == -1: break else: keys.append(key) #print 'Got key:', key now = time.time() #print 'Since last:', now - last_render skip_imshow = False #if now - last_render > .05 and since_imshow < 1: # skip_imshow = True if skip_imshow: since_imshow += 1 else: since_imshow = 0 last_render = now #print ' Number of keys:', len(keys) for key in keys: #if key != -1: since_keypress = 0 #print 'Got Key:', key key, do_redraw = self.handle_key_pre_apps(key) redraw_needed |= do_redraw imshow_needed |= do_redraw for app_name, app in self.apps.iteritems(): with WithTimer('%s:handle_key' % app_name, quiet=self.debug_level < 1): key = app.handle_key(key, self.panes) key = self.handle_key_post_apps(key) if self.quit: break for app_name, app in self.apps.iteritems(): redraw_needed |= app.redraw_needed() #if ii > 0: # print 'skipping...' 
# continue # Read frame #with WithTimer('reading'): # #if ii == 0: # if ii == 0 or since_keypress > 1: # frame_full = read_cam_frame(cap) # frame = crop_to_square(frame_full) ###print 'Main: acquiring lock' # Grab latest frame from input_updater thread fr_idx, fr_data = self.input_updater.get_frame() is_new_frame = (fr_idx != latest_frame_idx and fr_data is not None) if is_new_frame: latest_frame_idx = fr_idx latest_frame_data = fr_data frame_for_apps = fr_data if is_new_frame: with WithTimer('LiveVis.display_frame', quiet=self.debug_level < 1): self.display_frame(latest_frame_data) imshow_needed = True #redraw_needed = True do_handle_input = (ii == 0 or since_keypress >= self.settings.keypress_pause_handle_iterations) if frame_for_apps is not None and do_handle_input: # Pass frame to apps for processing for app_name, app in self.apps.iteritems(): with WithTimer('%s:handle_input' % app_name, quiet=self.debug_level < 1): app.handle_input(latest_frame_data, self.panes) frame_for_apps = None # Tell each app to draw do_redraw = (redraw_needed and (since_keypress >= self.settings.keypress_pause_redraw_iterations or since_redraw >= self.settings.redraw_at_least_every)) if redraw_needed and do_redraw: for app_name, app in self.apps.iteritems(): #print 'HERE +++' #with WithTimer('drawing ' + app_name): # app.draw(self.panes) with WithTimer('%s:draw' % app_name, quiet=self.debug_level < 1): imshow_needed |= app.draw(self.panes) redraw_needed = False since_redraw = 0 # Render buffer #HERE Skip every other time to see if it helps if imshow_needed: with WithTimer('LiveVis:imshow', quiet=self.debug_level < 1): if self.help_mode: # Copy main buffer to help buffer self.help_buffer[:] = self.window_buffer[:] self.draw_help() cv2_imshow_rgb(self.window_name, self.help_buffer) else: cv2_imshow_rgb(self.window_name, self.window_buffer) imshow_needed = False #if skip_imshow: # print ' * skipped imshow' #else: # print ' * ran imshow' #pass ii += 1 since_keypress += 1 since_redraw += 1 if ii % 2 == 0: sys.stdout.write('.') sys.stdout.flush() # Extra sleep for debugging. In production all main loop sleep should be in cv2.waitKey. #time.sleep(2) print '\n\nTrying to exit run_loop...' self.input_updater.quit = True self.input_updater.join( .01 + float(self.settings.input_updater_sleep_after_read_frame) * 5) if self.input_updater.is_alive(): raise Exception('Could not join self.input_updater thread') else: #print 'Final Is_alive: %s' % self.input_updater.is_alive() self.input_updater.free_camera() #print 'self.input_updater.bound_cap_device is', self.input_updater.bound_cap_device for app_name, app in self.apps.iteritems(): print 'Quitting app:', app_name app.quit() print 'Input thread joined and apps quit; exiting run_loop.'
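# Bare-bones shape of the event loop above: cv2.waitKey doubles as the
# main-loop sleep and the keyboard source. The timeout and quit keys are
# example values only.
import cv2

while True:
    key = cv2.waitKey(20)          # wait up to 20 ms; returns -1 if no key
    if key in (27, ord('q')):      # Esc or 'q' to quit
        break
    # handle key presses, fetch the latest frame, let apps draw, then imshow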
def run(self): print('CaffeProcThread.run called') frame = None import caffe # Set the mode to CPU or GPU. Note: in the latest Caffe # versions, there is one Caffe object *per thread*, so the # mode must be set per thread! Here we set the mode for the # CaffeProcThread thread; it is also set in the main thread. if self.mode_gpu: caffe.set_mode_gpu() print('CaffeVisApp mode (in CaffeProcThread): GPU') else: caffe.set_mode_cpu() print('CaffeVisApp mode (in CaffeProcThread): CPU') while not self.is_timed_out(): with self.state.lock: if self.state.quit: #print 'CaffeProcThread.run: quit is True' #print self.state.quit break #print 'CaffeProcThread.run: caffe_net_state is:', self.state.caffe_net_state #print 'CaffeProcThread.run loop: next_frame: %s, caffe_net_state: %s, back_enabled: %s' % ( # 'None' if self.state.next_frame is None else 'Avail', # self.state.caffe_net_state, # self.state.back_enabled) frame = None run_fwd = False run_back = False if self.state.caffe_net_state == 'free' and time.time( ) - self.state.last_key_at > self.pause_after_keys: frame = self.state.next_frame self.state.next_frame = None back_enabled = self.state.back_enabled back_mode = self.state.back_mode back_stale = self.state.back_stale #state_layer = self.state.layer #selected_unit = self.state.selected_unit backprop_layer = self.state.backprop_layer backprop_unit = self.state.backprop_unit # Forward should be run for every new frame run_fwd = (frame is not None) # Backward should be run if back_enabled and (there was a new frame OR back is stale (new backprop layer/unit selected)) run_back = (back_enabled and (run_fwd or back_stale)) self.state.caffe_net_state = 'proc' if ( run_fwd or run_back) else 'free' #print 'run_fwd,run_back =', run_fwd, run_back if run_fwd: #print 'TIMING:, processing frame' self.frames_processed_fwd += 1 if self.settings.static_files_input_mode == "siamese_image_list": frame1 = frame[0] frame2 = frame[1] im_small1 = cv2.resize(frame1, self.input_dims) im_small2 = cv2.resize(frame2, self.input_dims) im_small = np.concatenate((im_small1, im_small2), axis=2) else: im_small = cv2.resize(frame, self.input_dims) with WithTimer('CaffeProcThread:forward', quiet=self.debug_level < 1): net_preproc_forward(self.settings, self.net, im_small, self.input_dims) if run_back: diffs = self.net.blobs[backprop_layer].diff * 0 diffs[0][backprop_unit] = self.net.blobs[backprop_layer].data[ 0, backprop_unit] assert back_mode in ('grad', 'deconv') if back_mode == 'grad': with WithTimer('CaffeProcThread:backward', quiet=self.debug_level < 1): #print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) try: self.net.backward_from_layer(backprop_layer, diffs, zero_higher=True) except AttributeError: print( 'ERROR: required bindings (backward_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox' ) raise else: with WithTimer('CaffeProcThread:deconv', quiet=self.debug_level < 1): #print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) try: self.net.deconv_from_layer(backprop_layer, diffs, zero_higher=True) except AttributeError: print( 'ERROR: required bindings (deconv_from_layer) not found! 
Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox' ) raise with self.state.lock: self.state.back_stale = False if run_fwd or run_back: with self.state.lock: self.state.caffe_net_state = 'free' self.state.drawing_stale = True now = time.time() if self.last_process_finished_at: self.last_process_elapsed = now - self.last_process_finished_at self.last_process_finished_at = now else: time.sleep(self.loop_sleep) print('CaffeProcThread.run: finished') print('CaffeProcThread.run: processed %d frames fwd, %d frames back' % (self.frames_processed_fwd, self.frames_processed_back))
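# Siamese input sketch, as in the "siamese_image_list" branch above: both
# frames are resized to the network's spatial dims and stacked along the
# channel axis. Frames and dims are placeholder example values.
import cv2
import numpy as np

frame1 = np.zeros((480, 640, 3), dtype=np.uint8)
frame2 = np.zeros((480, 640, 3), dtype=np.uint8)
a = cv2.resize(frame1, (227, 227))            # (227, 227, 3)
b = cv2.resize(frame2, (227, 227))            # (227, 227, 3)
pair = np.concatenate((a, b), axis=2)         # (227, 227, 6)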
def run(self): print 'CaffeProcThread.run called' frame = None mask = None while not self.is_timed_out(): with self.state.lock: if self.state.quit: #print 'CaffeProcThread.run: quit is True' #print self.state.quit break #print 'CaffeProcThread.run: caffe_net_state is:', self.state.caffe_net_state #print 'CaffeProcThread.run loop: next_frame: %s, caffe_net_state: %s, back_enabled: %s' % ( # 'None' if self.state.next_frame is None else 'Avail', # self.state.caffe_net_state, # self.state.back_enabled) frame = None mask = None run_fwd = False run_back = False if self.state.caffe_net_state == 'free' and time.time() - self.state.last_key_at > self.pause_after_keys: frame = self.state.next_frame mask = self.state.mask self.state.next_frame = None back_enabled = self.state.back_enabled back_mode = self.state.back_mode back_stale = self.state.back_stale #state_layer = self.state.layer #selected_unit = self.state.selected_unit backprop_layer = self.state.backprop_layer backprop_unit = self.state.backprop_unit # Forward should be run for every new frame run_fwd = (frame is not None) # Backward should be run if back_enabled and (there was a new frame OR back is stale (new backprop layer/unit selected)) run_back = (back_enabled and (run_fwd or back_stale)) self.state.caffe_net_state = 'proc' if (run_fwd or run_back) else 'free' #print 'run_fwd,run_back =', run_fwd, run_back if run_fwd: #print 'TIMING:, processing frame' self.frames_processed_fwd += 1 self.net_input_image = cv2.resize(frame, self.input_dims) with WithTimer('CaffeProcThread:forward', quiet = self.debug_level < 1): print "run forward layer" self.net_proc_forward_layer(self.net_input_image, mask) # self.net_preproc_forward(self.net_input_image) if self.state.save_descriptor: self.save_descriptor() # switch descriptor for match and back prop if self.state.next_descriptor: print 'load descriptor' self.descriptor = self.descriptor_handler.get_next() self.state.next_descriptor = False if self.state.compare_descriptor: # find print 'compare' desc_current = self.descriptor_handler.gen_descriptor('current', self.net.blobs) match_file = self.descriptor_handler.get_max_match(desc_current) print 'match: ' + match_file self.state.compare_descriptor = False if run_back: print "run backward" # Match to saved descriptor if self.state.match_descriptor: print '*' diffs = self.net.blobs[self.descriptor_layer_1].diff * 0 # zero all diffs if doesn't match print "shape ", self.net.blobs[self.descriptor_layer_1].data.shape for unit, response in enumerate(self.net.blobs[self.descriptor_layer_1].data[0]): if response.max() > 0 and abs(response.max() - self.descriptor.get_sig_list()[0][0][unit].max())/response.max() < 0.2: diffs[0][unit] = self.net.blobs[self.descriptor_layer_1].data[0][unit] assert back_mode in ('grad', 'deconv') if back_mode == 'grad': with WithTimer('CaffeProcThread:backward', quiet = self.debug_level < 1): #print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) self.net.backward_from_layer(self.descriptor_layer_1, diffs, zero_higher = True) else: with WithTimer('CaffeProcThread:deconv', quiet = self.debug_level < 1): #print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) self.net.deconv_from_layer(self.descriptor_layer_1, diffs, zero_higher = True) with self.state.lock: self.state.back_stale = False # Filter when back propagating elif self.state.backprop_filter: print "run_back" # print backprop_layer start_layer_idx = 
self.available_layer.index(backprop_layer) idx = start_layer_idx for current_layer in list(reversed(self.available_layer[0:start_layer_idx+1])): diffs = self.net.blobs[current_layer].diff * 0 max_response = self.net.blobs[current_layer].data[0].max() for unit, response in enumerate(self.net.blobs[current_layer].data[0]): if response.max() > max_response * 0.6: diffs[0][unit] = self.net.blobs[current_layer].data[0,unit] assert back_mode in ('grad', 'deconv') if back_mode == 'grad': with WithTimer('CaffeProcThread:backward', quiet = self.debug_level < 1): #print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) self.net.backward_from_to_layer(current_layer, diffs, self.available_layer[idx-1], zero_higher = (idx == start_layer_idx)) # else: # with WithTimer('CaffeProcThread:deconv', quiet = self.debug_level < 1): # #print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) # self.net.deconv_from_layer(backprop_layer, diffs, zero_higher = True) idx -= 1 with self.state.lock: self.state.back_stale = False # original approach else: diffs = self.net.blobs[backprop_layer].diff * 0 diffs[0][backprop_unit] = self.net.blobs[backprop_layer].data[0,backprop_unit] assert back_mode in ('grad', 'deconv') if back_mode == 'grad': with WithTimer('CaffeProcThread:backward', quiet = self.debug_level < 1): #print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) self.net.backward_from_layer(backprop_layer, diffs, zero_higher = True) else: with WithTimer('CaffeProcThread:deconv', quiet = self.debug_level < 1): #print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max()) self.net.deconv_from_layer(backprop_layer, diffs, zero_higher = True) with self.state.lock: self.state.back_stale = False if run_fwd or run_back: with self.state.lock: self.state.caffe_net_state = 'free' self.state.drawing_stale = True else: time.sleep(self.loop_sleep) time.sleep(0.1) print 'CaffeProcThread.run: finished' print 'CaffeProcThread.run: processed %d frames fwd, %d frames back' % (self.frames_processed_fwd, self.frames_processed_back)
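# Diff-seeding sketch matching the "original approach" branch above: start the
# backward pass from one unit by zeroing the layer's diff and copying that
# unit's forward activation into it. `net` stands in for the loaded caffe.Net;
# the layer name and unit index are examples; backward_from_layer is the
# deconv-deep-vis-toolbox Caffe binding used above.
diffs = net.blobs['conv5'].diff * 0
diffs[0][13] = net.blobs['conv5'].data[0, 13]
net.backward_from_layer('conv5', diffs, zero_higher=True)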
def run_loop(self): self.quit = False # Setup self.init_window() #cap = cv2.VideoCapture(self.settings.capture_device) from input_fetcher import InputImageFetcher self.input_updater = InputImageFetcher(self.settings) self.input_updater.bind_camera() self.input_updater.start() heartbeat_functions = [self.input_updater.heartbeat] for app_name, app in iter(self.apps.items()): print('Starting app: {}'.format(app_name)) app.start(self) heartbeat_functions.extend(app.get_heartbeats()) ii = 0 since_keypress = 999 since_redraw = 999 since_imshow = 0 last_render = time.time() - 999 latest_frame_idx = None latest_frame_data = None frame_for_apps = None redraw_needed = True # Force redraw the first time imshow_needed = True while not self.quit: # Call any heartbeats for heartbeat in heartbeat_functions: #print 'Heartbeat: calling', heartbeat heartbeat() # Handle key presses keys = [] # Collect key presses (multiple if len(range)>1) for cc in range(1): with WithTimer('LiveVis:waitKey', quiet = self.debug_level < 2): key = cv2.waitKey(self.settings.main_loop_sleep_ms) if key == -1: break else: if (key != 255): keys.append(key) #print 'Got key:', key now = time.time() #print 'Since last:', now - last_render skip_imshow = False #if now - last_render > .05 and since_imshow < 1: # skip_imshow = True if skip_imshow: since_imshow += 1 else: since_imshow = 0 last_render = now #print ' Number of keys:', len(keys) for key in keys: since_keypress = 0 #print 'Got Key:', key key,do_redraw = self.handle_key_pre_apps(key) redraw_needed |= do_redraw imshow_needed |= do_redraw for app_name, app in iter(self.apps.items()): with WithTimer('%s:handle_key' % app_name, quiet = self.debug_level < 1): key = app.handle_key(key, self.panes) key = self.handle_key_post_apps(key) if self.quit: break for app_name, app in iter(self.apps.items()): redraw_needed |= app.redraw_needed() redraw_needed |= self.check_for_control_height_update() # Grab latest frame from input_updater thread fr_idx,fr_data,fr_label,fr_filename = self.input_updater.get_frame() is_new_frame = (fr_idx != latest_frame_idx and fr_data is not None) if is_new_frame: latest_frame_idx = fr_idx latest_frame_data = fr_data latest_label = fr_label latest_filename = fr_filename frame_for_apps = fr_data if is_new_frame: with WithTimer('LiveVis.display_frame', quiet = self.debug_level < 1): self.display_frame(latest_frame_data) imshow_needed = True do_handle_input = (ii == 0 or since_keypress >= self.settings.keypress_pause_handle_iterations) if frame_for_apps is not None and do_handle_input: # Pass frame to apps for processing for app_name, app in iter(self.apps.items()): with WithTimer('%s:handle_input' % app_name, quiet = self.debug_level < 1): app.handle_input(latest_frame_data, latest_label, latest_filename, self.panes) frame_for_apps = None # Tell each app to draw do_redraw = (redraw_needed and (since_keypress >= self.settings.keypress_pause_redraw_iterations or since_redraw >= self.settings.redraw_at_least_every)) if redraw_needed and do_redraw: for app_name, app in iter(self.apps.items()): with WithTimer('%s:draw' % app_name, quiet = self.debug_level < 1): imshow_needed |= app.draw(self.panes) redraw_needed = False since_redraw = 0 # Render buffer if imshow_needed: # Only redraw pane debug if display will be updated if hasattr(self.settings, 'debug_window_panes') and self.settings.debug_window_panes: for pane_name,pane in iter(self.panes.items()): print([pane_name, pane]) pane.data[:] = pane.data * .5 line = [FormattedString('%s |' % pane_name, 
self.debug_pane_defaults), FormattedString('pos: %d,%d |' % (pane.i_begin, pane.j_begin), self.debug_pane_defaults), FormattedString('shape: %d,%d' % (pane.i_size, pane.j_size), self.debug_pane_defaults)] cv2_typeset_text(pane.data, line, (5,20), line_spacing = 5, wrap = True) pane.data[:1,:] = pane_debug_clr pane.data[-1:,:] = pane_debug_clr pane.data[:,:1] = pane_debug_clr pane.data[:,-1:] = pane_debug_clr with WithTimer('LiveVis:imshow', quiet = self.debug_level < 1): if self.help_mode: # Copy main buffer to help buffer self.help_buffer[:] = self.window_buffer[:] self.draw_help() cv2_imshow_rgb(self.window_name, self.help_buffer) else: cv2_imshow_rgb(self.window_name, self.window_buffer) imshow_needed = False ii += 1 since_keypress += 1 since_redraw += 1 if ii % 2 == 0 and self.settings.print_dots: sys.stdout.write('.') sys.stdout.flush() # Extra sleep just for debugging. In production all main loop sleep should be in cv2.waitKey. #time.sleep(2) print('\n\nTrying to exit run_loop...') self.input_updater.quit = True self.input_updater.join(.01 + float(self.settings.input_updater_sleep_after_read_frame) * 5) if self.input_updater.is_alive(): raise Exception('Could not join self.input_updater thread') else: self.input_updater.free_camera() for app_name, app in iter(self.apps.items()): print('Quitting app: {}'.format(app_name)) app.quit() print('Input thread joined and apps quit; exiting run_loop.')
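# Shutdown sketch mirroring the tail of run_loop above: set the quit flag,
# join with a short timeout, and fail loudly instead of hanging if the thread
# never exits. `fetcher` stands in for the InputImageFetcher instance and the
# timeout is an example value.
fetcher.quit = True
fetcher.join(0.5)
if fetcher.is_alive():
    raise Exception('Could not join input fetcher thread')
else:
    fetcher.free_camera()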
def _draw_layer_pane(self, pane): '''Returns the data shown in highres format, b01c order.''' if self.state.layers_show_back: layer_dat_3D = self.net.blobs[self.state.layer].diff[0] else: layer_dat_3D = self.net.blobs[self.state.layer].data[0] # Promote FC layers with shape (n) to have shape (n,1,1) if len(layer_dat_3D.shape) == 1: layer_dat_3D = layer_dat_3D[:, np.newaxis, np.newaxis] n_tiles = layer_dat_3D.shape[0] tile_rows, tile_cols = self.net_layer_info[ self.state.layer]['tiles_rc'] display_3D_highres = None if self.state.pattern_mode: # Show desired patterns loaded from disk load_layer = self.state.layer if self.settings.caffevis_jpgvis_remap and self.state.layer in self.settings.caffevis_jpgvis_remap: load_layer = self.settings.caffevis_jpgvis_remap[ self.state.layer] if self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers: jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir, 'regularized_opt', load_layer, 'whole_layer.jpg') # Get highres version #cache_before = str(self.img_cache) display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None) #else: # display_3D_highres = None if display_3D_highres is None: try: with WithTimer('CaffeVisApp:load_sprite_image', quiet=self.debug_level < 1): display_3D_highres = load_square_sprite_image( jpg_path, n_sprites=n_tiles) except IOError: # File does not exist, so just display disabled. pass else: self.img_cache.set((jpg_path, 'whole'), display_3D_highres) #cache_after = str(self.img_cache) #print 'Cache was / is:\n %s\n %s' % (cache_before, cache_after) if display_3D_highres is not None: # Get lowres version, maybe. Assume we want at least one pixel for selection border. row_downsamp_factor = int( np.ceil( float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2))) col_downsamp_factor = int( np.ceil( float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2))) ds = max(row_downsamp_factor, col_downsamp_factor) if ds > 1: #print 'Downsampling by', ds display_3D = display_3D_highres[:, ::ds, ::ds, :] else: display_3D = display_3D_highres else: display_3D = layer_dat_3D * 0 # nothing to show else: # Show data from network (activations or diffs) if self.state.layers_show_back: back_what_to_disp = self.get_back_what_to_disp() if back_what_to_disp == 'disabled': layer_dat_3D_normalized = np.tile( self.settings.window_background, layer_dat_3D.shape + (1, )) elif back_what_to_disp == 'stale': layer_dat_3D_normalized = np.tile( self.settings.stale_background, layer_dat_3D.shape + (1, )) else: layer_dat_3D_normalized = tile_images_normalize( layer_dat_3D, boost_indiv=self.state.layer_boost_indiv, boost_gamma=self.state.layer_boost_gamma, neg_pos_colors=((1, 0, 0), (0, 1, 0))) else: layer_dat_3D_normalized = tile_images_normalize( layer_dat_3D, boost_indiv=self.state.layer_boost_indiv, boost_gamma=self.state.layer_boost_gamma) #print ' ===layer_dat_3D_normalized.shape', layer_dat_3D_normalized.shape, 'layer_dat_3D_normalized dtype', layer_dat_3D_normalized.dtype, 'range', layer_dat_3D_normalized.min(), layer_dat_3D_normalized.max() display_3D = layer_dat_3D_normalized # Convert to float if necessary: display_3D = ensure_float01(display_3D) # Upsample gray -> color if necessary # e.g. (1000,32,32) -> (1000,32,32,3) if len(display_3D.shape) == 3: display_3D = display_3D[:, :, :, np.newaxis] if display_3D.shape[3] == 1: display_3D = np.tile(display_3D, (1, 1, 1, 3)) # Upsample unit length tiles to give a more sane tile / highlight ratio # e.g. 
(1000,1,1,3) -> (1000,3,3,3) if display_3D.shape[1] == 1: display_3D = np.tile(display_3D, (1, 3, 3, 1)) if self.state.layers_show_back and not self.state.pattern_mode: padval = self.settings.caffevis_layer_clr_back_background else: padval = self.settings.window_background highlights = [None] * n_tiles with self.state.lock: if self.state.cursor_area == 'bottom': highlights[ self.state. selected_unit] = self.settings.caffevis_layer_clr_cursor # in [0,1] range if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer: highlights[ self.state. backprop_unit] = self.settings.caffevis_layer_clr_back_sel # in [0,1] range _, display_2D = tile_images_make_tiles(display_3D, hw=(tile_rows, tile_cols), padval=padval, highlights=highlights) if display_3D_highres is None: display_3D_highres = display_3D # Display pane based on layers_pane_zoom_mode state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode assert state_layers_pane_zoom_mode in (0, 1, 2) if state_layers_pane_zoom_mode == 0: # Mode 0: normal display (activations or patterns) display_2D_resize = ensure_uint255_and_resize_to_fit( display_2D, pane.data.shape) elif state_layers_pane_zoom_mode == 1: # Mode 1: zoomed selection unit_data = display_3D_highres[self.state.selected_unit] display_2D_resize = ensure_uint255_and_resize_to_fit( unit_data, pane.data.shape) else: # Mode 2: zoomed backprop pane display_2D_resize = ensure_uint255_and_resize_to_fit( display_2D, pane.data.shape) * 0 pane.data[:] = to_255(self.settings.window_background) pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize if self.settings.caffevis_label_layers and self.state.layer in self.settings.caffevis_label_layers and self.labels and self.state.cursor_area == 'bottom': # Display label annotation atop layers pane (e.g. for fc8/prob) defaults = { 'face': getattr(cv2, self.settings.caffevis_label_face), 'fsize': self.settings.caffevis_label_fsize, 'clr': to_255(self.settings.caffevis_label_clr), 'thick': self.settings.caffevis_label_thick } loc_base = self.settings.caffevis_label_loc[:: -1] # Reverse to OpenCV c,r order lines = [ FormattedString(self.labels[self.state.selected_unit], defaults) ] cv2_typeset_text(pane.data, lines, loc_base) return display_3D_highres
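# Downsampling sketch for the high-res sprite handling above: pick an integer
# stride so each tile fits its grid cell with roughly one pixel left over for
# the selection border. All sizes below are example values.
import numpy as np

pane_h, pane_w = 600, 900
tile_rows, tile_cols = 16, 16
highres = np.zeros((256, 100, 100, 3))                  # (tiles, h, w, rgb)
ds = max(int(np.ceil(float(highres.shape[1]) / (pane_h / tile_rows - 2))),
         int(np.ceil(float(highres.shape[2]) / (pane_w / tile_cols - 2))))
lowres = highres[:, ::ds, ::ds, :] if ds > 1 else highres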
def _draw_layer_pane(self, pane): '''Returns the data shown in highres format, b01c order.''' if not hasattr(self.net, 'intermediate_predictions') or \ self.net.intermediate_predictions is None: return None, None display_3D_highres, selected_unit_highres = None, None out = self.net.intermediate_predictions[self.state.layer_idx] if self.state.layers_pane_filter_mode in ( 4, 5) and self.state.extra_info is None: self.state.layers_pane_filter_mode = 0 state_layers_pane_filter_mode = self.state.layers_pane_filter_mode assert state_layers_pane_filter_mode in (0, 1, 2, 3, 4) # Display pane based on layers_pane_zoom_mode state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode assert state_layers_pane_zoom_mode in (0, 1, 2) layer_dat_3D = out[0].T n_tiles = layer_dat_3D.shape[0] tile_rows, tile_cols = self.net_layer_info[ self.state.layer]['tiles_rc'] if state_layers_pane_filter_mode == 0: if len(layer_dat_3D.shape) > 1: img_width, img_height = get_tiles_height_width_ratio( layer_dat_3D.shape[1], self.settings.kerasvis_layers_aspect_ratio) pad = np.zeros( (layer_dat_3D.shape[0], ((img_width * img_height) - layer_dat_3D.shape[1]))) layer_dat_3D = np.concatenate((layer_dat_3D, pad), axis=1) layer_dat_3D = np.reshape( layer_dat_3D, (layer_dat_3D.shape[0], img_width, img_height)) elif state_layers_pane_filter_mode == 1: if len(layer_dat_3D.shape) > 1: layer_dat_3D = np.average(layer_dat_3D, axis=1) elif state_layers_pane_filter_mode == 2: if len(layer_dat_3D.shape) > 1: layer_dat_3D = np.max(layer_dat_3D, axis=1) elif state_layers_pane_filter_mode == 3: if len(layer_dat_3D.shape) > 1: title, r, c, hide_axis = None, tile_rows, tile_cols, True x_axis_label, y_axis_label = None, None if self.state.cursor_area == 'bottom' and state_layers_pane_zoom_mode == 1: r, c, hide_axis = 1, 1, False layer_dat_3D = layer_dat_3D[self.state.selected_unit:self. state.selected_unit + 1] title = 'Layer {}, Filter {}'.format( self.state._layers[self.state.layer_idx], self.state.selected_unit) x_axis_label, y_axis_label = 'Time', 'Activation' display_3D = plt_plot_filters_blit( y=layer_dat_3D, x=None, shape=(pane.data.shape[0], pane.data.shape[1]), rows=r, cols=c, title=title, log_scale=self.state.log_scale, hide_axis=hide_axis, x_axis_label=x_axis_label, y_axis_label=y_axis_label) if self.state.cursor_area == 'bottom' and state_layers_pane_zoom_mode == 0: selected_unit_highres = plt_plot_filter( x=None, y=layer_dat_3D[self.state.selected_unit], title='Layer {}, Filter {}'.format( self.state._layers[self.state.layer_idx], self.state.selected_unit), log_scale=self.state.log_scale, x_axis_label='Time', y_axis_label='Activation') else: state_layers_pane_filter_mode = 0 elif state_layers_pane_filter_mode == 4: if self.state.extra_info is not None: extra = self.state.extra_info.item() is_heatmap = True if 'type' in extra and extra[ 'type'] == 'heatmap' else False if is_heatmap: layer_dat_3D = extra['data'][self.state.layer_idx] if self.state.cursor_area == 'bottom' and state_layers_pane_zoom_mode == 1: display_3D = plt_plot_heatmap( data=layer_dat_3D[self.state.selected_unit:self. 
state.selected_unit + 1], shape=(pane.data.shape[0], pane.data.shape[1]), rows=1, cols=1, x_axis_label=extra['x_axis'], y_axis_label=extra['y_axis'], title='Layer {}, Filter {} \n {}'.format( self.state._layers[self.state.layer_idx], self.state.selected_unit, extra['title']), hide_axis=False, x_axis_values=extra['x_axis_values'], y_axis_values=extra['y_axis_values'], vmin=layer_dat_3D.min(), vmax=layer_dat_3D.max()) else: display_3D = plt_plot_heatmap( data=layer_dat_3D, shape=(pane.data.shape[0], pane.data.shape[1]), rows=tile_rows, cols=tile_cols, x_axis_label=extra['x_axis'], y_axis_label=extra['y_axis'], title=extra['title'], x_axis_values=extra['x_axis_values'], y_axis_values=extra['y_axis_values']) if self.state.cursor_area == 'bottom': selected_unit_highres = plt_plot_heatmap( data=layer_dat_3D[self.state.selected_unit:self. state.selected_unit + 1], shape=(300, 300), rows=1, cols=1, x_axis_label=extra['x_axis'], y_axis_label=extra['y_axis'], title='Layer {}, Filter {} \n {}'.format( self.state._layers[self.state.layer_idx], self.state.selected_unit, extra['title']), x_axis_values=extra['x_axis_values'], y_axis_values=extra['y_axis_values'], hide_axis=False, vmin=layer_dat_3D.min(), vmax=layer_dat_3D.max())[0] else: layer_dat_3D = extra['x'][self.state.layer_idx] title, x_axis_label, y_axis_label, r, c, hide_axis = None, None, None, tile_rows, tile_cols, True if self.state.cursor_area == 'bottom': if state_layers_pane_zoom_mode == 1: r, c, hide_axis = 1, 1, False layer_dat_3D = layer_dat_3D[self.state. selected_unit:self. state.selected_unit + 1] title = 'Layer {}, Filter {} \n {}'.format( self.state._layers[self.state.layer_idx], self.state.selected_unit, extra['title']) x_axis_label, y_axis_label = extra[ 'x_axis'], extra['y_axis'] if self.state.log_scale == 1: y_axis_label = y_axis_label + ' (log-scale)' # start_time = timeit.default_timer() display_3D = plt_plot_filters_blit( y=layer_dat_3D, x=extra['y'], shape=(pane.data.shape[0], pane.data.shape[1]), rows=r, cols=c, title=title, log_scale=self.state.log_scale, x_axis_label=x_axis_label, y_axis_label=y_axis_label, hide_axis=hide_axis) if self.state.cursor_area == 'bottom' and state_layers_pane_zoom_mode == 0: selected_unit_highres = plt_plot_filter( x=extra['y'], y=layer_dat_3D[self.state.selected_unit], title='Layer {}, Filter {} \n {}'.format( self.state._layers[self.state.layer_idx], self.state.selected_unit, extra['title']), log_scale=self.state.log_scale, x_axis_label=extra['x_axis'], y_axis_label=extra['y_axis']) # TODO # if hasattr(self.settings, 'static_files_extra_fn'): # self.data = self.settings.static_files_extra_fn(self.latest_static_file) # self.state.layer_idx if len(layer_dat_3D.shape) == 1: layer_dat_3D = layer_dat_3D[:, np.newaxis, np.newaxis] if self.state.layers_show_back and not self.state.pattern_mode: padval = self.settings.kerasvis_layer_clr_back_background else: padval = self.settings.window_background if self.state.pattern_mode: # Show desired patterns loaded from disk load_layer = self.state.layer if self.settings.kerasvis_jpgvis_remap and self.state.layer in self.settings.kerasvis_jpgvis_remap: load_layer = self.settings.kerasvis_jpgvis_remap[ self.state.layer] if self.settings.kerasvis_jpgvis_layers and load_layer in self.settings.kerasvis_jpgvis_layers: jpg_path = os.path.join(self.settings.kerasvis_unit_jpg_dir, 'regularized_opt', load_layer, 'whole_layer.jpg') # Get highres version # cache_before = str(self.img_cache) display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None) # else: # 
display_3D_highres = None if display_3D_highres is None: try: with WithTimer('KerasVisApp:load_sprite_image', quiet=self.debug_level < 1): display_3D_highres = load_square_sprite_image( jpg_path, n_sprites=n_tiles) except IOError: # File does not exist, so just display disabled. pass else: self.img_cache.set((jpg_path, 'whole'), display_3D_highres) # cache_after = str(self.img_cache) # print 'Cache was / is:\n %s\n %s' % (cache_before, cache_after) if display_3D_highres is not None: # Get lowres version, maybe. Assume we want at least one pixel for selection border. row_downsamp_factor = int( np.ceil( float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2))) col_downsamp_factor = int( np.ceil( float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2))) ds = max(row_downsamp_factor, col_downsamp_factor) if ds > 1: # print 'Downsampling by', ds display_3D = display_3D_highres[:, ::ds, ::ds, :] else: display_3D = display_3D_highres else: display_3D = layer_dat_3D * 0 # nothing to show else: # Show data from network (activations or diffs) if self.state.layers_show_back: back_what_to_disp = self.get_back_what_to_disp() if back_what_to_disp == 'disabled': layer_dat_3D_normalized = np.tile( self.settings.window_background, layer_dat_3D.shape + (1, )) elif back_what_to_disp == 'stale': layer_dat_3D_normalized = np.tile( self.settings.stale_background, layer_dat_3D.shape + (1, )) else: layer_dat_3D_normalized = tile_images_normalize( layer_dat_3D, boost_indiv=self.state.layer_boost_indiv, boost_gamma=self.state.layer_boost_gamma, neg_pos_colors=((1, 0, 0), (0, 1, 0))) else: layer_dat_3D_normalized = tile_images_normalize( layer_dat_3D, boost_indiv=self.state.layer_boost_indiv, boost_gamma=self.state.layer_boost_gamma) # print ' ===layer_dat_3D_normalized.shape', layer_dat_3D_normalized.shape, 'layer_dat_3D_normalized dtype', layer_dat_3D_normalized.dtype, 'range', layer_dat_3D_normalized.min(), layer_dat_3D_normalized.max() if state_layers_pane_filter_mode in (0, 1, 2): display_3D = layer_dat_3D_normalized # Convert to float if necessary: display_3D = ensure_float01(display_3D) # Upsample gray -> color if necessary # e.g. (1000,32,32) -> (1000,32,32,3) if len(display_3D.shape) == 3: display_3D = display_3D[:, :, :, np.newaxis] if display_3D.shape[3] == 1: display_3D = np.tile(display_3D, (1, 1, 1, 3)) # Upsample unit length tiles to give a more sane tile / highlight ratio # e.g. (1000,1,1,3) -> (1000,3,3,3) if display_3D.shape[1] == 1: display_3D = np.tile(display_3D, (1, 3, 3, 1)) if state_layers_pane_zoom_mode in (0, 2): highlights = [None] * n_tiles with self.state.lock: if self.state.cursor_area == 'bottom': highlights[ self.state. selected_unit] = self.settings.kerasvis_layer_clr_cursor # in [0,1] range if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer: highlights[ self.state. backprop_unit] = self.settings.kerasvis_layer_clr_back_sel # in [0,1] range if self.state.cursor_area == 'bottom' and state_layers_pane_filter_mode in ( 3, 4): # pane.data[0:display_2D_resize.shape[0], 0:2, :] = to_255(self.settings.window_background) # pane.data[0:2, 0:display_2D_resize.shape[1], :] = to_255(self.settings.window_background) display_3D[self.state.selected_unit, 0:display_3D.shape[1], 0:2, :] = self.settings.kerasvis_layer_clr_cursor display_3D[ self.state.selected_unit, 0:2, 0:display_3D. 
shape[2], :] = self.settings.kerasvis_layer_clr_cursor display_3D[self.state.selected_unit, 0:display_3D.shape[1], -2:, :] = self.settings.kerasvis_layer_clr_cursor display_3D[ self.state.selected_unit, -2:, 0:display_3D. shape[2], :] = self.settings.kerasvis_layer_clr_cursor _, display_2D = tile_images_make_tiles(display_3D, hw=(tile_rows, tile_cols), padval=padval, highlights=highlights) # Mode 0: normal display (activations or patterns) display_2D_resize = ensure_uint255_and_resize_to_fit( display_2D, pane.data.shape) if state_layers_pane_zoom_mode == 2: display_2D_resize = display_2D_resize * 0 if display_3D_highres is None: display_3D_highres = display_3D elif state_layers_pane_zoom_mode == 1: if display_3D_highres is None: display_3D_highres = display_3D # Mode 1: zoomed selection if state_layers_pane_filter_mode in (0, 1, 2): unit_data = display_3D_highres[self.state.selected_unit] else: unit_data = display_3D_highres[0] display_2D_resize = ensure_uint255_and_resize_to_fit( unit_data, pane.data.shape) pane.data[:] = to_255(self.settings.window_background) pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize # # Add background strip around the top and left edges # pane.data[0:display_2D_resize.shape[0], 0:2, :] = to_255(self.settings.window_background) # pane.data[0:2, 0:display_2D_resize.shape[1], :] = to_255(self.settings.window_background) if self.settings.kerasvis_label_layers and \ self.state.layer in self.settings.kerasvis_label_layers and \ self.labels and self.state.cursor_area == 'bottom': # Display label annotation atop layers pane (e.g. for fc8/prob) defaults = { 'face': getattr(cv2, self.settings.kerasvis_label_face), 'fsize': self.settings.kerasvis_label_fsize, 'clr': to_255(self.settings.kerasvis_label_clr), 'thick': self.settings.kerasvis_label_thick } loc_base = self.settings.kerasvis_label_loc[:: -1] # Reverse to OpenCV c,r order lines = [ FormattedString(self.labels[self.state.selected_unit], defaults) ] cv2_typeset_text(pane.data, lines, loc_base) return display_3D_highres, selected_unit_highres
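# Filter-mode-0 sketch from the Keras pane above: pad each unit's flat
# activation vector out to a rectangle and reshape it into a per-unit tile.
# The activation matrix and tile shape are example values.
import numpy as np

acts = np.random.rand(64, 100)                          # (units, features)
img_w, img_h = 10, 10                                   # chosen so img_w * img_h >= 100
pad = np.zeros((acts.shape[0], img_w * img_h - acts.shape[1]))
tiles = np.concatenate((acts, pad), axis=1).reshape(acts.shape[0], img_w, img_h)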
def run(self):
    '''Run this CaffeProcThread.'''
    print 'CaffeProcThread.run called'
    frame = None

    import caffe
    # Set the mode to CPU or GPU. Note: in the latest Caffe
    # versions, there is one Caffe object *per thread*, so the
    # mode must be set per thread! Here we set the mode for the
    # CaffeProcThread thread; it is also set in the main thread.
    if self.mode_gpu:
        caffe.set_mode_gpu()
        print 'CaffeVisApp mode (in CaffeProcThread): GPU'
    else:
        caffe.set_mode_cpu()
        print 'CaffeVisApp mode (in CaffeProcThread): CPU'

    while not self.is_timed_out():
        with self.state.lock:
            if self.state.quit:
                #print 'CaffeProcThread.run: quit is True'
                #print self.state.quit
                break

            #print 'CaffeProcThread.run: caffe_net_state is:', self.state.caffe_net_state
            #print 'CaffeProcThread.run loop: next_frame: %s, caffe_net_state: %s, back_enabled: %s' % (
            #    'None' if self.state.next_frame is None else 'Avail',
            #    self.state.caffe_net_state,
            #    self.state.back_enabled)

            frame = None
            run_fwd = False
            run_back = False
            if self.state.caffe_net_state == 'free' and time.time() - self.state.last_key_at > self.pause_after_keys:
                frame = self.state.next_frame
                self.state.next_frame = None
                back_enabled = self.state.back_enabled
                back_mode = self.state.back_mode
                back_stale = self.state.back_stale
                #state_layer = self.state.layer
                #selected_unit = self.state.selected_unit
                backprop_layer = self.state.backprop_layer
                backprop_unit = self.state.backprop_unit

                # Forward should be run for every new frame
                run_fwd = (frame is not None)
                # Backward should be run if back_enabled and (there was a new frame
                # OR back is stale (new backprop layer/unit selected))
                run_back = (back_enabled and (run_fwd or back_stale))
                self.state.caffe_net_state = 'proc' if (run_fwd or run_back) else 'free'

        #print 'run_fwd,run_back =', run_fwd, run_back

        if run_fwd:
            #print 'TIMING:, processing frame'
            self.frames_processed_fwd += 1
            im_small = cv2.resize(frame, self.input_dims)
            with WithTimer('CaffeProcThread:forward', quiet=self.debug_level < 1):
                self.my_net.preproc_forward(im_small, self.input_dims)

        if run_back:
            #ULF[old]:
            #diffs = self.net.blobs[backprop_layer].diff * 0
            #diffs = self.my_net.get_layer_zeros(backprop_layer)
            diffs = np.zeros((1,) + self.my_net.get_layer_shape(backprop_layer))
            #diffs[0][backprop_unit] = self.net.blobs[backprop_layer].data[0,backprop_unit]
            diffs[0][backprop_unit] = self.my_net.get_layer_data(backprop_layer, unit=backprop_unit)

            assert back_mode in ('grad', 'deconv')
            if back_mode == 'grad':
                with WithTimer('CaffeProcThread:backward', quiet=self.debug_level < 1):
                    self.my_net.backward_from_layer(backprop_layer, diffs)
            else:
                with WithTimer('CaffeProcThread:deconv', quiet=self.debug_level < 1):
                    self.my_net.deconv_from_layer(backprop_layer, diffs)

            with self.state.lock:
                self.state.back_stale = False

        if run_fwd or run_back:
            with self.state.lock:
                self.state.caffe_net_state = 'free'
                self.state.drawing_stale = True
            now = time.time()
            if self.last_process_finished_at:
                self.last_process_elapsed = now - self.last_process_finished_at
            self.last_process_finished_at = now
        else:
            time.sleep(self.loop_sleep)

    print 'CaffeProcThread.run: finished'
    print 'CaffeProcThread.run: processed %d frames fwd, %d frames back' % (
        self.frames_processed_fwd, self.frames_processed_back)
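# Backend-neutral seeding sketch matching the wrapper calls above: the zero
# diff blob is built from the layer shape reported by the wrapper instead of
# touching net.blobs directly. `my_net`, the layer name, and the unit index
# are placeholders mirroring the wrapper used in this variant.
import numpy as np

diffs = np.zeros((1,) + my_net.get_layer_shape('conv5'))
diffs[0][13] = my_net.get_layer_data('conv5', unit=13)
my_net.backward_from_layer('conv5', diffs)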
def _draw_layer_pane(self, pane): '''Returns the data shown in highres format, b01c order.''' if self.state.layers_show_back: layer_dat_3D = self.net.blobs[self.state.layer].diff[0] else: layer_dat_3D = self.net.blobs[self.state.layer].data[0] # Promote FC layers with shape (n) to have shape (n,1,1) if len(layer_dat_3D.shape) == 1: layer_dat_3D = layer_dat_3D[:,np.newaxis,np.newaxis] n_tiles = layer_dat_3D.shape[0] tile_rows,tile_cols = get_tiles_height_width(n_tiles) display_3D_highres = None if self.state.pattern_mode: # Show desired patterns loaded from disk #available = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8', 'prob'] jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir, 'regularized_opt', self.state.layer, 'whole_layer.jpg') # Get highres version cache_before = str(self.img_cache) display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None) if display_3D_highres is None: try: with WithTimer('CaffeVisApp:load_sprite_image', quiet = self.debug_level < 1): display_3D_highres = load_sprite_image(jpg_path, (tile_rows, tile_cols), n_sprites = n_tiles) except IOError: # File does not exist, so just display disabled. pass else: self.img_cache.set((jpg_path, 'whole'), display_3D_highres) cache_after = str(self.img_cache) #print 'Cache was / is:\n %s\n %s' % (cache_before, cache_after) if display_3D_highres is not None: # Get lowres version, maybe. Assume we want at least one pixel for selection border. row_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2))) col_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2))) ds = max(row_downsamp_factor, col_downsamp_factor) if ds > 1: #print 'Downsampling by', ds display_3D = display_3D_highres[:,::ds,::ds,:] else: display_3D = display_3D_highres else: display_3D = layer_dat_3D * 0 # nothing to show else: # Show data from network (activations or diffs) if self.state.layers_show_back: back_what_to_disp = self.get_back_what_to_disp() if back_what_to_disp == 'disabled': layer_dat_3D_normalized = np.tile(self.settings.window_background, layer_dat_3D.shape + (1,)) elif back_what_to_disp == 'stale': layer_dat_3D_normalized = np.tile(self.settings.stale_background, layer_dat_3D.shape + (1,)) else: layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D, boost_indiv = self.state.layer_boost_indiv, boost_gamma = self.state.layer_boost_gamma, neg_pos_colors = ((1,0,0), (0,1,0))) else: layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D, boost_indiv = self.state.layer_boost_indiv, boost_gamma = self.state.layer_boost_gamma) #print ' ===layer_dat_3D_normalized.shape', layer_dat_3D_normalized.shape, 'layer_dat_3D_normalized dtype', layer_dat_3D_normalized.dtype, 'range', layer_dat_3D_normalized.min(), layer_dat_3D_normalized.max() display_3D = layer_dat_3D_normalized # Convert to float if necessary: display_3D = ensure_float01(display_3D) # Upsample gray -> color if necessary # (1000,32,32) -> (1000,32,32,3) if len(display_3D.shape) == 3: display_3D = display_3D[:,:,:,np.newaxis] if display_3D.shape[3] == 1: display_3D = np.tile(display_3D, (1, 1, 1, 3)) # Upsample unit length tiles to give a more sane tile / highlight ratio # (1000,1,1,3) -> (1000,3,3,3) if display_3D.shape[1] == 1: display_3D = np.tile(display_3D, (1, 3, 3, 1)) if self.state.layers_show_back and not self.state.pattern_mode: padval = self.settings.caffevis_layer_clr_back_background else: padval = self.settings.window_background # Tell the 
state about the updated (height,width) tile display (ensures valid selection) self.state.update_tiles_height_width((tile_rows,tile_cols), display_3D.shape[0]) #if self.state.layers_show_back: # highlights = [(.5, .5, 1)] * n_tiles #else: highlights = [None] * n_tiles with self.state.lock: if self.state.cursor_area == 'bottom': highlights[self.state.selected_unit] = self.settings.caffevis_layer_clr_cursor # in [0,1] range if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer: highlights[self.state.backprop_unit] = self.settings.caffevis_layer_clr_back_sel # in [0,1] range _, display_2D = tile_images_make_tiles(display_3D, padval = padval, highlights = highlights) #print ' ===tile_conv dtype', tile_conv.dtype, 'range', tile_conv.min(), tile_conv.max() if display_3D_highres is None: display_3D_highres = display_3D # Display pane based on layers_pane_zoom_mode state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode assert state_layers_pane_zoom_mode in (0,1,2) if state_layers_pane_zoom_mode == 0: # Mode 0: base case display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) elif state_layers_pane_zoom_mode == 1: # Mode 1: zoomed selection unit_data = display_3D_highres[self.state.selected_unit] display_2D_resize = ensure_uint255_and_resize_to_fit(unit_data, pane.data.shape) else: # Mode 2: ??? backprop ??? display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) * 0 pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize return display_3D_highres
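# Grid-layout sketch for the get_tiles_height_width call above: a stand-in
# that picks a near-square (rows, cols) arrangement for n tiles.
import math

def tiles_rc(n_tiles):
    cols = int(math.ceil(math.sqrt(n_tiles)))
    rows = int(math.ceil(float(n_tiles) / cols))
    return rows, cols

print(tiles_rc(256))    # (16, 16)
print(tiles_rc(96))     # (10, 10)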