def get_loss_history_preview(loss_history, iter, w, c):
    loss_history = np.array(loss_history.copy())

    lh_height = 100
    lh_img = np.ones((lh_height, w, c)) * 0.1

    if len(loss_history) != 0:
        loss_count = len(loss_history[0])
        lh_len = len(loss_history)

        # bucket the iterations into one bucket per pixel column and keep the
        # per-loss max / min inside each bucket
        l_per_col = lh_len / w
        plist_max = [[max(0.0,
                          loss_history[int(col * l_per_col)][p],
                          *[loss_history[i_ab][p]
                            for i_ab in range(int(col * l_per_col),
                                              int((col + 1) * l_per_col))])
                      for p in range(loss_count)]
                     for col in range(w)]

        plist_min = [[min(plist_max[col][p],
                          loss_history[int(col * l_per_col)][p],
                          *[loss_history[i_ab][p]
                            for i_ab in range(int(col * l_per_col),
                                              int((col + 1) * l_per_col))])
                      for p in range(loss_count)]
                     for col in range(w)]

        # vertical scale: twice the mean of the last 80% of the history
        plist_abs_max = np.mean(loss_history[len(loss_history) // 5:]) * 2

        for col in range(0, w):
            for p in range(0, loss_count):
                # one hue per loss term
                point_color = [1.0] * c
                point_color[0:3] = colorsys.hsv_to_rgb(p * (1.0 / loss_count), 1.0, 1.0)

                ph_max = int((plist_max[col][p] / plist_abs_max) * (lh_height - 1))
                ph_max = np.clip(ph_max, 0, lh_height - 1)

                ph_min = int((plist_min[col][p] / plist_abs_max) * (lh_height - 1))
                ph_min = np.clip(ph_min, 0, lh_height - 1)

                # draw a vertical segment spanning this bucket's min..max
                for ph in range(ph_min, ph_max + 1):
                    lh_img[(lh_height - ph - 1), col] = point_color

    # horizontal grid lines
    lh_lines = 5
    lh_line_height = (lh_height - 1) / lh_lines
    for i in range(0, lh_lines + 1):
        lh_img[int(i * lh_line_height), :] = (0.8,) * c

    last_line_t = int((lh_lines - 1) * lh_line_height)
    last_line_b = int(lh_lines * lh_line_height)

    lh_text = 'Iter: %d' % (iter) if iter != 0 else ''

    lh_img[last_line_t:last_line_b, 0:w] += imagelib.get_text_image(
        (last_line_b - last_line_t, w, c), lh_text, color=[0.8] * c)
    return lh_img

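# Usage sketch (not part of the original source): render the history strip for a
# synthetic, decaying two-term loss history and save it to disk. It assumes this
# module's own dependencies (numpy as np, colorsys, imagelib) are importable and
# that the function is callable as defined above; the data and file name are
# illustrative only.
if __name__ == '__main__':
    import cv2

    fake_history = [[0.5 * (0.999 ** i) + 0.01,
                     0.4 * (0.999 ** i) + 0.02] for i in range(10000)]

    strip = get_loss_history_preview(fake_history, 10000, 512, 3)  # float image in 0..1
    cv2.imwrite('loss_history_preview.png',
                np.clip(strip * 255, 0, 255).astype(np.uint8))
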
def onGetPreview(self, generators_samples):
    test_src = generators_samples[0][0][0:4]      # first 4 samples
    test_pyr_src = generators_samples[0][1][0:4]
    test_dst = generators_samples[1][0][0:4]
    test_pyr_dst = generators_samples[1][1][0:4]

    h, w, c = self.resolution, self.resolution, 3
    h_line = 13

    result = []
    for name, img, pyr in [['training data', test_src, test_pyr_src],
                           ['evaluating data', test_dst, test_pyr_dst]]:
        pyr_pred = self.pose_est.extract(img)

        hor_imgs = []
        for i in range(len(img)):
            # text panel: sample pose values on the first line, predicted on the second
            img_info = np.ones((h, w, c)) * 0.1
            lines = ["%s" % (str(pyr[i])),
                     "%s" % (str(pyr_pred[i]))]

            lines_count = len(lines)
            for ln in range(lines_count):
                img_info[ln * h_line:(ln + 1) * h_line, 0:w] += imagelib.get_text_image(
                    (h_line, w, c), lines[ln], color=[0.8] * c)

            hor_imgs.append(np.concatenate((img[i, :, :, 0:3], img_info), axis=1))

        result += [(name, np.concatenate(hor_imgs, axis=0))]

    return result

def create_preview_pane_image(previews, selected_preview, loss_history,
                              show_last_history_iters_count, iteration, batch_size,
                              zoom=Zoom.ZOOM_100):
    scaled_previews = scale_previews(previews, zoom)
    selected_preview_name = scaled_previews[selected_preview][0]
    selected_preview_rgb = scaled_previews[selected_preview][1]
    h, w, c = selected_preview_rgb.shape

    # HEAD
    head_lines = [
        '[s]:save [enter]:exit [-/+]:zoom: %s' % zoom.label,
        '[p]:update [space]:next preview [l]:change history range',
        'Preview: "%s" [%d/%d]' % (selected_preview_name,
                                   selected_preview + 1, len(previews))
    ]
    head_line_height = int(15 * zoom.scale)
    head_height = len(head_lines) * head_line_height
    head = np.ones((head_height, w, c)) * 0.1

    for i in range(0, len(head_lines)):
        t = i * head_line_height
        b = (i + 1) * head_line_height
        head[t:b, 0:w] += imagelib.get_text_image((head_line_height, w, c),
                                                  head_lines[i], color=[0.8] * c)

    final = head

    if loss_history is not None:
        if show_last_history_iters_count == 0:
            loss_history_to_show = loss_history
        else:
            loss_history_to_show = loss_history[-show_last_history_iters_count:]

        lh_height = int(100 * zoom.scale)
        lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show,
                                                           iteration, batch_size,
                                                           w, c, lh_height)
        final = np.concatenate([final, lh_img], axis=0)

    final = np.concatenate([final, selected_preview_rgb], axis=0)
    final = np.clip(final, 0, 1)
    return (final * 255).astype(np.uint8)

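# create_preview_pane_image() only relies on `zoom.scale`, `zoom.label`, a
# `scale_previews` helper and the six-argument get_loss_history_preview variant.
# Below is a minimal sketch, consistent with those references, of what the Zoom
# enum and scale_previews could look like; the actual definitions live elsewhere
# in the repo and may differ.
import cv2
import numpy as np
from enum import Enum

class Zoom(Enum):
    ZOOM_50 = 0.5
    ZOOM_100 = 1.0
    ZOOM_200 = 2.0

    @property
    def scale(self):
        return self.value

    @property
    def label(self):
        # e.g. '100%'
        return '%d%%' % int(self.value * 100)

def scale_previews(previews, zoom):
    # resize every (name, rgb) preview by the current zoom factor
    return [(name, cv2.resize(rgb, (int(rgb.shape[1] * zoom.scale),
                                    int(rgb.shape[0] * zoom.scale))))
            for name, rgb in previews]
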
def make_screen(self):
    alt, azi, inten = self.alt_azi_ar[self.alt_azi_cur]
    img = self.dpr.relight(self.current_img, alt, azi, inten, self.lighten)

    h, w, c = img.shape

    lines = ['Pick light directions for whole faceset.',
             '[q]-new test face',
             '[w][e]-navigate',
             '[a][s]-intensity',
             '[r]-new [t]-delete [enter]-process',
             '']
    for i, (alt, azi, inten) in enumerate(self.alt_azi_ar):
        s = '>:' if self.alt_azi_cur == i else ' :'
        s += f'alt=[{int(alt):03}] azi=[{int(azi):03}] int=[{inten:01.1f}]'
        lines += [s]

    lines_count = len(lines)
    h_line = 16

    sh = lines_count * h_line
    sw = 400
    sc = c
    status_img = np.ones((sh, sw, sc)) * 0.1

    for i in range(lines_count):
        status_img[i * h_line:(i + 1) * h_line, 0:sw] += imagelib.get_text_image(
            (h_line, sw, c), lines[i], color=[0.8] * c)

    status_img = np.clip(status_img * 255, 0, 255).astype(np.uint8)

    # combine screens
    if sh > h:
        img = np.concatenate([img, np.zeros((sh - h, w, c), dtype=img.dtype)], axis=0)
    elif h > sh:
        status_img = np.concatenate([status_img, np.zeros((h - sh, sw, sc), dtype=img.dtype)], axis=0)

    img = np.concatenate([img, status_img], axis=1)

    return img

def get_screen_status_block(self, w, c):
    if self.screen_status_block_dirty:
        self.screen_status_block_dirty = False
        lines = [
            'Polys current/max = %d/%d' % (self.ie_polys.n, self.ie_polys.n_max),
        ]
        if self.get_status_lines_func is not None:
            lines += self.get_status_lines_func()

        lines_count = len(lines)

        h_line = 21
        h = lines_count * h_line
        img = np.ones((h, w, c)) * 0.1

        for i in range(lines_count):
            img[i * h_line:(i + 1) * h_line, 0:w] += imagelib.get_text_image(
                (h_line, w, c), lines[i], color=[0.8] * c)

        self.screen_status_block = np.clip(img * 255, 0, 255).astype(np.uint8)

    return self.screen_status_block

def main(args, device_args):
    io.log_info("Running trainer.\r\n")

    no_preview = args.get('no_preview', False)

    # two queues for UI <-> trainer thread communication
    s2c = queue.Queue()
    c2s = queue.Queue()

    thread = threading.Thread(target=trainerThread, args=(s2c, c2s, args, device_args))
    thread.start()

    if no_preview:
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input.get('op', '')
                if op == 'close':
                    break
            try:
                io.process_messages(0.1)
            except KeyboardInterrupt:
                s2c.put({'op': 'close'})
    else:
        wnd_name = "Training preview"
        io.named_window(wnd_name)
        io.capture_keys(wnd_name)

        previews = None
        loss_history = None
        selected_preview = 0
        update_preview = False
        is_showing = False
        is_waiting_preview = False
        show_last_history_iters_count = 0
        iter = 0
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input['op']
                if op == 'show':
                    is_waiting_preview = False
                    loss_history = input['loss_history'] if 'loss_history' in input.keys() else None
                    previews = input['previews'] if 'previews' in input.keys() else None
                    iter = input['iter'] if 'iter' in input.keys() else 0
                    if previews is not None:
                        max_w = 0
                        max_h = 0
                        for (preview_name, preview_rgb) in previews:
                            (h, w, c) = preview_rgb.shape
                            max_h = max(max_h, h)
                            max_w = max(max_w, w)

                        max_size = 800
                        if max_h > max_size:
                            max_w = int(max_w / (max_h / max_size))
                            max_h = max_size

                        # make all previews size equal
                        for preview in previews[:]:
                            (preview_name, preview_rgb) = preview
                            (h, w, c) = preview_rgb.shape
                            if h != max_h or w != max_w:
                                previews.remove(preview)
                                previews.append((preview_name, cv2.resize(preview_rgb, (max_w, max_h))))
                        selected_preview = selected_preview % len(previews)
                        update_preview = True
                elif op == 'close':
                    break

            if update_preview:
                update_preview = False

                selected_preview_name = previews[selected_preview][0]
                selected_preview_rgb = previews[selected_preview][1]
                (h, w, c) = selected_preview_rgb.shape

                # HEAD
                head_lines = [
                    '[s]:save [enter]:exit',
                    '[p]:update [space]:next preview [l]:change history range',
                    'Preview: "%s" [%d/%d]' % (selected_preview_name,
                                               selected_preview + 1, len(previews))
                ]
                head_line_height = 15
                head_height = len(head_lines) * head_line_height
                head = np.ones((head_height, w, c)) * 0.1

                for i in range(0, len(head_lines)):
                    t = i * head_line_height
                    b = (i + 1) * head_line_height
                    head[t:b, 0:w] += imagelib.get_text_image((head_line_height, w, c),
                                                              head_lines[i], color=[0.8] * c)

                final = head

                if loss_history is not None:
                    if show_last_history_iters_count == 0:
                        loss_history_to_show = loss_history
                    else:
                        loss_history_to_show = loss_history[-show_last_history_iters_count:]

                    lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show, iter, w, c)
                    final = np.concatenate([final, lh_img], axis=0)

                final = np.concatenate([final, selected_preview_rgb], axis=0)
                final = np.clip(final, 0, 1)

                io.show_image(wnd_name, (final * 255).astype(np.uint8))
                is_showing = True

            key_events = io.get_key_events(wnd_name)
            key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = \
                key_events[-1] if len(key_events) > 0 else (0, 0, False, False, False)

            if key == ord('\n') or key == ord('\r'):
                s2c.put({'op': 'close'})
            elif key == ord('s'):
                s2c.put({'op': 'save'})
            elif key == ord('p'):
                if not is_waiting_preview:
                    is_waiting_preview = True
                    s2c.put({'op': 'preview'})
            elif key == ord('l'):
                # cycle the visible loss-history range
                if show_last_history_iters_count == 0:
                    show_last_history_iters_count = 5000
                elif show_last_history_iters_count == 5000:
                    show_last_history_iters_count = 10000
                elif show_last_history_iters_count == 10000:
                    show_last_history_iters_count = 50000
                elif show_last_history_iters_count == 50000:
                    show_last_history_iters_count = 100000
                elif show_last_history_iters_count == 100000:
                    show_last_history_iters_count = 0
                update_preview = True
            elif key == ord(' '):
                selected_preview = (selected_preview + 1) % len(previews)
                update_preview = True

            try:
                io.process_messages(0.1)
            except KeyboardInterrupt:
                s2c.put({'op': 'close'})

        io.destroy_all_windows()

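# A minimal sketch (not part of the original source) of the message protocol the
# preview loop above relies on. The trainer thread posts 'show' dicts on c2s with
# optional 'loss_history', 'previews' and 'iter' keys, and the UI answers with
# 'save' / 'preview' / 'close' commands on s2c. The stub below only illustrates
# the dict shapes; it is not the real trainerThread.
if __name__ == '__main__':
    import queue
    import numpy as np

    s2c_demo, c2s_demo = queue.Queue(), queue.Queue()

    # what the trainer side would send after computing a preview
    c2s_demo.put({'op': 'show',
                  'loss_history': [[0.31, 0.28]],                           # one row per iteration
                  'previews': [('SAE', np.zeros((128, 384, 3), np.float32))],  # (name, rgb in 0..1)
                  'iter': 1})

    # what the UI loop sends back on [s] / [p] / [enter]
    for op in ('save', 'preview', 'close'):
        s2c_demo.put({'op': op})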