def on_result(self, host_dict, data, result):
    idx, dflimg = result
    self.result[idx] = (self.image_paths[idx], dflimg)
    InfoNotifier.InfoNotifier.g_progress_info.append(
        f"Finished loading training data: {self.image_paths[idx]}")
    QApplication.processEvents()
    io.progress_bar_inc(1)
def on_result(self, host_dict, data, result):
    if result[1] == 0:
        self.trash_img_list.append(result)
    else:
        self.img_list.append(result)
    io.progress_bar_inc(1)
def on_result(self, host_dict, data, result):
    if self.type == 'landmarks-manual':
        filepath, landmarks = result.filepath, result.landmarks

        if len(landmarks) != 0 and landmarks[0] is not None:
            self.landmarks = landmarks[0]

        (h, w, c) = self.image.shape

        if not self.hide_help:
            image = cv2.addWeighted(self.image, 1.0, self.text_lines_img, 1.0, 0)
        else:
            image = self.image.copy()

        view_rect = (np.array(self.rect) * self.view_scale).astype(int).tolist()
        view_landmarks = (np.array(self.landmarks) * self.view_scale).astype(int).tolist()

        if self.rect_size <= 40:
            scaled_rect_size = h // 3 if w > h else w // 3

            p1 = (self.x - self.rect_size, self.y - self.rect_size)
            p2 = (self.x + self.rect_size, self.y - self.rect_size)
            p3 = (self.x - self.rect_size, self.y + self.rect_size)

            wh = h if h < w else w
            np1 = (w / 2 - wh / 4, h / 2 - wh / 4)
            np2 = (w / 2 + wh / 4, h / 2 - wh / 4)
            np3 = (w / 2 - wh / 4, h / 2 + wh / 4)

            mat = cv2.getAffineTransform(
                np.float32([p1, p2, p3]) * self.view_scale,
                np.float32([np1, np2, np3]))
            image = cv2.warpAffine(image, mat, (w, h))
            view_landmarks = LandmarksProcessor.transform_points(view_landmarks, mat)

        landmarks_color = (255, 255, 0) if self.rect_locked else (0, 255, 0)
        LandmarksProcessor.draw_rect_landmarks(
            image, view_rect, view_landmarks, self.image_size, self.face_type,
            landmarks_color=landmarks_color)
        self.extract_needed = False

        io.show_image(self.wnd_name, image)
    else:
        self.result.append(result)
        io.progress_bar_inc(1)
        InfoNotifier.InfoNotifier.g_progress_info.append(
            str(result.filepath) + " extraction finished!")
        QApplication.processEvents()
def on_result(self, host_dict, data, result):
    if self.type == 'landmarks-manual':
        filepath, landmarks = result.filepath, result.landmarks

        if len(landmarks) != 0 and landmarks[0] is not None:
            self.landmarks = landmarks[0]

        self.redraw()
    else:
        self.result.append(result)
        io.progress_bar_inc(1)
def sort_by_absdiff(input_path):
    io.log_info("Sorting by absolute difference...")

    is_sim = io.input_bool("Sort by similar?", True,
                           help_message="Otherwise sort by dissimilar.")

    from core.leras import nn

    device_config = nn.ask_choose_device_idxs(choose_only_one=True,
                                              return_device_config=True)
    nn.initialize(device_config=device_config)
    tf = nn.tf

    image_paths = pathex.get_image_paths(input_path)
    image_paths_len = len(image_paths)

    batch_size = 1024
    batch_size_remain = image_paths_len % batch_size

    i_t = tf.placeholder(tf.float32, (None, 256, 256, 3))
    j_t = tf.placeholder(tf.float32, (None, 256, 256, 3))

    outputs_full = []
    outputs_remain = []

    for i in range(batch_size):
        diff_t = tf.reduce_sum(tf.abs(i_t - j_t[i]), axis=[1, 2, 3])
        outputs_full.append(diff_t)
        if i < batch_size_remain:
            outputs_remain.append(diff_t)

    def func_bs_full(i, j):
        return nn.tf_sess.run(outputs_full, feed_dict={i_t: i, j_t: j})

    def func_bs_remain(i, j):
        return nn.tf_sess.run(outputs_remain, feed_dict={i_t: i, j_t: j})

    import h5py
    db_file_path = Path(tempfile.gettempdir()) / 'sort_cache.hdf5'
    db_file = h5py.File(str(db_file_path), "w")
    db = db_file.create_dataset("results", (image_paths_len, image_paths_len),
                                compression="gzip")

    pg_len = image_paths_len // batch_size
    if batch_size_remain != 0:
        pg_len += 1
    pg_len = int((pg_len * pg_len - pg_len) / 2 + pg_len)

    io.progress_bar("Computing", pg_len)
    j = 0
    while j < image_paths_len:
        j_images = [cv2_imread(x) for x in image_paths[j:j + batch_size]]
        j_images_len = len(j_images)

        func = func_bs_remain if image_paths_len - j < batch_size else func_bs_full

        i = 0
        while i < image_paths_len:
            if i >= j:
                i_images = [cv2_imread(x) for x in image_paths[i:i + batch_size]]
                i_images_len = len(i_images)

                result = func(i_images, j_images)
                db[j:j + j_images_len, i:i + i_images_len] = np.array(result)

                io.progress_bar_inc(1)
            i += batch_size

        db_file.flush()
        j += batch_size

    io.progress_bar_close()

    next_id = 0
    sorted = [next_id]
    for i in io.progress_bar_generator(range(image_paths_len - 1), "Sorting"):
        id_ar = np.concatenate([db[:next_id, next_id], db[next_id, next_id:]])
        id_ar = np.argsort(id_ar)

        next_id = np.setdiff1d(id_ar, sorted, True)[0 if is_sim else -1]
        sorted += [next_id]

    db_file.close()
    db_file_path.unlink()

    img_list = [(image_paths[x],) for x in sorted]
    return img_list, []
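# A minimal, self-contained sketch (not project code) of the same greedy
# "sort by similarity" idea used in sort_by_absdiff above, but on a small
# in-memory NumPy distance matrix instead of the HDF5-backed pairwise table.
# The name greedy_order is hypothetical and only illustrates the ordering step:
# from the last chosen image, pick the closest (or farthest) not-yet-chosen one.
import numpy as np

def greedy_order(images, by_similar=True):
    n = len(images)
    flat = np.stack([img.astype(np.float32).ravel() for img in images])
    # full pairwise sum-of-absolute-differences matrix, shape (n, n)
    dist = np.abs(flat[:, None, :] - flat[None, :, :]).sum(axis=-1)

    order = [0]
    for _ in range(n - 1):
        cur = order[-1]
        candidates = np.argsort(dist[cur])           # closest first
        remaining = [c for c in candidates if c not in order]
        order.append(remaining[0] if by_similar else remaining[-1])
    return order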
def on_result(self, host_dict, data, result):
    idx, yaws_sample_list = data
    self.result[idx] = yaws_sample_list
    io.progress_bar_inc(1)
def on_result(self, host_dict, data, result):
    if result[0] == 0:
        self.result.append(result[1])
    else:
        self.result_trash.append(result[1])
    io.progress_bar_inc(1)
def on_result(self, host_dict, data, result):
    self.img_list[data[0]][2] = result
    io.progress_bar_inc(1)
def get_data(self, host_dict):
    if self.type == 'landmarks-manual':
        need_remark_face = False
        redraw_needed = False
        while len(self.input_data) > 0:
            data = self.input_data[0]
            filepath, data_rects, data_landmarks = data.filepath, data.rects, data.landmarks
            is_frame_done = False

            if need_remark_face:  # need remark image from input data that already has a marked face?
                need_remark_face = False
                if len(data_rects) != 0:
                    # If there was already a face then lock the rectangle to it until the mouse is clicked
                    self.rect = data_rects.pop()
                    self.landmarks = data_landmarks.pop()
                    data_rects.clear()
                    data_landmarks.clear()

                    redraw_needed = True
                    self.rect_locked = True
                    self.rect_size = (self.rect[2] - self.rect[0]) / 2
                    self.x = (self.rect[0] + self.rect[2]) / 2
                    self.y = (self.rect[1] + self.rect[3]) / 2

            if len(data_rects) == 0:
                if self.cache_original_image[0] == filepath:
                    self.original_image = self.cache_original_image[1]
                else:
                    self.original_image = imagelib.normalize_channels(cv2_imread(filepath), 3)
                    self.cache_original_image = (filepath, self.original_image)

                (h, w, c) = self.original_image.shape
                self.view_scale = 1.0 if self.manual_window_size == 0 else \
                    self.manual_window_size / (h * (16.0 / 9.0))

                if self.cache_image[0] == (h, w, c) + (self.view_scale, filepath):
                    self.image = self.cache_image[1]
                else:
                    self.image = cv2.resize(self.original_image,
                                            (int(w * self.view_scale), int(h * self.view_scale)),
                                            interpolation=cv2.INTER_LINEAR)
                    self.cache_image = ((h, w, c) + (self.view_scale, filepath), self.image)

                (h, w, c) = self.image.shape

                sh = (0, 0, w, min(100, h))
                if self.cache_text_lines_img[0] == sh:
                    self.text_lines_img = self.cache_text_lines_img[1]
                else:
                    self.text_lines_img = (imagelib.get_draw_text_lines(
                        self.image, sh, [
                            '[Mouse click] - lock/unlock selection',
                            '[Mouse wheel] - change rect',
                            '[Enter] / [Space] - confirm / skip frame',
                            '[,] [.]- prev frame, next frame. [Q] - skip remaining frames',
                            '[a] - accuracy on/off (more fps)',
                            '[h] - hide this help'
                        ], (1, 1, 1)) * 255).astype(np.uint8)
                    self.cache_text_lines_img = (sh, self.text_lines_img)

                while True:
                    io.process_messages(0.0001)

                    new_x = self.x
                    new_y = self.y
                    new_rect_size = self.rect_size

                    mouse_events = io.get_mouse_events(self.wnd_name)
                    for ev in mouse_events:
                        (x, y, ev, flags) = ev
                        if ev == io.EVENT_MOUSEWHEEL and not self.rect_locked:
                            mod = 1 if flags > 0 else -1
                            diff = 1 if new_rect_size <= 40 else np.clip(new_rect_size / 10, 1, 10)
                            new_rect_size = max(5, new_rect_size + diff * mod)
                        elif ev == io.EVENT_LBUTTONDOWN:
                            self.rect_locked = not self.rect_locked
                            self.extract_needed = True
                        elif not self.rect_locked:
                            new_x = np.clip(x, 0, w - 1) / self.view_scale
                            new_y = np.clip(y, 0, h - 1) / self.view_scale

                    key_events = io.get_key_events(self.wnd_name)
                    key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = \
                        key_events[-1] if len(key_events) > 0 else (0, 0, False, False, False)

                    if key == ord('\r') or key == ord('\n'):
                        # confirm frame
                        is_frame_done = True
                        data_rects.append(self.rect)
                        data_landmarks.append(self.landmarks)
                        break
                    elif key == ord(' '):
                        # confirm skip frame
                        is_frame_done = True
                        break
                    elif key == ord(',') and len(self.result) > 0:
                        # go prev frame
                        if self.rect_locked:
                            self.rect_locked = False
                            # Only save the face if the rect is still locked
                            data_rects.append(self.rect)
                            data_landmarks.append(self.landmarks)

                        self.input_data.insert(0, self.result.pop())
                        io.progress_bar_inc(-1)
                        need_remark_face = True
                        break
                    elif key == ord('.'):
                        # go next frame
                        if self.rect_locked:
                            self.rect_locked = False
                            # Only save the face if the rect is still locked
                            data_rects.append(self.rect)
                            data_landmarks.append(self.landmarks)
                        need_remark_face = True
                        is_frame_done = True
                        break
                    elif key == ord('q'):
                        # skip remaining
                        if self.rect_locked:
                            self.rect_locked = False
                            data_rects.append(self.rect)
                            data_landmarks.append(self.landmarks)
                        while len(self.input_data) > 0:
                            self.result.append(self.input_data.pop(0))
                            io.progress_bar_inc(1)
                        break
                    elif key == ord('h'):
                        self.hide_help = not self.hide_help
                        break
                    elif key == ord('a'):
                        self.landmarks_accurate = not self.landmarks_accurate
                        break

                    if self.x != new_x or \
                       self.y != new_y or \
                       self.rect_size != new_rect_size or \
                       self.extract_needed or \
                       redraw_needed:
                        self.x = new_x
                        self.y = new_y
                        self.rect_size = new_rect_size
                        self.rect = (int(self.x - self.rect_size),
                                     int(self.y - self.rect_size),
                                     int(self.x + self.rect_size),
                                     int(self.y + self.rect_size))

                        if redraw_needed:
                            redraw_needed = False
                            return ExtractSubprocessor.Data(
                                filepath, landmarks_accurate=self.landmarks_accurate)
                        else:
                            return ExtractSubprocessor.Data(
                                filepath, rects=[self.rect],
                                landmarks_accurate=self.landmarks_accurate)
            else:
                is_frame_done = True

            if is_frame_done:
                self.result.append(data)
                self.input_data.pop(0)
                io.progress_bar_inc(1)
                self.extract_needed = True
                self.rect_locked = False
    else:
        if len(self.input_data) > 0:
            return self.input_data.pop(0)

    return None
def on_result(self, host_dict, data, result):
    idx, dflimg = result
    self.result[idx] = (self.image_paths[idx], dflimg)
    io.progress_bar_inc(1)
def on_result(self, host_dict, data, result):
    idx, is_ok = result
    if is_ok:
        self.result.append(idx)
    io.progress_bar_inc(1)
def on_result(self, host_dict, data, result):
    io.progress_bar_inc(1)
    if result[0] == 1:
        self.result += [(result[1], result[2])]
def get_data(self, host_dict):
    if self.type == 'landmarks-manual':
        need_remark_face = False
        while len(self.input_data) > 0:
            data = self.input_data[0]
            filepath, data_rects, data_landmarks = data.filepath, data.rects, data.landmarks
            is_frame_done = False

            if self.image_filepath != filepath:
                self.image_filepath = filepath
                if self.cache_original_image[0] == filepath:
                    self.original_image = self.cache_original_image[1]
                else:
                    self.original_image = imagelib.normalize_channels(cv2_imread(filepath), 3)
                    self.cache_original_image = (filepath, self.original_image)

                (h, w, c) = self.original_image.shape
                self.view_scale = 1.0 if self.manual_window_size == 0 else \
                    self.manual_window_size / (h * (16.0 / 9.0))

                if self.cache_image[0] == (h, w, c) + (self.view_scale, filepath):
                    self.image = self.cache_image[1]
                else:
                    self.image = cv2.resize(self.original_image,
                                            (int(w * self.view_scale), int(h * self.view_scale)),
                                            interpolation=cv2.INTER_LINEAR)
                    self.cache_image = ((h, w, c) + (self.view_scale, filepath), self.image)

                (h, w, c) = self.image.shape

                sh = (0, 0, w, min(100, h))
                if self.cache_text_lines_img[0] == sh:
                    self.text_lines_img = self.cache_text_lines_img[1]
                else:
                    self.text_lines_img = (imagelib.get_draw_text_lines(
                        self.image, sh, [
                            '[L Mouse click] - lock/unlock selection. [Mouse wheel] - change rect',
                            '[R Mouse Click] - manual face rectangle',
                            '[Enter] / [Space] - confirm / skip frame',
                            '[,] [.]- prev frame, next frame. [Q] - skip remaining frames',
                            '[a] - accuracy on/off (more fps)',
                            '[h] - hide this help'
                        ], (1, 1, 1)) * 255).astype(np.uint8)
                    self.cache_text_lines_img = (sh, self.text_lines_img)

            if need_remark_face:  # need remark image from input data that already has a marked face?
                need_remark_face = False
                if len(data_rects) != 0:
                    # If there was already a face then lock the rectangle to it until the mouse is clicked
                    self.rect = data_rects.pop()
                    self.landmarks = data_landmarks.pop()
                    data_rects.clear()
                    data_landmarks.clear()

                    self.rect_locked = True
                    self.rect_size = (self.rect[2] - self.rect[0]) / 2
                    self.x = (self.rect[0] + self.rect[2]) / 2
                    self.y = (self.rect[1] + self.rect[3]) / 2
                    self.redraw()

            if len(data_rects) == 0:
                (h, w, c) = self.image.shape
                while True:
                    io.process_messages(0.0001)
                    self.ea.loop()

                    if not self.force_landmarks:
                        new_x = self.x
                        new_y = self.y

                    new_rect_size = self.rect_size

                    mouse_events = io.get_mouse_events(self.wnd_name)
                    for ev in mouse_events:
                        (x, y, ev, flags) = ev
                        if ev == io.EVENT_MOUSEWHEEL and not self.rect_locked:
                            mod = 1 if flags > 0 else -1
                            diff = 1 if new_rect_size <= 40 else np.clip(new_rect_size / 10, 1, 10)
                            new_rect_size = max(5, new_rect_size + diff * mod)
                        elif ev == io.EVENT_LBUTTONDOWN:
                            if self.force_landmarks:
                                self.x = new_x
                                self.y = new_y
                                self.force_landmarks = False
                                self.rect_locked = True
                                self.redraw()
                            else:
                                self.rect_locked = not self.rect_locked
                                self.extract_needed = True
                        elif ev == io.EVENT_RBUTTONDOWN:
                            self.ea.right_btn_down = True
                            # self.force_landmarks = not self.force_landmarks
                            # if self.force_landmarks:
                            #     self.rect_locked = False
                        elif not self.rect_locked:
                            new_x = np.clip(x, 0, w - 1) / self.view_scale
                            new_y = np.clip(y, 0, h - 1) / self.view_scale

                    key_events = io.get_key_events(self.wnd_name)
                    key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = \
                        key_events[-1] if len(key_events) > 0 else (0, 0, False, False, False)

                    if self.ea.right_btn_down and self.rect_locked:
                        is_frame_done = True
                        data_rects.append(self.rect)
                        data_landmarks.append(self.landmarks)
                        self.ea.last_outer = self.ea.cur_outer
                        self.ea.last_landmarks = self.ea.cur_landmarks
                        self.ea.auto = True
                        break
                    elif key == ord('s'):
                        self.ea.auto = False
                        break
                    elif self.ea.auto and len(self.ea.last_outer) > 0 and len(self.ea.last_landmarks) > 0:
                        # Derive this frame's x/y and rect size from the previous outer contour
                        border_ratio = 0.6
                        last_mid = F.mid_point_by_range(self.ea.last_landmarks)
                        last_border = np.linalg.norm(
                            np.array(self.ea.last_outer[0]) - np.array(self.ea.last_outer[1]))
                        last_area = F.poly_area(self.ea.last_outer)

                        x, y = last_mid
                        new_x = np.clip(x, 0, w - 1) / self.view_scale
                        new_y = np.clip(y, 0, h - 1) / self.view_scale
                        new_rect_size = last_border / 2 / self.view_scale * border_ratio

                        # make sure rect and landmarks have been refreshed
                        if len(self.ea.cur_outer) != 0:
                            # Check whether the current outer contour is valid,
                            # using edge length, area and angle
                            # temp_mid = F.mid_point(self.temp_outer)
                            cur_mid = F.mid_point_by_range(self.ea.cur_landmarks)
                            dist = np.linalg.norm(np.array(cur_mid) - np.array(last_mid))
                            dist_r = dist / last_border

                            temp_area = F.poly_area(self.ea.cur_outer)
                            area_r = temp_area / last_area

                            v0 = np.array(last_mid) - np.array(self.ea.last_outer[0])
                            v1 = np.array(cur_mid) - np.array(self.ea.cur_outer[0])
                            angle = math.fabs(F.angle_between(v0, v1))

                            if dist_r < 0.5 and 0.5 < area_r < 1.5 and angle < 0.7:
                                is_frame_done = True
                                self.ea.last_outer = self.ea.cur_outer
                                self.ea.last_landmarks = self.ea.cur_landmarks
                                data_rects.append(self.rect)
                                data_landmarks.append(self.landmarks)
                                self.ea.auto = True
                                break
                            elif self.x != new_x or self.y != new_y:
                                # could retry after waiting for one more update round
                                pass
                            else:
                                self.ea.auto = False
                                F.beep()
                    elif key == ord('n') and len(self.result) > 0:
                        # go prev frame without saving, and clear its result
                        self.rect_locked = False
                        n = 10 if shift_pressed else 1
                        while n > 0 and len(self.result) > 0:
                            self.input_data.insert(0, self.result.pop())
                            self.input_data[0].rects.clear()
                            self.input_data[0].landmarks.clear()
                            io.progress_bar_inc(-1)
                            n -= 1
                        # discard the previous result and re-annotate
                        self.extract_needed = True
                        break
                    elif key == ord('m') and len(self.input_data) > 0:
                        # go next frame without saving
                        self.rect_locked = False
                        n = 10 if shift_pressed else 1
                        while n > 0 and len(self.input_data) > 0:
                            self.result.append(self.input_data.pop(0))
                            io.progress_bar_inc(1)
                            n -= 1
                        # discard the previous result and re-annotate
                        self.extract_needed = True
                        break
                    elif key == ord('\r') or key == ord('\n'):
                        # confirm frame
                        is_frame_done = True
                        data_rects.append(self.rect)
                        data_landmarks.append(self.landmarks)
                        break
                    elif key == ord(' '):
                        # confirm skip frame
                        is_frame_done = True
                        break
                    elif key == ord(',') and len(self.result) > 0:
                        # go prev frame
                        if self.rect_locked:
                            self.rect_locked = False
                            # Only save the face if the rect is still locked
                            data_rects.append(self.rect)
                            data_landmarks.append(self.landmarks)

                        self.input_data.insert(0, self.result.pop())
                        io.progress_bar_inc(-1)
                        need_remark_face = True
                        break
                    elif key == ord('.'):
                        # go next frame
                        if self.rect_locked:
                            self.rect_locked = False
                            # Only save the face if the rect is still locked
                            data_rects.append(self.rect)
                            data_landmarks.append(self.landmarks)
                        need_remark_face = True
                        is_frame_done = True
                        break
                    elif key == ord('q'):
                        # skip remaining
                        if self.rect_locked:
                            self.rect_locked = False
                            data_rects.append(self.rect)
                            data_landmarks.append(self.landmarks)
                        while len(self.input_data) > 0:
                            self.result.append(self.input_data.pop(0))
                            io.progress_bar_inc(1)
                        break
                    elif key == ord('h'):
                        self.hide_help = not self.hide_help
                        break
                    elif key == ord('a'):
                        self.landmarks_accurate = not self.landmarks_accurate
                        break

                    if self.force_landmarks:
                        pt2 = np.float32([new_x, new_y])
                        pt1 = np.float32([self.x, self.y])

                        pt_vec_len = npla.norm(pt2 - pt1)
                        pt_vec = pt2 - pt1
                        if pt_vec_len != 0:
                            pt_vec /= pt_vec_len

                        self.rect_size = pt_vec_len
                        self.rect = (int(self.x - self.rect_size),
                                     int(self.y - self.rect_size),
                                     int(self.x + self.rect_size),
                                     int(self.y + self.rect_size))

                        if pt_vec_len > 0:
                            lmrks = np.concatenate(
                                (np.zeros((17, 2), np.float32), LandmarksProcessor.landmarks_2D),
                                axis=0)
                            lmrks -= lmrks[30:31, :]
                            mat = cv2.getRotationMatrix2D(
                                (0, 0),
                                -np.arctan2(pt_vec[1], pt_vec[0]) * 180 / math.pi,
                                pt_vec_len)
                            mat[:, 2] += (self.x, self.y)
                            self.landmarks = LandmarksProcessor.transform_points(lmrks, mat)

                        self.redraw()
                    elif self.x != new_x or \
                         self.y != new_y or \
                         self.rect_size != new_rect_size or \
                         self.extract_needed:
                        self.x = new_x
                        self.y = new_y
                        self.rect_size = new_rect_size
                        self.rect = (int(self.x - self.rect_size),
                                     int(self.y - self.rect_size),
                                     int(self.x + self.rect_size),
                                     int(self.y + self.rect_size))

                        return ExtractSubprocessor.Data(
                            filepath, rects=[self.rect],
                            landmarks_accurate=self.landmarks_accurate)
            else:
                is_frame_done = True

            if is_frame_done:
                self.result.append(data)
                self.input_data.pop(0)
                io.progress_bar_inc(1)
                self.extract_needed = True
                self.rect_locked = False
                self.ea.cur_outer = []
    else:
        if len(self.input_data) > 0:
            return self.input_data.pop(0)

    return None
def run(self):
    if not self.on_check_run():
        return self.get_result()

    self.clis = []

    # getting info about names of subprocesses, host and client dicts, and spawning them
    for name, host_dict, client_dict in self.process_info_generator():
        try:
            cli = self.SubprocessorCli_class(client_dict)
            cli.state = 1
            cli.sent_time = 0
            cli.sent_data = None
            cli.name = name
            cli.host_dict = host_dict

            self.clis.append(cli)

            if self.initialize_subprocesses_in_serial:
                while True:
                    while not cli.c2s.empty():
                        obj = cli.c2s.get()
                        op = obj.get('op', '')
                        if op == 'init_ok':
                            cli.state = 0
                        elif op == 'log_info':
                            io.log_info(obj['msg'])
                        elif op == 'log_err':
                            io.log_err(obj['msg'])
                        elif op == 'error':
                            cli.kill()
                            self.clis.remove(cli)
                            break
                    if cli.state == 0:
                        break
                    io.process_messages(0.005)
        except:
            raise Exception(f"Unable to start subprocess {name}. Error: {traceback.format_exc()}")

    if len(self.clis) == 0:
        raise Exception("Unable to start Subprocessor '%s' " % (self.name))

    # waiting for subprocesses to report successful (or failed) initialization
    while True:
        for cli in self.clis[:]:
            while not cli.c2s.empty():
                obj = cli.c2s.get()
                op = obj.get('op', '')
                if op == 'init_ok':
                    cli.state = 0
                elif op == 'log_info':
                    io.log_info(obj['msg'])
                elif op == 'log_err':
                    io.log_err(obj['msg'])
                elif op == 'error':
                    cli.kill()
                    self.clis.remove(cli)
                    break

        if all([cli.state == 0 for cli in self.clis]):
            break

        io.process_messages(0.005)

    if len(self.clis) == 0:
        raise Exception("Unable to start subprocesses.")

    # ok, some processes survived, initialize host logic
    self.on_clients_initialized()

    # main loop of data processing
    while True:
        for cli in self.clis[:]:
            while not cli.c2s.empty():
                obj = cli.c2s.get()
                op = obj.get('op', '')
                if op == 'success':
                    # successfully processed data, pass data and result to on_result
                    self.on_result(cli.host_dict, obj['data'], obj['result'])
                    self.sent_data = None
                    cli.state = 0
                elif op == 'error':
                    # some error occurred while processing data, return the chunk to on_data_return
                    if 'data' in obj.keys():
                        self.on_data_return(cli.host_dict, obj['data'])
                    # and kill the process
                    cli.kill()
                    self.clis.remove(cli)
                elif op == 'log_info':
                    io.log_info(obj['msg'])
                elif op == 'log_err':
                    io.log_err(obj['msg'])
                elif op == 'progress_bar_inc':
                    io.progress_bar_inc(obj['c'])

        for cli in self.clis[:]:
            if cli.state == 1:
                if cli.sent_time != 0 and self.no_response_time_sec != 0 and \
                   (time.time() - cli.sent_time) > self.no_response_time_sec:
                    # subprocess busy too long
                    print("%s doesn't respond, terminating it." % (cli.name))
                    self.on_data_return(cli.host_dict, cli.sent_data)
                    cli.kill()
                    self.clis.remove(cli)

        for cli in self.clis[:]:
            if cli.state == 0:
                # subprocess is free, get some data from get_data
                data = self.get_data(cli.host_dict)
                if data is not None:
                    # and send it to the subprocess
                    cli.s2c.put({'op': 'data', 'data': data})
                    cli.sent_time = time.time()
                    cli.sent_data = data
                    cli.state = 1

        if self.io_loop_sleep_time != 0:
            io.process_messages(self.io_loop_sleep_time)

        if self.on_tick() and all([cli.state == 0 for cli in self.clis]):
            # all subprocesses are free and no more data is available to process, ending loop
            break

    # gracefully terminating subprocesses
    for cli in self.clis[:]:
        cli.s2c.put({'op': 'close'})
        cli.sent_time = time.time()

    while True:
        for cli in self.clis[:]:
            terminate_it = False
            while not cli.c2s.empty():
                obj = cli.c2s.get()
                obj_op = obj['op']
                if obj_op == 'finalized':
                    terminate_it = True
                    break

            if self.no_response_time_sec != 0 and \
               (time.time() - cli.sent_time) > self.no_response_time_sec:
                terminate_it = True

            if terminate_it:
                cli.state = 2
                cli.kill()

        if all([cli.state == 2 for cli in self.clis]):
            break

    # finalizing host logic and returning the result
    self.on_clients_finalized()

    return self.get_result()
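# Hedged illustration (not project code) of how the host-side callbacks driven by
# Subprocessor.run() above fit together: run() spawns one client per tuple yielded
# by process_info_generator(), then repeatedly calls get_data() to feed idle
# clients and on_result() for each finished chunk, until get_data() returns None
# and every client is idle. The subclass constructor and the client-side Cli class
# are outside this section and therefore omitted; self.work_items is assumed to be
# prepared by that omitted constructor.
class ExampleSubprocessor(Subprocessor):
    def process_info_generator(self):
        # one worker; host_dict / client_dict may carry any per-worker settings
        yield 'worker 0', {}, {}

    def on_clients_initialized(self):
        self.result = []
        io.progress_bar("Processing", len(self.work_items))

    def get_data(self, host_dict):
        # hand out the next chunk, or None when everything has been dispatched
        return self.work_items.pop(0) if len(self.work_items) > 0 else None

    def on_data_return(self, host_dict, data):
        # a worker died: put the chunk back so another worker can retry it
        self.work_items.insert(0, data)

    def on_result(self, host_dict, data, result):
        self.result.append(result)
        io.progress_bar_inc(1)

    def on_clients_finalized(self):
        io.progress_bar_close()

    def get_result(self):
        return self.result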
def tick(self):
    for cli in self.clis[:]:
        while not cli.c2s.empty():
            obj = cli.c2s.get()
            op = obj.get('op', '')
            if op == 'success':
                # successfully processed data, pass data and result to on_result
                self.on_result(cli.host_dict, obj['data'], obj['result'])
                self.sent_data = None
                cli.state = 0
            elif op == 'error':
                # some error occurred while processing data, return the chunk to on_data_return
                if 'data' in obj.keys():
                    self.on_data_return(cli.host_dict, obj['data'])
                # and kill the process
                cli.kill()
                self.clis.remove(cli)
            elif op == 'log_info':
                io.log_info(obj['msg'])
            elif op == 'log_err':
                io.log_err(obj['msg'])
            elif op == 'progress_bar_inc':
                io.progress_bar_inc(obj['c'])

    for cli in self.clis[:]:
        if cli.state == 1:
            if cli.sent_time != 0 and self.no_response_time_sec != 0 and \
               (time.time() - cli.sent_time) > self.no_response_time_sec:
                # subprocess busy too long
                io.log_info("%s doesn't respond, terminating it." % (cli.name))
                self.on_data_return(cli.host_dict, cli.sent_data)
                cli.kill()
                self.clis.remove(cli)

    for cli in self.clis[:]:
        if cli.state == 0:
            # subprocess is free, get some data from get_data
            data = self.get_data(cli.host_dict)
            if data is not None:
                # and send it to the subprocess
                cli.s2c.put({'op': 'data', 'data': data})
                cli.sent_time = time.time()
                cli.sent_data = data
                cli.state = 1

    if all([cli.state == 0 for cli in self.clis]):
        # gracefully terminating subprocesses
        for cli in self.clis[:]:
            cli.s2c.put({'op': 'close'})
            cli.sent_time = time.time()

        while True:
            for cli in self.clis[:]:
                terminate_it = False
                while not cli.c2s.empty():
                    obj = cli.c2s.get()
                    obj_op = obj['op']
                    if obj_op == 'finalized':
                        terminate_it = True
                        break

                if (time.time() - cli.sent_time) > 30:
                    terminate_it = True

                if terminate_it:
                    cli.state = 2
                    cli.kill()

            if all([cli.state == 2 for cli in self.clis]):
                break

        # finalizing host logic
        self.q_timer.stop()
        self.q_timer = None
        self.on_clients_finalized()
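# The tick() variant above ends by stopping self.q_timer, which suggests it is
# driven by a Qt timer instead of the blocking run() loop. A minimal sketch of that
# wiring follows; the project's actual start-up code is not part of this section,
# so the method name start_ticking and the 10 ms interval are assumptions.
from PyQt5.QtCore import QTimer

def start_ticking(self):
    # intended as a method on the same subprocessor class as tick()
    self.q_timer = QTimer()
    self.q_timer.timeout.connect(self.tick)   # poll clients on every timeout
    self.q_timer.start(10)                    # interval in milliseconds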
def on_result(self, host_dict, data, result):
    io.progress_bar_inc(1)
def on_tick(self):
    self.predictor_func_host.process_messages()
    self.superres_host.process_messages()
    self.fanseg_host.process_messages()

    go_prev_frame = False
    go_first_frame = False
    go_prev_frame_overriding_cfg = False
    go_first_frame_overriding_cfg = False

    go_next_frame = self.process_remain_frames
    go_next_frame_overriding_cfg = False
    go_last_frame_overriding_cfg = False

    cur_frame = None
    if len(self.frames_idxs) != 0:
        cur_frame = self.frames[self.frames_idxs[0]]

    if self.is_interactive:
        self.main_screen.set_waiting_icon(False)

        if not self.is_interactive_quitting and not self.process_remain_frames:
            if cur_frame is not None:
                if not cur_frame.is_shown:
                    if cur_frame.is_done:
                        cur_frame.is_shown = True
                        io.log_info(cur_frame.cfg.to_string(cur_frame.frame_info.filepath.name))

                        if cur_frame.image is None:
                            cur_frame.image = cv2_imread(cur_frame.output_filepath)
                            if cur_frame.image is None:
                                # unable to read? recompute then
                                cur_frame.is_done = False
                                cur_frame.is_shown = False
                        self.main_screen.set_image(cur_frame.image)
                    else:
                        self.main_screen.set_waiting_icon(True)
            else:
                self.main_screen.set_image(None)
        else:
            self.main_screen.set_image(None)
            self.main_screen.set_waiting_icon(True)

        self.screen_manager.show_current()

        key_events = self.screen_manager.get_key_events()
        key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = \
            key_events[-1] if len(key_events) > 0 else (0, 0, False, False, False)

        if key == 9:  # tab
            self.screen_manager.switch_screens()
        else:
            if key == 27:  # esc
                self.is_interactive_quitting = True
            elif self.screen_manager.get_current() is self.main_screen:
                if self.merger_config.type == MergerConfig.TYPE_MASKED and chr_key in self.masked_keys:
                    self.process_remain_frames = False

                    if cur_frame is not None:
                        cfg = cur_frame.cfg
                        prev_cfg = cfg.copy()

                        if cfg.type == MergerConfig.TYPE_MASKED:
                            self.masked_keys_funcs[chr_key](cfg, shift_pressed)

                        if prev_cfg != cfg:
                            io.log_info(cfg.to_string(cur_frame.frame_info.filepath.name))
                            cur_frame.is_done = False
                            cur_frame.is_shown = False
                else:
                    if chr_key == ',' or chr_key == 'm':
                        self.process_remain_frames = False
                        go_prev_frame = True

                        if chr_key == ',':
                            if shift_pressed:
                                go_first_frame = True
                        elif chr_key == 'm':
                            if not shift_pressed:
                                go_prev_frame_overriding_cfg = True
                            else:
                                go_first_frame_overriding_cfg = True
                    elif chr_key == '.' or chr_key == '/':
                        self.process_remain_frames = False
                        go_next_frame = True

                        if chr_key == '.':
                            if shift_pressed:
                                self.process_remain_frames = not self.process_remain_frames
                        elif chr_key == '/':
                            if not shift_pressed:
                                go_next_frame_overriding_cfg = True
                            else:
                                go_last_frame_overriding_cfg = True
                    elif chr_key == '-':
                        self.screen_manager.get_current().diff_scale(-0.1)
                    elif chr_key == '=':
                        self.screen_manager.get_current().diff_scale(0.1)

    if go_prev_frame:
        if cur_frame is None or cur_frame.is_done:
            if cur_frame is not None:
                cur_frame.image = None

            while True:
                if len(self.frames_done_idxs) > 0:
                    prev_frame = self.frames[self.frames_done_idxs.pop()]
                    self.frames_idxs.insert(0, prev_frame.idx)
                    prev_frame.is_shown = False
                    io.progress_bar_inc(-1)

                    if cur_frame is not None and (go_prev_frame_overriding_cfg or
                                                  go_first_frame_overriding_cfg):
                        if prev_frame.cfg != cur_frame.cfg:
                            prev_frame.cfg = cur_frame.cfg.copy()
                            prev_frame.is_done = False

                    cur_frame = prev_frame

                if go_first_frame_overriding_cfg or go_first_frame:
                    if len(self.frames_done_idxs) > 0:
                        continue
                break
    elif go_next_frame:
        if cur_frame is not None and cur_frame.is_done:
            cur_frame.image = None
            cur_frame.is_shown = True
            self.frames_done_idxs.append(cur_frame.idx)
            self.frames_idxs.pop(0)
            io.progress_bar_inc(1)

            f = self.frames

            if len(self.frames_idxs) != 0:
                next_frame = f[self.frames_idxs[0]]
                next_frame.is_shown = False

                if go_next_frame_overriding_cfg or go_last_frame_overriding_cfg:
                    if go_next_frame_overriding_cfg:
                        to_frames = next_frame.idx + 1
                    else:
                        to_frames = len(f)

                    for i in range(next_frame.idx, to_frames):
                        f[i].cfg = None

                for i in range(min(len(self.frames_idxs), self.prefetch_frame_count)):
                    frame = f[self.frames_idxs[i]]
                    if frame.cfg is None:
                        if i == 0:
                            frame.cfg = cur_frame.cfg.copy()
                        else:
                            frame.cfg = f[self.frames_idxs[i - 1]].cfg.copy()

                        frame.is_done = False  # initiate solve again
                        frame.is_shown = False

        if len(self.frames_idxs) == 0:
            self.process_remain_frames = False

    return (self.is_interactive and self.is_interactive_quitting) or \
           (not self.is_interactive and self.process_remain_frames == False)
def on_result(self, host_dict, data, result):
    if result == False:
        self.result.append(data[0])
    io.progress_bar_inc(1)
def on_clients_initialized(self):
    io.progress_bar("Sorting", len(self.img_list))
    io.progress_bar_inc(len(self.img_chunks_list))
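# Companion sketch: the progress bar opened in on_clients_initialized above is
# normally closed by the matching on_clients_finalized callback, which run()
# invokes after all clients have terminated. The pairing shown here is an
# assumption for illustration, not code taken from this project.
def on_clients_finalized(self):
    io.progress_bar_close()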