def onClientInitialize(self, client_dict):
    self.safe_print('Running on %s.' % (client_dict['device_name']))
    self.type = client_dict['type']
    self.image_size = client_dict['image_size']
    self.face_type = client_dict['face_type']
    self.device_idx = client_dict['device_idx']
    self.cpu_only = client_dict['device_type'] == 'CPU'
    self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
    self.debug = client_dict['debug']
    self.detector = client_dict['detector']

    self.keras = None
    self.tf = None
    self.tf_session = None

    self.e = None
    if self.type == 'rects':
        if self.detector is not None:
            if self.detector == 'mt':
                self.gpu_config = gpufmkmgr.GPUConfig(cpu_only=self.cpu_only,
                                                      force_best_gpu_idx=self.device_idx,
                                                      allow_growth=True)
                self.tf = gpufmkmgr.import_tf(self.gpu_config)
                self.tf_session = gpufmkmgr.get_tf_session()
                self.keras = gpufmkmgr.import_keras()
                self.e = facelib.MTCExtractor(self.keras, self.tf, self.tf_session)
            elif self.detector == 'dlib':
                self.dlib = gpufmkmgr.import_dlib(self.device_idx, cpu_only=self.cpu_only)
                self.e = facelib.DLIBExtractor(self.dlib)
            self.e.__enter__()

    elif self.type == 'landmarks':
        self.gpu_config = gpufmkmgr.GPUConfig(cpu_only=self.cpu_only,
                                              force_best_gpu_idx=self.device_idx,
                                              allow_growth=True)
        self.tf = gpufmkmgr.import_tf(self.gpu_config)
        self.tf_session = gpufmkmgr.get_tf_session()
        self.keras = gpufmkmgr.import_keras()
        self.e = facelib.LandmarksExtractor(self.keras)
        self.e.__enter__()

    elif self.type == 'final':
        pass

    return None
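# Illustrative only: a hypothetical client_dict built from the keys onClientInitialize
# reads above. The values are placeholders, not taken from the project.
example_client_dict = {
    'device_name': 'GeForce GTX 1080',  # shown by safe_print
    'type':        'rects',             # 'rects', 'landmarks' or 'final'
    'image_size':  256,
    'face_type':   'full_face',         # placeholder; the project may use its own face-type constant here
    'device_idx':  0,
    'device_type': 'GPU',               # 'CPU' puts the extractor into CPU-only mode
    'output_dir':  '/path/to/aligned',  # optional; output_path stays None when omitted
    'debug':       False,
    'detector':    'mt',                # 'mt' (MTCNN) or 'dlib'
}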
def __init__(self):
    import gpufmkmgr

    self.tf_module = gpufmkmgr.import_tf()
    self.tf_session = gpufmkmgr.get_tf_session()

    self.bgr_input_tensor = self.tf_module.placeholder("float", [None, None, 3])
    self.lab_input_tensor = self.tf_module.placeholder("float", [None, None, 3])
    self.lab_output_tensor = self.rgb_to_lab(self.tf_module, self.bgr_input_tensor)
    self.bgr_output_tensor = self.lab_to_rgb(self.tf_module, self.lab_input_tensor)
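# A minimal usage sketch (not part of the original file): the graphs built above are
# evaluated by feeding an HxWx3 float image into the matching placeholder. The helper
# name bgr2lab is an assumption for illustration.
def bgr2lab(self, bgr_image):
    # bgr_image: HxWx3 float array; returns the LAB tensor produced by rgb_to_lab
    return self.tf_session.run(self.lab_output_tensor,
                               feed_dict={self.bgr_input_tensor: bgr_image})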
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None,
             multi_gpu=False, force_best_gpu_idx=-1, force_gpu_idxs=None,
             write_preview_history=False, debug=False, **in_options):
    print("Loading model...")
    self.model_path = model_path
    self.model_data_path = Path(self.get_strpath_storage_for_file('data.dat'))

    self.training_data_src_path = training_data_src_path
    self.training_data_dst_path = training_data_dst_path
    self.training_datas = [None] * TrainingDataType.QTY

    self.src_images_paths = None
    self.dst_images_paths = None
    self.src_yaw_images_paths = None
    self.dst_yaw_images_paths = None
    self.src_data_generator = None
    self.dst_data_generator = None
    self.is_training_mode = (training_data_src_path is not None and
                             training_data_dst_path is not None)
    self.batch_size = 1
    self.write_preview_history = write_preview_history
    self.debug = debug
    self.supress_std_once = False  #True

    if self.model_data_path.exists():
        model_data = pickle.loads(self.model_data_path.read_bytes())
        self.epoch = model_data['epoch']
        self.options = model_data['options']
        self.loss_history = model_data['loss_history'] if 'loss_history' in model_data.keys() else []
        self.generator_dict_states = model_data['generator_dict_states'] if 'generator_dict_states' in model_data.keys() else None
        self.sample_for_preview = model_data['sample_for_preview'] if 'sample_for_preview' in model_data.keys() else None
    else:
        self.epoch = 0
        self.options = {}
        self.loss_history = []
        self.generator_dict_states = None
        self.sample_for_preview = None

    if self.write_preview_history:
        self.preview_history_path = self.model_path / ('%s_history' % (self.get_model_name()))

        if not self.preview_history_path.exists():
            self.preview_history_path.mkdir(exist_ok=True)
        else:
            if self.epoch == 0:
                for filename in Path_utils.get_image_paths(self.preview_history_path):
                    Path(filename).unlink()

    self.multi_gpu = multi_gpu

    gpu_idx = force_best_gpu_idx if (force_best_gpu_idx >= 0 and
                                     gpufmkmgr.isValidDeviceIdx(force_best_gpu_idx)) else gpufmkmgr.getBestDeviceIdx()
    gpu_total_vram_gb = gpufmkmgr.getDeviceVRAMTotalGb(gpu_idx)
    is_gpu_low_mem = (gpu_total_vram_gb < 4)

    self.gpu_total_vram_gb = gpu_total_vram_gb

    if self.epoch == 0:
        #first run
        self.options['created_vram_gb'] = gpu_total_vram_gb
        self.created_vram_gb = gpu_total_vram_gb
    else:
        #not first run
        if 'created_vram_gb' in self.options.keys():
            self.created_vram_gb = self.options['created_vram_gb']
        else:
            self.options['created_vram_gb'] = gpu_total_vram_gb
            self.created_vram_gb = gpu_total_vram_gb

    if force_gpu_idxs is not None:
        self.gpu_idxs = [int(x) for x in force_gpu_idxs.split(',')]
    else:
        if self.multi_gpu:
            self.gpu_idxs = gpufmkmgr.getDeviceIdxsEqualModel(gpu_idx)
            if len(self.gpu_idxs) <= 1:
                self.multi_gpu = False
        else:
            self.gpu_idxs = [gpu_idx]

    self.tf = gpufmkmgr.import_tf(self.gpu_idxs, allow_growth=False)
    self.keras = gpufmkmgr.import_keras()
    self.keras_contrib = gpufmkmgr.import_keras_contrib()

    self.onInitialize(**in_options)

    if self.debug:
        self.batch_size = 1

    if self.is_training_mode:
        if self.generator_list is None:
            raise Exception("You didn't set_training_data_generators()")
        else:
            for i, generator in enumerate(self.generator_list):
                if not isinstance(generator, TrainingDataGeneratorBase):
                    raise Exception('training data generator is not a subclass of TrainingDataGeneratorBase')

                if self.generator_dict_states is not None and i < len(self.generator_dict_states):
                    generator.set_dict_state(self.generator_dict_states[i])

        if self.sample_for_preview is None:
            self.sample_for_preview = self.generate_next_sample()

    print("===== Model summary =====")
    print("== Model name: " + self.get_model_name())
    print("==")
    print("== Current epoch: " + str(self.epoch))
    print("==")
    print("== Options:")
    print("== |== batch_size : %s " % (self.batch_size))
    print("== |== multi_gpu : %s " % (self.multi_gpu))
    for key in self.options.keys():
        print("== |== %s : %s" % (key, self.options[key]))

    print("== Running on:")
    for idx in self.gpu_idxs:
        print("== |== [%d : %s]" % (idx, gpufmkmgr.getDeviceName(idx)))

    if self.gpu_total_vram_gb == 2:
        print("==")
        print("== WARNING: You are using a 2GB GPU. If training does not start,")
        print("== close all programs and try again.")
        print("== You can also disable Windows Aero Desktop to get extra free VRAM.")
        print("==")

    print("=========================")
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None,
             batch_size=0, write_preview_history=False, debug=False, **in_options):
    print("Loading model...")
    self.model_path = model_path
    self.model_data_path = Path(self.get_strpath_storage_for_file('data.dat'))

    self.training_data_src_path = training_data_src_path
    self.training_data_dst_path = training_data_dst_path

    self.src_images_paths = None
    self.dst_images_paths = None
    self.src_yaw_images_paths = None
    self.dst_yaw_images_paths = None
    self.src_data_generator = None
    self.dst_data_generator = None
    self.is_training_mode = (training_data_src_path is not None and
                             training_data_dst_path is not None)
    self.batch_size = batch_size
    self.write_preview_history = write_preview_history
    self.debug = debug
    self.supress_std_once = ('TF_SUPPRESS_STD' in os.environ.keys() and
                             os.environ['TF_SUPPRESS_STD'] == '1')

    if self.model_data_path.exists():
        model_data = pickle.loads(self.model_data_path.read_bytes())
        self.epoch = model_data['epoch']
        self.options = model_data['options']
        self.loss_history = model_data['loss_history'] if 'loss_history' in model_data.keys() else []
        self.sample_for_preview = model_data['sample_for_preview'] if 'sample_for_preview' in model_data.keys() else None
    else:
        self.epoch = 0
        self.options = {}
        self.loss_history = []
        self.sample_for_preview = None

    if self.write_preview_history:
        self.preview_history_path = self.model_path / ('%s_history' % (self.get_model_name()))

        if not self.preview_history_path.exists():
            self.preview_history_path.mkdir(exist_ok=True)
        else:
            if self.epoch == 0:
                for filename in Path_utils.get_image_paths(self.preview_history_path):
                    Path(filename).unlink()

    self.gpu_config = gpufmkmgr.GPUConfig(allow_growth=False, **in_options)
    self.gpu_total_vram_gb = self.gpu_config.gpu_total_vram_gb

    if self.epoch == 0:
        #first run
        self.options['created_vram_gb'] = self.gpu_total_vram_gb
        self.created_vram_gb = self.gpu_total_vram_gb
    else:
        #not first run
        if 'created_vram_gb' in self.options.keys():
            self.created_vram_gb = self.options['created_vram_gb']
        else:
            self.options['created_vram_gb'] = self.gpu_total_vram_gb
            self.created_vram_gb = self.gpu_total_vram_gb

    self.tf = gpufmkmgr.import_tf(self.gpu_config)
    self.tf_sess = gpufmkmgr.get_tf_session()
    self.keras = gpufmkmgr.import_keras()
    self.keras_contrib = gpufmkmgr.import_keras_contrib()

    self.onInitialize(**in_options)

    if self.debug or self.batch_size == 0:
        self.batch_size = 1

    if self.is_training_mode:
        if self.generator_list is None:
            raise Exception("You didn't set_training_data_generators()")
        else:
            for i, generator in enumerate(self.generator_list):
                if not isinstance(generator, SampleGeneratorBase):
                    raise Exception('training data generator is not a subclass of SampleGeneratorBase')

        if self.sample_for_preview is None:
            self.sample_for_preview = self.generate_next_sample()

    print("===== Model summary =====")
    print("== Model name: " + self.get_model_name())
    print("==")
    print("== Current epoch: " + str(self.epoch))
    print("==")
    print("== Options:")
    print("== |== batch_size : %s " % (self.batch_size))
    print("== |== multi_gpu : %s " % (self.gpu_config.multi_gpu))
    for key in self.options.keys():
        print("== |== %s : %s" % (key, self.options[key]))

    print("== Running on:")
    if self.gpu_config.cpu_only:
        print("== |== [CPU]")
    else:
        for idx in self.gpu_config.gpu_idxs:
            print("== |== [%d : %s]" % (idx, gpufmkmgr.getDeviceName(idx)))

    if not self.gpu_config.cpu_only and self.gpu_total_vram_gb == 2:
        print("==")
        print("== WARNING: You are using a 2GB GPU. Result quality may be significantly decreased.")
        print("== If training does not start, close all programs and try again.")
        print("== You can also disable Windows Aero Desktop to get extra free VRAM.")
        print("==")

    print("=========================")
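# A minimal subclass sketch (hypothetical): the constructor above expects a subclass to
# provide get_model_name() and onInitialize(), and in training mode to register
# SampleGeneratorBase-derived generators via set_training_data_generators(). 'ModelBase'
# and the import path are assumptions; only the hook names come from the code above.
from models import ModelBase  # assumed location of the base class

class ExampleModel(ModelBase):
    def get_model_name(self):
        return 'ExampleModel'

    def onInitialize(self, **in_options):
        # build networks here; in training mode, register SampleGeneratorBase subclasses
        self.set_training_data_generators([])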
def extract_pass_process(sq, cq):
    e = None
    type = None
    device_idx = None
    debug = False
    output_path = None
    detector = None
    image_size = None
    face_type = None

    while True:
        obj = sq.get()
        obj_op = obj['op']

        if obj_op == 'extract':
            data = obj['data']
            filename_path = Path(data[0])

            if not filename_path.exists():
                cq.put({'op': 'error',
                        'close': False,
                        'is_file_not_found': True,
                        'data': obj['data'],
                        'message': 'Failed to extract %s, reason: file not found.' % (str(filename_path))})
            else:
                try:
                    image = cv2.imread(str(filename_path))

                    if type == 'rects':
                        rects = e.extract_from_bgr(image)
                        cq.put({'op': 'extract_success',
                                'data': obj['data'],
                                'result': [str(filename_path), rects]})

                    elif type == 'landmarks':
                        rects = data[1]
                        landmarks = e.extract_from_bgr(image, rects)
                        cq.put({'op': 'extract_success',
                                'data': obj['data'],
                                'result': [str(filename_path), landmarks]})

                    elif type == 'final':
                        result = []
                        faces = data[1]

                        if debug:
                            debug_output_file = '{}_{}'.format(str(Path(str(output_path) + '_debug') / filename_path.stem), 'debug.png')
                            debug_image = image.copy()

                        for (face_idx, face) in enumerate(faces):
                            rect = face[0]
                            image_landmarks = np.array(face[1])
                            image_to_face_mat = facelib.LandmarksProcessor.get_transform_mat(image_landmarks, image_size, face_type)
                            output_file = '{}_{}{}'.format(str(output_path / filename_path.stem), str(face_idx), '.png')

                            if debug:
                                facelib.LandmarksProcessor.draw_rect_landmarks(debug_image, rect, image_landmarks, image_size, face_type)

                            face_image = cv2.warpAffine(image, image_to_face_mat, (image_size, image_size))
                            face_image_landmarks = facelib.LandmarksProcessor.transform_points(image_landmarks, image_to_face_mat)

                            cv2.imwrite(output_file, face_image)

                            a_png = AlignedPNG.load(output_file)
                            d = {'type': 'face',
                                 'landmarks': face_image_landmarks.tolist(),
                                 'yaw_value': facelib.LandmarksProcessor.calc_face_yaw(face_image_landmarks),
                                 'pitch_value': facelib.LandmarksProcessor.calc_face_pitch(face_image_landmarks),
                                 'source_filename': filename_path.name,
                                 'source_rect': rect,
                                 'source_landmarks': image_landmarks.tolist()}
                            a_png.setFaceswapDictData(d)
                            a_png.save(output_file)

                            result.append(output_file)

                        if debug:
                            cv2.imwrite(debug_output_file, debug_image)

                        cq.put({'op': 'extract_success',
                                'data': obj['data'],
                                'result': result})
                except Exception as ex:
                    # separate name so the exception does not shadow the extractor 'e'
                    cq.put({'op': 'error',
                            'close': True,
                            'data': obj['data'],
                            'message': 'Failed to extract %s, reason: %s. \r\n%s' % (str(filename_path), str(ex), traceback.format_exc())})
                    break

        elif obj_op == 'init':
            try:
                type = obj['type']
                image_size = obj['image_size']
                face_type = obj['face_type']
                device_idx = obj['device_idx']
                output_path = Path(obj['output_dir']) if 'output_dir' in obj.keys() else None
                debug = obj['debug']
                detector = obj['detector']

                if type == 'rects':
                    if detector is not None:
                        if detector == 'mt':
                            tf = gpufmkmgr.import_tf([device_idx], allow_growth=True)
                            tf_session = gpufmkmgr.get_tf_session()
                            keras = gpufmkmgr.import_keras()
                            e = facelib.MTCExtractor(keras, tf, tf_session)
                        elif detector == 'dlib':
                            dlib = gpufmkmgr.import_dlib(device_idx)
                            e = facelib.DLIBExtractor(dlib)
                        e.__enter__()

                elif type == 'landmarks':
                    gpufmkmgr.import_tf([device_idx], allow_growth=True)
                    keras = gpufmkmgr.import_keras()
                    e = facelib.LandmarksExtractor(keras)
                    e.__enter__()

                elif type == 'final':
                    pass

                cq.put({'op': 'init_ok'})
            except Exception as ex:
                cq.put({'op': 'error',
                        'close': True,
                        'message': 'Exception during initialization: %s' % (traceback.format_exc())})
                break

    if detector is not None and (type == 'rects' or type == 'landmarks'):
        e.__exit__()
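# A minimal sketch (assumption; the host side is not shown in this excerpt) of driving
# extract_pass_process from another process: send an 'init' message first, then 'extract'
# messages whose 'data' list starts with the image path, and read replies from cq.
# Dictionary values are placeholders.
if __name__ == '__main__':
    import multiprocessing

    sq, cq = multiprocessing.Queue(), multiprocessing.Queue()
    p = multiprocessing.Process(target=extract_pass_process, args=(sq, cq))
    p.start()

    sq.put({'op': 'init', 'type': 'rects', 'image_size': 256, 'face_type': 'full_face',
            'device_idx': 0, 'output_dir': '/path/to/aligned', 'debug': False,
            'detector': 'mt'})
    assert cq.get()['op'] == 'init_ok'

    sq.put({'op': 'extract', 'data': ['/path/to/frame_0001.png']})
    print(cq.get())  # {'op': 'extract_success', ...} or {'op': 'error', ...}

    p.terminate()  # the worker loop defines no shutdown op in this excerpt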