Code Example #1
File: nnlib.py Project: yakumo1255qq/DeepFaceLab
    def import_keras(device_config=None):
        if nnlib.keras is not None:
            return nnlib.code_import_keras

        nnlib.import_tf(device_config)
        device_config = nnlib.active_DeviceConfig
        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()

        import keras as keras_
        nnlib.keras = keras_

        if device_config.use_fp16:
            nnlib.keras.backend.set_floatx('float16')

        nnlib.keras.backend.set_session(nnlib.tf_sess)
        nnlib.keras.backend.set_image_data_format('channels_last')

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor.__exit__()

        nnlib.__initialize_keras_functions()
        nnlib.code_import_keras = compile(nnlib.code_import_keras_string, '',
                                          'exec')
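
Every snippet on this page relies on std_utils.suppress_stdout_stderr, which is not shown here. The following is a minimal sketch of such a context manager, assuming it silences output by redirecting the OS-level stdout/stderr file descriptors to os.devnull; the actual DeepFaceLab std_utils implementation may differ in detail. Because the examples also invoke __enter__() and __exit__() by hand (the latter with no arguments), the sketch accepts that calling style.

import os

class suppress_stdout_stderr(object):
    # Sketch: redirect file descriptors 1 (stdout) and 2 (stderr) to os.devnull
    # on entry, and restore the originals on exit.
    def __enter__(self):
        self.saved_fds = [os.dup(1), os.dup(2)]                          # keep originals
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)
        return self

    def __exit__(self, *args):
        # *args so the manual suppressor.__exit__() calls in the examples also work.
        os.dup2(self.saved_fds[0], 1)
        os.dup2(self.saved_fds[1], 2)
        for fd in self.null_fds + self.saved_fds:
            os.close(fd)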
Code Example #2
def import_keras():
    global keras_module
    global keras_contrib_module
    
    if keras_module is not None:
        raise Exception ('Multiple import of keras is not allowed, reorganize your program.')
        
    sess = get_tf_session()
    if sess is None:
        raise Exception ('No TF session found. Import tf first.')
        
    if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
        suppressor = std_utils.suppress_stdout_stderr().__enter__()
        
    import keras
    import keras_contrib        

    keras.backend.tensorflow_backend.set_session(sess)
    
    if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':        
        suppressor.__exit__()

    keras_module = keras
    keras_contrib_module = keras_contrib
    return keras_module
Code Example #3
    def train_one_epoch(self):    
        if self.supress_std_once:
            supressor = std_utils.suppress_stdout_stderr()
            supressor.__enter__()
            
        self.last_sample = self.generate_next_sample() 

        epoch_time = time.time()
        
        losses = self.onTrainOneEpoch(self.last_sample)
        
        epoch_time = time.time() - epoch_time

        self.loss_history.append ( [float(loss[1]) for loss in losses] )
        
        if self.supress_std_once:
            supressor.__exit__()
            self.supress_std_once = False
                  
        if self.write_preview_history:
            if self.epoch % 10 == 0:
                img = (self.get_static_preview() * 255).astype(np.uint8)
                cv2.imwrite ( str (self.preview_history_path / ('%.6d.jpg' %( self.epoch) )), img )     
                
        self.epoch += 1
        
        #............."Saving... 
        loss_string = "Training [#{0:06d}][{1:04d}ms]".format ( self.epoch, int(epoch_time*1000) % 10000 )
        for (loss_name, loss_value) in losses:
            loss_string += " %s:%.3f" % (loss_name, loss_value)

        return loss_string
Code Example #4
def import_tf( device_idxs_list, allow_growth ):
    global tf_module
    global tf_session
    
    if tf_module is not None:
        raise Exception ('Multiple import of tf is not allowed, reorganize your program.')

    if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
        suppressor = std_utils.suppress_stdout_stderr().__enter__()
    else:
        suppressor = None

    if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
        os.environ.pop('CUDA_VISIBLE_DEVICES')
    
    os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
    
    import tensorflow as tf
    tf_module = tf
    
    visible_device_list = ''
    for idx in device_idxs_list: visible_device_list += str(idx) + ','
    visible_device_list = visible_device_list[:-1]
        
    config = tf_module.ConfigProto()
    config.gpu_options.allow_growth = allow_growth
    config.gpu_options.visible_device_list=visible_device_list
    config.gpu_options.force_gpu_compatible = True
    tf_session = tf_module.Session(config=config)
        
    if suppressor is not None:  
        suppressor.__exit__()

    return tf_module
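
For context, here is a hypothetical call sequence for the two module-level helpers shown in examples #2 and #4. The GPU index, the allow_growth value, and the use of TF_SUPPRESS_STD are illustrative assumptions; the surrounding DeepFaceLab code may wire these calls up differently.

import os

# Hypothetical usage sketch: import_tf() must run first so that
# get_tf_session() returns a live session when import_keras() is called.
os.environ['TF_SUPPRESS_STD'] = '1'   # silence import-time TF/Keras logging

tf = import_tf(device_idxs_list=[0], allow_growth=True)
keras = import_keras()                # binds Keras to the existing TF session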
Code Example #5
File: ModelBase.py Project: maxliaops/DeepFaceLab
    def train_one_epoch(self):    
        if self.supress_std_once:
            supressor = std_utils.suppress_stdout_stderr()
            supressor.__enter__()
            
        sample = self.generate_next_sample()        
        epoch_time = time.time()        
        losses = self.onTrainOneEpoch(sample, self.generator_list)        
        epoch_time = time.time() - epoch_time
        self.last_sample = sample
        
        self.loss_history.append ( [float(loss[1]) for loss in losses] )
        
        if self.supress_std_once:
            supressor.__exit__()
            self.supress_std_once = False
                  
        if self.write_preview_history:
            if self.epoch % 100 == 0:
                preview = self.get_static_preview()
                preview_lh = ModelBase.get_loss_history_preview(self.loss_history, self.epoch, preview.shape[1], preview.shape[2])
                img = (np.concatenate ( [preview_lh, preview], axis=0 ) * 255).astype(np.uint8)
                cv2_imwrite ( str (self.preview_history_path / ('%.6d.jpg' %( self.epoch) )), img )     
                
        self.epoch += 1

        if epoch_time >= 10000:
            #............."Saving... 
            loss_string = "Training [#{0:06d}][{1:.5s}s]".format ( self.epoch, '{:0.4f}'.format(epoch_time / 1000) )
        else:
            loss_string = "Training [#{0:06d}][{1:04d}ms]".format ( self.epoch, int(epoch_time*1000) )
        for (loss_name, loss_value) in losses:
            loss_string += " %s:%.3f" % (loss_name, loss_value)

        return loss_string
Code Example #6
    def import_tf(device_config = None):
        if nnlib.tf is not None:
            return nnlib.code_import_tf

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()
        else:
            suppressor = None
            
        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            os.environ.pop('CUDA_VISIBLE_DEVICES')
        
        os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
        
        import tensorflow as tf
        nnlib.tf = tf
        
        if device_config is None:
            device_config = nnlib.active_DeviceConfig
        
        tf_ver = [int(x) for x in tf.VERSION.split('.')]
        req_cap = 35
        if tf_ver[0] > 1 or (tf_ver[0] == 1 and tf_ver[1] >= 11):
            req_cap = 37
            
        if not device_config.cpu_only and device_config.gpu_compute_caps[0] < req_cap:
            if suppressor is not None:  
                suppressor.__exit__()
            
            print ("%s does not meet minimum required compute capability: %d.%d. Falling back to CPU mode." % ( device_config.gpu_names[0], req_cap // 10, req_cap % 10 ) )
            device_config = nnlib.DeviceConfig(cpu_only=True)
            
            if suppressor is not None:  
                suppressor.__enter__()

        nnlib.active_DeviceConfig = device_config
        
        if device_config.cpu_only:
            config = tf.ConfigProto( device_count = {'GPU': 0} )
        else:     
            config = tf.ConfigProto()
            visible_device_list = ''
            for idx in device_config.gpu_idxs:
                visible_device_list += str(idx) + ','
            config.gpu_options.visible_device_list=visible_device_list[:-1]
            
        config.gpu_options.force_gpu_compatible = True            
        config.gpu_options.allow_growth = device_config.allow_growth
        
        nnlib.tf_sess = tf.Session(config=config)
            
        if suppressor is not None:  
            suppressor.__exit__()

        nnlib.__initialize_tf_functions()
        nnlib.code_import_tf = compile (nnlib.code_import_tf_string,'','exec')
        return nnlib.code_import_tf
Code Example #7
    def import_keras(device_config=None):
        if nnlib.keras is not None:
            return nnlib.code_import_keras

        if device_config is None:
            device_config = nnlib.active_DeviceConfig

        nnlib.active_DeviceConfig = device_config

        if "tensorflow" in device_config.backend:
            nnlib._import_tf(device_config)
            device_config = nnlib.active_DeviceConfig
        elif device_config.backend == "plaidML":
            os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
            os.environ["PLAIDML_DEVICE_IDS"] = ",".join([
                nnlib.device.getDeviceID(idx) for idx in device_config.gpu_idxs
            ])

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()

        #if "tensorflow" in device_config.backend:
        #    nnlib.keras = nnlib.tf.keras
        #else:
        import keras as keras_
        nnlib.keras = keras_

        if device_config.backend == "plaidML":
            import plaidml
            import plaidml.tile
            nnlib.PML = plaidml
            nnlib.PMLK = plaidml.keras.backend
            nnlib.PMLTile = plaidml.tile

        if device_config.use_fp16:
            nnlib.keras.backend.set_floatx('float16')

        if "tensorflow" in device_config.backend:
            nnlib.keras.backend.set_session(nnlib.tf_sess)

        nnlib.keras.backend.set_image_data_format('channels_last')

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor.__exit__()

        nnlib.code_import_keras = compile(nnlib.code_import_keras_string, '',
                                          'exec')
        nnlib.__initialize_keras_functions()

        return nnlib.code_import_keras
Code Example #8
File: gpufmkmgr.py Project: MiniAvarec/DeepFaceLab
def import_tf(gpu_config=None):
    global prefer_GPUConfig
    global tf_module
    global tf_session

    if gpu_config is None:
        gpu_config = prefer_GPUConfig
    else:
        prefer_GPUConfig = gpu_config

    if tf_module is not None:
        return tf_module

    if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
        suppressor = std_utils.suppress_stdout_stderr().__enter__()
    else:
        suppressor = None

    if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
        os.environ.pop('CUDA_VISIBLE_DEVICES')

    os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'

    import tensorflow as tf
    tf_module = tf

    if gpu_config.cpu_only:
        config = tf_module.ConfigProto(device_count={'GPU': 0})
    else:
        config = tf_module.ConfigProto()
        visible_device_list = ''
        for idx in gpu_config.gpu_idxs:
            visible_device_list += str(idx) + ','
        visible_device_list = visible_device_list[:-1]
        config.gpu_options.visible_device_list = visible_device_list
        config.gpu_options.force_gpu_compatible = True

    config.gpu_options.allow_growth = gpu_config.allow_growth

    tf_session = tf_module.Session(config=config)

    if suppressor is not None:
        suppressor.__exit__()

    return tf_module
Code Example #9
    def save(self):
        print("Saving...")

        if self.supress_std_once:
            supressor = std_utils.suppress_stdout_stderr()
            supressor.__enter__()

        self.onSave()

        if self.supress_std_once:
            supressor.__exit__()

        model_data = {
            'epoch': self.epoch,
            'options': self.options,
            'loss_history': self.loss_history,
            'sample_for_preview': self.sample_for_preview
        }
        self.model_data_path.write_bytes(pickle.dumps(model_data))
Code Example #10
    def import_tf(device_config=None):
        if nnlib.tf is not None:
            return nnlib.code_import_tf

        if device_config is None:
            device_config = nnlib.prefer_DeviceConfig
        else:
            nnlib.prefer_DeviceConfig = device_config

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()
        else:
            suppressor = None

        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            os.environ.pop('CUDA_VISIBLE_DEVICES')

        os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'

        import tensorflow as tf
        nnlib.tf = tf

        if device_config.cpu_only:
            config = tf.ConfigProto(device_count={'GPU': 0})
        else:
            config = tf.ConfigProto()
            visible_device_list = ''
            for idx in device_config.gpu_idxs:
                visible_device_list += str(idx) + ','
            config.gpu_options.visible_device_list = visible_device_list[:-1]
            config.gpu_options.force_gpu_compatible = True

        config.gpu_options.allow_growth = device_config.allow_growth

        nnlib.tf_sess = tf.Session(config=config)

        if suppressor is not None:
            suppressor.__exit__()

        nnlib.__initialize_tf_functions()
        nnlib.code_import_tf = compile(nnlib.code_import_tf_string, '', 'exec')
        return nnlib.code_import_tf
Code Example #11
File: nnlib.py Project: jp462484393/deepfacelab
    def _import_tf(device_config):
        if nnlib.tf is not None:
            return nnlib.code_import_tf

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()
        else:
            suppressor = None

        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            os.environ.pop('CUDA_VISIBLE_DEVICES')

        os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #tf log errors only

        import warnings
        warnings.simplefilter(action='ignore', category=FutureWarning)

        import tensorflow as tf
        nnlib.tf = tf

        if device_config.cpu_only:
            config = tf.ConfigProto(device_count={'GPU': 0})
        else:
            config = tf.ConfigProto()

            if device_config.backend != "tensorflow-generic":
                # tensorflow-generic means a system with an NVIDIA card but without NVSMI,
                # so don't hide devices and let TensorFlow choose the best card.
                visible_device_list = ''
                for idx in device_config.gpu_idxs:
                    visible_device_list += str(idx) + ','
                config.gpu_options.visible_device_list=visible_device_list[:-1]

        config.gpu_options.force_gpu_compatible = True
        config.gpu_options.allow_growth = device_config.allow_growth

        nnlib.tf_sess = tf.Session(config=config)

        if suppressor is not None:
            suppressor.__exit__()
Code Example #12
    def import_keras(device_config=None):
        if nnlib.keras is not None:
            return nnlib.code_import_keras

        nnlib.import_tf(device_config)

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()

        import keras as keras_
        nnlib.keras = keras_
        nnlib.keras.backend.tensorflow_backend.set_session(nnlib.tf_sess)

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor.__exit__()

        nnlib.__initialize_keras_functions()
        nnlib.code_import_keras = compile(nnlib.code_import_keras_string, '',
                                          'exec')
Code Example #13
File: gpufmkmgr.py Project: tomsang/DeepFaceLab
def import_keras():
    global keras_module
    
    if keras_module is not None:
        return keras_module
        
    sess = get_tf_session()
    if sess is None:
        raise Exception ('No TF session found. Import TF first.')
        
    if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
        suppressor = std_utils.suppress_stdout_stderr().__enter__()
        
    import keras     

    keras.backend.tensorflow_backend.set_session(sess)
    
    if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':        
        suppressor.__exit__()

    keras_module = keras
    return keras_module
Code Example #14
    def extract_from_bgr(self, input_image, rects):
        input_image = input_image[:, :, ::-1].copy()
        (h, w, ch) = input_image.shape

        landmarks = []
        for (left, top, right, bottom) in rects:

            center = np.array([(left + right) / 2.0, (top + bottom) / 2.0])
            center[1] -= (bottom - top) * 0.12
            scale = (right - left + bottom - top) / 195.0

            image = crop(input_image, center, scale).transpose(
                (2, 0, 1)).astype(np.float32) / 255.0
            image = np.expand_dims(image, 0)

            with std_utils.suppress_stdout_stderr():
                predicted = self.keras_model.predict(image)

            pts_img = get_pts_from_predict(predicted[-1][0], center, scale)
            pts_img = [(int(pt[0]), int(pt[1])) for pt in pts_img]
            landmarks.append(((left, top, right, bottom), pts_img))

        return landmarks