class Pretrained_Inference(Trainer):

    def __init__(self, separator, name, **kwargs):
        super(Pretrained_Inference, self).__init__(trainer_type=name, **kwargs)
        self.separator = separator

    def build(self):
        self.args.update({'pretraining': True})
        self.model = Adapt(**self.args)
        self.model.create_saver()
        self.model.restore_model(self.args['model_folder'])
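# A hedged usage sketch, assuming Trainer forwards its keyword arguments into
# self.args (so that 'model_folder' lands there). The separator class, trainer
# name and path below are placeholders, not values from the project.
trainer = Pretrained_Inference(separator=DPCL,
                               name='pretrained_inference',
                               model_folder='/path/to/checkpoints')
trainer.build()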
config_model["type"] = "DPCL_finetuning" learning_rate = 0.001 batch_size = 2 config_model["chunk_size"] = chunk_size config_model["alpha"] = learning_rate config_model["batch_size"] = batch_size folder = 'DPCL_finetuning' model = Adapt(config_model=config_model, pretraining=False) model.create_saver() path = os.path.join(config.model_root, 'log', 'DPCL_train_front') model.restore_model(path, full_id) model.connect_front_back_to_separator(DPCL) with model.graph.as_default(): model.create_saver() model.restore_model(path, full_id) # model.freeze_front() model.optimize model.tensorboard_init() init = model.non_initialized_variables() model.sess.run(init) print 'Total name :'
#### L41 fine-tuning: rebuild the full Adapt graph and restore each
#### sub-network (front end, separator, back end) from its own checkpoint.
config_model["type"] = "L41_finetuning"
learning_rate = 0.001
batch_size = 64
config_model["chunk_size"] = chunk_size
config_model["alpha"] = learning_rate
config_model["batch_size"] = batch_size

model = Adapt(config_model=config_model, pretraining=False)

with model.graph.as_default():
    # Build the front end and restore its pretrained weights.
    model.connect_front(L41Model)
    var_list = [v for v in tf.global_variables() if 'front' in v.name]
    model.create_saver(var_list)
    model.restore_model(path_adapt, full_id_adapt)

    # Build the separator ops (property accesses create the graph nodes)
    # and restore the L41 separator weights.
    model.sepNet.prediction
    model.sepNet.separate
    model.sepNet.output = model.sepNet.enhance
    var_list = [v for v in tf.global_variables()
                if ('prediction' in v.name
                    or 'speaker_centroids' in v.name
                    or 'enhance' in v.name)]
    model.create_saver(var_list)
    model.restore_model(path, full_id)

    # Build the back end and restore its pretrained weights.
    model.separator
    model.back
    var_list = [v for v in tf.global_variables() if 'back/' in v.name]
    model.create_saver(var_list)
    model.restore_model(path_adapt, full_id_adapt)
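# For reference, create_saver(var_list) / restore_model presumably wrap the
# standard tf.train.Saver mechanics. A minimal sketch of restoring one variable
# subset from a checkpoint directory, with placeholder names throughout:
def restore_subset(sess, checkpoint_dir, scope_keyword):
    # Keep only variables whose name contains the keyword, then restore
    # them from the latest checkpoint found in the directory.
    subset = [v for v in tf.global_variables() if scope_keyword in v.name]
    saver = tf.train.Saver(subset)
    saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))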