class Adapt_Pretrainer(Trainer):

    def __init__(self, **kwargs):
        super(Adapt_Pretrainer, self).__init__(trainer_type='pretraining', **kwargs)

    def build(self):
        self.model = Adapt(**self.args)
        self.model.tensorboard_init()
        self.model.init_all()
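# Hedged usage sketch (not from the repo): a Trainer subclass is presumably
# driven along these lines, with the keyword arguments collected into
# self.args by the base class. The argument names below are hypothetical.
#
#   trainer = Adapt_Pretrainer(batch_size=8, chunk_size=512 * 40)
#   trainer.build()
#   trainer.train()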
class Pretrained_Inference(Trainer):

    def __init__(self, separator, name, **kwargs):
        super(Pretrained_Inference, self).__init__(trainer_type=name, **kwargs)
        self.separator = separator

    def build(self):
        self.args.update({'pretraining': True})
        self.model = Adapt(**self.args)
        self.model.create_saver()
        self.model.restore_model(self.args['model_folder'])
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Expanding the graph with the separation layer
    self.model.connect_front(self.separator)
    self.model.sepNet.output = self.model.sepNet.separate
    self.model.back  # accessed for its graph-building side effect
    self.model.create_saver()
    self.model.restore_model(self.args['model_folder'])
    self.model.finish_construction()
    self.model.initialize_non_init()
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Restoring the front layer:
    # Expanding the graph with the enhance layer
    self.model.connect_front(self.separator)
    self.model.sepNet.output = self.model.sepNet.enhance
    self.model.back
    self.model.create_saver()
    self.model.restore_model(self.args['model_folder'])
    # Initialize only non-restored values
    self.model.initialize_non_init()
def build(self):
    if self.args['model_previous'] is not None:
        self.model = Adapt.load(self.args['model_previous'], self.args)
        self.model.connect_front(self.separator)
        self.model.sepNet.output = self.model.sepNet.prediction
        self.model.cost_model = self.model.sepNet.cost
        self.model.back  # To save the back values!
        self.model.create_saver()
        self.model.restore_model(self.args['model_previous'])
        self.model.finish_construction()
        self.model.freeze_all_with('front/')
        self.model.freeze_all_with('back/')
        self.model.optimize
        self.model.tensorboard_init()
        self.model.initialize_non_init()
    else:
        self.model = Adapt.load(self.args['model_folder'], self.args)
        self.model.connect_only_front_to_separator(self.separator)
        # Initialize only non-restored values
        self.model.initialize_non_init()
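# Minimal sketch of what freeze_all_with('front/') presumably does in TF1
# (the helper below is hypothetical, not the repo's implementation): freezing
# a scope means excluding its variables from the var_list handed to the
# optimizer, so the restored weights are never updated.
import tensorflow as tf

def freeze_all_with(pattern, cost):
    to_train = [v for v in tf.trainable_variables() if pattern not in v.name]
    # Only the remaining variables receive gradient updates
    return tf.train.AdamOptimizer().minimize(cost, var_list=to_train)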
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Expanding the graph with the separation layer
    self.model.connect_front(self.separator)
    self.model.sepNet.output = self.model.sepNet.separate
    self.model.back
    self.model.create_saver()
    self.model.restore_model(self.args['model_folder'])
    self.model.cost_model = self.model.cost
    self.model.finish_construction()
    self.model.freeze_all_except('prediction', 'speaker_centroids')
    self.model.optimize
    self.model.tensorboard_init()
    # Initialize only non-restored values
    self.model.initialize_non_init()
def build_model(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Restoring the previous model:
    self.model.restore_front_separator(self.args['model_folder'], self.separator)
    # Expanding the graph with the enhance layer
    with self.model.graph.as_default():
        self.model.sepNet.output = self.model.sepNet.enhance
        self.model.cost_model = self.model.sepNet.enhance_cost
        self.model.finish_construction()
        self.model.freeze_all_except('enhance')
        self.model.optimize
        self.model.tensorboard_init()
        # Initialize only non-restored values
        self.model.initialize_non_init()
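# TF1 note on the `with self.model.graph.as_default():` block above: a model
# restored into its own tf.Graph must also be extended inside that graph,
# otherwise the new ops silently land in the process-wide default graph.
# A minimal illustration (names hypothetical):
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, [None, 512], name='new_op')
assert x.graph is graph  # not the default graph active outside the block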
def build_model(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Expanding the graph with the separation layer
    with self.model.graph.as_default():
        self.model.connect_front(self.separator)
        self.model.sepNet.output = self.model.sepNet.separate
        self.model.back
        self.model.restore_model(self.args['model_folder'])
        self.model.cost_model = self.model.cost
        self.model.finish_construction()
        self.model.optimize
        self.model.tensorboard_init()
        # Initialize only non-restored values
        self.model.initialize_non_init()
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Bare attribute accesses lazily build the corresponding graph pieces
    self.model.front
    self.model.pretraining = True
    self.model.separator
    self.model.back
    self.model.create_saver()
    self.model.restore_model(self.args['model_folder'])
    self.model.enhance
    self.model.cost_model = self.model.enhance_cost
    self.model.finish_construction()
    self.model.freeze_all_with('front/')
    self.model.freeze_all_with('back/')
    self.model.optimize
    self.model.tensorboard_init()
    self.model.initialize_non_init()
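# The bare attribute accesses above (self.model.front, .separator, .back,
# .enhance, .optimize) suggest lazily built graph pieces: each is presumably
# a memoized property whose first access creates the ops. A minimal sketch
# of that idiom (the decorator below is hypothetical, not the repo's code):
import functools

def lazy_property(fn):
    attr = '_cache_' + fn.__name__

    @property
    @functools.wraps(fn)
    def wrapper(self):
        if not hasattr(self, attr):
            setattr(self, attr, fn(self))
        return getattr(self, attr)
    return wrapper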
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Restoring the front layer:
    # Expanding the graph with the enhance layer
    self.model.connect_front(self.separator)
    self.model.sepNet.output = self.model.sepNet.enhance
    self.model.back
    self.model.create_saver()
    self.model.restore_model(self.args['model_folder'])
    self.model.cost_model = self.model.cost_finetuning
    self.model.finish_construction()
    # Keep only the trainable variables whose names match one of the
    # patterns listed in args['train']
    to_train = []
    for var in self.model.trainable_variables:
        for p in self.args['train']:
            if p in var.name:
                to_train.append(var)
    self.model.trainable_variables = to_train
    # self.model.freeze_all_except('prediction', 'speaker_centroids', 'enhance')
    self.model.optimize
    self.model.tensorboard_init()
    # Initialize only non-restored values
    self.model.initialize_non_init()
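# Example of the substring filter above (values hypothetical): with
# args['train'] = ['enhance', 'speaker_centroids'], any trainable variable
# whose name contains one of those patterns is kept, e.g.
# 'enhance/layer_0/kernel:0', and every other variable is frozen during
# fine-tuning.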
males = H5PY_RW()
males.open_h5_dataset('test_raw.h5py', subset=males_keys(H5_dico))
males.set_chunk(5 * 4 * 512)
males.shuffle()
print 'Male voices loaded: ', males.length(), ' items'

fem = H5PY_RW()
fem.open_h5_dataset('test_raw.h5py', subset=females_keys(H5_dico))
fem.set_chunk(5 * 4 * 512)
fem.shuffle()
print 'Female voices loaded: ', fem.length(), ' items'

# Note: this shadows the Mixer class with the instance
Mixer = Mixer([males, fem], with_mask=False, with_inputs=True)

adapt_model = Adapt.load('jolly-firefly-9628', pretraining=False, separator=DPCL)
# adapt_model.init()
print 'Model DAS created'

testVar = raw_input("Model loaded: press Enter")

cost_valid_min = 1e10
Mixer.select_split(0)
learning_rate = 0.01

for i in range(config.max_iterations):
    X_in, X_mix, Ind = Mixer.get_batch(1)
    # Divide the learning rate by 10 every 100 steps
    if (i + 1) % 100 == 0:
        learning_rate /= 10
    c = adapt_model.train(X_mix, X_in, learning_rate, i)
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    # Restoring the previous model:
    self.model.connect_enhance_to_separator(self.separator)
    self.model.initialize_non_init()
def build(self):
    self.model = Adapt.load(self.args['model_folder'], self.args)
    self.model.connect_only_front_to_separator(self.separator)
    # Initialize only non-restored values
    self.model.initialize_non_init()
def build(self):
    self.model = Adapt(**self.args)
    self.model.tensorboard_init()
    self.model.init_all()
config_model["smooth_size"] = 10 config_model["alpha"] = learning_rate config_model["reg"] = 1e-3 config_model["beta"] = 0.1 config_model["rho"] = 0.01 config_model["same_filter"] = True config_model["optimizer"] = 'Adam' #### #### adapt_model = Adapt(config_model=config_model, pretraining=True, folder='pretraining') adapt_model.tensorboard_init() adapt_model.init() print 'Total name :' print adapt_model.runID # nb_iterations = 500 mixed_data.adjust_split_size_to_batchsize(batch_size) nb_batches = mixed_data.nb_batches(batch_size) nb_epochs = 2 time_spent = [0 for _ in range(5)] for epoch in range(nb_epochs):
config_model["beta"] = 0.1 config_model["rho"] = 0.01 idd = ''.join('-{}={}-'.format(key, val) for key, val in sorted(config_model.items())) batch_size = 4 config_model["batch_size"] = batch_size config_model["type"] = "Dense_train" from models.adapt import Adapt import config full_id = 'soft-base-9900' + idd folder = 'Dense_train' model = Adapt(config_model=config_model, pretraining=False) model.create_saver() path = os.path.join(config.workdir, 'floydhub_model', "pretraining") # path = os.path.join(config.log_dir, "pretraining") model.restore_model(path, full_id) ## Connect DAS model to the front end from models.dense import Dense_net as Dense with model.graph.as_default(): model.connect_front(Dense) model.sepNet.output = model.sepNet.prediction model.back model.cost
full_id = 'cold-dust-9076' + idd
path = os.path.join(config.model_root if not config.floydhub else '/model2', 'log', config_model["type"])

####
#### NEW MODEL CONFIGURATION
####

config_model["type"] = "L41_finetuning"
learning_rate = 0.001
batch_size = 64
config_model["chunk_size"] = chunk_size
config_model["alpha"] = learning_rate
config_model["batch_size"] = batch_size

model = Adapt(config_model=config_model, pretraining=False)

with model.graph.as_default():
    model.connect_front(L41Model)
    # Restore only the front-end variables
    var_list = [v for v in tf.global_variables() if ('front' in v.name)]
    model.create_saver(var_list)
    model.restore_model(path_adapt, full_id_adapt)

    model.sepNet.prediction
    model.sepNet.separate
    model.sepNet.output = model.sepNet.enhance

    # Second saver for the separator and enhance variables
    var_list = [
        v for v in tf.global_variables()
        if ('prediction' in v.name or 'speaker_centroids' in v.name or 'enhance' in v.name)
    ]
    model.create_saver(var_list)
full_id = "frosty-fire-4612" + idd #### #### NEW MODEL CONFIG #### config_model["type"] = "L41_enhance" learning_rate = 0.001 batch_size = 8 config_model["chunk_size"] = chunk_size config_model["batch_size"] = batch_size config_model["alpha"] = learning_rate config_model["optimizer"] = 'Adam' config_model["reg"] = 1e-3 model = Adapt(config_model=config_model, pretraining=False) # Small modification for enhance #TODO with model.graph.as_default(): model.connect_front(L41Model) model.sepNet.output = model.sepNet.prediction model.create_saver() model.restore_model(path, full_id) model.sepNet.separate model.sepNet.output = model.sepNet.enhance model.cost = model.sepNet.enhance_cost model.freeze_variables() model.optimize model.tensorboard_init() init = model.non_initialized_variables()
males = H5PY_RW()
males.open_h5_dataset('test_raw.h5py', subset=males_keys(H5_dico))
males.set_chunk(5 * 4 * 512)
males.shuffle()
print 'Male voices loaded: ', males.length(), ' items'

fem = H5PY_RW()
fem.open_h5_dataset('test_raw.h5py', subset=females_keys(H5_dico))
fem.set_chunk(5 * 4 * 512)
fem.shuffle()
print 'Female voices loaded: ', fem.length(), ' items'

Mixer = Mixer([males, fem], with_mask=False, with_inputs=True)

adapt_model = Adapt()
print 'Model DAS created'
adapt_model.init()

cost_valid_min = 1e10
Mixer.select_split(0)
learning_rate = 0.005

for i in range(config.max_iterations):
    X_in, X_mix, Ind = Mixer.get_batch(1)
    c = adapt_model.train(X_mix, X_in, learning_rate, i)
    print 'Step #', i, ' ', c

    if i % 20 == 0:  # cost_valid < cost_valid_min:
        print 'DAS model saved at iteration number ', i, ' with cost = ', c
        adapt_model.save(i)
config_model["beta"] = 0.1 config_model["rho"] = 0.01 idd = ''.join('-{}={}-'.format(key, val) for key, val in sorted(config_model.items())) batch_size = 1 config_model["batch_size"] = batch_size config_model["type"] = "DAS_train_front" from models.adapt import Adapt import config full_id = 'soft-base-9900' + idd folder = 'DAS_train_front' model = Adapt(config_model=config_model, pretraining=False) model.create_saver() path = os.path.join(config.workdir, 'floydhub_model', "pretraining") # path = os.path.join(config.log_dir, "pretraining") model.restore_model(path, full_id) from models.das import DAS model.connect_only_front_to_separator(DAS) init = model.non_initialized_variables() # Model creation # Pretraining the model
idd = ''.join('-{}={}-'.format(key, val)
              for key, val in sorted(config_model.items()))

full_id = "noisy-breeze-3898" + idd
path = os.path.join(config.model_root, 'log', 'pretraining')

####
#### NEW MODEL
####

config_model["type"] = "L41_train_front"
learning_rate = 0.01
batch_size = 8
config_model["chunk_size"] = 512 * 40
config_model["batch_size"] = batch_size
config_model["alpha"] = learning_rate

model = Adapt(config_model=config_model, pretraining=False)
model.create_saver()
model.restore_model(path, full_id)
model.connect_only_front_to_separator(L41Model)

init = model.non_initialized_variables()
model.sess.run(init)

print 'Total name :'
print model.runID

# nb_iterations = 500
mixed_data.adjust_split_size_to_batchsize(batch_size)
nb_batches = mixed_data.nb_batches(batch_size)
nb_epochs = 40
def build(self):
    self.args.update({'pretraining': True})
    self.model = Adapt(**self.args)
    self.model.create_saver()
    self.model.restore_model(self.args['model_folder'])
####
#### NEW MODEL CONFIGURATION
####

config_model["type"] = "DPCL_finetuning"
learning_rate = 0.001
batch_size = 2
config_model["chunk_size"] = chunk_size
config_model["alpha"] = learning_rate
config_model["batch_size"] = batch_size

folder = 'DPCL_finetuning'

model = Adapt(config_model=config_model, pretraining=False)
model.create_saver()

path = os.path.join(config.model_root, 'log', 'DPCL_train_front')
model.restore_model(path, full_id)
model.connect_front_back_to_separator(DPCL)

with model.graph.as_default():
    model.create_saver()
    model.restore_model(path, full_id)

# model.freeze_front()
model.optimize
model.tensorboard_init()
init = model.non_initialized_variables()
#### LOAD PREVIOUS MODEL ####
idd = ''.join('-{}={}-'.format(key, val) for key, val in sorted(config_model.items()))

config_model["type"] = "DPCL_train_front"
learning_rate = 0.01
batch_size = 32
config_model["batch_size"] = batch_size
config_model["alpha"] = learning_rate

full_id = "long-term-4925" + idd
# full_id = 'jolly-sound-3162' + idd
folder = 'DPCL_train_front'

model = Adapt(config_model=config_model, pretraining=False)
model.create_saver()

path = os.path.join(config.model_root, 'log', 'pretraining')
model.restore_model(path, full_id)
model.connect_only_front_to_separator(DPCL)

init = model.non_initialized_variables()
model.sess.run(init)

print 'Total name :'
print model.runID

# nb_iterations = 500
mixed_data.adjust_split_size_to_batchsize(batch_size)