def _rollback(self, is_final=False):
  """Restore model parameters from the last saved state.

  Prefers the on-disk checkpoint at ``self._save_path`` when it exists;
  otherwise falls back to the in-memory snapshot ``self._best_object``.
  Does nothing when rollback is disabled, unless this is the final call.

  Parameters
  ----------
  is_final : bool
      When True, force the rollback even if ``self._allow_rollback``
      is False (e.g. at the very end of training).
  """
  # TODO: update rollback mechanism
  if is_final or self._allow_rollback:
    # let registered callbacks react to the rollback event first
    self._callback.event(TrainSignal.ROLLBACK)
    checkpoint = self._save_path
    if checkpoint is not None and os.path.exists(checkpoint):
      # an on-disk checkpoint exists: restore it immediately
      self._show_noti("[%s] Rollback from: %s" %
                      (ctext('MainLoop', 'red'), checkpoint))
      N.deserialize(checkpoint, force_restore_vars=True)
    elif self._best_object is not None:
      # no checkpoint file: fall back to variables kept in RAM
      self._show_noti("[%s] Rollback to the best stored object from RAM" %
                      (ctext('MainLoop', 'red')))
      N.deserialize(path_or_data=self._best_object,
                    force_restore_vars=True)
n_speakers = len(all_speakers) + 1 # =========================================================================== # Create the network # =========================================================================== inputs = [ K.placeholder(shape=(None, ) + shape[1:], dtype='float32', name='input%d' % i) for i, shape in enumerate(as_tuple_of_shape(train.shape)) ] X = inputs[0] y = inputs[1] print("Inputs:", ctext(inputs, 'cyan')) # ====== the network ====== # if os.path.exists(MODEL_PATH): x_vec = N.deserialize(path=MODEL_PATH, force_restore_vars=True) else: TRAIN_MODEL = True with N.args_scope( ['TimeDelayedConv', dict(time_pool='none', activation=K.relu)], ['Dense', dict(activation=K.linear, b_init=None)], ['BatchNorm', dict(activation=K.relu)]): x_vec = N.Sequence([ N.Dropout(level=0.3), N.TimeDelayedConv(n_new_features=512, n_time_context=5), N.TimeDelayedConv(n_new_features=512, n_time_context=5), N.TimeDelayedConv(n_new_features=512, n_time_context=7), N.Dense(512), N.BatchNorm(), N.Dense(1500),
all_speakers) = prepare_dnn_data( recipe=args.recipe, feat=FEAT, utt_length=args.l) n_speakers = len(all_speakers) + 1 # =========================================================================== # Create the network # =========================================================================== inputs = [K.placeholder(shape=(None,) + shape[1:], dtype='float32', name='input%d' % i) for i, shape in enumerate(as_tuple_of_shape(train.shape))] X = inputs[0] y = inputs[1] print("Inputs:", ctext(inputs, 'cyan')) # ====== the network ====== # if os.path.exists(MODEL_PATH): x_vec = N.deserialize(path=MODEL_PATH, force_restore_vars=True) else: TRAIN_MODEL = True with N.args_scope( ['TimeDelayedConv', dict(time_pool='none', activation=K.relu)], ['Dense', dict(activation=K.linear, b_init=None)], ['BatchNorm', dict(activation=K.relu)] ): x_vec = N.Sequence([ N.Dropout(level=0.3), N.TimeDelayedConv(n_new_features=512, n_time_context=5), N.TimeDelayedConv(n_new_features=512, n_time_context=5), N.TimeDelayedConv(n_new_features=512, n_time_context=7), N.Dense(512), N.BatchNorm(),
# Create the network # =========================================================================== inputs = [K.placeholder(shape=(None,) + shape[1:], dtype='float32', name='input%d' % i) for i, shape in enumerate(as_tuple_of_shape(train.shape))] X = inputs[0] y = inputs[1] print("Inputs:", ctext(inputs, 'cyan')) # ====== get all saved model if exists ====== # all_models = [os.path.join(EXP_DIR, i) for i in os.listdir(EXP_DIR)] all_models = sorted([i for i in all_models if MODEL_PATH + '.' in i]) # ====== the network ====== # if len(all_models) > 0: print("Load model at:", ctext(all_models[SCORE_SYSTEM_ID], 'cyan')) x_vec = N.deserialize(path=all_models[SCORE_SYSTEM_ID], force_restore_vars=True) else: with N.args_scope( ['TimeDelayedConv', dict(time_pool='none', activation=K.relu)], ['Dense', dict(activation=K.linear, b_init=None)], ['BatchNorm', dict(activation=K.relu)] ): x_vec = N.Sequence([ N.Dropout(level=0.3), N.TimeDelayedConv(n_new_features=512, n_time_context=5), N.TimeDelayedConv(n_new_features=512, n_time_context=5), N.TimeDelayedConv(n_new_features=512, n_time_context=7), N.Dense(512), N.BatchNorm(), N.Dense(1500), N.BatchNorm(),
# =========================================================================== # mapping from # dataset_name -> 'name': 1-D array [n_samples], # # (path to original audio) # 'path': 1-D array [n_samples], # # Extracted latent vectors # 'X': 2-D array [n_samples, n_latent_dim]} # # speaker label or meta-data (e.g. 'test', 'enroll', 'unlabeled') # 'y': 1-D array [n_samples], all_vectors = {} # =========================================================================== # Extract the x-vector for enroll and trials # =========================================================================== if 'xvec' == SCORE_SYSTEM_NAME: # ====== load the network ====== # x_vec = N.deserialize(path=final_sys, force_restore_vars=True) # ====== get output tensors ====== # y_logit = x_vec() y_proba = tf.nn.softmax(y_logit) X = K.ComputationGraph(y_proba).placeholders[0] z = K.ComputationGraph(y_proba).get(roles=N.Dense, scope='LatentOutput', beginning_scope=False)[0] f_z = K.function(inputs=X, outputs=z, training=False) print('Inputs:', ctext(X, 'cyan')) print('Latent:', ctext(z, 'cyan')) # ====== recipe for feeder ====== # recipe = prepare_dnn_feeder_recipe() # ==================== extract x-vector from acoustic features ==================== # for dsname, (ds_feat, ds_indices, ds_meta, ds_path) in sorted(acoustic_features.items(),