def test_tf_dct_vs_dct_matmul(self):
  signal_size = 51
  # input signal
  signal = np.random.rand(1, 1, signal_size)

  # build dct model using tf function
  input_signal = tf.keras.Input(shape=(1, signal_size), batch_size=1)
  output = dct.DCT(use_tf=True)(input_signal)
  model1 = tf.keras.Model(input_signal, output)
  model1.summary()
  model1_output = model1.predict(signal)

  # build dct model using direct matmul
  input_signal = tf.keras.Input(shape=(1, signal_size), batch_size=1)
  output = dct.DCT(use_tf=False)(input_signal)
  model2 = tf.keras.Model(input_signal, output)
  model2.summary()
  model2_output = model2.predict(signal)

  self.assertAllClose(model1_output, model2_output, rtol=1e-5, atol=1e-5)
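# A minimal standalone sketch (not part of the test class; assumes the
# scipy-compatible DCT-II convention that tf.signal.dct documents) of the
# equivalence the test above checks: tf.signal.dct vs. an explicit DCT
# matrix multiply.
def _sketch_dct_matmul_equivalence():
  import numpy as np
  import tensorflow as tf

  signal_size = 51
  x = np.random.rand(1, signal_size).astype(np.float32)

  # DCT-II matrix: y[k] = 2 * sum_n x[n] * cos(pi * k * (2n + 1) / (2N))
  n = np.arange(signal_size)
  k = n.reshape(-1, 1)
  dct_matrix = 2.0 * np.cos(np.pi * k * (2.0 * n + 1.0) / (2.0 * signal_size))

  matmul_out = x @ dct_matrix.T.astype(np.float32)
  tf_out = tf.signal.dct(tf.constant(x), type=2).numpy()
  np.testing.assert_allclose(matmul_out, tf_out, rtol=1e-4, atol=1e-4)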
def test_tf_dct_vs_dct_direct(self):
  signal_size = 64
  # input signal
  signal = np.random.rand(1, 1, signal_size)

  # build mfcc model and run it
  input_signal = tf.keras.Input(shape=(1, signal_size), batch_size=1)
  output = tf.signal.mfccs_from_log_mel_spectrograms(input_signal)
  model = tf.keras.Model(input_signal, output)
  model.summary()
  mfcc_output = model.predict(signal)

  # build dct model and run it
  input_signal = tf.keras.Input(shape=(1, signal_size), batch_size=1)
  output = dct.DCT()(input_signal)
  model = tf.keras.Model(input_signal, output)
  model.summary()
  dct_output = model.predict(signal)

  self.assertAllClose(
      mfcc_output[0][0], dct_output[0][0], rtol=1e-5, atol=1e-6)
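# A minimal standalone sketch (assuming the relation described in the TF
# docs, where mfccs_from_log_mel_spectrograms is a DCT-II scaled by
# rsqrt(2 * num_mel_bins)) of what the test above relies on.
def _sketch_mfcc_is_scaled_dct():
  import numpy as np
  import tensorflow as tf

  num_mel_bins = 64
  log_mel = tf.constant(np.random.rand(1, num_mel_bins).astype(np.float32))

  mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel)
  scaled_dct = tf.signal.dct(log_mel, type=2) * tf.math.rsqrt(
      2.0 * num_mel_bins)
  np.testing.assert_allclose(
      mfcc.numpy(), scaled_dct.numpy(), rtol=1e-5, atol=1e-5)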
def build(self, input_shape):
  super(SpeechFeatures, self).build(input_shape)

  self.data_frame = data_frame.DataFrame(
      mode=self.mode,
      inference_batch_size=self.inference_batch_size,
      frame_size=self.frame_size,
      frame_step=self.frame_step)

  if self.noise_scale != 0.0 and self.mode == modes.Modes.TRAINING:
    self.add_noise = tf.keras.layers.GaussianNoise(stddev=self.noise_scale)
  else:
    self.add_noise = tf.keras.layers.Lambda(lambda x: x)

  if self.params['preemph'] != 0.0:
    self.preemphasis = preemphasis.Preemphasis(preemph=self.params['preemph'])
  else:
    self.preemphasis = tf.keras.layers.Lambda(lambda x: x)

  if self.params['window_type'] is not None:
    self.windowing = windowing.Windowing(
        window_size=self.frame_size,
        window_type=self.params['window_type'])
  else:
    self.windowing = tf.keras.layers.Lambda(lambda x: x)

  # If use_tf_fft is False, we use the Real Discrete Fourier
  # Transform (RDFT), which is slower than RFFT. To make the RDFT more
  # efficient we exploit a property of the mel spectrum: we find the range
  # of non-zero values in the mel weights and compute the RDFT only over
  # that range, which speeds up the computation.
  # If use_tf_fft is True, we use the TF RFFT, which requires
  # signal-length alignment, so we disable mel_non_zero_only.
  self.mag_rdft_mel = magnitude_rdft_mel.MagnitudeRDFTmel(
      use_tf_fft=self.params['use_tf_fft'],
      magnitude_squared=self.params['fft_magnitude_squared'],
      num_mel_bins=self.params['mel_num_bins'],
      lower_edge_hertz=self.params['mel_lower_edge_hertz'],
      upper_edge_hertz=self.params['mel_upper_edge_hertz'],
      sample_rate=self.params['sample_rate'],
      mel_non_zero_only=self.params['mel_non_zero_only'])

  self.log_max = tf.keras.layers.Lambda(
      lambda x: tf.math.log(tf.math.maximum(x, self.params['log_epsilon'])))

  if self.params['dct_num_features'] != 0:
    self.dct = dct.DCT(num_features=self.params['dct_num_features'])
  else:
    self.dct = tf.keras.layers.Lambda(lambda x: x)

  self.normalizer = normalizer.Normalizer(mean=self.mean, stddev=self.stddev)

  # In any inference mode there is no need to add dynamic logic to the
  # tf graph.
  if self.params['use_spec_augment'] and self.mode == modes.Modes.TRAINING:
    self.spec_augment = spectrogram_augment.SpecAugment(
        time_masks_number=self.params['time_masks_number'],
        time_mask_max_size=self.params['time_mask_max_size'],
        frequency_masks_number=self.params['frequency_masks_number'],
        frequency_mask_max_size=self.params['frequency_mask_max_size'])
  else:
    self.spec_augment = tf.keras.layers.Lambda(lambda x: x)
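# A minimal standalone sketch (illustrative only, not the library's
# implementation) of the mel_non_zero_only idea from the comments above:
# only the RDFT frequency bins covered by non-zero mel filterbank weights
# need to be computed.
def _sketch_mel_non_zero_bins():
  import numpy as np
  import tensorflow as tf

  fft_size = 512
  mel_weights = tf.signal.linear_to_mel_weight_matrix(
      num_mel_bins=40,
      num_spectrogram_bins=fft_size // 2 + 1,
      sample_rate=16000,
      lower_edge_hertz=20.0,
      upper_edge_hertz=4000.0).numpy()

  # Frequency bins where at least one mel filter has non-zero weight.
  used_bins = np.nonzero(mel_weights.sum(axis=-1))[0]
  print('RDFT needs bins [%d, %d] of %d' %
        (used_bins[0], used_bins[-1], fft_size // 2 + 1))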
def build(self, input_shape):
  super(SpeechFeatures, self).build(input_shape)

  if (self.params['sp_time_shift_samples'] != 0.0 and
      self.mode == modes.Modes.TRAINING):
    self.rand_shift = random_shift.RandomShift(
        self.params['sp_time_shift_samples'])
  else:
    self.rand_shift = tf.keras.layers.Lambda(lambda x: x)

  if (self.params['sp_resample'] != 0.0 and
      self.mode == modes.Modes.TRAINING):
    self.rand_stretch_squeeze = random_stretch_squeeze.RandomStretchSqueeze(
        self.params['sp_resample'])
  else:
    self.rand_stretch_squeeze = tf.keras.layers.Lambda(lambda x: x)

  self.data_frame = data_frame.DataFrame(
      mode=self.mode,
      inference_batch_size=self.inference_batch_size,
      frame_size=self.frame_size,
      frame_step=self.frame_step,
      use_one_step=self.params['use_one_step'],
      padding=self.params['data_frame_padding'])

  if self.noise_scale != 0.0 and self.mode == modes.Modes.TRAINING:
    self.add_noise = tf.keras.layers.GaussianNoise(stddev=self.noise_scale)
  else:
    self.add_noise = tf.keras.layers.Lambda(lambda x: x)

  if self.params['preemph'] != 0.0:
    self.preemphasis = preemphasis.Preemphasis(preemph=self.params['preemph'])
  else:
    self.preemphasis = tf.keras.layers.Lambda(lambda x: x)

  # If True, the direct DFT, DCT and Hann window are replaced by tf
  # functions. This is useful for model quantization, because these
  # functions will not be quantized.
  use_tf_function = self.params['use_tf_fft']
  mel_non_zero_only = self.params['mel_non_zero_only']
  window_type = self.params['window_type']

  # Set mel and window type for tf function compatibility.
  if use_tf_function:
    mel_non_zero_only = False
    window_type = 'hann_tf'

  if window_type is not None:
    self.windowing = windowing.Windowing(
        window_size=self.frame_size, window_type=window_type)
  else:
    self.windowing = tf.keras.layers.Lambda(lambda x: x)

  # If use_tf_fft is False, we use the Real Discrete Fourier
  # Transform (RDFT), which is slower than RFFT. To make the RDFT more
  # efficient we exploit a property of the mel spectrum: we find the range
  # of non-zero values in the mel weights and compute the RDFT only over
  # that range, which speeds up the computation.
  # If use_tf_fft is True, we use the TF RFFT, which requires
  # signal-length alignment, so we disable mel_non_zero_only.
  self.mag_rdft_mel = magnitude_rdft_mel.MagnitudeRDFTmel(
      use_tf_fft=use_tf_function,
      magnitude_squared=self.params['fft_magnitude_squared'],
      num_mel_bins=self.params['mel_num_bins'],
      lower_edge_hertz=self.params['mel_lower_edge_hertz'],
      upper_edge_hertz=self.params['mel_upper_edge_hertz'],
      sample_rate=self.params['sample_rate'],
      mel_non_zero_only=mel_non_zero_only)

  self.log_max = tf.keras.layers.Lambda(
      lambda x: tf.math.log(tf.math.maximum(x, self.params['log_epsilon'])))

  if self.params['dct_num_features'] != 0:
    self.dct = dct.DCT(num_features=self.params['dct_num_features'])
  else:
    self.dct = tf.keras.layers.Lambda(lambda x: x)

  self.normalizer = normalizer.Normalizer(mean=self.mean, stddev=self.stddev)

  # In any inference mode there is no need to add dynamic logic to the
  # tf graph.
  if self.params['use_spec_augment'] and self.mode == modes.Modes.TRAINING:
    self.spec_augment = spectrogram_augment.SpecAugment(
        time_masks_number=self.params['time_masks_number'],
        time_mask_max_size=self.params['time_mask_max_size'],
        frequency_masks_number=self.params['frequency_masks_number'],
        frequency_mask_max_size=self.params['frequency_mask_max_size'])
  else:
    self.spec_augment = tf.keras.layers.Lambda(lambda x: x)

  if self.params['use_spec_cutout'] and self.mode == modes.Modes.TRAINING:
    self.spec_cutout = spectrogram_cutout.SpecCutout(
        masks_number=self.params['spec_cutout_masks_number'],
        time_mask_size=self.params['spec_cutout_time_mask_size'],
        frequency_mask_size=self.params['spec_cutout_frequency_mask_size'])
  else:
    self.spec_cutout = tf.keras.layers.Lambda(lambda x: x)
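# A minimal standalone sketch (assuming the periodic Hann definition that
# tf.signal.hann_window uses by default; the library's own 'hann' window
# may be defined slightly differently) of why 'hann_tf' can stand in for a
# directly computed window when use_tf_fft is enabled.
def _sketch_hann_window_equivalence():
  import numpy as np
  import tensorflow as tf

  frame_size = 400
  n = np.arange(frame_size)
  # Periodic Hann: w[n] = 0.5 - 0.5 * cos(2 * pi * n / N)
  hann_direct = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / frame_size)
  hann_tf = tf.signal.hann_window(frame_size, periodic=True).numpy()
  np.testing.assert_allclose(hann_direct, hann_tf, rtol=1e-5, atol=1e-5)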