Exemplo n.º 1
0
    def nn_preprocess(self, x, n_mfcc=96, max_duration=5, is_mfcc=True):
        """Truncate raw waveforms, extract MFCC or mel features, and pad.

        On the first call, caches the raw truncation length and sets the
        ``need_30s`` / ``crnn_first`` model-selection flags based on how
        long the clips are and how small the training set is.

        Args:
            x: iterable of 1-D audio sample arrays.
            n_mfcc: number of MFCC coefficients when ``is_mfcc`` is True.
            max_duration: cap (seconds) applied to the raw clip length.
            is_mfcc: extract MFCCs when True, mel spectrograms otherwise.

        Returns:
            Feature array padded to ``self.fea_max_length`` frames.
        """
        if self.raw_max_length is None:
            longest = get_max_length(x)
            if longest > (MIDDLE_DURATION * AUDIO_SAMPLE_RATE):
                self.need_30s = True
                # Small datasets with few classes favour the CRNN model.
                if len(self._train_y) < 1000 and self._num_classes < 30:
                    self.crnn_first = True
            # NOTE(review): min-then-max means MAX_AUDIO_DURATION acts as a
            # floor that can override the max_duration cap — confirm intended.
            clipped = min(max_duration * AUDIO_SAMPLE_RATE, longest)
            self.raw_max_length = max(
                MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE, clipped)
        limit = self.raw_max_length
        x = [sample[:limit] for sample in x]

        if is_mfcc:
            x = extract_mfcc_parallel(x, n_mfcc=n_mfcc)
        else:
            x = extract_melspectrogram_parallel(
                x, n_mels=128, use_power_db=True)

        # Cache the frame-length to pad to, capped at MAX_FRAME_NUM.
        if self.fea_max_length is None:
            self.fea_max_length = min(MAX_FRAME_NUM, get_max_length(x))
        return pad_seq(x, pad_len=self.fea_max_length)
Exemplo n.º 2
0
    def lr_preprocess(self, x):
        """Build standardized mel summary features for a linear model.

        On the first call, rescales the module-level hop/sample-rate
        globals in proportion to the longest clip, then for every clip
        computes per-band mel mean and std and standardizes the result.

        Args:
            x: iterable of 1-D audio sample arrays.

        Returns:
            2-D array of standardized (mean, std) mel features.
        """
        global LR_HOP_DURATION
        global HOP_DURATION
        global AUDIO_SAMPLE_RATE
        if self.raw_max_length is None:
            self.raw_max_length = get_max_length(x)
            # Scale the feature-extraction globals by clip length
            # relative to an 84000-sample baseline.
            scale = self.raw_max_length / 84000
            LR_HOP_DURATION = max(BASE_LR_HOP_DURATION,
                                  BASE_LR_HOP_DURATION * scale)
            HOP_DURATION = max(BASE_HOP_DURATION,
                               BASE_HOP_DURATION * scale / 2)
            AUDIO_SAMPLE_RATE = max(BASE_AUDIO_SAMPLE_RATE,
                                    int(BASE_AUDIO_SAMPLE_RATE * scale / 3))
            print('LR_HOP_DURATION---%s' % LR_HOP_DURATION)
            print('HOP_DURATION---%s' % HOP_DURATION)
            print('AUDIO_SAMPLE_RATE---%s' % AUDIO_SAMPLE_RATE)

        cutoff = MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE
        x = [sample[:cutoff] for sample in x]
        x_mel = extract_melspectrogram_parallel(
            x, n_mels=30, use_power_db=True, lr=True)

        # Per-clip feature: mel mean and std over the time axis.
        x_feas = np.asarray([
            np.concatenate([np.mean(mel, axis=0).reshape(-1),
                            np.std(mel, axis=0).reshape(-1)], axis=-1)
            for mel in x_mel
        ])

        scaler = StandardScaler()
        return scaler.fit_transform(x_feas)
Exemplo n.º 3
0
 def preprocess_data(self, x):
     """Extract 96-coefficient MFCC features and pad to a fixed length.

     The pad length is cached on first use and capped at MAX_FRAME_NUM.

     Args:
         x: iterable of 1-D audio sample arrays.

     Returns:
         MFCC feature array padded to ``self.max_length`` frames.
     """
     feats = extract_mfcc_parallel(x, n_mfcc=96)
     if self.max_length is None:
         self.max_length = min(MAX_FRAME_NUM, get_max_length(feats))
     return pad_seq(feats, pad_len=self.max_length)
Exemplo n.º 4
0
    def nn_preprocess(self, x, n_mfcc=96, max_duration=5, is_mfcc=True):
        """Truncate waveforms, extract MFCC/mel features, and pad.

        On the first call, rescales the module-level hop/sample-rate
        globals by clip length, sets the ``need_30s`` / ``crnn_first``
        model-selection flags, and caches the raw truncation length.

        Args:
            x: iterable of 1-D audio sample arrays.
            n_mfcc: number of MFCC coefficients when ``is_mfcc`` is True.
            max_duration: cap (seconds) applied to the raw clip length.
            is_mfcc: extract MFCCs when True, mel spectrograms otherwise.

        Returns:
            Feature array padded to ``self.fea_max_length`` frames.
        """
        global LR_HOP_DURATION
        global HOP_DURATION
        global AUDIO_SAMPLE_RATE
        if self.raw_max_length is None:
            longest = get_max_length(x)
            self.raw_max_length = longest
            # Scale the feature-extraction globals by clip length
            # relative to an 84000-sample baseline.
            scale = longest / 84000
            LR_HOP_DURATION = max(BASE_LR_HOP_DURATION,
                                  BASE_LR_HOP_DURATION * scale)
            HOP_DURATION = max(BASE_HOP_DURATION,
                               BASE_HOP_DURATION * scale / 2)
            AUDIO_SAMPLE_RATE = max(BASE_AUDIO_SAMPLE_RATE,
                                    int(BASE_AUDIO_SAMPLE_RATE * scale / 3))
            print('LR_HOP_DURATION---%s' % LR_HOP_DURATION)
            print('HOP_DURATION---%s' % HOP_DURATION)
            print('AUDIO_SAMPLE_RATE---%s' % AUDIO_SAMPLE_RATE)

            if longest > (MIDDLE_DURATION * AUDIO_SAMPLE_RATE):
                self.need_30s = True
                # Small datasets with few classes favour the CRNN model.
                if len(self._train_y) < 1000 and self._num_classes < 30:
                    self.crnn_first = True
            # NOTE(review): min-then-max means MAX_AUDIO_DURATION acts as a
            # floor that can override the max_duration cap — confirm intended.
            clipped = min(max_duration * AUDIO_SAMPLE_RATE, longest)
            self.raw_max_length = max(
                MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE, clipped)
        limit = self.raw_max_length
        x = [sample[:limit] for sample in x]

        if is_mfcc:
            x = extract_mfcc_parallel(x, n_mfcc=n_mfcc)
        else:
            x = extract_melspectrogram_parallel(
                x, n_mels=128, use_power_db=True)

        # Cache the frame-length to pad to, capped at MAX_FRAME_NUM.
        if self.fea_max_length is None:
            self.fea_max_length = min(MAX_FRAME_NUM, get_max_length(x))
        return pad_seq(x, pad_len=self.fea_max_length)
Exemplo n.º 5
0
    def preprocess_data(self, x):
        """Extract 128-band mel spectrograms, pad, and add a channel axis.

        Args:
            x: iterable of 1-D audio sample arrays.

        Returns:
            4-D array shaped (clips, frames, mel bands, 1).
        """
        if IS_CUT_AUDIO:
            cutoff = MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE
            x = [clip[:cutoff] for clip in x]

        mel = extract_melspectrogram_parallel(x, n_mels=128,
                                              use_power_db=True)
        # Cache the pad length on first use, capped at MAX_FRAME_NUM.
        if self.max_length is None:
            self.max_length = min(MAX_FRAME_NUM, get_max_length(mel))
        mel = pad_seq(mel, pad_len=self.max_length)
        return mel[:, :, :, np.newaxis]
Exemplo n.º 6
0
 def preprocess_data(self, x):
     """Extract MFCCs, pad, standardize, and average over coefficients.

     Scaling statistics (global mean/std) are fitted once on the first
     batch seen and reused for every subsequent call.

     Args:
         x: iterable of 1-D audio sample arrays.

     Returns:
         3-D array shaped (clips, frames, 1) of standardized,
         coefficient-averaged MFCC features.
     """
     x = extract_mfcc(x)
     if self.max_length is None:
         self.max_length = get_max_length(x)
     x = pad_seq(x, self.max_length)
     # Fit the scaling statistics only on the first call.
     if self.mean is None or self.std is None:
         self.mean = np.mean(x)
         self.std = np.std(x)
     # BUG FIX: scaling was previously inside the fitting branch above,
     # so any call after the first returned UN-standardized features.
     x = (x - self.mean) / self.std
     # Collapse the MFCC coefficient axis to its mean.
     x = np.mean(x, axis=-1)
     return x[:, :, np.newaxis]
Exemplo n.º 7
0
 def preprocess_data(self, x):
     """Extract MFCC + mel features, pad both, and stack along features.

     Args:
         x: iterable of 1-D audio sample arrays.

     Returns:
         4-D array shaped (clips, frames, 64 mfcc + 64 mel, 1).
     """
     if IS_CUT_AUDIO:
         cutoff = MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE
         x = [clip[:cutoff] for clip in x]

     mfcc = extract_mfcc_parallel(x, n_mfcc=64)
     mel = extract_melspectrogram_parallel(x, n_mels=64, use_power_db=True)
     # Pad length is derived from the MFCC features and cached.
     if self.max_length is None:
         self.max_length = min(MAX_FRAME_NUM, get_max_length(mfcc))
     mfcc = pad_seq(mfcc, self.max_length)
     mel = pad_seq(mel, self.max_length)
     stacked = np.concatenate([mfcc, mel], axis=-1)
     return stacked[:, :, :, np.newaxis]
Exemplo n.º 8
0
    def preprocess_data(self, x):
        """Extract 96-coefficient MFCCs, pad, and add a channel axis.

        Args:
            x: iterable of 1-D audio sample arrays.

        Returns:
            4-D array shaped (clips, frames, 96, 1).
        """
        # Removed five dead locals (SR, N_FFT, N_MELS, HOP_LEN, DURA):
        # leftover mel-spectrogram parameters that were never read.
        if IS_CUT_AUDIO:
            x = [sample[0:MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE]
                 for sample in x]

        x_mfcc = extract_mfcc_parallel(x, n_mfcc=96)
        # Cache the pad length on first use, capped at MAX_FRAME_NUM.
        if self.max_length is None:
            self.max_length = get_max_length(x_mfcc)
            self.max_length = min(MAX_FRAME_NUM, self.max_length)
        x_mfcc = pad_seq(x_mfcc, pad_len=self.max_length)
        x_mfcc = x_mfcc[:, :, :, np.newaxis]
        return x_mfcc
Exemplo n.º 9
0
    def preprocess_data(self, x):
        """Extract 96-coefficient MFCCs, pad, and add a channel axis.

        Args:
            x: iterable of 1-D audio sample arrays.

        Returns:
            4-D array shaped (clips, frames, 96, 1), e.g.
            (120, 437, 96, 1) for 120 clips of up to 437 frames.
        """
        # Removed ~25 lines of commented-out experiments (per-column
        # StandardScaler, global mean/std scaling, MinMaxScaler reshape).
        if IS_CUT_AUDIO:
            x = [
                sample[0:MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE]
                for sample in x
            ]
        x = extract_mfcc_parallel(x, n_mfcc=96)
        if self.max_length is None:
            # NOTE(review): unlike sibling preprocessors, max_length is not
            # clamped to MAX_FRAME_NUM here — confirm whether intentional.
            self.max_length = get_max_length(x)
        x = pad_seq(x, self.max_length)

        x = x[:, :, :, np.newaxis]
        return x