Example #1
    def __getitem__(self, index):
        path = self.file_path + '/' + self.file_names[index] + '.wav'
        data = librosa.core.load(path, sr=SR, mono=True)[0]

        # pre-emphasis
        data = preprocessing.preemphasis(data)

        mfcc = librosa.feature.mfcc(y=data, sr=SR, hop_length=1024, n_mfcc=40)

        if self.transform:
            # the transform is expected to return a (1, 40, T) tensor
            mfcc = self.transform(mfcc)
            soundFormatted = mfcc.float()
            # repeat the single channel three times -> (3, 40, T)
            soundFormatted = torch.cat(
                (soundFormatted, soundFormatted, soundFormatted))
            return soundFormatted, self.labels[index]

        soundFormatted = torch.from_numpy(mfcc).float()
        # add a channel axis, then repeat it -> (3, 40, T)
        soundFormatted = torch.unsqueeze(soundFormatted, dim=0)
        soundFormatted = torch.cat(
            (soundFormatted, soundFormatted, soundFormatted))
        return soundFormatted, self.labels[index]
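
For context, here is a minimal sketch of the Dataset class these __getitem__ methods would live in. The class name AudioDataset, the SR value, and the inline pre-emphasis filter are assumptions; the snippets only show the method bodies.

import librosa
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

SR = 16000  # assumed sample rate; the snippets only reference the name SR

def preemphasis(y, coef=0.97):
    # first-order pre-emphasis filter, standing in for preprocessing.preemphasis
    return np.append(y[0], y[1:] - coef * y[:-1])

class AudioDataset(Dataset):
    def __init__(self, file_path, file_names, labels, transform=None):
        self.file_path = file_path
        self.file_names = file_names
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, index):
        path = self.file_path + '/' + self.file_names[index] + '.wav'
        data = librosa.load(path, sr=SR, mono=True)[0]
        data = preemphasis(data)
        mfcc = librosa.feature.mfcc(y=data, sr=SR, hop_length=1024, n_mfcc=40)
        sound = torch.from_numpy(mfcc).float().unsqueeze(0)  # (1, 40, T)
        return torch.cat((sound, sound, sound)), self.labels[index]  # (3, 40, T)

Since T depends on each clip's length, batching with a plain DataLoader needs equal-length clips (or batch_size=1, or a custom collate_fn).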
Example #2
    def __getitem__(self, index):
        path = self.file_path + '/' + self.file_names[index] + '.wav'
        data = librosa.core.load(path, sr=SR, mono=True)[0]

        # pre-emphasis
        data = preprocessing.preemphasis(data)

        # multi-resolution cochleagram features instead of MFCC
        mrcg = MRCG.mrcg_extract(data, SR)

        soundFormatted = torch.from_numpy(mrcg).float()
        # add a channel axis, then repeat it -> (3, H, W)
        soundFormatted = soundFormatted.unsqueeze(dim=0)
        soundFormatted = torch.cat(
            (soundFormatted, soundFormatted, soundFormatted))
        return soundFormatted, self.labels[index]
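
The unsqueeze-then-cat pattern turns one 2-D feature map into a 3-channel tensor, e.g. to match the input of an ImageNet-pretrained CNN. A quick demonstration on dummy data (the 96x256 shape is made up; the real MRCG shape depends on the clip length):

import torch

x = torch.randn(96, 256)               # stand-in for a 2-D feature map
x3 = torch.cat((x.unsqueeze(0),) * 3)  # (3, 96, 256), as in the example
same = x.unsqueeze(0).repeat(3, 1, 1)  # equivalent one-liner
assert torch.equal(x3, same)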
Example #3
    def __getitem__(self, index):
        path = self.file_path + '/' + self.file_names[index] + '.wav'
        data = librosa.core.load(path, sr=SR, mono=True)[0]

        # pre-emphasis
        data = preprocessing.preemphasis(data)

        mfcc = librosa.feature.mfcc(y=data, sr=SR, hop_length=1024, n_mfcc=40)

        soundFormatted = torch.from_numpy(mfcc).float()
        # unlabeled (inference) variant: return the file name instead of a label
        return soundFormatted, self.file_names[index]
Example #4
    def __getitem__(self, index):
        path = self.file_path + '/' + self.file_names[index] + '.wav'
        data = librosa.core.load(path, sr=SR, mono=True)[0]

        # pre-emphasis
        data = preprocessing.preemphasis(data)

        mrcg = MRCG.mrcg_extract(data, SR)

        soundFormatted = torch.from_numpy(mrcg).float()
        # unlabeled (inference) variant: return the file name instead of a label
        return soundFormatted, self.file_names[index]
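
Because these two variants return the file name rather than a label, they fit a test-time Dataset. A sketch of how predictions could be paired with file names (model and test_set are hypothetical; batch_size=1 avoids collate errors from variable-length features):

import torch
from torch.utils.data import DataLoader

def predict(model, test_set):
    # map each file name to the predicted class index
    model.eval()
    results = {}
    with torch.no_grad():
        for features, names in DataLoader(test_set, batch_size=1):
            results[names[0]] = model(features).argmax(dim=1).item()
    return results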
Example #5
    def __getitem__(self, index):
        path = self.file_path + '/' + self.file_names[index] + '.wav'
        data = librosa.core.load(path, sr=SR, mono=True)[0]

        # pre-emphasis
        data = preprocessing.preemphasis(data)

        mfcc = librosa.feature.mfcc(y=data, sr=SR, hop_length=1024, n_mfcc=40)

        if self.transform:
            mfcc = self.transform(mfcc)
            soundFormatted = mfcc.float()
            # drop the channel axis added by the transform -> (40, T)
            soundFormatted = torch.squeeze(soundFormatted)
            return soundFormatted, self.labels[index]

        soundFormatted = torch.from_numpy(mfcc).float()
        return soundFormatted, self.labels[index]
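
The squeeze() in the transform branch implies the transform adds a leading channel axis. torchvision's ToTensor behaves that way on a 2-D array, so it is a plausible candidate, though the snippets never name the transform:

import numpy as np
from torchvision import transforms

mfcc = np.random.randn(40, 94).astype(np.float32)  # dummy (n_mfcc, frames) array
to_tensor = transforms.ToTensor()
print(to_tensor(mfcc).shape)            # torch.Size([1, 40, 94])
print(to_tensor(mfcc).squeeze().shape)  # torch.Size([40, 94]), as in Example #5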