Code example #1
    # Dataset-style constructor: relies on the project-local modules
    # pp (preprocessing), audio_utils and pw (WORLD feature helpers),
    # plus librosa, os, numpy (np) and torch.
    def __init__(self, config):
        self.config = config
        self.filenames = librosa.util.find_files(config['data']['sample_set_dir'])
        self.set = {}

        for file in self.filenames:
            filename = os.path.basename(file)
            if filename[-1] == '/':  # strip a stray trailing slash
                filename = filename[:-1]

            audio, labels = pp.get_samples_and_labels(filename, config)

            if config['data']['type'] == 'mel':
                # Mel pipeline: store (time, freq)-oriented spectrograms.
                spec = torch.Tensor(audio_utils.wav2spectrogram(audio)).t()
                melspec = torch.Tensor(audio_utils.wav2melspectrogram(audio)).t()
                self.set[filename] = [melspec, labels, spec]
            else:
                # WORLD pipeline: f0, aperiodicity, spectral envelope and
                # coded (mel-cepstral) spectral envelope.
                audio = np.array(audio, dtype=np.float64)
                f0, ap, sp, coded_sp = pw.cal_mcep(audio)
                coded_sp = torch.Tensor(coded_sp.T)
                self.set[filename] = [f0, ap, sp, coded_sp, labels]
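
The helpers audio_utils.wav2melspectrogram and pw.cal_mcep above are project-local and not shown here. As a rough illustration of what they likely wrap, here is a minimal standalone sketch of the two feature branches using librosa and pyworld directly; the function names, sample rate and frame parameters (n_fft, hop_length, n_mels, the 36-dimensional coded envelope) are assumptions for illustration, not values taken from the repository.

import librosa
import numpy as np
import pyworld

def wav2melspectrogram_sketch(wav, sr=16000, n_fft=1024, hop_length=256, n_mels=80):
    # Hypothetical stand-in for audio_utils.wav2melspectrogram:
    # magnitude STFT followed by a mel filterbank.
    spec = np.abs(librosa.stft(wav, n_fft=n_fft, hop_length=hop_length))
    mel = librosa.feature.melspectrogram(S=spec ** 2, sr=sr, n_mels=n_mels)
    return mel

def cal_mcep_sketch(wav, sr=16000, dim=36):
    # Hypothetical stand-in for pw.cal_mcep: WORLD analysis via pyworld.
    wav = np.asarray(wav, dtype=np.float64)
    f0, t = pyworld.harvest(wav, sr)         # fundamental frequency contour
    sp = pyworld.cheaptrick(wav, f0, t, sr)  # smoothed spectral envelope
    ap = pyworld.d4c(wav, f0, t, sr)         # aperiodicity
    coded_sp = pyworld.code_spectral_envelope(sp, sr, dim)
    return f0, ap, sp, coded_sp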
Code example #2
def get_session_data(data_dir, exclude_unlabelled=True):
    # Walk one session directory and collect, per wav file, its
    # mel-spectrogram, linear spectrogram and label tuple.
    os.chdir(data_dir)

    filenames = []
    specs = []
    mels = []
    all_labels = []

    for foldername in os.listdir(data_dir):
        if foldername not in ("Annotations", ".DS_Store"):
            for filename in os.listdir(os.path.join(data_dir, foldername)):
                if filename != ".DS_Store":
                    wav, labels = get_wav_and_labels(filename, data_dir)

                    # Skip utterances without a usable label (rare emotions).
                    if not (exclude_unlabelled and labels[0] == -1):
                        mel = audio_utils.wav2melspectrogram(wav)
                        spec = audio_utils.wav2spectrogram(wav)

                        filenames.append(filename)
                        mels.append(torch.Tensor(mel))
                        specs.append(torch.Tensor(spec))
                        all_labels.append(labels)

        print(foldername + " completed.")

    return filenames, mels, specs, all_labels
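
A minimal sketch of how the return values of get_session_data could be consumed; the session path, the padding step and the output filename are illustrative choices, not taken from the repository, and the sketch assumes all mel-spectrograms share the same number of mel bands.

import torch
from torch.nn.utils.rnn import pad_sequence

filenames, mels, specs, all_labels = get_session_data("/path/to/Session1")

# librosa-style spectrograms are (freq, time); transpose so the variable-length
# time axis comes first, then zero-pad into a single batch tensor.
mel_batch = pad_sequence([m.t() for m in mels], batch_first=True)
print(mel_batch.shape)  # (num_files, max_frames, n_mels)

# Persist the processed session so it only has to be computed once.
torch.save({"filenames": filenames, "mels": mels, "labels": all_labels},
           "session1_processed.pt")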
Code example #3
def all_wavs_and_labels(exclude_unlabelled=True):
    # Walk every session folder under the (module-level) dataset_dir and
    # collect spectrograms plus categorical, continuous and speaker labels.
    os.chdir(dataset_dir)

    filenames = []
    specs = []
    mels = []
    labels = []
    conts = []
    conts_dis = []
    speakers = []

    for foldername in os.listdir(dataset_dir):
        if foldername not in ("Annotations", ".DS_Store", "Processed"):
            for filename in os.listdir(os.path.join(dataset_dir, foldername)):
                if filename != ".DS_Store":
                    wav, label, cont, cont_dis, speaker = \
                        audio_utils.get_wav_and_labels(filename)

                    # Skip utterances without a usable label (rare emotions).
                    if not (exclude_unlabelled and label == -1):
                        spec = audio_utils.wav2spectrogram(wav)
                        mel = audio_utils.spectrogram2melspectrogram(spec)

                        filenames.append(filename)
                        mels.append(torch.Tensor(mel))
                        specs.append(torch.Tensor(spec))

                        labels.append(label)
                        conts.append(cont)
                        conts_dis.append(cont_dis)
                        speakers.append(speaker)
        print(foldername + " done.")

    return filenames, mels, specs, labels, conts, conts_dis, speakers
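
The function returns seven parallel lists. A small sketch of bundling them into per-utterance records and checking the label distribution; the record field names are made up for illustration, and the Counter call assumes the emotion labels are hashable scalars (they are compared against -1 above).

from collections import Counter

filenames, mels, specs, labels, conts, conts_dis, speakers = all_wavs_and_labels()

# Bundle the parallel lists into one record per utterance.
dataset = [
    {"file": f, "mel": m, "spec": s, "emotion": l,
     "continuous": c, "continuous_discrete": cd, "speaker": spk}
    for f, m, s, l, c, cd, spk in zip(filenames, mels, specs,
                                      labels, conts, conts_dis, speakers)
]

# Quick sanity check on how balanced the emotion classes are.
print(Counter(labels))
print(len(dataset), "labelled utterances")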
Code example #4
    # Commented out in the original: per-session batch saving of the
    # processed lists, e.g.
    # save_data(filenames, dataset_dir + '/Processed_data/filenames' + ses_number)
    # save_data(mels, dataset_dir + '/Processed_data/melspecs' + ses_number)
    # save_data(specs, dataset_dir + '/Processed_data/specs' + ses_number)
    # save_data(labels, dataset_dir + '/Processed_data/labels' + ses_number)

    # Smoke test on a single utterance: load it, plot its spectrograms and
    # save the mel-spectrogram image.
    wav, label = get_wav_and_labels("Ses02F_impro03_F002.wav",
                                    dataset_dir + "/Session2")

    spec = audio_utils.wav2spectrogram(wav)
    audio_utils.plot_spec(spec, type='log')

    melspec = audio_utils.spectrogram2melspectrogram(spec)
    audio_utils.plot_spec(melspec)

    fn = str(label[0].item()) + 'to' + '2-' + str(label[1].item()) + '.png'
    audio_utils.save_spec(melspec, "Test", fn)

    # Also commented out in the original: inverting the spectrogram back to
    # audio and saving the reconstruction.
    # reproduced = audio_utils.spectrogram2wav(spec)
    # audio_utils.save_wav(reproduced, "/Users/Max/MScProject/datasets/Produced/test1.wav")
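
audio_utils.spectrogram2wav in the commented-out reconstruction step is project-local; a common way to implement such a function is Griffin-Lim phase reconstruction. Below is a minimal sketch using librosa and soundfile, assuming a magnitude (not log) spectrogram and the same illustrative hop length and sample rate as in the earlier sketches.

import librosa
import soundfile as sf

def spectrogram2wav_sketch(mag_spec, hop_length=256, n_iter=60):
    # Hypothetical stand-in for audio_utils.spectrogram2wav: Griffin-Lim
    # iteratively estimates the phase the magnitude spectrogram discarded,
    # then inverts the STFT back to a waveform.
    return librosa.griffinlim(mag_spec, n_iter=n_iter, hop_length=hop_length)

# Example usage (paths and sample rate are illustrative):
# reproduced = spectrogram2wav_sketch(spec)
# sf.write("reconstruction_test.wav", reproduced, 16000)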