Example #1
def generate_noise_data(path):
    with open(path, 'rb') as f:
        audio_list = pickle.load(f)
        print('read pkl from', path)
    spec_list = []
    record_count = 0
    for i, audio_name in enumerate(audio_list):
        spec, y = process_stft(path_join(wave_noise_dir, audio_name))
        if spec.shape[0] >= config.max_sequence_length:
            spec_list.extend(
                split_spectrogram(spec, config.max_sequence_length))
        else:
            spec_list.append(
                expand_spectrogram(spec, config.max_sequence_length))

        if len(spec_list) >= config.tfrecord_size:

            fname = 'noise' + increment_id(record_count, 5) + '.tfrecords'
            temp = spec_list[:config.tfrecord_size]
            spec_list = spec_list[config.tfrecord_size:]
            ex_list = [make_noise_example(spec) for spec in temp]
            writer = tf.python_io.TFRecordWriter(
                path_join(save_noise_dir, fname))
            for ex in ex_list:
                writer.write(ex.SerializeToString())
            writer.close()
            record_count += 1
            print(fname, 'created')
    print('save in %s' % save_noise_dir)
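Example #1 relies on two helpers, split_spectrogram and expand_spectrogram, to normalize every spectrogram to config.max_sequence_length frames before serialization. A minimal numpy sketch of what they might look like (the non-overlapping chunking and zero padding are assumptions, not the project's actual implementation):

import numpy as np

def split_spectrogram(spec, max_len):
    # Assumed policy: chop a long spectrogram into non-overlapping chunks of
    # max_len frames, dropping a trailing remainder shorter than max_len.
    n_chunks = spec.shape[0] // max_len
    return [spec[k * max_len:(k + 1) * max_len] for k in range(n_chunks)]

def expand_spectrogram(spec, max_len):
    # Assumed policy: zero-pad a short spectrogram along the time axis
    # up to max_len frames.
    pad = max_len - spec.shape[0]
    return np.pad(spec, ((0, pad), (0, 0)), mode='constant')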
Example #2
def generate_valid_data(path):
    with open(path, 'rb') as f:
        wav_list = pickle.load(f)
    print('read pkl from %s' % path)
    # each record should be (file.wav, ((st, end), (st, end), ...))
    file_list = [i[0] for i in wav_list]
    label_list = [i[1] for i in wav_list]
    tuple_list = []
    counter = 0
    record_count = 0
    for i, audio_name in enumerate(file_list):
        # print(audio_name)
        # print(label_list[i])
        spec, seq_len, labels = make_record(
            path_join(wave_valid_dir, audio_name),
            label_list[i])

        counter += 1
        tuple_list.append((spec, labels, seq_len))
        if counter == config.tfrecord_size:
            tuple_list = batch_padding(tuple_list)
            fname = 'data' + increment_id(record_count, 5) + '.tfrecords'
            ex_list = [make_example(spec, seq_len, labels) for
                       spec, labels, seq_len in tuple_list]
            writer = tf.python_io.TFRecordWriter(
                path_join(save_valid_dir, fname))
            for ex in ex_list:
                writer.write(ex.SerializeToString())
            writer.close()
            record_count += 1
            counter = 0
            tuple_list.clear()
            print(fname, 'created')
    print('save in %s' % save_valid_dir)
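Example #2 calls batch_padding before writing each shard, but the helper itself is not shown. A plausible sketch, assuming it simply pads every spectrogram in the batch to the length of the longest one so the shard holds uniformly shaped tensors:

import numpy as np

def batch_padding(tuple_list):
    # tuple_list holds (spec, labels, seq_len) tuples; pad each spec along
    # the time axis to the longest seq_len in the batch (assumed behaviour).
    max_len = max(seq_len for _, _, seq_len in tuple_list)
    padded = []
    for spec, labels, seq_len in tuple_list:
        pad = max_len - spec.shape[0]
        spec = np.pad(spec, ((0, pad), (0, 0)), mode='constant')
        padded.append((spec, labels, seq_len))
    return padded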
Example #3
def generate_noise_data(path):
    with open(path, 'rb') as f:
        audio_list = pickle.load(f)
        print('read pkl from', path)
    spec_list = []
    counter = 0
    record_count = 0
    for i, audio_name in enumerate(audio_list):
        spec, y = process_stft(path_join(wave_noise_dir, audio_name))
        spec_list.append(spec)
        counter += 1
        if counter == config.tfrecord_size:
            spec_list = [
                expand_spectrogram(s, config.max_sequence_length)
                for s in spec_list
            ]

            fname = 'noise' + increment_id(record_count, 5) + '.tfrecords'
            ex_list = [make_noise_example(spec) for spec in spec_list]
            writer = tf.python_io.TFRecordWriter(
                path_join(save_noise_dir, fname))
            for ex in ex_list:
                writer.write(ex.SerializeToString())
            writer.close()
            record_count += 1
            counter = 0
            spec_list.clear()
            print(fname, 'created')
    print('save in %s' % save_noise_dir)
Example #4
def generate_trainning_data(path):
    with open(path, 'rb') as f:
        wav_list = pickle.load(f)
    print('read pkl from %s' % path)
    audio_list = [i[0] for i in wav_list]
    label_list = [i[1] for i in wav_list]
    text_list = [i[2] for i in wav_list]
    assert len(audio_list) == len(text_list)
    tuple_list = []
    counter = 0
    record_count = 0
    for i, audio_name in enumerate(audio_list):
        spec, seq_len, label_values, label_indices, label_shape = make_record(
            path_join(wave_train_dir, audio_name), label_list[i])
        # print(text_list[i])
        # print(label_values)
        if spec is not None:
            counter += 1
            tuple_list.append(
                (spec, seq_len, label_values, label_indices, label_shape))
        if counter == config.tfrecord_size:
            tuple_list = batch_padding_trainning(tuple_list)
            fname = 'data' + increment_id(record_count, 5) + '.tfrecords'
            ex_list = [
                make_trainning_example(spec, seq_len, label_values,
                                       label_indices, label_shape) for spec,
                seq_len, label_values, label_indices, label_shape in tuple_list
            ]
            writer = tf.python_io.TFRecordWriter(
                path_join(save_train_dir, fname))
            for ex in ex_list:
                writer.write(ex.SerializeToString())
            writer.close()
            record_count += 1
            counter = 0
            tuple_list.clear()
            print(fname, 'created')
    print('save in %s' % save_train_dir)
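Example #4 serializes CTC-style sparse labels (values, indices, shape) next to each spectrogram. The project's make_trainning_example is not shown; a hedged sketch of how such an example could be built with the standard tf.train.Example proto (the feature keys and the flattening of label_indices are assumptions and must match the parsing side):

import tensorflow as tf

def make_trainning_example(spec, seq_len, label_values, label_indices,
                           label_shape):
    feature = {
        'spec': tf.train.Feature(
            float_list=tf.train.FloatList(value=spec.reshape(-1).tolist())),
        'seq_len': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[seq_len])),
        'label_values': tf.train.Feature(
            int64_list=tf.train.Int64List(value=list(label_values))),
        'label_indices': tf.train.Feature(
            int64_list=tf.train.Int64List(
                value=[v for idx in label_indices for v in idx])),
        'label_shape': tf.train.Feature(
            int64_list=tf.train.Int64List(value=list(label_shape))),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))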
Example #5
def generate_valid_data(pkl_path):
    with open(pkl_path, 'rb') as f:
        wav_list = pickle.load(f)
    print('read pkl from %s' % pkl_path)
    audio_list = [i[0] for i in wav_list]
    correctness_list = [i[1] for i in wav_list]
    label_list = [i[2] for i in wav_list]
    assert len(audio_list) == len(correctness_list)
    tuple_list = []
    counter = 0
    record_count = 0
    for audio_name, correctness, label in zip(audio_list, correctness_list,
                                              label_list):
        spectrogram, wave = process_stft(path_join(wave_valid_dir, audio_name))
        seq_len = spectrogram.shape[0]
        label_values, _, _ = convert_label(label)

        tuple_list.append(
            (spectrogram, seq_len, correctness, label_values, audio_name))
        counter += 1
        if counter == config.tfrecord_size:
            tuple_list = batch_padding_valid(tuple_list)
            fname = 'valid' + increment_id(record_count, 5) + '.tfrecords'
            ex_list = [
                make_valid_example(spec, seq_len, correctness, label_values,
                                   audio_name) for spec, seq_len, correctness,
                label_values, audio_name in tuple_list
            ]
            writer = tf.python_io.TFRecordWriter(
                path_join(save_valid_dir, fname))
            for ex in ex_list:
                writer.write(ex.SerializeToString())
            writer.close()
            record_count += 1
            counter = 0
            tuple_list.clear()
            print(fname, 'created')
    print('save in %s' % save_valid_dir)
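After any of the generators above have run, the shards can be sanity-checked with the matching TF1 reader API (tf.python_io.tf_record_iterator). The snippet below only counts serialized examples per file and assumes path_join and save_valid_dir are the same module-level names used above:

import tensorflow as tf
from glob import glob

for fname in sorted(glob(path_join(save_valid_dir, '*.tfrecords'))):
    count = sum(1 for _ in tf.python_io.tf_record_iterator(fname))
    print(fname, 'holds', count, 'examples')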