Example #1
    def create_h5(self, split_num, iter_num, input_sequence, DEL_TRAIN_WAV):

        # each iteration runs one worker process per split
        cpu_cores = split_num // iter_num
        clean_list = []
        noisy_list = []
        # noisy_dir = join(noisy_dir, 'train')
        training_data_list = search_wav(self.noisy_dir)
        print('Total training files: ', len(training_data_list))

        for file in training_data_list:
            # noisy names look like '<snr>_<noise>_<speech name...>.wav';
            # the paired clean file is the same utterance prefixed '0ms_n0_'
            try:
                snr, noise_name, clean_name1, clean_name2 = file.split(
                    '/')[-1].split('_')
                clean_file = join(
                    self.noisy_dir,
                    '_'.join(['0ms', 'n0', clean_name1, clean_name2]))
            except ValueError:
                snr, noise_name, clean_name = file.split('/')[-1].split('_')
                clean_file = join(self.noisy_dir,
                                  '_'.join(['0ms', 'n0', clean_name]))
            noisy_file = file

            clean_list.append(clean_file)
            noisy_list.append(noisy_file)

        # shuffle the paired lists together and cap the training-set size
        training_num = 30000
        t1, t2 = shuffle(np.array(clean_list), np.array(noisy_list))
        t1 = t1[:training_num]
        t2 = t2[:training_num]

        clean_split_list = split_list(t1, wanted_parts=split_num)
        noisy_split_list = split_list(t2, wanted_parts=split_num)

        # dispatch cpu_cores splits per iteration, one worker per split
        start = 0
        end = cpu_cores
        for num in range(iter_num):
            print(start, end)
            pool = Pool(cpu_cores)
            func = partial(_create_split_h5, clean_split_list,
                           noisy_split_list, self.save_h5_dir,
                           self.save_h5_name, input_sequence)
            pool.map(func, range(start, end))
            pool.close()
            pool.join()
            start = end
            end += cpu_cores
        # optionally remove the raw noisy wavs once the .h5 files exist
        if DEL_TRAIN_WAV:
            shutil.rmtree(self.noisy_dir)
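split_list is called above but not defined in the example; a minimal sketch, assuming it partitions a list into wanted_parts nearly equal contiguous chunks (only the name and signature come from the call above, the body is an assumption):

def split_list(alist, wanted_parts=1):
    # Assumed behavior: split `alist` into `wanted_parts` nearly equal
    # contiguous chunks, matching the call in create_h5 above.
    length = len(alist)
    return [alist[i * length // wanted_parts:
                  (i + 1) * length // wanted_parts]
            for i in range(wanted_parts)]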
Example #2
    def test(self, testing_data_dir, result_dir, test_saver, n_cores, num_test=None):
        print('Start Testing')
        tmp_list = search_wav(testing_data_dir)

        if num_test:
            # evaluate a random subset of num_test files
            test_list = np.random.choice(tmp_list, num_test)
        else:
            test_list = tmp_list

        print('All testing data number:', len(test_list))
        REG_dir = join(result_dir, 'REG')
        Noisy_write_dir = join(result_dir, 'Source')
        Clean_write_dir = join(result_dir, 'Target')

        # create each output directory if missing
        for d in (result_dir, REG_dir, Noisy_write_dir, Clean_write_dir):
            if not os.path.exists(d):
                os.makedirs(d)
        with tf.Session(config=self.config) as sess:
            self.saver.restore(sess=sess, save_path=test_saver)
            for file in tqdm(test_list):
                hop_length = 256
                file_name = file.split('/')[-1]
                # recover the paired clean-file name from the noisy-file name
                try:
                    snr, noise_name, clean_name1, clean_name2 = file_name.split('_')
                    clean_file = join(testing_data_dir, '_'.join(
                        ['0dB', 'n0', clean_name1, clean_name2]))
                except ValueError:
                    snr, noise_name, clean_name = file_name.split('_')
                    clean_file = join(testing_data_dir, '_'.join(
                        ['0dB', 'n0', clean_name]))
                noisy_file = join(testing_data_dir, file_name)
                REG_file = join(REG_dir, file_name)
                Noisy_file = join(Noisy_write_dir, file_name)
                Clean_file = join(Clean_write_dir, file_name)

                X_in_seq = wav2spec(noisy_file, sr=16000,
                                    forward_backward=True, SEQUENCE=False,
                                    norm=True, hop_length=hop_length)
                # run the regression layer to enhance the noisy spectrogram
                re_reg = sess.run([self.reg_layer],
                                  feed_dict={self.x_noisy: X_in_seq})[0]
                spec2wav(noisy_file, 16000, REG_file,
                         re_reg, hop_length=hop_length)
                copy_file(noisy_file, Noisy_file)
                copy_file(clean_file, Clean_file)
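search_wav appears in all three examples but is not shown; a minimal sketch, assuming it recursively collects the .wav paths under a directory (the body is an assumption, only the name and call signature come from the examples):

import os

def search_wav(data_dir):
    # Assumed behavior: walk `data_dir` recursively and return the
    # paths of all .wav files found.
    wav_list = []
    for root, _, files in os.walk(data_dir):
        for f in files:
            if f.endswith('.wav'):
                wav_list.append(os.path.join(root, f))
    return wav_list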
Example #3
def main():
    clean_dir = FLAGS.clean_dir
    noise_dir = FLAGS.noise_dir
    noisy_dir = FLAGS.noisy_dir
    enhanced_dir = FLAGS.enhanced_dir
    tb_dir = FLAGS.tb_dir
    saver_dir = FLAGS.saver_dir
    TRAIN = FLAGS.TRAIN
    TEST = FLAGS.TEST
    ncores = FLAGS.n_cores
    epochs = FLAGS.epochs
    batch_size = FLAGS.batch_size
    lr = FLAGS.learning_rate

    train_task = 'same_noise'  # task-name tag used to label this dataset

    # ===========================================================
    # ===========       Synthesize Noisy Data        ============
    # ===========================================================
    clean_file_list = search_wav(clean_dir)
    clean_train_list, clean_test_list = train_test_split(
        clean_file_list, test_size=0.2)
    noise_file_list = search_wav(noise_dir)
    noise_train_list, noise_test_list = train_test_split(
        noise_file_list, test_size=0.2)
    print('--- Synthesize Training Noisy Data ---')
    train_noisy_dir = join(noisy_dir, 'train')
    sr_clean = 16000  # sampling rate of the clean corpus
    sr_noise = 44100  # sampling rate of the noise corpus
    snr_list = ['20dB', '0dB', '-20dB']
    data_num = 100  # number of noisy files to synthesize per SNR level
    syn_train = Synth(clean_train_list, noise_train_list[0:10],
                      sr_clean, sr_noise)
    syn_train.gen_noisy(snr_list, train_noisy_dir,
                        data_num=data_num, ADD_CLEAN=True, cpu_cores=ncores)
    print('--- Synthesize Testing Noisy Data ---')
    test_noisy_dir = join(noisy_dir, 'test')
    sr_clean = 16000
    sr_noise = 44100
    data_num = 10  # number of noisy files to synthesize per SNR level
    snr_list = ['15dB', '5dB', '-5dB']
    # the 'same_noise' task reuses the training noises for testing
    syn_test = Synth(clean_test_list, noise_train_list[0:10],
                     sr_clean, sr_noise)
    syn_test.gen_noisy(snr_list, test_noisy_dir,
                       data_num=data_num, ADD_CLEAN=True, cpu_cores=ncores)
    # ===========================================================
    # ===========       Create Training Matrix       ============
    # ===========================================================
    print('--- Generate Training Matrix ---')
    training_files_dir = FLAGS.training_files_dir
    train_noisy_dir = join(noisy_dir, 'train')
    DEL_TRAIN_WAV = True  # delete the raw noisy wavs after the .h5 files are written
    gen_mat = GenMatrix(training_files_dir, train_task, train_noisy_dir)
    split_num = 50  # number of .h5 file splits
    iter_num = 2  # iterations over the splits; cpu_cores = split_num // iter_num
    input_sequence = True  # whether the input data is treated as a sequence
    gen_mat.create_h5(split_num=split_num, iter_num=iter_num,
                      input_sequence=input_sequence,
                      DEL_TRAIN_WAV=DEL_TRAIN_WAV)

    # ===========================================================
    # ===========             Main Model             ============
    # ===========================================================
    print('--- Build Model ---')
    note = 'DDAE'
    date = '0604'
    split_num = 50
    training_files_dir = FLAGS.training_files_dir
    model = REG(tb_dir, saver_dir, train_task, date, gpu_num='3', note=note)
    model.build(init_learning_rate=lr, reuse=False)  # lr comes from FLAGS.learning_rate

    print('--- Train Model ---')
    model.train(training_files_dir, split_num, epochs, batch_size)

    print('--- Test Model ---')
    testing_data_dir = join(noisy_dir, 'test')
    result_dir = '../data/enhanced/{}_{}/'.format(note, date)
    num_test = 30  # number of test files to evaluate (None => all)
    cpu_cores = 30
    test_saver = '{}_{}/{}/best_saver_{}'.format(
        saver_dir, note, date, train_task)
    model.test(testing_data_dir, result_dir,
               test_saver, cpu_cores, num_test)
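main() reads its configuration from FLAGS, whose definitions are not part of this example; a minimal sketch of how they might be declared with the TF 1.x flags API (the flag names match the attributes used above, but every default value below is an assumption):

import tensorflow as tf

flags = tf.app.flags
# Paths; the defaults are placeholders, not from the original code.
flags.DEFINE_string('clean_dir', '../data/clean', 'directory of clean wavs')
flags.DEFINE_string('noise_dir', '../data/noise', 'directory of noise wavs')
flags.DEFINE_string('noisy_dir', '../data/noisy', 'output directory for synthesized noisy wavs')
flags.DEFINE_string('enhanced_dir', '../data/enhanced', 'output directory for enhanced wavs')
flags.DEFINE_string('tb_dir', '../logs/tb', 'TensorBoard log directory')
flags.DEFINE_string('saver_dir', '../saver', 'checkpoint directory')
flags.DEFINE_string('training_files_dir', '../data/h5', 'directory of training .h5 files')
# Run switches and hyper-parameters.
flags.DEFINE_boolean('TRAIN', True, 'run training')
flags.DEFINE_boolean('TEST', True, 'run testing')
flags.DEFINE_integer('n_cores', 25, 'CPU cores for multiprocessing')
flags.DEFINE_integer('epochs', 30, 'training epochs')
flags.DEFINE_integer('batch_size', 128, 'batch size')
flags.DEFINE_float('learning_rate', 1e-3, 'initial learning rate')
FLAGS = flags.FLAGS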