Example #1
import os

import numpy as np
import pandas as pd
import torch
from tqdm import tqdm


def test(model, device, noisy_path, clean_path, enhance_path, score_path, args):
    # Move the model to the target device and put it in inference mode.
    model = model.to(device)
    model.eval()
    # A bare torch.no_grad() call builds a context manager and immediately
    # discards it, so it has no effect; disable gradients globally instead.
    torch.set_grad_enabled(False)
    #checkpoint = torch.load(model_path)
    #model.SEmodel.load_state_dict(checkpoint['model'])
    
    # load data
    test_files = np.array(getfilename(noisy_path))
    c_dict = get_cleanwav_dic(clean_path)
    
    # Reset the score file and make sure its parent directory exists.
    if os.path.exists(score_path):
        os.remove(score_path)
    check_folder(score_path)
    print('Save PESQ & STOI results to:', score_path)
    
    with open(score_path, 'a') as f:
        f.write('Filename,PESQ,STOI\n')

    print('Testing...')       
    for test_file in tqdm(test_files):
        write_score(model, device, test_file, c_dict, enhance_path, score_path)
    
    # Append the per-file averages as a final summary row.
    data = pd.read_csv(score_path)
    pesq_mean = data['PESQ'].to_numpy().astype('float').mean()
    stoi_mean = data['STOI'].to_numpy().astype('float').mean()
    with open(score_path, 'a') as f:
        f.write(','.join(('Average', str(pesq_mean), str(stoi_mean))) + '\n')
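
Example #1 relies on a few project helpers that are not shown (getfilename, get_cleanwav_dic, check_folder). A minimal sketch of what they could look like, assuming wav files are paired by basename; these bodies are guesses for illustration, not the project's actual implementations:

import glob
import os


def getfilename(path):
    # Hypothetical: recursively collect the .wav files under a directory.
    return sorted(glob.glob(os.path.join(path, '**', '*.wav'), recursive=True))


def get_cleanwav_dic(clean_path):
    # Hypothetical: map each clean file's basename to its full path so a
    # noisy file can be matched with its clean reference by name.
    return {os.path.splitext(os.path.basename(p))[0]: p
            for p in getfilename(clean_path)}


def check_folder(path):
    # Hypothetical: ensure the parent directory of a file path exists.
    os.makedirs(os.path.dirname(path), exist_ok=True)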
Example #2
import os

import numpy as np
import pandas as pd
import torch
from tqdm import tqdm


def test(model, device, noisy_path, clean_path, asr_dict, enhance_path, score_path, args):
    # Move the model to the target device and put it in inference mode.
    model = model.to(device)
    model.eval()
    # A bare torch.no_grad() call has no effect; disable gradients globally.
    torch.set_grad_enabled(False)
    
    # Load the noisy test files; args.test_num optionally caps the set.
    if args.test_num is None:
        test_files = np.array(getfilename(noisy_path, "test"))
    else:
        test_files = np.array(getfilename(noisy_path, "test")[:args.test_num])

    c_dict = get_cleanwav_dic(clean_path, args.corpus)
    
    # When google_asr is set, WER columns (scored against the Google ASR
    # transcriptions) are added and the score file is renamed accordingly.
    google_asr = True
    if google_asr:
        score_path = score_path.replace(".csv", "_wer.csv")

    # Reset the score file and make sure its parent directory exists.
    if os.path.exists(score_path):
        os.remove(score_path)
    check_folder(score_path)
    if google_asr:
        print('Save WER results to:', score_path)
        with open(score_path, 'a') as f:
            f.write('Filename,PESQ,STOI,WER,CleanWER\n')
    else:
        print('Save PESQ&STOI results to:', score_path)
        with open(score_path, 'a') as f:
            f.write('Filename,PESQ,STOI\n')

    print('Testing...')       
    for test_file in tqdm(test_files):
        # Look up the precomputed ASR target for this utterance by basename.
        name = test_file.split('/')[-1].replace('.wav', '')
        ilen, y = asr_dict[name][0], asr_dict[name][1]
        write_score(model, device, test_file, c_dict, enhance_path, ilen, y,
                    score_path, args.asr_result, args.corpus)

    # Append per-file averages; include the WER columns when present so the
    # summary row has as many fields as the CSV header.
    data = pd.read_csv(score_path)
    pesq_mean = data['PESQ'].to_numpy().astype('float').mean()
    stoi_mean = data['STOI'].to_numpy().astype('float').mean()
    avg_row = ['Average', str(pesq_mean), str(stoi_mean)]
    if google_asr:
        avg_row.append(str(data['WER'].to_numpy().astype('float').mean()))
        avg_row.append(str(data['CleanWER'].to_numpy().astype('float').mean()))
    with open(score_path, 'a') as f:
        f.write(','.join(avg_row) + '\n')
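
write_score itself is not shown in either example. A minimal sketch of what the simpler Example #1 variant might do, assuming a waveform-in/waveform-out model at 16 kHz and the pesq and pystoi packages for scoring; the name and signature come from Example #1's call site, the body is an assumption:

import os

import librosa
import soundfile as sf
import torch
from pesq import pesq      # pip install pesq
from pystoi import stoi    # pip install pystoi


def write_score(model, device, test_file, c_dict, enhance_path, score_path):
    # Hypothetical scorer for Example #1: enhance one noisy file, save it,
    # and append its PESQ/STOI against the clean reference.
    name = os.path.splitext(os.path.basename(test_file))[0]
    noisy, sr = librosa.load(test_file, sr=16000)
    clean, _ = librosa.load(c_dict[name], sr=16000)

    with torch.no_grad():
        enhanced = model(torch.from_numpy(noisy).unsqueeze(0).to(device))
    enhanced = enhanced.squeeze(0).cpu().numpy()

    # Trim both signals to a common length before scoring.
    n = min(len(clean), len(enhanced))
    pesq_score = pesq(sr, clean[:n], enhanced[:n], 'wb')
    stoi_score = stoi(clean[:n], enhanced[:n], sr, extended=False)

    sf.write(os.path.join(enhance_path, name + '.wav'), enhanced, sr)
    with open(score_path, 'a') as f:
        f.write(f'{name},{pesq_score},{stoi_score}\n')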
Example #3
import os

import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader


def Load_data(args):
    # Collect the noisy training spectrogram paths.
    train_spec_noisy_list = getfilename(
        os.path.join(args.out_path, 'spec', 'train/noisy'), "train")

    n_files = np.array(train_spec_noisy_list)

    # Split into train/validation sets; args.train_num optionally caps the pool.
    if args.train_num is None:
        train_paths, val_paths = train_test_split(n_files,
                                                  test_size=args.val_ratio,
                                                  random_state=999)
    else:
        train_paths, val_paths = train_test_split(n_files[:args.train_num],
                                                  test_size=args.val_ratio,
                                                  random_state=999)

    print('Reading json files...')
    # Merge every ASR label json into one lookup dictionary.
    asr_dict = {}
    for json_path in args.asr_y_path.split(','):
        asr_dict = load_asr_data(json_path, asr_dict, args.TMHINT)

    train_dataset = CustomDataset(train_paths, asr_dict, args.TMHINT)
    val_dataset = CustomDataset(val_paths, asr_dict, args.TMHINT)

    # [Yo] Add padding collate_fn
    loader = {
        'train':
        DataLoader(train_dataset,
                   batch_size=args.batch_size,
                   shuffle=True,
                   num_workers=8,
                   pin_memory=True,
                   collate_fn=pad_collate),
        'val':
        DataLoader(val_dataset,
                   batch_size=args.batch_size,
                   num_workers=8,
                   pin_memory=True,
                   collate_fn=pad_collate)
    }

    return loader
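
The "[Yo] Add padding collate_fn" comment refers to pad_collate, which is not shown. A minimal sketch of such a collate_fn, assuming each CustomDataset item is a (noisy_spec, clean_spec, ilen, y) tuple of variable-length, time-major tensors; that tuple layout is a guess based on how asr_dict is built:

import torch
from torch.nn.utils.rnn import pad_sequence


def pad_collate(batch):
    # Hypothetical: zero-pad variable-length items along the time axis so
    # they stack into one batch tensor.
    noisy, clean, ilens, ys = zip(*batch)
    noisy = pad_sequence(noisy, batch_first=True)  # (B, T_max, F)
    clean = pad_sequence(clean, batch_first=True)
    ys = pad_sequence(ys, batch_first=True)        # padded ASR targets
    return noisy, clean, torch.as_tensor(ilens), ys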
Example #4
import os

import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader


def Load_data(args):
    # Collect the noisy training spectrogram paths.
    train_spec_noisy_list = getfilename(
        os.path.join(args.out_path, 'spec', 'train/noisy'), "train")

    n_files = np.array(train_spec_noisy_list)

    # Split into train/validation sets; args.train_num optionally caps the pool.
    if args.train_num is None:
        train_paths, val_paths = train_test_split(n_files,
                                                  test_size=args.val_ratio,
                                                  random_state=999)
    else:
        train_paths, val_paths = train_test_split(n_files[:args.train_num],
                                                  test_size=args.val_ratio,
                                                  random_state=999)

    asr_dict = load_y_dict(args)

    train_dataset = CustomDataset(train_paths, asr_dict, args.corpus)
    val_dataset = CustomDataset(val_paths, asr_dict, args.corpus)

    # [Yo] Add padding collate_fn
    loader = {
        'train':
        DataLoader(train_dataset,
                   batch_size=args.batch_size,
                   shuffle=True,
                   num_workers=8,
                   pin_memory=True,
                   collate_fn=pad_collate),
        'val':
        DataLoader(val_dataset,
                   batch_size=args.batch_size,
                   num_workers=8,
                   pin_memory=True,
                   collate_fn=pad_collate)
    }

    return loader
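
A sketch of how Load_data might be invoked; the attribute names are the ones the function body reads, while the values are placeholders and load_y_dict presumably reads its own fields (e.g. the ASR label paths) from the same namespace:

from argparse import Namespace

# Hypothetical args for illustration only.
args = Namespace(out_path='./data', train_num=None, val_ratio=0.1,
                 batch_size=16, corpus='TMHINT')

loader = Load_data(args)
for batch in loader['train']:
    ...  # one zero-padded training batch produced by pad_collate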