def predict_one_label(self, data):
    # Count this call against the attack's query budget.
    self.query_num += 1
    data = data.squeeze()
    # SincNet returns the predicted speaker index and its probability;
    # only the index is needed here.
    pred_real, pred_pro = Sincnet.sentence_test(
        self.model,
        torch.from_numpy(data).float().cuda())
    return pred_real
    # Skip files until the requested start index is reached.
    if not flag:
        if a.split('-')[0] == str(start):
            flag = True
        else:
            continue
    # Extract the speaker names according to the dataset's file-naming scheme.
    if MODE == "Librispeech":
        real_name = a.split('-')[2]
        target_name = t.split('-')[2]
    elif MODE == "TIMIT":
        real_name = a.split('-')[-1].split('.')[0].lower()
        target_name = t.split('-')[-1].split('.')[0].lower()
    # read audio
    real_data, fs = sf.read(os.path.join(attackdir, a))
    target_data, fs = sf.read(os.path.join(targetdir, t))
    # Sanity check: the model must classify the target audio as the target speaker.
    pid, _ = Sincnet.sentence_test(model, torch.from_numpy(target_data).float().cuda())
    target_index = speaker_label[target_name]
    if pid != target_index:
        print("error: target audio not recognized as the target speaker, skipping")
        continue
    # set file name
    print("now attack audio:", a)
    dl = a.split('-')[0]

    # local and attenuation attack
    save_pfn = os.path.join(abs_path, save_dir, "test-{}.txt".format(dl))
    save_info_n = os.path.join(abs_path, save_dir, "info-{}.txt".format(dl))
    laatk = LOCAL_ATT_HSJA_ATTACK.LAATTACK(os.path.join(attackdir, a), os.path.join(targetdir, t),
                                           save_p_fname=save_pfn, save_info_fname=save_info_n,
                                           MODE=MODE, dct_field=0.65)
    o2_audio, preturb, query_num, interval = laatk.targeted_attack()
    # save audio
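    # (The original snippet is truncated here.) An assumed sketch of the save step,
    # reusing o2_audio, fs, abs_path, save_dir, and dl from above; the output file
    # name "adv-{}.wav" is illustrative, not taken from the source.
    sf.write(os.path.join(abs_path, save_dir, "adv-{}.wav".format(dl)), o2_audio, fs)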
Example 3
def predict_one_label(model, audio):
    # Return only the predicted speaker index; the probability is discarded.
    qq, _ = Sincnet.sentence_test(model, audio.float().cuda())
    return qq
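A hypothetical call, assuming a loaded SincNet model object and a mono waveform read with soundfile; the file name and variable names below are illustrative, not from the source.

import soundfile as sf
import torch

wav, fs = sf.read("some-utterance.wav")   # mono waveform as a NumPy float array
label = predict_one_label(model, torch.from_numpy(wav))
print(label)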
Example 4
def show_max(wav):
    # Relies on a module-level `model`; prints the predicted speaker index.
    qq, _ = Sincnet.sentence_test(model, wav.float().cuda())
    print(qq)
def predict_label(self, data):
    # Same as predict_one_label above, but without updating the query counter.
    pred_real, pred_pro = Sincnet.sentence_test(self.model,
                                                torch.from_numpy(data).float().cuda())
    return pred_real