def eval_recu(epoch):
    assert config.batch_size == 1
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    # test_or_valid = 'test'
    test_or_valid = 'valid'
    # test_or_valid = 'train'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms
        src_original = src.transpose(0, 1)  # to T,bs,F
        predict_multi_mask_all = None
        samples_list = []
        for len_idx in range(config.MIN_MIX + 2, 2, -1):  # separate the speakers one by one
            tgt_max_len = len_idx  # 4,3,2 with bos and eos.
            topk_k = len_idx - 2
            tgt = Variable(torch.ones(len_idx, config.batch_size))  # a dummy tgt; the speaker ids do not matter at test time
            src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
            tgt_len = Variable(torch.LongTensor([tgt_max_len - 2 for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
            if use_cuda:
                src = src.cuda().transpose(0, 1)  # to T,bs,fre
                src_original = src_original.cuda()  # to T,bs,fre
                tgt = tgt.cuda()
                src_len = src_len.cuda()
                tgt_len = tgt_len.cuda()
                feas_tgt = feas_tgt.cuda()
            # try:
            if len(opt.gpus) > 1:
                samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                    src, src_len, dict_spk2idx, tgt, config.beam_size, src_original)
            else:
                samples, predicted_masks = model.beam_sample(
                    src, src_len, dict_spk2idx, tgt, config.beam_size, src_original)
            # except:
            #     continue
            # '''
            # expand the raw mixed-features to topk_max channel.
            src = src_original.transpose(0, 1)  # make sure separation uses the original mixture
            siz = src.size()
            assert len(siz) == 3
            topk_max = topk_k
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])
            if 0 and config.WFM:
                feas_tgt = x_input_map_multi.data * WFM_mask
            if len_idx == 4:
                aim_feas = list(range(0, 2 * config.batch_size, 2))  # take the first speaker of each sample
                predict_multi_mask_all = predicted_masks  # bs*topk,T,F
                src = src * (1 - predicted_masks[aim_feas])  # back to batch-first, bs,T,F
                samples_list = samples
            elif len_idx == 3:
                aim_feas = list(range(1, 2 * config.batch_size, 2))  # take the second speaker of each sample
                predict_multi_mask_all[aim_feas] = predicted_masks
                feas_tgt = feas_tgt[aim_feas]
                samples_list = [samples_list[:1] + samples]
            if test_or_valid != 'test':
                if 1 and len(opt.gpus) > 1:
                    ss_loss = model.module.separation_loss(x_input_map_multi, predicted_masks, feas_tgt, )
                else:
                    ss_loss = model.separation_loss(x_input_map_multi, predicted_masks, feas_tgt)
                print('loss for ss,this batch:', ss_loss.cpu().item())
                lera.log({
                    'ss_loss_' + str(len_idx) + test_or_valid: ss_loss.cpu().item(),
                })
                del ss_loss
        predicted_masks = predict_multi_mask_all
        if batch_idx <= (500 / config.batch_size):  # only the former batches count towards the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                            dst='batch_output_test')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output_test/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError:
                print('Errors in calculating the SDR')
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss', {'SDR_sample_' + test_or_valid: sdr_aver_batch}, updates)
            # raw_input('Press any key to continue......')
        # '''
        candidate += [convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) for s in samples_list]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:], reference[-1 * config.batch_size:]))
        # alignments += [align for align in alignment]
        batch_idx += 1
        input('wait to continue......')

    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    print('hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
          % (result['hamming_loss'], result['micro_f1'], result['micro_recall'], result['micro_precision'],))
    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'],
                 result['micro_f1'], result['micro_precision'], result['micro_recall'], SDR_SUM.mean()])
    print('hamming_loss: %.8f | micro_f1: %.4f' % (result['hamming_loss'], result['micro_f1']))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    1 / 0  # deliberate stop after a single evaluation pass
    return score
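# --------------------------------------------------------------------------
# Illustrative sketch (not called by the pipeline): how per-batch SDR and
# SDRi numbers like the ones returned by bss_test.cal() are commonly
# computed with mir_eval. The real bss_test.cal reads the wav files written
# by utils.bss_eval2 into `dst`; the array shapes below are assumptions.
def _sdr_sdri_sketch(reference_sources, estimated_sources, mixture):
    """reference_sources/estimated_sources: (n_spk, n_samples); mixture: (n_samples,)."""
    import numpy as np
    from mir_eval.separation import bss_eval_sources
    # SDR of the separated estimates (speaker permutation solved internally).
    sdr_est, _, _, _ = bss_eval_sources(reference_sources, estimated_sources)
    # Baseline SDR of the unprocessed mixture, repeated once per speaker.
    mix_repeated = np.stack([mixture] * reference_sources.shape[0])
    sdr_mix, _, _, _ = bss_eval_sources(reference_sources, mix_repeated)
    return sdr_est.mean(), (sdr_est - sdr_mix).mean()  # SDR, SDRi
# --------------------------------------------------------------------------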
def eval(epoch):
    # config.batch_size=1
    model.eval()
    print('\n\nPlease set batch_size to 1 in config when testing!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms
        top_k = len(raw_tgt[0])
        # make sure the following are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS
        tgt = Variable(torch.ones(top_k + 2, config.batch_size))  # a dummy tgt; the speaker ids do not matter at test time
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum * feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        if 1 and len(opt.gpus) > 1:
            samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        else:
            samples, alignment, hiddens, predicted_masks = model.beam_sample(
                src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print('*' * 40 + '\nThe model is far from good. End the evaluation.\n' + '*' * 40)
        #     break
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask
        if 0 and test_or_valid == 'valid':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi, predicted_masks, feas_tgt, )
            else:
                ss_loss = model.separation_loss(x_input_map_multi, predicted_masks, feas_tgt)
            print('loss for ss,this batch:', ss_loss.cpu().item())
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            })
            del ss_loss, hiddens
        # '''''
        if batch_idx <= (500 / config.batch_size):  # only the former batches count towards the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                            dst='batch_output')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError as wrong_info:
                print('Errors in calculating the SDR', wrong_info)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:
            # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
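# --------------------------------------------------------------------------
# Illustrative sketch (not used above): the intent of the WFM branch is a
# Wiener-filter-like target mask, each speaker's energy divided by the total
# energy of all speakers at every time-frequency bin. Shapes and the helper
# name are assumptions made for the example.
def _wfm_mask_sketch(spk_specs):
    """spk_specs: (n_spk, T, F) magnitude spectrograms -> (n_spk, T, F) masks in [0, 1]."""
    power = spk_specs * spk_specs
    denom = power.sum(dim=0, keepdim=True) + 1e-15  # avoid division by zero
    return power / denom  # masks sum to 1 over speakers at every bin
# --------------------------------------------------------------------------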
def eval(epoch):
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data_aim('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in validloader:
    SDR_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        # for ___ in range(2):
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms
        top_k = len(raw_tgt[0])
        # make sure the following are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS
        tgt = Variable(torch.ones(top_k + 2, config.batch_size))  # a dummy tgt; the speaker ids do not matter at test time
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum * feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        try:
            if 1 and len(opt.gpus) > 1:
                # samples, alignment = model.module.sample(src, src_len)
                samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                    src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
            else:
                samples, alignment, hiddens, predicted_masks = model.beam_sample(
                    src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
            # samples, alignment, hiddens, predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        except TabError as info:
            print('**************Error occurs here************:', info)
            continue

        if config.top1:
            predicted_masks = torch.cat([predicted_masks, 1 - predicted_masks], 1)
        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        topk_max = feas_tgt.size()[1]
        assert samples[0][-1] == dict_spk2idx['<EOS>']
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask
        if test_or_valid == 'valid':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi, predicted_masks, feas_tgt, Var)
            else:
                ss_loss = model.separation_loss(x_input_map_multi, predicted_masks, feas_tgt)
            print('loss for ss,this batch:', ss_loss.data[0])
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.data[0],
            })
            del ss_loss, hiddens
        # '''''
        if batch_idx <= (500 / config.batch_size):  # only the former batches count towards the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                            dst='batch_outputjaa')
            del predicted_maps, predicted_masks, x_input_map_multi
            SDR_SUM = np.append(SDR_SUM, bss_test.cal('batch_outputjaa/'))
            print('SDR_aver_now:', SDR_SUM.mean())
            lera.log({'SDR sample': SDR_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:
            # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
        # '''
        candidate += [convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) for s in samples]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:], reference[-1 * config.batch_size:]))
        alignments += [align for align in alignment]
        batch_idx += 1
def eval(epoch):
    # config.batch_size=1
    model.eval()
    # print('\n\nPlease set batch_size to 1 in config when testing!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    # test_or_valid = 'test'
    test_or_valid = 'valid'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()
        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        top_k = len(raw_tgt[0])
        # make sure the following are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS
        tgt = Variable(torch.ones(top_k + 2, config.batch_size))  # a dummy tgt; the speaker ids do not matter at test time
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            # assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum * feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        if 1 and len(opt.gpus) > 1:
            samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                src, src_len, dict_spk2idx, tgt, config.beam_size, padded_mixture)
        else:
            samples, alignment, hiddens, predicted_masks = model.beam_sample(
                src, src_len, dict_spk2idx, tgt, config.beam_size, padded_mixture)
        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print('*' * 40 + '\nThe model is far from good. End the evaluation.\n' + '*' * 40)
        #     break
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask
        if not config.use_tas and test_or_valid == 'valid':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi, predicted_masks, feas_tgt, )
            else:
                ss_loss = model.separation_loss(x_input_map_multi, predicted_masks, feas_tgt)
            print('loss for ss,this batch:', ss_loss.cpu().item())
            # lera.log({
            #     'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            # })
            del ss_loss, hiddens
        # '''''
        if batch_idx <= (100 / config.batch_size):  # only the former batches count towards the SDR
            if config.use_tas:
                utils.bss_eval_tas(config, predicted_masks, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                                   dst='batch_output1')
            else:
                predicted_maps = predicted_masks * x_input_map_multi
                utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                                dst='batch_output1')
                del predicted_maps
            del predicted_masks, x_input_map_multi
            try:
                # SDR_SUM, SDRi_SUM = np.append(SDR_SUM, bss_test.cal('batch_output1/'))
                sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output1/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError as wrong_info:
                print('Errors in calculating the SDR', wrong_info)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            # lera.log({'SDR sample'+test_or_valid: SDR_SUM.mean()})
            # lera.log({'SDRi sample'+test_or_valid: SDRi_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:
            # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
        # '''
        candidate += [convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) for s in samples]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:], reference[-1 * config.batch_size:]))
        alignments += [align for align in alignment]
        batch_idx += 1

    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'],
                 result['micro_f1'], result['micro_precision'], result['micro_recall']])
    print('hamming_loss: %.8f | micro_f1: %.4f' % (result['hamming_loss'], result['micro_f1']))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    return score
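# --------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the model's actual implementation):
# in spirit, separation_loss compares the masked mixture spectrogram with
# the target speaker spectrograms via a mean-squared error. The helper name
# and shapes are hypothetical.
def _separation_loss_sketch(mix_multi, masks, targets):
    """mix_multi, masks, targets: (bs, topk, T, F); returns a scalar loss tensor."""
    import torch.nn.functional as F
    return F.mse_loss(masks * mix_multi, targets)
# --------------------------------------------------------------------------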
def eval(epoch, test_or_valid='valid'):
    # config.batch_size=1
    global updates, model
    model.eval()
    # print('\n\nPlease set batch_size to 1 in config when testing!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms
        top_k = len(raw_tgt[0])
        # make sure the following are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS
        tgt = Variable(torch.from_numpy(np.array(
            [[0, 1, 2, 102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # fixed indices with BOS/EOS added
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous().view(-1, siz[1], siz[2])  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])
            feas_tgt_square = feas_tgt_tmp * feas_tgt_tmp
            feas_tgt_sum_square = torch.sum(feas_tgt_square, dim=1, keepdim=True).expand(siz[0], topk_max, siz[1], siz[2])
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            feas_tgt = x_input_map_multi.view(siz[0], -1, siz[1], siz[2]).data * WFM_mask  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            WFM_mask = WFM_mask.cuda()
            del x_input_map_multi
        elif config.PSM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])
            IRM = feas_tgt_tmp / (x_input_map_multi + 1e-15)
            angle_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_angle_list']).view(siz[0], -1, siz[1], siz[2])
            angle_mix = Variable(torch.from_numpy(np.array(eval_data['mix_angle']))).unsqueeze(1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang = np.cos(angle_mix - angle_tgt)
            ang = np.clip(ang, 0, None)
            # feas_tgt = x_input_map_multi * np.clip(IRM.numpy()*ang, 0, 1)  # bs,topk,T,F
            # feas_tgt = x_input_map_multi * IRM * ang  # bs,topk,T,F
            feas_tgt = feas_tgt.view(siz[0], -1, siz[1], siz[2]) * ang  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            del x_input_map_multi
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        # the outputs here are the hidden outputs before the final classification layer
        predicted_masks, enc_attn_list = model(src, src_len, tgt, tgt_len, dict_spk2idx)
        print('predicted mask size:', predicted_masks.size(), 'should be topk,bs,T,F')  # topk,bs,T,F

        # try:
        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print('*' * 40 + '\nThe model is far from good. End the evaluation.\n' + '*' * 40)
        #     break
        topk_max = config.MAX_MIX
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])
        predicted_masks = predicted_masks.transpose(0, 1)
        # if config.WFM:
        #     feas_tgt = x_input_map_multi.data * WFM_mask

        # note: bs is now the first dimension of both tensors
        assert predicted_masks.shape == x_input_map_multi.shape
        assert predicted_masks.size(0) == config.batch_size
        if 1 and len(opt.gpus) > 1:
            ss_loss, best_pmt = model.module.separation_pit_loss(x_input_map_multi, predicted_masks, feas_tgt, )
        else:
            ss_loss, best_pmt = model.separation_pit_loss(x_input_map_multi, predicted_masks, feas_tgt)
        print('loss for ss,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
        })
        writer.add_scalars('scalar/loss', {'ss_loss_' + test_or_valid: ss_loss.cpu().item()}, updates + batch_idx)
        del ss_loss
        if batch_idx > 10:
            break
        if False:  # this part is to test the checkpoints sequentially.
            batch_idx += 1
            if batch_idx % 100 == 0:
                updates = updates + 1000
                opt.restore = '/data1/shijing_data/2020-02-14-04:58:17/Transformer_PIT_{}.pt'.format(updates)
                print('loading checkpoint...\n', opt.restore)
                checkpoints = torch.load(opt.restore)
                model.module.load_state_dict(checkpoints['model'])
                break
            continue
        # '''''
        if 0 and batch_idx <= (500 / config.batch_size):  # only the former batches count towards the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            predicted_maps = predicted_maps.view(-1, mix_speech_len, speech_fre)
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                            dst='batch_output_test')
            # utils.bss_eval(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
            #                dst='batch_output_test')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output_test/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError:
                print('Errors in calculating the SDR')
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss', {'SDR_sample_' + test_or_valid: sdr_aver_batch}, updates)
            # raw_input('Press any key to continue......')
        elif batch_idx == (200 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:
            # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
        # '''
        # candidate += [convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) for s in samples]
        # source += raw_src
        # reference += raw_tgt
        # print('samples:', samples)
        # print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:], reference[-1 * config.batch_size:]))
        # alignments += [align for align in alignment]
        batch_idx += 1

    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    print('hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
          % (result['hamming_loss'], result['micro_f1'], result['micro_recall'], result['micro_precision'],))
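# --------------------------------------------------------------------------
# Illustrative sketch (an assumption about separation_pit_loss): utterance-
# level permutation invariant training for the 2-speaker case evaluates the
# reconstruction loss under both speaker orderings and keeps the smaller one.
def _pit_loss_2spk_sketch(mix_multi, masks, targets):
    """mix_multi, masks, targets: (bs, 2, T, F). Returns (scalar loss, per-utterance best permutation)."""
    import torch
    est = masks * mix_multi
    # loss per utterance under the identity ordering (spk1, spk2)
    loss_id = ((est - targets) ** 2).view(est.size(0), -1).mean(dim=1)
    # loss per utterance under the swapped ordering (spk2, spk1)
    loss_sw = ((est - targets[:, [1, 0]]) ** 2).view(est.size(0), -1).mean(dim=1)
    losses = torch.stack([loss_id, loss_sw], dim=1)  # bs, 2
    best_perm = losses.argmin(dim=1)                 # 0 = identity, 1 = swapped
    return losses.min(dim=1)[0].mean(), best_perm
# --------------------------------------------------------------------------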
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; otherwise follow the normal decay schedule
        pass
    elif config.schedule and scheduler.get_lr()[0] > 4e-5:
        scheduler.step()
        print("Decaying learning rate to %g" % scheduler.get_lr()[0], updates)
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch
    train_data_gen = prepare_data('once', 'train')
    while True:
        if updates <= config.warmup:  # during warmup, ramp the learning rate
            tmp_lr = config.learning_rate * min(max(updates, 1) ** (-0.5),
                                                max(updates, 1) * (config.warmup ** (-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list([group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log occasionally
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
                })
        train_data = next(train_data_gen)
        if train_data == False:
            print('SDR_aver_epoch:', SDR_SUM.mean())
            print('SDRi_aver_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']]
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(raw_tgt, train_data['multi_spk_fea_list'])  # target spectrograms, bs*topk,len,fre

        # make sure the following are LongTensors
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(torch.from_numpy(np.array(
            [[0] + [dict_spk2idx[spk] for spk in spks] + (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
             for spks in raw_tgt], dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS/EOS
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS/EOS
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in train_data['multi_spk_fea_list']])).unsqueeze(0)
        if config.WFM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous().view(-1, siz[1], siz[2])  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])
            feas_tgt_square = feas_tgt_tmp * feas_tgt_tmp
            feas_tgt_sum_square = torch.sum(feas_tgt_square, dim=1, keepdim=True).expand(siz[0], topk_max, siz[1], siz[2])
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            feas_tgt = x_input_map_multi.view(siz[0], -1, siz[1], siz[2]).data * WFM_mask  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            WFM_mask = WFM_mask.cuda()
            del x_input_map_multi
        elif config.PSM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])
            IRM = feas_tgt_tmp / (x_input_map_multi + 1e-15)
            angle_tgt = models.rank_feas(raw_tgt, train_data['multi_spk_angle_list']).view(siz[0], -1, siz[1], siz[2])  # bs,topk,T,F
            angle_mix = Variable(torch.from_numpy(np.array(train_data['mix_angle']))).unsqueeze(1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang = np.cos(angle_mix - angle_tgt)
            ang = np.clip(ang, 0, None)
            # feas_tgt = x_input_map_multi * np.clip(IRM.numpy()*ang, 0, 1)  # bs,topk,T,F
            # feas_tgt = x_input_map_multi * IRM * ang  # bs,topk,T,F
            feas_tgt = feas_tgt.view(siz[0], -1, siz[1], siz[2]) * ang  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            del x_input_map_multi
        elif config.frame_mask:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])
            feas_tgt_time = torch.sum(feas_tgt_tmp, 3).transpose(1, 2)  # bs,T,topk
            for v1 in feas_tgt_time:
                for v2 in v1:
                    if v2[0] > v2[1]:
                        v2[0] = 1
                        v2[1] = 0
                    else:
                        v2[0] = 0
                        v2[1] = 1
            frame_mask = feas_tgt_time.transpose(1, 2).unsqueeze(-1)  # bs,topk,T,1
            feas_tgt = x_input_map_multi * frame_mask
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        # aim_list holds the flat indices of positions with real speakers
        aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) != dict_spk2idx['<EOS>']).nonzero().squeeze()
        aim_list = aim_list.data.cpu().numpy()

        # the outputs here are the hidden outputs before the final classification layer
        multi_mask, enc_attn_list = model(src, src_len, tgt, tgt_len, dict_spk2idx)
        print('mask size:', multi_mask.size())  # topk,bs,T,F
        # print('mask:', multi_mask[0,0,:3:3])  # topk,bs,T,F
        # writer.add_histogram('global gamma', gamma, updates)

        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk_max channel.
        siz = src.size()
        assert len(siz) == 3
        topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # .view(-1, siz[1], siz[2])
        # x_input_map_multi = x_input_map_multi[aim_list]
        # x_input_map_multi = x_input_map_multi.transpose(0, 1)  # topk,bs,T,F
        multi_mask = multi_mask.transpose(0, 1)
        # if config.WFM:
        #     feas_tgt = x_input_map_multi.data * WFM_mask

        # note: bs is now the first dimension of both tensors
        assert multi_mask.shape == x_input_map_multi.shape
        assert multi_mask.size(0) == config.batch_size
        if 1 and len(opt.gpus) > 1:  # run SS first to obtain the permutation
            ss_loss, best_pmt = model.module.separation_pit_loss(x_input_map_multi, multi_mask, feas_tgt)
        else:
            ss_loss, best_pmt = model.separation_pit_loss(x_input_map_multi, multi_mask, feas_tgt)
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()}, updates)
        loss = ss_loss
        loss.backward()
        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'ss_loss': ss_loss.cpu().item(),
        })

        if updates > 3 and updates % config.eval_interval in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]:
            assert multi_mask.shape == x_input_map_multi.shape
            assert multi_mask.size(0) == config.batch_size
            predicted_maps = (multi_mask * x_input_map_multi).view(siz[0] * topk_max, siz[1], siz[2])
            # predicted_maps=Variable(feas_tgt)
            # utils.bss_eval(config, predicted_maps, train_data['multi_spk_fea_list'], raw_tgt, train_data, dst=log_path+'batch_output/')
            utils.bss_eval2(config, predicted_maps, train_data['multi_spk_fea_list'], raw_tgt, train_data,
                            dst=log_path + 'batch_output')
            del predicted_maps, multi_mask, x_input_map_multi
            sdr_aver_batch, sdri_aver_batch = bss_test.cal(log_path + 'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {'SDR_sample': sdr_aver_batch, 'SDRi_sample': sdri_aver_batch}, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())

        # Heatmap here
        # n_layer tensors of (head*bs) x lq x dk
        '''
        import matplotlib.pyplot as plt
        ax = plt.gca()
        ax.invert_yaxis()
        raw_src = models.rank_feas(raw_tgt, train_data['multi_spk_fea_list'])
        att_idx = 1
        att = enc_attn_list[-1].view(config.trans_n_head, config.batch_size,
                                     mix_speech_len, mix_speech_len).data.cpu().numpy()[:, att_idx]
        for head in range(config.trans_n_head):
            xx = att[head]
            plt.matshow(xx, cmap=plt.cm.hot, vmin=0, vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path + 'batch_output/' + 'head_{}.png'.format(head))
        plt.matshow(raw_src[att_idx * 2 + 0].transpose(0, 1), cmap=plt.cm.hot, vmin=0, vmax=2)
        plt.colorbar()
        plt.savefig(log_path + 'batch_output/' + 'source0.png')
        plt.matshow(raw_src[att_idx * 2 + 1].transpose(0, 1), cmap=plt.cm.hot, vmin=0, vmax=2)
        plt.colorbar()
        plt.savefig(log_path + 'batch_output/' + 'source1.png')
        1/0
        '''

        total_loss += loss.cpu().item()
        optim.step()
        updates += 1
        if updates % 30 == 0:
            logging("time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,ss loss: %6.6f\n"
                    % (time.time() - start_time, epoch, updates, loss, total_loss_ss / 30.0))
            total_loss_sgm, total_loss_ss = 0, 0

        # continue
        if 0 and updates % config.eval_interval == 0 and epoch > 3:
            # better to train at least a few epochs before evaluating; otherwise the model has learned little and the results are meaningless.
            logging("time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n"
                    % (time.time() - start_time, epoch, updates, total_loss / config.eval_interval))
            print('evaluating after %d updates...\r' % updates)
            eval(epoch, 'valid')  # batch_size becomes 1 during eval
            eval(epoch, 'test')  # batch_size becomes 1 during eval
            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0
        if 1 and updates % config.save_interval == 1:
            save_model(log_path + 'Transformer_PIT_{}.pt'.format(updates))
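# --------------------------------------------------------------------------
# Illustrative sketch of the warmup schedule used in train(): the learning
# rate ramps roughly linearly for the first `warmup` updates and then decays
# as 1/sqrt(step), i.e. the Transformer "Noam" schedule. The default
# constants below are placeholders, not values from config.
def _warmup_lr_sketch(step, base_lr=1e-3, warmup=4000):
    step = max(step, 1)
    return base_lr * min(step ** (-0.5), step * warmup ** (-1.5))
# e.g. _warmup_lr_sketch(1) is tiny, the value peaks around step == warmup,
# and it keeps shrinking as the update count grows beyond that.
# --------------------------------------------------------------------------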
def eval(epoch):
    # config.batch_size=1
    model.eval()
    # print('\n\nPlease set batch_size to 1 in config when testing!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    # test_or_valid = 'test'
    test_or_valid = 'valid'
    # test_or_valid = 'train'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms
        top_k = len(raw_tgt[0])
        # make sure the following are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS
        tgt = Variable(torch.from_numpy(np.array(
            [[0, 1, 2, 102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # fixed indices with BOS/EOS added
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 3
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_sum_square = torch.sum(feas_tgt_square, dim=0, keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        # try:
        if 1 and len(opt.gpus) > 1:
            samples, predicted_masks = model.module.pit_sample(src, src_len, dict_spk2idx, tgt,
                                                               beam_size=config.beam_size)
        else:
            samples, predicted_masks = model.pit_sample(src, src_len, dict_spk2idx, tgt,
                                                        beam_size=config.beam_size)
        samples = samples.max(2)[1].data.cpu().numpy()
        # except:
        #     continue
        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print('*' * 40 + '\nThe model is far from good. End the evaluation.\n' + '*' * 40)
        #     break
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])
        if 1 and config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask
        if test_or_valid != 'test':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi, predicted_masks, feas_tgt, )
            else:
                ss_loss = model.separation_loss(x_input_map_multi, predicted_masks, feas_tgt)
            print('loss for ss,this batch:', ss_loss.cpu().item())
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            })
            del ss_loss
        # '''''
        if 1 and batch_idx <= (500 / config.batch_size):  # only the former batches count towards the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                            dst='batch_output_test')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output_test/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError:
                print('Errors in calculating the SDR')
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss', {'SDR_sample_' + test_or_valid: sdr_aver_batch}, updates)
            # raw_input('Press any key to continue......')
        elif batch_idx == (200 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:
            # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
        # '''
        candidate += [convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) for s in samples]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:], reference[-1 * config.batch_size:]))
        # alignments += [align for align in alignment]
        batch_idx += 1

    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    print('hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
          % (result['hamming_loss'], result['micro_f1'], result['micro_recall'], result['micro_precision'],))
    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'],
                 result['micro_f1'], result['micro_precision'], result['micro_recall'], SDR_SUM.mean()])
    print('hamming_loss: %.8f | micro_f1: %.4f' % (result['hamming_loss'], result['micro_f1']))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    1 / 0  # deliberate stop after a single evaluation pass
    return score
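# --------------------------------------------------------------------------
# Illustrative sketch (an assumption about utils.eval_metrics): the speaker-
# identification scores reported above are standard multi-label metrics over
# binarized reference/candidate speaker sets, e.g. via scikit-learn. The
# helper name and argument layout are hypothetical.
def _speaker_metrics_sketch(reference, candidate, all_speakers):
    """reference/candidate: lists of speaker-id lists; all_speakers: the full label set."""
    from sklearn.preprocessing import MultiLabelBinarizer
    from sklearn.metrics import f1_score, hamming_loss
    mlb = MultiLabelBinarizer(classes=sorted(all_speakers))
    y_true = mlb.fit_transform(reference)
    y_pred = mlb.transform(candidate)
    return {
        'hamming_loss': hamming_loss(y_true, y_pred),
        'micro_f1': f1_score(y_true, y_pred, average='micro'),
    }
# --------------------------------------------------------------------------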