Example #1
def eval(epoch):
    # config.batch_size=1
    model.eval()
    # Note: please set config.batch_size to 1 when testing.
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    # test_or_valid = 'valid'
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        top_k = len(raw_tgt[0])
        # make sure the tensors below are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices and add BOS/EOS symbols.
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS symbols.
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,3,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS symbols.
        tgt = Variable(
            torch.from_numpy(
                np.array([
                    list(range(top_k + 1)) + [102]
                    for __ in range(config.batch_size)
                ],
                         dtype=np.int64))).transpose(0, 1)  # convert to indices and add BOS/EOS symbols.
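        # For illustration (hypothetical values): with top_k = 2 and batch_size = 2 the array
        # above is [[0, 1, 2, 102], [0, 1, 2, 102]] -- a leading 0 as BOS, one index per
        # expected speaker, and 102 as the assumed <EOS> index -- transposed to shape
        # (top_k + 2, batch_size) so that the sequence dimension comes first.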

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 3
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_sum_square = torch.sum(feas_tgt_square,
                                            dim=0,
                                            keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        if 1 and len(opt.gpus) > 1:
            outputs, pred, targets, multi_mask, dec_enc_attn_list = model(
                src,
                src_len,
                tgt,
                tgt_len,
                dict_spk2idx,
                None,
                mix_wav=padded_mixture
            )  # outputs are the hidden_outputs (pre-classification hidden states) and can be used directly
        else:
            outputs, pred, targets, multi_mask, dec_enc_attn_list = model(
                src,
                src_len,
                tgt,
                tgt_len,
                dict_spk2idx,
                None,
                mix_wav=padded_mixture
            )  # outputs are the hidden_outputs (pre-classification hidden states) and can be used directly
        samples = list(
            pred.view(config.batch_size, top_k + 1,
                      -1).max(2)[1].data.cpu().numpy())
        '''

        if 1 and len(opt.gpus) > 1:
            samples,  predicted_masks = model.module.beam_sample(src, src_len, dict_spk2idx, tgt, config.beam_size,None,padded_mixture)
        else:
            samples,  predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, config.beam_size, None, padded_mixture)
            multi_mask = predicted_masks
            samples=[samples]
        # except:
        #     continue

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print '*'*40+'\nThe model is far from good. End the evaluation.\n'+'*'*40
        #     break
        topk_max = top_k
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk_max, siz[1],
                                                      siz[2])
        multi_mask = multi_mask.transpose(0, 1)

        if test_or_valid != 'test':
            if config.use_tas:
                if 1 and len(opt.gpus) > 1:
                    ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                        padded_mixture, multi_mask, padded_source,
                        mixture_lengths)
                else:
                    ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                        padded_mixture, multi_mask, padded_source,
                        mixture_lengths)
            print(('loss for ss,this batch:', ss_loss.cpu().item()))
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            })
            del ss_loss

        # '''''
        if 1 and batch_idx <= (500 / config.batch_size
                               ):  # only the first batches count toward the SDR
            utils.bss_eval_tas(config,
                               multi_mask,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst=log_path + 'batch_output/')
            del multi_mask, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(log_path +
                                                               'batch_output/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError:
                print('Errors in calculating the SDR')
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss',
                               {'SDR_sample_' + test_or_valid: sdr_aver_batch},
                               updates)
            # raw_input('Press any key to continue......')
        elif batch_idx == (200 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  # only record the best SDR once.
            print(('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean())))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
        '''
        import matplotlib.pyplot as plt
        ax = plt.gca()
        ax.invert_yaxis()

        raw_src=models.rank_feas(raw_tgt,eval_data['multi_spk_fea_list'])
        att_idx=0
        att =dec_enc_attn_list.data.cpu().numpy()[:,att_idx] # head,topk,T
        for spk in range(3):
            xx=att[:,spk]
            plt.matshow(xx.reshape(8,1,-1).repeat(50,1).reshape(-1,751), cmap=plt.cm.hot, vmin=0,vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'spk_{}.png'.format(spk))
            plt.matshow(xx.sum(0).reshape(1, 1, -1).repeat(50, 1).reshape(-1, 751), cmap=plt.cm.hot, vmin=0, vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path + 'batch_output/' + 'spk_sum_{}.png'.format(spk))
        for head in range(8):
            xx=att[head]
            plt.matshow(xx.reshape(3,1,-1).repeat(100,1).reshape(-1,751), cmap=plt.cm.hot, vmin=0,vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'head_{}.png'.format(head))
        plt.matshow(raw_src[att_idx*2+0].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
        plt.colorbar()
        plt.savefig(log_path+'batch_output/'+'source0.png')
        plt.matshow(raw_src[att_idx*2+1].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
        plt.colorbar()
        plt.savefig(log_path+'batch_output/'+'source1.png')
        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print(('samples:', samples))
        print(('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                         reference[-1 * config.batch_size:])))
        # alignments += [align for align in alignment]
        batch_idx += 1

        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))

    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'], \
                 result['micro_f1'], result['micro_precision'], result['micro_recall'],SDR_SUM.mean()])
    print(('hamming_loss: %.8f | micro_f1: %.4f' %
           (result['hamming_loss'], result['micro_f1'])))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    return score
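
A hedged sketch of the per-utterance SDR / SDRi numbers that bss_test.cal averages above; the use of mir_eval and the array shapes are assumptions for illustration, not part of the original pipeline.

# Sketch only: SDR of the estimates plus SDR improvement over the unprocessed mixture.
import numpy as np
from mir_eval.separation import bss_eval_sources

def sdr_and_sdri(reference, estimate, mixture):
    """reference, estimate: (n_src, n_samples); mixture: (n_samples,)."""
    sdr, _, _, _ = bss_eval_sources(reference, estimate)
    # Baseline SDR: score the raw mixture against every reference source.
    sdr_mix, _, _, _ = bss_eval_sources(reference, np.tile(mixture, (reference.shape[0], 1)))
    return sdr.mean(), (sdr - sdr_mix).mean()  # (SDR, SDRi) for this utterance
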
Example #2
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; otherwise follow the normal schedule
        pass
    elif config.schedule and scheduler.get_lr()[0] > 5e-7:
        scheduler.step()
        print(("Decaying learning rate to %g" % scheduler.get_lr()[0]))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    # train_data_gen = prepare_data('once', 'train')
    train_data_gen = musdb.DB(root="~/MUSDB18/",
                              subsets='train',
                              split='train')
    train_data_gen = batch_generator(
        list(train_data_gen),
        config.batch_size,
    )
    # while 1:
    #     mix,ref=next(train_data_gen)
    #     import soundfile as sf
    #     sf.write('mix.wav',mix[0,0],44100)
    #     sf.write('vocal.wav',ref[0,0,0],44100)
    #     sf.write('drum.wav',ref[0,1,0],44100)
    #     sf.write('bass.wav',ref[0,2,0],44100)
    #     sf.write('other.wav',ref[0,3,0],44100)
    #     pass

    while True:
        if updates <= config.warmup:  # ramp the learning rate during the warmup phase
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log periodically
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch

        padded_mixture, mixture_lengths, padded_source = train_data
        # source:bs,2channel,T  target:bs,4(vocals,drums,bass,other),2channel,T
        padded_mixture = torch.from_numpy(padded_mixture).float()
        topk_this_batch = padded_source.shape[1]
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        # make sure the tensors below are LongTensors

        if use_cuda:
            padded_mixture = padded_mixture.cuda().transpose(0, 1)
            mixture_lengths = mixture_lengths.cuda()
            padded_source = padded_source.cuda()
            # src = src.cuda().transpose(0, 1)
            # tgt = tgt.cuda()
            # src_len = src_len.cuda()
            # tgt_len = tgt_len.cuda()
            # feas_tgt = feas_tgt.cuda()

        if 0 and loss < -5:
            import soundfile as sf
            idx_in_batch = 0
            sf.write(
                str(idx_in_batch) + '_mix.wav',
                padded_mixture.transpose(
                    0, 1).data.cpu().numpy()[idx_in_batch].transpose(), 44100)
            sf.write(
                str(idx_in_batch) + '_ref_vocal.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 0].transpose(),
                44100)
            sf.write(
                str(idx_in_batch) + '_ref_drum.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 1].transpose(),
                44100)
            sf.write(
                str(idx_in_batch) + '_ref_bass.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 2].transpose(),
                44100)
            sf.write(
                str(idx_in_batch) + '_ref_other.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 3].transpose(),
                44100)

        model.zero_grad()
        outputs, pred, spks_ordre_list, multi_mask, y_map = model(
            None,
            None,
            None,
            None,
            dict_spk2idx,
            None,
            mix_wav=padded_mixture,
            clean_wavs=padded_source.transpose(
                0, 1))  # outputs are the hidden_outputs (pre-classification hidden states) and can be used directly
        print('mask size:', multi_mask.size())
        print('y map size:', y_map.size())
        # print('spk order:', spks_ordre_list) # bs,topk
        # writer.add_histogram('global gamma',gamma, updates)
        multi_mask = multi_mask.transpose(0, 1)
        y_map = y_map.transpose(0, 1)
        spks_ordre_list = spks_ordre_list.transpose(0, 1)

        # expand the raw mixed-features to topk_max channel.
        topk_max = topk_this_batch  # maximum possible number of sources (topk)

        if config.greddy_tf and config.add_last_silence:
            multi_mask, silence_channel = torch.split(multi_mask,
                                                      [topk_this_batch, 1],
                                                      dim=1)
            silence_channel = silence_channel[:, 0]
            assert len(padded_source.shape) == 3
            # padded_source = torch.cat([padded_source,torch.zeros(padded_source.size(0),1,padded_source.size(2))],1)
            if 1 and len(opt.gpus) > 1:
                ss_loss_silence = model.module.silence_loss(silence_channel)
            else:
                ss_loss_silence = model.silence_loss(silence_channel)
            print('loss for SS silence,this batch:',
                  ss_loss_silence.cpu().item())
            writer.add_scalars(
                'scalar/loss',
                {'ss_loss_silence': ss_loss_silence.cpu().item()}, updates)
            lera.log({'ss_loss_silence': ss_loss_silence.cpu().item()})
            if torch.isnan(ss_loss_silence):
                ss_loss_silence = 0

        if config.use_tas:
            # print('source',padded_source)
            # print('est', multi_mask)
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_tas_sdr_order_loss(
                    padded_mixture.transpose(0, 1), multi_mask, y_map,
                    mixture_lengths)
            else:
                ss_loss = model.separation_tas_sdr_order_loss(
                    padded_mixture, multi_mask, y_map, mixture_lengths)
            # best_pmt=[list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx]

        print('loss for SS,this batch:', ss_loss.cpu().item())
        # print('best perms for this batch:', best_pmt)
        print('greedy perms for this batch:',
              [ii for ii in spks_ordre_list.data.cpu().numpy()])
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        loss = ss_loss
        if config.add_last_silence:
            loss = loss + 0.1 * ss_loss_silence
        loss.backward()

        # print 'totallllllllllll loss:',loss
        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'ss_loss_' + str(topk_this_batch): ss_loss.cpu().item(),
            'loss:': loss.cpu().item(),
            'pre_min': multi_mask.data.cpu().numpy().min(),
            'pre_max': multi_mask.data.cpu().numpy().max(),
        })

        if 1 or loss < -5:
            import soundfile as sf
            idx_in_batch = 0
            y0 = multi_mask.data.cpu().numpy()[idx_in_batch, 0]
            y1 = multi_mask.data.cpu().numpy()[idx_in_batch, 1]
            y2 = multi_mask.data.cpu().numpy()[idx_in_batch, 2]
            y3 = multi_mask.data.cpu().numpy()[idx_in_batch, 3]
            # sf.write(str(idx_in_batch)+'_pre_0.wav',multi_mask.data.cpu().numpy()[idx_in_batch,0].transpose(),44100)
            # sf.write(str(idx_in_batch)+'_pre_1.wav',multi_mask.data.cpu().numpy()[idx_in_batch,1].transpose(),44100)
            # sf.write(str(idx_in_batch)+'_pre_2.wav',multi_mask.data.cpu().numpy()[idx_in_batch,2].transpose(),44100)
            # sf.write(str(idx_in_batch)+'_pre_3.wav',multi_mask.data.cpu().numpy()[idx_in_batch,3].transpose(),44100)
            print('y0 range:', y0.min(), y0.max())
            print('y1 range:', y1.min(), y1.max())
            print('y2 range:', y2.min(), y2.max())
            print('y3 range:', y3.min(), y3.max())
            # input('wait')
            print('*' * 50)

        if 0 and updates > 10 and updates % config.eval_interval in [
                0, 1, 2, 3, 4, 5
        ]:
            utils.bss_eval_tas(config,
                               multi_mask,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst=log_path + '/batch_output1')
            sdr_aver_batch, sdri_aver_batch = bss_test.cal(log_path +
                                                           '/batch_output1/')

            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': sdri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))

        total_loss += loss.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, 0,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0, 0))
            # lera.log({'label_acc':report_correct/report_total})
            # writer.add_scalars('scalar/loss',{'label_acc':report_correct/report_total},updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # run a few epochs before evaluating; otherwise the model has learned too little and results are unreliable
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates, 0))
            print(('evaluating after %d updates...\r' % updates))
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size becomes 1 during eval
            # print 'Orignal bs:',original_bs
            config.batch_size = original_bs
            # print 'Now bs:',config.batch_size
            for metric in config.metric:
                scores[metric].append(score[metric])
                lera.log({
                    'sgm_micro_f1': score[metric],
                })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if updates > 10 and updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv4_conditional_{}.pt'.format(updates))
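
The warmup branch in the training loop above ramps the learning rate with an inverse-square-root rule before the scheduler takes over. A standalone sketch of that rule follows; base_lr and warmup stand in for config.learning_rate and config.warmup, and the default values are illustrative only.

# Sketch of the warmup rule used above: linear ramp for `warmup` steps, then 1/sqrt(step) decay.
def warmup_lr(step, base_lr=1e-3, warmup=4000):
    step = max(step, 1)
    return base_lr * min(step ** -0.5, step * warmup ** -1.5)

# At step == warmup both terms meet at base_lr * warmup ** -0.5, after which the lr decays.
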
Example #3
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; otherwise follow the normal schedule
        pass
    elif config.schedule and scheduler.get_lr()[0] > 5e-7:
        scheduler.step()
        print(("Decaying learning rate to %g" % scheduler.get_lr()[0]))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
        if updates <= config.warmup:  # ramp the learning rate during the warmup phase
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log periodically
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']]
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_fea_list'])  # target spectrograms: aim_size, len, freq

        padded_mixture, mixture_lengths, padded_source = train_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure the tensors below are LongTensors
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(
            torch.from_numpy(
                np.array(
                    [[0] + [dict_spk2idx[spk] for spk in spks] +
                     (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
                     for spks in raw_tgt],
                    dtype=np.int64))).transpose(0, 1)  # convert to indices, padded with BOS at the front and <EOS> at the back.
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS symbols.
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in train_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        if config.WFM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of sources (topk)
            topk_max = 2  # maximum possible number of sources (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1],
                siz[2]).contiguous().view(-1, siz[1], siz[2])  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_square = feas_tgt_tmp * feas_tgt_tmp
            feas_tgt_sum_square = torch.sum(feas_tgt_square,
                                            dim=1,
                                            keepdim=True).expand(
                                                siz[0], topk_max, siz[1],
                                                siz[2])
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            feas_tgt = x_input_map_multi.view(
                siz[0], -1, siz[1], siz[2]).data * WFM_mask  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            WFM_mask = WFM_mask.cuda()
            del x_input_map_multi

        elif config.PSM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of sources (topk)
            topk_max = 2  # maximum possible number of sources (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            IRM = feas_tgt_tmp / (x_input_map_multi + 1e-15)

            angle_tgt = models.rank_feas(
                raw_tgt, train_data['multi_spk_angle_list']).view(
                    siz[0], -1, siz[1], siz[2])
            angle_mix = Variable(
                torch.from_numpy(np.array(
                    train_data['mix_angle']))).unsqueeze(1).expand(
                        siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang = np.cos(angle_mix - angle_tgt)
            ang = np.clip(ang, 0, None)

            feas_tgt = x_input_map_multi * IRM * ang  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            del x_input_map_multi

        elif config.frame_mask:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of sources (topk)
            topk_max = 2  # maximum possible number of sources (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_time = torch.sum(feas_tgt_tmp, 3).transpose(1,
                                                                 2)  #bs,T,topk
            for v1 in feas_tgt_time:
                for v2 in v1:
                    if v2[0] > v2[1]:
                        v2[0] = 1
                        v2[1] = 0
                    else:
                        v2[0] = 0
                        v2[1] = 1
            frame_mask = feas_tgt_time.transpose(1,
                                                 2).unsqueeze(-1)  #bs,topk,t,1
            feas_tgt = x_input_map_multi * frame_mask
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
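            # Editorial sketch: the nested loops above hard-assign each frame to the speaker
            # with the larger summed energy (two-speaker case). An equivalent vectorised form
            # would be, assuming feas_tgt_time is (bs, T, topk):
            #   dominant = feas_tgt_time.argmax(dim=2, keepdim=True)
            #   feas_tgt_time = torch.zeros_like(feas_tgt_time).scatter_(2, dominant, 1.0)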

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        # aim_list holds the indices of positions with an actual speaker (i.e. not <EOS>)
        aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) !=
                    dict_spk2idx['<EOS>']).nonzero().squeeze()
        aim_list = aim_list.data.cpu().numpy()

        outputs, pred, targets, multi_mask, dec_enc_attn_list = model(
            src,
            src_len,
            tgt,
            tgt_len,
            dict_spk2idx,
            None,
            mix_wav=padded_mixture
        )  # outputs are the hidden_outputs (pre-classification hidden states) and can be used directly
        print('mask size:', multi_mask.size())
        # writer.add_histogram('global gamma',gamma, updates)

        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk_max channel.
        siz = src.size()
        assert len(siz) == 3
        topk_max = config.MAX_MIX  # maximum possible number of sources (topk)
        x_input_map_multi = torch.unsqueeze(src, 1).expand(
            siz[0], topk_max, siz[1],
            siz[2]).contiguous()  #.view(-1, siz[1], siz[2])
        # x_input_map_multi = x_input_map_multi[aim_list]
        multi_mask = multi_mask.transpose(0, 1)
        # if config.WFM:
        #     feas_tgt = x_input_map_multi.data * WFM_mask

        if config.use_tas:
            if 1 and len(opt.gpus) > 1:
                ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                    padded_mixture, multi_mask, padded_source, mixture_lengths)
            else:
                ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                    padded_mixture, multi_mask, padded_source, mixture_lengths)
            best_pmt = [
                list(pmt_list[int(mm)].data.cpu().numpy())
                for mm in max_snr_idx
            ]
        else:
            if 1 and len(opt.gpus) > 1:  # 先ss获取Perm
                ss_loss, best_pmt = model.module.separation_pit_loss(
                    x_input_map_multi, multi_mask, feas_tgt)
            else:
                ss_loss, best_pmt = model.separation_pit_loss(
                    x_input_map_multi, multi_mask, feas_tgt)

        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        # reorder the speaker prediction targets according to best_pmt
        targets = targets.transpose(0, 1)  # bs, aim+1 (including EOS)
        # print('targets',targets)
        targets_old = targets
        for idx, (tar, per) in enumerate(zip(targets, best_pmt)):
            per.append(topk_max)  # append the tail index so the final <EOS> stays in place
            targets_old[idx] = tar[per]
        targets = targets_old.transpose(0, 1)
        # print('targets',targets)
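        # Editorial note: best_pmt gives, per batch item, the speaker order that minimised the
        # PIT separation loss; appending topk_max keeps the trailing <EOS> slot fixed while the
        # preceding target indices are permuted to match. Hypothetical example: tar = [s1, s2, EOS]
        # with per = [1, 0] becomes [s2, s1, EOS] after per.append(2).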

        if 1 and len(opt.gpus) > 1:
            sgm_loss, num_total, num_correct = model.module.compute_loss(
                outputs, targets, opt.memory)
        else:
            sgm_loss, num_total, num_correct = model.compute_loss(
                outputs, targets, opt.memory)
        print(('loss for SGM,this batch:', sgm_loss.cpu().item()))
        writer.add_scalars('scalar/loss', {'sgm_loss': sgm_loss.cpu().item()},
                           updates)
        if config.use_center_loss:
            cen_alpha = 0.01
            cen_loss = center_loss(outputs.view(-1, config.SPK_EMB_SIZE),
                                   targets.view(-1))
            print(('loss for SGM center loss,this batch:',
                   cen_loss.cpu().item()))
            writer.add_scalars('scalar/loss',
                               {'center_loss': cen_loss.cpu().item()}, updates)

        if not config.use_tas:
            loss = sgm_loss + 5 * ss_loss
        else:
            loss = 50 * sgm_loss + ss_loss

        loss.backward()

        if config.use_center_loss:
            for c_param in center_loss.parameters():
                c_param.grad.data *= (0.01 /
                                      (cen_alpha * scheduler.get_lr()[0]))
        # print 'totallllllllllll loss:',loss
        total_loss_sgm += sgm_loss.cpu().item()
        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'sgm_loss': sgm_loss.cpu().item(),
            'ss_loss': ss_loss.cpu().item(),
            'loss:': loss.cpu().item(),
        })

        if updates > 10 and updates % config.eval_interval in [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
        ]:
            if not config.use_tas:
                predicted_maps = multi_mask * x_input_map_multi.view(
                    siz[0] * topk_max, siz[1], siz[2])
                # predicted_maps=Variable(feas_tgt) # 这个是groundTruth
                utils.bss_eval(config,
                               predicted_maps,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst='batch_output1')
                del predicted_maps, multi_mask, x_input_map_multi
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(
                    'batch_output1/')
            else:
                utils.bss_eval_tas(config,
                                   multi_mask,
                                   train_data['multi_spk_fea_list'],
                                   raw_tgt,
                                   train_data,
                                   dst='batch_output1')
                del x_input_map_multi
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(
                    'batch_output1/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': sdri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))

        total_loss += loss.cpu().item()
        report_correct += num_correct.cpu().item()
        report_total += num_total.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss / num_total,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0,
                   report_correct / report_total))
            lera.log({'label_acc': report_correct / report_total})
            writer.add_scalars('scalar/loss',
                               {'label_acc': report_correct / report_total},
                               updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # run a few epochs before evaluating; otherwise the model has learned too little and results are unreliable
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / report_total))
            print(('evaluating after %d updates...\r' % updates))
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size becomes 1 during eval
            # print 'Orignal bs:',original_bs
            config.batch_size = original_bs
            # print 'Now bs:',config.batch_size
            for metric in config.metric:
                scores[metric].append(score[metric])
                lera.log({
                    'sgm_micro_f1': score[metric],
                })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if 1 and updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv3_PIT_{}.pt'.format(updates))
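
The config.WFM branches above build a Wiener-filter-like magnitude mask from the target spectrograms. A small self-contained sketch of that computation follows; the function name and tensor layout are illustrative, and the epsilon matches the 1e-15 used in the code.

# Sketch: each speaker's mask is its squared magnitude over the sum of squared magnitudes.
import torch

def wiener_like_mask(feas_tgt, eps=1e-15):
    """feas_tgt: (bs, topk, T, F) target magnitude spectrograms."""
    power = feas_tgt * feas_tgt
    denom = power.sum(dim=1, keepdim=True).expand_as(power)
    return power / (denom + eps)  # (bs, topk, T, F); sums to ~1 across the topk dimension
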
Example #4
def eval(epoch):
    # config.batch_size=1
    model.eval()
    # Note: please set config.batch_size to 1 when testing.
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    # test_or_valid = 'test'
    test_or_valid = 'valid'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        raw_tgt = [
            sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']
        ]
        feas_tgt = models.rank_feas(
            raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms
        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        top_k = len(raw_tgt[0])
        # make sure the tensors below are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices and add BOS/EOS symbols.
        tgt = Variable(torch.ones(
            top_k + 2, config.batch_size))  # a dummy tgt; at test time the actual speaker names in tgt do not matter.

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            #assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum *
                                   feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        if 1 and len(opt.gpus) > 1:
            samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                src, src_len, dict_spk2idx, tgt, config.beam_size,
                padded_mixture)
        else:
            samples, alignment, hiddens, predicted_masks = model.beam_sample(
                src, src_len, dict_spk2idx, tgt, config.beam_size,
                padded_mixture)

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print '*'*40+'\nThe model is far from good. End the evaluation.\n'+'*'*40
        #     break
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk_max, siz[1],
                                                      siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask

        if not config.use_tas and test_or_valid == 'valid':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(
                    x_input_map_multi,
                    predicted_masks,
                    feas_tgt,
                )
            else:
                ss_loss = model.separation_loss(x_input_map_multi,
                                                predicted_masks, feas_tgt)
            print('loss for ss,this batch:', ss_loss.cpu().item())
            # lera.log({
            #     'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            # })
            del ss_loss, hiddens

        # '''''
        if batch_idx <= (100 / config.batch_size
                         ):  # only the first batches count toward the SDR
            if config.use_tas:
                utils.bss_eval_tas(config,
                                   predicted_masks,
                                   eval_data['multi_spk_fea_list'],
                                   raw_tgt,
                                   eval_data,
                                   dst='batch_output1')
            else:
                predicted_maps = predicted_masks * x_input_map_multi
                utils.bss_eval2(config,
                                predicted_maps,
                                eval_data['multi_spk_fea_list'],
                                raw_tgt,
                                eval_data,
                                dst='batch_output1')
                del predicted_maps
            del predicted_masks, x_input_map_multi
            try:
                #SDR_SUM,SDRi_SUM = np.append(SDR_SUM, bss_test.cal('batch_output1/'))
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(
                    'batch_output1/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError as wrong_info:
                print('Errors in calculating the SDR', wrong_info)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            # lera.log({'SDR sample'+test_or_valid: SDR_SUM.mean()})
            # lera.log({'SDRi sample'+test_or_valid: SDRi_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))

        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                        reference[-1 * config.batch_size:]))
        alignments += [align for align in alignment]
        batch_idx += 1

    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'], \
                 result['micro_f1'], result['micro_precision'], result['micro_recall']])
    print('hamming_loss: %.8f | micro_f1: %.4f' %
          (result['hamming_loss'], result['micro_f1']))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    return score
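
The evaluation loops convert the sampled index sequences back to speaker labels with convertToLabels before computing the multi-label metrics. Its definition is not shown in this file; a plausible minimal version, consistent with how it is called here (an index-to-label dict, a sequence of indices, and the <EOS> index at which to stop), might look like this.

# Hedged sketch of convertToLabels as used above; the real helper may differ.
def convertToLabels(dict_idx2spk, indices, eos_idx):
    labels = []
    for idx in indices:
        idx = int(idx)
        if idx == eos_idx:
            break  # stop at the first <EOS>
        labels.append(dict_idx2spk.get(idx, str(idx)))
    return labels
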
Example #5
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if config.schedule and scheduler.get_lr()[0] > 5e-5:
        scheduler.step()
        print("Decaying learning rate to %g" % scheduler.get_lr()[0])

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
        # print '\n'
        # train_data = train_data_gen.next()
        train_data = next(train_data_gen)
        if train_data == False:
            print('SDR_aver_epoch:', SDR_SUM.mean())
            print('SDRi_aver_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_fea_list'])  # target spectrograms: aim_size, len, freq

        padded_mixture, mixture_lengths, padded_source = train_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure the tensors below are LongTensors
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(
            torch.from_numpy(
                np.array(
                    [[0] + [dict_spk2idx[spk] for spk in spks] +
                     (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
                     for spks in raw_tgt],
                    dtype=np.int64))).transpose(0, 1)  # convert to indices, padded with BOS at the front and <EOS> at the back.
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in train_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()

        # aim_list holds the indices of positions with an actual speaker (i.e. not <EOS>)
        aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) !=
                    dict_spk2idx['<EOS>']).nonzero().squeeze()
        aim_list = aim_list.data.cpu().numpy()

        outputs, targets, multi_mask, gamma = model(
            src, src_len, tgt, tgt_len, dict_spk2idx,
            padded_mixture)  # outputs are the hidden_outputs (pre-classification hidden states) and can be used directly
        # print('mask size:', multi_mask.size())
        writer.add_histogram('global gamma', gamma, updates)

        if 1 and len(opt.gpus) > 1:
            sgm_loss, num_total, num_correct = model.module.compute_loss(
                outputs, targets, opt.memory)
        else:
            sgm_loss, num_total, num_correct = model.compute_loss(
                outputs, targets, opt.memory)
        print('loss for SGM,this batch:', sgm_loss.cpu().item())
        writer.add_scalars('scalar/loss', {'sgm_loss': sgm_loss.cpu().item()},
                           updates)

        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk_max channel.
        siz = src.size()
        assert len(siz) == 3
        topk_max = config.MAX_MIX  # maximum possible number of sources (topk)
        x_input_map_multi = torch.unsqueeze(src, 1).expand(
            siz[0], topk_max, siz[1],
            siz[2]).contiguous().view(-1, siz[1], siz[2])
        x_input_map_multi = x_input_map_multi[aim_list]
        multi_mask = multi_mask.transpose(0, 1)

        if config.use_tas:
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_tas_loss(
                    padded_mixture, multi_mask, padded_source, mixture_lengths)
            else:
                ss_loss = model.separation_tas_loss(padded_mixture, multi_mask,
                                                    padded_source,
                                                    mixture_lengths)
        else:
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi,
                                                       multi_mask, feas_tgt)
            else:
                ss_loss = model.separation_loss(x_input_map_multi, multi_mask,
                                                feas_tgt)

        print('loss for SS,this batch:', ss_loss.cpu().item())
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        if not config.use_tas:
            loss = sgm_loss + 5 * ss_loss
        else:
            loss = 50 * sgm_loss + ss_loss

        loss.backward()
        # print 'totallllllllllll loss:',loss
        total_loss_sgm += sgm_loss.cpu().item()
        total_loss_ss += ss_loss.cpu().item()
        # lera.log({
        #     'sgm_loss': sgm_loss.cpu().item(),
        #     'ss_loss': ss_loss.cpu().item(),
        #     'loss:': loss.cpu().item(),
        # })

        # if not config.use_tas and updates>10 and updates % config.eval_interval in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
        if updates > 10 and updates % config.eval_interval in [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
        ]:
            if not config.use_tas:
                predicted_maps = multi_mask * x_input_map_multi
                utils.bss_eval(config,
                               predicted_maps,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst='batch_output1')
                del predicted_maps, multi_mask, x_input_map_multi
            else:
                utils.bss_eval_tas(config,
                                   multi_mask,
                                   train_data['multi_spk_fea_list'],
                                   raw_tgt,
                                   train_data,
                                   dst='batch_output1')
                del x_input_map_multi
            sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output1/')
            # lera.log({'SDR sample': sdr_aver_batch})
            # lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': sdri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            #raw_input('Press to continue...')

        total_loss += loss.cpu().item()
        report_correct += num_correct.cpu().item()
        report_total += num_total.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss / num_total,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0,
                   report_correct / report_total))
            # lera.log({'label_acc':report_correct/report_total})
            writer.add_scalars('scalar/loss',
                               {'label_acc': report_correct / report_total},
                               updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # run a few epochs before evaluating; otherwise the model has learned too little and results are unreliable
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / report_total))
            print('evaluating after %d updates...\r' % updates)
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size becomes 1 during eval
            # print 'Orignal bs:',original_bs
            config.batch_size = original_bs
            # print 'Now bs:',config.batch_size
            for metric in config.metric:
                scores[metric].append(score[metric])
                # lera.log({
                #     'sgm_micro_f1': score[metric],
                # })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv3_{}.pt'.format(updates))
def eval(epoch, test_or_valid='valid'):
    # config.batch_size=1
    global updates, model
    model.eval()
    # Note: please set config.batch_size to 1 when testing.
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_complex_two_channel'])
                       )  # bs,T,F,2 both real and imag values
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            eval_data['multi_spk_wav_list'])  # target waveforms: bs*topk, time_len

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure the tensors below are LongTensors
        tgt = Variable(
            torch.from_numpy(
                np.array([[0, 1, 2, 102] for __ in range(config.batch_size)],
                         dtype=np.int64))).transpose(0, 1)  # convert to indices and add BOS/EOS symbols.

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        multi_mask_real, multi_mask_imag, enc_attn_list = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # the outputs here are the hidden_outputs before the final classification layer and can be used directly
        multi_mask_real = multi_mask_real.transpose(0, 1)
        multi_mask_imag = multi_mask_imag.transpose(0, 1)
        src_real = src[:, :, :, 0].transpose(0, 1)  # bs,T,F
        src_imag = src[:, :, :, 1].transpose(0, 1)  # bs,T,F
        print('mask size for real/imag:',
              multi_mask_real.size())  # bs,topk,T,F, already squeezed
        print('mixture size for real/imag:', src_real.size())  # bs,T,F
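        # Each speaker k is predicted as a complex mask (multi_mask_real[:, k],
        # multi_mask_imag[:, k]); applying it to the mixture STFT S = S_r + i*S_i
        # is the complex product (M_r*S_r - M_i*S_i) + i*(M_r*S_i + M_i*S_r),
        # which is exactly what the four lines below compute.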

        predicted_maps0_real = (multi_mask_real[:, 0] * src_real
                                - multi_mask_imag[:, 0] * src_imag)  # bs,T,F
        predicted_maps0_imag = (multi_mask_real[:, 0] * src_imag
                                + multi_mask_imag[:, 0] * src_real)  # bs,T,F
        predicted_maps1_real = (multi_mask_real[:, 1] * src_real
                                - multi_mask_imag[:, 1] * src_imag)  # bs,T,F
        predicted_maps1_imag = (multi_mask_real[:, 1] * src_imag
                                + multi_mask_imag[:, 1] * src_real)  # bs,T,F

        stft_matrix_spk0 = torch.cat((predicted_maps0_real.unsqueeze(-1),
                                      predicted_maps0_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        stft_matrix_spk1 = torch.cat((predicted_maps1_real.unsqueeze(-1),
                                      predicted_maps1_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
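        # The two torch.cat calls above pack each speaker's real/imag parts into a
        # (bs, F, T, 2) STFT tensor; istft_irfft below inverts them back to time-domain
        # waveforms (hann window, hop config.FRAME_SHIFT, window length config.FRAME_LENGTH).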
        wav_spk0 = models.istft_irfft(stft_matrix_spk0,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        wav_spk1 = models.istft_irfft(stft_matrix_spk1,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        predict_wav = torch.cat((wav_spk0.unsqueeze(1), wav_spk1.unsqueeze(1)),
                                1)  # bs,topk,time_len
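        # separation_tas_loss is an utterance-level permutation-invariant (uPIT)
        # time-domain loss in the TasNet style (presumably SI-SNR based): it scores
        # every speaker-to-target permutation and returns the candidate permutations
        # (pmt_list) together with the index of the best one per sample (max_snr_idx).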
        if 1 and len(opt.gpus) > 1:
            ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)
        else:
            ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)

        best_pmt = [
            list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx
        ]
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
        })
        writer.add_scalars('scalar/loss',
                           {'ss_loss_' + test_or_valid: ss_loss.cpu().item()},
                           updates + batch_idx)
        del ss_loss
        # if batch_idx>10:
        #     break

        if False:  # this part is to test the checkpoints sequentially.
            batch_idx += 1
            if batch_idx % 100 == 0:
                updates = updates + 1000
                opt.restore = '/data1/shijing_data/2020-02-14-04:58:17/Transformer_PIT_{}.pt'.format(
                    updates)
                print('loading checkpoint...\n', opt.restore)
                checkpoints = torch.load(opt.restore)
                model.module.load_state_dict(checkpoints['model'])
                break
            continue
        # '''''
        if 1 and batch_idx <= (500 / config.batch_size):
            utils.bss_eval_tas(config,
                               predict_wav,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst=log_path + 'batch_output')
            sdr_aver_batch, snri_aver_batch = bss_test.cal(log_path +
                                                           'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SI-SNRi sample': snri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': snri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, snri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SNRi_aver_now:', SDRi_SUM.mean()))

        batch_idx += 1
        if batch_idx > 100:
            break
        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f | recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))
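
# For reference, a minimal, self-contained sketch of the permutation-invariant
# SI-SNR objective that a TasNet-style separation_tas_loss is typically built on.
# _si_snr_pit_sketch is a hypothetical helper for illustration only; the real
# separation_tas_loss used above additionally handles sequence-length masking and
# returns the candidate permutations (pmt_list) and the chosen ones (max_snr_idx).
def _si_snr_pit_sketch(estimate, target, eps=1e-8):
    """estimate, target: (bs, n_spk, T) tensors; returns negative best-permutation SI-SNR."""
    import itertools

    import torch

    def si_snr(est, ref):
        # zero-mean along time, project est onto ref, compare signal vs. residual energy
        est = est - est.mean(dim=-1, keepdim=True)
        ref = ref - ref.mean(dim=-1, keepdim=True)
        proj = (est * ref).sum(-1, keepdim=True) * ref / (ref.pow(2).sum(-1, keepdim=True) + eps)
        noise = est - proj
        return 10 * torch.log10(proj.pow(2).sum(-1) / (noise.pow(2).sum(-1) + eps) + eps)  # (bs, n_spk)

    scores = []
    for perm in itertools.permutations(range(estimate.size(1))):
        scores.append(si_snr(estimate[:, list(perm)], target).mean(dim=1))  # (bs,)
    best_si_snr, _ = torch.stack(scores, dim=1).max(dim=1)
    return -best_si_snr.mean()
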
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; otherwise apply the normal schedule below
        pass
    elif config.schedule and scheduler.get_lr()[0] > 4e-5:
        scheduler.step()
        print(
            ("Decaying learning rate to %g" % scheduler.get_lr()[0], updates))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
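        # Warmup: while updates <= config.warmup the learning rate follows the
        # Noam-style rule lr = config.learning_rate * min(step^-0.5, step * warmup^-1.5)
        # (roughly linear growth); afterwards the scheduler branch at the top of
        # train() handles the decay.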
        if updates <= config.warmup:  # still in the warmup phase: ramp up the learning rate
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log every 100 updates
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_complex_two_channel'])
                       )  # bs,T,F,2 both real and imag values
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_wav_list'])  # target signals, shape bs*topk, time_len

        padded_mixture, mixture_lengths, padded_source = train_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure all of the following are LongTensors (long integers)
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(
            torch.from_numpy(
                np.array(
                    [[0] + [dict_spk2idx[spk] for spk in spks] +
                     (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
                     for spks in raw_tgt],
                    dtype=np.int64))).transpose(0, 1)  # convert to indices, with BOS/EOS added at the ends
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices, with BOS/EOS added at the ends
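        # Example (hypothetical speaker names): for raw_tgt = [['spk_A', 'spk_B']] and
        # config.MAX_MIX = 2, each row becomes [0 (<BOS>), dict_spk2idx['spk_A'],
        # dict_spk2idx['spk_B'], dict_spk2idx['<EOS>']], padded with <EOS> up to
        # tgt_max_len and transposed to shape (tgt_max_len, bs) for the decoder.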
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in train_data['multi_spk_fea_list']
            ])).unsqueeze(0)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        multi_mask_real, multi_mask_imag, enc_attn_list = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # the outputs here are the hidden_outputs before the final classification layer and can be used directly
        multi_mask_real = multi_mask_real.transpose(0, 1)
        multi_mask_imag = multi_mask_imag.transpose(0, 1)
        src_real = src[:, :, :, 0].transpose(0, 1)  # bs,T,F
        src_imag = src[:, :, :, 1].transpose(0, 1)  # bs,T,F
        print('mask size for real/imag:',
              multi_mask_real.size())  # bs,topk,T,F, already squeezed
        print('mixture size for real/imag:', src_real.size())  # bs,T,F
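        # Complex-ratio-mask separation: the real/imag recombination below is
        # identical to the eval() code above.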

        predicted_maps0_real = (multi_mask_real[:, 0] * src_real
                                - multi_mask_imag[:, 0] * src_imag)  # bs,T,F
        predicted_maps0_imag = (multi_mask_real[:, 0] * src_imag
                                + multi_mask_imag[:, 0] * src_real)  # bs,T,F
        predicted_maps1_real = (multi_mask_real[:, 1] * src_real
                                - multi_mask_imag[:, 1] * src_imag)  # bs,T,F
        predicted_maps1_imag = (multi_mask_real[:, 1] * src_imag
                                + multi_mask_imag[:, 1] * src_real)  # bs,T,F

        stft_matrix_spk0 = torch.cat((predicted_maps0_real.unsqueeze(-1),
                                      predicted_maps0_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        stft_matrix_spk1 = torch.cat((predicted_maps1_real.unsqueeze(-1),
                                      predicted_maps1_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        wav_spk0 = models.istft_irfft(stft_matrix_spk0,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        wav_spk1 = models.istft_irfft(stft_matrix_spk1,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        predict_wav = torch.cat((wav_spk0.unsqueeze(1), wav_spk1.unsqueeze(1)),
                                1)  # bs,topk,time_len
        if 1 and len(opt.gpus) > 1:
            ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)
        else:
            ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)

        best_pmt = [
            list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx
        ]
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        loss = ss_loss
        loss.backward()

        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'ss_loss': ss_loss.cpu().item(),
        })

        if epoch > 20 and updates > 5 and updates % config.eval_interval in [
                0, 1, 2, 3, 4
        ]:
            utils.bss_eval_tas(config,
                               predict_wav,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst=log_path + 'batch_output')
            sdr_aver_batch, snri_aver_batch = bss_test.cal(log_path +
                                                           'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SI-SNRi sample': snri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': snri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, snri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SNRi_aver_now:', SDRi_SUM.mean()))

            # Heatmap here
            # n_layer tensors of shape (head*bs) x lq x dk
            '''
            import matplotlib.pyplot as plt
            ax = plt.gca()
            ax.invert_yaxis()

            raw_src=models.rank_feas(raw_tgt, train_data['multi_spk_fea_list'])
            att_idx=1
            att = enc_attn_list[-1].view(config.trans_n_head,config.batch_size,mix_speech_len,mix_speech_len).data.cpu().numpy()[:,att_idx]
            for head in range(config.trans_n_head):
                xx=att[head]
                plt.matshow(xx, cmap=plt.cm.hot, vmin=0,vmax=0.05)
                plt.colorbar()
                plt.savefig(log_path+'batch_output/'+'head_{}.png'.format(head))
            plt.matshow(raw_src[att_idx*2+0].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'source0.png')
            plt.matshow(raw_src[att_idx*2+1].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'source1.png')
            1/0
            '''

        total_loss += loss.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,ss loss: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss,
                   total_loss_ss / 30.0))
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 1 and updates % config.save_interval == 1:
            save_model(log_path + 'Transformer_PIT_2ch_{}.pt'.format(updates))

        if 0 and updates > 0 and updates % config.eval_interval == 3:  # best to run a few epochs before evaluating; otherwise the model has barely learned anything and many issues arise.
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / config.eval_interval))
            print(('evaluating after %d updates...\r' % updates))
            eval(epoch, 'valid')  # batch_size becomes 1 during eval
            eval(epoch, 'test')  # batch_size becomes 1 during eval

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0
Example #8
0
def eval(epoch, test_or_valid='train'):
    # config.batch_size=1
    global updates, model
    model.eval()
    # print('\n\nWhen testing, please set batch_size in config to 1!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch

        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in eval_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            eval_data['multi_spk_wav_list'])  # target signals, shape bs*topk, time_len

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure all of the following are LongTensors (long integers)
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        # tgt = Variable(torch.from_numpy(np.array(
        #     [[0] + [dict_spk2idx[spk] for spk in spks] + (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']] for
        #      spks in raw_tgt], dtype=np.int))).transpose(0, 1)  # convert to indices, with BOS/EOS added at the ends
        tgt = Variable(
            torch.from_numpy(
                np.array([[0, 1, 2, 102] for __ in range(config.batch_size)],
                         dtype=np.int64))).transpose(0, 1)  # convert to indices, with BOS/EOS added at the ends
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        multi_mask, enc_attn_list = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # the outputs here are the hidden_outputs before the final classification layer and can be used directly
        multi_mask = multi_mask.transpose(0, 1)
        print('mask size:', multi_mask.size())  # bs,topk,T,F

        predicted_maps0_spectrogram = multi_mask[:, 0] * src.transpose(
            0, 1)  #bs,T,F
        predicted_maps1_spectrogram = multi_mask[:, 1] * src.transpose(
            0, 1)  #bs,T,F
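        # The block below re-assigns the two predicted spectrograms frame by frame:
        # for every frame it tries both permutations ([0, 1] and [1, 0]) against
        # phase-weighted targets and keeps the MSE-minimizing one. The
        # "different assignment ratio" printed afterwards is the average fraction of
        # frames (out of 751 per utterance here) whose best assignment disagrees
        # with the dominant utterance-level permutation.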

        if True:  # Analyze the optimal assignments
            predicted_spectrogram = torch.cat([
                predicted_maps0_spectrogram.unsqueeze(1),
                predicted_maps1_spectrogram.unsqueeze(1)
            ], 1)
            feas_tgt_tmp = models.rank_feas(
                raw_tgt,
                eval_data['multi_spk_fea_list'])  # target spectrograms, shape bs*topk, len, fre
            src = src.transpose(0, 1)
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt_tmp.view(siz[0], -1, siz[1], siz[2])

            angle_tgt = models.rank_feas(
                raw_tgt, eval_data['multi_spk_angle_list']).view(
                    siz[0], -1, siz[1], siz[2])  # bs,topk,T,F
            angle_mix = Variable(
                torch.from_numpy(np.array(
                    eval_data['mix_angle']))).unsqueeze(1).expand(
                        siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang = np.cos(angle_mix - angle_tgt)
            ang = np.clip(ang, 0, None)
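            # Phase-sensitive target: each source magnitude is scaled by
            # max(0, cos(theta_mix - theta_source)), so the regression target
            # accounts for the phase mismatch between the mixture and that source.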

            feas_tgt_tmp = feas_tgt_tmp.view(siz[0], -1, siz[1],
                                             siz[2]) * ang  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt_tmp.cuda()
            del x_input_map_multi
            src = src.transpose(0, 1)
            MSE_func = nn.MSELoss().cuda()
            best_perms_this_batch = []
            for bs_idx in range(siz[0]):
                best_perms_this_sample = []
                for tt in range(siz[1]):  # for every frame
                    tar = feas_tgt_tmp[bs_idx, :, tt]  #topk,F
                    est = predicted_spectrogram[bs_idx, :, tt]  #topk,F
                    best_loss_mse_this_batch = -1
                    for idx, per in enumerate([[0, 1], [1, 0]]):
                        if idx == 0:
                            best_loss_mse_this_batch = MSE_func(est[per], tar)
                            perm_this_frame = per
                            predicted_spectrogram[bs_idx, :, tt] = est[per]
                        else:
                            loss = MSE_func(est[per], tar)
                            if loss <= best_loss_mse_this_batch:
                                best_loss_mse_this_batch = loss
                                perm_this_frame = per
                                predicted_spectrogram[bs_idx, :, tt] = est[per]

                    best_perms_this_sample.append(perm_this_frame)
                best_perms_this_batch.append(best_perms_this_sample)
            print(
                'different assignment ratio:',
                np.mean(np.min(
                    np.array(best_perms_this_batch).sum(1) / 751, 1)))
            # predicted_maps0_spectrogram = predicted_spectrogram[:,0]
            # predicted_maps1_spectrogram = predicted_spectrogram[:,1]

        _mix_spec = eval_data['mix_phase']  # bs,T,F,2
        angle_mix = np.angle(_mix_spec)
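        # The model here predicts magnitude masks only, so the separated spectrograms
        # are recombined with the mixture phase before the inverse STFT below:
        # real = |S_k| * cos(phi_mix), imag = |S_k| * sin(phi_mix).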
        predicted_maps0_real = predicted_maps0_spectrogram * torch.from_numpy(
            np.cos(angle_mix)).cuda()  # e^{ix} = cos(x) + i*sin(x)
        predicted_maps0_imag = predicted_maps0_spectrogram * torch.from_numpy(
            np.sin(angle_mix)).cuda()  # e^{ix} = cos(x) + i*sin(x)
        predicted_maps1_real = predicted_maps1_spectrogram * torch.from_numpy(
            np.cos(angle_mix)).cuda()  # e^{ix} = cos(x) + i*sin(x)
        predicted_maps1_imag = predicted_maps1_spectrogram * torch.from_numpy(
            np.sin(angle_mix)).cuda()  # e^{ix} = cos(x) + i*sin(x)

        stft_matrix_spk0 = torch.cat((predicted_maps0_real.unsqueeze(-1),
                                      predicted_maps0_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        stft_matrix_spk1 = torch.cat((predicted_maps1_real.unsqueeze(-1),
                                      predicted_maps1_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        wav_spk0 = models.istft_irfft(stft_matrix_spk0,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        wav_spk1 = models.istft_irfft(stft_matrix_spk1,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        predict_wav = torch.cat((wav_spk0.unsqueeze(1), wav_spk1.unsqueeze(1)),
                                1)  # bs,topk,time_len
        if 1 and len(opt.gpus) > 1:
            ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)
        else:
            ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)

        best_pmt = [
            list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx
        ]
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
        })
        writer.add_scalars('scalar/loss',
                           {'ss_loss_' + test_or_valid: ss_loss.cpu().item()},
                           updates + batch_idx)
        del ss_loss
        # if batch_idx>10:
        #     break

        if False:  # this part is to test the checkpoints sequentially.
            batch_idx += 1
            if batch_idx % 100 == 0:
                updates = updates + 1000
                opt.restore = '/data1/shijing_data/2020-02-14-04:58:17/Transformer_PIT_{}.pt'.format(
                    updates)
                print('loading checkpoint...\n', opt.restore)
                checkpoints = torch.load(opt.restore)
                model.module.load_state_dict(checkpoints['model'])
                break
            continue
        # '''''
        if 1 and batch_idx <= (500 / config.batch_size):
            utils.bss_eval_tas(config,
                               predict_wav,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst=log_path + 'batch_output')
            sdr_aver_batch, snri_aver_batch = bss_test.cal(log_path +
                                                           'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SI-SNRi sample': snri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': snri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, snri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SNRi_aver_now:', SDRi_SUM.mean()))

        batch_idx += 1
        if batch_idx > 100:
            break
        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f | recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))