def train(epoch):
    print('\nEpoch: %d' % epoch)
    print("Train")
    net.train()
    train_loss = 0
    correct_count = 0
    total = 0
    flag = 0
    for batch_idx, (input1, target1, input2,
                    target2) in enumerate(train_loader):
        input1, target1, input2, target2 = input1.to(device), target1.to(
            device), input2.to(device), target2.to(device)
        #inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        output1 = net(input1).float()
        output2 = net(input2).float()
        target1 = target1.float()
        target2 = target2.float()

        ##
        if output1 > output2 and target1 > target2:
            if epoch == 5:
                print(output1, output2, target1, target2)
            correct = True
        elif output2 > output1 and target2 > target1:
            if epoch == 5:
                print(output1, output2, target1, target2)
            correct = True
        elif output1 == output2 and target1 == target2:
            if epoch == 5:
                print(output1, output2, target1, target2)
            correct = True
        else:
            correct = False
        #(like count1, like count2]
        ## Have to write the criterion function
        if flag <= 5:
            print(flag, ":", output1, output2, target1, target2)
        flag += 1
        loss = criterion(output1, output2, target1, target2)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # _, predicted = outputs.max(1)
        # total += targets.size(0)
        # correct += predicted.eq(targets).sum().item()
        # loss.data[0]

        total += 1
        correct_count += 1 if correct else 0

        # print(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
        #     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    print("Total Loss: %.3f | Acc: %.3f" %
          (train_loss / (batch_idx + 1), 100. * correct_count / total))
    lera.log('train_loss', train_loss / (batch_idx + 1))
    lera.log('train_acc', 100. * correct_count / total)
def test(epoch):
    print("Validation")
    global best_acc
    net.eval()
    test_loss = 0
    correct_count = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (input1, target1, input2,
                        target2) in enumerate(val_loader):
            input1, target1, input2, target2 = input1.to(device), target1.to(
                device), input2.to(device), target2.to(device)
            output1 = net(input1).float()
            output2 = net(input2).float()
            target1 = target1.float()
            target2 = target2.float()
            loss = criterion(output1, output2, target1, target2)

            test_loss += loss.item()
            # _, predicted = outputs.max(1)
            # total += targets.size(0)
            # correct += predicted.eq(targets).sum().item()

            # print(batch_idx, len(val_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            #     % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
            if output1 > output2 and target1 > target2:
                correct = True
            elif output2 > output1 and target2 > target1:
                correct = True
            elif output1 == output2 and target1 == target2:
                correct = True
            else:
                correct = False

            total += 1
            correct_count += 1 if correct else 0

        print("Total Loss: %.3f | Acc: %.3f" %
              (test_loss / (batch_idx + 1), 100. * correct_count / total))
        lera.log('val_loss', test_loss / (batch_idx + 1))
        lera.log('val_acc', 100. * correct_count / total)

    # Save checkpoint.
    acc = 100. * correct_count / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc
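# --- Hedged sketch, not part of the original example ---
# The training loop above calls criterion(output1, output2, target1, target2) and the
# comment notes the criterion still "has to be written". One plausible implementation,
# assuming a pairwise ranking objective, is a thin wrapper around
# torch.nn.MarginRankingLoss; the margin value and the tie handling are assumptions.
import torch
import torch.nn as nn

class PairwiseRankingCriterion(nn.Module):
    def __init__(self, margin=0.1):
        super().__init__()
        self.ranking = nn.MarginRankingLoss(margin=margin)

    def forward(self, output1, output2, target1, target2):
        # y = +1 when the first item should score higher, -1 otherwise (ties fall to +1).
        y = torch.where(target1 >= target2,
                        torch.ones_like(target1),
                        -torch.ones_like(target1))
        return self.ranking(output1.view(-1), output2.view(-1), y.view(-1))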
Example #3
def train(config):
    lera.log_hyperparams({
        "title": "hw1",
        "epoch": config.epochs,
        "lr": config.lr
    })
    dataset = img_dataset("./dataset/train", "train")
    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=config.bs,
                                             shuffle=True,
                                             drop_last=True)

    net = Classifier(num_classes=13).cuda()
    net.load_state_dict(
        torch.load(
            join(f"{config.weight_path}",
                 f"{config.pre_epochs}_classifier.pth")))
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.Adam(net.parameters(), lr=config.lr)
    for epoch in range(config.epochs):
        for _, data in enumerate(dataloader, 0):
            optimizer.zero_grad()
            net.train()
            inputs, labels = data
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs.data, 1)
            correct_counts = predicted.eq(labels.data.view_as(predicted))
            train_acc = torch.sum(correct_counts).item() / predicted.size(0)
        lera.log({"loss": loss.item(), "acc": train_acc})
        print("epoch:{}/{}, loss:{}, acc:{:02f}".format(
            epoch + 1 + config.pre_epochs,
            config.epochs + config.pre_epochs,
            loss.item(),
            train_acc,
        ))
        if (epoch + 1 + config.pre_epochs) % 10 == 0:
            torch.save(
                net.state_dict(),
                join(
                    f"{config.weight_path}",
                    f"{epoch + 1 + config.pre_epochs}_classifier.pth",
                ),
            )
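# --- Hedged usage sketch, not part of the original example ---
# train(config) above only reads config.epochs, config.pre_epochs, config.lr, config.bs
# and config.weight_path, so a minimal argparse driver such as this could call it.
# The flag names and default values here are illustrative assumptions.
import argparse

def parse_config():
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=50)
    parser.add_argument("--pre_epochs", type=int, default=0)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--bs", type=int, default=32)
    parser.add_argument("--weight_path", type=str, default="./weights")
    return parser.parse_args()

if __name__ == "__main__":
    train(parse_config())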
Example #4
def train(epoch):
    global e, updates, total_loss, start_time, report_total, total_loss_sgm, total_loss_ss
    if config.MLMSE:
        global Var
    e = epoch
    model.train()

    if config.schedule:
        scheduler.step()
        print("Decaying learning rate to %g" % scheduler.get_lr()[0])
        if config.is_dis:
            scheduler_dis.step()
        lera.log({
            'lr': scheduler.get_lr()[0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in trainloader:
    while True:
        train_data = next(train_data_gen)
        if train_data == False:
            break  # the generator for this epoch is exhausted, move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        raw_tgt = [
            sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']
        ]
        feas_tgt = models.rank_feas(
            raw_tgt, train_data['multi_spk_fea_list'])  # target spectrograms

        # make sure the following are all LongTensors
        tgt = Variable(
            torch.from_numpy(
                np.array([[0] + [dict_spk2idx[spk]
                                 for spk in spks] + [dict_spk2idx['<EOS>']]
                          for spks in raw_tgt],
                         dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS and EOS markers
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            len(train_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        # optim.optimizer.zero_grad()
        outputs, targets, multi_mask = model(
            src, src_len, tgt,
            tgt_len)  # outputs here are the hidden_outputs, the hidden states before the final classification layer, and can be used directly
        print('mask size:', multi_mask.size())

        if 1 and len(opt.gpus) > 1:
            sgm_loss, num_total, num_correct = model.module.compute_loss(
                outputs, targets, opt.memory)
        else:
            sgm_loss, num_total, num_correct = model.compute_loss(
                outputs, targets, opt.memory)
        print('loss for SGM,this batch:', sgm_loss.data[0] / num_total)

        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk channel.
        siz = src.size()
        assert len(siz) == 3
        topk = feas_tgt.size()[1]
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk, siz[1],
                                                      siz[2])
        multi_mask = multi_mask.transpose(0, 1)

        if 1 and len(opt.gpus) > 1:
            if config.MLMSE:
                Var = model.module.update_var(x_input_map_multi, multi_mask,
                                              feas_tgt)
                lera.log_image(u'Var weight',
                               Var.data.cpu().numpy().reshape(
                                   config.speech_fre, config.speech_fre,
                                   1).repeat(3, 2),
                               clip=(-1, 1))
                ss_loss = model.module.separation_loss(x_input_map_multi,
                                                       multi_mask, feas_tgt,
                                                       Var)
            else:
                ss_loss = model.module.separation_loss(x_input_map_multi,
                                                       multi_mask, feas_tgt)
        else:
            ss_loss = model.separation_loss(x_input_map_multi, multi_mask,
                                            feas_tgt)

        loss = sgm_loss + ss_loss
        # dis_loss model
        if config.is_dis:
            dis_loss = models.loss.dis_loss(config, topk, model_dis,
                                            x_input_map_multi, multi_mask,
                                            feas_tgt, func_dis)
            loss = loss + dis_loss
            # print 'dis_para',model_dis.parameters().next()[0]
            # print 'ss_para',model.parameters().next()[0]

        loss.backward()
        # print 'totallllllllllll loss:',loss
        total_loss_sgm += sgm_loss.data[0]
        total_loss_ss += ss_loss.data[0]
        lera.log({
            'sgm_loss': sgm_loss.data[0],
            'ss_loss': ss_loss.data[0],
        })
        total_loss += loss.data[0]
        report_total += num_total
        optim.step()
        if config.is_dis:
            optim_dis.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss / num_total,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0))
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 or updates % config.eval_interval == 0 and epoch > 1:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / report_total))
            print('evaluating after %d updates...\r' % updates)
            score = eval(epoch)
            for metric in config.metric:
                scores[metric].append(score[metric])
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0

        if updates % config.save_interval == 1:
            save_model(log_path +
                       'checkpoint_v2_withdis{}.pt'.format(config.is_dis))
def eval(epoch):
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'valid'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in validloader:
    SDR_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        # for ___ in range(2):
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        raw_tgt = [
            sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']
        ]
        top_k = len(raw_tgt[0])
        # make sure the following are all LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS and EOS markers
        tgt = Variable(torch.ones(
            top_k + 2, config.batch_size))  # a dummy tgt is fine here; the speaker labels do not matter at test time

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        feas_tgt = models.rank_feas(raw_tgt,
                                    eval_data['multi_spk_fea_list'])  # target spectrograms
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum *
                                   feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        if config.buffer_size or config.buffer_shift:  # first convert to realtime batches
            assert src.size()[1] == 1
            left_padding = Variable(
                torch.zeros(config.buffer_size,
                            src.size()[1],
                            src.size()[-1]).cuda())
            src = torch.cat((left_padding, src), dim=0)

            split_idx = 0
            src_new = Variable(
                torch.zeros(config.buffer_size + config.buffer_shift,
                            mix_speech_len // config.buffer_shift + 1,
                            src.size()[-1]).cuda())
            batch_counter = 0
            while True:
                print('split_idx at:', split_idx)
                split_len = config.buffer_size + config.buffer_shift  # the len of every split
                if split_idx + split_len > src.size(
                )[0]:  # if we run past the right end
                    print('Need to add right padding with len:',
                          (split_idx + split_len) - src.size()[0])
                    right_padding = Variable(
                        torch.zeros((split_idx + split_len) - src.size()[0],
                                    src.size()[1],
                                    src.size()[-1]).cuda())
                    src = torch.cat((src, right_padding), dim=0)
                    src_split = src[split_idx:(split_idx + split_len)]
                    src_new[:, batch_counter] = src_split
                    break
                src_split = src[split_idx:(split_idx + split_len)]
                src_new[:, batch_counter] = src_split
                split_idx += config.buffer_shift
                batch_counter += 1
            assert batch_counter + 1 == src_new.size()[1]
            src_len[0] = config.buffer_shift + config.buffer_size
            src_len = src_len.expand(1, src_new.size()[1])

        try:
            if 1 and len(opt.gpus) > 1:
                # samples, alignment = model.module.sample(src, src_len)
                samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                    src_new,
                    src_len,
                    dict_spk2idx,
                    tgt,
                    beam_size=config.beam_size)
            else:
                samples, alignment, hiddens, predicted_masks = model.beam_sample(
                    src_new,
                    src_len,
                    dict_spk2idx,
                    tgt,
                    beam_size=config.beam_size)
                # samples, alignment, hiddens, predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        except Exception as info:
            print('**************Error occurs here************:', info)
            continue

        if config.top1:
            predicted_masks = torch.cat([predicted_masks, 1 - predicted_masks],
                                        1)

        if config.buffer_size and config.buffer_shift:  # then recover the whole maps
            # masks:[7,topk,buffer_size+buffer_shift,fre]
            masks_recover = Variable(
                torch.zeros(1, predicted_masks.size(1), mix_speech_len,
                            speech_fre).cuda())
            recover_idx = 0
            for batch_counter in range(predicted_masks.size(0)):
                if not batch_counter == predicted_masks.size(0) - 1:
                    masks_recover[:, :, recover_idx:recover_idx +
                                  config.buffer_shift] = predicted_masks[
                                      batch_counter, :,
                                      -1 * config.buffer_shift:]
                else:  # the last shift
                    assert mix_speech_len - recover_idx == config.buffer_shift - right_padding.size(
                        0)
                    masks_recover[:, :, recover_idx:] = predicted_masks[
                        batch_counter, :,
                        -1 * config.buffer_shift:(-1 * right_padding.size(0))]
                recover_idx += config.buffer_shift
            predicted_masks = masks_recover
            src = Variable(torch.from_numpy(eval_data['mix_feas'])).transpose(
                0, 1).cuda()

        # '''
        # expand the raw mixed-features to topk channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        topk = feas_tgt.size()[1]
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk, siz[1],
                                                      siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask
        '''
        if 1 and len(opt.gpus) > 1:
            ss_loss = model.module.separation_loss(x_input_map_multi, predicted_masks, feas_tgt,Var)
        else:
            ss_loss = model.separation_loss(x_input_map_multi, predicted_masks, feas_tgt,None)
        print 'loss for ss,this batch:',ss_loss.data[0]
        lera.log({
            'ss_loss_'+test_or_valid: ss_loss.data[0],
        })

        del ss_loss,hiddens

        # ''' ''
        if batch_idx <= (500 / config.batch_size
                         ):  # only the first batches count toward the SDR
            # x_input_map_multi=x_input_map_multi[:,:,:config.buffer_shift]
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval(config,
                           predicted_maps,
                           eval_data['multi_spk_fea_list'],
                           raw_tgt,
                           eval_data,
                           dst='batch_outputwaddd')
            del predicted_maps, predicted_masks, x_input_map_multi
            SDR_SUM = np.append(SDR_SUM, bss_test.cal('batch_outputwaddd/'))
            print('SDR_aver_now:', SDR_SUM.mean())
            lera.log({'SDR sample': SDR_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  #only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))

        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                        reference[-1 * config.batch_size:]))
        alignments += [align for align in alignment]
        batch_idx += 1
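# --- Hedged sketch, not part of the original source ---
# eval() above calls convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) to map a
# predicted index sequence back to speaker labels. A plausible helper with that
# signature, assuming it stops at the <EOS> index, could look like this.
def convertToLabels(idx2label, indices, eos_idx):
    labels = []
    for idx in indices:
        idx = int(idx)
        if idx == eos_idx:
            break
        labels.append(idx2label[idx])
    return labels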
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # if not in the warmup phase, schedule normally (handled by the elif below)
        pass
    elif config.schedule and scheduler.get_lr()[0] > 5e-7:
        scheduler.step()
        print(("Decaying learning rate to %g" % scheduler.get_lr()[0]))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
        if updates <= config.warmup:  # if still in the warmup phase, apply warmup
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log it
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']]
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_fea_list'])  # target spectrograms, shape: aim_size, len, fre

        padded_mixture, mixture_lengths, padded_source = train_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()
        topk_this_batch = int(len(raw_tgt[0]))

        # make sure the following are all LongTensors
        tgt_max_len = topk_this_batch + 2  # with bos and eos.
        tgt = Variable(
            torch.from_numpy(
                np.array(
                    [[0] + [dict_spk2idx[spk] for spk in spks] +
                     (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
                     for spks in raw_tgt],
                    dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS and EOS markers
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS and EOS markers
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in train_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        if config.WFM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = topk_this_batch  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1],
                siz[2]).contiguous().view(-1, siz[1], siz[2])  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_square = feas_tgt_tmp * feas_tgt_tmp
            feas_tgt_sum_square = torch.sum(feas_tgt_square,
                                            dim=1,
                                            keepdim=True).expand(
                                                siz[0], topk_max, siz[1],
                                                siz[2])
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            feas_tgt = x_input_map_multi.view(
                siz[0], -1, siz[1], siz[2]).data * WFM_mask  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            WFM_mask = WFM_mask.cuda()
            del x_input_map_multi

        elif config.PSM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = topk_this_batch  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            IRM = feas_tgt_tmp / (x_input_map_multi + 1e-15)

            angle_tgt = models.rank_feas(
                raw_tgt, train_data['multi_spk_angle_list']).view(
                    siz[0], -1, siz[1], siz[2])
            angle_mix = Variable(
                torch.from_numpy(np.array(
                    train_data['mix_angle']))).unsqueeze(1).expand(
                        siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang = np.cos(angle_mix - angle_tgt)
            ang = np.clip(ang, 0, None)

            feas_tgt = x_input_map_multi * IRM * ang  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            del x_input_map_multi

        elif config.frame_mask:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = topk_this_batch  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_time = torch.sum(feas_tgt_tmp, 3).transpose(1,
                                                                 2)  #bs,T,topk
            for v1 in feas_tgt_time:
                for v2 in v1:
                    if v2[0] > v2[1]:
                        v2[0] = 1
                        v2[1] = 0
                    else:
                        v2[0] = 0
                        v2[1] = 1
            frame_mask = feas_tgt_time.transpose(1,
                                                 2).unsqueeze(-1)  #bs,topk,t,1
            feas_tgt = x_input_map_multi * frame_mask
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()

        # aim_list holds the indices of positions that contain real speakers (not <EOS>)
        aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) !=
                    dict_spk2idx['<EOS>']).nonzero().squeeze()
        aim_list = aim_list.data.cpu().numpy()

        outputs, pred, targets, multi_mask, dec_enc_attn_list = model(
            src,
            src_len,
            tgt,
            tgt_len,
            dict_spk2idx,
            None,
            mix_wav=padded_mixture
        )  # outputs here are the hidden_outputs, the hidden states before the final classification layer, and can be used directly
        print('mask size:', multi_mask.size())
        # writer.add_histogram('global gamma',gamma, updates)

        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk_max channel.
        siz = src.size()
        assert len(siz) == 3
        topk_max = topk_this_batch  # maximum possible number of speakers (topk)
        x_input_map_multi = torch.unsqueeze(src, 1).expand(
            siz[0], topk_max, siz[1],
            siz[2]).contiguous()  #.view(-1, siz[1], siz[2])
        # x_input_map_multi = x_input_map_multi[aim_list]
        multi_mask = multi_mask.transpose(0, 1)
        # if config.WFM:
        #     feas_tgt = x_input_map_multi.data * WFM_mask

        if 1 and len(opt.gpus) > 1:
            sgm_loss, num_total, num_correct = model.module.compute_loss(
                outputs, targets, opt.memory)
        else:
            sgm_loss, num_total, num_correct = model.compute_loss(
                outputs, targets, opt.memory)
        print(('loss for SGM,this batch:', sgm_loss.cpu().item()))
        writer.add_scalars('scalar/loss', {'sgm_loss': sgm_loss.cpu().item()},
                           updates)

        loss = sgm_loss
        ss_loss = 0
        loss.backward()

        # print 'totallllllllllll loss:',loss
        total_loss_sgm += sgm_loss.cpu().item()
        lera.log({
            'sgm_loss': sgm_loss.cpu().item(),
            'loss:': loss.cpu().item(),
        })

        total_loss += loss.cpu().item()
        report_correct += num_correct.cpu().item()
        report_total += num_total.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss / num_total,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0,
                   report_correct / report_total))
            lera.log({'label_acc': report_correct / report_total})
            writer.add_scalars('scalar/loss',
                               {'label_acc': report_correct / report_total},
                               updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # it is best to train for a few epochs before evaluating; otherwise the model has learned little and many issues appear
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / report_total))
            print(('evaluating after %d updates...\r' % updates))
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size becomes 1 during eval
            # print 'Orignal bs:',original_bs
            config.batch_size = original_bs
            # print 'Now bs:',config.batch_size
            for metric in config.metric:
                scores[metric].append(score[metric])
                lera.log({
                    'sgm_micro_f1': score[metric],
                })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if updates > 10 and updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv3_PIT_{}.pt'.format(updates))
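# --- Hedged sketch, not part of the original source ---
# The warmup branch inside the loop above scales the learning rate as
# config.learning_rate * min(step**-0.5, step * config.warmup**-1.5), i.e. the
# inverse-square-root ("Noam") schedule: linear growth for `warmup` updates, then
# decay proportional to step**-0.5. Factored into a standalone helper for clarity.
def noam_lr(base_lr, updates, warmup):
    step = max(updates, 1)
    return base_lr * min(step ** -0.5, step * warmup ** -1.5)

# For example, with base_lr=1e-3 and warmup=4000, noam_lr(1e-3, 4000, 4000) peaks at
# about 1.58e-5 and decays afterwards.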
Example #7
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if config.schedule and scheduler.get_lr()[0] > 5e-5:
        scheduler.step()
        print("Decaying learning rate to %g" % scheduler.get_lr()[0])
        lera.log({
            'lr': scheduler.get_lr()[0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
        print('\n')
        train_data = next(train_data_gen)
        if train_data == False:
            print('SDR_aver_epoch:', SDR_SUM.mean())
            print('SDRi_aver_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted, move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        raw_tgt = [
            sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']
        ]
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_fea_list'])  # target spectrograms, shape: aim_size, len, fre

        # make sure the following are all LongTensors
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(
            torch.from_numpy(
                np.array(
                    [[0] + [dict_spk2idx[spk] for spk in spks] +
                     (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
                     for spks in raw_tgt],
                    dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS and EOS markers
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in train_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()

        # aim_list holds the indices of positions that contain real speakers (not <EOS>)
        aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) !=
                    dict_spk2idx['<EOS>']).nonzero().squeeze()
        aim_list = aim_list.data.cpu().numpy()

        outputs, targets, multi_mask, gamma = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # outputs here are the hidden_outputs, the hidden states before the final classification layer, and can be used directly
        # print('mask size:', multi_mask.size())
        writer.add_histogram('global gamma', gamma, updates)

        if 1 and len(opt.gpus) > 1:
            sgm_loss, num_total, num_correct = model.module.compute_loss(
                outputs, targets, opt.memory)
        else:
            sgm_loss, num_total, num_correct = model.compute_loss(
                outputs, targets, opt.memory)
        print('loss for SGM,this batch:', sgm_loss.cpu().item())
        writer.add_scalars('scalar/loss', {'sgm_loss': sgm_loss.cpu().item()},
                           updates)

        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk_max channel.
        siz = src.size()
        assert len(siz) == 3
        topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
        x_input_map_multi = torch.unsqueeze(src, 1).expand(
            siz[0], topk_max, siz[1],
            siz[2]).contiguous().view(-1, siz[1], siz[2])
        x_input_map_multi = x_input_map_multi[aim_list]
        multi_mask = multi_mask.transpose(0, 1)

        if 1 and len(opt.gpus) > 1:
            ss_loss = model.module.separation_loss(x_input_map_multi,
                                                   multi_mask, feas_tgt)
        else:
            ss_loss = model.separation_loss(x_input_map_multi, multi_mask,
                                            feas_tgt)
        print('loss for SS,this batch:', ss_loss.cpu().item())
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        loss = sgm_loss + 5 * ss_loss

        loss.backward()
        # print 'totallllllllllll loss:',loss
        total_loss_sgm += sgm_loss.cpu().item()
        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'sgm_loss': sgm_loss.cpu().item(),
            'ss_loss': ss_loss.cpu().item(),
            'loss:': loss.cpu().item(),
        })

        if updates > 10 and updates % config.eval_interval in [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
        ]:
            predicted_maps = multi_mask * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval(config,
                           predicted_maps,
                           train_data['multi_spk_fea_list'],
                           raw_tgt,
                           train_data,
                           dst='batch_output')
            del predicted_maps, multi_mask, x_input_map_multi
            sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': sdri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())

        total_loss += loss.cpu().item()
        report_correct += num_correct.cpu().item()
        report_total += num_total.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss / num_total,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0,
                   report_correct / report_total))
            lera.log({'label_acc': report_correct / report_total})
            writer.add_scalars('scalar/loss',
                               {'label_acc': report_correct / report_total},
                               updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # it is best to train for a few epochs before evaluating; otherwise the model has learned little and many issues appear
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / report_total))
            print('evaluating after %d updates...\r' % updates)
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size becomes 1 during eval
            print('Original bs:', original_bs)
            config.batch_size = original_bs
            print('Now bs:', config.batch_size)
            for metric in config.metric:
                scores[metric].append(score[metric])
                lera.log({
                    'sgm_micro_f1': score[metric],
                })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv3_{}.pt'.format(updates))
Example #8
def eval_recu(epoch):
    assert config.batch_size == 1
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    test_or_valid = 'valid'
    # test_or_valid = 'train'
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        src_original = src.transpose(0, 1)  #To T,bs,F
        predict_multi_mask_all = None
        samples_list = []
        for len_idx in range(config.MIN_MIX + 2, 2, -1):  # separate the speakers one by one
            tgt_max_len = len_idx  # 4,3,2 with bos and eos.
            topk_k = len_idx - 2
            tgt = Variable(torch.ones(
                len_idx, config.batch_size))  # a dummy tgt is fine here; the speaker labels do not matter at test time
            src_len = Variable(
                torch.LongTensor(config.batch_size).zero_() +
                mix_speech_len).unsqueeze(0)
            tgt_len = Variable(
                torch.LongTensor([
                    tgt_max_len - 2
                    for one_spk in eval_data['multi_spk_fea_list']
                ])).unsqueeze(0)
            if use_cuda:
                src = src.cuda().transpose(0, 1)  # to T,bs,fre
                src_original = src_original.cuda()  # TO T,bs,fre
                tgt = tgt.cuda()
                src_len = src_len.cuda()
                tgt_len = tgt_len.cuda()
                feas_tgt = feas_tgt.cuda()

            # try:
            if len(opt.gpus) > 1:
                samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                    src, src_len, dict_spk2idx, tgt, config.beam_size,
                    src_original)
            else:
                samples, predicted_masks = model.beam_sample(
                    src, src_len, dict_spk2idx, tgt, config.beam_size,
                    src_original)

            # except:
            #     continue

            # '''
            # expand the raw mixed-features to topk_max channel.
            src = src_original.transpose(0, 1)  # make sure the original speech is used for separation
            siz = src.size()
            assert len(siz) == 3
            topk_max = topk_k
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2])
            if 0 and config.WFM:
                feas_tgt = x_input_map_multi.data * WFM_mask

            if len_idx == 4:
                aim_feas = list(range(0, 2 * config.batch_size,
                                      2))  # take the first speaker of every sample
                predict_multi_mask_all = predicted_masks  # bs*topk,T,F
                src = src * (1 - predicted_masks[aim_feas]
                             )  # with bs as the first dim, shape: bs,T,F
                samples_list = samples
            elif len_idx == 3:
                aim_feas = list(range(1, 2 * config.batch_size,
                                      2))  # take the second speaker of every sample
                predict_multi_mask_all[aim_feas] = predicted_masks
                feas_tgt = feas_tgt[aim_feas]
                samples_list = [samples_list[:1] + samples]

            if test_or_valid != 'test':
                if 1 and len(opt.gpus) > 1:
                    ss_loss = model.module.separation_loss(
                        x_input_map_multi,
                        predicted_masks,
                        feas_tgt,
                    )
                else:
                    ss_loss = model.separation_loss(x_input_map_multi,
                                                    predicted_masks, feas_tgt)
                print(('loss for ss,this batch:', ss_loss.cpu().item()))
                lera.log({
                    'ss_loss_' + str(len_idx) + test_or_valid:
                    ss_loss.cpu().item(),
                })
                del ss_loss

        predicted_masks = predict_multi_mask_all
        if batch_idx <= (500 / config.batch_size
                         ):  # only the first batches count toward the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config,
                            predicted_maps,
                            eval_data['multi_spk_fea_list'],
                            raw_tgt,
                            eval_data,
                            dst='batch_output_test')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(
                    'batch_output_test/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except (AssertionError):
                print('Errors in calculating the SDR')
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss',
                               {'SDR_sample_' + test_or_valid: sdr_aver_batch},
                               updates)
            # raw_input('Press any key to continue......')

        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples_list
        ]
        # source += raw_src
        reference += raw_tgt
        print(('samples:', samples))
        print(('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                         reference[-1 * config.batch_size:])))
        # alignments += [align for align in alignment]
        batch_idx += 1
        input('wait to continue......')

        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))

    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'], \
                 result['micro_f1'], result['micro_precision'], result['micro_recall'],SDR_SUM.mean()])
    print(('hamming_loss: %.8f | micro_f1: %.4f' %
           (result['hamming_loss'], result['micro_f1'])))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    1 / 0  # raises ZeroDivisionError here, so the return below is never reached
    return score
Example #9
def train(epoch, step):
    #lera.log('epoch', epoch)
    epoch += 1

    for input, _ in DataLoader(datasets[dataset], batch_size=batch_size, pin_memory=use_cuda, num_workers=2, shuffle=True, drop_last=True):
        if use_cuda:
            input = input.cuda()

        step += 1

        ze = enc(V(input))

        index = min_dist(V(ze.data), embeddings)
        sz = index.size()

        zq = (embeddings[index.view(-1)]       # [batch_size * x * x, D] containing vectors from embeddings
                .view(sz[0], sz[1], sz[2], D)  # [batch_size, x, x, D] 
                .permute(0, 3, 1, 2))          # [batch_size, D, x, x]

        emb_loss = (zq - V(ze.data)).pow(2).sum(1).mean() + 1e-2 * embeddings.pow(2).mean()

        # detach zq so it won't backprop to embeddings with recon loss
        zq = V(zq.data, requires_grad=True)

        output = dec(zq)

        commit_loss = beta * (ze - V(zq.data)).pow(2).sum(1).mean()
        recon_loss = F.mse_loss(output, V(input))

        optimizer.zero_grad()

        commit_loss.backward(retain_graph=True)
        emb_loss.backward()
        recon_loss.backward()

        # pass data term gradient from decoder to encoder
        ze.backward(zq.grad)

        optimizer.step()

        emb_count[index.data.view(-1)] = 1
        emb_count.sub_(0.01).clamp_(min=0)
        unique_embeddings = emb_count.gt(0).sum()

        sensitivity.add_(emb_loss.data[0] * (K - unique_embeddings) / K)
        sensitivity[emb_count.gt(0)] = 0

        lera.log({ 
            'recon_loss': recon_loss.data[0],
            'commit_loss': commit_loss.data[0],
            'unique_embeddings': emb_count.gt(0).sum(),
            }, console=True)

        # make comparison image
        if lera.every(seconds=60):
            input = input.cpu()[0:8,:,:,:]
            w = input.size(-1)
            output = output.data.cpu()[0:8,:,:,:]
            result = (torch.stack([input, output])           # [2, 8, 3, w, w]
                        .transpose(0, 1).contiguous()        # [8, 2, 3, w, w]
                        .view(4, 4, 3, w, w)                 # [4, 4, 3, w, w]
                        .permute(0, 3, 1, 4, 2).contiguous() # [4, w, 4, w, 3]
                        .view(w * 4, w * 4, 3))              # [w * 4, w * 4, 3]
            lera.log_image('reconstruction', result.numpy(), clip=(0, 1))

    # continue training
    if step < total_steps:
        train(epoch, step)
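# --- Hedged sketch, not part of the original source ---
# The VQ-VAE loop above uses min_dist(ze, embeddings) to pick, for every spatial
# position of the encoder output ze [batch, D, x, x], the index of the nearest
# codebook vector in embeddings [K, D]; the returned index tensor is [batch, x, x],
# matching how it is consumed above. This version assumes a recent PyTorch API
# (torch.cdist), whereas the original was written against the old Variable API.
import torch

def min_dist(ze, embeddings):
    ze_perm = ze.permute(0, 2, 3, 1).contiguous()      # [batch, x, x, D]
    flat = ze_perm.view(-1, ze_perm.size(-1))          # [batch * x * x, D]
    dists = torch.cdist(flat, embeddings)              # [batch * x * x, K]
    index = dists.argmin(dim=1)                        # nearest codebook entry per position
    return index.view(ze_perm.size(0), ze_perm.size(1), ze_perm.size(2))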
Example #10
def eval(epoch):
    # config.batch_size=1
    model.eval()
    # print('\n\nWhen testing, please set the batch_size in config to 1!!!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    test_or_valid = 'valid'
    # test_or_valid = 'train'
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        top_k = len(raw_tgt[0])
        # make sure the following are all LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS and EOS markers
        tgt = Variable(torch.ones(
            top_k + 2, config.batch_size))  # a dummy tgt is fine here; the speaker labels do not matter at test time

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 3
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_sum_square = torch.sum(feas_tgt_square,
                                            dim=0,
                                            keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        # try:
        if 1 and len(opt.gpus) > 1:
            samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        else:
            samples, predicted_masks = model.beam_sample(
                src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
            samples = [samples]

        # except:
        #     continue

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print '*'*40+'\nThe model is far from good. End the evaluation.\n'+'*'*40
        #     break
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk_max, siz[1],
                                                      siz[2])
        if 1 and config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask

        if test_or_valid != 'test':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(
                    x_input_map_multi,
                    predicted_masks,
                    feas_tgt,
                )
            else:
                ss_loss = model.separation_loss(x_input_map_multi,
                                                predicted_masks, feas_tgt)
            print(('loss for ss,this batch:', ss_loss.cpu().item()))
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            })
            del ss_loss

        # '''''
        if 1 and batch_idx <= (500 / config.batch_size
                               ):  # only the first batches count toward the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config,
                            predicted_maps,
                            eval_data['multi_spk_fea_list'],
                            raw_tgt,
                            eval_data,
                            dst='batch_output_test')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(
                    'batch_output_test/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except (AssertionError):
                print('Errors in calculating the SDR')
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss',
                               {'SDR_sample_' + test_or_valid: sdr_aver_batch},
                               updates)
            # raw_input('Press any key to continue......')
        elif batch_idx == (200 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  # only record the best SDR once.
            print(('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean())))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))

        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print(('samples:', samples))
        print(('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                         reference[-1 * config.batch_size:])))
        # alignments += [align for align in alignment]
        batch_idx += 1

        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))

    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'], \
                 result['micro_f1'], result['micro_precision'], result['micro_recall'],SDR_SUM.mean()])
    print(('hamming_loss: %.8f | micro_f1: %.4f' %
           (result['hamming_loss'], result['micro_f1'])))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    1 / 0  # deliberately raises ZeroDivisionError to halt after a single evaluation pass (debug leftover)
    return score
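
# The WFM branch above (feas_tgt = x_input_map_multi.data * WFM_mask) uses a
# Wiener-filter-like mask built from the reference spectrograms. A minimal sketch
# of such a mask, assuming torch tensors; wiener_like_mask is an illustrative
# helper, not part of the repo:
def wiener_like_mask(feas_tgt, eps=1e-15):
    # feas_tgt: (topk, T, F) magnitude spectrograms of the reference speakers
    power = feas_tgt * feas_tgt                    # per-speaker power
    denom = power.sum(dim=0, keepdim=True) + eps   # total power across speakers
    return power / denom                           # (topk, T, F), sums to ~1 per T-F bin
# Multiplying the replicated mixture spectrogram by this mask gives the filtered
# targets, mirroring the WFM_mask usage in the evaluation loop above.
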
Example #11
def train_recu(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; past warmup the normal schedule below applies
        pass
    elif config.schedule and scheduler.get_lr()[0] > 5e-7:
        scheduler.step()
        print(("Decaying learning rate to %g" % scheduler.get_lr()[0]))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
        if updates <= config.warmup:  # still within warmup: ramp the learning rate
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log occasionally
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']]
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_fea_list'])  # target spectrograms: aim_size,len,fre
        if 0 and config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 3
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_sum_square = torch.sum(feas_tgt_square,
                                            dim=0,
                                            keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            WFM_mask = WFM_mask.cuda()
            feas_tgt = x_input_map_multi.data * WFM_mask

        # make sure the following are LongTensors
        src_original = src.transpose(0, 1)  #To T,bs,F
        multi_mask_all = None
        for len_idx in range(config.MIN_MIX + 2, 2, -1):  # peel the speakers off one at a time
            # len_idx=3
            tgt_max_len = len_idx  # 4,3,2 with bos and eos.
            tgt = Variable(
                torch.from_numpy(
                    np.array([[0] + [
                        dict_spk2idx[spk]
                        for spk in spks[-1 * (tgt_max_len - 2):]
                    ] + 1 * [dict_spk2idx['<EOS>']] for spks in raw_tgt],
                             dtype=np.int))).transpose(
                                 0, 1)  # convert speakers to indices and add BOS/EOS; shape 4,bs
            src_len = Variable(
                torch.LongTensor(config.batch_size).zero_() +
                mix_speech_len).unsqueeze(0)
            tgt_len = Variable(
                torch.LongTensor([
                    tgt_max_len - 2
                    for one_spk in train_data['multi_spk_fea_list']
                ])).unsqueeze(0)
            if use_cuda:
                src = src.cuda().transpose(0, 1)  # to T,bs,fre
                src_original = src_original.cuda()  # TO T,bs,fre
                tgt = tgt.cuda()
                src_len = src_len.cuda()
                tgt_len = tgt_len.cuda()
                feas_tgt = feas_tgt.cuda()

            model.zero_grad()

            outputs, targets, multi_mask, gamma = model(
                src, src_len, tgt, tgt_len, dict_spk2idx,
                src_original)  # outputs are the hidden states before the final classification layer and can be used directly
            print('mask size:', multi_mask.size())
            # writer.add_histogram('global gamma',gamma, updates)

            if 1 and len(opt.gpus) > 1:
                sgm_loss, num_total, num_correct = model.module.compute_loss(
                    outputs, targets, opt.memory)
            else:
                sgm_loss, num_total, num_correct = model.compute_loss(
                    outputs, targets, opt.memory)
            print(('loss for SGM,this batch:', sgm_loss.cpu().item()))
            writer.add_scalars(
                'scalar/loss',
                {'sgm_loss' + str(len_idx): sgm_loss.cpu().item()}, updates)

            src = src_original.transpose(0, 1)  # make sure separation operates on the original mixture speech
            # expand the raw mixed-features to topk_max channel.
            siz = src.size()  #bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            topk_max = len_idx - 2  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1],
                siz[2]).contiguous().view(-1, siz[1], siz[2])  #bs,topk,T,F
            # x_input_map_multi = x_input_map_multi[aim_list]
            multi_mask = multi_mask.transpose(0, 1)

            if len_idx == 4:
                aim_feas = list(range(0, 2 * config.batch_size,
                                      2))  # take the first speaker of each sample
                multi_mask_all = multi_mask  #bs*topk,T,F
                src = src * (1 - multi_mask[aim_feas])  # bs is now the first dimension; bs,T,F
                # src=src.transpose(0,1)*(1-multi_mask[aim_feas])  # bs as the first dimension
                src = src.detach()  # the second pass uses the residual spectrum predicted in the first pass
            elif len_idx == 3:
                aim_feas = list(range(1, 2 * config.batch_size,
                                      2))  # take the second speaker of each sample
                multi_mask_all[aim_feas] = multi_mask
                feas_tgt = feas_tgt[aim_feas]
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi,
                                                       multi_mask, feas_tgt)
            else:
                ss_loss = model.separation_loss(x_input_map_multi, multi_mask,
                                                feas_tgt)
            print(('loss for SS,this batch:', ss_loss.cpu().item()))
            writer.add_scalars(
                'scalar/loss',
                {'ss_loss' + str(len_idx): ss_loss.cpu().item()}, updates)

            loss = sgm_loss + 5 * ss_loss
            loss.backward()
            optim.step()
            lera.log({
                'sgm_loss' + str(len_idx): sgm_loss.cpu().item(),
                'ss_loss' + str(len_idx): ss_loss.cpu().item(),
                'loss:' + str(len_idx): loss.cpu().item(),
            })
            total_loss_sgm += sgm_loss.cpu().item()
            total_loss_ss += ss_loss.cpu().item()

        multi_mask = multi_mask_all
        x_input_map_multi = torch.unsqueeze(src, 1).expand(
            siz[0], 2, siz[1], siz[2]).contiguous().view(-1, siz[1], siz[2])
        if updates > 10 and updates % config.eval_interval in [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
        ]:
            predicted_maps = multi_mask * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval(config,
                           predicted_maps,
                           train_data['multi_spk_fea_list'],
                           raw_tgt,
                           train_data,
                           dst='batch_output')
            del predicted_maps, multi_mask, x_input_map_multi
            sdr_aver_batch, sdri_aver_batch = bss_test.cal('batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': sdri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))

        total_loss += loss.cpu().item()
        report_correct += num_correct.cpu().item()
        report_total += num_total.cpu().item()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss / num_total,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0,
                   report_correct / report_total))
            lera.log({'label_acc': report_correct / report_total})
            writer.add_scalars('scalar/loss',
                               {'label_acc': report_correct / report_total},
                               updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # run a few epochs before evaluating; earlier the model has learned too little and the results are unreliable
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / report_total))
            print(('evaluating after %d updates...\r' % updates))
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size changes to 1 during eval
            # print 'Orignal bs:',original_bs
            config.batch_size = original_bs
            # print 'Now bs:',config.batch_size
            for metric in config.metric:
                scores[metric].append(score[metric])
                lera.log({
                    'sgm_micro_f1': score[metric],
                })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if 1 and updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv3_{}.pt'.format(updates))
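
# train_recu above ramps the learning rate with an inverse-square-root (Noam-style)
# warmup before handing control to the scheduler. A standalone sketch of that
# schedule; lr_base and warmup_steps stand in for config.learning_rate and
# config.warmup, and noam_lr is an illustrative helper, not part of the repo:
def noam_lr(step, lr_base, warmup_steps):
    # linear ramp while step < warmup_steps, then decay proportional to step ** -0.5
    step = max(step, 1)
    return lr_base * min(step ** -0.5, step * warmup_steps ** -1.5)
# The peak rate lr_base * warmup_steps ** -0.5 is reached exactly at step == warmup_steps.
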
Example #12
def eval(epoch,test_or_valid='valid'):
    # config.batch_size=1
    global updates,model
    model.eval()
    # print('\n\nPlease set batch_size to 1 in the config when testing!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX, config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        top_k = len(raw_tgt[0])
        # make sure the following are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert speakers to indices and add BOS/EOS
        tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS

        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(torch.LongTensor([len(one_spk) for one_spk in eval_data['multi_spk_fea_list']])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            topk_max = 2  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous().view(-1, siz[1], siz[ 2])  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_square = feas_tgt_tmp * feas_tgt_tmp
            feas_tgt_sum_square = torch.sum(feas_tgt_square, dim=1, keepdim=True).expand(siz[0], topk_max, siz[1], siz[2])
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            feas_tgt = x_input_map_multi.view(siz[0], -1, siz[1], siz[2]).data * WFM_mask  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            WFM_mask = WFM_mask.cuda()
            del x_input_map_multi

        elif config.PSM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            topk_max = 2  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            IRM=feas_tgt_tmp/(x_input_map_multi+1e-15)

            angle_tgt=models.rank_feas(raw_tgt, eval_data['multi_spk_angle_list']).view(siz[0],-1,siz[1],siz[2])
            angle_mix=Variable(torch.from_numpy(np.array(eval_data['mix_angle']))).unsqueeze(1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang=np.cos(angle_mix-angle_tgt)
            ang=np.clip(ang,0,None)

            # feas_tgt = x_input_map_multi *np.clip(IRM.numpy()*ang,0,1) # bs,topk,T,F
            # feas_tgt = x_input_map_multi *IRM*ang # bs,topk,T,F
            feas_tgt = feas_tgt.view(siz[0],-1,siz[1],siz[2])*ang # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            del x_input_map_multi

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        predicted_masks, enc_attn_list = model(src, src_len, tgt, tgt_len,
                                               dict_spk2idx)  # outputs are the hidden states before the final classification layer and can be used directly

        print('predicted mask size:', predicted_masks.size(),'should be topk,bs,T,F') # topk,bs,T,F
        # try:

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print '*'*40+'\nThe model is far from good. End the evaluation.\n'+'*'*40
        #     break
        topk_max = config.MAX_MIX
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2])

        predicted_masks=predicted_masks.transpose(0, 1)
        # if config.WFM:
        #     feas_tgt = x_input_map_multi.data * WFM_mask

        # note: after the transpose above, bs is the leading dimension
        assert predicted_masks.shape == x_input_map_multi.shape
        assert predicted_masks.size(0) == config.batch_size

        if 1 and len(opt.gpus) > 1:
            ss_loss,best_pmt = model.module.separation_pit_loss(x_input_map_multi, predicted_masks, feas_tgt, )
        else:
            ss_loss,best_pmt = model.separation_pit_loss(x_input_map_multi, predicted_masks, feas_tgt)
        print(('loss for ss,this batch:', ss_loss.cpu().item()))
        print('best perms for this batch:', best_pmt)
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
        })
        writer.add_scalars('scalar/loss',{'ss_loss_'+test_or_valid:ss_loss.cpu().item()},updates+batch_idx)
        del ss_loss
        if batch_idx>10:
            break

        if False: #this part is to test the checkpoints sequentially.
            batch_idx += 1
            if batch_idx%100==0:
                updates=updates+1000
                opt.restore='/data1/shijing_data/2020-02-14-04:58:17/Transformer_PIT_{}.pt'.format(updates)
                print('loading checkpoint...\n', opt.restore)
                checkpoints = torch.load(opt.restore)
                model.module.load_state_dict(checkpoints['model'])
                break
            continue
        # '''''
        if 0 and batch_idx <= (500 / config.batch_size):  # only the first few batches count toward the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            predicted_maps = predicted_maps.view(-1,mix_speech_len,speech_fre)
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
                            dst='batch_output_test')
            # utils.bss_eval(config, predicted_maps, eval_data['multi_spk_fea_list'], raw_tgt, eval_data,
            #                 dst='batch_output_test')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch=  bss_test.cal('batch_output_test/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError:
                print('Errors in calculating the SDR')
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))
            lera.log({'SDR sample'+test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample'+test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss',{'SDR_sample_'+test_or_valid:sdr_aver_batch},updates)
            # raw_input('Press any key to continue......')
        elif batch_idx == (200 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:  # only record the best SDR once.
            print(('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean())))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))

        # '''
        # candidate += [convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>']) for s in samples]
        # source += raw_src
        # reference += raw_tgt
        # print(('samples:', samples))
        # print(('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:], reference[-1 * config.batch_size:])))
        # alignments += [align for align in alignment]
        batch_idx += 1

        result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
        print(('hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
                   % (result['hamming_loss'], result['micro_f1'], result['micro_recall'], result['micro_precision'], )))
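
# The PSM branch in eval() above scales each reference magnitude by the cosine of
# the phase difference between mixture and source, floored at zero. A minimal sketch
# of that phase-sensitive target; phase_sensitive_target and its tensor names are
# illustrative, not the repo's API:
def phase_sensitive_target(mag_tgt, angle_tgt, angle_mix):
    # mag_tgt, angle_tgt: (topk, T, F) reference magnitudes / phases
    # angle_mix:          (T, F) mixture phase, broadcast over the speakers
    cos_term = torch.cos(angle_mix.unsqueeze(0) - angle_tgt)
    cos_term = torch.clamp(cos_term, min=0.0)  # same effect as np.clip(ang, 0, None)
    return mag_tgt * cos_term                  # phase-sensitive magnitude target
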
Example #13
def train(epoch):
    global e, updates, total_loss, start_time, report_total,report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; past warmup the normal schedule below applies
        pass
    elif config.schedule and scheduler.get_lr()[0]>4e-5:
        scheduler.step()
        print(("Decaying learning rate to %g" % scheduler.get_lr()[0],updates))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch


    train_data_gen = prepare_data('once', 'train')
    while True:
        if updates <= config.warmup:  # still within warmup: ramp the learning rate
            tmp_lr =  config.learning_rate * min(max(updates,1)** (-0.5),
                                             max(updates,1) * (config.warmup ** (-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs=list([group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log occasionally
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']]
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(raw_tgt, train_data['multi_spk_fea_list'])  # target spectrograms: bs*topk,len,fre

        # make sure the following are LongTensors
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(torch.from_numpy(np.array(
            [[0] + [dict_spk2idx[spk] for spk in spks] + (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']] for
             spks in raw_tgt], dtype=np.int))).transpose(0, 1)  # convert speakers to indices and add BOS/EOS
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS
        src_len = Variable(torch.LongTensor(config.batch_size).zero_() + mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([len(one_spk) for one_spk in train_data['multi_spk_fea_list']])).unsqueeze(0)
        if config.WFM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            topk_max = 2  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous().view(-1, siz[1], siz[ 2])  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_square = feas_tgt_tmp * feas_tgt_tmp
            feas_tgt_sum_square = torch.sum(feas_tgt_square, dim=1, keepdim=True).expand(siz[0], topk_max, siz[1], siz[2])
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)
            feas_tgt = x_input_map_multi.view(siz[0], -1, siz[1], siz[2]).data * WFM_mask  # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            WFM_mask = WFM_mask.cuda()
            del x_input_map_multi

        elif config.PSM:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            topk_max = 2  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            IRM=feas_tgt_tmp/(x_input_map_multi+1e-15)

            angle_tgt=models.rank_feas(raw_tgt, train_data['multi_spk_angle_list']).view(siz[0],-1,siz[1],siz[2]) # bs,topk,T,F
            angle_mix=Variable(torch.from_numpy(np.array(train_data['mix_angle']))).unsqueeze(1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang=np.cos(angle_mix-angle_tgt)
            ang=np.clip(ang,0,None)

            # feas_tgt = x_input_map_multi *np.clip(IRM.numpy()*ang,0,1) # bs,topk,T,F
            # feas_tgt = x_input_map_multi *IRM*ang # bs,topk,T,F
            feas_tgt = feas_tgt.view(siz[0],-1,siz[1],siz[2])*ang # bs,topk,T,F
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
            del x_input_map_multi

        elif config.frame_mask:
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            topk_max = 2  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt.view(siz[0], -1, siz[1], siz[2])

            feas_tgt_time=torch.sum(feas_tgt_tmp,3).transpose(1,2) #bs,T,topk
            for v1 in feas_tgt_time:
                for v2 in v1:
                    if v2[0]>v2[1]:
                        v2[0]=1
                        v2[1]=0
                    else:
                        v2[0]=0
                        v2[1]=1
            frame_mask=feas_tgt_time.transpose(1,2).unsqueeze(-1) #bs,topk,t,1
            feas_tgt=x_input_map_multi*frame_mask
            feas_tgt = feas_tgt.view(-1, siz[1], siz[2])  # bs*topk,T,F
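            # Note: the nested loop above picks, per frame, the speaker with the larger
            # summed energy and writes a one-hot weight. A vectorized equivalent for the
            # two-speaker case (a sketch with the same semantics) would be:
            #   dominant = (feas_tgt_time[..., 0] > feas_tgt_time[..., 1]).float()  # bs,T
            #   feas_tgt_time = torch.stack([dominant, 1 - dominant], dim=-1)       # bs,T,topk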


        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        # aim_list holds the indices of positions that contain a real speaker (i.e. not <EOS>)
        aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) != dict_spk2idx['<EOS>']).nonzero().squeeze()
        aim_list = aim_list.data.cpu().numpy()

        multi_mask, enc_attn_list = model(src, src_len, tgt, tgt_len,
                                             dict_spk2idx)  # outputs are the hidden states before the final classification layer and can be used directly
        print('mask size:', multi_mask.size()) # topk,bs,T,F
        # print('mask:', multi_mask[0,0,:3:3]) # topk,bs,T,F
        # writer.add_histogram('global gamma',gamma, updates)


        src = src.transpose(0, 1)
        # expand the raw mixed-features to topk_max channel.
        siz = src.size()
        assert len(siz) == 3
        topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
        x_input_map_multi = torch.unsqueeze(src, 1).expand(siz[0], topk_max, siz[1], siz[2]).contiguous()#.view(-1, siz[1], siz[2])
        # x_input_map_multi = x_input_map_multi[aim_list]
        # x_input_map_multi = x_input_map_multi.transpose(0, 1) #topk,bs,T,F
        multi_mask = multi_mask.transpose(0, 1)
        # if config.WFM:
        #     feas_tgt = x_input_map_multi.data * WFM_mask

        # note: after the transpose above, bs is the leading dimension
        assert multi_mask.shape == x_input_map_multi.shape
        assert multi_mask.size(0) == config.batch_size

        if 1 and len(opt.gpus) > 1:  # run the separation loss first to obtain the permutation
            ss_loss, best_pmt = model.module.separation_pit_loss(x_input_map_multi, multi_mask, feas_tgt)
        else:
            ss_loss, best_pmt = model.separation_pit_loss(x_input_map_multi, multi_mask, feas_tgt)
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss',{'ss_loss':ss_loss.cpu().item()},updates)

        loss = ss_loss
        loss.backward()

        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'ss_loss': ss_loss.cpu().item(),
        })

        if updates>3 and updates % config.eval_interval in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,]:
            assert multi_mask.shape==x_input_map_multi.shape
            assert multi_mask.size(0)==config.batch_size
            predicted_maps = (multi_mask * x_input_map_multi).view(siz[0]*topk_max,siz[1],siz[2])

            # predicted_maps=Variable(feas_tgt)
            # utils.bss_eval(config, predicted_maps, train_data['multi_spk_fea_list'], raw_tgt, train_data, dst=log_path+'batch_output/')
            utils.bss_eval2(config, predicted_maps, train_data['multi_spk_fea_list'], raw_tgt, train_data, dst=log_path+'batch_output')
            del predicted_maps, multi_mask, x_input_map_multi
            sdr_aver_batch, sdri_aver_batch=  bss_test.cal(log_path+'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss',{'SDR_sample':sdr_aver_batch,'SDRi_sample':sdri_aver_batch},updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))

            # Heatmap here
            # n_layer tensors of shape (head*bs) x lq x dk
            '''
            import matplotlib.pyplot as plt
            ax = plt.gca()
            ax.invert_yaxis()

            raw_src=models.rank_feas(raw_tgt, train_data['multi_spk_fea_list'])
            att_idx=1
            att = enc_attn_list[-1].view(config.trans_n_head,config.batch_size,mix_speech_len,mix_speech_len).data.cpu().numpy()[:,att_idx]
            for head in range(config.trans_n_head):
                xx=att[head]
                plt.matshow(xx, cmap=plt.cm.hot, vmin=0,vmax=0.05)
                plt.colorbar()
                plt.savefig(log_path+'batch_output/'+'head_{}.png'.format(head))
            plt.matshow(raw_src[att_idx*2+0].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'source0.png')
            plt.matshow(raw_src[att_idx*2+1].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'source1.png')
            1/0
            '''

        total_loss += loss.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,ss loss: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss , total_loss_ss / 30.0))
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # run a few epochs before evaluating; earlier the model has learned too little and the results are unreliable
            logging("time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n"
                    % (time.time() - start_time, epoch, updates, total_loss/config.eval_interval))
            print(('evaluating after %d updates...\r' % updates))
            eval(epoch,'valid')  # batch_size changes to 1 during eval
            eval(epoch,'test')  # batch_size changes to 1 during eval

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if 1 and updates % config.save_interval == 1:
            save_model(log_path + 'Transformer_PIT_{}.pt'.format(updates))
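
# separation_pit_loss, called in train() above, lives inside the model; for the
# two-speaker case it amounts to scoring both speaker orderings and keeping the
# cheaper one. A minimal sketch of that idea (pit_mse_2spk is illustrative, not
# the repo's exact implementation):
def pit_mse_2spk(predicted, target):
    # predicted, target: (bs, 2, T, F) estimated / reference spectrograms
    loss_id = ((predicted - target) ** 2).mean(dim=(1, 2, 3))
    loss_swap = ((predicted - target[:, [1, 0]]) ** 2).mean(dim=(1, 2, 3))
    losses = torch.stack([loss_id, loss_swap], dim=1)  # bs,2
    best, perm = losses.min(dim=1)                      # per-sample best permutation
    return best.mean(), perm
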
def eval(epoch, test_or_valid='valid'):
    # config.batch_size=1
    global updates, model
    model.eval()
    # print('\n\nPlease set batch_size to 1 in the config when testing!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_complex_two_channel'])
                       )  # bs,T,F,2 both real and imag values
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            eval_data['multi_spk_wav_list'])  # reference waveforms: bs*topk,time_len

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure the following are LongTensors
        tgt = Variable(
            torch.from_numpy(
                np.array([[0, 1, 2, 102] for __ in range(config.batch_size)],
                         dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        multi_mask_real, multi_mask_imag, enc_attn_list = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # outputs are the hidden states before the final classification layer and can be used directly
        multi_mask_real = multi_mask_real.transpose(0, 1)
        multi_mask_imag = multi_mask_imag.transpose(0, 1)
        src_real = src[:, :, :, 0].transpose(0, 1)  # bs,T,F
        src_imag = src[:, :, :, 1].transpose(0, 1)  # bs,T,F
        print('mask size for real/imag:',
              multi_mask_real.size())  # bs,topk,T,F, already squeezed
        print('mixture size for real/imag:', src_real.size())  # bs,T,F

        predicted_maps0_real = multi_mask_real[:,
                                               0] * src_real - multi_mask_imag[:,
                                                                               0] * src_imag  #bs,T,F
        predicted_maps0_imag = multi_mask_real[:,
                                               0] * src_imag + multi_mask_imag[:,
                                                                               0] * src_real  #bs,T,F
        predicted_maps1_real = multi_mask_real[:,
                                               1] * src_real - multi_mask_imag[:,
                                                                               1] * src_imag  #bs,T,F
        predicted_maps1_imag = multi_mask_real[:,
                                               1] * src_imag + multi_mask_imag[:,
                                                                               1] * src_real  #bs,T,F

        stft_matrix_spk0 = torch.cat((predicted_maps0_real.unsqueeze(-1),
                                      predicted_maps0_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        stft_matrix_spk1 = torch.cat((predicted_maps1_real.unsqueeze(-1),
                                      predicted_maps1_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        wav_spk0 = models.istft_irfft(stft_matrix_spk0,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        wav_spk1 = models.istft_irfft(stft_matrix_spk1,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        predict_wav = torch.cat((wav_spk0.unsqueeze(1), wav_spk1.unsqueeze(1)),
                                1)  # bs,topk,time_len
        if 1 and len(opt.gpus) > 1:
            ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)
        else:
            ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)

        best_pmt = [
            list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx
        ]
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
        })
        writer.add_scalars('scalar/loss',
                           {'ss_loss_' + test_or_valid: ss_loss.cpu().item()},
                           updates + batch_idx)
        del ss_loss
        # if batch_idx>10:
        #     break

        if False:  #this part is to test the checkpoints sequentially.
            batch_idx += 1
            if batch_idx % 100 == 0:
                updates = updates + 1000
                opt.restore = '/data1/shijing_data/2020-02-14-04:58:17/Transformer_PIT_{}.pt'.format(
                    updates)
                print('loading checkpoint...\n', opt.restore)
                checkpoints = torch.load(opt.restore)
                model.module.load_state_dict(checkpoints['model'])
                break
            continue
        # '''''
        if 1 and batch_idx <= (500 / config.batch_size):
            utils.bss_eval_tas(config,
                               predict_wav,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst=log_path + 'batch_output')
            sdr_aver_batch, snri_aver_batch = bss_test.cal(log_path +
                                                           'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SI-SNRi sample': snri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': snri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, snri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SNRi_aver_now:', SDRi_SUM.mean()))

        batch_idx += 1
        if batch_idx > 100:
            break
        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))
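
# The mask application in eval() above is a complex multiplication written out in
# real/imaginary parts, (Mr + jMi)(Xr + jXi) = (MrXr - MiXi) + j(MrXi + MiXr),
# followed by an inverse STFT. A compact sketch of the same step; torch.istft stands
# in for models.istft_irfft and the frame parameters are illustrative defaults:
def apply_complex_mask_and_istft(mask_real, mask_imag, mix_real, mix_imag,
                                 n_fft=512, hop=128, length=None):
    est_real = mask_real * mix_real - mask_imag * mix_imag    # bs,T,F
    est_imag = mask_real * mix_imag + mask_imag * mix_real    # bs,T,F
    spec = torch.complex(est_real, est_imag).transpose(1, 2)  # bs,F,T as istft expects
    window = torch.hann_window(n_fft, device=spec.device)
    return torch.istft(spec, n_fft=n_fft, hop_length=hop,
                       win_length=n_fft, window=window, length=length)
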
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # during warmup do nothing here; past warmup the normal schedule below applies
        pass
    elif config.schedule and scheduler.get_lr()[0] > 4e-5:
        scheduler.step()
        print(
            ("Decaying learning rate to %g" % scheduler.get_lr()[0], updates))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    train_data_gen = prepare_data('once', 'train')
    while True:
        if updates <= config.warmup:  # still within warmup: ramp the learning rate
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log occasionally
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_complex_two_channel'])
                       )  # bs,T,F,2 both real and imag values
        raw_tgt = train_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            train_data['multi_spk_wav_list'])  # reference waveforms: bs*topk,time_len

        padded_mixture, mixture_lengths, padded_source = train_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure the following are LongTensors
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        tgt = Variable(
            torch.from_numpy(
                np.array(
                    [[0] + [dict_spk2idx[spk] for spk in spks] +
                     (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']]
                     for spks in raw_tgt],
                    dtype=np.int))).transpose(0, 1)  # convert speakers to indices and add BOS/EOS
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices and add BOS/EOS
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in train_data['multi_spk_fea_list']
            ])).unsqueeze(0)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        multi_mask_real, multi_mask_imag, enc_attn_list = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # outputs are the hidden states before the final classification layer and can be used directly
        multi_mask_real = multi_mask_real.transpose(0, 1)
        multi_mask_imag = multi_mask_imag.transpose(0, 1)
        src_real = src[:, :, :, 0].transpose(0, 1)  # bs,T,F
        src_imag = src[:, :, :, 1].transpose(0, 1)  # bs,T,F
        print('mask size for real/imag:',
              multi_mask_real.size())  # bs,topk,T,F, already squeezed
        print('mixture size for real/imag:', src_real.size())  # bs,T,F

        predicted_maps0_real = multi_mask_real[:,
                                               0] * src_real - multi_mask_imag[:,
                                                                               0] * src_imag  #bs,T,F
        predicted_maps0_imag = multi_mask_real[:,
                                               0] * src_imag + multi_mask_imag[:,
                                                                               0] * src_real  #bs,T,F
        predicted_maps1_real = multi_mask_real[:,
                                               1] * src_real - multi_mask_imag[:,
                                                                               1] * src_imag  #bs,T,F
        predicted_maps1_imag = multi_mask_real[:,
                                               1] * src_imag + multi_mask_imag[:,
                                                                               1] * src_real  #bs,T,F

        stft_matrix_spk0 = torch.cat((predicted_maps0_real.unsqueeze(-1),
                                      predicted_maps0_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        stft_matrix_spk1 = torch.cat((predicted_maps1_real.unsqueeze(-1),
                                      predicted_maps1_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        wav_spk0 = models.istft_irfft(stft_matrix_spk0,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        wav_spk1 = models.istft_irfft(stft_matrix_spk1,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        predict_wav = torch.cat((wav_spk0.unsqueeze(1), wav_spk1.unsqueeze(1)),
                                1)  # bs,topk,time_len
        if 1 and len(opt.gpus) > 1:
            ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)
        else:
            ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)

        best_pmt = [
            list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx
        ]
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        loss = ss_loss
        loss.backward()

        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'ss_loss': ss_loss.cpu().item(),
        })

        if epoch > 20 and updates > 5 and updates % config.eval_interval in [
                0, 1, 2, 3, 4
        ]:
            utils.bss_eval_tas(config,
                               predict_wav,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst=log_path + 'batch_output')
            sdr_aver_batch, snri_aver_batch = bss_test.cal(log_path +
                                                           'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SI-SNRi sample': snri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': snri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, snri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SNRi_aver_now:', SDRi_SUM.mean()))

            # Heatmap here
            # n_layer tensors of shape (head*bs) x lq x dk
            '''
            import matplotlib.pyplot as plt
            ax = plt.gca()
            ax.invert_yaxis()

            raw_src=models.rank_feas(raw_tgt, train_data['multi_spk_fea_list'])
            att_idx=1
            att = enc_attn_list[-1].view(config.trans_n_head,config.batch_size,mix_speech_len,mix_speech_len).data.cpu().numpy()[:,att_idx]
            for head in range(config.trans_n_head):
                xx=att[head]
                plt.matshow(xx, cmap=plt.cm.hot, vmin=0,vmax=0.05)
                plt.colorbar()
                plt.savefig(log_path+'batch_output/'+'head_{}.png'.format(head))
            plt.matshow(raw_src[att_idx*2+0].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'source0.png')
            plt.matshow(raw_src[att_idx*2+1].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'source1.png')
            1/0
            '''

        total_loss += loss.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,ss loss: %6.6f\n"
                % (time.time() - start_time, epoch, updates, loss,
                   total_loss_ss / 30.0))
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 1 and updates % config.save_interval == 1:
            save_model(log_path + 'Transformer_PIT_2ch_{}.pt'.format(updates))

        if 0 and updates > 0 and updates % config.eval_interval == 3:  # run a few epochs before evaluating; earlier the model has learned too little and the results are unreliable
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates,
                 total_loss / config.eval_interval))
            print(('evaluating after %d updates...\r' % updates))
            eval(epoch, 'valid')  # batch_size changes to 1 during eval
            eval(epoch, 'test')  # batch_size changes to 1 during eval

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0
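
# separation_tas_loss above follows the Conv-TasNet recipe: a scale-invariant SNR
# between estimated and reference waveforms, maximised over speaker permutations.
# A minimal sketch of the SI-SNR term itself (si_snr is illustrative, not the
# repo's exact function; the training loss is usually its negative):
def si_snr(est, ref, eps=1e-8):
    # est, ref: (bs, topk, time) estimated / reference waveforms
    est = est - est.mean(dim=-1, keepdim=True)
    ref = ref - ref.mean(dim=-1, keepdim=True)
    s_target = (est * ref).sum(dim=-1, keepdim=True) * ref \
               / (ref.pow(2).sum(dim=-1, keepdim=True) + eps)
    e_noise = est - s_target
    ratio = s_target.pow(2).sum(dim=-1) / (e_noise.pow(2).sum(dim=-1) + eps)
    return 10 * torch.log10(ratio + eps)  # (bs, topk), larger is better
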
Example #16
def train(epoch):
    global e, updates, total_loss, start_time, report_total, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])

    if config.schedule:
        scheduler.step()
        print("Decaying learning rate to %g" % scheduler.get_lr()[0])
        if config.is_dis:
            scheduler_dis.step()
        lera.log({
            'lr': scheduler.get_lr()[0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    if config.MLMSE:
        global Var

    train_data_gen = prepare_data('once', 'train')
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in trainloader:
    while True:
        try:
            train_data = next(train_data_gen)
            if train_data == False:
                print('SDR_aver_epoch:', SDR_SUM.mean())
                break  # the generator for this epoch is exhausted; move on to the next epoch

            src = Variable(torch.from_numpy(train_data['mix_feas']))
            # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
            raw_tgt = [
                sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']
            ]
            feas_tgt = models.rank_feas(
                raw_tgt,
                train_data['multi_spk_fea_list'])  # target spectrograms: aim_size,len,fre

            # make sure the following are LongTensors
            tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
            tgt = Variable(
                torch.from_numpy(
                    np.array([[0] + [dict_spk2idx[spk] for spk in spks] +
                              (tgt_max_len - len(spks) - 1) *
                              [dict_spk2idx['<EOS>']] for spks in raw_tgt],
                             dtype=np.int))).transpose(0,
                                                       1)  # convert speakers to indices and add BOS/EOS
            src_len = Variable(
                torch.LongTensor(config.batch_size).zero_() +
                mix_speech_len).unsqueeze(0)
            tgt_len = Variable(
                torch.LongTensor([
                    len(one_spk)
                    for one_spk in train_data['multi_spk_fea_list']
                ])).unsqueeze(0)
            if use_cuda:
                src = src.cuda().transpose(0, 1)
                tgt = tgt.cuda()
                src_len = src_len.cuda()
                tgt_len = tgt_len.cuda()
                feas_tgt = feas_tgt.cuda()

            model.zero_grad()
            # optim.optimizer.zero_grad()

            # aim_list holds the indices of positions that contain a real speaker (i.e. not <EOS>)
            aim_list = (tgt[1:-1].transpose(0, 1).contiguous().view(-1) !=
                        dict_spk2idx['<EOS>']).nonzero().squeeze()
            aim_list = aim_list.data.cpu().numpy()

            outputs, targets, multi_mask = model(
                src, src_len, tgt, tgt_len,
                dict_spk2idx)  # outputs are the hidden states before the final classification layer and can be used directly
            print('mask size:', multi_mask.size())

            if 1 and len(opt.gpus) > 1:
                sgm_loss, num_total, num_correct = model.module.compute_loss(
                    outputs, targets, opt.memory)
            else:
                sgm_loss, num_total, num_correct = model.compute_loss(
                    outputs, targets, opt.memory)
            print('loss for SGM,this batch:', sgm_loss.data[0] / num_total)

            src = src.transpose(0, 1)
            # expand the raw mixed-features to topk_max channel.
            siz = src.size()
            assert len(siz) == 3
            topk_max = config.MAX_MIX  # maximum possible number of top-k speakers
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1],
                siz[2]).contiguous().view(-1, siz[1], siz[2])
            x_input_map_multi = x_input_map_multi[aim_list]
            multi_mask = multi_mask.transpose(0, 1)

            if 1 and len(opt.gpus) > 1:
                if config.MLMSE:
                    Var = model.module.update_var(x_input_map_multi,
                                                  multi_mask, feas_tgt)
                    lera.log_image(u'Var weight',
                                   Var.data.cpu().numpy().reshape(
                                       config.speech_fre, config.speech_fre,
                                       1).repeat(3, 2),
                                   clip=(-1, 1))
                    ss_loss = model.module.separation_loss(
                        x_input_map_multi, multi_mask, feas_tgt, Var)
                else:
                    ss_loss = model.module.separation_loss(
                        x_input_map_multi, multi_mask, feas_tgt)
            else:
                ss_loss = model.separation_loss(x_input_map_multi, multi_mask,
                                                feas_tgt)

            loss = sgm_loss + 5 * ss_loss
            # dis_loss model
            if config.is_dis:
                dis_loss = models.loss.dis_loss(config, topk_max, model_dis,
                                                x_input_map_multi, multi_mask,
                                                feas_tgt, func_dis)
                loss = loss + dis_loss
                # print 'dis_para',model_dis.parameters().next()[0]
                # print 'ss_para',model.parameters().next()[0]

            loss.backward()
            # print 'totallllllllllll loss:',loss
            total_loss_sgm += sgm_loss.data[0]
            total_loss_ss += ss_loss.data[0]
            lera.log({
                'sgm_loss': sgm_loss.data[0],
                'ss_loss': ss_loss.data[0],
                'loss:': loss.data[0],
            })

            if (updates % config.eval_interval) in [
                    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
            ]:
                predicted_maps = multi_mask * x_input_map_multi
                # predicted_maps=Variable(feas_tgt)
                utils.bss_eval(config,
                               predicted_maps,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst='batch_outputjaa')
                del predicted_maps, multi_mask, x_input_map_multi
                # raw_input('wait to continue......')
                sdr_aver_batch = bss_test.cal('batch_outputjaa/')
                lera.log({'SDR sample': sdr_aver_batch})
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                print('SDR_aver_now:', SDR_SUM.mean())

            total_loss += loss.data[0]
            report_total += num_total
            optim.step()
            if config.is_dis:
                optim_dis.step()

            updates += 1
            if updates % 30 == 0:
                logging(
                    "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f\n"
                    % (time.time() - start_time, epoch, updates, loss /
                       num_total, total_loss_sgm / 30.0, total_loss_ss / 30.0))
                total_loss_sgm, total_loss_ss = 0, 0

            # continue

            if 0 or updates % config.eval_interval == 0 and epoch > 1:
                logging(
                    "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n"
                    % (time.time() - start_time, epoch, updates,
                       total_loss / report_total))
                print('evaluating after %d updates...\r' % updates)
                score = eval(epoch)
                for metric in config.metric:
                    scores[metric].append(score[metric])
                    lera.log({
                        'sgm_micro_f1': score[metric],
                    })
                    if metric == 'micro_f1' and score[metric] >= max(
                            scores[metric]):
                        save_model(log_path + 'best_' + metric +
                                   '_checkpoint.pt')
                    if metric == 'hamming_loss' and score[metric] <= min(
                            scores[metric]):
                        save_model(log_path + 'best_' + metric +
                                   '_checkpoint.pt')

                model.train()
                total_loss = 0
                start_time = 0
                report_total = 0

        except RuntimeError as eeee:
            print('Errors here:', eeee)
            continue
        except Exception as dddd:
            print('\n\n\nRare errors:', dddd)
            continue
Example #17
def eval(epoch):
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data_aim('once', test_or_valid, config.MIN_MIX,
                                     config.MAX_MIX)
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in validloader:
    SDR_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        # for ___ in range(2):
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            break  # the generator for this epoch is exhausted; move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        raw_tgt = [
            sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']
        ]
        feas_tgt = models.rank_feas(raw_tgt,
                                    eval_data['multi_spk_fea_list'])  # target spectrograms

        top_k = len(raw_tgt[0])
        # make sure the tensors below are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS markers.
        tgt = Variable(torch.ones(
            top_k + 2, config.batch_size))  # an arbitrary tgt; the speaker labels are irrelevant at test time.

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum *
                                   feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square
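            # WFM_mask: each target's squared magnitude divided by the squared magnitude of the summed targets (a Wiener-filter-like weighting)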

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        try:
            if 1 and len(opt.gpus) > 1:
                # samples, alignment = model.module.sample(src, src_len)
                samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                    src,
                    src_len,
                    dict_spk2idx,
                    tgt,
                    beam_size=config.beam_size)
            else:
                samples, alignment, hiddens, predicted_masks = model.beam_sample(
                    src,
                    src_len,
                    dict_spk2idx,
                    tgt,
                    beam_size=config.beam_size)
                # samples, alignment, hiddens, predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        except TabError as info:
            print('**************Error occurs here************:', info)
            continue

        if config.top1:
            predicted_masks = torch.cat([predicted_masks, 1 - predicted_masks],
                                        1)

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        topk_max = feas_tgt.size()[1]
        assert samples[0][-1] == dict_spk2idx['<EOS>']
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk_max, siz[1],
                                                      siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask

        if test_or_valid == 'valid':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(x_input_map_multi,
                                                       predicted_masks,
                                                       feas_tgt, Var)
            else:
                ss_loss = model.separation_loss(x_input_map_multi,
                                                predicted_masks, feas_tgt)
            print('loss for ss, this batch:', ss_loss.data[0])
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.data[0],
            })
            del ss_loss, hiddens

        # '''''
        if batch_idx <= (500 / config.batch_size
                         ):  #only the former batches counts the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config,
                            predicted_maps,
                            eval_data['multi_spk_fea_list'],
                            raw_tgt,
                            eval_data,
                            dst='batch_outputjaa')
            del predicted_maps, predicted_masks, x_input_map_multi
            SDR_SUM = np.append(SDR_SUM, bss_test.cal('batch_outputjaa/'))
            print('SDR_aver_now:', SDR_SUM.mean())
            lera.log({'SDR sample': SDR_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  #only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))

        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
        print('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                        reference[-1 * config.batch_size:]))
        alignments += [align for align in alignment]
        batch_idx += 1
Example #18
def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # still in warmup: do nothing here; otherwise the normal schedule below applies
        pass
    elif config.schedule and scheduler.get_lr()[0] > 5e-7:
        scheduler.step()
        print(("Decaying learning rate to %g" % scheduler.get_lr()[0]))
        lera.log({
            'lr': [group['lr'] for group in optim.optimizer.param_groups][0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    # train_data_gen = prepare_data('once', 'train')
    train_data_gen = musdb.DB(root="~/MUSDB18/",
                              subsets='train',
                              split='train')
    train_data_gen = batch_generator(
        list(train_data_gen),
        config.batch_size,
    )
    # while 1:
    #     mix,ref=next(train_data_gen)
    #     import soundfile as sf
    #     sf.write('mix.wav',mix[0,0],44100)
    #     sf.write('vocal.wav',ref[0,0,0],44100)
    #     sf.write('drum.wav',ref[0,1,0],44100)
    #     sf.write('bass.wav',ref[0,2,0],44100)
    #     sf.write('other.wav',ref[0,3,0],44100)
    #     pass

    while True:
        if updates <= config.warmup:  # during the warmup phase, ramp the learning rate up
            tmp_lr = config.learning_rate * min(
                max(updates, 1)**(-0.5),
                max(updates, 1) * (config.warmup**(-1.5)))
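            # inverse-square-root (Noam-style) warmup: lr grows roughly linearly for the first config.warmup updates, then decays as updates**-0.5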
            for param_group in optim.optimizer.param_groups:
                param_group['lr'] = tmp_lr
            scheduler.base_lrs = list(
                [group['lr'] for group in optim.optimizer.param_groups])
            if updates % 100 == 0:  # log occasionally
                print(updates)
                print("Warmup learning rate to %g" % tmp_lr)
                lera.log({
                    'lr':
                    [group['lr'] for group in optim.optimizer.param_groups][0],
                })

        train_data = next(train_data_gen)
        if train_data == False:
            print(('SDR_aver_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch

        padded_mixture, mixture_lengths, padded_source = train_data
        # source:bs,2channel,T  target:bs,4(vocals,drums,bass,other),2channel,T
        padded_mixture = torch.from_numpy(padded_mixture).float()
        topk_this_batch = padded_source.shape[1]
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        # make sure the tensors below are LongTensors

        if use_cuda:
            padded_mixture = padded_mixture.cuda().transpose(0, 1)
            mixture_lengths = mixture_lengths.cuda()
            padded_source = padded_source.cuda()
            # src = src.cuda().transpose(0, 1)
            # tgt = tgt.cuda()
            # src_len = src_len.cuda()
            # tgt_len = tgt_len.cuda()
            # feas_tgt = feas_tgt.cuda()

        if 0 and loss < -5:
            import soundfile as sf
            idx_in_batch = 0
            sf.write(
                str(idx_in_batch) + '_mix.wav',
                padded_mixture.transpose(
                    0, 1).data.cpu().numpy()[idx_in_batch].transpose(), 44100)
            sf.write(
                str(idx_in_batch) + '_ref_vocal.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 0].transpose(),
                44100)
            sf.write(
                str(idx_in_batch) + '_ref_drum.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 1].transpose(),
                44100)
            sf.write(
                str(idx_in_batch) + '_ref_bass.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 2].transpose(),
                44100)
            sf.write(
                str(idx_in_batch) + '_ref_other.wav',
                padded_source.data.cpu().numpy()[idx_in_batch, 3].transpose(),
                44100)

        model.zero_grad()
        outputs, pred, spks_ordre_list, multi_mask, y_map = model(
            None,
            None,
            None,
            None,
            dict_spk2idx,
            None,
            mix_wav=padded_mixture,
            clean_wavs=padded_source.transpose(
                0, 1))  # outputs are the hidden states before the final classification layer and can be used directly
        print('mask size:', multi_mask.size())
        print('y map size:', y_map.size())
        # print('spk order:', spks_ordre_list) # bs,topk
        # writer.add_histogram('global gamma',gamma, updates)
        multi_mask = multi_mask.transpose(0, 1)
        y_map = y_map.transpose(0, 1)
        spks_ordre_list = spks_ordre_list.transpose(0, 1)

        # expand the raw mixed-features to topk_max channel.
        topk_max = topk_this_batch  # maximum possible number of top-k outputs

        if config.greddy_tf and config.add_last_silence:
            multi_mask, silence_channel = torch.split(multi_mask,
                                                      [topk_this_batch, 1],
                                                      dim=1)
            silence_channel = silence_channel[:, 0]
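            # with add_last_silence the mask carries one extra channel, which is split off here and pushed toward silence by the loss below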
            assert len(padded_source.shape) == 3
            # padded_source = torch.cat([padded_source,torch.zeros(padded_source.size(0),1,padded_source.size(2))],1)
            if 1 and len(opt.gpus) > 1:
                ss_loss_silence = model.module.silence_loss(silence_channel)
            else:
                ss_loss_silence = model.silence_loss(silence_channel)
            print('loss for SS silence,this batch:',
                  ss_loss_silence.cpu().item())
            writer.add_scalars(
                'scalar/loss',
                {'ss_loss_silence': ss_loss_silence.cpu().item()}, updates)
            lera.log({'ss_loss_silence': ss_loss_silence.cpu().item()})
            if torch.isnan(ss_loss_silence):
                ss_loss_silence = 0

        if config.use_tas:
            # print('source',padded_source)
            # print('est', multi_mask)
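            # assumption from the method name: separation_tas_sdr_order_loss computes an SDR-style time-domain loss on the estimates in the decoder's output order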
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_tas_sdr_order_loss(
                    padded_mixture.transpose(0, 1), multi_mask, y_map,
                    mixture_lengths)
            else:
                ss_loss = model.separation_tas_sdr_order_loss(
                    padded_mixture, multi_mask, y_map, mixture_lengths)
            # best_pmt=[list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx]

        print('loss for SS,this batch:', ss_loss.cpu().item())
        # print('best perms for this batch:', best_pmt)
        print('greedy perms for this batch:',
              [ii for ii in spks_ordre_list.data.cpu().numpy()])
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)

        loss = ss_loss
        if config.add_last_silence:
            loss = loss + 0.1 * ss_loss_silence
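            # the silence-channel penalty is added with a fixed weight of 0.1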
        loss.backward()

        # print 'totallllllllllll loss:',loss
        total_loss_ss += ss_loss.cpu().item()
        lera.log({
            'ss_loss_' + str(topk_this_batch): ss_loss.cpu().item(),
            'loss:': loss.cpu().item(),
            'pre_min': multi_mask.data.cpu().numpy().min(),
            'pre_max': multi_mask.data.cpu().numpy().max(),
        })

        if 1 or loss < -5:
            import soundfile as sf
            idx_in_batch = 0
            y0 = multi_mask.data.cpu().numpy()[idx_in_batch, 0]
            y1 = multi_mask.data.cpu().numpy()[idx_in_batch, 1]
            y2 = multi_mask.data.cpu().numpy()[idx_in_batch, 2]
            y3 = multi_mask.data.cpu().numpy()[idx_in_batch, 3]
            # sf.write(str(idx_in_batch)+'_pre_0.wav',multi_mask.data.cpu().numpy()[idx_in_batch,0].transpose(),44100)
            # sf.write(str(idx_in_batch)+'_pre_1.wav',multi_mask.data.cpu().numpy()[idx_in_batch,1].transpose(),44100)
            # sf.write(str(idx_in_batch)+'_pre_2.wav',multi_mask.data.cpu().numpy()[idx_in_batch,2].transpose(),44100)
            # sf.write(str(idx_in_batch)+'_pre_3.wav',multi_mask.data.cpu().numpy()[idx_in_batch,3].transpose(),44100)
            print('y0 range:', y0.min(), y0.max())
            print('y1 range:', y1.min(), y1.max())
            print('y2 range:', y2.min(), y2.max())
            print('y3 range:', y3.min(), y3.max())
            # input('wait')
            print('*' * 50)

        if 0 and updates > 10 and updates % config.eval_interval in [
                0, 1, 2, 3, 4, 5
        ]:
            utils.bss_eval_tas(config,
                               multi_mask,
                               train_data['multi_spk_fea_list'],
                               raw_tgt,
                               train_data,
                               dst=log_path + '/batch_output1')
            sdr_aver_batch, sdri_aver_batch = bss_test.cal(log_path +
                                                           '/batch_output1/')

            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SDRi sample': sdri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': sdri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))

        total_loss += loss.cpu().item()
        optim.step()

        updates += 1
        if updates % 30 == 0:
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f,label acc: %6.6f\n"
                % (time.time() - start_time, epoch, updates, 0,
                   total_loss_sgm / 30.0, total_loss_ss / 30.0, 0))
            # lera.log({'label_acc':report_correct/report_total})
            # writer.add_scalars('scalar/loss',{'label_acc':report_correct/report_total},updates)
            total_loss_sgm, total_loss_ss = 0, 0

        # continue

        if 0 and updates % config.eval_interval == 0 and epoch > 3:  # train for at least a few epochs before evaluating; otherwise the model has not learned anything yet and many problems appear
            logging(
                "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n" %
                (time.time() - start_time, epoch, updates, 0))
            print(('evaluating after %d updates...\r' % updates))
            original_bs = config.batch_size
            score = eval(epoch)  # batch_size changes to 1 during eval
            # print 'Orignal bs:',original_bs
            config.batch_size = original_bs
            # print 'Now bs:',config.batch_size
            for metric in config.metric:
                scores[metric].append(score[metric])
                lera.log({
                    'sgm_micro_f1': score[metric],
                })
                if metric == 'micro_f1' and score[metric] >= max(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')
                if metric == 'hamming_loss' and score[metric] <= min(
                        scores[metric]):
                    save_model(log_path + 'best_' + metric + '_checkpoint.pt')

            model.train()
            total_loss = 0
            start_time = 0
            report_total = 0
            report_correct = 0

        if updates > 10 and updates % config.save_interval == 1:
            save_model(log_path + 'TDAAv4_conditional_{}.pt'.format(updates))
Example #19
def train(args):
    try:
        os.makedirs(args.save_img_path)
    except OSError:
        pass

    try:
        os.makedirs(args.weight_path)
    except OSError:
        pass

    lera.log_hyperparams(
        {
            "title": "hw2",
            "batch_size": args.bs,
            "epochs": args.epochs,
            "g_lr": args.g_lr,
            "d_lr": args.d_lr,
            "z_size": args.z_size,
        }
    )

    # dataset
    dataloader = data_loader(
        args.data_path, args.imgsize, args.bs, shuffle=True
    )

    # model
    generator = Generator(args.bs, args.imgsize, z_dim=args.z_size).cuda()
    discriminator = Discriminator(args.bs, args.imgsize).cuda()
    if args.pre_epochs != 0:
        generator.load_state_dict(
            torch.load(
                join(f"{args.weight_path}", f"generator_{args.pre_epochs}.pth")
            )
        )

        discriminator.load_state_dict(
            torch.load(
                join(
                    f"{args.weight_path}",
                    f"discriminator_{args.pre_epochs}.pth",
                )
            )
        )

    # optimizer
    g_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, generator.parameters()), lr=args.g_lr
    )
    d_optimizer = torch.optim.SGD(
        filter(lambda p: p.requires_grad, discriminator.parameters()),
        lr=args.d_lr,
    )

    # validate noise
    fixed_noise = torch.randn(9, args.z_size)
    fixed_noise = torch.tensor(fixed_noise).cuda()

    # train
    for epoch in range(args.pre_epochs, args.epochs):
        for i, data in enumerate(dataloader):
            discriminator.train()
            generator.train()
            # train discriminator
            if i % 5 == 0:
                d_optimizer.zero_grad()
                real_img = torch.tensor(data[0]).cuda() * 2 - 1  # (-1, 1)
                d__real, _, _ = discriminator(real_img)
                z = torch.randn(args.bs, args.z_size)
                z = torch.tensor(z).cuda()
                fake_img, _, _ = generator(z)
                d_fake, _, _ = discriminator(fake_img)

                # hinge loss
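                # discriminator hinge loss: L_D = E[relu(1 - D(x_real))] + E[relu(1 + D(G(z)))]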
                d_loss_real = torch.nn.ReLU()(1.0 - d__real).mean()
                d_loss_fake = torch.nn.ReLU()(1.0 + d_fake).mean()
                d_loss = d_loss_real + d_loss_fake
                d_loss.backward()

                d_optimizer.step()
            # train generator
            g_optimizer.zero_grad()
            z = torch.randn(args.bs, args.z_size)
            z = torch.tensor(z).cuda()
            fake_img, _, _ = generator(z)
            g_fake, _, _ = discriminator(fake_img)

            # hinge loss
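            # generator hinge loss: L_G = -E[D(G(z))]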
            g_loss = -g_fake.mean()
            g_loss.backward()
            g_optimizer.step()

            if i % 100 == 0:
                lera.log({"d_loss": d_loss.item(), "g_loss": g_loss.item()})
                print(
                    "[epoch:%4d/%4d %3d/%3d] \t d_loss: %0.6f \t g_loss: %0.6f"
                    % (
                        epoch + 1,
                        args.epochs,
                        i,
                        len(dataloader),
                        d_loss.item(),
                        g_loss.item(),
                    )
                )
                if i % 300 == 0:
                    validate(
                        generator, i, epoch, args.save_img_path, fixed_noise
                    )

        torch.save(
            discriminator.state_dict(),
            f"./{args.weight_path}/discriminator_{epoch+1}.pth",
        )
        torch.save(
            generator.state_dict(),
            f"./{args.weight_path}/generator_{epoch+1}.pth",
        )
Example #20
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval(config,
                           predicted_maps,
                           eval_data['multi_spk_fea_list'],
                           raw_tgt,
                           eval_data,
                           dst='batch_output23jo')
            del predicted_maps, predicted_masks, x_input_map_multi
            SDR, SDRi = bss_test.cal('batch_output23jo/')
            # SDR_SUM = np.append(SDR_SUM, bss_test.cal('batch_output23jo/'))
            SDR_SUM = np.append(SDR_SUM, SDR)
            SDRi_SUM = np.append(SDRi_SUM, SDRi)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            lera.log({'SDR sample': SDR_SUM.mean()})
            lera.log({'SDRi sample': SDRi_SUM.mean()})
        elif batch_idx == (5000 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  #only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))

        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print('samples:', samples)
Example #21
def eval(epoch):
    # config.batch_size=1
    model.eval()
    print('\n\nPlease set batch_size to 1 in the config when testing!!!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print('SDR_aver_eval_epoch:', SDR_SUM.mean())
            print('SDRi_aver_eval_epoch:', SDRi_SUM.mean())
            break  # the generator for this epoch is exhausted; move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        raw_tgt = [
            sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']
        ]
        feas_tgt = models.rank_feas(
            raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        top_k = len(raw_tgt[0])
        # make sure the tensors below are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS markers.
        tgt = Variable(torch.ones(
            top_k + 2, config.batch_size))  # an arbitrary tgt; the speaker labels are irrelevant at test time.

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_sum = torch.sum(feas_tgt, dim=1, keepdim=True)
            feas_tgt_sum_square = (feas_tgt_sum *
                                   feas_tgt_sum).expand(tmp_size)
            feas_tgt_square = feas_tgt * feas_tgt
            WFM_mask = feas_tgt_square / feas_tgt_sum_square

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        if 1 and len(opt.gpus) > 1:
            samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        else:
            samples, alignment, hiddens, predicted_masks = model.beam_sample(
                src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print '*'*40+'\nThe model is far from good. End the evaluation.\n'+'*'*40
        #     break
        topk_max = len(samples[0]) - 1
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk_max, siz[1],
                                                      siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask

        if 0 and test_or_valid == 'valid':
            if 1 and len(opt.gpus) > 1:
                ss_loss = model.module.separation_loss(
                    x_input_map_multi,
                    predicted_masks,
                    feas_tgt,
                )
            else:
                ss_loss = model.separation_loss(x_input_map_multi,
                                                predicted_masks, feas_tgt)
            print('loss for ss,this batch:', ss_loss.cpu().item())
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            })
            del ss_loss, hiddens

        # '''''
        if batch_idx <= (500 / config.batch_size
                         ):  # only the former batches counts the SDR
            predicted_maps = predicted_masks * x_input_map_multi
            # predicted_maps=Variable(feas_tgt)
            utils.bss_eval2(config,
                            predicted_maps,
                            eval_data['multi_spk_fea_list'],
                            raw_tgt,
                            eval_data,
                            dst='batch_output')
            del predicted_maps, predicted_masks, x_input_map_multi
            try:
                SDR, SDRi = bss_test.cal('batch_output/')
                SDR_SUM = np.append(SDR_SUM, SDR)
                SDRi_SUM = np.append(SDRi_SUM, SDRi)
            except AssertionError as wrong_info:
                print('Errors in calculating the SDR:', wrong_info)
            print('SDR_aver_now:', SDR_SUM.mean())
            print('SDRi_aver_now:', SDRi_SUM.mean())
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            # raw_input('Press any key to continue......')
        elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean(
        ) > best_SDR:  # only record the best SDR once.
            print('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean()))
            best_SDR = SDR_SUM.mean()
Example #22
def train(epoch):
    global e, updates, total_loss, start_time, report_total, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()

    if config.schedule:
        scheduler.step()
        print("Decaying learning rate to %g" % scheduler.get_lr()[0])
        if config.is_dis:
            scheduler_dis.step()
        lera.log({
            'lr': scheduler.get_lr()[0],
        })

    if opt.model == 'gated':
        model.current_epoch = epoch

    if config.MLMSE:
        global Var

    train_data_gen = prepare_data('once', 'train')
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in trainloader:
    while True:
        train_data = next(train_data_gen)
        if train_data == False:
            break  # the generator for this epoch is exhausted; move on to the next epoch

        src = Variable(torch.from_numpy(train_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in train_data['multi_spk_fea_list']]
        raw_tgt = [
            sorted(spk.keys()) for spk in train_data['multi_spk_fea_list']
        ]
        feas_tgt = models.rank_feas(
            raw_tgt, train_data['multi_spk_fea_list'])  # target spectrograms

        # make sure the tensors below are LongTensors
        tgt = Variable(
            torch.from_numpy(
                np.array([[0] + [dict_spk2idx[spk]
                                 for spk in spks] + [dict_spk2idx['<EOS>']]
                          for spks in raw_tgt],
                         dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS/EOS markers
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            len(train_data['multi_spk_fea_list'][0])).unsqueeze(0)

        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_square_sum = torch.sum(feas_tgt_square,
                                            dim=1,
                                            keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_square_sum + 1e-10)
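            # ideal-ratio-style mask: squared target magnitude over the sum of squared magnitudes, with a small epsilon for numerical stability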

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        try:
            model.zero_grad()
            # optim.optimizer.zero_grad()
            outputs, targets, multi_mask = model(
                src, src_len, tgt,
                tgt_len)  # outputs are the hidden states before the final classification layer and can be used directly
            print('mask size:', multi_mask.size())

            if 1 and len(opt.gpus) > 1:
                sgm_loss, num_total, num_correct = model.module.compute_loss(
                    outputs, targets, opt.memory)
            else:
                sgm_loss, num_total, num_correct = model.compute_loss(
                    outputs, targets, opt.memory)
            print('loss for SGM, this batch:', sgm_loss.data[0] / num_total)

            if config.unit_norm:  #outputs---[len+1,bs,2*d]
                assert not config.global_emb
                unit_dis = (outputs[0] * outputs[1]).sum(1)
                print('unit_dis this batch:', unit_dis.data.cpu().numpy())
                unit_dis = torch.masked_select(unit_dis,
                                               unit_dis > config.unit_norm)
                if len(unit_dis) > 0:
                    unit_dis = unit_dis.mean()

            src = src.transpose(0, 1)
            # expand the raw mixed-features to topk channel.
            siz = src.size()
            assert len(siz) == 3
            topk = feas_tgt.size()[1]
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk, siz[1], siz[2])
            multi_mask = multi_mask.transpose(0, 1)

            if config.WFM:
                feas_tgt = x_input_map_multi.data * WFM_mask
            if 1 and len(opt.gpus) > 1:
                if config.MLMSE:
                    Var = model.module.update_var(x_input_map_multi,
                                                  multi_mask, feas_tgt)
                    lera.log_image(u'Var weight',
                                   Var.data.cpu().numpy().reshape(
                                       config.speech_fre, config.speech_fre,
                                       1).repeat(3, 2),
                                   clip=(-1, 1))
                    ss_loss = model.module.separation_loss(
                        x_input_map_multi, multi_mask, feas_tgt, Var)
                else:
                    ss_loss = model.module.separation_loss(
                        x_input_map_multi, multi_mask, feas_tgt)
            else:
                ss_loss = model.separation_loss(x_input_map_multi, multi_mask,
                                                feas_tgt)

            loss = sgm_loss + 5 * ss_loss
            if config.unit_norm and len(unit_dis):
                print('unit_dis masked mean:', unit_dis.data[0])
                lera.log({
                    'unit_dis': unit_dis.data[0],
                })
                loss = loss + unit_dis
            if config.reID:
                print('#' * 30 + 'ReID part ' + '#' * 30)
                predict_multi_map = multi_mask * x_input_map_multi
                predict_multi_map = predict_multi_map.view(
                    -1, mix_speech_len, speech_fre).transpose(0, 1)
                tgt_reID = []
                for spks in raw_tgt:
                    for spk in spks:
                        one_spk = [dict_spk2idx['<BOS>']] + [
                            dict_spk2idx[spk]
                        ] + [dict_spk2idx['<EOS>']]
                        tgt_reID.append(one_spk)
                tgt_reID = Variable(
                    torch.from_numpy(np.array(
                        tgt_reID, dtype=np.int))).transpose(0, 1).cuda()
                src_len_reID = Variable(
                    torch.LongTensor(topk * config.batch_size).zero_() +
                    mix_speech_len).unsqueeze(0).cuda()
                tgt_len_reID = Variable(
                    torch.LongTensor(topk * config.batch_size).zero_() +
                    1).unsqueeze(0).cuda()
                outputs_reID, targets_reID, multi_mask_reID = model(
                    predict_multi_map, src_len_reID, tgt_reID, tgt_len_reID
                )  # outputs are the hidden states before the final classification layer and can be used directly
                if 1 and len(opt.gpus) > 1:
                    sgm_loss_reID, num_total_reID, _xx = model.module.compute_loss(
                        outputs_reID, targets_reID, opt.memory)
                else:
                    sgm_loss_reID, num_total_reID, _xx = model.compute_loss(
                        outputs_reID, targets_reID, opt.memory)
                print('loss for SGM-reID, this batch:',
                      sgm_loss_reID.data[0] / num_total_reID)
                loss = loss + sgm_loss_reID
                if config.WFM:
                    feas_tgt = x_input_map_multi.data * WFM_mask
                if 1 and len(opt.gpus) > 1:
                    ss_loss_reID = model.module.separation_loss(
                        predict_multi_map.transpose(0, 1).unsqueeze(1),
                        multi_mask_reID.transpose(0, 1),
                        feas_tgt.view(-1, 1, mix_speech_len, speech_fre))
                else:
                    ss_loss_reID = model.separation_loss(
                        predict_multi_map.transpose(0, 1).unsqueeze(1),
                        multi_mask_reID.transpose(0, 1),
                        feas_tgt.view(-1, 1, mix_speech_len, speech_fre))
                loss = loss + ss_loss_reID
                print('#' * 30 + 'ReID part ' + '#' * 30)

            # dis_loss model
            if config.is_dis:
                dis_loss = models.loss.dis_loss(config, topk, model_dis,
                                                x_input_map_multi, multi_mask,
                                                feas_tgt, func_dis)
                loss = loss + dis_loss
                # print 'dis_para',model_dis.parameters().next()[0]
                # print 'ss_para',model.parameters().next()[0]

            loss.backward()
            # print 'totallllllllllll loss:',loss
            total_loss_sgm += sgm_loss.data[0]
            total_loss_ss += ss_loss.data[0]
            lera.log({
                'sgm_loss': sgm_loss.data[0],
                'ss_loss': ss_loss.data[0],
            })
            if config.reID:
                lera.log({
                    'reID_sgm_loss': sgm_loss_reID.data[0],
                    'reID_ss_loss': ss_loss_reID.data[0],
                })
            total_loss += loss.data[0]
            report_total += num_total
            optim.step()
            if config.is_dis:
                optim_dis.step()

            updates += 1
            if updates % 30 == 0:
                logging(
                    "time: %6.3f, epoch: %3d, updates: %8d, train loss this batch: %6.3f,sgm loss: %6.6f,ss loss: %6.6f\n"
                    % (time.time() - start_time, epoch, updates, loss /
                       num_total, total_loss_sgm / 30.0, total_loss_ss / 30.0))
                total_loss_sgm, total_loss_ss = 0, 0

            # continue

            if 0 or updates % config.eval_interval == 0 and epoch > 1:
                logging(
                    "time: %6.3f, epoch: %3d, updates: %8d, train loss: %6.5f\n"
                    % (time.time() - start_time, epoch, updates,
                       total_loss / report_total))
                print('evaluating after %d updates...\r' % updates)
                score = eval(epoch)
                for metric in config.metric:
                    scores[metric].append(score[metric])
                    if metric == 'micro_f1' and score[metric] >= max(
                            scores[metric]):
                        save_model(log_path + 'best_' + metric +
                                   '_checkpoint.pt')
                    if metric == 'hamming_loss' and score[metric] <= min(
                            scores[metric]):
                        save_model(log_path + 'best_' + metric +
                                   '_checkpoint.pt')

                model.train()
                total_loss = 0
                start_time = 0
                report_total = 0
        except RuntimeError as info:
            print('**************Error occurs here************:', info)
            continue

        if updates % config.save_interval == 1:
            save_model(log_path + 'TDAA2019_{}.pt'.format(updates))
Example #23
def eval(epoch):
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    print('Test or valid:', test_or_valid)
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    # for raw_src, src, src_len, raw_tgt, tgt, tgt_len in validloader:
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    while True:
        # for ___ in range(2):
        print('-' * 30)
        eval_data = next(eval_data_gen)
        if eval_data == False:
            break  # the generator for this epoch is exhausted; move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        raw_tgt = [
            sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']
        ]
        top_k = len(raw_tgt[0])
        # make sure the tensors below are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS markers.
        tgt = Variable(torch.ones(
            top_k + 2, config.batch_size))  # an arbitrary tgt; the speaker labels are irrelevant at test time.

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        feas_tgt = models.rank_feas(raw_tgt,
                                    eval_data['multi_spk_fea_list'])  # target spectrograms
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 4
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_square_sum = torch.sum(feas_tgt_square,
                                            dim=1,
                                            keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_square_sum + 1e-10)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()
        try:
            if 1 and len(opt.gpus) > 1:
                # samples, alignment = model.module.sample(src, src_len)
                samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                    src,
                    src_len,
                    dict_spk2idx,
                    tgt,
                    beam_size=config.beam_size)
            else:
                samples, alignment, hiddens, predicted_masks = model.beam_sample(
                    src,
                    src_len,
                    dict_spk2idx,
                    tgt,
                    beam_size=config.beam_size)
                # samples, alignment, hiddens, predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
        except Exception as info:
            print('**************Error eval occurs here************:', info)
            continue
        if len(samples[0]) != 3:
            print('Wrong number of mixtures; skipping.')
            continue

        if config.top1:
            predicted_masks = torch.cat([predicted_masks, 1 - predicted_masks],
                                        1)

        # '''
        # expand the raw mixed-features to topk channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        topk = feas_tgt.size()[1]
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk, siz[1],
                                                      siz[2])
        if config.WFM:
            feas_tgt = x_input_map_multi.data * WFM_mask
        if 1 and len(opt.gpus) > 1:
            ss_loss = model.module.separation_loss(x_input_map_multi,
                                                   predicted_masks, feas_tgt,
                                                   None)
        else:
            ss_loss = model.separation_loss(x_input_map_multi, predicted_masks,
                                            feas_tgt, None)
        print('loss for ss, this batch:', ss_loss.data[0])
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.data[0],
        })

        del ss_loss, hiddens
        if 0 and config.reID:
            print('#' * 30 + 'ReID part ' + '#' * 30)
            predict_multi_map = predicted_masks * x_input_map_multi
            predict_multi_map = predict_multi_map.view(-1, mix_speech_len,
                                                       speech_fre).transpose(
                                                           0, 1)
            tgt_reID = Variable(torch.ones(
                3, top_k * config.batch_size))  # an arbitrary tgt; the speaker labels are irrelevant at test time
            src_len_reID = Variable(
                torch.LongTensor(topk * config.batch_size).zero_() +
                mix_speech_len).unsqueeze(0).cuda()
            try:
                if 1 and len(opt.gpus) > 1:
                    # samples, alignment = model.module.sample(src, src_len)
                    samples, alignment, hiddens, predicted_masks = model.module.beam_sample(
                        predict_multi_map,
                        src_len_reID,
                        dict_spk2idx,
                        tgt_reID,
                        beam_size=config.beam_size)
                else:
                    samples, alignment, hiddens, predicted_masks = model.beam_sample(
                        predict_multi_map,
                        src_len_reID,
                        dict_spk2idx,
                        tgt_reID,
                        beam_size=config.beam_size)
                    # samples, alignment, hiddens, predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, beam_size=config.beam_size)
            except Exception as info:
                print('**************Error eval occurs here************:', info)
            # outputs_reID, targets_reID, multi_mask_reID = model(predict_multi_map, src_len_reID, tgt_reID, tgt_len_reID)  # outputs are the hidden states before the final classification layer and can be used directly
            if batch_idx <= (500 / config.batch_size
                             ):  #only the former batches counts the SDR
                # predicted_maps=predicted_masks*x_input_map_multi
                predicted_maps = predicted_masks * predict_multi_map.transpose(
                    0, 1).unsqueeze(1)
                predicted_maps = predicted_maps.transpose(0, 1)
                # predicted_maps=Variable(feas_tgt)
                utils.bss_eval(config,
                               predicted_maps,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst='batch_output23jo')
                del predicted_maps, predicted_masks, x_input_map_multi, predict_multi_map
                SDR, SDRi = bss_test.cal('batch_output23jo/')
                # SDR_SUM = np.append(SDR_SUM, bss_test.cal('batch_output23jo/'))
                SDR_SUM = np.append(SDR_SUM, SDR)
                SDRi_SUM = np.append(SDRi_SUM, SDRi)
                print('SDR_aver_now:', SDR_SUM.mean())
                print('SDRi_aver_now:', SDRi_SUM.mean())
                lera.log({'SDR sample': SDR_SUM.mean()})
                lera.log({'SDRi sample': SDRi_SUM.mean()})
            elif batch_idx == (500 / config.batch_size) + 1 and SDR_SUM.mean(
            ) > best_SDR:  #only record the best SDR once.
                print('Best SDR from {}---->{}'.format(best_SDR,
                                                       SDR_SUM.mean()))
                best_SDR = SDR_SUM.mean()
                # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
            print('#' * 30 + 'ReID part ' + '#' * 30)
def train(config):
    print('Random seed: %d' % int(config.seed))
    torch.manual_seed(config.seed)
    
    torch.backends.cudnn.benchmark = True

    dset = config.dataset
    if dset == 'modelnet10' or dset == 'modelnet40':
        dataset = ClsDataset(root=config.root, npoints=config.npoints, train=True)
        test_dataset = ClsDataset(root=config.root, npoints=config.npoints, train=False)
    else:
        raise NotImplementedError('Dataset not supported.')
    
    print('Selected %s' % dset)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.batchsize, shuffle=True, 
                num_workers=config.workers)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batchsize, shuffle=True, 
        num_workers=config.workers)

    num_classes = dataset.num_classes
    print('number of classes: %d' % num_classes)
    print('train set size: %d | test set size: %d' % (len(dataset), len(test_dataset)))
    try:
        os.makedirs(config.outf)
    except:
        pass

    blue = lambda x: '\033[94m' + x + '\033[0m'
    yellow = lambda x: '\033[93m' + x + '\033[0m'
    red = lambda x: '\033[91m' + x + '\033[0m'

    classifier = PointNetCls(k=num_classes)

    if config.model != '':
        classifier.load_state_dict(torch.load(config.model))

    optimizer = optim.SGD(classifier.parameters(), lr=config.lr, momentum=config.momentum)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    classifier.to(device)
    if config.mgpu:
        classifier = torch.nn.DataParallel(classifier, device_ids=config.gpuids)

    num_batch = len(dataset) // config.batchsize

    lera.log_hyperparams({
        'title': dset, 
        'batchsize': config.batchsize, 
        'epochs': config.nepochs, 
        'npoints': config.npoints, 
        'optimizer': 'SGD', 
        'lr': config.lr, 
        })

    for epoch in range(config.nepochs):
        train_acc_epoch, test_acc_epoch = [], []
        for i, data in enumerate(dataloader):
            points, labels = data
            points = points.transpose(2, 1)
            labels = labels[:, 0]
            points, labels = points.to(device), labels.to(device)
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            # print(pred.size(), labels.size())
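            # assuming PointNetCls ends in log_softmax (as in the reference PointNet implementation), NLL loss is the matching criterion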
            loss = F.nll_loss(pred, labels)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(labels.data).cpu().sum()
            train_acc = correct.item() / float(config.batchsize)
            print('epoch %d: %d/%d | train loss: %f | train acc: %f' % (epoch+1, i+1, num_batch+1, loss.item(), train_acc))
            train_acc_epoch.append(train_acc)
            lera.log({
                'train loss': loss.item(), 
                'train acc': train_acc
                })

            if (i+1) % 10 == 0:
                j, data = next(enumerate(test_dataloader, 0))
                points, labels = data
                points = points.transpose(2, 1)
                labels = labels[:, 0]
                points, labels = points.to(device), labels.to(device)
                classifier = classifier.eval()
                with torch.no_grad():
                    pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                loss = F.nll_loss(pred, labels)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(labels.data).cpu().sum()
                test_acc = correct.item() / float(config.batchsize)
                print(blue('epoch %d: %d/%d | test loss: %f | test acc: %f') % (epoch+1, i+1, num_batch+1, loss.item(), test_acc))
                test_acc_epoch.append(test_acc)
                lera.log({
                    'test loss': loss.item(), 
                    'test acc': test_acc
                    })
        print(yellow('epoch %d | mean train acc: %f') % (epoch+1, np.mean(train_acc_epoch)))
        print(red('epoch %d | mean test acc: %f') % (epoch+1, np.mean(test_acc_epoch)))
        lera.log({
            'train acc epoch': np.mean(train_acc_epoch), 
            'test acc epoch': np.mean(test_acc_epoch)})
        torch.save(classifier.state_dict(), '%s/%s_model_%d.pth' % (config.outf, config.dataset, epoch))
def eval(epoch):
    # config.batch_size=1
    model.eval()
    # print('\n\nPlease set batch_size to 1 in the config when testing!!!')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    test_or_valid = 'test'
    # test_or_valid = 'valid'
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data == False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted; move on to the next epoch
        src = Variable(torch.from_numpy(eval_data['mix_feas']))

        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt, eval_data['multi_spk_fea_list'])  # target spectrograms

        top_k = len(raw_tgt[0])
        # make sure the tensors below are LongTensors
        # tgt = Variable(torch.from_numpy(np.array([[0]+[dict_spk2idx[spk] for spk in spks]+[dict_spk2idx['<EOS>']] for spks in raw_tgt],dtype=np.int))).transpose(0,1)  # convert to indices, then add BOS/EOS markers.
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS/EOS markers.
        # tgt = Variable(torch.from_numpy(np.array([[0,1,2,3,102] for __ in range(config.batch_size)], dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS/EOS markers.
        tgt = Variable(
            torch.from_numpy(
                np.array([
                    list(range(top_k + 1)) + [102]
                    for __ in range(config.batch_size)
                ],
                         dtype=np.int))).transpose(0, 1)  # convert to indices, then add BOS/EOS markers

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)
        # tgt_len = Variable(torch.LongTensor(config.batch_size).zero_()+len(eval_data['multi_spk_fea_list'][0])).unsqueeze(0)
        if config.WFM:
            tmp_size = feas_tgt.size()
            assert len(tmp_size) == 3
            feas_tgt_square = feas_tgt * feas_tgt
            feas_tgt_sum_square = torch.sum(feas_tgt_square,
                                            dim=0,
                                            keepdim=True).expand(tmp_size)
            WFM_mask = feas_tgt_square / (feas_tgt_sum_square + 1e-15)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()
            if config.WFM:
                WFM_mask = WFM_mask.cuda()

        # The multi-GPU and single-GPU branches were identical here, so the call is made once.
        outputs, pred, targets, multi_mask, dec_enc_attn_list = model(
            src,
            src_len,
            tgt,
            tgt_len,
            dict_spk2idx,
            None,
            mix_wav=padded_mixture
        )  # outputs are the hidden_outputs, i.e. the hidden layer before the final classification, and can be used directly
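        # Take the argmax over the speaker vocabulary at each decoding step to obtain the
        # predicted speaker-index sequence for every sample in the batch.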
        samples = list(
            pred.view(config.batch_size, top_k + 1,
                      -1).max(2)[1].data.cpu().numpy())
        '''

        if 1 and len(opt.gpus) > 1:
            samples,  predicted_masks = model.module.beam_sample(src, src_len, dict_spk2idx, tgt, config.beam_size,None,padded_mixture)
        else:
            samples,  predicted_masks = model.beam_sample(src, src_len, dict_spk2idx, tgt, config.beam_size, None, padded_mixture)
            multi_mask = predicted_masks
            samples=[samples]
        # except:
        #     continue

        # '''
        # expand the raw mixed-features to topk_max channel.
        src = src.transpose(0, 1)
        siz = src.size()
        assert len(siz) == 3
        # if samples[0][-1] != dict_spk2idx['<EOS>']:
        #     print '*'*40+'\nThe model is far from good. End the evaluation.\n'+'*'*40
        #     break
        topk_max = top_k
        x_input_map_multi = torch.unsqueeze(src,
                                            1).expand(siz[0], topk_max, siz[1],
                                                      siz[2])
        multi_mask = multi_mask.transpose(0, 1)

        if test_or_valid != 'test':
            if config.use_tas:
                if 1 and len(opt.gpus) > 1:
                    ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                        padded_mixture, multi_mask, padded_source,
                        mixture_lengths)
                else:
                    ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                        padded_mixture, multi_mask, padded_source,
                        mixture_lengths)
            print(('loss for ss,this batch:', ss_loss.cpu().item()))
            lera.log({
                'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
            })
            del ss_loss

        # '''''
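        # For roughly the first 500 samples, write the separated outputs to disk and
        # score them with BSS-Eval; SDR/SDRi averages are accumulated across batches.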
        if 1 and batch_idx <= (500 / config.batch_size):  # only the first batches count towards the SDR
            utils.bss_eval_tas(config,
                               multi_mask,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst=log_path + 'batch_output/')
            del multi_mask, x_input_map_multi
            try:
                sdr_aver_batch, sdri_aver_batch = bss_test.cal(log_path +
                                                               'batch_output/')
                SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
                SDRi_SUM = np.append(SDRi_SUM, sdri_aver_batch)
            except AssertionError:
                print('Errors in calculating the SDR')
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SDRi_aver_now:', SDRi_SUM.mean()))
            lera.log({'SDR sample' + test_or_valid: SDR_SUM.mean()})
            lera.log({'SDRi sample' + test_or_valid: SDRi_SUM.mean()})
            writer.add_scalars('scalar/loss',
                               {'SDR_sample_' + test_or_valid: sdr_aver_batch},
                               updates)
            # raw_input('Press any key to continue......')
        elif batch_idx == (200 / config.batch_size) + 1 and SDR_SUM.mean() > best_SDR:  # only record the best SDR once.
            print(('Best SDR from {}---->{}'.format(best_SDR, SDR_SUM.mean())))
            best_SDR = SDR_SUM.mean()
            # save_model(log_path+'checkpoint_bestSDR{}.pt'.format(best_SDR))
        '''
        import matplotlib.pyplot as plt
        ax = plt.gca()
        ax.invert_yaxis()

        raw_src=models.rank_feas(raw_tgt,eval_data['multi_spk_fea_list'])
        att_idx=0
        att =dec_enc_attn_list.data.cpu().numpy()[:,att_idx] # head,topk,T
        for spk in range(3):
            xx=att[:,spk]
            plt.matshow(xx.reshape(8,1,-1).repeat(50,1).reshape(-1,751), cmap=plt.cm.hot, vmin=0,vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'spk_{}.png'.format(spk))
            plt.matshow(xx.sum(0).reshape(1, 1, -1).repeat(50, 1).reshape(-1, 751), cmap=plt.cm.hot, vmin=0, vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path + 'batch_output/' + 'spk_sum_{}.png'.format(spk))
        for head in range(8):
            xx=att[head]
            plt.matshow(xx.reshape(3,1,-1).repeat(100,1).reshape(-1,751), cmap=plt.cm.hot, vmin=0,vmax=0.05)
            plt.colorbar()
            plt.savefig(log_path+'batch_output/'+'head_{}.png'.format(head))
        plt.matshow(raw_src[att_idx*2+0].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
        plt.colorbar()
        plt.savefig(log_path+'batch_output/'+'source0.png')
        plt.matshow(raw_src[att_idx*2+1].transpose(0,1), cmap=plt.cm.hot, vmin=0,vmax=2)
        plt.colorbar()
        plt.savefig(log_path+'batch_output/'+'source1.png')
        # '''
        candidate += [
            convertToLabels(dict_idx2spk, s, dict_spk2idx['<EOS>'])
            for s in samples
        ]
        # source += raw_src
        reference += raw_tgt
        print(('samples:', samples))
        print(('can:{}, \nref:{}'.format(candidate[-1 * config.batch_size:],
                                         reference[-1 * config.batch_size:])))
        # alignments += [align for align in alignment]
        batch_idx += 1

        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))

    score = {}
    result = utils.eval_metrics(reference, candidate, dict_spk2idx, log_path)
    logging_csv([e, updates, result['hamming_loss'], \
                 result['micro_f1'], result['micro_precision'], result['micro_recall'],SDR_SUM.mean()])
    print(('hamming_loss: %.8f | micro_f1: %.4f' %
           (result['hamming_loss'], result['micro_f1'])))
    score['hamming_loss'] = result['hamming_loss']
    score['micro_f1'] = result['micro_f1']
    1 / 0  # apparently a deliberate ZeroDivisionError to halt after one evaluation pass; remove this line to let the function return its score
    return score
Example #26
def eval(epoch, test_or_valid='train'):
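    """Evaluation variant that reconstructs waveforms from masked spectrograms.

    Based on the code below: the predicted T-F masks are applied to the mixture
    spectrogram, combined with the mixture phase, inverted with an iSTFT, and
    scored with a permutation-invariant separation loss plus BSS-Eval SDR / SI-SNRi.
    """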
    # config.batch_size=1
    global updates, model
    model.eval()
    # print('\n\nWhen testing, please set the batch_size in config to 1!!! please set the batch_size as 1')
    reference, candidate, source, alignments = [], [], [], []
    e = epoch
    print(('Test or valid:', test_or_valid))
    eval_data_gen = prepare_data('once', test_or_valid, config.MIN_MIX,
                                 config.MAX_MIX)
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])
    batch_idx = 0
    global best_SDR, Var
    # for iii in range(2000):
    while True:
        print(('-' * 30))
        eval_data = next(eval_data_gen)
        if eval_data is False:
            print(('SDR_aver_eval_epoch:', SDR_SUM.mean()))
            print(('SDRi_aver_eval_epoch:', SDRi_SUM.mean()))
            break  # the generator for this epoch is exhausted, so stop and move on to the next epoch

        src = Variable(torch.from_numpy(eval_data['mix_feas']))
        # raw_tgt = [spk.keys() for spk in eval_data['multi_spk_fea_list']]
        # raw_tgt = [sorted(spk.keys()) for spk in eval_data['multi_spk_fea_list']]
        raw_tgt = eval_data['batch_order']
        feas_tgt = models.rank_feas(
            raw_tgt,
            eval_data['multi_spk_wav_list'])  # target signals, bs*topk, time_len

        padded_mixture, mixture_lengths, padded_source = eval_data['tas_zip']
        padded_mixture = torch.from_numpy(padded_mixture).float()
        mixture_lengths = torch.from_numpy(mixture_lengths)
        padded_source = torch.from_numpy(padded_source).float()

        padded_mixture = padded_mixture.cuda().transpose(0, 1)
        mixture_lengths = mixture_lengths.cuda()
        padded_source = padded_source.cuda()

        # make sure the tensors below are LongTensors (long integers)
        tgt_max_len = config.MAX_MIX + 2  # with bos and eos.
        # tgt = Variable(torch.from_numpy(np.array(
        #     [[0] + [dict_spk2idx[spk] for spk in spks] + (tgt_max_len - len(spks) - 1) * [dict_spk2idx['<EOS>']] for
        #      spks in raw_tgt], dtype=np.int64))).transpose(0, 1)  # convert to indices, then add the start and end symbols.
        tgt = Variable(
            torch.from_numpy(
                np.array([[0, 1, 2, 102] for __ in range(config.batch_size)],
                         dtype=np.int64))).transpose(0, 1)  # convert to indices, then add the start and end symbols (np.int was removed from NumPy, so int64 is used)
        src_len = Variable(
            torch.LongTensor(config.batch_size).zero_() +
            mix_speech_len).unsqueeze(0)
        tgt_len = Variable(
            torch.LongTensor([
                len(one_spk) for one_spk in eval_data['multi_spk_fea_list']
            ])).unsqueeze(0)

        if use_cuda:
            src = src.cuda().transpose(0, 1)
            tgt = tgt.cuda()
            src_len = src_len.cuda()
            tgt_len = tgt_len.cuda()
            feas_tgt = feas_tgt.cuda()

        model.zero_grad()
        if config.use_center_loss:
            center_loss.zero_grad()

        multi_mask, enc_attn_list = model(
            src, src_len, tgt, tgt_len,
            dict_spk2idx)  # the outputs are the hidden_outputs, i.e. the hidden layer before the final classification, and can be used directly
        multi_mask = multi_mask.transpose(0, 1)
        print('mask size:', multi_mask.size())  # bs,topk,T,F
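        # Apply the first two predicted masks to the mixture spectrogram to get the
        # estimated spectrogram of each speaker.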

        predicted_maps0_spectrogram = multi_mask[:, 0] * src.transpose(
            0, 1)  #bs,T,F
        predicted_maps1_spectrogram = multi_mask[:, 1] * src.transpose(
            0, 1)  #bs,T,F

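        # Frame-wise permutation analysis: for every time frame, compare both speaker
        # assignments ([0, 1] vs [1, 0]) against a phase-sensitive target by MSE, keep
        # the better one, and report the fraction of frames that end up with the
        # minority assignment (751 appears to be the hard-coded number of frames).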
        if True:  # Analyze the optimal assignments
            predicted_spectrogram = torch.cat([
                predicted_maps0_spectrogram.unsqueeze(1),
                predicted_maps1_spectrogram.unsqueeze(1)
            ], 1)
            feas_tgt_tmp = models.rank_feas(
                raw_tgt,
                eval_data['multi_spk_fea_list'])  # target spectrograms, bs*topk, len, fre
            src = src.transpose(0, 1)
            siz = src.size()  # bs,T,F
            assert len(siz) == 3
            # topk_max = config.MAX_MIX  # maximum possible number of speakers (topk)
            topk_max = 2  # maximum possible number of speakers (topk)
            x_input_map_multi = torch.unsqueeze(src, 1).expand(
                siz[0], topk_max, siz[1], siz[2]).contiguous()  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt_tmp.view(siz[0], -1, siz[1], siz[2])

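            # Phase-sensitive target: the target spectrogram magnitudes are scaled by
            # cos(phase difference between mixture and target), clipped at zero,
            # i.e. |S_k| * max(cos(theta_mix - theta_k), 0), as computed below.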
            angle_tgt = models.rank_feas(
                raw_tgt, eval_data['multi_spk_angle_list']).view(
                    siz[0], -1, siz[1], siz[2])  # bs,topk,T,F
            angle_mix = Variable(
                torch.from_numpy(np.array(
                    eval_data['mix_angle']))).unsqueeze(1).expand(
                        siz[0], topk_max, siz[1], siz[2]).contiguous()
            ang = np.cos(angle_mix - angle_tgt)
            ang = np.clip(ang, 0, None)

            feas_tgt_tmp = feas_tgt_tmp.view(siz[0], -1, siz[1],
                                             siz[2]) * ang  # bs,topk,T,F
            feas_tgt_tmp = feas_tgt_tmp.cuda()
            del x_input_map_multi
            src = src.transpose(0, 1)
            MSE_func = nn.MSELoss().cuda()
            best_perms_this_batch = []
            for bs_idx in range(siz[0]):
                best_perms_this_sample = []
                for tt in range(siz[1]):  # for each frame
                    tar = feas_tgt_tmp[bs_idx, :, tt]  #topk,F
                    est = predicted_spectrogram[bs_idx, :, tt]  #topk,F
                    best_loss_mse_this_batch = -1
                    for idx, per in enumerate([[0, 1], [1, 0]]):
                        if idx == 0:
                            best_loss_mse_this_batch = MSE_func(est[per], tar)
                            perm_this_frame = per
                            predicted_spectrogram[bs_idx, :, tt] = est[per]
                        else:
                            loss = MSE_func(est[per], tar)
                            if loss <= best_loss_mse_this_batch:
                                best_loss_mse_this_batch = loss
                                perm_this_frame = per
                                predicted_spectrogram[bs_idx, :, tt] = est[per]

                    best_perms_this_sample.append(perm_this_frame)
                best_perms_this_batch.append(best_perms_this_sample)
            print(
                'different assignment ratio:',
                np.mean(np.min(
                    np.array(best_perms_this_batch).sum(1) / 751, 1)))
            # predicted_maps0_spectrogram = predicted_spectrogram[:,0]
            # predicted_maps1_spectrogram = predicted_spectrogram[:,1]

        _mix_spec = eval_data['mix_phase']  # bs,T,F,2
        angle_mix = np.angle(_mix_spec)
        predicted_maps0_real = predicted_maps0_spectrogram * torch.from_numpy(
            np.cos(angle_mix)).cuda()  # e^(ix) = cos(x) + i*sin(x)
        predicted_maps0_imag = predicted_maps0_spectrogram * torch.from_numpy(
            np.sin(angle_mix)).cuda()
        predicted_maps1_real = predicted_maps1_spectrogram * torch.from_numpy(
            np.cos(angle_mix)).cuda()
        predicted_maps1_imag = predicted_maps1_spectrogram * torch.from_numpy(
            np.sin(angle_mix)).cuda()

        stft_matrix_spk0 = torch.cat((predicted_maps0_real.unsqueeze(-1),
                                      predicted_maps0_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        stft_matrix_spk1 = torch.cat((predicted_maps1_real.unsqueeze(-1),
                                      predicted_maps1_imag.unsqueeze(-1)),
                                     3).transpose(1, 2)  # bs,F,T,2
        wav_spk0 = models.istft_irfft(stft_matrix_spk0,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        wav_spk1 = models.istft_irfft(stft_matrix_spk1,
                                      length=config.MAX_LEN,
                                      hop_length=config.FRAME_SHIFT,
                                      win_length=config.FRAME_LENGTH,
                                      window='hann')
        predict_wav = torch.cat((wav_spk0.unsqueeze(1), wav_spk1.unsqueeze(1)),
                                1)  # bs,topk,time_len
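        # Utterance-level permutation-invariant loss (presumably SI-SNR based, judging by
        # the names): separation_tas_loss returns the loss, the candidate permutations,
        # and the index of the best-SNR permutation for each sample.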
        if 1 and len(opt.gpus) > 1:
            ss_loss, pmt_list, max_snr_idx, *__ = model.module.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)
        else:
            ss_loss, pmt_list, max_snr_idx, *__ = model.separation_tas_loss(
                padded_mixture, predict_wav, padded_source, mixture_lengths)

        best_pmt = [
            list(pmt_list[int(mm)].data.cpu().numpy()) for mm in max_snr_idx
        ]
        print('loss for SS,this batch:', ss_loss.cpu().item())
        print('best perms for this batch:', best_pmt)
        writer.add_scalars('scalar/loss', {'ss_loss': ss_loss.cpu().item()},
                           updates)
        lera.log({
            'ss_loss_' + test_or_valid: ss_loss.cpu().item(),
        })
        writer.add_scalars('scalar/loss',
                           {'ss_loss_' + test_or_valid: ss_loss.cpu().item()},
                           updates + batch_idx)
        del ss_loss
        # if batch_idx>10:
        #     break

        if False:  # this part is to test the checkpoints sequentially.
            batch_idx += 1
            if batch_idx % 100 == 0:
                updates = updates + 1000
                opt.restore = '/data1/shijing_data/2020-02-14-04:58:17/Transformer_PIT_{}.pt'.format(
                    updates)
                print('loading checkpoint...\n', opt.restore)
                checkpoints = torch.load(opt.restore)
                model.module.load_state_dict(checkpoints['model'])
                break
            continue
        # '''''
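        # For roughly the first 500 samples, write the reconstructed waveforms to disk
        # and score them with BSS-Eval (SDR) and SI-SNRi.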
        if 1 and batch_idx <= (500 / config.batch_size):
            utils.bss_eval_tas(config,
                               predict_wav,
                               eval_data['multi_spk_fea_list'],
                               raw_tgt,
                               eval_data,
                               dst=log_path + 'batch_output')
            sdr_aver_batch, snri_aver_batch = bss_test.cal(log_path +
                                                           'batch_output/')
            lera.log({'SDR sample': sdr_aver_batch})
            lera.log({'SI-SNRi sample': snri_aver_batch})
            writer.add_scalars('scalar/loss', {
                'SDR_sample': sdr_aver_batch,
                'SDRi_sample': snri_aver_batch
            }, updates)
            SDR_SUM = np.append(SDR_SUM, sdr_aver_batch)
            SDRi_SUM = np.append(SDRi_SUM, snri_aver_batch)
            print(('SDR_aver_now:', SDR_SUM.mean()))
            print(('SNRi_aver_now:', SDRi_SUM.mean()))

        batch_idx += 1
        if batch_idx > 100:
            break
        # NOTE: in this variant `reference` and `candidate` are never populated, so these
        # multi-label metrics are computed over empty lists.
        result = utils.eval_metrics(reference, candidate, dict_spk2idx,
                                    log_path)
        print((
            'hamming_loss: %.8f | micro_f1: %.4f |recall: %.4f | precision: %.4f'
            % (
                result['hamming_loss'],
                result['micro_f1'],
                result['micro_recall'],
                result['micro_precision'],
            )))