コード例 #1
0
ファイル: utils.py プロジェクト: rg321/pointFlow_exp
def validate_sample(loader, model, args, max_samples=None, save_dir=None):
    """Sample from ``model`` to match each test batch and score the samples.

    For every batch from ``loader``, draws a generated point cloud of the
    same (B, N) shape as the reference test points, denormalizes both with
    the per-batch mean/std, then computes the full metric suite plus JSD.

    Args:
        loader: iterable yielding dicts with 'idx', 'test_points', 'mean',
            'std' (torch tensors).
        model: generator exposing ``sample(B, N, gpu=...)``.
        args: namespace providing ``gpu``, ``rank``, ``batch_size`` and
            ``save_val_results``.
        max_samples: optional cap; iteration stops once at least this many
            reference samples have been collected.
        save_dir: if given (and ``args.save_val_results``), dump the sample
            and reference clouds as .npy files there.

    Returns:
        dict of metric name -> value (tensors), including 'JSD'.
    """
    from metrics.evaluation_metrics import compute_all_metrics, jsd_between_point_cloud_sets as JSD

    all_sample = []
    all_ref = []
    ttl_samples = 0

    for data in loader:
        idx_b, te_pc = data['idx'], data['test_points']
        te_pc = te_pc.cuda() if args.gpu is None else te_pc.cuda(args.gpu)
        # Generate a batch with the same cardinality as the reference batch.
        _, out_pc = model.sample(te_pc.size(0), te_pc.size(1), gpu=args.gpu)

        # Denormalize both clouds back to the original coordinate frame.
        m, s = data['mean'].float(), data['std'].float()
        m = m.cuda() if args.gpu is None else m.cuda(args.gpu)
        s = s.cuda() if args.gpu is None else s.cuda(args.gpu)
        out_pc = out_pc * s + m
        te_pc = te_pc * s + m

        all_sample.append(out_pc)
        all_ref.append(te_pc)

        ttl_samples += int(te_pc.size(0))
        # Early stop once we have gathered enough reference samples.
        if max_samples is not None and ttl_samples >= max_samples:
            break

    sample_pcs = torch.cat(all_sample, dim=0)
    ref_pcs = torch.cat(all_ref, dim=0)
    print("[rank %s] Generation Sample size:%s Ref size: %s" %
          (args.rank, sample_pcs.size(), ref_pcs.size()))

    if save_dir is not None and args.save_val_results:
        smp_pcs_save_name = os.path.join(save_dir,
                                         "smp_syn_pcls_gpu%s.npy" % args.gpu)
        ref_pcs_save_name = os.path.join(save_dir,
                                         "ref_syn_pcls_gpu%s.npy" % args.gpu)
        np.save(smp_pcs_save_name, sample_pcs.cpu().detach().numpy())
        np.save(ref_pcs_save_name, ref_pcs.cpu().detach().numpy())
        print("Saving file:%s %s" % (smp_pcs_save_name, ref_pcs_save_name))

    res = compute_all_metrics(sample_pcs,
                              ref_pcs,
                              args.batch_size,
                              accelerated_cd=True)
    pprint(res)

    # JSD is computed on numpy arrays, then moved back onto the device so the
    # returned dict is uniformly tensor-valued.
    sample_pcs = sample_pcs.cpu().detach().numpy()
    ref_pcs = ref_pcs.cpu().detach().numpy()
    jsd = JSD(sample_pcs, ref_pcs)
    jsd = torch.tensor(jsd).cuda() if args.gpu is None else torch.tensor(
        jsd).cuda(args.gpu)
    res.update({"JSD": jsd})
    print("JSD     :%s" % jsd)
    return res
コード例 #2
0
def evaluate_gen(sample_pcs, ref_pcs):
    """Score a generated/reference point-cloud pair.

    Runs the full metric suite, converts tensor-valued metrics to plain
    Python floats, and adds the JSD computed on numpy copies.

    Relies on module-level ``compute_all_metrics``, ``JSD`` and ``args``.
    """
    raw = compute_all_metrics(sample_pcs, ref_pcs, args.batch_size, accelerated_cd=True)

    # Tensor metrics become floats; values that are already floats pass through.
    results = {}
    for key, val in raw.items():
        if isinstance(val, float):
            results[key] = val
        else:
            results[key] = val.cpu().detach().item()

    # JSD operates on numpy arrays.
    results['JSD'] = JSD(sample_pcs.cpu().detach().numpy(),
                         ref_pcs.cpu().detach().numpy())

    return results
コード例 #3
0
def evaluate_gen(model, args):
    """Evaluate generative sampling quality of ``model`` on the test set.

    Draws samples shaped like each test batch, denormalizes both sides,
    saves the raw clouds next to the checkpoint, and computes the metric
    suite plus JSD.

    Args:
        model: generator exposing ``sample(B, N, gpu=...)``.
        args: namespace providing ``gpu``, ``batch_size`` and
            ``resume_checkpoint``.

    Returns:
        dict of metric name -> float, including 'JSD'.
        (Previously the metrics were computed but discarded — nothing was
        returned; the JSD is now also folded into the result dict.)
    """
    loader = get_test_loader(args)
    all_sample = []
    all_ref = []
    for data in loader:
        idx_b, te_pc = data['idx'], data['test_points']
        te_pc = te_pc.cuda() if args.gpu is None else te_pc.cuda(args.gpu)
        B, N = te_pc.size(0), te_pc.size(1)
        _, out_pc = model.sample(B, N, gpu=args.gpu)

        # Denormalize both clouds back to the original coordinate frame.
        m, s = data['mean'].float(), data['std'].float()
        m = m.cuda() if args.gpu is None else m.cuda(args.gpu)
        s = s.cuda() if args.gpu is None else s.cuda(args.gpu)
        out_pc = out_pc * s + m
        te_pc = te_pc * s + m

        all_sample.append(out_pc)
        all_ref.append(te_pc)

    sample_pcs = torch.cat(all_sample, dim=0)
    ref_pcs = torch.cat(all_ref, dim=0)
    print("Generation sample size:%s reference size: %s" %
          (sample_pcs.size(), ref_pcs.size()))

    # Save the generative output next to the checkpoint being evaluated.
    save_dir = os.path.dirname(args.resume_checkpoint)
    np.save(os.path.join(save_dir, "model_out_smp.npy"),
            sample_pcs.cpu().detach().numpy())
    np.save(os.path.join(save_dir, "model_out_ref.npy"),
            ref_pcs.cpu().detach().numpy())

    # Compute metrics; tensor values are flattened to plain floats.
    results = compute_all_metrics(sample_pcs,
                                  ref_pcs,
                                  args.batch_size,
                                  accelerated_cd=True)
    results = {
        k: (v.cpu().detach().item() if not isinstance(v, float) else v)
        for k, v in results.items()
    }
    pprint(results)

    sample_pcl_npy = sample_pcs.cpu().detach().numpy()
    ref_pcl_npy = ref_pcs.cpu().detach().numpy()
    jsd = JSD(sample_pcl_npy, ref_pcl_npy)
    print("JSD:%s" % jsd)
    # Fix: record the JSD and actually return the metrics instead of None.
    results['JSD'] = jsd
    return results
コード例 #4
0
ファイル: utils.py プロジェクト: voidstrike/TDPNet
def validate(model, tloader, image_flag=False):
    """Validate ``model`` reconstructions against ground-truth point clouds.

    For each batch, reconstructs a 2048-point cloud from the multi-view
    images and scores it against the ground truth with Chamfer distance,
    approximate EMD, and the full metric suite.

    Args:
        model: reconstruction model exposing ``eval()`` and
            ``reconstruct(views, num_points)``.
        tloader: iterable yielding (multi_view, pc, stat) tuples.
        image_flag: unused; kept for interface compatibility.

    Returns:
        dict of metric name -> float, with 'CD' and 'EMD' mean distances
        added. (Previously the dict was computed but never returned.)
    """
    from metrics.evaluation_metrics import emd_approx, distChamferCUDA, compute_all_metrics, jsd_between_point_cloud_sets

    model.eval()
    cd_list, emd_list = list(), list()
    ttl_samples = 0

    all_sample = list()
    all_ref = list()

    # Inference only — avoid building autograd graphs for the whole test set.
    with torch.no_grad():
        for idx, (multi_view, pc, stat) in enumerate(tloader):
            mv = np.stack(multi_view, axis=1).squeeze(1)
            mv = torch.from_numpy(mv)

            multi_view = mv.cuda()

            tr_pc = pc.cuda()

            out_pc = model.reconstruct(multi_view, 2048)

            loss_1, loss_2 = distChamferCUDA(out_pc, tr_pc)
            cd_list.append(loss_1.mean(dim=1) + loss_2.mean(dim=1))

            # Fix: the "sample" set is the model output and the "reference"
            # set is the ground truth (they were appended swapped before).
            all_sample.append(out_pc)
            all_ref.append(tr_pc)

            emd_batch = emd_approx(out_pc, tr_pc)
            emd_list.append(emd_batch)

            ttl_samples += int(tr_pc.size(0))

    cd = torch.cat(cd_list).mean()
    emd = torch.cat(emd_list).mean()

    sample_pcs = torch.cat(all_sample, dim=0)
    ref_pcs = torch.cat(all_ref, dim=0)

    result = compute_all_metrics(sample_pcs, ref_pcs, 64, accelerated_cd=True)
    result = {
        k: (v.cpu().detach().item() if not isinstance(v, float) else v)
        for k, v in result.items()
    }
    # Fold the summary distances into the returned dict.
    result['CD'] = cd.item()
    result['EMD'] = emd.item()

    print("Chamfer Distance  :%s" % cd.item())
    print("Earth Mover Distance :%s" % emd.item())
    return result
コード例 #5
0
def evaluate_gen(itr, model, results_mva, log, log_mva, args):
    """Run the ``itr``-th generative evaluation and update a running average.

    Samples a cloud for every test batch (using a noise scale derived from
    args), denormalizes, computes metrics + JSD, appends one JSON line to
    each log, and returns the updated moving-average dict.

    Args:
        itr: 1-based evaluation counter; ``itr == 1`` (re)initializes the
            moving average from this evaluation's results.
        model: generator exposing ``sample(B, N, std_in=..., std_z=...)``.
        results_mva: dict of averaged metrics from previous calls; ignored
            and replaced when ``itr == 1``.
        log, log_mva: open writable file-like handles for per-iteration and
            moving-average JSON lines respectively.
        args: namespace providing gpu, test_std_n, std_max, std_scale,
            test_std_z and batch_size.

    Returns:
        The updated moving-average dict (also carries 'JSD' and 'itr').
    """
    print('---- %dth evaluation ----' % itr)
    loader = get_test_loader(args)
    all_sample = []
    all_ref = []

    # Sampling noise scale derived from the CLI test settings.
    std_in = args.test_std_n / args.std_max * args.std_scale

    for data in loader:
        idx_b, te_pc = data['idx'], data['test_points']
        te_pc = te_pc.cuda() if args.gpu is None else te_pc.cuda(args.gpu)
        B, N = te_pc.size(0), te_pc.size(1)
        _, out_pc = model.sample(B, N, std_in=std_in, std_z=args.test_std_z)

        # denormalize
        m, s = data['mean'].float(), data['std'].float()
        m = m.cuda() if args.gpu is None else m.cuda(args.gpu)
        s = s.cuda() if args.gpu is None else s.cuda(args.gpu)
        out_pc = out_pc * s + m
        te_pc = te_pc * s + m

        all_sample.append(out_pc)
        all_ref.append(te_pc)
    sample_pcs = torch.cat(all_sample, dim=0)
    ref_pcs = torch.cat(all_ref, dim=0)

    # Compute metrics
    metrics = compute_all_metrics(sample_pcs,
                                  ref_pcs,
                                  args.batch_size,
                                  accelerated_cd=True)
    sample_pcl_npy = sample_pcs.cpu().detach().numpy()
    ref_pcl_npy = ref_pcs.cpu().detach().numpy()
    jsd = JSD(sample_pcl_npy, ref_pcl_npy)
    if itr == 1:
        # First evaluation: the moving average is just this run's results.
        results = {
            k: (v.cpu().detach().item() if not isinstance(v, float) else v)
            for k, v in metrics.items()
        }
        results['JSD'] = jsd
        results['itr'] = itr
        results_mva = results
    else:
        results = {}
        for k, v in metrics.items():
            if not isinstance(v, float):
                v = v.cpu().detach().item()
            results[k] = v
            # NOTE(review): this weights the prior average by ``itr`` even
            # though only itr-1 evaluations precede this one; a standard
            # running mean would be (results_mva[k]*(itr-1) + v) / itr.
            # Confirm whether the extra weighting is intentional.
            results_mva[k] = (results_mva[k] * itr + v) / (itr + 1)

        results['JSD'] = jsd
        results_mva['JSD'] = (results_mva['JSD'] * itr + jsd) / (itr + 1)
        results['itr'] = itr
        results_mva['itr'] = itr

    # One JSON line per evaluation in each log; flush so progress survives
    # an interrupted run.
    log.write(json.dumps(results) + '\n')
    log_mva.write(json.dumps(results_mva) + '\n')
    log.flush()
    log_mva.flush()

    pprint(results_mva)

    return results_mva
コード例 #6
0
    idx_batch, tr_test_batch, te_test_batch = data['idx'], data[
        'train_points'], data['test_points']
    net.eval()
    with torch.no_grad():
        test_input = tr_test_batch.cuda()
        test_input_t = torch.transpose(test_input, 1, 2)
        test_output, test_loss, test_cd, test_emd = net(test_input_t,
                                                        optimizer,
                                                        step,
                                                        args.loss_type,
                                                        args.accelerated_cd,
                                                        writer_train=None)
        test_output = test_output.cuda()
        vis.scatter(test_input[0], name='input')
        vis.scatter(test_output[0], name='output')

    all_gen_pc.append(test_output)
    all_input_pc.append(test_input)

    num_all_pcs += test_input.size(0)

# Aggregate the generated / input batches collected in the loop above into
# single (total_B, N, 3)-style tensors for metric computation.
sample_pcs = torch.cat(all_gen_pc, dim=0)
ref_pcs = torch.cat(all_input_pc, dim=0)
print("Generation Sample size:%s Ref size: %s" %
      (sample_pcs.size(), ref_pcs.size()))

# calculate mmd_cd and mmd_emd (full metric suite over the batched clouds)
res = compute_all_metrics(sample_pcs, ref_pcs, args.batch_size,
                          args.accelerated_cd)
print(res)