Code Example #1
File: loss.py Project: owen94/ResidualPointFlow
def cd_margin_loss(x, y, thres=0.01, alpha=0.5):
    dist1, dist2 = ChamferDistance()(x, y)
    dist1, dist2 = dist1[dist1 > thres], dist2[dist2 > thres]

    loss = torch.tensor(0).to(x)
    if dist1.size(0) > 0:
        loss += alpha * torch.mean(dist1)
    if dist2.size(0) > 0:
        loss += (1 - alpha) * torch.mean(dist2)
    return loss
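A minimal usage sketch for the margin loss above. It assumes, as the other examples on this page do, that ChamferDistance()(x, y) returns two sets of per-point squared distances with shapes (B, N) and (B, M) for (B, N, 3) and (B, M, 3) inputs; the chamfer_distance extension is typically built for CUDA, so the tensors live on the GPU here.

import torch
from chamfer_distance import ChamferDistance

x = torch.rand(4, 1024, 3, device='cuda', requires_grad=True)  # predictions, B x N x 3
y = torch.rand(4, 1024, 3, device='cuda')                      # references,  B x M x 3

# points closer than `thres` to the other cloud are ignored entirely
loss = cd_margin_loss(x, y, thres=0.01, alpha=0.5)
if loss.requires_grad:  # loss is a plain zero when every point is within thres
    loss.backward()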
Code Example #2
File: model.py Project: zeta1999/structedit
    def __init__(self, config):
        super(RecursiveDecoder, self).__init__()

        self.conf = config

        self.box_decoder = BoxDecoder(config.feature_size, config.hidden_size)
        self.box_diff_decoder = BoxDiffDecoder(config.feature_size,
                                               config.hidden_size)
        self.child_decoder = ConcatChildDecoder(config.feature_size,
                                                config.hidden_size,
                                                config.max_child_num)
        self.sample_decoder = SampleDecoder(config.feature_size,
                                            config.hidden_size)
        self.leaf_classifier = LeafClassifier(config.feature_size,
                                              config.hidden_size)
        self.bceLoss = nn.BCEWithLogitsLoss(reduction='none')
        self.chamferLoss = ChamferDistance()
        self.ceLoss = nn.CrossEntropyLoss(reduction='none')

        self.node_diff_feature_extractor = NodeDiffFeatureExtractor(
            config.feature_size, config.hidden_size)
        self.node_diff_classifier = NodeDiffClassifier(config.feature_size,
                                                       config.hidden_size)
        self.add_child_decoder = AddChildDecoder(config.feature_size,
                                                 config.hidden_size,
                                                 config.max_child_num)

        self.register_buffer('unit_cube',
                             torch.from_numpy(load_pts('cube.pts')))
Code Example #3
    def __init__(self, config):
        super(RecursiveDecoder, self).__init__()

        self.conf = config

        self.node_decoder = NodeDecoder(config.geo_feat_size, config.feature_size, config.num_point)

        self.child_decoder = GNNChildDecoder(
            node_feat_size=config.feature_size,
            hidden_size=config.hidden_size,
            max_child_num=config.max_child_num,
            edge_symmetric_type=config.edge_symmetric_type,
            num_iterations=config.num_dec_gnn_iterations,
            edge_type_num=len(config.edge_types))

        self.sample_decoder = SampleDecoder(config.feature_size, config.hidden_size)

        self.leaf_classifier = LeafClassifier(config.feature_size, config.hidden_size)

        self.bceLoss = nn.BCEWithLogitsLoss(reduction='none')
        self.chamferLoss = ChamferDistance()
        self.semCELoss = nn.CrossEntropyLoss(reduction='none')
        self.mseLoss = nn.MSELoss(reduction='none')

        self.register_buffer('unit_cube', torch.from_numpy(load_pts('cube.pts')))
Code Example #4
def cd(x, y):
    # x = x.permute(0, 2, 1)
    # y = y.permute(0, 2, 1)
    d1, d2 = ChamferDistance()(x, y)
    loss = torch.sum(d1, dim=1) + torch.sum(d2, dim=1)
    print(loss)
    return torch.mean(loss, dim=0)
Code Example #5
    def __init__(self, feat_len, num_point):
        super(PartDecoder, self).__init__()
        self.num_point = num_point

        self.mlp1 = nn.Linear(feat_len, feat_len)
        self.mlp2 = nn.Linear(feat_len, feat_len)
        self.mlp3 = nn.Linear(feat_len, num_point * 3)

        self.bn1 = nn.BatchNorm1d(feat_len)
        self.bn2 = nn.BatchNorm1d(feat_len)

        self.chamferLoss = ChamferDistance()
Code Example #6
    def get_simplification_loss(self,
                                ref_pc,
                                samp_pc,
                                pc_size,
                                gamma=1,
                                delta=0):
        if self.skip_projection or not self.training:
            return torch.tensor(0).to(ref_pc)
        # ref_pc and samp_pc are B x N x 3 matrices
        cost_p1_p2, cost_p2_p1 = ChamferDistance()(samp_pc, ref_pc)
        max_cost = torch.max(cost_p1_p2, dim=1)[0]  # furthest point
        max_cost = torch.mean(max_cost)
        cost_p1_p2 = torch.mean(cost_p1_p2)
        cost_p2_p1 = torch.mean(cost_p2_p1)
        loss = cost_p1_p2 + max_cost + (gamma + delta * pc_size) * cost_p2_p1
        return loss
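This is the simplification objective used in point-cloud sampling networks (the skip_projection flag suggests a SampleNet-style module): the mean and worst-case sampled-to-reference distances pull the samples onto the reference surface, while the reference-to-sampled term, scaled by (gamma + delta * pc_size), rewards coverage. A standalone sketch of the same computation outside a module, under the same (B, N) distance-shape assumption:

import torch
from chamfer_distance import ChamferDistance

def simplification_loss(ref_pc, samp_pc, pc_size, gamma=1, delta=0):
    # samp_pc -> ref_pc: mean and worst-case distances pull samples onto the surface
    cost_p1_p2, cost_p2_p1 = ChamferDistance()(samp_pc, ref_pc)
    max_cost = torch.mean(torch.max(cost_p1_p2, dim=1)[0])
    # ref_pc -> samp_pc: coverage term, weighted by (gamma + delta * pc_size)
    return (torch.mean(cost_p1_p2) + max_cost
            + (gamma + delta * pc_size) * torch.mean(cost_p2_p1))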
Code Example #7
File: model.py Project: whigg/pcrnet_pytorch
    def __init__(self, args):
        super(PCRNet, self).__init__()
        self.emb_dims = args.emb_dims
        self.cycle = args.cycle
        self.iterations = args.iterations
        if args.emb_nn == 'pointnet':
            self.emb_nn = PointNet(emb_dims=self.emb_dims)
        elif args.emb_nn == 'dgcnn':
            self.emb_nn = DGCNN(emb_dims=self.emb_dims)
        else:
            raise Exception('Not implemented')

        if args.pointer == 'identity':
            self.pointer = Identity()
        else:
            raise Exception("Not implemented")

        if args.head == 'mlp':
            self.head = MLPHead(args=args)
        else:
            raise Exception('Not implemented')
        self.chamfer = ChamferDistance()
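A hedged construction sketch for PCRNet: only the attribute names are read off the snippet above; the concrete values (e.g. emb_dims=1024) are illustrative assumptions, and the classes themselves come from the pcrnet_pytorch project.

from argparse import Namespace

# hypothetical settings; only the field names are taken from the code above
args = Namespace(emb_dims=1024, cycle=False, iterations=3,
                 emb_nn='pointnet', pointer='identity', head='mlp')
model = PCRNet(args)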
Code Example #8
File: utils.py Project: zqcolorful/GEOMetrics
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch.nn as nn
from tqdm import tqdm
from voxel import *
from torch.utils.data import DataLoader
from PIL import Image

from torchvision.transforms import Normalize as norm
from torchvision import transforms
preprocess = transforms.Compose(
    [transforms.Resize((224, 224)),
     transforms.ToTensor()])

from chamfer_distance import ChamferDistance
chamfer_dist = ChamferDistance()

from tri_distance import TriDistance
tri_dist = TriDistance()


# loads the initial mesh and stores vertex, face, and adjacency matrix information
def load_initial(obj='386.obj'):
    # load obj file
    obj = ObjLoader(obj)
    labels = np.array(obj.vertices)
    features = torch.FloatTensor(labels).cuda()
    faces = torch.LongTensor(np.array(obj.faces) - 1).cuda()

    points = torch.rand([1000, 3]).cuda() - .5
    verts = features.clone()
Code Example #9
def main():
    # Parse the training argument
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--training',
        help='Decide whether to train the model or just run testing on previously saved model.')
    args = parser.parse_args()

    is_training = args.training

    if (is_training is None) or (is_training == 'True'):
        is_training = True
    else:
        is_training = False
    print('is_training mode = ', is_training)

    chamferDist = ChamferDistance()

    # Decide on GPU or CPU
    if torch.cuda.is_available():
        gpu_or_cpu = torch.device('cuda')
    else:
        gpu_or_cpu = torch.device('cpu')

    # Training Configuration
    # image_root = "./../../../datasets/cs253-wi20-public/ShapeNetRendering/"
    # point_cloud_root = "./../../../datasets/cs253-wi20-public/ShapeNet_pointclouds/"
    image_root = "/datasets/cs253-wi20-public/ShapeNetRendering/"
    point_cloud_root = "/datasets/cs253-wi20-public/ShapeNet_pointclouds/"

    num_epochs = 1000
    batch_size = 64
    shuffle = True
    num_workers = 8
    use_2048 = True
    img_size = 227  # I don't know why, but this has to be 227!
    learning_rate = 1e-4
    num_points = 2048
    transform = transforms.Compose([
        transforms.Resize(img_size, interpolation=2),
        transforms.CenterCrop(img_size),
        transforms.ToTensor()
    ])
    # Checkpoint
    use_checkpoint = False

    # Split and Get data. Override the saved files if you change the ratios.
    train_ratio = 0.8
    val_ratio = 0.1
    test_ratio = 0.1

    split_data(train_ratio, val_ratio, test_ratio, overrideFiles=False)

    path_train = 'train_data.txt'
    path_val = 'val_data.txt'
    path_test = 'test_data.txt'

    train_data = read_from_file(path_train)
    val_data = read_from_file(path_val)
    test_data = read_from_file(path_test)

    # Data loader
    train_data_loader = get_loader(image_root, point_cloud_root, train_data,
                                   use_2048, transform, batch_size, shuffle,
                                   num_workers)

    val_data_loader = get_loader(image_root, point_cloud_root, val_data,
                                 use_2048, transform, batch_size, shuffle,
                                 num_workers)
    test_data_loader = get_loader(image_root, point_cloud_root, test_data,
                                  use_2048, transform, batch_size, shuffle,
                                  num_workers)

    print('Len of train loader = ', len(train_data_loader))

    # create model
    print("model building...")
    model = pic2points(num_points=num_points)
    model.to(device=gpu_or_cpu)

    if is_training:
        # Train
        print('Starting training...')
        train_losses, val_loss, best_model = train(
            model,
            train_data_loader,
            val_data_loader,
            chamferDist,
            model_name="Baseline_DL_Vis",
            num_epochs=num_epochs,
            lr=learning_rate,
            use_checkpoint=use_checkpoint)
    else:
        best_model = torch.load('best-Baseline_DL_Vis.pt')
        print('Loaded previously saved model.')

    model = best_model.cuda()
    model.eval()

    # Compute chamfer distance on Pix3D dataset.
    img_path = "/datasets/cs253-wi20-public/pix3d/"
    pc_path = "/datasets/cs253-wi20-public/pix_pointclouds/"

    objects = ['table', 'sofa']

    test_dataset = TestDataset(img_path, pc_path, objects)

    test_data_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                   batch_size=1,
                                                   shuffle=True,
                                                   num_workers=8)

    print('Starting testing on Pix3D dataset...')
    total_test_loss = 0.
    # Accumulate the loss over the Pix3D test data.
    with torch.no_grad():
        for i, (image, point_cloud) in enumerate(test_data_loader):

            image, point_cloud = Variable(image), Variable(point_cloud)

            # skip images that are not 3-channel RGB
            if image.size(1) != 3:
                continue

            image = image.float().to(device=gpu_or_cpu)
            point_cloud = point_cloud.float().to(device=gpu_or_cpu)
            pred = model(image)
            dist1, dist2 = chamferDist(pred, point_cloud)
            loss = torch.mean(dist1) + torch.mean(dist2)
            total_test_loss += loss.item()

            if i % 100 == 0:
                print('Batch ' + str(i) + ' finished.')

    print('Chamfer distance on Pix3D dataset = ',
          total_test_loss / len(test_data_loader))
Code Example #10
def train():  # header reconstructed; the original excerpt starts mid-loop
    total_loss = 0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        decoded, _ = model(data)
        dist1, dist2 = criterion(decoded.reshape(-1, 2048, 3),
                                 data.y.reshape(-1, 2048, 3))
        loss = (torch.mean(dist1)) + (torch.mean(dist2))
        loss.backward()
        total_loss += loss.item() * data.num_graphs
        optimizer.step()
    return total_loss / len(dataset)


if __name__ == '__main__':

    dataset = Completion3D('../data/Completion3D',
                           split='train',
                           categories='Airplane')
    print(dataset[0])
    train_loader = DataLoader(dataset, batch_size=32, shuffle=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = SaNet().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    print(model)
    print('Training started:')
    criterion = ChamferDistance()
    for epoch in range(1, 401):
        loss = train()
        print('Epoch {:03d}, Loss: {:.4f}'.format(epoch, loss))
        if epoch % 10 == 0:
            torch.save(model.state_dict(),
                       './trained/SA_net_Ch' + '{}'.format(epoch) + '.pt')
Code Example #11
File: pcn_occo.py Project: lt6253090/OcCo
def dist_cd(pc1, pc2):
    chamfer_dist = ChamferDistance()
    dist1, dist2 = chamfer_dist(pc1, pc2)
    return torch.mean(dist1) + torch.mean(dist2)
Code Example #12
File: pointnet_occo.py Project: zeta1999/OcCo
def dist_cd(pc1, pc2):
    chamfer_dist = ChamferDistance()
    dist1, dist2 = chamfer_dist(pc1, pc2)
    return (torch.mean(torch.sqrt(dist1)) + torch.mean(torch.sqrt(dist2))) / 2
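Examples #11 and #12 compute the two standard Chamfer variants: #11 averages the squared nearest-neighbor distances (often called CD-L2), while #12 takes the square root of each distance before averaging and halves the sum (CD-L1). Side by side, under the same per-point squared-distance convention:

import torch
from chamfer_distance import ChamferDistance

def cd_l2(pc1, pc2):
    d1, d2 = ChamferDistance()(pc1, pc2)  # squared distances, shapes B x N and B x M
    return torch.mean(d1) + torch.mean(d2)

def cd_l1(pc1, pc2):
    d1, d2 = ChamferDistance()(pc1, pc2)
    return (torch.mean(torch.sqrt(d1)) + torch.mean(torch.sqrt(d2))) / 2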
Code Example #13
File: loss_function.py Project: zjl12138/disp_VAE
def compute_chamfer_loss(gt_points, reconstruct_points):
    chamfer_dist = ChamferDistance()
    dist1, dist2 = chamfer_dist(gt_points, reconstruct_points)
    loss = torch.mean(dist1, dim=-1) + torch.mean(dist2, dim=-1)
    return loss
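Because the means here reduce over the last (point) dimension only, compute_chamfer_loss returns one value per batch element rather than a scalar, so reduce it yourself before calling backward. A small shape check (random data; CUDA assumed, as elsewhere on this page):

import torch

gt = torch.rand(8, 2048, 3, device='cuda')
rec = torch.rand(8, 2048, 3, device='cuda')
per_sample = compute_chamfer_loss(gt, rec)  # shape (8,), one loss per cloud
loss = per_sample.mean()                    # scalar, ready for loss.backward()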
Code Example #14
    def __init__(self):
        super(ChamfersDistance, self).__init__()
        self.chamfer_dist = ChamferDistance()
Code Example #15
def main(args):
    """
    for each mesh, render 4-view-normals and get mesh-voxels, also compute and save {3D-IoU, Normal errors}
    """

    # flags for visual sanity check
    visualCheck_0 = False

    # init.
    rn = ColoredRenderer()
    rn.camera = ProjectPointsOrthogonal(
        rt=np.array([0, 0, 0]), t=np.array([0, 0, 2]),
        f=np.array([consts.dim_h * 2, consts.dim_h * 2]),
        c=np.array([consts.dim_w, consts.dim_h]), k=np.zeros(5))
    rn.frustum = {'near': 0.5, 'far': 25,
                  'height': consts.dim_h * 2, 'width': consts.dim_w * 2}
    chamfer_dist = ChamferDistance()

    # get training/test data indices
    training_inds, testing_inds = get_training_test_indices(args=args, shuffle=False)
    meshRefinedPath_list = compute_split_range(testing_inds=testing_inds, args=args)

    # for each mesh, render 4-view-normals and get mesh-voxels, also compute and save {3D-IoU, Normal errors}
    frameIdx = [0, 0, 0]
    frameIdx[0] = int(meshRefinedPath_list[0].split("/")[-1].split("_meshRefined")[0])
    frameIdx[2] = int(meshRefinedPath_list[-1].split("/")[-1].split("_meshRefined")[0]) + 1
    count = 0
    timeStart = time.time()
    for meshPath in meshRefinedPath_list:

        # init.
        frameIdx[1] = int(meshPath.split("/")[-1].split("_meshRefined")[0])
        evalMetricsPath = "%s/%06d_evalMetrics.json" % (args.resultsDir, frameIdx[1])
        evalMetricsPath_Next = "%s/%06d_evalMetrics.json" % (args.resultsDir, frameIdx[1] + 1)
        evalMetricsPath_additional = "%s/%06d_evalMetrics_additional.json" % (args.resultsDir, frameIdx[1])
        evalMetricsPath_Next_additional = "%s/%06d_evalMetrics_additional.json" % (args.resultsDir, frameIdx[1] + 1)
        if (os.path.exists(evalMetricsPath) and os.path.exists(evalMetricsPath_Next)
                and os.path.exists(evalMetricsPath_additional)
                and os.path.exists(evalMetricsPath_Next_additional)):
            continue

        # ----- compute point based distance metrics -----
        if True:

            # note that the losses have been multiplied by "scale"
            chamfer_dis, estV_2_gtM_dis, estMesh = compute_point_based_metrics(
                args=args, estMeshPath=meshPath, preFix=frameIdx[1],
                chamfer_dist=chamfer_dist, scale=10000.)

        # ----- save the additional eval metrics into .json of args.resultsDir dir -----
        if True:

            evalMetrics_additional = {"chamfer_dis"   : chamfer_dis,
                                      "estV_2_gtM_dis": estV_2_gtM_dis}
            with open(evalMetricsPath_additional, "w") as outfile:
                json.dump(evalMetrics_additional, outfile)
            if visualCheck_0:
                print("check eval metrics additional json results...")
                print(evalMetrics_additional)
                os.system("cp %s ./examples/%06d_evalPrepare_metrics_additional.json" % (evalMetricsPath_additional,frameIdx[1]))
                pdb.set_trace()

        # ----- render front-view-normal & compute normal errors of [cos-dis, l2-dis] -----
        if not args.only_compute_additional_metrics:

            normal_errors = compute_n_save_normal_erros(
                args=args, estMeshPath=meshPath, rn=rn,
                preFix=frameIdx[1], estMesh=estMesh)
            assert len(normal_errors) == 1 and len(normal_errors[0]) == 2

        # ----- save the eval metrics into .json of args.resultsDir dir -----
        if not args.only_compute_additional_metrics:

            evalMetrics = {"norm_cos_dis_ft": np.array([normal_errors[0][0]]).tolist(),
                           "norm_l2_dis_ft":  np.array([normal_errors[0][1]]).tolist()}
            with open(evalMetricsPath, 'w') as outfile:
                json.dump(evalMetrics, outfile)
            visualCheck = False
            if visualCheck:
                print("check eval metrics json results...")
                print(evalMetrics)
                os.system("cp %s ./examples/%06d_evalPrepare_metrics.json" % (evalMetricsPath,frameIdx[1]))
                pdb.set_trace()

        # compute timing info
        count += 1
        hrsPassed = (time.time()-timeStart) / 3600.
        hrsEachIter = hrsPassed / count
        numItersRemain = len(meshRefinedPath_list) - count
        hrsRemain = numItersRemain * hrsEachIter # hours that remain
        minsRemain = hrsRemain * 60. # minutes that remain

        # log
        expName = args.resultsDir.split("/")[-1]
        print("Exp. %s inference: split %d/%d | frameIdx %06d-%06d-%06d | remains %.3f m(s) ......" % (expName,args.splitIdx,args.splitNum,frameIdx[0],frameIdx[1],frameIdx[2],minsRemain))
Code Example #16
File: modelutils.py Project: jakubzadrozny/thesis
def cd(x, y):
    x = x.permute(0, 2, 1)
    y = y.permute(0, 2, 1)
    d1, d2 = ChamferDistance()(x, y)
    return torch.sum(d1, dim=1) + torch.sum(d2, dim=1)
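Unlike most snippets here (and matching the commented-out lines in Example #4), this variant takes channels-first input, (B, 3, N), as produced by many PointNet-style decoders, and returns a per-sample sum of distances instead of a mean. A usage sketch under that assumption:

import torch

pred = torch.rand(4, 3, 2048, device='cuda')    # B x 3 x N, channels first
target = torch.rand(4, 3, 2048, device='cuda')
per_sample = cd(pred, target)                   # permuted to B x N x 3 internally; shape (4,)
loss = per_sample.mean()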
Code Example #17
File: eval_utils.py Project: zeta1999/structedit
def compute_gen_sd_numbers(in_dir, data_path, object_list, shapediff_topk,
                           shapediff_metric, self_is_neighbor, tot_shape):
    chamfer_loss = ChamferDistance()
    unit_cube = torch.from_numpy(utils.load_pts('cube.pts'))

    def box_dist(box_feature, gt_box_feature):
        pred_box_pc = utils.transform_pc_batch(unit_cube, box_feature)
        pred_reweight = utils.get_surface_reweighting_batch(
            box_feature[:, 3:6], unit_cube.size(0))
        gt_box_pc = utils.transform_pc_batch(unit_cube, gt_box_feature)
        gt_reweight = utils.get_surface_reweighting_batch(
            gt_box_feature[:, 3:6], unit_cube.size(0))
        dist1, dist2 = chamfer_loss(gt_box_pc, pred_box_pc)
        loss1 = (dist1 * gt_reweight).sum(dim=1) / (gt_reweight.sum(dim=1) +
                                                    1e-12)
        loss2 = (dist2 *
                 pred_reweight).sum(dim=1) / (pred_reweight.sum(dim=1) + 1e-12)
        loss = (loss1 + loss2) / 2
        return loss

    def struct_dist(gt_node, pred_node):
        if gt_node.is_leaf:
            if pred_node.is_leaf:
                return 0
            else:
                return len(pred_node.boxes()) - 1
        else:
            if pred_node.is_leaf:
                return len(gt_node.boxes()) - 1
            else:
                gt_sem = set([node.label for node in gt_node.children])
                pred_sem = set([node.label for node in pred_node.children])
                intersect_sem = set.intersection(gt_sem, pred_sem)

                gt_cnodes_per_sem = dict()
                for node_id, gt_cnode in enumerate(gt_node.children):
                    if gt_cnode.label in intersect_sem:
                        if gt_cnode.label not in gt_cnodes_per_sem:
                            gt_cnodes_per_sem[gt_cnode.label] = []
                        gt_cnodes_per_sem[gt_cnode.label].append(node_id)

                pred_cnodes_per_sem = dict()
                for node_id, pred_cnode in enumerate(pred_node.children):
                    if pred_cnode.label in intersect_sem:
                        if pred_cnode.label not in pred_cnodes_per_sem:
                            pred_cnodes_per_sem[pred_cnode.label] = []
                        pred_cnodes_per_sem[pred_cnode.label].append(node_id)

                matched_gt_idx = []
                matched_pred_idx = []
                matched_gt2pred = np.zeros((100), dtype=np.int32)
                for sem in intersect_sem:
                    gt_boxes = torch.cat(
                        [gt_node.children[cid].get_box_quat()
                         for cid in gt_cnodes_per_sem[sem]], dim=0)
                    pred_boxes = torch.cat(
                        [pred_node.children[cid].get_box_quat()
                         for cid in pred_cnodes_per_sem[sem]], dim=0)

                    num_gt = gt_boxes.size(0)
                    num_pred = pred_boxes.size(0)

                    if num_gt == 1 and num_pred == 1:
                        cur_matched_gt_idx = [0]
                        cur_matched_pred_idx = [0]
                    else:
                        gt_boxes_tiled = gt_boxes.unsqueeze(dim=1).repeat(
                            1, num_pred, 1)
                        pred_boxes_tiled = pred_boxes.unsqueeze(dim=0).repeat(
                            num_gt, 1, 1)
                        dmat = box_dist(gt_boxes_tiled.view(-1, 10),
                                        pred_boxes_tiled.view(-1, 10))
                        dmat = dmat.view(-1, num_gt, num_pred)
                        _, cur_matched_gt_idx, cur_matched_pred_idx = \
                            utils.linear_assignment(dmat)

                    for i in range(len(cur_matched_gt_idx)):
                        gt_id = gt_cnodes_per_sem[sem][cur_matched_gt_idx[i]]
                        pred_id = pred_cnodes_per_sem[sem][cur_matched_pred_idx[i]]
                        matched_gt_idx.append(gt_id)
                        matched_pred_idx.append(pred_id)
                        matched_gt2pred[gt_id] = pred_id

                struct_diff = 0.0
                for i in range(len(gt_node.children)):
                    if i not in matched_gt_idx:
                        struct_diff += len(gt_node.children[i].boxes())

                for i in range(len(pred_node.children)):
                    if i not in matched_pred_idx:
                        struct_diff += len(pred_node.children[i].boxes())

                for i in range(len(matched_gt_idx)):
                    gt_id = matched_gt_idx[i]
                    pred_id = matched_pred_idx[i]
                    struct_diff += struct_dist(gt_node.children[gt_id],
                                               pred_node.children[pred_id])

                return struct_diff

    # create dataset and data loader
    data_features = [
        'object', 'name', 'neighbor_diffs', 'neighbor_objs', 'neighbor_names'
    ]
    dataset = PartNetShapeDiffDataset(data_path, object_list, data_features,
                                      shapediff_topk, shapediff_metric,
                                      self_is_neighbor)

    tot_gen = 100
    bar = ProgressBar()
    quality = 0.0
    coverage = 0.0
    for i in bar(range(tot_shape)):
        obj, obj_name, neighbor_diffs, neighbor_objs, neighbor_names = dataset[i]

        mat1 = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        mat2 = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        for j in range(tot_gen):
            gen_obj = PartNetDataset.load_object(
                os.path.join(in_dir, obj_name, 'obj2-%03d.json' % j))
            for ni in range(shapediff_topk):
                sd = struct_dist(neighbor_objs[ni].root, gen_obj.root)
                mat1[ni, j] = sd / len(neighbor_objs[ni].root.boxes())
                mat2[ni, j] = sd / len(gen_obj.root.boxes())

        quality += mat2.min(axis=0).mean()
        coverage += mat1.min(axis=1).mean()
        np.save(os.path.join(in_dir, obj_name, 'sd_mat1_stats.npy'), mat1)
        np.save(os.path.join(in_dir, obj_name, 'sd_mat2_stats.npy'), mat2)

    quality /= tot_shape
    coverage /= tot_shape
    print('mean sd quality: ', quality)
    print('mean sd coverage: ', coverage)
    print('q + c: %.5f' % (quality + coverage))
    with open(os.path.join(in_dir, 'neighbor_%s_sd_stats.txt' % shapediff_metric),
              'w') as fout:
        fout.write('mean sd quality: %f\n' % quality)
        fout.write('mean sd coverage: %f\n' % coverage)
        fout.write('q + c: %.5f\n' % (quality + coverage))
Code Example #18
sampler = models.groundtruthSampler(camNum=opt.camNum,
                                    fov=opt.fov,
                                    imHeight=opt.imageHeight,
                                    imWidth=opt.imageWidth,
                                    isNoRenderError=opt.isNoRenderError)

# Define the model and optimizer
lr_scale = 1
pointNet = PointNetRefinePoint()
if opt.isFineTune:
    pointNet.load_state_dict(
        torch.load('%s/pointNet_%d.pth' %
                   (opt.experiment, opt.epochIdFineTune)))
    lr_scale = np.power(0.5, int((opt.epochIdFineTune + 1) / 2.0))
chamferDist = ChamferDistance()
if opt.cuda:
    pointNet = pointNet.cuda()
    chamferDist = chamferDist.cuda()
opPointNet = torch.optim.Adam(pointNet.parameters(),
                              lr=1e-4 * lr_scale,
                              betas=(0.5, 0.999))

j = 0
pointErrsNpList = np.ones([1, 2], dtype=np.float32)

normalErrsNpList = np.ones([1, 2], dtype=np.float32)
meanAngleErrsNpList = np.ones([1, 2], dtype=np.float32)
medianAngleErrsNpList = np.ones([1, 2], dtype=np.float32)

for epoch in list(range(opt.epochIdFineTune + 1, opt.nepoch)):
Code Example #19
File: eval_utils.py Project: zeta1999/structedit
def compute_gen_cd_numbers(in_dir, data_path, object_list, shapediff_topk,
                           shapediff_metric, self_is_neighbor, tot_shape):
    chamfer_loss = ChamferDistance()

    data_features = [
        'object', 'name', 'neighbor_diffs', 'neighbor_objs', 'neighbor_names'
    ]
    dataset = PartNetShapeDiffDataset(data_path, object_list, data_features,
                                      shapediff_topk, shapediff_metric,
                                      self_is_neighbor)

    tot_gen = 100
    bar = ProgressBar()
    quality = 0.0
    coverage = 0.0
    for i in bar(range(tot_shape)):
        obj, obj_name, neighbor_diffs, neighbor_objs, neighbor_names = dataset[i]

        mat = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        gt_pcs = []
        for ni in range(shapediff_topk):
            obbs_np = torch.cat(
                [item.view(1, -1)
                 for item in neighbor_objs[ni].boxes(leafs_only=True)],
                dim=0).cpu().numpy()
            mesh_v, mesh_f = utils.gen_obb_mesh(obbs_np)
            pc_sample = utils.sample_pc(mesh_v, mesh_f)
            gt_pcs.append(np.expand_dims(pc_sample, axis=0))
        gt_pcs = np.concatenate(gt_pcs, axis=0)
        gt_pcs = torch.from_numpy(gt_pcs).float().cuda()

        # use j here so the outer shape index i is not shadowed
        for j in range(tot_gen):
            gen_obj = PartNetDataset.load_object(
                os.path.join(in_dir, obj_name, 'obj2-%03d.json' % j))
            obbs_np = torch.cat(
                [item.view(1, -1) for item in gen_obj.boxes(leafs_only=True)],
                dim=0).cpu().numpy()
            mesh_v, mesh_f = utils.gen_obb_mesh(obbs_np)
            gen_pc = utils.sample_pc(mesh_v, mesh_f)
            gen_pc = np.tile(np.expand_dims(gen_pc, axis=0),
                             [shapediff_topk, 1, 1])
            gen_pc = torch.from_numpy(gen_pc).float().cuda()
            d1, d2 = chamfer_loss(gt_pcs, gen_pc)
            mat[:, j] = (d1.sqrt().mean(dim=1) +
                         d2.sqrt().mean(dim=1)).cpu().numpy() / 2

        quality += mat.min(axis=0).mean()
        coverage += mat.min(axis=1).mean()
        np.save(os.path.join(in_dir, obj_name, 'cd_stats.npy'), mat)

    quality /= tot_shape
    coverage /= tot_shape
    print('mean cd quality: %.5f' % quality)
    print('mean cd coverage: %.5f' % coverage)
    print('q + c: %.5f' % (quality + coverage))
    with open(os.path.join(in_dir, 'neighbor_%s_cd_stats.txt' % shapediff_metric),
              'w') as fout:
        fout.write('mean cd quality: %.5f\n' % quality)
        fout.write('mean cd coverage: %.5f\n' % coverage)
        fout.write('q + c: %.5f\n' % (quality + coverage))
Code Example #20
File: loss.py Project: owen94/ResidualPointFlow
def cd_loss(x, y, alpha=0.5):
    dist1, dist2 = ChamferDistance()(x, y)

    return alpha * torch.mean(dist1) + (1 - alpha) * torch.mean(dist2)
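A minimal training-step sketch around cd_loss; alpha weights the two directions, and alpha=0.5 recovers the usual symmetric mean. The toy decoder and shapes are illustrative assumptions, not part of the original project:

import torch
import torch.nn as nn

decoder = nn.Linear(128, 2048 * 3).cuda()  # hypothetical stand-in for a point decoder
optimizer = torch.optim.Adam(decoder.parameters(), lr=1e-4)

code = torch.rand(4, 128, device='cuda')
target = torch.rand(4, 2048, 3, device='cuda')

pred = decoder(code).view(4, 2048, 3)
loss = cd_loss(pred, target, alpha=0.5)
optimizer.zero_grad()
loss.backward()
optimizer.step()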
Code Example #21
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../dataloaders')))
import shapenet_part_loader
import shapenet_core13_loader
import shapenet_core55_loader

from model import PointCapsNet
import segmentation as seg


import open3d as o3d
import matplotlib.pyplot as plt

from chamfer_distance import ChamferDistance
CD = ChamferDistance()

## shorthand aliases for open3d names
PointCloud = o3d.geometry.PointCloud
Vector3dVector = o3d.utility.Vector3dVector
draw_geometries = o3d.visualization.draw_geometries
viz = o3d.visualization.Visualizer()

image_id = 0
USE_CUDA = True


def show_points(points_tensor):
    # print("showing tensor of shape", points_tensor.size())
    prc_r_all = points_tensor.transpose(1, 0).contiguous().data.cpu()
    prc_r_all_point = PointCloud()
Code Example #22
def main():

    chamferDist = ChamferDistance()

    # Decide on GPU or CPU
    if torch.cuda.is_available():
        gpu_or_cpu = torch.device('cuda')
    else:
        gpu_or_cpu = torch.device('cpu')

    image_root = "/datasets/cs253-wi20-public/ShapeNetRendering/"
    point_cloud_root = "/datasets/cs253-wi20-public/ShapeNet_pointclouds/"

    num_epochs = 20
    batch_size = 64
    shuffle = True
    num_workers = 8
    use_2048 = True
    img_size = 227  # I don't know why, but this has to be 227!
    learning_rate = 5e-4
    num_points = 2500
    transform = transforms.Compose([
        transforms.Resize(img_size, interpolation=2),
        transforms.CenterCrop(img_size),
        transforms.ToTensor()
    ])

    path_test = 'test_data.txt'
    test_data = read_from_file(path_test)

    test_data_loader = get_loader(image_root, point_cloud_root, test_data,
                                  use_2048, transform, batch_size, shuffle,
                                  num_workers)

    model = torch.load('best-Baseline_FixedDL.pt').to(device=gpu_or_cpu)
    model.eval()

    for i, (image, point_cloud) in enumerate(test_data_loader):
        image = Variable(image, requires_grad=False)
        point_cloud = Variable(point_cloud, requires_grad=False)

        image = image.float().to(device=gpu_or_cpu)
        point_cloud = point_cloud.float().to(device=gpu_or_cpu)
        pred = model(image)
        dist1, dist2 = chamferDist(pred, point_cloud)
        loss = (torch.mean(dist1)) + (torch.mean(dist2))

        print('pred size = ', pred.size())
        pred = pred.to('cpu')

        out = []

        for p in pred:
            out.append(p.detach().numpy())

        print('type(out) = ', type(out))

        # Visualize the prediction
        Visualize(out).ShowRandom()
        break
Code Example #23
File: utils.py Project: zeta1999/structedit
        vid += v.shape[0]

    all_v = np.vstack(all_v)
    all_f = np.vstack(all_f)
    return all_v, all_f


def sample_pc(v, f, n_points=2048):
    mesh = trimesh.Trimesh(vertices=v, faces=f - 1)
    points, __ = trimesh.sample.sample_surface(mesh=mesh, count=n_points)
    return points


# load unit cube pc
from chamfer_distance import ChamferDistance
chamferLoss = ChamferDistance()
unit_cube = torch.from_numpy(load_pts('cube.pts'))


def boxLoss(box_feature, gt_box_feature):
    global unit_cube
    if unit_cube.device != box_feature.device:
        unit_cube = unit_cube.to(box_feature.device)

    pred_box_pc = transform_pc_batch(unit_cube, box_feature)
    pred_reweight = get_surface_reweighting_batch(box_feature[:, 3:6],
                                                  unit_cube.size(0))
    gt_box_pc = transform_pc_batch(unit_cube, gt_box_feature)
    gt_reweight = get_surface_reweighting_batch(gt_box_feature[:, 3:6],
                                                unit_cube.size(0))
    dist1, dist2 = chamferLoss(gt_box_pc, pred_box_pc)
Code Example #24
import os
import sys
from argparse import ArgumentParser
import numpy as np
import torch
from progressbar import ProgressBar
from chamfer_distance import ChamferDistance
from config import add_eval_args
from data import PartNetDataset, PartNetShapeDiffDataset, Tree
from eval_utils import compute_recon_numbers
import utils
import shutil

sys.setrecursionlimit(5000) # this code uses recursion a lot for code simplicity
chamfer_loss = ChamferDistance()

parser = ArgumentParser()
parser = add_eval_args(parser)
parser.add_argument('--start_id', type=int, default=0)
parser.add_argument('--end_id', type=int, default=-1)
parser.add_argument('--baseline_dir', type=str, help='structurenet baseline result directory')
eval_conf = parser.parse_args()

# load train config
conf = torch.load(os.path.join(eval_conf.ckpt_path, eval_conf.exp_name, 'conf.pth'))
eval_conf.category = conf.category
eval_conf.data_path = conf.data_path
if hasattr(conf, 'self_is_neighbor'):
    eval_conf.self_is_neighbor = conf.self_is_neighbor