Esempio n. 1
0
    def __init__(self, config):
        """Build the box/child/sample decoders, loss modules, and point-cloud buffers."""
        super(RecursiveDecoder, self).__init__()

        self.conf = config

        feat_size = config.feature_size
        hidden_size = config.hidden_size

        # Sub-decoders (creation order kept stable for reproducible init).
        self.box_decoder = BoxDecoder(feat_size, hidden_size)
        self.child_decoder = GNNChildDecoder(
            node_feat_size=feat_size,
            hidden_size=hidden_size,
            max_child_num=config.max_child_num,
            edge_symmetric_type=config.edge_symmetric_type,
            num_iterations=config.num_dec_gnn_iterations,
            edge_type_num=len(config.edge_types))
        self.sample_decoder = SampleDecoder(feat_size, hidden_size)
        self.leaf_classifier = LeafClassifier(feat_size, hidden_size)

        # Element-wise losses (no reduction applied here).
        self.bceLoss = nn.BCEWithLogitsLoss(reduction='none')
        self.chamferLoss = ChamferDistance()
        self.semCELoss = nn.CrossEntropyLoss(reduction='none')

        # Template point clouds registered as buffers so they follow the
        # module across devices and appear in state_dict.
        self.register_buffer('unit_cube', torch.from_numpy(load_pts('cube.pts')))
        self.register_buffer('anchor', torch.from_numpy(load_pts('anchor.pts')))
Esempio n. 2
0
    def __init__(self, conf, sem_cnt):
        """Wire up the mask/pose sub-networks and load the template point clouds."""
        super(Network, self).__init__()
        self.conf = conf

        # Per-task sub-networks.
        self.mask_net = MaskNet(conf, sem_cnt)
        self.pose_net = PoseNet(conf, sem_cnt)

        # Template point clouds moved onto the configured device.
        # NOTE(review): these are plain attributes, not registered buffers,
        # so Module.to()/state_dict will not track them — confirm intended.
        device = conf.device
        self.unit_cube = torch.from_numpy(load_pts('../utils/cube.pts')).to(device)
        self.unit_anchor = torch.from_numpy(load_pts('../utils/anchor.pts')).to(device)
Esempio n. 3
0
    def __init__(self, config):
        """Build decoders, diff-prediction heads, losses, and the unit-cube buffer."""
        super(RecursiveDecoder, self).__init__()

        self.conf = config

        feat_size = config.feature_size
        hidden_size = config.hidden_size

        # Structure decoders (creation order kept stable for reproducible init).
        self.box_decoder = BoxDecoder(feat_size, hidden_size)
        self.box_diff_decoder = BoxDiffDecoder(feat_size, hidden_size)
        self.child_decoder = ConcatChildDecoder(feat_size, hidden_size,
                                                config.max_child_num)
        self.sample_decoder = SampleDecoder(feat_size, hidden_size)
        self.leaf_classifier = LeafClassifier(feat_size, hidden_size)

        # Element-wise losses (no reduction applied here).
        self.bceLoss = nn.BCEWithLogitsLoss(reduction='none')
        self.chamferLoss = ChamferDistance()
        self.ceLoss = nn.CrossEntropyLoss(reduction='none')

        # Node-diff heads.
        self.node_diff_feature_extractor = NodeDiffFeatureExtractor(
            feat_size, hidden_size)
        self.node_diff_classifier = NodeDiffClassifier(feat_size, hidden_size)
        self.add_child_decoder = AddChildDecoder(feat_size, hidden_size,
                                                 config.max_child_num)

        # Unit cube registered as a buffer so it follows the module's device.
        self.register_buffer('unit_cube',
                             torch.from_numpy(load_pts('cube.pts')))
Esempio n. 4
0
 def get_pg_real_pcs(self, index, num_shape):
     """Sample `num_shape` shapes (with replacement) from shape group `index`.

     Returns:
         (names, out): the sampled shape names and a float32 tensor of
         shape (num_shape, self.num_point, 3) with their point clouds.
     """
     ids = np.random.choice(len(self.pg_shapes[index]), num_shape, replace=True)
     # Fixed: split the semicolon-joined statements and hoist the repeated
     # self.pg_shapes[index][idx] lookup.
     names = []
     out = np.zeros((num_shape, self.num_point, 3), dtype=np.float32)
     for i, idx in enumerate(ids):
         name = self.pg_shapes[index][idx]
         pc_file = os.path.join(self.data_dir, name, 'point_sample',
                                'sample-points-all-pts-nor-rgba-10000.txt')
         out[i] = utils.load_pts(pc_file)[:self.num_point]
         names.append(name)
     return (names, torch.from_numpy(out))
Esempio n. 5
0
 def __getitem__(self, index):
     """Return (group_index, pts): a batch of `self.batch_size` point clouds
     sampled with replacement from shape group `index`."""
     if self.mode == 'sample_by_shape':
         index = self.sample_by_shape_pgids[index]
     shape_list = self.pg_shapes[index]
     picked = np.random.choice(len(shape_list), self.batch_size, replace=True)
     out = np.zeros((self.batch_size, self.num_point, 3), dtype=np.float32)
     for row, shape_idx in enumerate(picked):
         path = os.path.join(self.data_dir, shape_list[shape_idx],
                             'point_sample',
                             'sample-points-all-pts-nor-rgba-10000.txt')
         out[row] = utils.load_pts(path)[:self.num_point]
     return (index, torch.from_numpy(out))
Esempio n. 6
0
 def get_random_batch(self, batch_size):
     """Sample `batch_size` shapes uniformly with replacement and return
     (names, pts) where pts is a float (batch_size, num_point, 3) tensor."""
     picked = np.random.choice(len(self.shape_ids), batch_size, replace=True)
     out = np.zeros((batch_size, self.num_point, 3), dtype=np.float32)
     names = []
     for row, sid in enumerate(picked):
         shape_name = self.shape_ids[sid]
         names.append(shape_name)
         path = os.path.join(self.data_dir, str(shape_name), 'point_sample',
                             'sample-points-all-pts-nor-rgba-10000.txt')
         out[row] = utils.load_pts(path)[:self.num_point]
     return names, torch.from_numpy(out).float()
Esempio n. 7
0
    def __init__(self, args, device):
        """Create the template encoder, part decoder, and feature-fusion MLPs."""
        super(Network, self).__init__()

        # Sub-modules (creation order kept stable for reproducible init).
        self.template_encoder = TemplateEncoder(
            args.template_feat_len, args.hidden_len,
            args.template_symmetric_type, args.max_part_per_parent, device)
        self.part_decoder = PartDecoder(args.feat_len)

        self.max_part_per_parent = args.max_part_per_parent

        # Two-layer MLP; its input concatenates a part feature, a
        # Tree.num_sem-sized semantic vector, a template feature, and a
        # max_part_per_parent-sized vector.
        fused_len = (args.feat_len + Tree.num_sem + args.template_feat_len
                     + self.max_part_per_parent)
        self.mlp1 = nn.Linear(fused_len, args.hidden_len)
        self.mlp2 = nn.Linear(args.hidden_len, args.feat_len)

        # Base point cloud registered as a buffer (moves with the module).
        self.register_buffer('base_pc',
                             torch.from_numpy(utils.load_pts('cube.pts')))
Esempio n. 8
0
def compute_gen_sd_numbers(in_dir, data_path, object_list, shapediff_topk,
                           shapediff_metric, self_is_neighbor, tot_shape):
    """Score generated shapes against each input's neighbors by structure
    distance (sd) and report mean quality and coverage.

    For each of the `tot_shape` dataset shapes, the 100 generated objects
    stored under `in_dir/<name>/obj2-XXX.json` are compared against the
    shape's `shapediff_topk` neighbors; per-shape score matrices are saved
    as `sd_mat1_stats.npy` / `sd_mat2_stats.npy`, and the aggregate numbers
    are printed and written to `neighbor_<metric>_sd_stats.txt` in `in_dir`.
    """
    chamfer_loss = ChamferDistance()
    unit_cube = torch.from_numpy(utils.load_pts('cube.pts'))

    def box_dist(box_feature, gt_box_feature):
        # Symmetric chamfer distance between two batches of boxes, measured
        # on the unit-cube template transformed by each box's parameters,
        # with per-point surface reweighting derived from dims 3:6.
        pred_box_pc = utils.transform_pc_batch(unit_cube, box_feature)
        pred_reweight = utils.get_surface_reweighting_batch(
            box_feature[:, 3:6], unit_cube.size(0))
        gt_box_pc = utils.transform_pc_batch(unit_cube, gt_box_feature)
        gt_reweight = utils.get_surface_reweighting_batch(
            gt_box_feature[:, 3:6], unit_cube.size(0))
        dist1, dist2 = chamfer_loss(gt_box_pc, pred_box_pc)
        # Weighted mean of each chamfer direction (eps guards against 0/0).
        loss1 = (dist1 * gt_reweight).sum(dim=1) / (gt_reweight.sum(dim=1) +
                                                    1e-12)
        loss2 = (dist2 *
                 pred_reweight).sum(dim=1) / (pred_reweight.sum(dim=1) + 1e-12)
        loss = (loss1 + loss2) / 2
        return loss

    def struct_dist(gt_node, pred_node):
        # Recursive tree-edit-style distance between two shape hierarchies:
        # unmatched boxes count toward the distance; children sharing a
        # semantic label are paired by minimum-cost assignment on box_dist.
        if gt_node.is_leaf:
            if pred_node.is_leaf:
                return 0
            else:
                # gt collapsed to one leaf: every extra pred box is an edit.
                return len(pred_node.boxes()) - 1
        else:
            if pred_node.is_leaf:
                return len(gt_node.boxes()) - 1
            else:
                # Only labels present on both sides can be matched.
                gt_sem = set([node.label for node in gt_node.children])
                pred_sem = set([node.label for node in pred_node.children])
                intersect_sem = set.intersection(gt_sem, pred_sem)

                # Group child indices by shared semantic label.
                gt_cnodes_per_sem = dict()
                for node_id, gt_cnode in enumerate(gt_node.children):
                    if gt_cnode.label in intersect_sem:
                        if gt_cnode.label not in gt_cnodes_per_sem:
                            gt_cnodes_per_sem[gt_cnode.label] = []
                        gt_cnodes_per_sem[gt_cnode.label].append(node_id)

                pred_cnodes_per_sem = dict()
                for node_id, pred_cnode in enumerate(pred_node.children):
                    if pred_cnode.label in intersect_sem:
                        if pred_cnode.label not in pred_cnodes_per_sem:
                            pred_cnodes_per_sem[pred_cnode.label] = []
                        pred_cnodes_per_sem[pred_cnode.label].append(node_id)

                matched_gt_idx = []
                matched_pred_idx = []
                # NOTE(review): matched_gt2pred is filled below but never
                # read afterwards — looks like leftover bookkeeping.
                matched_gt2pred = np.zeros((100), dtype=np.int32)
                for sem in intersect_sem:
                    gt_boxes = torch.cat([
                        gt_node.children[cid].get_box_quat()
                        for cid in gt_cnodes_per_sem[sem]
                    ],
                                         dim=0)
                    pred_boxes = torch.cat([
                        pred_node.children[cid].get_box_quat()
                        for cid in pred_cnodes_per_sem[sem]
                    ],
                                           dim=0)

                    num_gt = gt_boxes.size(0)
                    num_pred = pred_boxes.size(0)

                    if num_gt == 1 and num_pred == 1:
                        # Trivial 1-to-1 case: skip the assignment solver.
                        cur_matched_gt_idx = [0]
                        cur_matched_pred_idx = [0]
                    else:
                        # All-pairs box distances, then optimal bipartite
                        # matching on the resulting cost matrix.
                        gt_boxes_tiled = gt_boxes.unsqueeze(dim=1).repeat(
                            1, num_pred, 1)
                        pred_boxes_tiled = pred_boxes.unsqueeze(dim=0).repeat(
                            num_gt, 1, 1)
                        dmat = box_dist(gt_boxes_tiled.view(-1, 10),
                                        pred_boxes_tiled.view(-1, 10)).view(
                                            -1, num_gt, num_pred)
                        _, cur_matched_gt_idx, cur_matched_pred_idx = utils.linear_assignment(
                            dmat)

                    # Map per-label local indices back to child indices.
                    for i in range(len(cur_matched_gt_idx)):
                        matched_gt_idx.append(
                            gt_cnodes_per_sem[sem][cur_matched_gt_idx[i]])
                        matched_pred_idx.append(
                            pred_cnodes_per_sem[sem][cur_matched_pred_idx[i]])
                        matched_gt2pred[gt_cnodes_per_sem[sem][
                            cur_matched_gt_idx[i]]] = pred_cnodes_per_sem[sem][
                                cur_matched_pred_idx[i]]

                # Unmatched children on either side contribute all their boxes.
                struct_diff = 0.0
                for i in range(len(gt_node.children)):
                    if i not in matched_gt_idx:
                        struct_diff += len(gt_node.children[i].boxes())

                for i in range(len(pred_node.children)):
                    if i not in matched_pred_idx:
                        struct_diff += len(pred_node.children[i].boxes())

                # Matched pairs recurse into their subtrees.
                for i in range(len(matched_gt_idx)):
                    gt_id = matched_gt_idx[i]
                    pred_id = matched_pred_idx[i]
                    struct_diff += struct_dist(gt_node.children[gt_id],
                                               pred_node.children[pred_id])

                return struct_diff

    # create dataset and data loader
    data_features = [
        'object', 'name', 'neighbor_diffs', 'neighbor_objs', 'neighbor_names'
    ]
    dataset = PartNetShapeDiffDataset(data_path, object_list, data_features,
                                      shapediff_topk, shapediff_metric,
                                      self_is_neighbor)

    tot_gen = 100  # number of generated samples evaluated per input shape
    bar = ProgressBar()
    quality = 0.0
    coverage = 0.0
    for i in bar(range(tot_shape)):
        obj, obj_name, neighbor_diffs, neighbor_objs, neighbor_names = dataset[
            i]

        # mat1: sd normalized by the neighbor's box count;
        # mat2: sd normalized by the generated shape's box count.
        mat1 = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        mat2 = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        for j in range(tot_gen):
            gen_obj = PartNetDataset.load_object(
                os.path.join(in_dir, obj_name, 'obj2-%03d.json' % j))
            for ni in range(shapediff_topk):
                sd = struct_dist(neighbor_objs[ni].root, gen_obj.root)
                mat1[ni, j] = sd / len(neighbor_objs[ni].root.boxes())
                mat2[ni, j] = sd / len(gen_obj.root.boxes())

        # quality: each generation scored by its best neighbor;
        # coverage: each neighbor scored by its best generation.
        quality += mat2.min(axis=0).mean()
        coverage += mat1.min(axis=1).mean()
        np.save(os.path.join(in_dir, obj_name, 'sd_mat1_stats.npy'), mat1)
        np.save(os.path.join(in_dir, obj_name, 'sd_mat2_stats.npy'), mat2)

    quality /= tot_shape
    coverage /= tot_shape
    print('mean sd quality: ', quality)
    print('mean sd coverage: ', coverage)
    print('q + c: %.5f' % (quality + coverage))
    with open(
            os.path.join(in_dir,
                         'neighbor_%s_sd_stats.txt' % shapediff_metric),
            'w') as fout:
        fout.write('mean sd quality: %f\n' % quality)
        fout.write('mean sd coverage: %f\n' % coverage)
        fout.write('q + c: %.5f\n' % (quality + coverage))
Esempio n. 9
0
if conf.category is not None:
    Tree.load_category_info(conf.category)

# merge training and evaluation configurations, giving evaluation parameters precedence
conf.__dict__.update(eval_conf.__dict__)
print(conf.data_path, conf.category, conf.model_version, conf.baseline_dir, conf.self_is_neighbor, conf.shapediff_topk, conf.shapediff_metric)

# load model
models = utils.get_model_module(conf.model_version)

# set up device
device = torch.device(conf.device)
print(f'Using device: {device}')

# load unit cube pc
unit_cube = torch.from_numpy(utils.load_pts('cube.pts')).to(device)

# check if eval results already exist; if so, ask before deleting
result_dir = os.path.join(conf.result_path, conf.exp_name + '_recon')
if os.path.exists(result_dir):
    # Fixed: user-facing prompt said "detele" instead of "delete".
    response = input('Eval results directory "%s" already exists, delete it? (y/n) ' % result_dir)
    if response != 'y':
        sys.exit()
    shutil.rmtree(result_dir)

if not os.path.exists(result_dir):
    os.makedirs(result_dir)

# create models
encoder = models.RecursiveEncoder(conf, variational=True, probabilistic=False)
decoder = models.RecursiveDecoder(conf)
Esempio n. 10
0
 def get_pg_real_pc(self, index, j):
     """Load the j-th shape of group `index` as a (num_point, 3) tensor."""
     path = os.path.join(self.data_dir, self.pg_shapes[index][j],
                         'point_sample',
                         'sample-points-all-pts-nor-rgba-10000.txt')
     cloud = utils.load_pts(path)[:self.num_point]
     return torch.from_numpy(cloud)
Esempio n. 11
0
 def __getitem__(self, index):
     """Return (shape_id, pts) where pts is a float (1, num_point, 3) tensor."""
     path = os.path.join(self.data_dir, str(self.shape_ids[index]),
                         'point_sample',
                         'sample-points-all-pts-nor-rgba-10000.txt')
     cloud = utils.load_pts(path)[:self.num_point]
     return (self.shape_ids[index], torch.from_numpy(cloud).float().unsqueeze(0))
Esempio n. 12
0
"""
    This file performs online symmetry computation for edges while training the networks.
"""

import numpy as np
import torch
from pyquaternion import Quaternion
from utils import load_pts, export_ply_with_label, transform_pc
from scipy.spatial.distance import cdist
from sklearn.decomposition import PCA
import scipy.spatial
''' translation symmetry
        Output: mat1to2, mat2to1: 3x4 affine matrices with identity rotation
            whose last column translates part A's center onto part B's and back
        Usage: part_B_point = part_A_point + mat1to2[:, 3]
'''
# Unit cube template point cloud, loaded once at module import time.
unit_cube = torch.from_numpy(load_pts('cube.pts'))


def compute_trans_sym(obb1, obb2):
    """Compute the pure-translation maps between two oriented bounding boxes.

    Args:
        obb1, obb2: box parameter vectors whose first three entries are the
            box center (x, y, z); remaining entries are ignored here.

    Returns:
        (mat1to2, mat2to1): two 3x4 affine matrices with identity rotation
        whose last column moves box 1's center onto box 2's, and vice versa.
    """
    offset = np.asarray(obb2[:3]) - np.asarray(obb1[:3])
    mat1to2 = np.hstack([np.eye(3), offset.reshape(3, 1)])
    mat2to1 = np.hstack([np.eye(3), (-offset).reshape(3, 1)])
    return mat1to2, mat2to1


''' Reflective Symmetry 
        Output: Ret: T/F, mid_pt: middle point xyz, direction: direction unit vector
        Usage: part_B_point = part_A_point + <mid_pt - part_A_point, direction> * 2 * direction
'''