def __init__(self):
    config = Config()
    # Work queue holding the TS segments to download.
    self.ts_queue = queue.Queue()
    # Thread count, output container format, and ffmpeg location come from the
    # configuration/response helpers.
    self.max_threads = Response().get_max_threads()
    self.video_format = config.getValue("video_format")
    self.ffmpeg_path = config.getValue("ffmpeg_path")
    # Host from which the stream key is fetched.
    self.key_host = Response().get_key_url()

import os
import sys
import time

import torch
from read_config import Config
from src.PointNet import PrimitivesEmbeddingDGCNGn  # assumed module path for the model class
from src.loss import (  # import header reconstructed; module path assumed
    EmbeddingLoss,
    primitive_loss,
    evaluate_miou,
)
from src.segment_utils import to_one_hot, SIOU_matched_segments
from src.utils import visualize_point_cloud_from_labels, visualize_point_cloud
from src.dataset import generator_iter
from src.mean_shift import MeanShift
from src.residual_utils import Evaluation
from src.primitives import SaveParameters

# Use only one GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = Config(sys.argv[1])
if_normals = config.normals

userspace = ""
Loss = EmbeddingLoss(margin=1.0)

if config.mode == 0:
    # Mode 0: train on raw point coordinates only (3 input channels, no normals).
    model = PrimitivesEmbeddingDGCNGn(
        embedding=True,
        emb_size=128,
        primitives=True,
        num_primitives=10,
        loss_function=Loss.triplet_loss,
        mode=config.mode,
        num_channels=3,
    )
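    # Hypothetical continuation (not in the original snippet): Examples #3 and #4
    # below wrap their networks in torch.nn.DataParallel and move them to the GPU,
    # so the same treatment would apply to this model. The Adam optimizer and its
    # learning rate are illustrative assumptions, not taken from the source.
    model = torch.nn.DataParallel(model)
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)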
Example #3
import sys

import torch

from read_config import Config
from src.VisUtils import tessalate_points
from src.dataset import DataSetControlPointsPoisson
from src.dataset import generator_iter
from src.fitting_utils import sample_points_from_control_points_
from src.fitting_utils import up_sample_points_torch_in_range
from src.loss import control_points_permute_reg_loss
from src.loss import laplacian_loss
from src.loss import (
    uniform_knot_bspline,
    spline_reconstruction_loss,
)
from src.model import DGCNNControlPoints
from src.primitive_forward import optimize_open_spline

config = Config(sys.argv[1])

control_decoder = DGCNNControlPoints(20, num_points=10, mode=config.mode)
control_decoder = torch.nn.DataParallel(control_decoder)
control_decoder.cuda()
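
# Assumed setup (not shown in the original snippet): fitting/evaluation scripts
# of this kind typically restore pretrained decoder weights and put the network
# in eval mode before use. The checkpoint path below is a placeholder, not a
# real config field or file.
pretrained_path = "path/to/control_decoder.pth"  # placeholder checkpoint path
control_decoder.load_state_dict(torch.load(pretrained_path))
control_decoder.eval()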

split_dict = {"train": config.num_train, "val": config.num_val, "test": config.num_test}

dataset = DataSetControlPointsPoisson(
    config.dataset_path,
    config.batch_size,
    splits=split_dict,
    size_v=config.grid_size,
    size_u=config.grid_size)

nu, nv = uniform_knot_bspline(20, 20, 3, 3, 30)
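
# For context, an illustrative use of the returned basis matrices (this block is
# an assumption, not part of the original script): if nu and nv are the B-spline
# basis matrices sampled on a 30-point parameter grid in u and v (each of shape
# 30 x 20), a tensor-product surface can be evaluated from a 20 x 20 x 3
# control-point grid with a single einsum, e.g. before visualizing it with
# tessalate_points.
import numpy as np

example_control_points = np.random.rand(20, 20, 3)  # placeholder control-point grid
example_surface = np.einsum("ik,jl,klc->ijc", nu, nv, example_control_points)  # (30, 30, 3)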
Example #4
import sys

import torch

from read_config import Config
from src.VisUtils import tessalate_points
from src.dataset import DataSetControlPointsPoisson
from src.dataset import generator_iter
from src.fitting_utils import sample_points_from_control_points_
from src.fitting_utils import up_sample_points_torch_in_range
from src.loss import control_points_permute_reg_loss
from src.loss import laplacian_loss
from src.loss import (
    uniform_knot_bspline,
    spline_reconstruction_loss,
)
from src.model import DGCNNControlPoints
from src.primitive_forward import optimize_close_spline
from src.utils import chamfer_distance_single_shape

config = Config(sys.argv[1])

userspace = ".."
print(config.mode)
control_decoder = DGCNNControlPoints(20, num_points=10, mode=config.mode)
control_decoder = torch.nn.DataParallel(control_decoder)
control_decoder.cuda()
config.batch_size = 1  # process one shape per batch
split_dict = {
    "train": config.num_train,
    "val": config.num_val,
    "test": config.num_test
}

dataset = DataSetControlPointsPoisson(path=config.dataset_path,
                                      batch_size=config.batch_size,