    def __init__(self, args):
        super(ReconstructionNet, self).__init__()
        self.encoder = DGCNN_Seg_Encoder(args)
        self.decoder = FoldNet_Decoder(args)
        # Reconstruction loss variant is chosen from the CLI args
        if args.loss == 'ChamferLoss':
            self.loss = ChamferLoss()
        elif args.loss == 'ChamferLoss_m':
            self.loss = ChamferLoss_m()
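All of these examples assume a ChamferLoss module imported from elsewhere in the repo. For reference, a minimal sketch of a symmetric Chamfer distance in PyTorch (the repos' actual implementations, including the ChamferLoss_m variant, may differ, e.g. by using squared distances or a CUDA kernel):

    import torch
    import torch.nn as nn

    class ChamferLoss(nn.Module):
        # Symmetric Chamfer distance between two point clouds of shape (B, N, 3)
        def forward(self, preds, gts):
            dist = torch.cdist(preds, gts)            # (B, N_pred, N_gt) pairwise L2
            pred_to_gt = dist.min(dim=2)[0].mean()    # each predicted point -> nearest GT
            gt_to_pred = dist.min(dim=1)[0].mean()    # each GT point -> nearest prediction
            return pred_to_gt + gt_to_pred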
Example #2
    def __init__(self, args):
        super(ReconstructionNet, self).__init__()
        # Encoder backbone is selected from the CLI args
        if args.encoder == 'foldnet':
            self.encoder = FoldNet_Encoder(args)
        elif args.encoder == 'dgcnn_cls':
            self.encoder = DGCNN_Cls_Encoder(args)
        elif args.encoder == 'dgcnn_seg':
            self.encoder = DGCNN_Seg_Encoder(args)
        self.decoder = FoldNet_Decoder(args)
        self.loss = ChamferLoss()
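Only the constructors are shown on this page. A plausible forward/get_loss pair for such a ReconstructionNet (a sketch, assuming the encoder produces a codeword and the decoder folds it back into a point cloud):

    def forward(self, input):
        feature = self.encoder(input)     # (B, feat_dims) codeword
        output = self.decoder(feature)    # (B, num_points, 3) reconstruction
        return output, feature

    def get_loss(self, input, output):
        # Chamfer distance between the input cloud and its reconstruction
        return self.loss(input, output)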
Example #3
    def __init__(self, args):
        super(MultiTaskNet, self).__init__()
        self.encoder = DGCNN_Seg_Encoder(args)
        self.decoder = FoldNet_Decoder(args)
        # Reconstruction loss variant is chosen from the CLI args
        if args.rec_loss == 'ChamferLoss':
            self.rec_loss = ChamferLoss()
        elif args.rec_loss == 'ChamferLoss_m':
            self.rec_loss = ChamferLoss_m()
        # Second head: rotation classification trained with cross-entropy
        self.classifier = DGCNN_RT_Cls_Classifier(args)
        self.rot_loss = CrossEntropyLoss()
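How the two heads combine at training time is not shown; a hedged sketch of the total loss (the method name and the weighting factor alpha are assumptions):

    def get_loss(self, input, output, rot_logits, rot_labels, alpha=1.0):
        rec = self.rec_loss(input, output)           # Chamfer reconstruction term
        rot = self.rot_loss(rot_logits, rot_labels)  # cross-entropy rotation term
        return rec + alpha * rot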
Example #4
    def __init__(self, args):
        super(DGCNN_FoldNet, self).__init__()
        # Encoder backbone is selected from the CLI args
        if args.encoder == 'foldingnet':
            self.encoder = FoldNet_Encoder(args)
        elif args.encoder == 'dgcnn_classification':
            self.encoder = DGCNN_Cls_Encoder(args)
        elif args.encoder == 'dgcnn_segmentation':
            self.encoder = DGCNN_Seg_Encoder(args)
        elif args.encoder == 'pointnet':
            self.encoder = PointNetEncoder(args)
        self.decoder = FoldNet_Decoder(args)
        if args.loss == 'ChamferLoss':
            self.loss = ChamferLoss()
        elif args.loss == 'ChamferLoss_m':
            self.loss = ChamferLoss_m()
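Since every branch reads from a single args namespace, these models can be instantiated outside a CLI with argparse.Namespace. A sketch using only the fields this __init__ reads; in practice the encoder and decoder constructors will require additional fields (e.g. k, feat_dims, num_points) on the same object:

    from argparse import Namespace

    args = Namespace(encoder='dgcnn_segmentation', loss='ChamferLoss')
    model = DGCNN_FoldNet(args)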
Example #5
    def __init__(self, num_patches=32, num_points_per_patch=1024):
        super(PPFFoldNet, self).__init__()
        self.encoder = Encoder(num_patches=num_patches,
                               num_points_per_patch=num_points_per_patch)
        self.decoder = Decoder(num_points_per_patch=num_points_per_patch)
        self.loss = ChamferLoss()

        # Print the parameter summary for one batch of patches; each patch is
        # num_points_per_patch points with 4-dim PPF features.
        if torch.cuda.is_available():
            summary(self.cuda(), (num_points_per_patch, 4), batch_size=num_patches)
        else:
            summary(self, (num_points_per_patch, 4), batch_size=num_patches)

        # Xavier init for conv/linear weights, constant init for batch norm
        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.Linear)):
                nn.init.xavier_normal_(m.weight, gain=1)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
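The forward pass is not shown; for a patch autoencoder like PPFFoldNet it is plausibly a plain encode/decode round trip (a sketch; the real model may also return the codeword, which serves as the local descriptor):

    def forward(self, input):
        codeword = self.encoder(input)   # one descriptor per patch
        output = self.decoder(codeword)  # reconstructed PPF patches
        return output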
Example #6
    torch.backends.cudnn.deterministic = DEBUG
    torch.backends.cudnn.benchmark = not DEBUG

    print(f'Using device {DEVICE}')

    # Data loaders
    train_ds = PPFDataset(**TRAIN_DS_ARGS)
    train_dl = DataLoader(train_ds, **TRAIN_DL_ARGS)

    val_ds = PPFDataset(**VAL_DS_ARGS)
    val_dl = DataLoader(val_ds, **VAL_DL_ARGS)

    print(f'Training set: {len(train_ds)} Validation set: {len(val_ds)}\n')

    # Model
    model = AutoEncoder(NUM_PTS_PER_PATCH)
    model.apply(init_weights).to(DEVICE)

    loss_func = ChamferLoss()
    optimizer = Adam(model.parameters(), LR)
    scheduler = OneCycleLR(
        optimizer, MAX_LR, total_steps=len(train_dl)*TRAINER_ARGS.num_epochs
    )

    Path(TRAINER_ARGS.checkpoint_path).parent.mkdir(parents=True, exist_ok=True)

    # Training
    train(model, loss_func, optimizer, scheduler, (train_dl, val_dl), TRAINER_ARGS)
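init_weights is applied via model.apply() but not defined in this snippet. A plausible definition, mirroring the per-module initialization loop from Example #5 (an assumption, not necessarily this repo's version):

    def init_weights(m):
        # nn.Module.apply calls this on every submodule recursively
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            nn.init.xavier_normal_(m.weight, gain=1)
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)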
Example #7
    def __init__(self, num_points):
        super(FoldNet, self).__init__()

        self.encoder = Encoder(num_points=num_points)
        self.decoder = Decoder(num_points=num_points)
        self.loss = ChamferLoss()
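Usage across all of these autoencoders follows the same pattern: forward, Chamfer loss against the input, backward. A single-step sketch (the batch shape and the encode/decode call are assumptions):

    model = FoldNet(num_points=2048)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    points = torch.rand(8, 2048, 3)                # batch of point clouds
    recon = model.decoder(model.encoder(points))   # or model(points) if forward is defined
    loss = model.loss(points, recon)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()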