def __init__(self, num_steps, device):
        """Set up the autoencoder, its Chamfer loss, optimizer and LR schedule.

        NOTE(review): relies on `Autoencoder`, `ChamferDistance`, `nn`,
        `init`, `optim` and `CosineAnnealingLR` being imported at file level
        (the imports are outside this fragment — confirm).
        """
        def _init_weights(module):
            # Kaiming-uniform for conv kernels; BN starts as the identity map.
            if isinstance(module, nn.Conv1d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.zeros_(module.bias)
            elif isinstance(module, nn.BatchNorm1d):
                init.ones_(module.weight)
                init.zeros_(module.bias)

        self.network = Autoencoder(k=128, num_points=2048).apply(_init_weights).to(device)
        self.loss = ChamferDistance()

        self.optimizer = optim.Adam(
            self.network.parameters(), lr=5e-4, weight_decay=1e-5)
        # Cosine decay across the whole run (period = num_steps) down to ~0 LR.
        self.scheduler = CosineAnnealingLR(
            self.optimizer, T_max=num_steps, eta_min=1e-7)
# Example #2
# Command-line options for the point-cloud completion training script.
# NOTE(review): `parser` itself is created earlier in the file; this block
# only registers arguments.
parser.add_argument('--model', type=str, default=None)  # checkpoint to resume from (None = train from scratch)
# These two roots are consumed unconditionally by the ShapeNet datasets
# below; they were read but never registered, which made parse_args()
# produce a namespace missing them (AttributeError downstream).
parser.add_argument('--partial_root', type=str, required=True)
parser.add_argument('--gt_root', type=str, required=True)
parser.add_argument('--num_input', type=int, default=2048)
parser.add_argument('--num_coarse', type=int, default=1024)
parser.add_argument('--num_dense', type=int, default=16384)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--loss_d1', type=str, default='cd')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--weight_decay', type=float, default=1e-6)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--num_workers', type=int, default=4)
args = parser.parse_args()

# Prefer CUDA when available, otherwise fall back to the CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Chamfer distance serves as the stage-1 reconstruction loss.
# NOTE(review): args.loss_d1 is not consulted here — loss_d1 is always
# Chamfer regardless of the flag; confirm whether that is intentional.
cd_loss = ChamferDistance()
loss_d1 = cd_loss

# Train/val splits share the same data roots; only the split name differs.
train_dataset = ShapeNet(partial_path=args.partial_root, gt_path=args.gt_root, split='train')
val_dataset = ShapeNet(partial_path=args.partial_root, gt_path=args.gt_root, split='val')
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=args.num_workers,
)
val_dataloader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=args.batch_size,
    shuffle=False,
    num_workers=args.num_workers,
)

# Instantiate the completion network and optionally restore a checkpoint.
network = Folding()
if args.model is not None:
    # map_location keeps checkpoint loading working on CPU-only hosts even
    # when the weights were saved from a CUDA device; without it torch.load
    # raises when deserializing CUDA tensors with no GPU present.
    network.load_state_dict(torch.load(args.model, map_location=DEVICE))
    # Announce success only after the state dict actually loaded.
    print('Loaded trained model from {}.'.format(args.model))
else:
    print('Begin training new model.')
network.to(DEVICE)