Example 1
# Imports assumed by the snippets below; the originals omit them. The
# project-specific modules (Siren, LGD, ObjDataset, ObjUniformSample,
# GridDataset, UniformSample, PointTransform, NormalPerturb, RandomAugment,
# chamfer_distance, train, utils) come from the repository these examples
# were extracted from, so their exact import paths are assumptions.
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter


def main():
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')

    parser.add_argument('--sdf-weight',
                        dest='sdf_weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=500,
                        help='epochs for adam and lgd')
    parser.add_argument('--n',
                        dest='n',
                        type=int,
                        metavar='N',
                        default=30000,
                        help='number of points to sample')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        metavar='LEARNING_RATE',
                        default=1e-3,
                        help='learning rate')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    n = args.n
    epoch = args.epoch
    lr = args.lr

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.sdf_weight is not None:
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        except Exception:
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    ds = ObjDataset(args.data)
    sampler = ObjUniformSample(n)

    p = (sampler(ds)['p']).to(device)

    # load
    with torch.no_grad():
        mm = torch.min(p, dim=0)[0]
        mx = torch.max(p, dim=0)[0]

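        # map uniform samples from [0, 1)^3 into the point cloud's bounding box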
        x = torch.rand(n, 3).to(device) * (mx - mm) + mm
        x.requires_grad_(True)

        x_original = x.clone().detach()

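    # origin_eval penalizes drift from the initial samples; sdf_eval pulls the
    # points onto the zero level set of the learned SDF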
    origin_eval = lambda x: torch.pow(x_original - x, 2).sum(dim=1).mean()
    sdf_eval = lambda x: torch.pow(model(x)[0], 2).sum(dim=1).mean()

    origin_eval_list = lambda x: origin_eval(x[0])
    sdf_eval_list = lambda x: sdf_eval(x[0])

    print("adam")
    optimizer = optim.Adam([x], lr=lr)

    for i in range(epoch):
        optimizer.zero_grad()

        loss = sdf_eval(x)
        # sdf_eval rebuilds the graph on every iteration, so retaining it is
        # unnecessary
        loss.backward()

        optimizer.step()

        if i % 10 == 0:
            writer.add_scalars("regression_loss", {"Adam": loss},
                               global_step=i)
            writer.add_mesh("point cloud regression_Adam",
                            x.unsqueeze(0),
                            global_step=i)

            writer.add_scalars("chamfer_distance",
                               {"Adam": chamfer_distance(x, p)},
                               global_step=i)

    writer.close()
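
For reference, chamfer_distance is used in the logging above but never defined in the snippet. A minimal sketch of the usual symmetric Chamfer distance between two point sets, assuming the repository's helper behaves the same way:

def chamfer_distance(x, y):
    # pairwise squared distances between the (n, 3) and (m, 3) point sets
    d = torch.cdist(x, y).pow(2)
    # mean nearest-neighbour distance, averaged over both directions
    return d.min(dim=1)[0].mean() + d.min(dim=0)[0].mean()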
Example 2
def main():
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')

    parser.add_argument('--weight-save-path',
                        dest='weight_save_path',
                        metavar='PATH',
                        default='../weights/',
                        help='weight checkpoints path')

    parser.add_argument('--sdf-weight',
                        dest='sdf_weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=500,
                        help='epochs for adam and lgd')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        metavar='LEARNING_RATE',
                        default=1e-3,
                        help='learning rate')
    parser.add_argument('--lgd-step',
                        dest='lgd_step_per_epoch',
                        type=int,
                        metavar='LGD_STEP_PER_EPOCH',
                        default=5,
                        help='number of simulation steps of LGD per epoch')
    parser.add_argument('--n',
                        dest='n',
                        type=int,
                        metavar='N',
                        default=30000,
                        help='number of points to sample')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    n = args.n
    lr = args.lr
    epoch = args.epoch
    lgd_step_per_epoch = args.lgd_step_per_epoch

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.sdf_weight is not None:
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        except Exception:
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    ds = ObjDataset(args.data)
    sampler = ObjUniformSample(n)

    p = (sampler(ds)['p']).to(device)

    # load
    with torch.no_grad():
        mm = torch.min(p, dim=0)[0]
        mx = torch.max(p, dim=0)[0]

        x = torch.rand(n, 3).to(device) * (mx - mm) + mm
        x.requires_grad_(True)

        x_original = x.clone().detach()

    origin_eval = lambda x: torch.pow(x_original - x, 2).sum(dim=1).mean()
    sdf_eval = lambda x: torch.pow(model(x)[0], 2).sum(dim=1).mean()

    origin_eval_list = lambda x: origin_eval(x[0])
    sdf_eval_list = lambda x: sdf_eval(x[0])

    print("lgd")
    hidden = None

    lgd = LGD(3, 2, k=10).to(device)
    lgd_optimizer = optim.Adam(lgd.parameters(), lr=lr)

    # train LGD
    lgd.train()
    for i in range(epoch):
        print(i)
        # evaluate losses
        samples_n = n // 32
        sample_inds = torch.randperm(n)[:samples_n]

        origin_eval_batch = lambda x: torch.pow(x_original[sample_inds] - x, 2
                                                ).sum(dim=1).mean()
        origin_eval_batch_list = lambda x: origin_eval_batch(x[0])

        # update lgd parameters
        lgd_optimizer.zero_grad()
        lgd.loss_trajectory_backward(x[sample_inds],
                                     [origin_eval_batch_list, sdf_eval_list],
                                     None,
                                     constraints=["None", "Zero"],
                                     batch_size=samples_n,
                                     steps=lgd_step_per_epoch)
        lgd_optimizer.step()

        torch.save(lgd.state_dict(),
                   args.weight_save_path + 'model_%03d.pth' % i)

    writer.close()
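
The loss_trajectory_backward call above belongs to the project's LGD (learned gradient descent) module, whose internals are not shown here. As a rough, hypothetical sketch of the idea only: an LGD-style optimizer unrolls a few learned update steps and backpropagates the losses accumulated along the trajectory into the update network.

def unrolled_lgd_loss(step_net, x, loss_fn, steps=5):
    # hypothetical sketch, not the repository's API: step_net maps the
    # current iterate and its gradient to a learned update
    x = x.detach().requires_grad_(True)
    total = 0.
    for _ in range(steps):
        loss = loss_fn(x)
        grad, = torch.autograd.grad(loss, x, create_graph=True)
        x = x + step_net(torch.cat([x, grad], dim=1))
        total = total + loss_fn(x)
    return total  # backpropagating this trains step_net through all steps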
Example 3
def main():
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')

    parser.add_argument('--weight-save-path',
                        dest='weight_save_path',
                        metavar='PATH',
                        default='../weights/',
                        help='weight checkpoints path')

    parser.add_argument('--pretrained-weight',
                        dest='weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight')

    parser.add_argument('--activation',
                        dest='activation',
                        metavar='activation',
                        default='relu',
                        help='activation of network; \'relu\' or \'sin\'')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=100,
                        help='epochs')

    parser.add_argument(
        '--abs',
        dest='abs',
        action='store_true',
        help='whether we should use ABS when evaluating normal loss')

    parser.add_argument('--epsilon',
                        dest='epsilon',
                        type=float,
                        metavar='EPSILON',
                        default=0.1,
                        help='epsilon')
    parser.add_argument('--lambda',
                        dest='lamb',
                        type=float,
                        metavar='LAMBDA',
                        default=0.005,
                        help='hyperparameter for s : normal loss ratio')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.weight is not None:
        try:
            model.load_state_dict(torch.load(args.weight))
        except Exception:
            print("Couldn't load pretrained weight: " + args.weight)

    # load
    ds = ObjDataset(args.data)
    samples_n = 20000

    augments = nn.Sequential(ObjUniformSample(samples_n),
                             NormalPerturb(args.epsilon),
                             RandomAugment(samples_n, args.epsilon * 0.5))

    ds = augments(ds)

    p_aug = ds['p'].detach_().to(device)
    n_aug = ds['n'].detach_().to(device)
    s_aug = ds['s'].detach_().to(device)

    p = p_aug[:samples_n]
    n = n_aug[:samples_n]

    p_gt = p.repeat(2, 1)

    writer.add_mesh("1. n_gt",
                    p.unsqueeze(0),
                    colors=(n.unsqueeze(0) * 128 + 128).int())

    optimizer = optim.Adam(list(model.parameters()), lr=1e-4)

    for epoch in range(args.epoch):
        optimizer.zero_grad()

        # train
        utils.model_train(model)
        loss_t, s, n = train(device,
                             model,
                             p_aug,
                             s_aug,
                             n_aug,
                             backward=True,
                             lamb=args.lamb,
                             use_abs=args.abs)

        #loss_x = 1e2 * torch.sum(torch.pow(p_aug - p_gt, 2))
        #loss_x.backward()

        #writer.add_scalars("loss", {'train': loss_t + loss_x.detach()}, epoch)

        # visualization
        with torch.no_grad():

            n_normalized = n / torch.norm(n, dim=1, keepdim=True)

            n_error = torch.sum(n_normalized * n_aug, dim=1,
                                keepdim=True) / torch.norm(
                                    n_aug, dim=1, keepdim=True)

            n_error_originals = n_error[:p.shape[0]]

            writer.add_scalars(
                "cosine similarity", {
                    'train':
                    n_error_originals[~torch.isnan(n_error_originals)].detach(
                    ).mean()
                }, epoch)

            if epoch % 10 == 0:
                print(epoch)
                writer.add_mesh(
                    "2. n",
                    p_aug[:p.shape[0]].unsqueeze(0).detach().clone(),
                    colors=(n_normalized[:p.shape[0]].unsqueeze(
                        0).detach().clone() * 128 + 128).int(),
                    global_step=epoch)

                writer.add_mesh(
                    "3. cosine similarity",
                    p_aug[:p.shape[0]].unsqueeze(0).detach().clone(),
                    colors=(F.pad(1 - n_error[:p.shape[0]],
                                  (0, 2)).unsqueeze(0).detach().clone() *
                            256).int(),
                    global_step=epoch)

        # update
        optimizer.step()

        torch.save(model.state_dict(),
                   args.weight_save_path + 'model_%03d.pth' % epoch)

    writer.close()
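
The train helper used above is project-specific, but the pattern is the standard one for implicit surfaces: the predicted normal is the gradient of the network output with respect to the input points. A hypothetical minimal version of that step, assuming model(p) returns the SDF values first as in the rest of these examples:

def sdf_and_normals(model, p):
    # s is the raw SDF prediction; n is its gradient w.r.t. the inputs
    p = p.detach().requires_grad_(True)
    s = model(p)[0]
    n, = torch.autograd.grad(s.sum(), p, create_graph=True)
    return s, n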
Example 4
def main():
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')
    parser.add_argument('--sdf-weight',
                        dest='sdf_weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=200,
                        help='epochs for adam and lgd')

    parser.add_argument('--width',
                        dest='width',
                        type=int,
                        metavar='WIDTH',
                        default=128,
                        help='width for rendered image')
    parser.add_argument('--height',
                        dest='height',
                        type=int,
                        metavar='HEIGHT',
                        default=128,
                        help='height for rendered image')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        metavar='LEARNING_RATE',
                        default=1e-3,
                        help='learning rate')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    width = args.width
    height = args.height
    epoch = args.epoch
    lr = args.lr

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.sdf_weight is not None:
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        except Exception:
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    # load
    mm = torch.tensor([-0.1, -0.1, 0.1], device=device, dtype=torch.float)
    mx = torch.tensor([0.1, 0.1, 0.1], device=device, dtype=torch.float)
    wh = torch.tensor([width, height, 1], device=device, dtype=torch.int)

    rot = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                       device=device,
                       dtype=torch.float)
    trans = torch.tensor([[0, 0, -0.8]], device=device, dtype=torch.float)

    p_distribution = GridDataset(mm, mx, wh)

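    # one ray-marching depth per pixel; these depths are the variables being
    # optimized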
    d = torch.zeros((width * height, 1), device=device,
                    dtype=torch.float).requires_grad_(True)

    sampler = nn.Sequential(UniformSample(width * height), PointTransform(rot))

    p = sampler(p_distribution)

    ds = ObjDataset(args.data)
    objsampler = ObjUniformSample(1000)
    x_preview = (objsampler(ds)['p']).to(device)

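    # note: ray_n is defined a few lines below; the lambdas only look it up
    # when they are called, by which point it exists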
    d2_eval = lambda d: torch.pow(d, 2).mean()
    sdf_eval = lambda d: torch.pow(model(d * ray_n + p + trans)[0], 2).sum(
        dim=1).mean()
    d_eval = lambda d: (torch.tanh(d) - 1.).mean() * 0.5

    d2_eval_list = lambda d: d2_eval(d[0])
    sdf_eval_list = lambda d: sdf_eval(d[0])
    d_eval_list = lambda d: d_eval(d[0])

    ray_n = torch.tensor([[0, 0, 1]], device=device,
                         dtype=torch.float).repeat(width * height, 1)

    writer.add_mesh("preview",
                    torch.cat([(p + trans), x_preview]).unsqueeze(0),
                    global_step=0)

    print("adam")
    optimizer = optim.Adam([d], lr=lr)

    for i in range(epoch):
        optimizer.zero_grad()

        loss = sdf_eval(d)
        # sdf_eval rebuilds the graph on every iteration, so retaining it is
        # unnecessary
        loss.backward()

        optimizer.step()

        if i % 10 == 0:
            writer.add_scalars("regression_loss", {"Adam": loss},
                               global_step=i)
            writer.add_mesh("raymarch_Adam",
                            torch.cat([(d * ray_n + trans + p),
                                       x_preview]).unsqueeze(0),
                            global_step=i)

    writer.close()
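
Since d holds one ray depth per pixel, the result can also be logged as an image. A hypothetical addition to the logging branch above, assuming GridDataset and UniformSample yield the pixels in row-major order (these snippets do not show the ordering):

depth = d.detach().reshape(height, width)
depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)
writer.add_image("depth_Adam", depth.unsqueeze(0), global_step=i)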
Example 5
def main():
    parser = argparse.ArgumentParser(
        description='Test',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('data', metavar='DATA', help='path to file')

    parser.add_argument('--tb-save-path',
                        dest='tb_save_path',
                        metavar='PATH',
                        default='../checkpoints/',
                        help='tensorboard checkpoints path')

    parser.add_argument('--weight-save-path',
                        dest='weight_save_path',
                        metavar='PATH',
                        default='../weights/',
                        help='weight checkpoints path')

    parser.add_argument('--sdf-weight',
                        dest='sdf_weight',
                        metavar='PATH',
                        default=None,
                        help='pretrained weight for SDF model')

    parser.add_argument('--batchsize',
                        dest='batchsize',
                        type=int,
                        metavar='BATCHSIZE',
                        default=1,
                        help='batch size')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        metavar='EPOCH',
                        default=500,
                        help='epochs for adam and lgd')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        metavar='LEARNING_RATE',
                        default=5e-3,
                        help='learning rate')
    parser.add_argument('--lgd-step',
                        dest='lgd_step_per_epoch',
                        type=int,
                        metavar='LGD_STEP_PER_EPOCH',
                        default=5,
                        help='number of simulation steps of LGD per epoch')

    parser.add_argument('--width',
                        dest='width',
                        type=int,
                        metavar='WIDTH',
                        default=128,
                        help='width for rendered image')
    parser.add_argument('--height',
                        dest='height',
                        type=int,
                        metavar='HEIGHT',
                        default=128,
                        help='height for rendered image')

    parser.add_argument('--outfile',
                        dest='outfile',
                        metavar='OUTFILE',
                        help='output file')

    args = parser.parse_args()

    width = args.width
    height = args.height
    lr = args.lr
    epoch = args.epoch
    lgd_step_per_epoch = args.lgd_step_per_epoch

    writer = SummaryWriter(args.tb_save_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # create models
    model = Siren(in_features=3,
                  out_features=1,
                  hidden_features=256,
                  hidden_layers=5,
                  outermost_linear=True).to(device)

    if args.sdf_weight is not None:
        try:
            model.load_state_dict(torch.load(args.sdf_weight))
        except Exception:
            print("Couldn't load pretrained weight: " + args.sdf_weight)

    model.eval()
    for param in model.parameters():
        param.requires_grad = False

    # load
    mm = torch.tensor([-0.1, -0.1, 0.1], device=device, dtype=torch.float)
    mx = torch.tensor([0.1, 0.1, 0.1], device=device, dtype=torch.float)
    wh = torch.tensor([width, height, 1], device=device, dtype=torch.int)

    rot = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                       device=device,
                       dtype=torch.float)
    trans = torch.tensor([[0, 0, -0.8]], device=device, dtype=torch.float)

    p_distribution = GridDataset(mm, mx, wh)

    # one ray-marching depth per pixel; these depths are the variables being
    # optimized
    d = torch.zeros((width * height, 1), device=device,
                    dtype=torch.float).requires_grad_(True)

    sampler = nn.Sequential(UniformSample(width * height), PointTransform(rot))

    p = sampler(p_distribution)

    ds = ObjDataset(args.data)
    objsampler = ObjUniformSample(1000)
    x_preview = (objsampler(ds)['p']).to(device)

    # note: ray_n is defined inside the training loop below; the lambdas only
    # look it up when they are called
    d2_eval = lambda d: torch.pow(d, 2).mean()
    sdf_eval = lambda d: torch.pow(model(d * ray_n + p + trans)[0], 2).sum(
        dim=1).mean()
    d_eval = lambda d: (torch.tanh(d) - 1.).mean() * 0.5

    d2_eval_list = lambda d: d2_eval(d[0])
    sdf_eval_list = lambda d: sdf_eval(d[0])
    d_eval_list = lambda d: d_eval(d[0])

    writer.add_mesh("preview",
                    torch.cat([(p + trans), x_preview]).unsqueeze(0),
                    global_step=0)

    print("lgd")
    hidden = None

    lgd = LGD(1, 3, k=10).to(device)
    lgd_optimizer = optim.Adam(lgd.parameters(), lr=lr)

    # train LGD
    lgd.train()
    for i in range(epoch):
        print(i)
        # evaluate losses
        samples_n = width * height // 128
        sample_inds = torch.randperm(width * height)[:samples_n]

        ray_n = torch.tensor([[0, 0, 1]], device=device,
                             dtype=torch.float).repeat(samples_n, 1)

        sdf_eval_batch = lambda d: torch.pow(
            model(d * ray_n + p[sample_inds] + trans)[0], 2).sum(dim=1).mean()
        sdf_eval_batch_list = lambda d: sdf_eval_batch(d[0])

        # update lgd parameters
        lgd_optimizer.zero_grad()
        lgd.loss_trajectory_backward(d[sample_inds],
                                     [d2_eval_list, sdf_eval_batch_list,
                                      d_eval_list],
                                     None,
                                     constraints=["None", "Zero", "Positive"],
                                     batch_size=samples_n,
                                     steps=lgd_step_per_epoch)
        lgd_optimizer.step()

        torch.save(lgd.state_dict(),
                   args.weight_save_path + 'model_%03d.pth' % i)

    writer.close()