Example #1
0
class ChamfersDistance(nn.Module):
    '''
    Symmetric Chamfer distance between two batches of point clouds.

    Uses NNDModule (nearest-neighbor distance) as a member to obtain,
    for every point in each cloud, the squared distance to its nearest
    neighbor in the other cloud, then reduces to a scalar loss.
    '''

    def __init__(self):
        super(ChamfersDistance, self).__init__()
        self.nnd = NNDModule()

    def forward(self, input1, input2):  # BxNxK, BxMxK
        """Return the scalar mean Chamfer distance.

        Args:
            input1: point cloud batch of shape (B, N, K).
            input2: point cloud batch of shape (B, M, K).

        Returns:
            Scalar tensor: batch mean of the two directional mean
            nearest-neighbor distances.
        """
        # Invoke the submodule itself rather than .forward() so that any
        # registered forward/backward hooks are honored.
        dist0, dist1 = self.nnd(input1, input2)  # BxN, BxM
        loss = torch.mean(torch.sqrt(dist0), 1) + torch.mean(
            torch.sqrt(dist1), 1)  # B
        loss = torch.mean(loss)  # scalar
        return loss
Example #2
0
def get_chamfer_dist(get_slow=False):
    """Return a Chamfer-distance loss callable.

    Prefers the compiled NNDModule extension; falls back to the quadratic
    implementation when the extension is unavailable or when ``get_slow``
    is True.

    Args:
        get_slow: force the slow quadratic fallback (default False).

    Returns:
        ``loss(a, b)`` computing the summed two-way Chamfer distance.
    """
    try:
        if get_slow:
            raise ValueError  # deliberately trip the fallback path

        import sys
        sys.path.insert(0, './nndistance')
        from modules.nnd import NNDModule
        dist = NNDModule()
    except (ImportError, ValueError):
        # Narrowed from a bare `except:` so unrelated bugs are not swallowed;
        # only a missing extension or the forced-slow flag select the fallback.
        dist = chamfer_quadratic

    def loss(a, b):
        """Chamfer loss between point sets a and b.

        Accepts 4-D image-like tensors (flattened to (B, P, 3); 2-channel
        inputs go through from_polar first) or 3-D tensors in either
        (B, P, 3) or (B, 3, P) layout.
        """
        if a.dim() == 4:
            if a.size(1) == 2:
                a = from_polar(a)

            assert a.size(1) == 3
            a = a.permute(0, 2, 3, 1).contiguous().reshape(a.size(0), -1, 3)

        if b.dim() == 4:
            if b.size(1) == 2:
                b = from_polar(b)

            assert b.size(1) == 3
            b = b.permute(0, 2, 3, 1).contiguous().reshape(b.size(0), -1, 3)

        assert a.dim() == b.dim() == 3
        if a.size(-1) != 3:
            assert a.size(-2) == 3
            a = a.transpose(-2, -1).contiguous()

        if b.size(-1) != 3:
            assert b.size(-2) == 3
            # BUG FIX: the original transposed `a` here, clobbering `b`
            # with a copy of `a` whenever b arrived channels-first.
            b = b.transpose(-2, -1).contiguous()

        dist_a, dist_b = dist(a, b)
        return dist_a.sum(dim=-1) + dist_b.sum(dim=-1)

    return loss
Example #3
0
import argparse  # used below to build the option parser
import random    # used below to draw the manual seed

from ply import *
import torch.nn.functional as F
import sys
from tqdm import tqdm
import os
import json
import time, datetime
import subprocess
import pandas as pd
try:
    from script.normalize_obj import *
except Exception:
    # Best-effort optional import; keep going without the helpers.
    print('couldnt load normalize obj')
sys.path.append("./nndistance/")
from modules.nnd import NNDModule
distChamfer = NNDModule()


parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=6)
parser.add_argument('--nepoch', type=int, default=50000, help='number of epochs to train for')
# Typos fixed in help strings: 'yuor' -> 'your', 'poitnet' -> 'pointnet'.
parser.add_argument('--model', type=str, default='trained_models/ae_baseline.pth', help='your path to the trained model')
parser.add_argument('--num_points', type=int, default=2500, help='number of points fed to pointnet')

opt = parser.parse_args()
print(opt)

# ANSI escape wrapper: render x in blue on supporting terminals.
blue = lambda x: '\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000)  # fix seed
Example #4
0
import torch
import torch.nn as nn
from torch.autograd import Variable  # NOTE: Variable is deprecated; kept for parity

from modules.nnd import NNDModule

# Smoke test: run NNDModule on the same random inputs on CPU and GPU.
dist = NNDModule()

# Two batches of three random 3-D points, scaled into [0, 20).
p1 = torch.rand(1, 3, 3) * 20
p2 = torch.rand(1, 3, 3) * 20
p2 = p2.float()
print(p1)
print(p2)

print('cpu')
points1 = Variable(p1, requires_grad=True)
points2 = Variable(p2)
idx1, idx2 = dist(points1, points2)
# BUG FIX: the original printed idx1 twice; show both outputs.
print(idx1, idx2)

print('gpu')
points1_cuda = Variable(p1.cuda(), requires_grad=True)
points2_cuda = Variable(p2.cuda())
idx1, idx2 = dist(points1_cuda, points2_cuda)
print(idx1, idx2)
Example #5
0
 def __init__(self):
     """Initialize the Chamfer-distance module.

     Instantiates the NNDModule helper (nearest-neighbor distance)
     that the loss computation relies on.
     """
     super(ChamfersDistance, self).__init__()
     self.nnd = NNDModule()
Example #6
0
def weights_init(m):
    """Initialize a module in place for use with ``model.apply``.

    Convolution layers get weights drawn from N(0, 0.02); batch-norm
    layers get weights from N(1, 0.02) and zeroed biases. All other
    module types are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# Apply the custom initializer to every submodule of the autoencoder.
ae.apply(weights_init)

ae.cuda()

optimizer = optim.Adagrad(ae.parameters(), lr=0.001)
nnd = NNDModule()

# BUG FIX: integer division. In Python 3, `/` yields a float, but
# num_batch is a batch count used for iteration/reporting.
num_batch = len(dataset) // opt.batchSize
# Training loop. NOTE(review): the inner body appears truncated in this
# excerpt — no loss computation or backward step is visible after gen.
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        optimizer.zero_grad()
        points, _ = data
        points = Variable(points)

        bs = points.size()[0]
        # Swap to (B, C, N) — presumably the channels-first layout the
        # network expects; confirm against the ae definition.
        points = points.transpose(2, 1)
        points = points.cuda()

        # Forward pass: autoencoder output for this batch.
        gen = ae(points)
Example #7
0
 def __init__(self, n_samples=1024, cuda_opt=True):
     """Set up the Chamfer loss.

     Args:
         n_samples: stored sample count (name suggests points sampled
             per cloud — confirm usage in forward).
         cuda_opt: stored flag, presumably selects the CUDA code path.
     """
     super(ChamferLoss, self).__init__()
     self.n_samples = n_samples
     self.dist = NNDModule()
     self.cuda_opt = cuda_opt