Example #1
    def initialize(self, ht, wd):
        """ initialize slam buffers """

        self.ht, self.wd = ht, wd
        ht, wd = ht // 8, wd // 8  # buffers are stored at 1/8 input resolution

        self.fmaps = torch.zeros(1,
                                 self.mem,
                                 128,
                                 ht,
                                 wd,
                                 device='cuda',
                                 dtype=torch.half)
        self.nets = torch.zeros(1,
                                self.mem,
                                128,
                                ht,
                                wd,
                                device='cuda',
                                dtype=torch.half)
        self.inps = torch.zeros(1,
                                self.mem,
                                128,
                                ht,
                                wd,
                                device='cuda',
                                dtype=torch.half)

        self.poses = SE3.Identity(1, 2048, device='cuda')
        self.disps = torch.ones(1, 2048, ht, wd, device='cuda')
        self.intrinsics = torch.zeros(1, 2048, 4, device='cuda')
        self.tstamps = torch.zeros(2048, dtype=torch.long)
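
For intuition, a quick footprint check on one of these buffers (a sketch; the capacity self.mem is not given in the snippet, so 32 and a 640x480 input are hypothetical values):

import torch

mem, ht, wd = 32, 480 // 8, 640 // 8   # hypothetical capacity and input size
fmaps = torch.zeros(1, mem, 128, ht, wd, dtype=torch.half)
print(fmaps.numel() * fmaps.element_size() / 2**20)   # 37.5 MiB per buffer at fp16
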
Example #2
def all_pairs_distance_matrix(poses, beta=2.5):
    """ compute distance matrix between all pairs of poses """
    poses = np.array(poses, dtype=np.float32)
    poses[:,:3] *= beta # scale translation to balance rotation and translation
    poses = SE3(torch.from_numpy(poses))

    r = (poses[:,None].inv() * poses[None,:]).log()
    return r.norm(dim=-1).cpu().numpy()
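
A minimal usage sketch (assumes lietorch's SE3 as above; the random poses are hypothetical [tx, ty, tz, qx, qy, qz, qw] rows with unit quaternions):

import numpy as np

t = np.random.randn(5, 3).astype(np.float32)
q = np.random.randn(5, 4).astype(np.float32)
q /= np.linalg.norm(q, axis=-1, keepdims=True)   # unit quaternions

D = all_pairs_distance_matrix(np.concatenate([t, q], axis=-1))
print(D.shape)   # (5, 5), ~zero on the diagonal
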
Example #3
    def initializer(self, image1):
        """ Initialize coords and transformation maps """

        batch_size, ch, ht, wd = image1.shape
        device = image1.device

        # pixel coordinate grid (x, y) at 1/8 of the input resolution
        y0, x0 = torch.meshgrid(torch.arange(ht // 8), torch.arange(wd // 8))
        coords0 = torch.stack([x0, y0], dim=-1).float()
        coords0 = coords0[None].repeat(batch_size, 1, 1, 1).to(device)

        Ts = SE3.Identity(batch_size, ht // 8, wd // 8, device=device)
        return Ts, coords0
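
A standalone sketch of the shapes this produces (hypothetical 384x512 input; assumes lietorch's SE3):

import torch
from lietorch import SE3

batch_size, ht, wd = 2, 384, 512
y0, x0 = torch.meshgrid(torch.arange(ht // 8), torch.arange(wd // 8))
coords0 = torch.stack([x0, y0], dim=-1).float()[None].repeat(batch_size, 1, 1, 1)
Ts = SE3.Identity(batch_size, ht // 8, wd // 8)

print(coords0.shape)   # torch.Size([2, 48, 64, 2])
print(Ts.shape)        # (2, 48, 64): one identity transform per feature-map pixel
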
Example #4
    def _build_dataset_index(self):
        """ build list of images, poses, depths, and intrinsics """
        images, depths, poses, intrinsics = loadtum(self.datapath,
                                                    self.frame_rate)

        # set first pose to identity
        poses = SE3(torch.as_tensor(poses))
        poses = poses[[0]].inv() * poses
        poses = poses.data.cpu().numpy()

        self.images = images
        self.poses = poses
        self.depths = depths
        self.intrinsics = intrinsics
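
Left-multiplying every pose by the inverse of the first makes poses[0] the identity while preserving relative motion, since (P0^-1 Pi)^-1 (P0^-1 Pj) = Pi^-1 Pj. A quick check of the normalization in isolation (assumes lietorch; the random unit-quaternion poses are hypothetical):

import torch
from lietorch import SE3

raw = torch.randn(4, 7)
raw[:, 3:] /= raw[:, 3:].norm(dim=-1, keepdim=True)   # normalize quaternion part

poses = SE3(raw)
poses = poses[[0]].inv() * poses
print(poses.log()[0])   # ~zero vector: the first pose is now the identity
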
Example #5
    def _build_dataset_index(self):
        """ build list of images, poses, depths, and intrinsics """
        images, depths, poses, intrinsics = loadtum(self.datapath,
                                                    self.frame_rate)
        # override the loadtum intrinsics with values from the calibration file
        intrinsic, _ = TUMStream.calib_read(self.datapath)
        intrinsics = np.tile(intrinsic[None], (len(images), 1))

        # set first pose to identity
        poses = SE3(torch.as_tensor(poses))
        poses = poses[[0]].inv() * poses
        poses = poses.data.cpu().numpy()

        self.images = images
        self.poses = poses
        self.depths = depths
        self.intrinsics = intrinsics
Example #6
def step(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
    """ dense gauss newton update """
    
    pts = pops.inv_project(depth, intrinsics)
    pts = pts.permute(0,3,1,2).contiguous()
    
    attn = attention_matrix(ae)
    se3 = Ts.matrix().permute(0,3,4,1,2).contiguous()

    # build the linear system
    H, b = SE3Builder.apply(attn, se3, pts, target, weight, intrinsics)

    # Levenberg-Marquardt style damping: add lm * H_ii + ep to the diagonal
    I = torch.eye(6, device=H.device)[...,None,None]
    H = H + (lm*H + ep) * I

    dx = SE3Solver.apply(H, b)
    dx = dx.permute(0,3,4,1,2).squeeze(-1).contiguous()

    Ts = SE3.exp(dx) * Ts
    return Ts
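
The damping interpolates between pure Gauss-Newton (lm, ep small) and gradient descent. A scalar sketch of the per-pixel solve (a stand-in 6x6 system with hypothetical values; lm and ep match the defaults above):

import torch

lm, ep = 1e-4, 10.0
H = torch.randn(6, 6); H = H @ H.T        # stand-in for one pixel's Hessian block
b = torch.randn(6, 1)

H = H + (lm * H + ep) * torch.eye(6)      # same damping as in step()
dx = torch.linalg.solve(H, b)             # the solve SE3Solver performs per pixel
print(dx.squeeze())
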
Example #7
def demo(model, index=0):

    images, depths, intrinsics = load_example(index)

    # initial transformation estimate
    if args.transformation == 'SE3':
        Gs = SE3.Identity(1, 2, device='cuda')

    elif args.transformation == 'Sim3':
        Gs = Sim3.Identity(1, 2, device='cuda')
        # randomly rescale first-frame depth so the model must recover the scale
        depths[:, 0] *= 2**(2 * torch.rand(1) - 1.0).cuda()

    images1 = normalize_images(images)
    ests, _ = model(Gs, images1, depths, intrinsics, num_steps=12)

    # keep only the final transformation estimate
    Gs = ests[-1]
    T = Gs[:, 0] * Gs[:, 1].inv()

    T = T[0].matrix().double().cpu().numpy()
    sim3_visualization(T, images, depths, intrinsics)
Example #8
def step_inplace(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
    """ dense gauss newton update with computing similiarity matrix """
    
    pts = pops.inv_project(depth, intrinsics)
    pts = pts.permute(0,3,1,2).contiguous()

    # tensor representation of SE3
    se3 = Ts.data.permute(0,3,1,2).contiguous()
    ae = ae / 8.0

    # build the linear system
    H, b = SE3BuilderInplace.apply(se3, ae, pts, target, weight, intrinsics)

    # Levenberg-Marquardt style damping: add lm * H_ii + ep to the diagonal
    I = torch.eye(6, device=H.device)[...,None,None]
    H = H + (lm*H + ep) * I

    dx = SE3Solver.apply(H, b)
    dx = dx.permute(0,3,4,1,2).squeeze(-1).contiguous()

    Ts = SE3.exp(dx) * Ts
    return Ts
Example #9
def reproj_test(args, N=2):
    """ Test to make sure project transform correctly maps points """

    db = dataset_factory(args.datasets, n_frames=N)
    train_loader = DataLoader(db, batch_size=1, shuffle=True, num_workers=0)

    for item in train_loader:
        images, poses, depths, intrinsics = [x.to('cuda') for x in item]
        poses = SE3(poses).inv()
        disps = 1.0 / depths

        coords, _ = pops.projective_transform(poses, disps, intrinsics, [0],
                                              [1])
        imagew = bilinear_sampler(images[:, [1]], coords[..., [0, 1]])

        # these two images should show the camera motion
        show_image(images[0, 0])
        show_image(images[0, 1])

        # these two images should show the camera motion removed by reprojection / warping
        show_image(images[0, 0])
        show_image(imagew[0, 0])
Example #10
def compute_distance_matrix_flow(poses, disps, intrinsics):
    """ compute flow magnitude between all pairs of frames """
    if not isinstance(poses, SE3):
        poses = torch.from_numpy(poses).float().cuda()[None]
        poses = SE3(poses).inv()

        disps = torch.from_numpy(disps).float().cuda()[None]
        intrinsics = torch.from_numpy(intrinsics).float().cuda()[None]

    N = poses.shape[1]
    
    ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N))
    ii = ii.reshape(-1).cuda()
    jj = jj.reshape(-1).cuda()

    MAX_FLOW = 100.0
    matrix = np.zeros((N, N), dtype=np.float32)

    s = 2048
    for i in range(0, ii.shape[0], s):
        flow1, val1 = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s])
        flow2, val2 = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s])
        
        flow = torch.stack([flow1, flow2], dim=2)
        val = torch.stack([val1, val2], dim=2)
        
        mag = flow.norm(dim=-1).clamp(max=MAX_FLOW)
        mag = mag.view(mag.shape[1], -1)
        val = val.view(val.shape[1], -1)

        mag = (mag * val).mean(-1) / val.mean(-1)
        mag[val.mean(-1) < 0.7] = np.inf

        i1 = ii[i:i+s].cpu().numpy()
        j1 = jj[i:i+s].cpu().numpy()
        matrix[i1, j1] = mag.cpu().numpy()

    return matrix
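
A rough usage sketch (all shapes and values are hypothetical; assumes the pops module and a CUDA device, as in the function above):

import numpy as np

N, ht, wd = 50, 48, 64                                  # frame count, 1/8-res size
poses = np.random.randn(N, 7).astype(np.float32)        # stand-in [t, q] poses
poses[:, 3:] /= np.linalg.norm(poses[:, 3:], axis=-1, keepdims=True)
disps = np.ones((N, ht, wd), dtype=np.float32)
intrinsics = np.tile(np.array([320., 320., 32., 24.], dtype=np.float32), (N, 1))

M = compute_distance_matrix_flow(poses, disps, intrinsics)  # (N, N) flow magnitudes
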
Example #11
def train(args):
    """ Test to make sure project transform correctly maps points """

    N = args.n_frames
    model = RaftSLAM(args)
    model.cuda()
    model.train()

    if args.ckpt is not None:
        model.load_state_dict(torch.load(args.ckpt))

    db = dataset_factory(args.datasets, n_frames=N, fmin=16.0, fmax=96.0)
    train_loader = DataLoader(db, batch_size=args.batch, shuffle=True, num_workers=4)

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, 
        args.lr, args.steps, pct_start=0.01, cycle_momentum=False)

    logger = Logger(args.name, scheduler)
    should_keep_training = True
    total_steps = 0

    while should_keep_training:
        for i_batch, item in enumerate(train_loader):
            optimizer.zero_grad()

            # co-visibility graph: connect frames within 2 time steps, e.g.
            # N=5 -> {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}
            graph = OrderedDict()
            for i in range(N):
                graph[i] = [j for j in range(N) if i != j and abs(i - j) <= 2]
            
            images, poses, depths, intrinsics = [x.to('cuda') for x in item]
            
            # convert poses w2c -> c2w
            Ps = SE3(poses).inv()
            Gs = SE3.Identity(Ps.shape, device='cuda')

            images = normalize_images(images)
            Gs, residuals = model(Gs, images, depths, intrinsics, graph, num_steps=args.iters)

            geo_loss, geo_metrics = geodesic_loss(Ps, Gs, graph)
            res_loss, res_metrics = residual_loss(residuals)

            metrics = {}
            metrics.update(geo_metrics)
            metrics.update(res_metrics)

            loss = args.w1 * geo_loss + args.w2 * res_loss
            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            scheduler.step()
            
            logger.push(metrics)
            total_steps += 1

            if total_steps % 10000 == 0:
                PATH = 'checkpoints/%s_%06d.pth' % (args.name, total_steps)
                torch.save(model.state_dict(), PATH)

                run_evaluation(PATH)

            if total_steps >= args.steps:
                should_keep_training = False
                break

    return model
Example #12
def evaluate(model):
    """ evaluate trained model """

    model.cuda()
    model.eval()

    R_THRESHOLD = 0.1
    T_THRESHOLD = 0.01
    S_THRESHOLD = 0.01

    db = TartanAirTest()
    test_loader = DataLoader(db, batch_size=1, shuffle=False, num_workers=4)

    # random scales, seeded so they are identical on every run
    from numpy.random import default_rng
    rng = default_rng(1234)
    scales = 2 ** rng.uniform(-1.0, 1.0, 2000)
    scales = scales.astype(np.float32)

    metrics = {'t': [], 'r': [], 's': []}
    for i_batch, item in enumerate(test_loader):
        images, poses, depths, intrinsics = [x.to('cuda') for x in item]

        # convert poses w2c -> c2w
        Ps = SE3(poses).inv()
        batch, num = images.shape[:2]

        if args.transformation == 'SE3':
            Gs = SE3.Identity(Ps.shape, device='cuda')

        elif args.transformation == 'Sim3':
            Ps = Sim3(Ps)
            Gs = Sim3.Identity(Ps.shape, device='cuda')

            s = torch.as_tensor(scales[i_batch]).cuda().unsqueeze(0)
            phi = torch.zeros(batch, num, 7, device='cuda')
            phi[:,0,6] = s.log()

            Ps = Sim3.exp(phi) * Ps
            depths[:,0] *= s[:,None,None]

        images = normalize_images(images)
        Gs, _ = model(Gs, images, depths, intrinsics, num_steps=16)

        Gs = Gs[-1]
        dP = Ps[:,1] * Ps[:,0].inv()
        dG = Gs[:,1] * Gs[:,0].inv()

        dE = Sim3(dP.inv() * dG)
        r_err, t_err, s_err = pose_metrics(dE)

        t_err = t_err * TartanAir.DEPTH_SCALE

        metrics['t'].append(t_err.item())
        metrics['r'].append(r_err.item())
        metrics['s'].append(s_err.item())

    rlist = np.array(metrics['r'])
    tlist = np.array(metrics['t'])
    slist = np.array(metrics['s'])
    
    r_all = np.count_nonzero(rlist < R_THRESHOLD) / len(metrics['r'])
    t_all = np.count_nonzero(tlist < T_THRESHOLD) / len(metrics['t'])
    s_all = np.count_nonzero(slist < S_THRESHOLD) / len(metrics['s'])

    print("Rotation Acc: ", r_all)
    print("Translation Acc: ", t_all)
    print("Scale Acc: ", s_all)
Example #13
def train(args):
    """ Test to make sure project transform correctly maps points """

    model = Sim3Net(args)
    model.cuda()
    model.train()

    if args.ckpt is not None:
        model.load_state_dict(torch.load(args.ckpt))

    db = TartanAir(mode='training', n_frames=2, do_aug=True, fmin=8.0, fmax=100.0)
    train_loader = DataLoader(db, batch_size=args.batch, shuffle=True, num_workers=4)

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, 
        args.lr, 100000, pct_start=0.01, cycle_momentum=False)

    from collections import OrderedDict
    graph = OrderedDict()
    graph[0] = [1]
    graph[1] = [0]

    logger = Logger(args.name, scheduler)
    should_keep_training = True
    total_steps = 0

    while should_keep_training:
        for i_batch, item in enumerate(train_loader):
            optimizer.zero_grad()
            images, poses, depths, intrinsics = [x.to('cuda') for x in item]
            
            # convert poses w2c -> c2w
            Ps = SE3(poses).inv()
            batch, num = images.shape[:2]

            if args.transformation == 'SE3':
                Gs = SE3.Identity(Ps.shape, device='cuda')

            elif args.transformation == 'Sim3':
                Ps = Sim3(Ps)
                Gs = Sim3.Identity(Ps.shape, device='cuda')

                s = 2**(2*torch.rand(batch) - 1.0).cuda()
                phi = torch.zeros(batch, num, 7, device='cuda')
                phi[:,0,6] = s.log()

                Ps = Sim3.exp(phi) * Ps
                depths[:,0] *= s[:,None,None]

            images = normalize_images(images)
            Gs, residuals = model(Gs, images, depths, intrinsics, num_steps=args.iters)

            geo_loss, geo_metrics = geodesic_loss(Ps, Gs, graph)
            res_loss, res_metrics = residual_loss(residuals)

            metrics = {}
            metrics.update(geo_metrics)
            metrics.update(res_metrics)

            loss = args.w1 * geo_loss + args.w2 * res_loss
            loss.backward()
            
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            scheduler.step()
            
            logger.push(metrics)
            total_steps += 1

            if total_steps % 5000 == 0:
                PATH = 'checkpoints/%s_%06d.pth' % (args.name, total_steps)
                torch.save(model.state_dict(), PATH)

                model.train()

            # stop once the 100k-step OneCycleLR schedule is exhausted;
            # without this the loop never terminates
            if total_steps >= 100000:
                should_keep_training = False
                break

    return model
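
The Sim3 branch perturbs the first frame by a random scale s in [0.5, 2): phi[:,0,6] is the log-scale component of the Sim3 tangent vector, and the first-frame depths are rescaled to match, so the network must recover s. The perturbation in isolation (assumes lietorch's Sim3; shapes are hypothetical):

import torch
from lietorch import Sim3

batch, num = 2, 2
s = 2**(2 * torch.rand(batch) - 1.0)     # random scale in [0.5, 2)
phi = torch.zeros(batch, num, 7)         # Sim3 tangent: [trans(3), rot(3), log-scale(1)]
phi[:, 0, 6] = s.log()                   # perturb only the first frame

dS = Sim3.exp(phi)                       # scaling for frame 0, identity for frame 1
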
Example #14
def upsample_se3(Ts, mask):
    """ upsample a se3 field """
    tau_phi = Ts.log()
    return SE3.exp(cvx_upsample(tau_phi, mask))
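
cvx_upsample itself is not shown in this snippet. Below is a sketch of RAFT-style convex upsampling consistent with the call above, assuming data of shape [B, H, W, C] (here C=6 for the se3 log field) and a mask of shape [B, 9*8*8, H, W]; this is a plausible reconstruction, not necessarily the repository's implementation:

import torch
import torch.nn.functional as F

def cvx_upsample(data, mask, factor=8):
    """ upsample [B, H, W, C] -> [B, factor*H, factor*W, C] by convex combination """
    B, H, W, C = data.shape
    data = data.permute(0, 3, 1, 2)                  # [B, C, H, W]

    mask = mask.view(B, 1, 9, factor, factor, H, W)
    mask = torch.softmax(mask, dim=2)                # convex weights over 3x3 neighborhoods

    up = F.unfold(data, [3, 3], padding=1)           # [B, C*9, H*W]
    up = up.view(B, C, 9, 1, 1, H, W)

    up = torch.sum(mask * up, dim=2)                 # [B, C, factor, factor, H, W]
    up = up.permute(0, 1, 4, 2, 5, 3)                # [B, C, H, factor, W, factor]
    up = up.reshape(B, C, factor * H, factor * W)
    return up.permute(0, 2, 3, 1)                    # [B, factor*H, factor*W, C]
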
Example #15
def fn(a, s):
    X = SE3.exp(a)
    X.scale(s)
    return X.log()
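
This fragment looks like a helper for checking gradients through exp, scale, and log. A minimal hedged driver (assumes lietorch with CUDA; the shapes follow SE3's 6-dim tangent space and are a guess):

import torch
from lietorch import SE3

a = torch.randn(1, 6, device='cuda', requires_grad=True)   # se3 tangent vector
s = torch.rand(1, device='cuda', requires_grad=True)       # scale factor

out = fn(a, s)
out.sum().backward()   # backprop through exp -> scale -> log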