Example #1
    def write_vls(self, data_blob, outputs, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()
        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / data_blob['mdDepth_pred'],
                                   vmax=0.15,
                                   viewind=0)
        imgrecon = tensor2rgb(outputs[('img1_recon', 2)][:, -1], viewind=0)
        attentionmap = tensor2disp(outputs['attention'],
                                   percentile=95,
                                   viewind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid = np.concatenate(
            [np.array(depthpredvls),
             np.array(attentionmap)], axis=1)
        img_val = np.concatenate(
            [np.array(img_val_up), np.array(img_val_mid)], axis=0)
        self.writer.add_image('predvls', (torch.from_numpy(img_val).float() /
                                          255).permute([2, 0, 1]), step)

        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'],
                              outputs)
        self.writer.add_image('X', (torch.from_numpy(X).float() / 255).permute(
            [2, 0, 1]), step)
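
The add_image calls above (and in the examples below) repeat the same HWC-uint8 to CHW-float conversion. A small helper along these lines keeps that in one place; to_chw_float is a hypothetical name used for illustration, not part of the repository:

import numpy as np
import torch

def to_chw_float(img: np.ndarray) -> torch.Tensor:
    # Convert an H x W x 3 uint8 image into the C x H x W float tensor
    # in [0, 1] that SummaryWriter.add_image expects by default.
    return torch.from_numpy(img).float().permute([2, 0, 1]) / 255.0

# e.g. self.writer.add_image('predvls', to_chw_float(img_val), step)
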
Example #2
    def write_vls_eval(self, data_blob, outputs, tagname, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()
        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / outputs[('mDepth', 0)],
                                   vmax=0.15,
                                   viewind=0)
        flowvls = flow_to_image(
            outputs[('flowpred', 0)][0].detach().cpu().permute([1, 2, 0]).numpy(),
            rad_max=10)
        imgrecon = tensor2rgb(outputs[('reconImg', 0)], ind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid2 = np.concatenate(
            [np.array(depthpredvls), np.array(flowvls)], axis=1)
        img_val_mid3 = np.concatenate(
            [np.array(img1), np.array(imgrecon)], axis=1)
        img_val = np.concatenate([
            np.array(img_val_up),
            np.array(img_val_mid2),
            np.array(img_val_mid3)
        ], axis=0)
        self.writer.add_image('{}_predvls'.format(tagname),
                              (torch.from_numpy(img_val).float() /
                               255).permute([2, 0, 1]), step)

        X = self.vls_sampling(img1, img2, data_blob['depthvls'], outputs)
        self.writer.add_image('{}_X'.format(tagname),
                              (torch.from_numpy(X).float() / 255).permute(
                                  [2, 0, 1]), step)
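
These write_vls / write_vls_eval methods assume the enclosing class holds a TensorBoard writer in self.writer. A minimal sketch of that wiring with torch.utils.tensorboard is given below; the Logger class name, its constructor, and the logging interval are assumptions, not the repository's API:

import torch
from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, logdir):
        # Backs every self.writer.add_image(...) call in the methods above.
        self.writer = SummaryWriter(log_dir=logdir)

    # write_vls / write_vls_eval from the examples would be defined here.

    def close(self):
        self.writer.close()

# Hypothetical training-loop usage:
# logger = Logger('runs/exp0')
# for step, data_blob in enumerate(train_loader):
#     outputs = model(data_blob)
#     if step % 500 == 0:
#         with torch.no_grad():
#             logger.write_vls_eval(data_blob, outputs, 'val', step)
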
Example #3
def get_vls_eval(data_blob, outputs, args):
    img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    insmap = data_blob['insmap'][0].squeeze().numpy()

    insvls = vls_ins(img1, insmap)

    depthpredvls = tensor2disp(1 / outputs[('depth', 2)], vmax=0.15, viewind=0)
    depthpredvls_org = tensor2disp(1 / data_blob['depthpred'],
                                   vmax=0.15,
                                   viewind=0)

    sigmoidact = outputs[('residualdepth', 2)]
    sigmoidactvls = tensor2grad(sigmoidact,
                                pos_bar=0.1,
                                neg_bar=-0.1,
                                viewind=0)

    img_val_up = np.concatenate(
        [np.array(insvls), np.array(sigmoidactvls)], axis=1)
    img_val_mid2 = np.concatenate(
        [np.array(depthpredvls),
         np.array(depthpredvls_org)], axis=1)
    img_val = np.concatenate(
        [np.array(img_val_up), np.array(img_val_mid2)], axis=0)

    svtag = data_blob['tag'][0]
    svroot = os.path.join(args.svroot, args.restore_ckpt.split('/')[-2])
    os.makedirs(svroot, exist_ok=True)

    seq, imgname = svtag.split(' ')
    figname = "{}_{}".format(seq.split('/')[-1], imgname)
    vlsname = os.path.join(svroot, "{}.png".format(figname))
    Image.fromarray(img_val).save(vlsname)
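
A hedged sketch of the evaluation loop that could drive get_vls_eval is shown below; the model call and loader are assumptions, and args only needs the svroot and restore_ckpt fields read above:

import torch

@torch.no_grad()
def dump_eval_visualizations(model, eval_loader, args):
    model.eval()
    for data_blob in eval_loader:
        # outputs must provide the ('depth', 2) and ('residualdepth', 2) entries
        # consumed by get_vls_eval; data_blob must carry 'img1', 'insmap',
        # 'depthpred', and 'tag'.
        outputs = model(data_blob)
        get_vls_eval(data_blob, outputs, args)
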
Example #4
    def write_vls_eval(self, data_blob, outputs, tagname, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()

        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / outputs[('depth', 2)],
                                   vmax=0.15,
                                   viewind=0)
        depthgtvls = tensor2disp(1 / data_blob['depthmap'],
                                 vmax=0.15,
                                 viewind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid2 = np.concatenate(
            [np.array(depthpredvls),
             np.array(depthgtvls)], axis=1)
        img_val = np.concatenate(
            [np.array(img_val_up),
             np.array(img_val_mid2)], axis=0)
        self.writer.add_image('{}_predvls'.format(tagname),
                              (torch.from_numpy(img_val).float() /
                               255).permute([2, 0, 1]), step)

        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'],
                              data_blob['flowmap'], data_blob['insmap'],
                              outputs)
        self.writer.add_image('{}_X'.format(tagname),
                              (torch.from_numpy(X).float() / 255).permute(
                                  [2, 0, 1]), step)
Example #5
    def write_vls(self, data_blob, outputs, flowselector, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()

        figmask_flow = tensor2disp(flowselector, vmax=1, viewind=0)
        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / outputs[('depth', 2)],
                                   vmax=0.15,
                                   viewind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid2 = np.concatenate(
            [np.array(depthpredvls),
             np.array(figmask_flow)], axis=1)
        img_val = np.concatenate(
            [np.array(img_val_up),
             np.array(img_val_mid2)], axis=0)
        self.writer.add_image('predvls', (torch.from_numpy(img_val).float() /
                                          255).permute([2, 0, 1]), step)

        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'],
                              data_blob['flowgt_vls'], data_blob['insmap'],
                              outputs)
        self.writer.add_image('X', (torch.from_numpy(X).float() / 255).permute(
            [2, 0, 1]), step)
Example #6
    def write_vls(self, data_blob, outputs, flowselector, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()

        figmask_flow = tensor2disp(flowselector, vmax=1, viewind=0)
        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / outputs[('depth', 2)],
                                   vmax=0.15,
                                   viewind=0)
        flowvls = flow_to_image(
            outputs[('flowpred', 2)][0].detach().cpu().permute([1, 2, 0]).numpy(),
            rad_max=10)
        imgrecon = tensor2rgb(outputs[('reconImg', 2)], viewind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid2 = np.concatenate(
            [np.array(depthpredvls),
             np.array(figmask_flow)], axis=1)
        img_val_mid3 = np.concatenate(
            [np.array(imgrecon), np.array(flowvls)], axis=1)
        img_val = np.concatenate([
            np.array(img_val_up),
            np.array(img_val_mid2),
            np.array(img_val_mid3)
        ], axis=0)
        self.writer.add_image('predvls', (torch.from_numpy(img_val).float() /
                                          255).permute([2, 0, 1]), step)
Example #7
    def write_vls_eval(self, data_blob, outputs, tagname, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()

        insvls = vls_ins(img1, insmap)

        flowvls = flow_to_image(outputs[-1][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=50)

        img_val_up = np.concatenate([np.array(insvls), np.array(flowvls)], axis=1)
        self.writer.add_image('{}_predvls'.format(tagname), (torch.from_numpy(img_val_up).float() / 255).permute([2, 0, 1]), step)
Example #8
    def write_vls_eval(self, data_blob, outputs, tagname, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()

        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / data_blob['mdDepth_pred'], vmax=0.15, viewind=0)
        imgrecon = tensor2rgb(outputs[('img1_recon', 2)][:, -1], viewind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid = np.concatenate([np.array(depthpredvls), np.array(imgrecon)], axis=1)
        img_val = np.concatenate([np.array(img_val_up), np.array(img_val_mid)], axis=0)
        self.writer.add_image('{}_predvls'.format(tagname), (torch.from_numpy(img_val).float() / 255).permute([2, 0, 1]), step)
Example #9
    def write_vls(self, data_blob, outputs, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()
        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / data_blob['mdDepth_pred'],
                                   vmax=0.15,
                                   viewind=0)
        imgrecon = tensor2rgb(outputs[('img1_recon', 2)][:, -1], viewind=0)

        flow_RAFT = Image.fromarray(
            flow_to_image(data_blob['flowpred'][0].permute([1, 2, 0]).cpu().numpy()))
        flow_pred = Image.fromarray(
            flow_to_image(
                outputs[('flowpred', 2)][0].permute([1, 2, 0]).detach().cpu().numpy()))

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid = np.concatenate(
            [np.array(depthpredvls),
             np.array(imgrecon)], axis=1)
        img_val_flow = np.concatenate(
            [np.array(flow_RAFT), np.array(flow_pred)], axis=1)
        img_val = np.concatenate([
            np.array(img_val_up),
            np.array(img_val_mid),
            np.array(img_val_flow)
        ], axis=0)
        self.writer.add_image('predvls', (torch.from_numpy(img_val).float() /
                                          255).permute([2, 0, 1]), step)

        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'],
                              outputs)
        self.writer.add_image('X', (torch.from_numpy(X).float() / 255).permute(
            [2, 0, 1]), step)

        X = self.vls_objmvment(np.array(insvls), data_blob['insmap'],
                               data_blob['posepred'])
        self.writer.add_image('objmvment', (torch.from_numpy(X).float() /
                                            255).permute([2, 0, 1]), step)
Example #10
    def write_vls(self, data_blob, outputs, depthselector, step):
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()

        figmask_depth = tensor2disp(depthselector, vmax=1, viewind=0)
        insvls = vls_ins(img1, insmap)

        depthpredvls = tensor2disp(1 / outputs['depth_predictions'][-1],
                                   vmax=0.15,
                                   viewind=0)
        flowvls = flow_to_image(
            outputs['flowpred'][0].detach().cpu().permute([1, 2, 0]).numpy(),
            rad_max=50)
        imgrecon = tensor2rgb(outputs['img1_recon'], viewind=0)

        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid2 = np.concatenate(
            [np.array(depthpredvls),
             np.array(figmask_depth)], axis=1)
        img_val_mid3 = np.concatenate(
            [np.array(imgrecon), np.array(flowvls)], axis=1)
        img_val = np.concatenate([
            np.array(img_val_up),
            np.array(img_val_mid2),
            np.array(img_val_mid3)
        ], axis=0)
        self.writer.add_image('predvls', (torch.from_numpy(img_val).float() /
                                          255).permute([2, 0, 1]), step)

        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'],
                              data_blob['flowmap'], data_blob['insmap'],
                              outputs)
        self.writer.add_image('X', (torch.from_numpy(X).float() / 255).permute(
            [2, 0, 1]), step)
Example #11
def validate_RANSAC_odom_relpose(args, eval_loader, banins=False, bangrad=False, samplenum=50000):
    if bangrad:
        gradComputer = None
    else:
        gradComputer = GradComputer()

    for val_id, data_blob in enumerate(tqdm(eval_loader)):
        insmap = data_blob['insmap']
        intrinsic = data_blob['intrinsic']
        flowpred = data_blob['flowpred_RAFT']
        mdDepth_pred = data_blob['mdDepth']
        pose_bs = data_blob['posepred_bs']
        tag = data_blob['tag'][0]

        if torch.sum(torch.abs(data_blob['img1'] - data_blob['img2'])) < 1:
            R = np.eye(3)
            t = np.array([[0, 0, -1]]).T
            scale = 0
        else:
            R, t, scale, _ = inf_pose_flow(flowpred, insmap, mdDepth_pred, intrinsic, int(val_id + 10), gradComputer=gradComputer, banins=banins, samplenum=samplenum)
        self_pose = np.eye(4)
        self_pose[0:3, 0:3] = R
        self_pose[0:3, 3:4] = t * scale

        pose_bs_np = pose_bs[0].cpu().numpy()
        pose_bs_np = pose_bs_np @ np.linalg.inv(pose_bs_np[0]) @ self_pose
        pose_bs_np[0] = self_pose

        seq, frmidx = tag.split(' ')
        exportfold = os.path.join(args.vls_root, seq, 'image_02')

        insmap_np = data_blob['insmap'].squeeze().numpy()
        xx, yy = np.meshgrid(range(insmap_np.shape[1]), range(insmap_np.shape[0]), indexing='xy')
        fig, ax = plt.subplots(figsize=(16, 9))
        ax.imshow(vls_ins(np.array(tensor2rgb(data_blob['img1'], viewind=0)), insmap_np))
        for k in np.unique(insmap_np):
            if k == 0:  # skip the background (non-instance) label
                continue
            xxf = xx[insmap_np == k]
            yyf = yy[insmap_np == k]

            xmin = xxf.min()
            xmax = xxf.max()
            ymin = yyf.min()
            ymax = yyf.max()

            if (ymax - ymin) * (xmax - xmin) < 1000:
                continue

            rect = patches.Rectangle((xmin, ymax), xmax - xmin, ymin - ymax, linewidth=1, facecolor='none', edgecolor='r')
            ax.add_patch(rect)

            ins_relpose = pose_bs[0, k].cpu().numpy() @ np.linalg.inv(pose_bs[0, 0].cpu().numpy())
            mvdist = np.sqrt(np.sum(ins_relpose[0:3, 3:4] ** 2))
            mvdist_transed = np.sqrt(np.sum((pose_bs_np[k] @ np.linalg.inv(pose_bs_np[0]))[0:3, 3:4] ** 2))
            assert np.abs(mvdist_transed - mvdist) < 1e-3
            ax.text(xmin + 5, ymin + 10, '%.3f' % mvdist, fontsize=6, c='r', weight='bold')

        plt.axis('off')
        os.makedirs(exportfold, exist_ok=True)
        plt.savefig(os.path.join(exportfold, "{}.png".format(frmidx)), bbox_inches='tight', pad_inches=0)
        plt.close()
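
The assert in the loop above holds because right-multiplying every pose by the same transform (re-anchoring slot 0 to self_pose) leaves each instance's pose relative to slot 0 unchanged, so the translation norm mvdist is preserved. Below is a standalone numpy check of that identity, with randomly generated rigid transforms standing in for pose_bs[0] and self_pose (random_rigid is purely illustrative):

import numpy as np

def random_rigid(rng):
    # Random SE(3) matrix: orthonormalize a Gaussian matrix, force det = +1,
    # and attach a random translation.
    Q, _ = np.linalg.qr(rng.normal(size=(3, 3)))
    if np.linalg.det(Q) < 0:
        Q[:, 0] *= -1
    T = np.eye(4)
    T[:3, :3] = Q
    T[:3, 3] = rng.normal(size=3)
    return T

rng = np.random.default_rng(0)
poses = np.stack([random_rigid(rng) for _ in range(5)])  # stands in for pose_bs[0]
self_pose = random_rigid(rng)                            # stands in for the RANSAC pose

reanchored = poses @ np.linalg.inv(poses[0]) @ self_pose
reanchored[0] = self_pose

for k in range(1, len(poses)):
    rel_before = poses[k] @ np.linalg.inv(poses[0])
    rel_after = reanchored[k] @ np.linalg.inv(reanchored[0])
    # Relative pose, and hence its translation norm, is unchanged.
    assert np.allclose(rel_before, rel_after)
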