Example #1
def demo(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model, map_location=DEVICE))

    model = model.module
    model.to(DEVICE)
    model.eval()

    model = torch.jit.script(model)

    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
                 glob.glob(os.path.join(args.path, '*.jpg'))

        images = sorted(images)
        for imfile1, imfile2 in zip(images[:-1], images[1:]):
            image1 = load_image(imfile1)
            image2 = load_image(imfile2)

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            # export(
            #     model, image1, image2,
            #     torch.tensor([6], dtype=torch.int),
            #     torch.Tensor([False]),
            #     torch.Tensor([False]),
            #     torch.tensor([False], dtype=torch.bool)
            # )
            flow_low, flow_up = model(image1, image2, iters=1, test_mode=True)
            viz(image1, flow_up)
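
Every example here pushes the input pair through RAFT's InputPadder before inference, because the network downsamples by 8 and needs divisible dimensions. As a reference for reading these snippets, here is a minimal sketch of the pad/unpad contract; the real class lives in RAFT's utils, so treat the exact padding placement as an assumption:

import torch.nn.functional as F

class InputPadderSketch:
    """Pads images so H and W are divisible by 8; unpad() undoes it."""

    def __init__(self, dims, mode='sintel'):
        ht, wd = dims[-2:]
        pad_ht = (((ht // 8) + 1) * 8 - ht) % 8
        pad_wd = (((wd // 8) + 1) * 8 - wd) % 8
        if mode == 'sintel':  # split the padding across both sides
            self._pad = [pad_wd // 2, pad_wd - pad_wd // 2,
                         pad_ht // 2, pad_ht - pad_ht // 2]
        else:                 # 'kitti': pad one vertical side only
            self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]

    def pad(self, *inputs):
        # F.pad takes [left, right, top, bottom] for 4D tensors.
        return [F.pad(x, self._pad, mode='replicate') for x in inputs]

    def unpad(self, x):
        ht, wd = x.shape[-2:]
        return x[..., self._pad[2]:ht - self._pad[3],
                 self._pad[0]:wd - self._pad[1]]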
Example #2
def run_pair_tensor(model, image1, image2, thresh, force, strength, smooth,
                    interpolation):
    with torch.no_grad():
        padder = InputPadder(image1.shape)
        image1, image2 = padder.pad(image1, image2)

        flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)

        image1 = padder.unpad(image1)
        flow_up = padder.unpad(flow_up)

        blurImage, normalizedFlow = MotionBlur(image1, flow_up, thresh, force,
                                               strength, smooth, interpolation)
        #return image1, blurImage, flow_up, normalizedFlow

    #img = image1[0].permute(1,2,0).cpu().numpy()
    blur = blurImage[0].permute(1, 2, 0).cpu().numpy()

    flo1 = flow_up[0].permute(1, 2, 0).cpu().numpy()
    flo2 = normalizedFlow.permute(1, 2, 0).cpu().numpy()

    flo1 = flow_viz.flow_to_image(flo1)
    flo2 = flow_viz.flow_to_image(flo2)

    del image1, image2, padder, flow_low, flow_up, blurImage, normalizedFlow

    return blur, flo1, flo2
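
A hedged usage sketch for run_pair_tensor: MotionBlur and its knobs are project-specific, so the argument values below are placeholders rather than documented defaults, and load_image/model are assumed to exist as in the other examples.

# Hypothetical call; the parameter values are illustrative only.
image1 = load_image('frame_0001.png')  # 1x3xHxW float tensor on DEVICE
image2 = load_image('frame_0002.png')
blur, flo1, flo2 = run_pair_tensor(model, image1, image2,
                                   thresh=2.0, force=1.0, strength=0.5,
                                   smooth=True, interpolation='bilinear')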
Example #3
def validate_VRKitti2(model, args, eval_loader, group, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    model.eval()
    epe_list = torch.zeros(2).cuda(device=args.gpu)
    out_list = torch.zeros(2).cuda(device=args.gpu)

    for val_id, batch in enumerate(tqdm(eval_loader)):
        image1, image2, flow_gt, valid_gt = batch

        # torch.autograd.Variable is a no-op since PyTorch 0.4, and validation
        # needs no input gradients; move the batch tensors to the GPU directly.
        image1 = image1.cuda(args.gpu, non_blocking=True)
        image2 = image2.cuda(args.gpu, non_blocking=True)

        flow_gt = flow_gt.cuda(args.gpu, non_blocking=True)[0]
        valid_gt = valid_gt.cuda(args.gpu, non_blocking=True)[0]

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        # No gradients are needed for validation; avoid retaining the graph.
        with torch.no_grad():
            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0])

        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe / mag) > 0.05)).float()

        epe_list[0] += epe[val].mean().item()
        epe_list[1] += 1

        out_list[0] += out[val].sum()
        out_list[1] += torch.sum(val)

    if args.distributed:
        dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)

    if args.gpu == 0:
        epe = epe_list[0] / epe_list[1]
        f1 = 100 * out_list[0] / out_list[1]

        print("Validation KITTI: %f, %f" % (epe, f1))
        return {
            'kitti-epe': float(epe.detach().cpu().numpy()),
            'kitti-f1': float(f1.detach().cpu().numpy())
        }
    else:
        return None
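
The epe_list/out_list tensors above are two-slot accumulators: index 0 holds a running sum, index 1 a count (or the valid-pixel total), so one all_reduce per tensor turns local sums into global sums before the final division. The same pattern in a minimal single-process form:

import torch

acc = torch.zeros(2)           # [running sum, count]
for value in [0.8, 1.2, 1.0]:  # stand-ins for per-batch EPE values
    acc[0] += value
    acc[1] += 1
# In the distributed path, dist.all_reduce(acc, op=dist.ReduceOp.SUM)
# runs here so every rank contributes to both slots.
print(float(acc[0] / acc[1]))  # 1.0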
Example #4
def load_image_list(image_files, input_flow_w):
    images = []
    for imfile in sorted(image_files):
        images.append(load_image(imfile, input_flow_w))

    images = torch.stack(images, dim=0)
    images = images.to(DEVICE)

    padder = InputPadder(images.shape)
    return padder.pad(images)[0]
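
A hypothetical usage sketch: load_image and input_flow_w come from the surrounding project, so the width value here is only illustrative. The returned stack is already padded, so consecutive slices can go straight into the model.

frames = load_image_list(glob.glob('frames/*.png'), input_flow_w=1024)
for i in range(frames.shape[0] - 1):
    flow_low, flow_up = model(frames[i:i + 1], frames[i + 1:i + 2],
                              iters=20, test_mode=True)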
Example #5
def validate_kitti_colorjitter(gpu, model, args, ngpus_per_node, eval_entries, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    interval = int(np.floor(len(eval_entries) / ngpus_per_node))
    if gpu == ngpus_per_node - 1:
        stidx = int(interval * gpu)
        edidx = len(eval_entries)
    else:
        stidx = int(interval * gpu)
        edidx = int(interval * (gpu + 1))
    print("Initialize Instance on Gpu %d, from %d to %d, total %d" % (gpu, stidx, edidx, len(eval_entries)))
    from tqdm import tqdm
    model.eval()
    model.cuda(gpu)
    with torch.no_grad():
        for val_id, entry in enumerate(tqdm(remove_dup(eval_entries[stidx : edidx]))):
            seq, index = entry.split(' ')
            index = int(index)

            if os.path.exists(os.path.join(args.dataset, seq, 'image_02', 'data', "{}.png".format(str(index).zfill(10)))):
                tmproot = args.dataset
            else:
                tmproot = args.odom_root

            img1path = os.path.join(tmproot, seq, 'image_02', 'data', "{}.png".format(str(index).zfill(10)))
            img2path = os.path.join(tmproot, seq, 'image_02', 'data', "{}.png".format(str(index + 1).zfill(10)))

            if not os.path.exists(img2path):
                img2path = img1path

            image1 = frame_utils.read_gen(img1path)
            image2 = frame_utils.read_gen(img2path)

            image1 = np.array(image1).astype(np.uint8)
            image2 = np.array(image2).astype(np.uint8)

            image1 = torch.from_numpy(image1).permute([2, 0, 1]).float()
            image2 = torch.from_numpy(image2).permute([2, 0, 1]).float()

            svfold = os.path.join(args.exportroot, seq, 'image_02')
            svpath = os.path.join(args.exportroot, seq, 'image_02', "{}.png".format(str(index).zfill(10)))
            os.makedirs(svfold, exist_ok=True)

            image1 = image1[None].cuda(gpu)
            image2 = image2[None].cuda(gpu)

            padder = InputPadder(image1.shape, mode='kitti')
            image1, image2 = padder.pad(image1, image2)

            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
            flow = padder.unpad(flow_pr[0]).cpu()

            frame_utils.writeFlowKITTI(svpath, flow.permute(1, 2, 0).numpy())
            # Image.fromarray(flow_viz.flow_to_image(flow.permute(1, 2, 0).numpy())).show()
            # tensor2rgb(image1 / 255.0, viewind=0).show()
    return
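
The index arithmetic at the top splits the evaluation entries across GPUs, with the last GPU absorbing the remainder. The same logic as a standalone helper (a sketch; note the builtin int replaces the np.int alias, which NumPy has removed):

import numpy as np

def shard_range(num_entries, ngpus, gpu):
    """Return the [start, end) slice of entries that `gpu` should process."""
    interval = int(np.floor(num_entries / ngpus))
    stidx = interval * gpu
    edidx = num_entries if gpu == ngpus - 1 else interval * (gpu + 1)
    return stidx, edidx

print(shard_range(10, 3, 2))  # (6, 10): the last GPU takes the remainder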
Example #6
def RunImage(img1, img2):
    padder = InputPadder(img1.shape)
    image1, image2 = padder.pad(img1, img2)

    with torch.no_grad():
        flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)

        #print(flow_up.shape)
        #flow = model(image1, image2, iters=20, test_mode=False)[0]
        blurImage, normalizedFlow = MotionBlur(image1, flow_up)
    return image1, blurImage, flow_up, normalizedFlow
Example #7
File: demo.py Project: m4nh/RAFT
def demo(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
            glob.glob(os.path.join(args.path, '*.jpg')) + \
            glob.glob(os.path.join(args.path, '*.tiff'))

        images = sorted(images)
        for imfile1, imfile2 in zip(images[:-1], images[1:]):
            image1 = load_image(imfile1)
            image2 = load_image(imfile2)

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            t1 = time.perf_counter()
            flow_low, flow_up = model(image1, image2, iters=5, test_mode=True)
            t2 = time.perf_counter()
            print("Time: ", t2 - t1)
            ############
            height, width = image1.shape[-2:]
            grid = geometry.create_meshgrid(height,
                                            width,
                                            normalized_coordinates=False).to(
                                                image1.device)
            print("SPODSAOPDA", flow_up.shape, grid.shape, grid.min(),
                  grid.max())
            grid = flow_up.permute(0, 2, 3, 1) + grid
            flow_up_norm = geometry.normalize_pixel_coordinates(
                grid, height, width)  # BxHxWx2

            image1_warped = F.grid_sample(image2,
                                          flow_up_norm,
                                          align_corners=True)

            view(image1_warped, 'img2_warped')
            view(image1, 'img2')
            viz(image1, flow_up)
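
The warping step above adds the predicted flow to a pixel grid, normalizes the coordinates, and samples image2 with F.grid_sample, so the warped result should resemble image1 where the flow is accurate. A self-contained sketch of the same idea without the geometry (apparently kornia) dependency:

import torch
import torch.nn.functional as F

def warp_with_flow(image2, flow):
    """Backward-warp image2 (Bx3xHxW) with flow (Bx2xHxW) via grid_sample."""
    _, _, h, w = image2.shape
    yy, xx = torch.meshgrid(torch.arange(h), torch.arange(w), indexing='ij')
    grid = torch.stack((xx, yy), dim=-1).float().to(image2.device)  # HxWx2, (x, y)
    grid = grid[None] + flow.permute(0, 2, 3, 1)  # shift each pixel by its flow
    # Normalize pixel coordinates to the [-1, 1] range grid_sample expects.
    grid[..., 0] = 2.0 * grid[..., 0] / (w - 1) - 1.0
    grid[..., 1] = 2.0 * grid[..., 1] / (h - 1) - 1.0
    return F.grid_sample(image2, grid, align_corners=True)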
Example #8
@torch.no_grad()
def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
    """ Create submission for the Sintel leaderboard """
    model.eval()
    test_dataset = datasets.KITTI(split='testing', aug_params=None)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    for test_id in range(len(test_dataset)):
        image1, image2, (frame_id, ) = test_dataset[test_id]
        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1[None].to(DEVICE),
                                    image2[None].to(DEVICE))

        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

        output_filename = os.path.join(output_path, frame_id)
        frame_utils.writeFlowKITTI(output_filename, flow)
Example #9
@torch.no_grad()
def create_sintel_submission(model,
                             iters=32,
                             warm_start=False,
                             output_path='sintel_submission'):
    """ Create submission for the Sintel leaderboard """
    model.eval()
    for dstype in ['clean', 'final']:
        test_dataset = datasets.MpiSintel(split='test',
                                          aug_params=None,
                                          dstype=dstype)

        flow_prev, sequence_prev = None, None
        for test_id in range(len(test_dataset)):
            image1, image2, (sequence, frame) = test_dataset[test_id]
            if sequence != sequence_prev:
                flow_prev = None

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1[None].to(DEVICE),
                                        image2[None].to(DEVICE))

            flow_low, flow_pr = model(image1,
                                      image2,
                                      iters=iters,
                                      flow_init=flow_prev,
                                      test_mode=True)
            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

            if warm_start:
                flow_prev = forward_interpolate(flow_low[0])[None].to(DEVICE)

            output_dir = os.path.join(output_path, dstype, sequence)
            output_file = os.path.join(output_dir,
                                       'frame%04d.flo' % (frame + 1))

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            frame_utils.writeFlow(output_file, flow)
            sequence_prev = sequence
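
The warm start works because forward_interpolate splats each flow vector to the pixel it points at, which approximates the flow field of the next frame pair and gives the recurrent updates a head start. A rough sketch of that forward splat, assuming the scipy-style nearest-neighbor fill RAFT uses:

import numpy as np
import torch
from scipy import interpolate

def forward_interpolate_sketch(flow):
    """Splat a 2xHxW flow field forward to seed the next frame's estimate."""
    dx, dy = flow[0].cpu().numpy(), flow[1].cpu().numpy()
    ht, wd = dx.shape
    x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
    # Each vector lands where it points; keep only targets inside the image.
    x1, y1 = (x0 + dx).ravel(), (y0 + dy).ravel()
    vx, vy = dx.ravel(), dy.ravel()
    keep = (x1 >= 0) & (x1 < wd) & (y1 >= 0) & (y1 < ht)
    flow_x = interpolate.griddata((x1[keep], y1[keep]), vx[keep], (x0, y0),
                                  method='nearest')
    flow_y = interpolate.griddata((x1[keep], y1[keep]), vy[keep], (x0, y0),
                                  method='nearest')
    return torch.from_numpy(np.stack([flow_x, flow_y], axis=0)).float()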
Example #10
def demo(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
                 glob.glob(os.path.join(args.path, '*.jpg'))

        images = sorted(images)
        for imfile1, imfile2 in zip(images[:-1], images[1:]):
            image1 = load_image(imfile1)
            image2 = load_image(imfile2)

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
            viz(image1, flow_up)
Example #11
def demo2(args):

    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    sensor = OAKSensor()
    print("Sensor ok")

    with torch.no_grad():
        while True:
            buffer = sensor.grab()
            if buffer:
                im1 = cv2.flip(buffer['left_stream'], 1)
                im2 = cv2.flip(buffer['right_stream'], 1)

                image1 = torch.Tensor(im1).unsqueeze(0).unsqueeze(0).repeat(1, 3, 1, 1).to(DEVICE)
                image2 = torch.Tensor(im2).unsqueeze(0).unsqueeze(0).repeat(1, 3, 1, 1).to(DEVICE)

                padder = InputPadder(image1.shape)
                image1, image2 = padder.pad(image1, image2)

                t1 = time.perf_counter()
                flow_low, flow_up = model(image1, image2, iters=30, test_mode=True)

                t2 = time.perf_counter()
                print("Time: ", t2 - t1)

                # view(image1_warped, 'img2_warped')
                # view(image1, 'img2')
                viz_disparity(image1, flow_up)

                viz_gt(buffer['depth_stream'])
                # cv2.imshow("image", buffer['rgb_stream'])
                cv2.waitKey(1)
Example #12
@torch.no_grad()
def validate_kitti_colorjitter(model, args, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    from tqdm import tqdm
    model.eval()

    expandentries = read_splits_mapping(args)

    for val_id, entry in enumerate(tqdm(expandentries)):
        seq, idx, _ = entry.split(' ')
        idx = int(idx)
        img1path = os.path.join(args.dataset, seq, 'image_02', 'data', "{}.png".format(str(idx).zfill(10)))
        img2path = os.path.join(args.dataset, seq, 'image_02', 'data', "{}.png".format(str(idx + 1).zfill(10)))

        if not os.path.exists(img2path):
            img2path = img1path

        img1 = np.array(Image.open(img1path)).astype(np.float32)
        img2 = np.array(Image.open(img2path)).astype(np.float32)

        img1 = torch.from_numpy(img1).permute([2, 0, 1]).unsqueeze(0)
        img2 = torch.from_numpy(img2).permute([2, 0, 1]).unsqueeze(0)

        svfold = os.path.join(args.exportroot, seq, 'image_02')
        svpath = os.path.join(args.exportroot, seq, 'image_02', "{}.png".format(str(idx).zfill(10)))
        os.makedirs(svfold, exist_ok=True)

        image1 = img1.cuda()
        image2 = img2.cuda()

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).cpu()

        frame_utils.writeFlowKITTI(svpath, flow.permute(1, 2, 0).numpy())
    return
Example #13
@torch.no_grad()
def validate_sintel(model, iters=32):
    """ Peform validation using the Sintel (train) split """
    model.eval()
    results = {}
    for dstype in ['clean', 'final']:
        val_dataset = datasets.MpiSintel(split='training', dstype=dstype)
        epe_list = []

        for val_id in range(len(val_dataset)):
            image1, image2, flow_gt, _ = val_dataset[val_id]
            image1 = image1[None].to(DEVICE)
            image2 = image2[None].to(DEVICE)

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            flow_low, flow_pr = model(image1,
                                      image2,
                                      iters=iters,
                                      test_mode=True)
            flow = padder.unpad(flow_pr[0]).cpu()

            epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
            epe_list.append(epe.view(-1).numpy())

        epe_all = np.concatenate(epe_list)
        epe = np.mean(epe_all)
        px1 = np.mean(epe_all < 1)
        px3 = np.mean(epe_all < 3)
        px5 = np.mean(epe_all < 5)

        print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" %
              (dstype, epe, px1, px3, px5))
        results[dstype] = epe

    return results
Example #14
@torch.no_grad()
def validate_kitti(model, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    model.eval()
    val_dataset = datasets.KITTI(split='training')

    out_list, epe_list = [], []
    for val_id in range(len(val_dataset)):
        image1, image2, flow_gt, valid_gt = val_dataset[val_id]
        image1 = image1[None].to(DEVICE)
        image2 = image2[None].to(DEVICE)

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).cpu()

        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe / mag) > 0.05)).float()
        epe_list.append(epe[val].mean().item())
        out_list.append(out[val].cpu().numpy())

    epe_list = np.array(epe_list)
    out_list = np.concatenate(out_list)

    epe = np.mean(epe_list)
    f1 = 100 * np.mean(out_list)

    print("Validation KITTI: %f, %f" % (epe, f1))
    return {'kitti-epe': epe, 'kitti-f1': f1}
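
For reference, EPE is the mean L2 distance between predicted and ground-truth flow vectors, and F1 (KITTI's Fl-all) is the percentage of valid pixels whose error exceeds both 3 px and 5% of the ground-truth magnitude. The same computation isolated on dummy tensors:

import torch

flow_pred = torch.randn(2, 4, 5)  # 2xHxW predicted flow
flow_gt = flow_pred + 0.1 * torch.randn(2, 4, 5)
valid = torch.ones(4, 5).view(-1) >= 0.5  # every pixel valid in this toy case

epe = torch.sum((flow_pred - flow_gt) ** 2, dim=0).sqrt().view(-1)
mag = torch.sum(flow_gt ** 2, dim=0).sqrt().view(-1)
out = ((epe > 3.0) & ((epe / mag) > 0.05)).float()

print("Validation KITTI: %f, %f" % (epe[valid].mean(), 100 * out[valid].mean()))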
Example #15
def validate_kitti(model, args, eval_loader, eppCbck, group, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    model.eval()
    epe_list = torch.zeros(2).cuda(device=args.gpu)
    out_list = torch.zeros(2).cuda(device=args.gpu)
    eppc_list = torch.zeros(2).cuda(device=args.gpu)

    for val_id, batch in enumerate(tqdm(eval_loader)):
        # torch.autograd.Variable is a no-op since PyTorch 0.4; move the batch
        # tensors to the GPU directly.
        image1 = batch['img1'].cuda(args.gpu, non_blocking=True)
        image2 = batch['img2'].cuda(args.gpu, non_blocking=True)
        flow_gt = batch['flow'].cuda(args.gpu, non_blocking=True)[0]
        valid_gt = batch['valid'].cuda(args.gpu, non_blocking=True)[0]
        intrinsic = batch['intrinsic'].cuda(args.gpu, non_blocking=True)
        rel_pose = batch['rel_pose'].cuda(args.gpu, non_blocking=True)
        semantic_selector = batch['semantic_selector'].cuda(args.gpu, non_blocking=True)

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        # No gradients are needed for validation; avoid retaining the graph.
        with torch.no_grad():
            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0])

        eppc = eppCbck.epp_constrain_val(flowest=flow,
                                         intrinsic=intrinsic,
                                         rel_pose=rel_pose,
                                         valid=semantic_selector)
        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe / mag) > 0.05)).float()

        epe_list[0] += epe[val].mean().item()
        epe_list[1] += 1

        out_list[0] += out[val].sum()
        out_list[1] += torch.sum(val)

        eppc_list[0] += eppc
        eppc_list[1] += 1

    if args.distributed:
        dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=eppc_list, op=dist.ReduceOp.SUM, group=group)

    if args.gpu == 0:
        epe = epe_list[0] / epe_list[1]
        f1 = 100 * out_list[0] / out_list[1]
        eppc = eppc_list[0] / eppc_list[1]

        print("Validation KITTI, epe: %f, f1: %f, eppc: %f" % (epe, f1, eppc))
        return {
            'kitti-epe': float(epe.detach().cpu().numpy()),
            'kitti-f1': float(f1.detach().cpu().numpy()),
            'kitti-eppc': float(eppc.detach().cpu().numpy())
        }
    else:
        return None
Example #16
def validate_kitti(model, args, eval_loader, eppCbck, eppconcluer, group, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    model.eval()
    epe_list = torch.zeros(2).cuda(device=args.gpu)
    out_list = torch.zeros(2).cuda(device=args.gpu)
    eppc_list = torch.zeros(2).cuda(device=args.gpu)

    mvl_list = torch.zeros(2).cuda(device=args.gpu)
    angl_list = torch.zeros(2).cuda(device=args.gpu)
    residual_opt_list = torch.zeros(2).cuda(device=args.gpu)
    residual_gt_list = torch.zeros(2).cuda(device=args.gpu)

    for val_id, batch in enumerate(tqdm(eval_loader)):
        # torch.autograd.Variable is a no-op since PyTorch 0.4; move the batch
        # tensors to the GPU directly.
        image1 = batch['img1'].cuda(args.gpu, non_blocking=True)
        image2 = batch['img2'].cuda(args.gpu, non_blocking=True)
        flow_gt = batch['flow'].cuda(args.gpu, non_blocking=True)[0]
        valid_gt = batch['valid'].cuda(args.gpu, non_blocking=True)[0]
        intrinsic = batch['intrinsic'].cuda(args.gpu, non_blocking=True)
        rel_pose = batch['rel_pose'].cuda(args.gpu, non_blocking=True)
        E = batch['E'].cuda(args.gpu, non_blocking=True)
        semantic_selector = batch['semantic_selector'].cuda(args.gpu, non_blocking=True)
        depth = batch['depth'].cuda(args.gpu, non_blocking=True)

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        # No gradients are needed for validation; avoid retaining the graph.
        with torch.no_grad():
            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0])

        pts1, pts2 = eppconcluer.flowmap2ptspair(flowmap=flow.unsqueeze(0), valid=semantic_selector.unsqueeze(0))
        outputsrec = eppconcluer.newton_gauss_F(pts2d1=pts1, pts2d2=pts2, intrinsic=intrinsic.squeeze(), posegt=rel_pose.squeeze())

        # if outputsrec['loss_mv'] > 0.01 and outputsrec['loss_mv'] < 0.1:
        #     image1_unpad = padder.unpad(image1[0])
        #     image2_unpad = padder.unpad(image2[0])
        #     img1 = image1_unpad.cpu().detach().permute([1, 2, 0]).numpy().astype(np.uint8)
        #     img2 = image2_unpad.cpu().detach().permute([1, 2, 0]).numpy().astype(np.uint8)
        #
        #     validnp = valid_gt.detach().cpu().numpy() == 1
        #     depthnp = depth[0].cpu().squeeze().numpy()
        #
        #     h, w = image1_unpad.shape[1::]
        #     xx, yy = np.meshgrid(range(w), range(h), indexing='xy')
        #
        #     xxf = xx[validnp]
        #     yyf = yy[validnp]
        #     depthf = depthnp[validnp]
        #     xxf_oview = flow_gt[0].detach().cpu().numpy()[validnp] + xxf
        #     yyf_oview = flow_gt[1].detach().cpu().numpy()[validnp] + yyf
        #
        #     cm = plt.get_cmap('magma')
        #     vmax = 0.15
        #     tnp = 1 / depthf / vmax
        #     tnp = cm(tnp)
        #
        #     fig = plt.figure(figsize=(16, 2.5))
        #     canvas = FigureCanvasAgg(fig)
        #     fig.add_subplot(1, 2, 1)
        #     plt.scatter(xxf, yyf, 1, tnp)
        #     plt.imshow(img1)
        #
        #     fig.add_subplot(1, 2, 2)
        #     plt.scatter(xxf_oview, yyf_oview, 1, tnp)
        #     plt.imshow(img2)
        #
        #     plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
        #     canvas.draw()
        #     buf = canvas.buffer_rgba()
        #     plt.close()
        #     X = np.asarray(buf)
        #     X = np.array(Image.fromarray(X).resize([w * 2, h], Image.BILINEAR))
        #     Image.fromarray(X).show()

        # if outputsrec['loss_mv'] > 0.1:
        #     continue

        eppc = eppCbck.epp_constrain_val(flowest=flow, E=E, valid=semantic_selector)
        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()

        epe_list[0] += epe[val].mean().item()
        epe_list[1] += 1

        out_list[0] += out[val].sum()
        out_list[1] += torch.sum(val)

        eppc_list[0] += eppc
        eppc_list[1] += 1

        mvl_list[0] += outputsrec['loss_mv']
        mvl_list[1] += 1

        angl_list[0] += outputsrec['loss_ang']
        angl_list[1] += 1

        residual_opt_list[0] += outputsrec['loss_constrain']
        residual_opt_list[1] += 1

        residual_gt_list[0] += outputsrec['loss_constrain_gt']
        residual_gt_list[1] += 1

    if args.distributed:
        dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=eppc_list, op=dist.ReduceOp.SUM, group=group)

        dist.all_reduce(tensor=mvl_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=angl_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=residual_opt_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=residual_gt_list, op=dist.ReduceOp.SUM, group=group)

    if args.gpu == 0:
        epe = epe_list[0] / epe_list[1]
        f1 = 100 * out_list[0] / out_list[1]
        eppc = eppc_list[0] / eppc_list[1]

        mvl = mvl_list[0] / mvl_list[1]
        angl = angl_list[0] / angl_list[1]
        residual_optl = residual_opt_list[0] / residual_opt_list[1]
        residual_gtl = residual_gt_list[0] / residual_gt_list[1]

        # print("Validation KITTI, epe: %f, f1: %f, eppc: %f, mvl: %f, angl: %f, residual_optl: %f, residual_gt: %f" % (epe, f1, eppc, mvl, angl, residual_optl, residual_gtl))
        return {'kitti-epe': float(epe.detach().cpu().numpy()), 'kitti-f1': float(f1.detach().cpu().numpy()), 'kitti-eppc': float(eppc.detach().cpu().numpy()),
                'mvl': float(mvl.detach().cpu().numpy()), 'angl': float(angl.detach().cpu().numpy()),
                'residual_optl': float(residual_optl.detach().cpu().numpy()), 'residual_gtl': float(residual_gtl.detach().cpu().numpy())
                }
    else:
        return None
Example #17
def validate_kitti(model,
                   args,
                   eval_loader,
                   eppCbck,
                   eppc_dict,
                   group,
                   iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    model.eval()
    epe_list = torch.zeros(2).cuda(device=args.gpu)
    out_list = torch.zeros(2).cuda(device=args.gpu)
    eppc_list = torch.zeros(2).cuda(device=args.gpu)

    mvl_list = torch.zeros(2).cuda(device=args.gpu)
    angl_list = torch.zeros(2).cuda(device=args.gpu)
    for val_id, batch in enumerate(tqdm(eval_loader)):
        # torch.autograd.Variable is a no-op since PyTorch 0.4; move the batch
        # tensors to the GPU directly.
        image1 = batch['img1'].cuda(args.gpu, non_blocking=True)
        image2 = batch['img2'].cuda(args.gpu, non_blocking=True)
        flow_gt = batch['flow'].cuda(args.gpu, non_blocking=True)[0]
        valid_gt = batch['valid'].cuda(args.gpu, non_blocking=True)[0]
        intrinsic = batch['intrinsic'].cuda(args.gpu, non_blocking=True)
        rel_pose = batch['rel_pose'].cuda(args.gpu, non_blocking=True)
        E = batch['E'].cuda(args.gpu, non_blocking=True)
        semantic_selector = batch['semantic_selector'].cuda(args.gpu, non_blocking=True)
        depth = batch['depth'].cuda(args.gpu, non_blocking=True)

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        # No gradients are needed for validation; avoid retaining the graph.
        with torch.no_grad():
            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0])

        bz, _, h, w = depth.shape
        eppc_key = "{}_{}".format(h, w)
        if eppc_key not in eppc_dict:
            eppc = EPPCore(bz=1,
                           height=h,
                           width=w,
                           itnum=100,
                           lr=0.1,
                           lap=1e-2,
                           maxinsnum=10)
            eppc.to(f'cuda:{args.gpu}')
            eppc_dict[eppc_key] = eppc
        eppc = eppc_dict[eppc_key]

        instancemap = torch.zeros_like(depth)
        instancemap[semantic_selector.unsqueeze(0) == 0] = -1
        instancemap = instancemap.int()
        t, R = eppc.flow2epp(insmap=instancemap,
                             flowmap=flow.unsqueeze(0),
                             intrinsic=intrinsic)
        ang = eppc.R2ang(R)

        t_est = t[0, 0].squeeze()
        ang_est = ang[0, 0].squeeze()

        t_gt = rel_pose[0, 0:3, 3] / torch.norm(rel_pose[0, 0:3, 3])
        ang_gt = (eppc.R2ang(rel_pose[0, 0:3,
                                      0:3].unsqueeze(0).unsqueeze(0).expand(
                                          [-1, 10, -1, -1]))[0, 0]).squeeze()

        loss_mv = 1 - torch.sum(t_est * t_gt)
        loss_ang = (ang_est - ang_gt).abs().mean()

        if loss_mv > 0.1:
            continue

        eppc = eppCbck.epp_constrain_val(flowest=flow,
                                         E=E,
                                         valid=semantic_selector)
        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe / mag) > 0.05)).float()

        epe_list[0] += epe[val].mean().item()
        epe_list[1] += 1

        out_list[0] += out[val].sum()
        out_list[1] += torch.sum(val)

        eppc_list[0] += eppc
        eppc_list[1] += 1

        mvl_list[0] += loss_mv
        mvl_list[1] += 1

        angl_list[0] += loss_ang
        angl_list[1] += 1

    if args.distributed:
        dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=eppc_list, op=dist.ReduceOp.SUM, group=group)

        dist.all_reduce(tensor=mvl_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=angl_list, op=dist.ReduceOp.SUM, group=group)

    if args.gpu == 0:
        epe = epe_list[0] / epe_list[1]
        f1 = 100 * out_list[0] / out_list[1]
        eppc = eppc_list[0] / eppc_list[1]

        mvl = mvl_list[0] / mvl_list[1]
        angl = angl_list[0] / angl_list[1]

        return {
            'kitti-epe': float(epe.detach().cpu().numpy()),
            'kitti-f1': float(f1.detach().cpu().numpy()),
            'kitti-eppc': float(eppc.detach().cpu().numpy()),
            'mvl': float(mvl.detach().cpu().numpy()),
            'angl': float(angl.detach().cpu().numpy())
        }
    else:
        return None
Example #18
def validate_kitti(model, args, eval_loader, eppCbck, eppconcluer, group, iters=24):
    """ Peform validation using the KITTI-2015 (train) split """
    model.eval()
    epe_list = torch.zeros(2).cuda(device=args.gpu)
    out_list = torch.zeros(2).cuda(device=args.gpu)
    eppc_list = torch.zeros(2).cuda(device=args.gpu)

    mvl_list = torch.zeros(2).cuda(device=args.gpu)
    angl_list = torch.zeros(2).cuda(device=args.gpu)
    residual_opt_list = torch.zeros(2).cuda(device=args.gpu)
    residual_gt_list = torch.zeros(2).cuda(device=args.gpu)

    mvlv2d_list = torch.zeros(2).cuda(device=args.gpu)
    anglv2d_list = torch.zeros(2).cuda(device=args.gpu)
    for val_id, batch in enumerate(tqdm(eval_loader)):
        # torch.autograd.Variable is a no-op since PyTorch 0.4; move the batch
        # tensors to the GPU directly.
        image1 = batch['img1'].cuda(args.gpu, non_blocking=True)
        image2 = batch['img2'].cuda(args.gpu, non_blocking=True)
        flow_gt = batch['flow'].cuda(args.gpu, non_blocking=True)[0]
        valid_gt = batch['valid'].cuda(args.gpu, non_blocking=True)[0]
        intrinsic = batch['intrinsic'].cuda(args.gpu, non_blocking=True)
        rel_pose = batch['rel_pose'].cuda(args.gpu, non_blocking=True)
        E = batch['E'].cuda(args.gpu, non_blocking=True)
        semantic_selector = batch['semantic_selector'].cuda(args.gpu, non_blocking=True)
        depth = batch['depth'].cuda(args.gpu, non_blocking=True)

        rel_pose_deepv2d = read_deepv2d_pose(batch['entry'][0]).cuda(args.gpu, non_blocking=True)

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        # No gradients are needed for validation; avoid retaining the graph.
        with torch.no_grad():
            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0])

        pts1, pts2 = eppconcluer.flowmap2ptspair(flowmap=flow.unsqueeze(0), valid=semantic_selector.unsqueeze(0))
        outputsrec = eppconcluer.newton_gauss_F(pts2d1=pts1, pts2d2=pts2, intrinsic=intrinsic.squeeze(), posegt=rel_pose.squeeze(), posedeepv2d=rel_pose_deepv2d)

        if outputsrec['loss_mv'] > 0.1:
            continue

        if True:
            rel_pose_deepv2d[0:3, 3] = rel_pose_deepv2d[0:3, 3] / torch.norm(rel_pose_deepv2d[0:3, 3]) * torch.norm(rel_pose[0, 0:3, 3])
            poses = [rel_pose.squeeze().cpu().numpy(), rel_pose_deepv2d.squeeze().cpu().numpy(), outputsrec['pose_est'].squeeze().cpu().numpy()]
            vlsroots = ['/media/shengjie/c9c81c9f-511c-41c6-bfe0-2fc19666fb32/Visualizations/kitti_imu_eigen', '/media/shengjie/c9c81c9f-511c-41c6-bfe0-2fc19666fb32/Visualizations/deepv2d_posevls_eigen',
                        '/media/shengjie/c9c81c9f-511c-41c6-bfe0-2fc19666fb32/Visualizations/raft_posevls_eigen_nptsdist']

            seq, imgname, _ = batch['entry'][0].split(' ')

            image1_unpad = padder.unpad(image1[0])
            image2_unpad = padder.unpad(image2[0])
            img1 = image1_unpad.cpu().detach().permute([1, 2, 0]).numpy().astype(np.uint8)
            img2 = image2_unpad.cpu().detach().permute([1, 2, 0]).numpy().astype(np.uint8)

            depthnp = depth[0].cpu().squeeze().numpy()
            validnp = depthnp > 0

            h, w = image1_unpad.shape[1::]
            xx, yy = np.meshgrid(range(w), range(h), indexing='xy')

            k = 2
            vlsroot = vlsroots[k]
            posec = poses[k]

            xxf = xx[validnp]
            yyf = yy[validnp]
            depthf = depthnp[validnp]

            pts3d = np.stack([xxf * depthf, yyf * depthf, depthf, np.ones_like(xxf)], axis=0)

            intrinsicnp = np.eye(4)
            intrinsicnp[0:3, 0:3] = intrinsic.squeeze().cpu().numpy()
            pts3d_oview = intrinsicnp @ posec @ np.linalg.inv(intrinsicnp) @ pts3d
            pts3d_oview_x = pts3d_oview[0, :] / pts3d_oview[2, :]
            pts3d_oview_y = pts3d_oview[1, :] / pts3d_oview[2, :]

            cm = plt.get_cmap('magma')
            vmax = 0.15
            tnp = 1 / depthf / vmax
            tnp = cm(tnp)

            fig = plt.figure(figsize=(16, 9))
            fig.add_subplot(2, 1, 1)
            plt.scatter(xxf, yyf, 1, tnp)
            plt.imshow(img1)

            fig.add_subplot(2, 1, 2)
            plt.scatter(pts3d_oview_x, pts3d_oview_y, 1, tnp)
            plt.imshow(img2)

            plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
            plt.savefig(os.path.join(vlsroot, "{}_{}.png".format(seq.split("/")[1], imgname.zfill(10))))
            plt.close()

        if False:
            exportroot = '/media/shengjie/disk1/Prediction/RAFT_eigen_pose_nptsdist'
            seq, imgname, _ = batch['entry'][0].split(' ')
            svfold = os.path.join(exportroot, seq)
            os.makedirs(svfold, exist_ok=True)

            picklepath = os.path.join(svfold, "{}.pickle".format(imgname.zfill(10)))
            pose2write = outputsrec['pose_est'].squeeze().cpu().numpy()
            with open(picklepath, 'wb') as handle:
                pickle.dump(pose2write, handle, protocol=pickle.HIGHEST_PROTOCOL)

            exportroot = '/media/shengjie/disk1/Prediction/IMU_eigen_pose_nptsdist'
            seq, imgname, _ = batch['entry'][0].split(' ')
            svfold = os.path.join(exportroot, seq)
            os.makedirs(svfold, exist_ok=True)

            picklepath = os.path.join(svfold, "{}.pickle".format(imgname.zfill(10)))
            pose2write = rel_pose.squeeze().cpu().numpy()
            with open(picklepath, 'wb') as handle:
                pickle.dump(pose2write, handle, protocol=pickle.HIGHEST_PROTOCOL)


        eppc = eppCbck.epp_constrain_val(flowest=flow, E=E, valid=semantic_selector)
        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()

        epe_list[0] += epe[val].mean().item()
        epe_list[1] += 1

        out_list[0] += out[val].sum()
        out_list[1] += torch.sum(val)

        eppc_list[0] += eppc
        eppc_list[1] += 1

        mvl_list[0] += outputsrec['loss_mv']
        mvl_list[1] += 1

        angl_list[0] += outputsrec['loss_ang']
        angl_list[1] += 1

        mvlv2d_list[0] += outputsrec['loss_mv_dv2d']
        mvlv2d_list[1] += 1

        anglv2d_list[0] += outputsrec['loss_ang_dv2d']
        anglv2d_list[1] += 1

        residual_opt_list[0] += outputsrec['loss_constrain']
        residual_opt_list[1] += 1

        residual_gt_list[0] += outputsrec['loss_constrain_gt']
        residual_gt_list[1] += 1

    if args.distributed:
        dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=eppc_list, op=dist.ReduceOp.SUM, group=group)

        dist.all_reduce(tensor=mvl_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=angl_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=residual_opt_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=residual_gt_list, op=dist.ReduceOp.SUM, group=group)

        dist.all_reduce(tensor=mvlv2d_list, op=dist.ReduceOp.SUM, group=group)
        dist.all_reduce(tensor=anglv2d_list, op=dist.ReduceOp.SUM, group=group)

    if args.gpu == 0:
        epe = epe_list[0] / epe_list[1]
        f1 = 100 * out_list[0] / out_list[1]
        eppc = eppc_list[0] / eppc_list[1]

        mvl = mvl_list[0] / mvl_list[1]
        angl = angl_list[0] / angl_list[1]
        residual_optl = residual_opt_list[0] / residual_opt_list[1]
        residual_gtl = residual_gt_list[0] / residual_gt_list[1]

        mvl_dv2d = mvlv2d_list[0] / mvlv2d_list[1]
        angl_dv2d = anglv2d_list[0] / anglv2d_list[1]

        return {'kitti-epe': float(epe.detach().cpu().numpy()), 'kitti-f1': float(f1.detach().cpu().numpy()), 'kitti-eppc': float(eppc.detach().cpu().numpy()),
                'mvl': float(mvl.detach().cpu().numpy()), 'angl': float(angl.detach().cpu().numpy()), 'mvl_dv2d': float(mvl_dv2d.detach().cpu().numpy()), 'angl_dv2d': float(angl_dv2d.detach().cpu().numpy()),
                'residual_optl': float(residual_optl.detach().cpu().numpy()), 'residual_gtl': float(residual_gtl.detach().cpu().numpy())
                }
    else:
        return None