Code Example #1
    def mask_valid_correspondences(self, flow_thresh=1, color_thresh=1):
        flow_fmt = pjoin(self.path, "flow", "flow_{:06d}_{:06d}.raw")
        mask_fmt = pjoin(self.path, "mask", "mask_{:06d}_{:06d}.png")
        color_fmt = pjoin(self.path, "color_down", "frame_{:06d}.raw")

        def get_indices(name):
            strs = os.path.splitext(name)[0].split("_")[1:]
            return [int(s) for s in strs]

        os.makedirs(os.path.dirname(mask_fmt), exist_ok=True)
        flow_names = os.listdir(os.path.dirname(flow_fmt))
        for flow_name in flow_names:
            indices = get_indices(flow_name)
            if os.path.isfile(mask_fmt.format(*indices)):
                continue

            indices_pair = [indices, indices[::-1]]
            flow_fns = [flow_fmt.format(*idxs) for idxs in indices_pair]
            mask_fns = [mask_fmt.format(*idxs) for idxs in indices_pair]
            color_fns = [color_fmt.format(idx) for idx in indices]

            flows = [image_io.load_raw_float32_image(fn) for fn in flow_fns]
            colors = [image_io.load_raw_float32_image(fn) for fn in color_fns]

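            # Keep only correspondences whose forward/backward flows and colors agree within the given thresholds.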
            masks = consistency.consistent_flow_masks(flows, colors,
                                                      flow_thresh,
                                                      color_thresh)

            for mask, mask_fn in zip(masks, mask_fns):
                cv2.imwrite(mask_fn, mask * 255)
Code Example #2
    def visualize_flow(self, warp=False):
        flow_fmt = pjoin(self.path, "flow", "flow_{:06d}_{:06d}.raw")
        mask_fmt = pjoin(self.path, "mask", "mask_{:06d}_{:06d}.png")
        color_fmt = pjoin(self.path, "color_down", "frame_{:06d}.raw")
        vis_fmt = pjoin(self.path, "vis_flow", "frame_{:06d}_{:06d}.png")
        warp_fmt = pjoin(self.path, "vis_flow_warped",
                         "frame_{:06d}_{:06d}_warped.png")

        def get_indices(name):
            strs = os.path.splitext(name)[0].split("_")[1:]
            return sorted((int(s) for s in strs))

        for fmt in (vis_fmt, warp_fmt):
            os.makedirs(os.path.dirname(fmt), exist_ok=True)

        flow_names = os.listdir(os.path.dirname(flow_fmt))
        for flow_name in flow_names:
            indices = get_indices(flow_name)
            if os.path.isfile(vis_fmt.format(*indices)) and (
                    not warp or os.path.isfile(warp_fmt.format(*indices))):
                continue

            indices_pair = [indices, indices[::-1]]
            flow_fns = [flow_fmt.format(*idxs) for idxs in indices_pair]
            mask_fns = [mask_fmt.format(*idxs) for idxs in indices_pair]
            color_fns = [color_fmt.format(idx) for idx in indices]

            flows = [image_io.load_raw_float32_image(fn) for fn in flow_fns]
            flow_ims = [flowlib.flow_to_image(np.copy(flow)) for flow in flows]
            colors = [
                image_io.load_raw_float32_image(fn) * 255 for fn in color_fns
            ]
            masks = [cv2.imread(fn, 0) for fn in mask_fns]

            masked_colors = [
                visualization.apply_mask(im, mask)
                for im, mask in zip(colors, masks)
            ]
            masked_flows = [
                visualization.apply_mask(im, mask)
                for im, mask in zip(flow_ims, masks)
            ]

            masked = np.hstack(masked_colors + masked_flows)
            original = np.hstack(colors + flow_ims)
            visual = np.vstack((original, masked))
            cv2.imwrite(vis_fmt.format(*indices), visual)

            if warp:
                warped = [
                    warp_by_flow(color, flow)
                    for color, flow in zip(colors[::-1], flows)
                ]
                for idxs, im in zip([indices, indices[::-1]], warped):
                    cv2.imwrite(warp_fmt.format(*idxs), im)
Code Example #3
def make_camera_params_from_colmap(path, sparse_dir):
    cameras, images, points3D = load_colmap.read_model(path=sparse_dir,
                                                       ext=".bin")
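    # Target (width, height) of the down-scaled color frames, read from frame 0.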
    size_new = image_io.load_raw_float32_image(
        pjoin(path, "color_down",
              "frame_{:06d}.raw".format(0))).shape[:2][::-1]
    intrinsics, extrinsics = load_colmap.convert_calibration(
        cameras, images, size_new)
    return intrinsics, extrinsics
Code Example #4
    def compute_flow(self, index_pairs, checkpoint):
        """Run Flownet2 with specific <checkpoint> (FlowNet2 or finetuned on KITTI)
        Note that we don't fit a homography first for the FlowNet2-KITTI model.
        """
        model_name = checkpoint.lower()
        if model_name == "flownet2-kitti":
            model_file = get_model_from_url(
                "https://www.dropbox.com/s/mme80czrpbqal7k/flownet2-kitti.pth.tar?dl=1",
                model_name + ".pth",
            )
        else:
            model_file = f"checkpoints/{model_name}.pth"

        mkdir_ifnotexists("%s/flow" % self.path)

        if self.check_flow_files(index_pairs):
            return

        frame_dir = "%s/color_flow" % self.path
        frame1_fns = [
            "%s/frame_%06d.png" % (frame_dir, pair[0]) for pair in index_pairs
        ]
        frame2_fns = [
            "%s/frame_%06d.png" % (frame_dir, pair[1]) for pair in index_pairs
        ]
        out_fns = [
            "%s/flow/flow_%06d_%06d.raw" % (self.path, i, j)
            for (i, j) in index_pairs
        ]

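        # Determine the output flow resolution from the first down-scaled color frame.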
        tmp = image_io.load_raw_float32_image(
            pjoin(self.path, "color_down", "frame_{:06d}.raw".format(0)))
        size = tmp.shape[:2][::-1]
        print("Resizing flow to", size)

        args = dotdict()
        args.pretrained_model_flownet2 = model_file
        args.im1 = list(frame1_fns)
        args.im2 = list(frame2_fns)
        args.out = list(out_fns)
        args.size = size
        args.fp16 = False
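        # As noted in the docstring, skip homography pre-fitting for the KITTI-finetuned model.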
        args.homography = 'KITTI' not in checkpoint
        args.rgb_max = 255.0
        args.visualize = False

        optical_flow_flownet2_homography.process(args)

        self.check_flow_files(index_pairs)
Code Example #5
def visualize_calibration_pair(extrinsics, intrinsics, depth_fmt, color_fmt,
                               id_pair, vis_dir):
    assert len(id_pair) == 2

    depth_fns = [depth_fmt.format(id) for id in id_pair]
    if any(not os.path.isfile(fn) for fn in depth_fns):
        return

    color_fns = [color_fmt.format(id) for id in id_pair]
    colors = [load_color(fn, channels_first=True) for fn in color_fns]
    colors = torch.stack(colors, dim=0).to(_device)
    inv_depths = [image_io.load_raw_float32_image(fn) for fn in depth_fns]
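    # The .raw depth files store inverse depth; invert and add a channel dimension.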
    depths = 1.0 / torch.tensor(inv_depths, device=_device).unsqueeze(-3)

    def select_tensor(x):
        return torch.tensor(x[list(id_pair)], device=_device, dtype=_dtype)

    extr = select_tensor(extrinsics)
    intr = select_tensor(intrinsics)

    colors_warped_to_ref = geometry.warp_image(colors, depths, extr, intr,
                                               [1, 0])

    def vis(x):
        x = np.clip(x.permute(1, 2, 0).cpu().numpy(), a_min=0, a_max=1)
        x = x[..., ::-1] * 255  # RGB to BGR, [0, 1] to [0, 255]
        return x

    os.makedirs(vis_dir, exist_ok=True)
    for id, tgt_id, color_warped, color in zip(id_pair, id_pair[::-1],
                                               colors_warped_to_ref, colors):
        cv2.imwrite(pjoin(vis_dir, "frame_{:06d}.png".format(id)), vis(color))
        cv2.imwrite(
            pjoin(vis_dir,
                  "frame_{:06d}_warped_to_{:06d}.png".format(tgt_id, id)),
            vis(color_warped),
        )
Code Example #6
def load_image(
    path: str,
    channels_first: bool,
    check_channels: Optional[int] = None,
    post_proc_raw=lambda x: x,
    post_proc_other=lambda x: x,
) -> torch.FloatTensor:
    if os.path.splitext(path)[-1] == ".raw":
        im = image_io.load_raw_float32_image(path)
        im = post_proc_raw(im)
    else:
        im = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        im = post_proc_other(im)
    im = im.reshape(im.shape[:2] + (-1, ))

    if check_channels is not None:
        assert (im.shape[-1] == check_channels
                ), "receive image of shape {} whose #channels != {}".format(
                    im.shape, check_channels)

    if channels_first:
        im = im.transpose((2, 0, 1))
    # to torch
    return torch.tensor(im, dtype=_dtype)
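
A minimal usage sketch for load_image; the frame path and the 3-channel check below are illustrative assumptions, not part of the original example:

    # Hypothetical call: read a down-scaled color frame stored as .raw,
    # verify it has 3 channels, and return a (C, H, W) float tensor.
    color = load_image(
        "color_down/frame_000000.raw",  # placeholder path
        channels_first=True,
        check_channels=3,
    )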
Code Example #7
def calibrate_scale(video, out_dir, frame_range, args):
    # COLMAP reconstruction.
    print_banner("COLMAP reconstruction")

    colmap_dir = pjoin(video.path, 'colmap_dense')
    src_meta_file = pjoin(colmap_dir, "metadata.npz")
    # Set up the COLMAP processor
    colmap = COLMAPProcessor(args.colmap_bin_path)
    dense_dir = colmap.dense_dir(colmap_dir, 0)
    # Check whether the metadata file exists
    if os.path.isfile(src_meta_file):
        print("Checked metadata file exists.")
    else:
        color_dir = prepare_colmap_color(video)  # prepare color frames for COLMAP

        if not colmap.check_dense(
                dense_dir, color_dir, valid_ratio=args.dense_frame_ratio):
            path_args = [color_dir, colmap_dir]
            mask_path = pjoin(video.path, 'colmap_mask')
            if os.path.isdir(mask_path):
                path_args.extend(['--mask_path', mask_path])
            colmap_args = COLMAPParams().parse_args(
                args=path_args +
                ['--dense_max_size', str(args.size)],
                namespace=args)

            colmap.process(colmap_args)

        intrinsics, extrinsics = make_camera_params_from_colmap(
            video.path, colmap.sparse_dir(colmap_dir, 0))
        np.savez(src_meta_file, intrinsics=intrinsics, extrinsics=extrinsics)

    # Convert COLMAP dense depth maps to .raw file format.
    print_banner("Convert COLMAP depth maps")

    converted_depth_fmt = pjoin(video.path, "depth_colmap_dense", "depth",
                                "frame_{:06d}.raw")

    # convert COLMAP dense depths to .raw
    converted_depth_dir = os.path.dirname(converted_depth_fmt)
    dense_depth_dir = pjoin(dense_dir, "stereo", "depth_maps")
    frames = frame_range.frames()
    if not check_frames(
            dense_depth_dir,
            colmap.dense_depth_suffix(),
            converted_depth_dir,
            "",
            frame_names={f"frame_{i:06d}.png"
                         for i in frames},
    ):
        os.makedirs(converted_depth_dir, exist_ok=True)
        colmap_depth_fmt = pjoin(
            dense_depth_dir, "frame_{:06d}.png" + colmap.dense_depth_suffix())
        for i in frames:
            colmap_depth_fn = colmap_depth_fmt.format(i)
            if not os.path.isfile(colmap_depth_fn):
                logging.warning("[SCALE CALIBRATION] %s does not exist.",
                                colmap_depth_fn)
                continue
            cmp_depth = load_colmap.read_array(colmap_depth_fn)
            inv_cmp_depth = 1.0 / cmp_depth
            ix = np.isinf(inv_cmp_depth) | (inv_cmp_depth < 0)
            inv_cmp_depth[ix] = float("nan")
            image_io.save_raw_float32_image(converted_depth_fmt.format(i),
                                            inv_cmp_depth)
        with SuppressedStdout():
            visualization.visualize_depth_dir(
                converted_depth_dir,
                converted_depth_dir,
                force=True,
                min_percentile=0,
                max_percentile=99,
            )

    # Compute scaled depth maps
    print_banner("Compute per-frame scales")

    scaled_depth_dir = pjoin(out_dir, "depth_scaled_by_colmap_dense", "depth")
    scaled_depth_fmt = pjoin(scaled_depth_dir, "frame_{:06d}.raw")
    scales_file = pjoin(out_dir, "scales.csv")
    src_depth_fmt = pjoin(video.path, f"depth_{args.model_type}", "depth",
                          "frame_{:06d}.raw")
    frames = frame_range.frames()

    if (check_frames(converted_depth_dir, ".png",
                     os.path.dirname(scaled_depth_fmt), ".raw")
            and os.path.isfile(scales_file)):
        src_to_colmap_scales = np.loadtxt(scales_file, delimiter=',')
        assert src_to_colmap_scales.shape[0] >= len(frames) * args.dense_frame_ratio \
            and src_to_colmap_scales.shape[1] == 2, \
            (f"scales shape {src_to_colmap_scales.shape} does not match "
             + f"({len(frames)}, 2) with threshold {args.dense_frame_ratio}")
        print("Existing scales file loaded.")
    else:
        # Scale depth maps
        os.makedirs(scaled_depth_dir, exist_ok=True)
        src_to_colmap_scales_map = {}

        for i in frames:
            converted_depth_fn = converted_depth_fmt.format(i)
            if not os.path.isfile(converted_depth_fn):
                logging.warning("[SCALE CALIBRATION] %s does not exist",
                                converted_depth_fn)
                continue
            # convert colmap_depth to raw
            inv_cmp_depth = image_io.load_raw_float32_image(converted_depth_fn)
            # compute scale for init depths
            inv_src_depth = image_io.load_raw_float32_image(
                src_depth_fmt.format(i))
            # src_depth * scale = (1/inv_src_depth) * scale == cmp_depth
            inv_cmp_depth = cv2.resize(inv_cmp_depth,
                                       inv_src_depth.shape[:2][::-1],
                                       interpolation=cv2.INTER_NEAREST)
            ix = np.isfinite(inv_cmp_depth)

            if np.sum(ix) / ix.size < args.dense_pixel_ratio:
                # not enough pixels are valid and hence the frame is invalid.
                continue

            scales = (inv_src_depth / inv_cmp_depth)[ix]
            scale = np.median(scales)
            print(f"Scale[{i}]: median={scale}, std={np.std(scales)}")
            # scale = np.median(inv_depth) * np.median(cmp_depth)
            src_to_colmap_scales_map[i] = float(scale)
            scaled_inv_src_depth = inv_src_depth / scale
            image_io.save_raw_float32_image(scaled_depth_fmt.format(i),
                                            scaled_inv_src_depth)
        with SuppressedStdout():
            visualization.visualize_depth_dir(scaled_depth_dir,
                                              scaled_depth_dir,
                                              force=True)

        # Write scales.csv
        xs = sorted(src_to_colmap_scales_map.keys())
        ys = [src_to_colmap_scales_map[x] for x in xs]
        src_to_colmap_scales = np.stack((np.array(xs), np.array(ys)), axis=-1)
        np.savetxt(scales_file, src_to_colmap_scales, delimiter=",")

    valid_frames = {int(s) for s in src_to_colmap_scales[:, 0]}

    # Scale the extrinsics' translations
    scaled_meta_file = pjoin(out_dir, "metadata_scaled.npz")
    if os.path.isfile(scaled_meta_file):
        print("Scaled metadata file exists.")
    else:
        scales = src_to_colmap_scales[:, 1]
        mean_scale = scales.mean()
        print(f"[scales] mean={mean_scale}, std={np.std(scales)}")

        with np.load(src_meta_file) as meta_colmap:
            intrinsics = meta_colmap["intrinsics"]
            extrinsics = meta_colmap["extrinsics"]

        extrinsics[..., -1] /= mean_scale
        np.savez(
            scaled_meta_file,
            intrinsics=intrinsics,
            extrinsics=extrinsics,
            scales=src_to_colmap_scales,
        )

        color_fmt = pjoin(video.path, "color_down", "frame_{:06d}.raw")
        vis_dir = pjoin(out_dir, "vis_calibration_dense")
        visualize_all_calibration(
            extrinsics,
            intrinsics,
            scaled_depth_fmt,
            color_fmt,
            frame_range,
            vis_dir,
        )

    return valid_frames