Code example #1
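    # Excerpt: __init__ of the MappingChallenge dataset class (the class statement itself is omitted from this snippet).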
    def __init__(self,
                 root,
                 transform=None,
                 pre_transform=None,
                 fold="train",
                 small=False,
                 pool_size=1):
        assert fold in [
            "train", "val", "test_images"
        ], "Input fold={} should be in [\"train\", \"val\", \"test_images\"]".format(
            fold)
        if fold == "test_images":
            print_utils.print_error(
                "ERROR: fold {} not yet implemented!".format(fold))
            exit()
        self.root = root
        self.fold = fold
        makedirs(self.processed_dir)
        self.small = small
        if self.small:
            print_utils.print_info(
                "INFO: Using small version of the Mapping challenge dataset.")
        self.pool_size = pool_size

        self.coco = None
        self.image_id_list = self.load_image_ids()
        self.stats_filepath = os.path.join(self.processed_dir, "stats.pt")
        self.stats = None
        if os.path.exists(self.stats_filepath):
            self.stats = torch.load(self.stats_filepath)
        self.processed_flag_filepath = os.path.join(
            self.processed_dir,
            "processed-flag-small" if self.small else "processed-flag")

        super(MappingChallenge, self).__init__(root, transform, pre_transform)
def eval_process(gpu, config, shared_dict, barrier):
    from frame_field_learning.evaluate import evaluate

    torch.manual_seed(0)  # Ensure same seed for all processes
    # --- Find data directory --- #
    root_dir_candidates = [
        os.path.join(data_dirpath, config["dataset_params"]["root_dirname"])
        for data_dirpath in config["data_dir_candidates"]
    ]
    root_dir, paths_tried = python_utils.choose_first_existing_path(
        root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error(
            "GPU {} -> ERROR: Data root directory amongst \"{}\" not found!".
            format(gpu, paths_tried))
        raise NotADirectoryError(
            f"Couldn't find a directory in {paths_tried} (gpu:{gpu})")
    print_utils.print_info("GPU {} -> Using data from {}".format(
        gpu, root_dir))
    config["data_root_dir"] = root_dir

    # --- Get dataset
    # - CHANGE HERE TO ADD YOUR OWN DATASET
    eval_ds, = get_folds(
        config, root_dir,
        folds=config["fold"])  # config["fold"] is already a list (of length 1)

    # --- Instantiate backbone network (its backbone will be used to extract features)
    backbone = get_backbone(config["backbone_params"])

    evaluate(gpu, config, shared_dict, barrier, eval_ds, backbone)
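For reference, a minimal stand-alone sketch of the data directory lookup performed above by python_utils.choose_first_existing_path (the helper name and candidate paths in the sketch are illustrative, not the project's implementation):

import os

def choose_first_existing_path_sketch(candidates):
    # Return the first existing candidate path, plus every path tried (for error messages).
    paths_tried = []
    for path in candidates:
        paths_tried.append(path)
        if os.path.exists(path):
            return path, paths_tried
    return None, paths_tried

root_dir, paths_tried = choose_first_existing_path_sketch(
    ["/data/does_not_exist/mapping_challenge_dataset", os.getcwd()])
print(root_dir, paths_tried)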
Code example #3
def inference_no_patching(config, model, tile_data):
    with torch.no_grad():
        batch = {
            "image": tile_data["image"],
            "image_mean": tile_data["image_mean"],
            "image_std": tile_data["image_std"]
        }
        try:
            pred, batch = network_inference(config, model, batch)
        except RuntimeError as e:
            print_utils.print_error("ERROR: " + str(e))
            if 1 < config["optim_params"]["eval_batch_size"]:
                print_utils.print_info(
                    "INFO: Try lowering the effective batch_size (which is {} currently). "
                    "Note that in eval mode, the effective bath_size is equal to double the batch_size "
                    "because gradients do not need to "
                    "be computed so double the memory is available. "
                    "You can override the effective batch_size with the eval_batch_size parameter."
                    .format(config["optim_params"]["eval_batch_size"]))
            else:
                print_utils.print_info(
                    "INFO: The effective batch_size is 1 but the GPU still ran out of memory."
                    "You can specify parameters to split the image into patches for inference:\n"
                    "--eval_patch_size is the size of the patch and should be chosen as big as memory allows.\n"
                    "--eval_patch_overlap (optional, default=200) adds overlaps between patches to avoid border artifacts."
                    .format(config["optim_params"]["eval_batch_size"]))
            raise e

        tile_data["seg"] = pred["seg"]
        if "crossfield" in pred:
            tile_data["crossfield"] = pred["crossfield"]

    return tile_data
def get_folds(config, root_dir, folds):
    assert set(folds).issubset({"train", "val", "train_val", "test"}), \
        'fold in folds should be in ["train", "val", "train_val", "test"]'

    if config["dataset_params"]["root_dirname"] == "AerialImageDataset":
        return get_inria_aerial_folds(config, root_dir, folds)

    elif config["dataset_params"][
            "root_dirname"] == "luxcarta_precise_buildings":
        return get_luxcarta_buildings(config, root_dir, folds)

    elif config["dataset_params"][
            "root_dirname"] == "mapping_challenge_dataset":
        return get_mapping_challenge(config, root_dir, folds)

    elif config["dataset_params"]["root_dirname"] == "segbuildings":
        return get_opencities_competition(config, root_dir, folds)

    elif config["dataset_params"]["root_dirname"] == "xview2_xbd_dataset":
        return get_xview2_dataset(config, root_dir, folds)

    else:
        print_utils.print_error(
            "ERROR: config[\"data_root_partial_dirpath\"] = \"{}\" is an unknown dataset! "
            "If it is a new dataset, add it in dataset_folds.py's get_folds() function."
            .format(config["dataset_params"]["root_dirname"]))
        exit()
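The error branch above asks you to register any new dataset inside get_folds(). As a hedged illustration (not the project's code), the same name-based dispatch can be written as a registry dict; get_my_new_dataset and the config values below are placeholders:

def get_my_new_dataset(config, root_dir, folds):
    # Placeholder loader: return one dataset object per requested fold.
    return [f"{root_dir}/{fold}" for fold in folds]

DATASET_LOADERS = {
    "my_new_dataset": get_my_new_dataset,
}

def get_folds_via_registry(config, root_dir, folds):
    root_dirname = config["dataset_params"]["root_dirname"]
    try:
        loader = DATASET_LOADERS[root_dirname]
    except KeyError:
        raise ValueError(f"Unknown dataset root_dirname: {root_dirname!r}. "
                         f"Register it in DATASET_LOADERS.")
    return loader(config, root_dir, folds)

print(get_folds_via_registry({"dataset_params": {"root_dirname": "my_new_dataset"}},
                             "/data/my_new_dataset", ["train", "val"]))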
Code example #5
def launch_train(args):
    assert args.config is not None, "Argument --config must be specified. Run 'python main.py --help' for help on arguments."
    config = run_utils.load_config(args.config)
    if config is None:
        print_utils.print_error(
            "ERROR: cannot continue without a config file. Exiting now...")
        sys.exit()
    config["runs_dirpath"] = args.runs_dirpath
    if args.run_name is not None:
        config["run_name"] = args.run_name
    config["new_run"] = args.new_run
    config["init_run_name"] = args.init_run_name
    if args.samples is not None:
        config["samples"] = args.samples
    if args.batch_size is not None:
        config["optim_params"]["batch_size"] = args.batch_size
    if args.max_epoch is not None:
        config["optim_params"]["max_epoch"] = args.max_epoch

    if args.fold is None:
        if "fold" in config:
            fold = set(config["fold"])
        else:
            fold = {"train"}  # Default values for train
    else:
        fold = set(args.fold)
    assert fold == {"train"} or fold == {"train", "val"}, \
        "Argument fold when training should be either: ['train'] or ['train', 'val']"
    config["fold"] = list(fold)
    print_utils.print_info("Training on fold(s): {}".format(config["fold"]))

    config["nodes"] = args.nodes
    config["gpus"] = args.gpus
    config["nr"] = args.nr
    config["world_size"] = args.gpus * args.nodes

    # --- Load params in config set as relative path to another JSON file
    config = run_utils.load_defaults_in_config(
        config, filepath_key="defaults_filepath")

    # Setup num_workers per process:
    if config["num_workers"] is None:
        config["num_workers"] = int(torch.multiprocessing.cpu_count() /
                                    config["gpus"])

    # --- Distributed init:
    os.environ['MASTER_ADDR'] = args.master_addr
    os.environ['MASTER_PORT'] = args.master_port
    manager = torch.multiprocessing.Manager()
    shared_dict = manager.dict()
    shared_dict["run_dirpath"] = None
    shared_dict["init_checkpoints_dirpath"] = None
    barrier = manager.Barrier(args.gpus)

    torch.multiprocessing.spawn(train_process,
                                nprocs=args.gpus,
                                args=(config, shared_dict, barrier))
Code example #6
def launch_inference_from_filepath(args):
    from frame_field_learning.inference_from_filepath import inference_from_filepath

    # --- First step: figure out what run (experiment) is to be evaluated
    # Option 1: the run_name argument is given in which case that's our run
    run_name = None
    config = None
    if args.run_name is not None:
        run_name = args.run_name
    # Else option 2: Check if a config has been given to look for the run_name
    if args.config is not None:
        config = run_utils.load_config(args.config)
        if config is not None and "run_name" in config and run_name is None:
            run_name = config["run_name"]
    # Else abort...
    if run_name is None:
        print_utils.print_error(
            "ERROR: the run to evaluate could no be identified with the given arguments. "
            "Please specify either the --run_name argument or the --config argument "
            "linking to a config file that has a 'run_name' field filled with the name of "
            "the run name to evaluate.")
        sys.exit()

    # --- Second step: get path to the run and if --config was not specified, load the config from the run's folder
    run_dirpath = frame_field_learning.local_utils.get_run_dirpath(
        args.runs_dirpath, run_name)
    if config is None:
        config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            f"ERROR: the default run's config file at {run_dirpath} could not be loaded. "
            f"Exiting now...")
        sys.exit()

    # --- Add command-line arguments
    if args.batch_size is not None:
        config["optim_params"]["batch_size"] = args.batch_size
    if args.eval_batch_size is not None:
        config["optim_params"]["eval_batch_size"] = args.eval_batch_size
    else:
        config["optim_params"][
            "eval_batch_size"] = 2 * config["optim_params"]["batch_size"]

    # --- Load params in config set as relative path to another JSON file
    config = run_utils.load_defaults_in_config(
        config, filepath_key="defaults_filepath")

    config["eval_params"]["run_dirpath"] = run_dirpath
    if args.eval_patch_size is not None:
        config["eval_params"]["patch_size"] = args.eval_patch_size
    if args.eval_patch_overlap is not None:
        config["eval_params"]["patch_overlap"] = args.eval_patch_overlap

    backbone = get_backbone(config["backbone_params"])
    inference_from_filepath(config, args.in_filepath, backbone,
                            args.out_dirpath)
Code example #7
def get_run_dirpath(runs_dirpath, run_name):
    working_dir = os.path.dirname(os.path.abspath(__file__))
    runs_dir = os.path.join(working_dir, runs_dirpath)
    try:
        run_dirpath = run_utils.setup_run_dir(runs_dir,
                                              run_name,
                                              check_exists=True)
    except FileNotFoundError as e:
        print_utils.print_error(f"ERROR: {e}")
        sys.exit()
    return run_dirpath
Code example #8
def load_json(filepath):
    if not os.path.exists(filepath):
        return False
    try:
        with open(filepath, 'r') as f:
            minified = jsmin(f.read())
            data = json.loads(minified)
    except json.decoder.JSONDecodeError as e:
        print_utils.print_error(
            f"ERROR in load_json(filepath): {e} from JSON at {filepath}")
        exit()
    return data
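As a usage sketch for the pattern above: jsmin strips JavaScript-style comments before json.loads, which is what lets config files contain // comments (this assumes the jsmin package is installed; the config snippet is illustrative):

import json
from jsmin import jsmin  # strips // and /* */ comments before parsing

raw = """
{
    // batch size used at train time
    "batch_size": 8
}
"""
data = json.loads(jsmin(raw))
assert data["batch_size"] == 8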
Code example #9
def inference_from_filepath(config, in_filepaths, backbone):
    # --- Online transform performed on the device (GPU):
    eval_online_cuda_transform = data_transforms.get_eval_online_cuda_transform(config)

    print("Loading model...")
    model = FrameFieldModel(config, backbone=backbone, eval_transform=eval_online_cuda_transform)
    model.to(config["device"])
    checkpoints_dirpath = run_utils.setup_run_subdir(config["eval_params"]["run_dirpath"], config["optim_params"]["checkpoints_dirname"])
    model = inference.load_checkpoint(model, checkpoints_dirpath, config["device"])
    model.eval()

    # Read image
    pbar = tqdm(in_filepaths, desc="Infer images")
    for in_filepath in pbar:
        pbar.set_postfix(status="Loading image")
        image = skimage.io.imread(in_filepath)
        if 3 < image.shape[2]:
            print_utils.print_info(f"Image {in_filepath} has more than 3 channels. Keeping the first 3 channels and discarding the rest...")
            image = image[:, :, :3]
        elif image.shape[2] < 3:
            print_utils.print_error(f"Image {in_filepath} has only {image.shape[2]} channels but the network expects 3 channels.")
            raise ValueError
        image_float = image / 255
        mean = np.mean(image_float.reshape(-1, image_float.shape[-1]), axis=0)
        std = np.std(image_float.reshape(-1, image_float.shape[-1]), axis=0)
        sample = {
            "image": torchvision.transforms.functional.to_tensor(image)[None, ...],
            "image_mean": torch.from_numpy(mean)[None, ...],
            "image_std": torch.from_numpy(std)[None, ...],
            "image_filepath": [in_filepath],
        }

        pbar.set_postfix(status="Inference")
        tile_data = inference.inference(config, model, sample, compute_polygonization=True)

        tile_data = local_utils.batch_to_cpu(tile_data)

        # Remove batch dim:
        tile_data = local_utils.split_batch(tile_data)[0]

        pbar.set_postfix(status="Saving output")
        base_filepath = os.path.splitext(in_filepath)[0]
        if config["compute_seg"]:
            seg_mask = 0.5 < tile_data["seg"][0]
            save_utils.save_seg_mask(seg_mask, base_filepath + ".mask", tile_data["image_filepath"])
            save_utils.save_seg(tile_data["seg"], base_filepath, "seg", tile_data["image_filepath"])
            save_utils.save_seg_luxcarta_format(tile_data["seg"], base_filepath, "seg_luxcarta_format", tile_data["image_filepath"])
        if config["compute_crossfield"]:
            save_utils.save_crossfield(tile_data["crossfield"], base_filepath, "crossfield")
        if "poly_viz" in config["eval_params"]["save_individual_outputs"] and \
                config["eval_params"]["save_individual_outputs"]["poly_viz"]:
            save_utils.save_poly_viz(tile_data["image"], tile_data["polygons"], tile_data["polygon_probs"], base_filepath, "poly_viz")
Code example #10
def launch_eval_coco(args):
    # --- Init: fills mode-specific default command-line arguments
    if args.fold is None:
        fold = {"test"}  # Default value for eval_coco
    else:
        fold = set(args.fold)
    assert len(fold) == 1, \
        "Argument fold when evaluating with COCO should be a single fold"

    # --- Find which run and which config file to evaluate the run with
    if args.run_name is None and args.config is None:
        print_utils.print_error(
            "ERROR: At least of one --run_name or --config has to be specified."
        )
        sys.exit()
    elif args.run_name is None and args.config is not None:
        # Load config
        config = run_utils.load_config(args.config)
        # Verify it has a run_name specified
        if "run_name" not in config:
            print_utils.print_error(
                "ERROR: run_name was not found in the provided config file, you can specify it with --run_name"
            )
            sys.exit()
        run_name = config["run_name"]
    elif args.run_name is not None and args.config is None:
        # Load run_name's config
        run_dirpath = frame_field_learning.local_utils.get_run_dirpath(
            args.runs_dirpath, args.run_name)
        config = run_utils.load_config(config_dirpath=run_dirpath)
        run_name = args.run_name
    else:
        # Load specified config and use specified run_name
        config = run_utils.load_config(args.config)
        run_name = args.run_name

    # --- Load params in config set as relative path to another JSON file
    config = run_utils.load_defaults_in_config(
        config, filepath_key="defaults_filepath")

    # --- Second step: Replace parameters in config file from command-line arguments
    config["eval_params"]["run_name"] = run_name
    if args.samples is not None:
        config["samples"] = args.samples
    config["fold"] = list(fold)

    # Setup num_workers per process:
    if config["num_workers"] is None:
        config["num_workers"] = torch.multiprocessing.cpu_count()

    eval_coco(config)
def get_mapping_challenge(config, root_dir, folds):
    from torch_lydorn.torchvision.datasets import MappingChallenge

    if "train" in folds or "val" in folds or "train_val" in folds:
        train_online_cpu_transform = data_transforms.get_online_cpu_transform(
            config, augmentations=config["data_aug_params"]["enable"])
        ds = MappingChallenge(
            root_dir,
            transform=train_online_cpu_transform,
            pre_transform=data_transforms.get_offline_transform_patch(),
            small=config["dataset_params"]["small"],
            fold="train",
            pool_size=config["num_workers"])
        torch.manual_seed(
            config["dataset_params"]["seed"])  # Ensure a seed is set
        train_split_length = int(
            round(config["dataset_params"]["train_fraction"] * len(ds)))
        val_split_length = len(ds) - train_split_length
        train_ds, val_ds = torch.utils.data.random_split(
            ds, [train_split_length, val_split_length])

    ds_list = []
    for fold in folds:
        if fold == "train":
            ds_list.append(train_ds)
        elif fold == "val":
            ds_list.append(val_ds)
        elif fold == "train_val":
            ds_list.append(ds)
        elif fold == "test":
            # The val fold from the original challenge is used as test here
            # because we don't have the ground truth for the test_images fold of the challenge:
            test_online_cpu_transform = data_transforms.get_eval_online_cpu_transform(
            )
            test_ds = MappingChallenge(
                root_dir,
                transform=test_online_cpu_transform,
                pre_transform=data_transforms.get_offline_transform_patch(),
                small=config["dataset_params"]["small"],
                fold="val",
                pool_size=config["num_workers"])
            ds_list.append(test_ds)
        else:
            print_utils.print_error(
                "ERROR: fold \"{}\" not recognized, implement it in dataset_folds.py."
                .format(fold))
            exit()

    return ds_list
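A minimal sketch of the seeded split used above: calling torch.manual_seed before torch.utils.data.random_split makes the train/val partition reproducible across runs (the dataset and fraction below are illustrative):

import torch
from torch.utils.data import TensorDataset, random_split

ds = TensorDataset(torch.arange(100))
torch.manual_seed(42)  # hypothetical seed; the project reads it from config["dataset_params"]["seed"]
train_split_length = int(round(0.8 * len(ds)))  # 0.8 stands in for train_fraction
train_ds, val_ds = random_split(ds, [train_split_length, len(ds) - train_split_length])
print(len(train_ds), len(val_ds))  # 80 20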
def train_process(gpu, config, shared_dict, barrier):
    from frame_field_learning.train import train

    print_utils.print_info(
        "GPU {} -> Ready. There are {} GPU(s) available on this node.".format(
            gpu, torch.cuda.device_count()))

    torch.manual_seed(0)  # Ensure same seed for all processes
    # --- Find data directory --- #
    root_dir_candidates = [
        os.path.join(data_dirpath, config["dataset_params"]["root_dirname"])
        for data_dirpath in config["data_dir_candidates"]
    ]
    root_dir, paths_tried = python_utils.choose_first_existing_path(
        root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error(
            "GPU {} -> ERROR: Data root directory amongst \"{}\" not found!".
            format(gpu, paths_tried))
        exit()
    print_utils.print_info("GPU {} -> Using data from {}".format(
        gpu, root_dir))

    # --- Get dataset splits
    # - CHANGE HERE TO ADD YOUR OWN DATASET
    # We have to adapt the config["fold"] param to the folds argument of the get_folds function
    fold = set(config["fold"])
    if fold == {"train"}:
        # Val will be used for evaluating the model after each epoch:
        train_ds, val_ds = get_folds(config, root_dir, folds=["train", "val"])
    elif fold == {"train", "val"}:
        # Both train and val are meant to be used for training
        train_ds, = get_folds(config, root_dir, folds=["train_val"])
        val_ds = None
    else:
        # Should not arrive here since main makes sure config["fold"] is either one of the above
        print_utils.print_error("ERROR: specified folds not recognized!")
        raise NotImplementedError

    # --- Instantiate backbone network
    if config["backbone_params"]["name"] in ["deeplab50", "deeplab101"]:
        assert 1 < config["optim_params"]["batch_size"], \
            "When using backbone {}, batch_size has to be at least 2 for the batchnorm of the ASPPPooling to work."\
                .format(config["backbone_params"]["name"])
    backbone = get_backbone(config["backbone_params"])

    # --- Launch training
    train(gpu, config, shared_dict, barrier, train_ds, val_ds, backbone)
def get_luxcarta_buildings(config, root_dir, folds):
    from torch_lydorn.torchvision.datasets import LuxcartaBuildings

    # --- Online transform done on the host (CPU):
    online_cpu_transform = data_transforms.get_online_cpu_transform(
        config, augmentations=config["data_aug_params"]["enable"])

    data_patch_size = config["dataset_params"]["data_patch_size"] if config[
        "data_aug_params"]["enable"] else config["input_patch_size"]
    ds = LuxcartaBuildings(
        root_dir,
        transform=online_cpu_transform,
        patch_size=data_patch_size,
        patch_stride=config["dataset_params"]["input_patch_size"],
        pre_transform=data_transforms.get_offline_transform_patch(),
        fold="train",
        pool_size=config["num_workers"])
    torch.manual_seed(config["dataset_params"]["seed"])  # Ensure a seed is set
    train_split_length = int(
        round(config["dataset_params"]["train_fraction"] * len(ds)))
    val_split_length = len(ds) - train_split_length
    train_ds, val_ds = torch.utils.data.random_split(
        ds, [train_split_length, val_split_length])

    ds_list = []
    for fold in folds:
        if fold == "train":
            ds_list.append(train_ds)
        elif fold == "val":
            ds_list.append(val_ds)
        elif fold == "test":
            # TODO: handle patching with multi-GPU processing
            print_utils.print_warning(
                "WARNING: handle patching with multi-GPU processing")
            ds = LuxcartaBuildings(
                root_dir,
                transform=online_cpu_transform,
                pre_transform=data_transforms.get_offline_transform_patch(),
                fold="test",
                pool_size=config["num_workers"])
            ds_list.append(ds)
        else:
            print_utils.print_error(
                "ERROR: fold \"{}\" not recognized, implement it in dataset_folds.py."
                .format(fold))

    return ds_list
Code example #14
def eval_coco(config):
    assert len(config["fold"]) == 1, "There should be only one specified fold"
    fold = config["fold"][0]
    if fold != "test":
        raise NotImplementedError

    pool = Pool(processes=config["num_workers"])

    # Find data dir
    root_dir_candidates = [os.path.join(data_dirpath, config["dataset_params"]["root_dirname"]) for data_dirpath in
                           config["data_dir_candidates"]]
    root_dir, paths_tried = python_utils.choose_first_existing_path(root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error(
            "ERROR: Data root directory amongst \"{}\" not found!".format(paths_tried))
        exit()
    print_utils.print_info("Using data from {}".format(root_dir))
    raw_dir = os.path.join(root_dir, "raw")

    # Get run's eval results dir
    results_dirpath = os.path.join(root_dir, config["eval_params"]["results_dirname"])
    run_results_dirpath = run_utils.setup_run_dir(results_dirpath, config["eval_params"]["run_name"], check_exists=True)

    # Setup coco
    annType = 'segm'

    # initialize COCO ground truth api
    gt_annotation_filename = "annotation-small.json" if config["dataset_params"]["small"] else "annotation.json"
    gt_annotation_filepath = os.path.join(raw_dir, "val",
                                          gt_annotation_filename)  # We are using the original val fold as our test fold
    print_utils.print_info("INFO: Load gt from " + gt_annotation_filepath)
    cocoGt = COCO(gt_annotation_filepath)

    # image_id = 0
    # annotation_ids = cocoGt.getAnnIds(imgIds=image_id)
    # annotation_list = cocoGt.loadAnns(annotation_ids)
    # print(annotation_list)

    # initialize COCO detections api
    annotation_filename_list = fnmatch.filter(os.listdir(run_results_dirpath), fold + ".annotation.*.json")
    eval_one_partial = partial(eval_one, run_results_dirpath=run_results_dirpath, cocoGt=cocoGt, config=config, annType=annType, pool=pool)

    # with Pool(8) as p:
    #     r = list(tqdm(p.imap(eval_one_partial, annotation_filename_list), total=len(annotation_filename_list)))
    for annotation_filename in annotation_filename_list:
        eval_one_partial(annotation_filename)
def get_xview2_dataset(config, root_dir, folds):
    from torch_lydorn.torchvision.datasets import xView2Dataset

    if "train" in folds or "val" in folds or "train_val" in folds:
        train_online_cpu_transform = data_transforms.get_online_cpu_transform(
            config, augmentations=config["data_aug_params"]["enable"])
        ds = xView2Dataset(
            root_dir,
            fold="train",
            pre_process=True,
            patch_size=config["dataset_params"]["data_patch_size"],
            pre_transform=data_transforms.get_offline_transform_patch(),
            transform=train_online_cpu_transform,
            small=config["dataset_params"]["small"],
            pool_size=config["num_workers"])
        torch.manual_seed(
            config["dataset_params"]["seed"])  # Ensure a seed is set
        train_split_length = int(
            round(config["dataset_params"]["train_fraction"] * len(ds)))
        val_split_length = len(ds) - train_split_length
        train_ds, val_ds = torch.utils.data.random_split(
            ds, [train_split_length, val_split_length])

    ds_list = []
    for fold in folds:
        if fold == "train":
            ds_list.append(train_ds)
        elif fold == "val":
            ds_list.append(val_ds)
        elif fold == "train_val":
            ds_list.append(ds)
        elif fold == "test":
            raise NotImplementedError(
                "Test fold not yet implemented (skip pre-processing?)")
        elif fold == "hold":
            raise NotImplementedError(
                "Hold fold not yet implemented (skip pre-processing?)")
        else:
            print_utils.print_error(
                "ERROR: fold \"{}\" not recognized, implement it in dataset_folds.py."
                .format(fold))
            exit()

    return ds_list
def get_opencities_competition(config, root_dir, folds):
    from torch_lydorn.torchvision.datasets import RasterizedOpenCities, OpenCitiesTestDataset

    data_patch_size = config["dataset_params"]["data_patch_size"] if config[
        "data_aug_params"]["enable"] else config["input_patch_size"]

    ds_list = []
    for fold in folds:
        if fold == "train":
            train_ds = RasterizedOpenCities(
                tier=1,
                augment=False,
                small_subset=False,
                resize_size=data_patch_size,
                data_dir=root_dir,
                baseline_mode=False,
                val=False,
                val_split=config["dataset_params"]["val_fraction"])
            ds_list.append(train_ds)
        elif fold == "val":
            val_ds = RasterizedOpenCities(
                tier=1,
                augment=False,
                small_subset=False,
                resize_size=data_patch_size,
                data_dir=root_dir,
                baseline_mode=False,
                val=True,
                val_split=config["dataset_params"]["val_fraction"])
            ds_list.append(val_ds)
        elif fold == "test":
            test_ds = OpenCitiesTestDataset(root_dir + "/test/", 1024)
            ds_list.append(test_ds)
        else:
            print_utils.print_error(
                "ERROR: fold \"{}\" not recognized, implement it in dataset_folds.py."
                .format(fold))

    return ds_list
Code example #17
def main():
    torch.manual_seed(0)
    # --- Process args --- #
    args = get_args()

    # --- Setup run --- #
    run_dirpath = local_utils.get_run_dirpath(args.runs_dirpath, args.run_name)
    # Load run's config file:
    config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            "ERROR: cannot continue without a config file. Exiting now...")
        sys.exit()

    config["eval_params"]["run_dirpath"] = run_dirpath
    if args.eval_patch_size is not None:
        config["eval_params"]["patch_size"] = args.eval_patch_size
    if args.eval_patch_overlap is not None:
        config["eval_params"]["patch_overlap"] = args.eval_patch_overlap

    backbone = get_backbone(config["backbone_params"])

    polygonize_mask(config, args.filepath, backbone, args.out_ext)
Code example #18
def get_backbone(backbone_params):
    set_download_dir()
    if backbone_params["name"] == "unet":
        from torchvision.models.segmentation._utils import _SimpleSegmentationModel
        from frame_field_learning.unet import UNetBackbone

        backbone = UNetBackbone(backbone_params["input_features"],
                                backbone_params["features"])
        backbone = _SimpleSegmentationModel(backbone,
                                            classifier=torch.nn.Identity())
    elif backbone_params["name"] == "fcn50":
        backbone = torchvision.models.segmentation.fcn_resnet50(
            pretrained=backbone_params["pretrained"], num_classes=21)
        backbone.classifier = torch.nn.Sequential(
            *list(backbone.classifier.children())[:-1],
            torch.nn.Conv2d(512,
                            backbone_params["features"],
                            kernel_size=(1, 1),
                            stride=(1, 1)))
    elif backbone_params["name"] == "fcn101":
        backbone = torchvision.models.segmentation.fcn_resnet101(
            pretrained=backbone_params["pretrained"], num_classes=21)
        backbone.classifier = torch.nn.Sequential(
            *list(backbone.classifier.children())[:-1],
            torch.nn.Conv2d(512,
                            backbone_params["features"],
                            kernel_size=(1, 1),
                            stride=(1, 1)))

    elif backbone_params["name"] == "deeplab50":
        backbone = torchvision.models.segmentation.deeplabv3_resnet50(
            pretrained=backbone_params["pretrained"], num_classes=21)
        backbone.classifier = torch.nn.Sequential(
            *list(backbone.classifier.children())[:-1],
            torch.nn.Conv2d(256,
                            backbone_params["features"],
                            kernel_size=(1, 1),
                            stride=(1, 1)))
    elif backbone_params["name"] == "deeplab101":
        backbone = torchvision.models.segmentation.deeplabv3_resnet101(
            pretrained=backbone_params["pretrained"], num_classes=21)
        backbone.classifier = torch.nn.Sequential(
            *list(backbone.classifier.children())[:-1],
            torch.nn.Conv2d(256,
                            backbone_params["features"],
                            kernel_size=(1, 1),
                            stride=(1, 1)))
    elif backbone_params["name"] == "unet_resnet":
        from torchvision.models.segmentation._utils import _SimpleSegmentationModel
        from frame_field_learning.unet_resnet import UNetResNetBackbone

        backbone = UNetResNetBackbone(
            backbone_params["encoder_depth"],
            num_filters=backbone_params["num_filters"],
            dropout_2d=backbone_params["dropout_2d"],
            pretrained=backbone_params["pretrained"],
            is_deconv=backbone_params["is_deconv"])
        backbone = _SimpleSegmentationModel(backbone,
                                            classifier=torch.nn.Identity())

    elif backbone_params["name"] == "ictnet":
        from torchvision.models.segmentation._utils import _SimpleSegmentationModel
        from frame_field_learning.ictnet import ICTNetBackbone

        backbone = ICTNetBackbone(in_channels=backbone_params["in_channels"],
                                  out_channels=backbone_params["out_channels"],
                                  preset_model=backbone_params["preset_model"],
                                  dropout_2d=backbone_params["dropout_2d"],
                                  efficient=backbone_params["efficient"])
        backbone = _SimpleSegmentationModel(backbone,
                                            classifier=torch.nn.Identity())
    else:
        print_utils.print_error(
            "ERROR: config[\"backbone_params\"][\"name\"] = \"{}\" is an unknown backbone!"
            "If it is a new backbone you want to use, "
            "add it in backbone.py's get_backbone() function.".format(
                backbone_params["name"]))
        raise RuntimeError("Specified backbone {} unknown".format(
            backbone_params["name"]))
    return backbone
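For reference, a hedged usage sketch of the parameters read by the "unet" branch above; the key names come from the code, while the values are illustrative only:

# Illustrative values; only the key names are taken from get_backbone()'s "unet" branch.
backbone_params = {
    "name": "unet",
    "input_features": 3,  # RGB input
    "features": 64,       # number of feature channels output by the backbone
}
# backbone = get_backbone(backbone_params)  # requires the project's imports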
Code example #19
    def __call__(self,
                 polygonize_params,
                 seg_batch,
                 crossfield_batch=None,
                 pre_computed=None):
        """

        :param polygonize_params:
        :param seg_batch: (N, C, H, W)
        :param crossfield_batch: (N, 4, H, W)
        :param pre_computed: None or a dictionary of pre-computed values used for various methods
        :return:
        """
        assert len(seg_batch.shape) == 4, "seg_batch should be (N, C, H, W)"
        assert pre_computed is None or isinstance(
            pre_computed, dict), "pre_computed should be either None or a dict"
        batch_size = seg_batch.shape[0]

        # Check if polygonize_params["method"] is a list or a string:
        if type(polygonize_params["method"]) == list:
            # --- For speed up, pre-compute anything that is used by multiple methods:
            if pre_computed is None:
                pre_computed = {}
            if ("simple" in polygonize_params["method"]
                    or "acm" in polygonize_params["method"]
                ) and "init_contours_batch" not in pre_computed:
                indicator_batch = seg_batch[:, 0, :, :]
                np_indicator_batch = indicator_batch.cpu().numpy()
                init_contours_batch = polygonize_utils.compute_init_contours_batch(
                    np_indicator_batch,
                    polygonize_params["common_params"]["init_data_level"],
                    pool=self.pool)
                pre_computed["init_contours_batch"] = init_contours_batch
            # ---
            # Run one method after the other:
            out_polygons_dict_batch = [{} for _ in range(batch_size)]
            out_probs_dict_batch = [{} for _ in range(batch_size)]
            for method_name in polygonize_params["method"]:
                new_polygonize_params = polygonize_params.copy()
                new_polygonize_params["method"] = method_name
                polygons_batch, probs_batch = self(
                    new_polygonize_params,
                    seg_batch,
                    crossfield_batch=crossfield_batch,
                    pre_computed=pre_computed)
                if polygons_batch is not None:
                    for i, (polygons, probs) in enumerate(
                            zip(polygons_batch, probs_batch)):
                        out_polygons_dict_batch[i][method_name] = polygons
                        out_probs_dict_batch[i][method_name] = probs
            return out_polygons_dict_batch, out_probs_dict_batch

        # --- Else: run the one method
        if polygonize_params["method"] == "acm":
            if crossfield_batch is None:
                # Cannot run the ACM method
                return None, None
            polygons_batch, probs_batch = polygonize_acm.polygonize(
                seg_batch,
                crossfield_batch,
                polygonize_params["acm_method"],
                pool=self.pool,
                pre_computed=pre_computed)
        elif polygonize_params["method"] == "asm":
            if crossfield_batch is None:
                # Cannot run the ASM method
                return None, None
            polygons_batch, probs_batch = self.polygonizer_asm(
                seg_batch, crossfield_batch, pre_computed=pre_computed)
        elif polygonize_params["method"] == "simple":
            polygons_batch, probs_batch = polygonize_simple.polygonize(
                seg_batch,
                polygonize_params["simple_method"],
                pool=self.pool,
                pre_computed=pre_computed)
        else:
            print_utils.print_error(
                "ERROR: polygonize method {} not recognized!".format(
                    polygonize_params["method"]))
            raise NotImplementedError

        return polygons_batch, probs_batch
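A stand-alone sketch (not the project's classes) of the dispatch pattern above: when the "method" entry is a list, the callable re-invokes itself once per method and gathers the per-sample results into dicts keyed by method name:

def run(params, batch):
    if isinstance(params["method"], list):
        out = [{} for _ in batch]
        for name in params["method"]:
            single = dict(params, method=name)  # copy params with a single method
            for i, result in enumerate(run(single, batch)):
                out[i][name] = result
        return out
    # Single method: placeholder "polygonization" of each sample.
    return [f'{params["method"]}({sample})' for sample in batch]

print(run({"method": ["simple", "acm"]}, ["img0", "img1"]))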
Code example #20
def inference_with_patching(config, model, tile_data):
    assert len(tile_data["image"].shape) == 4 and tile_data["image"].shape[0] == 1, \
        f"When using inference with patching, tile_data should have a batch size of 1, " \
        f"with image's shape being (1, C, H, W), not {tile_data['image'].shape}"
    with torch.no_grad():
        # Init tile outputs (image is (N, C, H, W)):
        height = tile_data["image"].shape[2]
        width = tile_data["image"].shape[3]
        seg_channels = config["seg_params"]["compute_interior"] \
                       + config["seg_params"]["compute_edge"] \
                       + config["seg_params"]["compute_vertex"]
        if config["compute_seg"]:
            tile_data["seg"] = torch.zeros((1, seg_channels, height, width),
                                           device=config["device"])
        if config["compute_crossfield"]:
            tile_data["crossfield"] = torch.zeros((1, 4, height, width),
                                                  device=config["device"])
        weight_map = torch.zeros(
            (1, 1, height, width), device=config["device"]
        )  # Count number of patches on top of each pixel

        # Split tile in patches:
        stride = config["eval_params"]["patch_size"] - config["eval_params"][
            "patch_overlap"]
        patch_boundingboxes = image_utils.compute_patch_boundingboxes(
            (height, width),
            stride=stride,
            patch_res=config["eval_params"]["patch_size"])
        # Compute patch pixel weights to merge overlapping patches back together smoothly:
        patch_weights = np.ones((config["eval_params"]["patch_size"] + 2,
                                 config["eval_params"]["patch_size"] + 2),
                                dtype=np.float)
        patch_weights[0, :] = 0
        patch_weights[-1, :] = 0
        patch_weights[:, 0] = 0
        patch_weights[:, -1] = 0
        patch_weights = scipy.ndimage.distance_transform_edt(patch_weights)
        patch_weights = patch_weights[1:-1, 1:-1]
        patch_weights = torch.tensor(patch_weights,
                                     device=config["device"]).float()
        patch_weights = patch_weights[
            None, None, :, :]  # Adding batch and channels dims

        # Predict on each patch and save in outputs:
        for bbox in tqdm(patch_boundingboxes,
                         desc="Running model on patches",
                         leave=False):
            # Crop data
            batch = {
                "image": tile_data["image"][:, :, bbox[0]:bbox[2],
                                            bbox[1]:bbox[3]],
                "image_mean": tile_data["image_mean"],
                "image_std": tile_data["image_std"],
            }
            # Send batch to device
            try:
                pred, batch = network_inference(config, model, batch)
            except RuntimeError as e:
                print_utils.print_error("ERROR: " + str(e))
                print_utils.print_info(
                    "INFO: Reduce --eval_patch_size until the patch fits in memory."
                )
                raise e

            if config["compute_seg"]:
                tile_data[
                    "seg"][:, :, bbox[0]:bbox[2],
                           bbox[1]:bbox[3]] += patch_weights * pred["seg"]
            if config["compute_crossfield"]:
                tile_data["crossfield"][:, :, bbox[0]:bbox[2], bbox[1]:bbox[
                    3]] += patch_weights * pred["crossfield"]
            weight_map[:, :, bbox[0]:bbox[2], bbox[1]:bbox[3]] += patch_weights

        # Take care of overlapping parts
        if config["compute_seg"]:
            tile_data["seg"] /= weight_map
        if config["compute_crossfield"]:
            tile_data["crossfield"] /= weight_map

    return tile_data
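A small stand-alone sketch of the patch-blending weights built above: a ones patch is padded with a zero border, the Euclidean distance transform is taken, and the padding is cropped off again, so pixels near a patch edge contribute less when overlapping patches are accumulated and divided by weight_map (patch_size below is illustrative):

import numpy as np
import scipy.ndimage

patch_size = 8  # the project reads config["eval_params"]["patch_size"]
w = np.ones((patch_size + 2, patch_size + 2), dtype=float)
w[0, :] = w[-1, :] = w[:, 0] = w[:, -1] = 0  # zero border
w = scipy.ndimage.distance_transform_edt(w)[1:-1, 1:-1]
print(w)  # weight 1 on the outermost rows/columns, increasing towards the centre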
Code example #21
def main():
    # Test using transforms from the frame_field_learning project:
    from frame_field_learning import data_transforms

    config = {
        "data_dir_candidates":
        ["/data/titane/user/nigirard/data", "~/data", "/data"],
        "dataset_params": {
            "root_dirname": "xview2_xbd_dataset",
            "pre_process": True,
            "small": False,
            "data_patch_size": 725,
            "input_patch_size": 512,
            "train_fraction": 0.75
        },
        "num_workers":
        8,
        "data_aug_params": {
            "enable": True,
            "vflip": True,
            "affine": True,
            "scaling": [0.9, 1.1],
            "color_jitter": True,
            "device": "cuda"
        }
    }

    # Find data_dir
    data_dir = python_utils.choose_first_existing_path(
        config["data_dir_candidates"])
    if data_dir is None:
        print_utils.print_error("ERROR: Data directory not found!")
        exit()
    else:
        print_utils.print_info("Using data from {}".format(data_dir))
    root_dir = os.path.join(data_dir, config["dataset_params"]["root_dirname"])

    # --- Transforms: --- #
    # --- pre-processing transform (done once then saved on disk):
    # --- Online transform done on the host (CPU):
    online_cpu_transform = data_transforms.get_online_cpu_transform(
        config, augmentations=config["data_aug_params"]["enable"])
    train_online_cuda_transform = data_transforms.get_online_cuda_transform(
        config, augmentations=config["data_aug_params"]["enable"])
    kwargs = {
        "pre_process": config["dataset_params"]["pre_process"],
        "transform": online_cpu_transform,
        "patch_size": config["dataset_params"]["data_patch_size"],
        "pre_transform": data_transforms.get_offline_transform_patch(),
        "small": config["dataset_params"]["small"],
        "pool_size": config["num_workers"],
    }
    # --- --- #
    fold = "train"
    if fold == "train":
        dataset = xView2Dataset(root_dir, fold="train", **kwargs)
    elif fold == "val":
        dataset = xView2Dataset(root_dir, fold="train", **kwargs)
    elif fold == "test":
        dataset = xView2Dataset(root_dir, fold="test", **kwargs)
    else:
        raise NotImplementedError

    print(f"dataset has {len(dataset)} samples.")
    print("# --- Sample 0 --- #")
    sample = dataset[0]
    for key, item in sample.items():
        print("{}: {}".format(key, type(item)))

    print("# --- Samples --- #")
    # for data in tqdm(dataset):
    #     pass

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=config["num_workers"])
    print("# --- Batches --- #")
    for batch in tqdm(data_loader):

        # batch["distances"] = batch["distances"].float()
        # batch["sizes"] = batch["sizes"].float()

        # im = np.array(batch["image"][0])
        # im = np.moveaxis(im, 0, -1)
        # skimage.io.imsave('im_before_transform.png', im)
        #
        # distances = np.array(batch["distances"][0])
        # distances = np.moveaxis(distances, 0, -1)
        # skimage.io.imsave('distances_before_transform.png', distances)
        #
        # sizes = np.array(batch["sizes"][0])
        # sizes = np.moveaxis(sizes, 0, -1)
        # skimage.io.imsave('sizes_before_transform.png', sizes)

        print("----")
        print(batch["name"])

        print("image:", batch["image"].shape, batch["image"].min().item(),
              batch["image"].max().item())
        im = np.array(batch["image"][0])
        im = np.moveaxis(im, 0, -1)
        skimage.io.imsave('im.png', im)

        if "gt_polygons_image" in batch:
            print("gt_polygons_image:", batch["gt_polygons_image"].shape,
                  batch["gt_polygons_image"].min().item(),
                  batch["gt_polygons_image"].max().item())
            seg = np.array(batch["gt_polygons_image"][0]) / 255
            seg = np.moveaxis(seg, 0, -1)
            seg_display = utils.get_seg_display(seg)
            seg_display = (seg_display * 255).astype(np.uint8)
            skimage.io.imsave("gt_seg.png", seg_display)

        if "gt_crossfield_angle" in batch:
            print("gt_crossfield_angle:", batch["gt_crossfield_angle"].shape,
                  batch["gt_crossfield_angle"].min().item(),
                  batch["gt_crossfield_angle"].max().item())
            gt_crossfield_angle = np.array(batch["gt_crossfield_angle"][0])
            gt_crossfield_angle = np.moveaxis(gt_crossfield_angle, 0, -1)
            skimage.io.imsave('gt_crossfield_angle.png', gt_crossfield_angle)

        if "distances" in batch:
            print("distances:", batch["distances"].shape,
                  batch["distances"].float().min().item(),
                  batch["distances"].float().max().item())
            distances = np.array(batch["distances"][0])
            distances = np.moveaxis(distances, 0, -1)
            skimage.io.imsave('distances.png', distances)

        if "sizes" in batch:
            print("sizes:", batch["sizes"].shape,
                  batch["sizes"].float().min().item(),
                  batch["sizes"].float().max().item())
            sizes = np.array(batch["sizes"][0])
            sizes = np.moveaxis(sizes, 0, -1)
            skimage.io.imsave('sizes.png', sizes)

        # valid_mask = np.array(batch["valid_mask"][0])
        # valid_mask = np.moveaxis(valid_mask, 0, -1)
        # skimage.io.imsave('valid_mask.png', valid_mask)

        input("Press enter to continue...")

        print("Apply online tranform:")
        batch = utils.batch_to_cuda(batch)
        batch = train_online_cuda_transform(batch)
        batch = utils.batch_to_cpu(batch)

        print("image:", batch["image"].shape, batch["image"].min().item(),
              batch["image"].max().item())
        print("gt_polygons_image:", batch["gt_polygons_image"].shape,
              batch["gt_polygons_image"].min().item(),
              batch["gt_polygons_image"].max().item())
        print("gt_crossfield_angle:", batch["gt_crossfield_angle"].shape,
              batch["gt_crossfield_angle"].min().item(),
              batch["gt_crossfield_angle"].max().item())
        # print("distances:", batch["distances"].shape, batch["distances"].min().item(), batch["distances"].max().item())
        # print("sizes:", batch["sizes"].shape, batch["sizes"].min().item(), batch["sizes"].max().item())

        # Save output to visualize
        seg = np.array(batch["gt_polygons_image"][0])
        seg = np.moveaxis(seg, 0, -1)
        seg_display = utils.get_seg_display(seg)
        seg_display = (seg_display * 255).astype(np.uint8)
        skimage.io.imsave("gt_seg.png", seg_display)

        im = np.array(batch["image"][0])
        im = np.moveaxis(im, 0, -1)
        skimage.io.imsave('im.png', im)

        gt_crossfield_angle = np.array(batch["gt_crossfield_angle"][0])
        gt_crossfield_angle = np.moveaxis(gt_crossfield_angle, 0, -1)
        skimage.io.imsave('gt_crossfield_angle.png', gt_crossfield_angle)

        distances = np.array(batch["distances"][0])
        distances = np.moveaxis(distances, 0, -1)
        skimage.io.imsave('distances.png', distances)

        sizes = np.array(batch["sizes"][0])
        sizes = np.moveaxis(sizes, 0, -1)
        skimage.io.imsave('sizes.png', sizes)

        # valid_mask = np.array(batch["valid_mask"][0])
        # valid_mask = np.moveaxis(valid_mask, 0, -1)
        # skimage.io.imsave('valid_mask.png', valid_mask)

        input("Press enter to continue...")
Code example #22
def eval_one(annotation_filename, run_results_dirpath, cocoGt, config, annType, pool=None):
    print("---eval_one")
    annotation_name = os.path.splitext(annotation_filename)[0]
    if "samples" in config:
        stats_filepath = os.path.join(run_results_dirpath,
                                      "{}.stats.{}.{}.json".format("test", annotation_name, config["samples"]))
        metrics_filepath = os.path.join(run_results_dirpath,
                                      "{}.metrics.{}.{}.json".format("test", annotation_name, config["samples"]))
    else:
        stats_filepath = os.path.join(run_results_dirpath, "{}.stats.{}.json".format("test", annotation_name))
        metrics_filepath = os.path.join(run_results_dirpath, "{}.metrics.{}.json".format("test", annotation_name))

    res_filepath = os.path.join(run_results_dirpath, annotation_filename)
    if not os.path.exists(res_filepath):
        print_utils.print_warning("WARNING: result not found at filepath {}".format(res_filepath))
        return
    print_utils.print_info("Evaluate {} annotations:".format(annotation_filename))
    try:
        cocoDt = cocoGt.loadRes(res_filepath)
    except AssertionError as e:
        print_utils.print_error("ERROR: {}".format(e))
        print_utils.print_info("INFO: continuing by removing unrecognised images")
        with open(res_filepath) as f:
            res = json.load(f)
        print("Initial res length:", len(res))
        annsImgIds = [ann["image_id"] for ann in res]
        image_id_rm = set(annsImgIds) - set(cocoGt.getImgIds())
        print_utils.print_warning("Remove {} image ids!".format(len(image_id_rm)))
        new_res = [ann for ann in res if ann["image_id"] not in image_id_rm]
        print("New res length:", len(new_res))
        cocoDt = cocoGt.loadRes(new_res)
        # {4601886185638229705, 4602408603195004682, 4597274499619802317, 4600985465712755606, 4597238470822783353,
        #  4597418614807878173}


    # image_id = 0
    # annotation_ids = cocoDt.getAnnIds(imgIds=image_id)
    # annotation_list = cocoDt.loadAnns(annotation_ids)
    # print(annotation_list)

    if not os.path.exists(stats_filepath):
        # Run COCOeval
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        # Save stats
        stats = {}
        stat_names = ["AP", "AP_50", "AP_75", "AP_S", "AP_M", "AP_L", "AR", "AR_50", "AR_75", "AR_S", "AR_M", "AR_L"]
        assert len(stat_names) == cocoEval.stats.shape[0]
        for i, stat_name in enumerate(stat_names):
            stats[stat_name] = cocoEval.stats[i]

        python_utils.save_json(stats_filepath, stats)
    else:
        print("COCO stats already computed, skipping...")

    if not os.path.exists(metrics_filepath):
        # Verify that cocoDt has polygonal segmentation masks and not raster masks:
        if isinstance(cocoDt.loadAnns(cocoDt.getAnnIds(imgIds=cocoDt.getImgIds()[0]))[0]["segmentation"], list):
            metrics = {}
            # Run additional metrics
            print_utils.print_info("INFO: Running contour metrics")
            contour_eval = ContourEval(cocoGt, cocoDt)
            max_angle_diffs = contour_eval.evaluate(pool=pool)
            metrics["max_angle_diffs"] = list(max_angle_diffs)
            python_utils.save_json(metrics_filepath, metrics)
    else:
        print("Contour metrics already computed, skipping...")
def get_inria_aerial_folds(config, root_dir, folds):
    from torch_lydorn.torchvision.datasets import InriaAerial

    # --- Online transform done on the host (CPU):
    online_cpu_transform = data_transforms.get_online_cpu_transform(
        config, augmentations=config["data_aug_params"]["enable"])
    mask_only = config["dataset_params"]["mask_only"]
    kwargs = {
        "pre_process": config["dataset_params"]["pre_process"],
        "transform": online_cpu_transform,
        "patch_size": config["dataset_params"]["data_patch_size"],
        "patch_stride": config["dataset_params"]["input_patch_size"],
        "pre_transform": data_transforms.get_offline_transform_patch(
            distances=not mask_only, sizes=not mask_only),
        "small": config["dataset_params"]["small"],
        "pool_size": config["num_workers"],
        "gt_source": config["dataset_params"]["gt_source"],
        "gt_type": config["dataset_params"]["gt_type"],
        "gt_dirname": config["dataset_params"]["gt_dirname"],
        "mask_only": mask_only,
    }
    train_val_split_point = config["dataset_params"]["train_fraction"] * 36
    partial_train_tile_filter = functools.partial(
        inria_aerial_train_tile_filter,
        train_val_split_point=train_val_split_point)
    partial_val_tile_filter = functools.partial(
        inria_aerial_val_tile_filter,
        train_val_split_point=train_val_split_point)

    ds_list = []
    for fold in folds:
        if fold == "train":
            ds = InriaAerial(root_dir,
                             fold="train",
                             tile_filter=partial_train_tile_filter,
                             **kwargs)
            ds_list.append(ds)
        elif fold == "val":
            ds = InriaAerial(root_dir,
                             fold="train",
                             tile_filter=partial_val_tile_filter,
                             **kwargs)
            ds_list.append(ds)
        elif fold == "train_val":
            ds = InriaAerial(root_dir, fold="train", **kwargs)
            ds_list.append(ds)
        elif fold == "test":
            ds = InriaAerial(root_dir, fold="test", **kwargs)
            ds_list.append(ds)
        else:
            print_utils.print_error(
                "ERROR: fold \"{}\" not recognized, implement it in dataset_folds.py."
                .format(fold))

    return ds_list
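A stand-alone sketch of the train/val tile split set up above: the Inria train set has 36 tiles per city and train_fraction * 36 of them go to the training fold. The filter functions below are simplified placeholders for inria_aerial_train_tile_filter / inria_aerial_val_tile_filter, which in the project operate on tile metadata rather than a bare tile number:

import functools

def train_tile_filter(tile_number, train_val_split_point):
    return tile_number <= train_val_split_point

def val_tile_filter(tile_number, train_val_split_point):
    return train_val_split_point < tile_number

train_val_split_point = 0.75 * 36  # 0.75 stands in for config["dataset_params"]["train_fraction"]
is_train = functools.partial(train_tile_filter, train_val_split_point=train_val_split_point)
is_val = functools.partial(val_tile_filter, train_val_split_point=train_val_split_point)
print(sum(is_train(t) for t in range(1, 37)), sum(is_val(t) for t in range(1, 37)))  # 27 9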
Code example #24
def get_online_cpu_transform(config, augmentations=False):
    if augmentations and config["data_aug_params"]["device"] == "cpu":
        print_utils.print_error(
            "ERROR: CPU augmentations is not supported anymore. "
            "Look at CudaDataAugmentation to see what additional augs would need to be implemented."
        )
        raise NotImplementedError
    online_transform_list = []
    # Convert to PIL images
    if not augmentations \
            or (augmentations and config["data_aug_params"]["device"] == "cpu"):
        online_transform_list.extend([
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.ToPILImage(), key="image"),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.ToPILImage(),
                key="gt_polygons_image"),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.ToPILImage(),
                key="gt_crossfield_angle"),
        ])
    # Add rotation data augmentation:
    if augmentations and config["data_aug_params"]["device"] == "cpu" and \
            config["data_aug_params"]["affine"]:
        online_transform_list.extend([
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torch_lydorn.torchvision.transforms.SampleUniform(
                    -180, 180),
                outkey="rand_angle"),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.functional.rotate,
                key=["image", "rand_angle"],
                outkey="image",
                resample=PIL.Image.BILINEAR),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.functional.rotate,
                key=["gt_polygons_image", "rand_angle"],
                outkey="gt_polygons_image",
                resample=PIL.Image.BILINEAR),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.functional.rotate,
                key=["gt_crossfield_angle", "rand_angle"],
                outkey="gt_crossfield_angle",
                resample=PIL.Image.NEAREST),
        ])

    # Crop to final size
    if not augmentations \
            or (augmentations and config["data_aug_params"]["device"] == "cpu"):
        if "input_patch_size" in config["dataset_params"]:
            online_transform_list.extend([
                torch_lydorn.torchvision.transforms.TransformByKey(
                    transform=torchvision.transforms.CenterCrop(
                        config["dataset_params"]["input_patch_size"]),
                    key="image"),
                torch_lydorn.torchvision.transforms.TransformByKey(
                    transform=torchvision.transforms.CenterCrop(
                        config["dataset_params"]["input_patch_size"]),
                    key="gt_polygons_image"),
                torch_lydorn.torchvision.transforms.TransformByKey(
                    transform=torchvision.transforms.CenterCrop(
                        config["dataset_params"]["input_patch_size"]),
                    key="gt_crossfield_angle"),
                torch_lydorn.torchvision.transforms.TransformByKey(
                    transform=torch_lydorn.torchvision.transforms.CenterCrop(
                        config["dataset_params"]["input_patch_size"]),
                    key="distances"),
                torch_lydorn.torchvision.transforms.TransformByKey(
                    transform=torch_lydorn.torchvision.transforms.CenterCrop(
                        config["dataset_params"]["input_patch_size"]),
                    key="sizes"),
            ])

    # Random vertical flip:
    if augmentations and config["data_aug_params"]["device"] == "cpu" and \
            config["data_aug_params"]["vflip"]:
        online_transform_list.extend([
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torch_lydorn.torchvision.transforms.RandomBool(
                    p=0.5),
                outkey="rand_flip"),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torch_lydorn.torchvision.transforms.ConditionApply(
                    transform=torchvision.transforms.functional.vflip),
                key=["image", "rand_flip"],
                outkey="image"),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torch_lydorn.torchvision.transforms.ConditionApply(
                    transform=torchvision.transforms.functional.vflip),
                key=["gt_polygons_image", "rand_flip"],
                outkey="gt_polygons_image"),
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torch_lydorn.torchvision.transforms.ConditionApply(
                    transform=torchvision.transforms.functional.vflip),
                key=["gt_crossfield_angle", "rand_flip"],
                outkey="gt_crossfield_angle"),
        ])

    # Other augs:
    if augmentations and config["data_aug_params"]["device"] == "cpu" and \
            config["data_aug_params"]["color_jitter"]:
        online_transform_list.append(
            torch_lydorn.torchvision.transforms.TransformByKey(
                transform=torchvision.transforms.ColorJitter(brightness=0.05,
                                                             contrast=0.05,
                                                             saturation=.5,
                                                             hue=.1),
                key="image"))
    # Convert to PyTorch tensors:
    online_transform_list.extend([
        # Print(),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torch_lydorn.torchvision.transforms.ToTensor(),
            key="image"),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torchvision.transforms.Lambda(torch.from_numpy),
            key="image_mean"),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torchvision.transforms.Lambda(torch.from_numpy),
            key="image_std"),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torch_lydorn.torchvision.transforms.ToTensor(),
            key="gt_polygons_image",
            ignore_key_error=True),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torch_lydorn.torchvision.transforms.ToTensor(),
            key="gt_crossfield_angle",
            ignore_key_error=True),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torch_lydorn.torchvision.transforms.ToTensor(),
            key="distances",
            ignore_key_error=True),
        torch_lydorn.torchvision.transforms.TransformByKey(
            transform=torch_lydorn.torchvision.transforms.ToTensor(),
            key="sizes",
            ignore_key_error=True),
    ])

    online_transform_list.append(
        torch_lydorn.torchvision.transforms.RemoveKeys(keys=["gt_polygons"]))

    online_transform = torchvision.transforms.Compose(online_transform_list)
    return online_transform
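
Every step in this pipeline uses TransformByKey, which wraps a standard transform so that it reads one or more entries of the sample dict and writes the result back under an output key. The simplified, self-contained sketch below is an assumption about how torch_lydorn's helper behaves, not its actual implementation:

class TransformByKey:
    """Apply `transform` to sample[key] (or a list of keys) and store the result under `outkey`."""

    def __init__(self, transform, key=None, outkey=None, ignore_key_error=False, **transform_kwargs):
        self.transform = transform
        self.keys = [key] if isinstance(key, str) else list(key or [])
        self.outkey = outkey if outkey is not None else (self.keys[0] if self.keys else None)
        self.ignore_key_error = ignore_key_error
        self.transform_kwargs = transform_kwargs  # e.g. resample=PIL.Image.BILINEAR

    def __call__(self, sample):
        try:
            args = [sample[key] for key in self.keys]
        except KeyError:
            if self.ignore_key_error:
                return sample  # Skip samples that do not carry this key
            raise
        sample[self.outkey] = self.transform(*args, **self.transform_kwargs)
        return sample
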
Code example #25
def main():
    # Test using transforms from the frame_field_learning project:
    from frame_field_learning import data_transforms

    config = {
        "data_dir_candidates": [
            "/data/titane/user/nigirard/data", "~/data", "/data",
            "/home/krishna/building-footprints-custom/frameField/data"
        ],
        "dataset_params": {
            "small": True,
            "root_dirname": "mapping_challenge_dataset",
            "seed": 0,
            "train_fraction": 0.75
        },
        "num_workers":
        8,
        "data_aug_params": {
            "enable": False,
            "vflip": True,
            "affine": True,
            "color_jitter": True,
            "device": "cuda"
        }
    }

    # Find data_dir
    data_dir = python_utils.choose_first_existing_path(
        config["data_dir_candidates"])
    if data_dir is None:
        print_utils.print_error("ERROR: Data directory not found!")
        exit()
    else:
        print_utils.print_info("Using data from {}".format(data_dir))
    root_dir = os.path.join(data_dir, config["dataset_params"]["root_dirname"])

    # --- Transforms: --- #
    # --- pre-processing transform (done once then saved on disk):
    # --- Online transform done on the host (CPU):
    train_online_cpu_transform = data_transforms.get_online_cpu_transform(
        config, augmentations=config["data_aug_params"]["enable"])
    test_online_cpu_transform = data_transforms.get_eval_online_cpu_transform()

    train_online_cuda_transform = data_transforms.get_online_cuda_transform(
        config, augmentations=config["data_aug_params"]["enable"])
    # --- --- #

    dataset = MappingChallenge(
        root_dir,
        transform=test_online_cpu_transform,
        pre_transform=data_transforms.get_offline_transform_patch(),
        fold="train",
        small=config["dataset_params"]["small"],
        pool_size=config["num_workers"])

    print("# --- Sample 0 --- #")
    sample = dataset[0]
    print(sample.keys())

    for key, item in sample.items():
        print("{}: {}".format(key, type(item)))

    print(sample["image"].shape)
    print(len(sample["gt_polygons_image"]))
    print("# --- Samples --- #")
    # for data in tqdm(dataset):
    #     pass

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=10,
        shuffle=True,
        num_workers=config["num_workers"])
    print("# --- Batches --- #")
    for batch in tqdm(data_loader):
        print("Images:")
        print(batch["image_relative_filepath"])
        print(batch["image"].shape)
        print(batch["gt_polygons_image"].shape)

        print("Apply online tranform:")
        batch = utils.batch_to_cuda(batch)
        batch = train_online_cuda_transform(batch)
        batch = utils.batch_to_cpu(batch)

        print(batch["image"].shape)
        print(batch["gt_polygons_image"].shape)

        # Save output to visualize
        seg = np.array(batch["gt_polygons_image"][0])
        seg = np.moveaxis(seg, 0, -1)
        seg_display = utils.get_seg_display(seg)
        seg_display = (seg_display * 255).astype(np.uint8)
        skimage.io.imsave("gt_seg.png", seg_display)
        skimage.io.imsave("gt_seg_edge.png", seg[:, :, 1])

        im = np.array(batch["image"][0])
        im = np.moveaxis(im, 0, -1)
        skimage.io.imsave('im.png', im)

        gt_crossfield_angle = np.array(batch["gt_crossfield_angle"][0])
        gt_crossfield_angle = np.moveaxis(gt_crossfield_angle, 0, -1)
        skimage.io.imsave('gt_crossfield_angle.png', gt_crossfield_angle)

        distances = np.array(batch["distances"][0])
        distances = np.moveaxis(distances, 0, -1)
        skimage.io.imsave('distances.png', distances)

        sizes = np.array(batch["sizes"][0])
        sizes = np.moveaxis(sizes, 0, -1)
        skimage.io.imsave('sizes.png', sizes)

        # valid_mask = np.array(batch["valid_mask"][0])
        # valid_mask = np.moveaxis(valid_mask, 0, -1)
        # skimage.io.imsave('valid_mask.png', valid_mask)

        input("Press enter to continue...")
Code example #26
def launch_eval(args):
    # --- Init: fills mode-specific default command-line arguments
    if args.fold is None:
        fold = {"test"}  # Default value for eval mode
    else:
        fold = set(args.fold)
    assert len(fold) == 1, "Argument 'fold' must be a single fold in eval mode"
    # --- First step: figure out what run (experiment) is to be evaluated
    # Option 1: the run_name argument is given in which case that's our run
    run_name = None
    config = None
    if args.run_name is not None:
        run_name = args.run_name
    # Else option 2: Check if a config has been given to look for the run_name
    if args.config is not None:
        config = run_utils.load_config(args.config)
        if config is not None and "run_name" in config and run_name is None:
            run_name = config["run_name"]
    # Else abort...
    if run_name is None:
        print_utils.print_error(
            "ERROR: the run to evaluate could no be identified with the given arguments. "
            "Please specify either the --run_name argument or the --config argument "
            "linking to a config file that has a 'run_name' field filled with the name of "
            "the run name to evaluate.")
        sys.exit()

    # --- Second step: get path to the run and if --config was not specified, load the config from the run's folder
    run_dirpath = frame_field_learning.local_utils.get_run_dirpath(
        args.runs_dirpath, run_name)
    if config is None:
        config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            f"ERROR: the default run's config file at {run_dirpath} could not be loaded. "
            f"Exiting now...")
        sys.exit()

    # --- Third step: Replace parameters in config file from command-line arguments
    if args.dataset_params is not None:
        config["dataset_params"] = python_utils.load_json(args.dataset_params)
    if args.samples is not None:
        config["samples"] = args.samples
    if args.batch_size is not None:
        config["optim_params"]["batch_size"] = args.batch_size
    if args.eval_batch_size is not None:
        config["optim_params"]["eval_batch_size"] = args.eval_batch_size
    else:
        config["optim_params"][
            "eval_batch_size"] = 2 * config["optim_params"]["batch_size"]
    config["fold"] = list(fold)
    config["nodes"] = args.nodes
    config["gpus"] = args.gpus
    config["nr"] = args.nr
    config["world_size"] = args.gpus * args.nodes

    # --- Load params in config set as relative path to another JSON file
    config = run_utils.load_defaults_in_config(
        config, filepath_key="defaults_filepath")

    config["eval_params"]["run_dirpath"] = run_dirpath
    if args.eval_patch_size is not None:
        config["eval_params"]["patch_size"] = args.eval_patch_size
    if args.eval_patch_overlap is not None:
        config["eval_params"]["patch_overlap"] = args.eval_patch_overlap

    # Setup num_workers per process:
    if config["num_workers"] is None:
        config["num_workers"] = int(torch.multiprocessing.cpu_count() /
                                    config["gpus"])

    # --- Distributed init:
    os.environ['MASTER_ADDR'] = args.master_addr
    os.environ['MASTER_PORT'] = args.master_port
    manager = torch.multiprocessing.Manager()
    shared_dict = manager.dict()
    shared_dict["name_list"] = manager.list()
    shared_dict["iou_list"] = manager.list()
    shared_dict["seg_coco_list"] = manager.list()
    shared_dict["poly_coco_list"] = manager.list()
    barrier = manager.Barrier(args.gpus)

    torch.multiprocessing.spawn(eval_process,
                                nprocs=args.gpus,
                                args=(config, shared_dict, barrier))
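
launch_eval expects an argparse-style namespace. A hedged sketch of a parser covering just the attributes read above (default values are assumptions, not the project's actual defaults):

import argparse

def get_eval_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--run_name", type=str, default=None)
    parser.add_argument("--config", type=str, default=None)
    parser.add_argument("--runs_dirpath", type=str, default="runs")
    parser.add_argument("--fold", type=str, nargs="*", default=None)
    parser.add_argument("--dataset_params", type=str, default=None)
    parser.add_argument("--samples", type=int, default=None)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--eval_batch_size", type=int, default=None)
    parser.add_argument("--eval_patch_size", type=int, default=None)
    parser.add_argument("--eval_patch_overlap", type=int, default=None)
    parser.add_argument("--nodes", type=int, default=1)
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument("--nr", type=int, default=0)  # Rank of this node
    parser.add_argument("--master_addr", type=str, default="localhost")
    parser.add_argument("--master_port", type=str, default="12355")
    return parser.parse_args()
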
Code example #27
def compute_seg_loss_weigths(pred_batch, gt_batch, config):
    """
    Combines distances (from U-Net paper) with sizes (from https://github.com/neptune-ai/open-solution-mapping-challenge).

    @param pred_batch:
    @param gt_batch:
    @return:
    """
    device = gt_batch["distances"].device
    use_dist = config["loss_params"]["seg_loss_params"]["use_dist"]
    use_size = config["loss_params"]["seg_loss_params"]["use_size"]
    w0 = config["loss_params"]["seg_loss_params"]["w0"]
    sigma = config["loss_params"]["seg_loss_params"]["sigma"]
    height = gt_batch["image"].shape[2]
    width = gt_batch["image"].shape[3]
    im_radius = math.sqrt(height * width) / 2

    # --- Class imbalance weight (not forgetting background):
    gt_polygons_mask = (0 < gt_batch["gt_polygons_image"]).float()
    background_freq = 1 - torch.sum(gt_batch["class_freq"], dim=1)
    pixel_class_freq = gt_polygons_mask * gt_batch["class_freq"][:, :, None, None] + \
                       (1 - gt_polygons_mask) * background_freq[:, None, None, None]
    if pixel_class_freq.min() == 0:
        print_utils.print_error(
            "ERROR: pixel_class_freq has some zero values, can't divide by zero!"
        )
        raise ZeroDivisionError
    freq_weights = 1 / pixel_class_freq
    # print("freq_weights:", freq_weights.min().item(), freq_weights.max().item())

    # Compute size weights
    # print("sizes:", gt_batch["sizes"].min().item(), gt_batch["sizes"].max().item())
    # print("distances:", gt_batch["distances"].min().item(), gt_batch["distances"].max().item())
    # print("im_radius:", im_radius)
    size_weights = None
    if use_size:
        if gt_batch["sizes"].min() == 0:
            print_utils.print_error(
                "ERROR: sizes tensor has zero values, can't divide by zero!")
            raise ZeroDivisionError
        size_weights = 1 + 1 / (im_radius * gt_batch["sizes"])

    distance_weights = None
    if use_dist:
        # print("distances:", gt_batch["distances"].min().item(), gt_batch["distances"].max().item())
        distance_weights = gt_batch["distances"] * (height + width
                                                    )  # Denormalize distances
        distance_weights = w0 * torch.exp(-(distance_weights**2) / (sigma**2))
        # print("sum(distances == 0):", torch.sum(gt_batch["distances"] == 0).item())
        # print("distance_weights:", distance_weights.min().item(), distance_weights.max().item())

        # print(distance_weights.shape, distance_weights.min().item(), distance_weights.max().item())
        # print(size_weights.shape, size_weights.min().item(), size_weights.max().item())
        # print(freq_weights.shape, freq_weights.min().item(), freq_weights.max().item())

    gt_batch["seg_loss_weights"] = freq_weights
    if use_dist:
        gt_batch["seg_loss_weights"] += distance_weights
    if use_size:
        gt_batch["seg_loss_weights"] *= size_weights

    # print(gt_batch["seg_loss_weights"].shape, gt_batch["seg_loss_weights"].min().item(), gt_batch["seg_loss_weights"].max().item())
    # print("seg_loss_weights:", size_weights.min().item(), size_weights.max().item())

    # print("freq_weights:", freq_weights.min().item(), freq_weights.max().item())
    # print("size_weights:", size_weights.min().item(), size_weights.max().item())
    # print("distance_weights:", distance_weights.min().item(), distance_weights.max().item())

    # Display:
    # display_seg_loss_weights = gt_batch["seg_loss_weights"][0].cpu().detach().numpy()
    # display_distance_weights = distance_weights[0].cpu().detach().numpy()
    # skimage.io.imsave("seg_loss_dist_weights.png", display_distance_weights[0])
    # display_size_weights = size_weights[0].cpu().detach().numpy()
    # skimage.io.imsave("seg_loss_size_weights.png", display_size_weights[0])
    # display_freq_weights = freq_weights[0].cpu().detach().numpy()
    # display_freq_weights = display_freq_weights - display_freq_weights.min()
    # display_freq_weights /= display_freq_weights.max()
    # skimage.io.imsave("seg_loss_freq_weights.png", np.moveaxis(display_freq_weights, 0, -1))
    # for i in range(3):
    #     skimage.io.imsave(f"seg_loss_weights_{i}.png", display_seg_loss_weights[i])
    # skimage.io.imsave(f"freq_weights_{i}.png", display_freq_weights[i])

    return pred_batch, gt_batch
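
With both terms enabled, each pixel ends up with weight (1 / class_freq + w0 * exp(-d^2 / sigma^2)) * (1 + 1 / (im_radius * size)). A tiny scalar check of that combination (all values below are made up, including w0 and sigma, and are not the project's defaults):

import math

class_freq, border_dist, size = 0.2, 3.0, 0.05  # Pixel class frequency, denormalized border distance (px), normalized instance size
w0, sigma, im_radius = 50.0, 10.0, 150.0  # Made-up hyper-parameters

freq_weight = 1.0 / class_freq
distance_weight = w0 * math.exp(-(border_dist ** 2) / (sigma ** 2))
size_weight = 1.0 + 1.0 / (im_radius * size)
print((freq_weight + distance_weight) * size_weight)  # Larger near borders, for small buildings and rare classes
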
def polygonize_masks(run_dirpath, images_dirpath, gt_filepath, in_filepath,
                     out_filepath, batch_size, batch_size_mult):
    coco_gt = COCO(gt_filepath)
    coco_dt = coco_gt.loadRes(in_filepath)

    # --- Load model --- #
    # Load run's config file:
    config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            "ERROR: cannot continue without a config file. Exiting now...")
        sys.exit()

    config["backbone_params"][
        "pretrained"] = False  # Don't load pretrained model
    backbone = get_backbone(config["backbone_params"])
    eval_online_cuda_transform = data_transforms.get_eval_online_cuda_transform(
        config)
    model = FrameFieldModel(config,
                            backbone=backbone,
                            eval_transform=eval_online_cuda_transform)
    model.to(config["device"])
    checkpoints_dirpath = run_utils.setup_run_subdir(
        run_dirpath, config["optim_params"]["checkpoints_dirname"])
    model = inference.load_checkpoint(model, checkpoints_dirpath,
                                      config["device"])
    model.eval()

    # --- Polygonize input COCO mask detections --- #
    img_ids = coco_dt.getImgIds()
    # img_ids = sorted(img_ids)[:1]  # TODO: rm limit
    output_annotations = []

    model_data_list = []  # Used to accumulate inputs and run model inference on it.
    poly_data_list = []  # Used to accumulate inputs and run polygonization on it.
    for img_id in tqdm(img_ids, desc="Polygonizing"):
        # Load image
        img = coco_gt.loadImgs(img_id)[0]
        image = skimage.io.imread(
            os.path.join(images_dirpath, img["file_name"]))

        # Draw mask from input COCO mask annotations
        mask_image = np.zeros((img["height"], img["width"]))
        score_image = np.zeros((img["height"], img["width"]))
        dts = coco_dt.loadAnns(coco_dt.getAnnIds(imgIds=img_id))
        for dt in dts:
            dt_mask = cocomask.decode(dt["segmentation"])
            mask_image = np.maximum(mask_image, dt_mask)
            score_image = np.maximum(score_image, dt_mask * dt["score"])

        # Accumulate inputs into the current batch
        sample_data = {
            "img_id": [img_id],
            "mask_image":
            torch_lydorn.torchvision.transforms.functional.to_tensor(
                mask_image)[None, ...].float(),
            "score_image":
            torch_lydorn.torchvision.transforms.functional.to_tensor(
                score_image)[None, ...].float(),
            "image":
            torch_lydorn.torchvision.transforms.functional.to_tensor(image)[
                None, ...],
            "image_mean":
            torch.tensor(image_mean)[None, ...],
            "image_std":
            torch.tensor(image_std)[None, ...]
        }
        # Accumulate batch for running the model
        model_data_list.append(sample_data)
        if len(model_data_list) == batch_size:
            # Run model
            tile_data = run_model(config, model, model_data_list)
            model_data_list = []  # Empty model batch

            # Accumulate batch for running the polygonization
            poly_data_list.append(tile_data)
            if len(poly_data_list) == batch_size_mult:
                coco_ann = run_polygonization(poly_data_list)
                output_annotations.extend(coco_ann)
                poly_data_list = []
    # Finish with incomplete batches
    if len(model_data_list):
        tile_data = run_model(config, model, model_data_list)
        poly_data_list.append(tile_data)
    if len(poly_data_list):
        coco_ann = run_polygonization(poly_data_list)
        output_annotations.extend(coco_ann)

    print("Saving output...")
    with open(out_filepath, 'w') as outfile:
        json.dump(output_annotations, outfile)
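
A hedged usage sketch of polygonize_masks; every path below is a placeholder, not a path from the project:

polygonize_masks(
    run_dirpath="runs/my_frame_field_run",
    images_dirpath="data/mapping_challenge_dataset/val/images",
    gt_filepath="data/mapping_challenge_dataset/val/annotation.json",
    in_filepath="detections.mask.json",   # COCO-format mask detections to polygonize
    out_filepath="detections.poly.json",  # Polygonized COCO annotations written here
    batch_size=4,
    batch_size_mult=2)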