Example #1
def __init__(self,
             nbr_classes,
             backbone='xception',
             deep_supervision=True,
             os=16,
             norm='bn',
             **kwargs):
    super(DeepLabV3Plus, self).__init__()
    self.nbr_classes = nbr_classes
    self.up_method = {'mode': 'bilinear', 'align_corners': True}
    self.backbone = get_backbone(backbone, norm=norm, **kwargs)
    self.core = DeepLabV3PlusCore(in_channels=2048,
                                  out_channels=nbr_classes,
                                  backbone=self.backbone,
                                  up_method=self.up_method,
                                  os=os)
    if deep_supervision:
        # Auxiliary classifier on the mid-level (1024-channel) feature map,
        # used for deep supervision during training:
        self.aux_branch = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=3, stride=1, padding=1,
                      bias=False),
            get_norm(norm, channels=256),
            nn.ReLU(inplace=False),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(256, nbr_classes, kernel_size=1, stride=1))
    self.deep_supervision = deep_supervision
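A minimal usage sketch for the module above (hypothetical: it assumes the surrounding DeepLabV3Plus class and its dependencies are importable, and that its forward pass takes a single NCHW image batch; the class count and resolution are placeholders):

import torch

model = DeepLabV3Plus(nbr_classes=21, backbone='xception', os=16)
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)  # one RGB image, 512x512
    outputs = model(x)  # hypothetical forward call; actual signature may differ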
Example #2
def eval_process(gpu, config, shared_dict, barrier):
    from frame_field_learning.evaluate import evaluate

    torch.manual_seed(0)  # Ensure same seed for all processes
    # --- Find data directory --- #
    root_dir_candidates = [
        os.path.join(data_dirpath, config["dataset_params"]["root_dirname"])
        for data_dirpath in config["data_dir_candidates"]
    ]
    root_dir, paths_tried = python_utils.choose_first_existing_path(
        root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error(
            "GPU {} -> ERROR: Data root directory amongst \"{}\" not found!".
            format(gpu, paths_tried))
        raise NotADirectoryError(
            f"Couldn't find a directory in {paths_tried} (gpu:{gpu})")
    print_utils.print_info("GPU {} -> Using data from {}".format(
        gpu, root_dir))
    config["data_root_dir"] = root_dir

    # --- Get dataset
    # - CHANGE HERE TO ADD YOUR OWN DATASET
    eval_ds, = get_folds(
        config, root_dir,
        folds=config["fold"])  # config["fold"] is already a list (of length 1)

    # --- Instantiate backbone network (used to extract features)
    backbone = get_backbone(config["backbone_params"])

    evaluate(gpu, config, shared_dict, barrier, eval_ds, backbone)
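The (gpu, config, shared_dict, barrier) signature above matches the calling convention of torch.multiprocessing.spawn. A hypothetical launcher sketch (the helper name and GPU count are illustrative, not from the source):

import torch.multiprocessing as mp

def launch_eval(config, nbr_gpus):
    manager = mp.Manager()
    shared_dict = manager.dict()  # shared between evaluation workers
    barrier = manager.Barrier(nbr_gpus)  # lets the workers synchronize
    mp.spawn(eval_process, nprocs=nbr_gpus,
             args=(config, shared_dict, barrier))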
Example #3
def __init__(self,
             nbr_classes,
             deep_supervision=True,
             backbone='resnet50',
             se_loss=True,
             norm='bn',
             **kwargs):
    super(EncNet, self).__init__()
    self.up_method = {'mode': 'bilinear', 'align_corners': True}
    self.nbr_classes = nbr_classes
    self.backbone = get_backbone(backbone, norm=norm, **kwargs)
    self.core = EncCore(in_channels=2048,
                        out_channels=nbr_classes,
                        se_loss=se_loss,
                        dim_codes=32,
                        norm=norm)
    self.deep_supervision = deep_supervision
    if self.deep_supervision:
        # Auxiliary classifier on the 1024-channel feature map,
        # used for deep supervision during training:
        self.aux_branch = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=3, stride=1, padding=1,
                      bias=False),
            get_norm(norm, channels=256),
            nn.ReLU(inplace=False),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(256, nbr_classes, kernel_size=1, stride=1))
Example #4
def __init__(self,
             nbr_classes,
             backbone='vgg16',
             norm='bn',
             deep_supervision=True,
             **kwargs):
    super(UNet, self).__init__()
    self.nbr_classes = nbr_classes
    self.deep_supervision = deep_supervision
    self.up_method = {'mode': 'bilinear', 'align_corners': True}
    # The decoder itself upsamples with learned (transposed) convolutions:
    up_method = 'conv'
    self.backbone = get_backbone(backbone, **kwargs)
    self.core = UNetCore(out_channels=nbr_classes,
                         norm=norm,
                         up_method=up_method,
                         skip_dims=self.backbone.skip_dims)

    if deep_supervision:
        # Auxiliary classifier on the backbone's auxiliary feature map:
        self.aux_branch = nn.Sequential(
            nn.Conv2d(self.backbone.aux_dim, 256, kernel_size=3, stride=1,
                      padding=1, bias=False),
            get_norm(norm, channels=256),
            nn.ReLU(inplace=False),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(256, nbr_classes, kernel_size=1, stride=1))
Example #5
def launch_inference_from_filepath(args):
    from frame_field_learning.inference_from_filepath import inference_from_filepath

    # --- First step: figure out what run (experiment) is to be evaluated
    # Option 1: the run_name argument is given in which case that's our run
    run_name = None
    config = None
    if args.run_name is not None:
        run_name = args.run_name
    # Else option 2: Check if a config has been given to look for the run_name
    if args.config is not None:
        config = run_utils.load_config(args.config)
        if config is not None and "run_name" in config and run_name is None:
            run_name = config["run_name"]
    # Else abort...
    if run_name is None:
        print_utils.print_error(
            "ERROR: the run to evaluate could not be identified with the given arguments. "
            "Please specify either the --run_name argument or the --config argument "
            "linking to a config file whose 'run_name' field contains the name of "
            "the run to evaluate.")
        sys.exit()

    # --- Second step: get path to the run and if --config was not specified, load the config from the run's folder
    run_dirpath = frame_field_learning.local_utils.get_run_dirpath(
        args.runs_dirpath, run_name)
    if config is None:
        config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            f"ERROR: the default run's config file at {run_dirpath} could not be loaded. "
            f"Exiting now...")
        sys.exit()

    # --- Add command-line arguments
    if args.batch_size is not None:
        config["optim_params"]["batch_size"] = args.batch_size
    if args.eval_batch_size is not None:
        config["optim_params"]["eval_batch_size"] = args.eval_batch_size
    else:
        config["optim_params"]["eval_batch_size"] = \
            2 * config["optim_params"]["batch_size"]

    # --- Load default params that the config references via relative paths to other JSON files
    config = run_utils.load_defaults_in_config(
        config, filepath_key="defaults_filepath")

    config["eval_params"]["run_dirpath"] = run_dirpath
    if args.eval_patch_size is not None:
        config["eval_params"]["patch_size"] = args.eval_patch_size
    if args.eval_patch_overlap is not None:
        config["eval_params"]["patch_overlap"] = args.eval_patch_overlap

    backbone = get_backbone(config["backbone_params"])
    inference_from_filepath(config, args.in_filepath, backbone,
                            args.out_dirpath)
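For reference, a hypothetical argument parser matching the fields this function reads from args (the option names are inferred from the attribute accesses above, not taken from the source):

import argparse

def get_inference_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_name', type=str)
    parser.add_argument('--config', type=str)
    parser.add_argument('--runs_dirpath', type=str)
    parser.add_argument('--batch_size', type=int)
    parser.add_argument('--eval_batch_size', type=int)
    parser.add_argument('--eval_patch_size', type=int)
    parser.add_argument('--eval_patch_overlap', type=int)
    parser.add_argument('--in_filepath', type=str)
    parser.add_argument('--out_dirpath', type=str)
    return parser.parse_args()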
Example #6
def __init__(self, nbr_classes, deep_supervision=True, backbone='resnet50',
             norm='bn', S=8, **kwargs):
    super(FCN, self).__init__()
    self.nbr_classes = nbr_classes
    self.up_method = {'mode': 'bilinear', 'align_corners': True}
    self.deep_supervision = deep_supervision
    self.backbone = get_backbone(backbone, norm=norm, **kwargs)
    self.core = FCNCore(out_channels=nbr_classes,
                        skip_dims=self.backbone.skip_dims,
                        ratio_mapping=self.backbone.ratio_mapping,
                        norm=norm, S=S)
    self.S = S
    if deep_supervision:
        # Auxiliary prediction head for deep supervision:
        self.aux_branch = fcn_conv(self.backbone.aux_dim, nbr_classes, norm)
Example #7
def train_process(gpu, config, shared_dict, barrier):
    from frame_field_learning.train import train

    print_utils.print_info(
        "GPU {} -> Ready. There are {} GPU(s) available on this node.".format(
            gpu, torch.cuda.device_count()))

    torch.manual_seed(0)  # Ensure same seed for all processes
    # --- Find data directory --- #
    root_dir_candidates = [
        os.path.join(data_dirpath, config["dataset_params"]["root_dirname"])
        for data_dirpath in config["data_dir_candidates"]
    ]
    root_dir, paths_tried = python_utils.choose_first_existing_path(
        root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error(
            "GPU {} -> ERROR: Data root directory amongst \"{}\" not found!".
            format(gpu, paths_tried))
        sys.exit()
    print_utils.print_info("GPU {} -> Using data from {}".format(
        gpu, root_dir))

    # --- Get dataset splits
    # - CHANGE HERE TO ADD YOUR OWN DATASET
    # We have to adapt the config["fold"] param to the folds argument of the get_folds function
    fold = set(config["fold"])
    if fold == {"train"}:
        # Val will be used for evaluating the model after each epoch:
        train_ds, val_ds = get_folds(config, root_dir, folds=["train", "val"])
    elif fold == {"train", "val"}:
        # Both train and val are meant to be used for training
        train_ds, = get_folds(config, root_dir, folds=["train_val"])
        val_ds = None
    else:
        # Should not happen: main() ensures config["fold"] is one of the cases above
        print_utils.print_error("ERROR: specified folds not recognized!")
        raise NotImplementedError

    # --- Instantiate backbone network
    if config["backbone_params"]["name"] in ["deeplab50", "deeplab101"]:
        assert 1 < config["optim_params"]["batch_size"], \
            "When using backbone {}, batch_size has to be at least 2 for the batchnorm of the ASPPPooling to work."\
                .format(config["backbone_params"]["name"])
    backbone = get_backbone(config["backbone_params"])

    # --- Launch training
    train(gpu, config, shared_dict, barrier, train_ds, val_ds, backbone)
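An illustrative config fragment for the fold handling above (the keys are taken from the accesses in train_process; all values are placeholders):

config = {
    "fold": ["train"],  # or ["train", "val"] to train on both splits
    "data_dir_candidates": ["/data", "~/data"],
    "dataset_params": {"root_dirname": "my_dataset"},
    "backbone_params": {"name": "deeplab101"},
    "optim_params": {"batch_size": 4},
}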
Example #8
def main():
    torch.manual_seed(0)
    # --- Process args --- #
    args = get_args()

    # --- Setup run --- #
    run_dirpath = local_utils.get_run_dirpath(args.runs_dirpath, args.run_name)
    # Load run's config file:
    config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            "ERROR: cannot continue without a config file. Exiting now...")
        sys.exit()

    config["eval_params"]["run_dirpath"] = run_dirpath
    if args.eval_patch_size is not None:
        config["eval_params"]["patch_size"] = args.eval_patch_size
    if args.eval_patch_overlap is not None:
        config["eval_params"]["patch_overlap"] = args.eval_patch_overlap

    backbone = get_backbone(config["backbone_params"])

    polygonize_mask(config, args.filepath, backbone, args.out_ext)
def polygonize_masks(run_dirpath, images_dirpath, gt_filepath, in_filepath,
                     out_filepath, batch_size, batch_size_mult):
    coco_gt = COCO(gt_filepath)
    coco_dt = coco_gt.loadRes(in_filepath)

    # --- Load model --- #
    # Load run's config file:
    config = run_utils.load_config(config_dirpath=run_dirpath)
    if config is None:
        print_utils.print_error(
            "ERROR: cannot continue without a config file. Exiting now...")
        sys.exit()

    config["backbone_params"][
        "pretrained"] = False  # Don't load pretrained model
    backbone = get_backbone(config["backbone_params"])
    eval_online_cuda_transform = data_transforms.get_eval_online_cuda_transform(
        config)
    model = FrameFieldModel(config,
                            backbone=backbone,
                            eval_transform=eval_online_cuda_transform)
    model.to(config["device"])
    checkpoints_dirpath = run_utils.setup_run_subdir(
        run_dirpath, config["optim_params"]["checkpoints_dirname"])
    model = inference.load_checkpoint(model, checkpoints_dirpath,
                                      config["device"])
    model.eval()

    # --- Polygonize input COCO mask detections --- #
    img_ids = coco_dt.getImgIds()
    # img_ids = sorted(img_ids)[:1]  # TODO: rm limit
    output_annotations = []

    model_data_list = []  # Accumulates inputs for batched model inference.
    poly_data_list = []  # Accumulates model outputs for batched polygonization.
    for img_id in tqdm(img_ids, desc="Polygonizing"):
        # Load image
        img = coco_gt.loadImgs(img_id)[0]
        image = skimage.io.imread(
            os.path.join(images_dirpath, img["file_name"]))

        # Draw mask from input COCO mask annotations
        mask_image = np.zeros((img["height"], img["width"]))
        score_image = np.zeros((img["height"], img["width"]))
        dts = coco_dt.loadAnns(coco_dt.getAnnIds(imgIds=img_id))
        for dt in dts:
            dt_mask = cocomask.decode(dt["segmentation"])
            mask_image = np.maximum(mask_image, dt_mask)
            score_image = np.maximum(score_image, dt_mask * dt["score"])

        # Accumulate inputs into the current batch
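        # NOTE: image_mean and image_std are assumed to be defined at module
        # level (per-channel normalization statistics of the dataset).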
        to_tensor = torch_lydorn.torchvision.transforms.functional.to_tensor
        sample_data = {
            "img_id": [img_id],
            "mask_image": to_tensor(mask_image)[None, ...].float(),
            "score_image": to_tensor(score_image)[None, ...].float(),
            "image": to_tensor(image)[None, ...],
            "image_mean": torch.tensor(image_mean)[None, ...],
            "image_std": torch.tensor(image_std)[None, ...],
        }
        # Accumulate batch for running the model
        model_data_list.append(sample_data)
        if len(model_data_list) == batch_size:
            # Run model
            tile_data = run_model(config, model, model_data_list)
            model_data_list = []  # Empty model batch

            # Accumulate batch for running the polygonization
            poly_data_list.append(tile_data)
            if len(poly_data_list) == batch_size_mult:
                coco_ann = run_polygonization(poly_data_list)
                output_annotations.extend(coco_ann)
                poly_data_list = []
    # Finish with incomplete batches
    if len(model_data_list):
        tile_data = run_model(config, model, model_data_list)
        poly_data_list.append(tile_data)
    if len(poly_data_list):
        coco_ann = run_polygonization(poly_data_list)
        output_annotations.extend(coco_ann)

    print("Saving output...")
    with open(out_filepath, 'w') as outfile:
        json.dump(output_annotations, outfile)
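Example invocation (all paths and batch sizes are placeholders for illustration, not from the source):

polygonize_masks(run_dirpath="runs/my_run",
                 images_dirpath="data/images",
                 gt_filepath="data/annotation.json",
                 in_filepath="detections.json",
                 out_filepath="detections.poly.json",
                 batch_size=4,
                 batch_size_mult=8)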