Example #1
def do_camelyon16():
    """
    camelyon16.
    The train/valid/test sets are already provided.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.init_seed()

    ds = constants.CAM16
    announce_msg("Processing dataset: {}".format(ds))
    args = {
        "baseurl": get_rootpath_2_dataset(Dict2Obj({'dataset': ds})),
        "dataset": ds,
        "fold_folder": "folds/{}".format(ds),
        "img_extension": "jpg",
        "path_encoding": "folds/{}/encoding-origine.yaml".format(ds)
    }
    # Convert masks into binary masks: already done.
    # create_bin_mask_Oxford_flowers_102(Dict2Obj(args))
    reproducibility.init_seed()
    al_split_camelyon16(Dict2Obj(args))
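Every example in this dump passes a plain config dict through Dict2Obj before calling the split/instantiation helpers. The class is defined elsewhere in the repo; a minimal sketch of the behavior these examples rely on (assumption: the wrapper is shallow, so nested dicts such as args.preprocessor stay indexable as dicts, which matches how they are used below):

class Dict2Obj:
    """Sketch: expose the top-level keys of a dict as attributes."""
    def __init__(self, d):
        for key, value in d.items():
            setattr(self, key, value)

obj = Dict2Obj({"dataset": "glas", "folding": {"vl": 20}})
assert obj.dataset == "glas"
assert obj.folding["vl"] == 20  # nested dicts remain dicts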
Example #2
def do_Oxford_flowers_102():
    """
    Oxford-flowers-102.
    The train/valid/test sets are already provided.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.init_seed()

    announce_msg("Processing dataset: {}".format(constants.OXF))
    args = {
        "baseurl": get_rootpath_2_dataset(Dict2Obj({'dataset':
                                                    constants.OXF})),
        "dataset": "Oxford-flowers-102",
        "fold_folder": "folds/Oxford-flowers-102",
        "img_extension": "jpg",
        "path_encoding": "folds/Oxford-flowers-102/encoding-origine.yaml"
    }
    # Convert masks into binary masks: already done.
    # create_bin_mask_Oxford_flowers_102(Dict2Obj(args))
    reproducibility.init_seed()
    al_split_Oxford_flowers_102(Dict2Obj(args))
    get_stats(Dict2Obj(args), split=0, fold=0, subset='train')
Example #3
def do_glas():
    """
    GlaS.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.init_seed()

    announce_msg("Processing dataset: {}".format(constants.GLAS))

    args = {
        "baseurl": get_rootpath_2_dataset(Dict2Obj({'dataset':
                                                    constants.GLAS})),
        "folding": {
            "vl": 20
        },  # 80% for train, 20% for validation.
        "dataset": "glas",
        "fold_folder": "folds/glas",
        "img_extension": "bmp",
        # nbr_splits: how many times to perform the k-folds over
        # the available train samples.
        "nbr_splits": 1
    }
    args["nbr_folds"] = math.ceil(100. / args["folding"]["vl"])

    reproducibility.init_seed()
    al_split_glas(Dict2Obj(args))
    get_stats(Dict2Obj(args), split=0, fold=0, subset='train')
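The fold count follows directly from the validation percentage: with "vl": 20, math.ceil(100. / 20) = 5, i.e. 5-fold cross-validation where each fold holds out 20% of the train samples for validation. For instance:

import math

vl = 20                            # validation share, in percent
assert math.ceil(100. / vl) == 5   # -> 5 folds, each 80%/20% train/valid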
Example #4
def instantiate_preprocessor(args):
    """
    Instantiate a preprocessor class from preprocess_offline.

    :param args: object. Contains the configuration of the exp that has been read from the yaml file.
    :return: an instance of a preprocessor.
    """
    if args.preprocessor:
        if args.preprocessor["name"] == "Preprocessor":
            if "stain" in args.preprocessor.keys():
                stain = Dict2Obj(args.preprocessor["stain"])
                name_classes = args.name_classes
                preprocessor = prepocess_offline.__dict__["Preprocessor"](
                    stain, name_classes)

                print(
                    "Preprocessor `{}` was successfully instantiated with the stain preprocessing ON .... [OK]"
                    .format(args.preprocessor["name"]))

                return preprocessor
            else:
                raise ValueError(
                    "Unknown preprocessing operation .... [NOT OK]")
        else:
            raise ValueError(
                "Unsupported preprocessor `{}` .... [NOT OK]".format(
                    args.preprocessor["name"]))
    else:
        print("Proceeding WITHOUT preprocessor .... [OK]")
        return None
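For reference, a config shape that takes the successful branch above. Only "name" and the presence of a "stain" key are checked by this function itself; the keys inside "stain" are consumed by prepocess_offline.Preprocessor, so the values below are illustrative assumptions:

args = Dict2Obj({
    "preprocessor": {
        "name": "Preprocessor",
        "stain": {"method": "vahadane"},  # hypothetical stain config
    },
    "name_classes": {"benign": 0, "malignant": 1},  # hypothetical classes
})
preprocessor = instantiate_preprocessor(args)  # prints "... [OK]"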
Example #5
def do_camelyon16_WSI_level():
    """
    GlaS.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    username = getpass.getuser()
    if username == "brian":
        baseurl = "/media/brian/Seagate Backup Plus Drive/datasets/camelyon16"
    elif username == "sbelharb":
        baseurl = "/project/6004986/sbelharb/workspace/datasets/camelyon16"
    else:
        raise ValueError("username `{}` unknown .... [NOT OK]".format(username))

    args = {"baseurl": baseurl,
            "folding": {"vl": 20},  # 80 % for train, 20% for validation.
            "dataset": "camelyon16",
            "fold_folder": "folds/camelyon16/WSI-level",
            "img_extension": "tif",
            "nbr_folds": 5,
            "nbr_splits": 2  # how many times to perform the k-folds over the available train samples.
            }
    split_valid_camelyon16_WSI_level(Dict2Obj(args))
Example #6
def do_breakhis():
    """
    BreakHis.
    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    username = getpass.getuser()
    if username == "brian":
        baseurl = "/media/brian/Seagate Backup Plus Drive/datasets/" \
                  "Breast-Cancer-Histopathological-Database-BreakHis/mkfold"
    elif username == "sbelharb":
        baseurl = "/project/6004986/sbelharb/workspace/datasets/" \
                  "Breast-Cancer-Histopathological-Database-BreakHis/mkfold"
    else:
        raise ValueError("username `{}` unknown .... [NOT OK]".format(username))

    args = {"baseurl": baseurl,
            "folding": {"vl": 20},  # 80% for train, 20% for validation.
            "dataset": "breakhis",
            "fold_folder": "folds/breakhis",
            "img_extension": "png",
            "nbr_folds": 5,
            "magnification": ["40X", "100X", "200X", "400X"],
            "nbr_splits": 2  # how many times to perform the k-folds over the available train samples.
            }
    split_valid_breakhis(Dict2Obj(args))
Example #7
def instantiate_models(args):
    """Instantiate the necessary models.
    Input:
        args: object. Contains the configuration of the exp that has been read
        from the yaml file.

    Output:
        segmentor: instance of module from deepmil.representation; Embeds the
         instance.
        classifier: instance of module from deepmil.decision_pooling; pools
        the score of each class.
    """
    p = Dict2Obj(args.model)

    model = models.__dict__[p.model_name](pretrained=p.pretrained,
                                          dataset_name=args.dataset,
                                          sigma=p.sigma,
                                          w=p.w,
                                          num_classes=p.num_classes,
                                          scale=p.scale_in_cl,
                                          modalities=p.modalities,
                                          kmax=p.kmax,
                                          kmin=p.kmin,
                                          alpha=p.alpha,
                                          dropout=p.dropout,
                                          set_side_cl=p.side_cl)
    if p.side_cl:
        warnings.warn('side_cl is set to true')
    print("Mi-max entropy model `{}` was successfully instantiated. "
          "Nbr.params: {} .... [OK]".format(model.__class__.__name__,
                                            count_nb_params(model)))
    # for name, param in model.named_parameters():
    #     print(name, param.requires_grad, param.grad)
    return model
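count_nb_params is used throughout but not shown in these examples. A minimal sketch of the assumed behavior for a PyTorch module:

import torch.nn as nn

def count_nb_params(model):
    # Assumed behavior: total number of parameters held by the module.
    return sum(p.numel() for p in model.parameters())

assert count_nb_params(nn.Linear(10, 2)) == 22  # 10*2 weights + 2 biases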
Example #8
def instantiate_models(args):
    """Instantiate the necessary models.
    Input:
        args: object. Contains the configuration of the exp that has been read from the yaml file.

    Output:
        segmentor: instance of module from deepmil.representation; Embeds the instance.
        classifier: instance of module from deepmil.decision_pooling; pools the score of each class.
    """
    p = Dict2Obj(args.model)
    model = models.__dict__[p.name](pretrained=p.pretrained,
                                    num_masks=p.num_masks,
                                    sigma=p.sigma,
                                    w=p.w,
                                    num_classes=p.num_classes,
                                    scale=p.scale,
                                    modalities=p.modalities,
                                    kmax=p.kmax,
                                    kmin=p.kmin,
                                    alpha=p.alpha,
                                    dropout=p.dropout,
                                    nbr_times_erase=args.nbr_times_erase,
                                    sigma_erase=args.sigma_erase)

    print(
        "Mi-max entropy model `{}` was successfully instantiated. Nbr.params: {} .... [OK]"
        .format(model.__class__.__name__, count_nb_params(model)))
    return model
Example #9
def instantiante_random_cropper(args):
    """
    Instantiate a random cropper. It is used for sampling sub-images from an original image in the train set.

    Classes are located in loader.*

    :param args: object. Contains the configuration of the exp that has been read from the yaml file.
    :return: an instance of a random cropper, or None.
    """
    if args.random_cropper:
        r_cropper_config = Dict2Obj(args.random_cropper)
        patch_splitter_config = Dict2Obj(args.patch_splitter)

        if r_cropper_config.name == "RandomCropper":
            min_height = r_cropper_config.min_height
            min_width = r_cropper_config.min_width
            max_height = r_cropper_config.max_height
            max_width = r_cropper_config.max_width
            make_cropped_perfect_for_split = r_cropper_config.make_cropped_perfect_for_split
            h, w, h_, w_ = None, None, None, None
            if make_cropped_perfect_for_split:
                assert patch_splitter_config.name == "PatchSplitter", \
                    "We expected the class `PatchSplitter` but found " \
                    "`{}` .... [NOT OK]".format(patch_splitter_config.name)
                h = patch_splitter_config.h
                w = patch_splitter_config.w
                h_ = patch_splitter_config.h_
                w_ = patch_splitter_config.w_

            random_cropper = loader.__dict__["RandomCropper"](
                min_height, min_width, max_height, max_width,
                make_cropped_perfect_for_split, h, w, h_, w_)

            print(
                "Random cropper `{}` was successfully instantiated .... [OK]".
                format(r_cropper_config.name))

            return random_cropper

        else:
            raise ValueError(
                "Unsuppoerted random cropper `{}` .... [NOT OK]".format(
                    r_cropper_config.name))
    else:
        return None
Example #10
def instantiate_models(args, verbose=True):
    """Instantiate the necessary models.
    Input:
        args: Dict2Obj. Contains the configuration of the exp that has been read
        from the yaml file.
    Output:
        instance of a model
    """
    p = Dict2Obj(args.model)
    if args.task == constants.CL:
        if p.name_model == constants.LENET5:
            model = models_cl.__dict__[p.name_model](
                num_classes=args.num_classes)
        elif p.name_model == constants.SOTASSL:
            model = models_cl.__dict__[p.name_model](
                num_classes=args.num_classes,
                dropoutnetssl=p.dropoutnetssl,
                modalities=p.modalities,
                kmax=p.kmax,
                kmin=p.kmin,
                alpha=p.alpha,
                dropout=p.dropout)
        else:
            raise ValueError("Unsupported model name: {}.".format(
                p.name_model))
    elif args.task == constants.SEG:
        if p.name_model == 'hybrid_model':
            model = models_seg.__dict__[p.name_model](
                num_classes=args.num_classes,
                num_masks=args.num_masks,
                backbone=p.backbone,
                pretrained=p.pre_trained,
                modalities=p.modalities,
                kmax=p.kmax,
                kmin=p.kmin,
                alpha=p.alpha,
                dropout=p.dropout,
                backbone_dropout=p.backbone_dropout,
                freeze_classifier=args.freeze_classifier,
                base_width=p.base_width,
                leak=p.leak)
        else:
            raise ValueError("Unknown model name for SEG task: {}".format(
                p.name_model))
    else:
        raise ValueError("Unknown task {}.".format(args.task))

    if verbose:
        print("`{}` was successfully instantiated. "
              "Nbr.params: {} .... [OK]".format(model, count_nb_params(model)))
    return model
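A minimal call for the classification task, assuming constants.CL and constants.LENET5 are the string identifiers used as keys into models_cl (the field names match the ones read in the body above):

args = Dict2Obj({
    "task": constants.CL,
    "num_classes": 10,
    "model": {"name_model": constants.LENET5},
})
model = instantiate_models(args)  # prints "`...` was successfully instantiated ..."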
Example #11
def do_Caltech_UCSD_Birds_200_2011():
    """
    Caltech-UCSD-Birds-200-2011.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.init_seed()

    announce_msg("Processing dataset: {}".format(constants.CUB))

    args = {
        "baseurl": get_rootpath_2_dataset(Dict2Obj({'dataset':
                                                    constants.CUB})),
        "folding": {
            "vl": 20
        },  # 80% for train, 20% for validation.
        "dataset": "Caltech-UCSD-Birds-200-2011",
        "fold_folder": "folds/Caltech-UCSD-Birds-200-2011",
        "img_extension": "bmp",
        "nbr_splits": 1,  # how many times to perform the k-folds over
        # the available train samples.
        "path_encoding":
        "folds/Caltech-UCSD-Birds-200-2011/encoding-origine.yaml",
        "nbr_classes": None  # Keep only 5 random classes. If you want
        # to use the entire dataset, set this to None.
    }
    args["nbr_folds"] = math.ceil(100. / args["folding"]["vl"])
    reproducibility.init_seed()
    al_split_Caltech_UCSD_Birds_200_2011(Dict2Obj(args))
    get_stats(Dict2Obj(args), split=0, fold=0, subset='train')
Example #12
def do_Caltech_UCSD_Birds_200_2011():
    """
    Caltech-UCSD-Birds-200-2011.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    username = getpass.getuser()
    if username == "xxxx2020":
        baseurl = "xxxx2020/datasets/Caltech-UCSD-Birds-200-2011"
    elif username == "xxxx2020":
        baseurl = "xxxx2020/datasets/Caltech-UCSD-Birds-200-2011"
    else:
        raise ValueError(
            "Cause: anonymization of the code. username `{}` unknown. Set the absolute path to the Caltech-UCSD-Birds-200-2011 dataset. See above for an example .... [NOT OK]"
            .format(username))

    args = {
        "baseurl": baseurl,
        "folding": {
            "vl": 20
        },  # 80% for train, 20% for validation.
        "dataset": "Caltech-UCSD-Birds-200-2011",
        "fold_folder": "folds/Caltech-UCSD-Birds-200-2011",
        "img_extension": "bmp",
        "nbr_splits":
        2,  # how many times to perform the k-folds over the available train samples.
        "path_encoding":
        "folds/Caltech-UCSD-Birds-200-2011/encoding-origine.yaml",
        "nbr_classes":
        None  # Keep only 5 random classes. If you want to use the entire dataset, set this to None.
    }
    args["nbr_folds"] = math.ceil(100. / args["folding"]["vl"])
    split_valid_Caltech_UCSD_Birds_200_2011(Dict2Obj(args))
Example #13
def instantiate_stain_augmentor(args):
    """
    Instantiate the stain augmentor.
    The possible classes are located in stain_tools.stain_augmentor.

    :param args: object. Contains the configuration of the exp that has been read from the yaml file.
    :return: an instance of stain augmentor, or None.
    """
    if args.stain_augmentor:
        error_msg = "You requested stain augmentation, but there was no " \
                    "stain normalization. It seems inconsistent. Modify the " \
                    "code in order to accept stain augmentation without " \
                    "stain normalization. Stain extraction is time " \
                    "consuming. To augment the stains, we use the same " \
                    "reference stain as in the stain normalization phase. " \
                    "If you want stain augmentation anyway, you need to " \
                    "provide a stain matrix, because stain extraction takes " \
                    "about 15 to 25 seconds per H&E image of size h x w: " \
                    "~1500x2000."
        assert "stain" in args.preprocessor.keys(), error_msg

        method = args.preprocessor["stain"]["method"]

        s_augmentor_config = Dict2Obj(args.stain_augmentor)

        if s_augmentor_config.name == "StainAugmentor":
            sigma1 = s_augmentor_config.sigma1
            sigma2 = s_augmentor_config.sigma2
            augment_background = s_augmentor_config.augment_background

            stain_augmentor = stain_augmentors.__dict__["StainAugmentor"](
                method, sigma1, sigma2, augment_background)

            print(
                "Stain augmentor `{}` was successfully instantiated .... [OK]".
                format(s_augmentor_config.name))

            return stain_augmentor
        else:
            raise ValueError(
                "Unsupported stain augmentor name `{}` .... [NOT OK]".format(
                    s_augmentor_config.name))
    else:
        print("Proceeding WITHOUT stain augmentation .... [OK]")
        return None
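A config sketch that reaches the StainAugmentor branch above; the preprocessor "stain" entry is required by the consistency assert, and the method and sigma values are illustrative assumptions:

args = Dict2Obj({
    "preprocessor": {"stain": {"method": "macenko"}},  # hypothetical method
    "stain_augmentor": {
        "name": "StainAugmentor",
        "sigma1": 0.2,              # assumed perturbation magnitudes
        "sigma2": 0.2,
        "augment_background": False,
    },
})
augmentor = instantiate_stain_augmentor(args)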
Example #14
def instantiate_models(args):
    """Instantiate the necessary models.
    Input:
        args: Dict2Obj. Contains the configuration of the exp that has been read
        from the yaml file.

    Output:
        instance of a model.
    """
    p = Dict2Obj(args.model)
    model = models.__dict__[p.name](pretrained=p.pretrained,
                                    num_classes=args.num_classes,
                                    modalities=p.modalities, kmax=p.kmax,
                                    kmin=p.kmin, alpha=p.alpha,
                                    dropout=p.dropout,
                                    poisson=(args.loss == "LossPO"),
                                    tau=args.tau)

    print("`{}` was successfully instantiated. Nbr.params: {} .... [OK]".format(
        model, count_nb_params(model)))
    return model
Example #15
def do_glas():
    """
    GlaS.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    username = getpass.getuser()
    if username == "xxxx2020":
        baseurl = "xxxx2020/datasets/GlaS-2015/Warwick QU Dataset (Released 2016_07_08)"
    elif username == "sbelharb":
        baseurl = "xxxx2020/datasets/GlaS-2015/Warwick QU Dataset (Released 2016_07_08)"
    else:
        raise ValueError(
            "Cause: anonymization of the code. username `{}` unknown. Set the absolute path to the Caltech-UCSD-Birds-200-2011 dataset. See above for an example .... [NOT OK]"
            .format(username))

    args = {
        "baseurl": baseurl,
        "folding": {
            "vl": 20
        },  # 80% for train, 20% for validation.
        "dataset": "glas",
        "fold_folder": "folds/glas-test",
        "img_extension": "bmp",
        "nbr_splits":
        2  # how many times to perform the k-folds over the available train samples.
    }
    args["nbr_folds"] = math.ceil(100. / args["folding"]["vl"])
    split_valid_glas(Dict2Obj(args))
Example #16
def do_glas():
    """
    GlaS.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    warnings.warn("You are accessing an anonymized part of the code. We are going to exit. Come here and fix this "
                  "according to your setup. Issue: absolute path to GlaS dataset.")
    sys.exit(0)

    username = getpass.getuser()
    if username == "XXXXXXXXXX":
        baseurl = "/XXXXXXXXX/XXXXXXXXXX/XXXXXXXXXXXXXXX/datasets/GlaS-2015/Warwick QU Dataset (Released 2016_07_08)"
    elif username == "XXXXXXXXXXXX":
        baseurl = "/XXXXXX/XXXXXXX/XXXXXXXXXXXXXXXXX/workspace/datasets/GlaS-2015/Warwick QU Dataset (Released 2016_07_08)"
    else:
        raise ValueError("Cause: anonymization of the code. username `{}` unknown. Set the absolute path to the GlaS dataset. See above for an example .... [NOT OK]".format(username))

    args = {"baseurl": baseurl,
            "folding": {"vl": 20},  # 80 % for train, 20% for validation.
            "dataset": "glas",
            "fold_folder": "folds/glas-test",
            "img_extension": "bmp",
            "nbr_splits": 2  # how many times to perform the k-folds over the available train samples.
            }
    args["nbr_folds"] = math.ceil(100. / args["folding"]["vl"])
    split_valid_glas(Dict2Obj(args))
Example #17
def do_bach_parta_2018():
    """
    BACH (PART A) 2018.
    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    username = getpass.getuser()
    if username == "brian":
        baseurl = "/media/brian/Seagate Backup Plus Drive/datasets/ICIAR-2018-BACH-Challenge"
    elif username == "sbelharb":
        baseurl = "/project/6004986/sbelharb/workspace/datasets/ICIAR-2018-BACH-Challenge"
    else:
        raise ValueError("username `{}` unknown .... [NOT OK]".format(username))

    args = {"baseurl": baseurl,
            "test_portion": 0.5,  # percentage of samples to take from test. The left over if for train; and it will
            # be divided into actual train, and validation sets.
            "folding": {"vl": 20},  # vl/100 % of train set will be used for validation, while the leftover (
            # 100-vl)/100% will be used for actual training.
            "name_classes": {'Normal': 0, 'Benign': 1, 'InSitu': 2, 'Invasive': 3},
            "dataset": "bc18bch",
            "fold_folder": "folds/bach-part-a-2018",
            "img_extension": "tif",
            "nbr_folds": 5,
            "nbr_splits": 2  # how many times to perform the k-folds over the available train samples.
            }
    create_k_folds_csv_bach_part_a(Dict2Obj(args))
Example #18
def do_Oxford_flowers_102():
    """
    Oxford-flowers-102.
    The train/valid/test sets are already provided.

    :return:
    """
    # ===============
    # Reproducibility
    # ===============
    reproducibility.set_seed()

    username = getpass.getuser()
    if username == "xxxx2020":
        baseurl = "xxxxx2020/datasets/Oxford-flowers-102"
    elif username == "xxxx2020":
        baseurl = "xxxx2020/datasets/Oxford-flowers-102"
    else:
        raise ValueError(
            "Cause: anonymization of the code. username `{}` unknown. Set the absolute path to the Caltech-UCSD-Birds-200-2011 dataset. See above for an example .... [NOT OK]"
            .format(username))

    args = {
        "baseurl": baseurl,
        "dataset": "Oxford-flowers-102",
        "fold_folder": "folds/Oxford-flowers-102",
        "img_extension": "jpg",
        "path_encoding": "folds/Oxford-flowers-102/encoding-origine.yaml"
    }
    # Convert masks into binary masks.
    create_bin_mask_Oxford_flowers_102(Dict2Obj(args))
    reproducibility.set_seed()
    split_Oxford_flowers_102(Dict2Obj(args))

    # Find min max size.
    def find_stats(argsx):
        """

        :param argsx:
        :return:
        """
        minh, maxh, minw, maxw = None, None, None, None
        baseurl = argsx.baseurl
        fin = find_files_pattern(join(baseurl, 'jpg'), '*.jpg')
        print("Computing stats from {} dataset ...".format(argsx.dataset))
        for f in tqdm.tqdm(fin, ncols=80, total=len(fin)):
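            # PIL's Image.size is (width, height), hence the (w, h) order.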
            w, h = Image.open(f, 'r').convert('RGB').size
            if minh is None:
                minh = h
                maxh = h
                minw = w
                maxw = w
            else:
                minh = min(minh, h)
                maxh = max(maxh, h)
                minw = min(minw, w)
                maxw = max(maxw, w)

        print('Stats {}:\n'
              'min h: {} \n'
              'max h: {} \n'
              'min w: {} \n'
              'max w: {} \n'.format(argsx.dataset, minh, maxh, minw, maxw))

    find_stats(Dict2Obj(args))
Example #19
def instantiate_patch_splitter(args, deterministic=True):
    """
    Instantiate the patch splitter and its relevant instances.

    For every set.
    However, for train, determninistic is set to False to allow dropout over the patches IF requiested.
    Over valid an test sets, deterministic is True.

    :param args: object. Contains the configuration of the exp that has been read from the yaml file.
    :param deterministic: True/False. If True, dropping some samples will be allowed IF it was requested. Should set
           to True only with the train set.
    :return: an instance of a patch splitter.
    """
    assert args.patch_splitter is not None, "We need a patch splitter, and you didn't specify one! .... [NOT OK]"
    patch_splitter_conf = Dict2Obj(args.patch_splitter)
    random_cropper = Dict2Obj(args.random_cropper)
    if patch_splitter_conf.name == "PatchSplitter":
        keep = 1.  # default value for the deterministic scenario: keep all patches (evaluation phase).
        if not deterministic:
            keep = patch_splitter_conf.keep

        h = patch_splitter_conf.h
        w = patch_splitter_conf.w
        h_ = patch_splitter_conf.h_
        w_ = patch_splitter_conf.w_

        # Instantiate the patch transforms if there is any.
        patch_transform = None
        if patch_splitter_conf.patch_transform:
            error_msg = "We support only one patch transform (or none) for now ... [NOT OK]"
            assert not isinstance(patch_splitter_conf.patch_transform,
                                  Sequence), error_msg

            patch_transform_config = Dict2Obj(
                patch_splitter_conf.patch_transform)
            if patch_transform_config.name == "PseudoFoveation":
                scale_factor = patch_transform_config.scale_factor
                int_eps = patch_transform_config.int_eps
                num_workers = patch_transform_config.num_workers

                patch_transform = loader.__dict__["PseudoFoveation"](
                    h, w, h_, w_, scale_factor, int_eps, num_workers)

                print(
                    "Patch transform `{}` was successfully instantiated "
                    "WITHIN a patch splitter `{}` with `{}` workers "
                    ".... [OK]".format(patch_transform_config.name,
                                       patch_splitter_conf.name,
                                       num_workers))

            elif patch_transform_config.name == "FastApproximationPseudoFoveation":
                scale_factor = patch_transform_config.scale_factor
                int_eps = patch_transform_config.int_eps
                nbr_kernels = patch_transform_config.nbr_kernels
                use_gpu = patch_transform_config.use_gpu
                gpu_id = patch_transform_config.gpu_id

                if gpu_id is None:
                    gpu_id = int(args.cudaid)
                    warnings.warn(
                        "You didn't specify the CUDA device ID to run `FastApproximationPseudoFoveation`. "
                        "We set it up to the same device where the model will be run `cuda:{}` .... [NOT "
                        "OK]".format(args.cudaid))

                assert args.num_workers in [0, 1], \
                    "'config.num_workers' must be in {0, 1} if " \
                    "loader.FastApproximationPseudoFoveation() is used. " \
                    "Multiprocessing does not play well when the DataLoader " \
                    "itself also uses multiprocessing .... [NOT OK]"

                patch_transform = loader.__dict__[
                    "FastApproximationPseudoFoveation"](h, w, h_, w_,
                                                        scale_factor, int_eps,
                                                        nbr_kernels, use_gpu,
                                                        gpu_id)

                print(
                    "Patch transform `{}` was successfully instantiated "
                    "WITHIN a patch splitter `{}` with `{}` kernels, "
                    "use_gpu=`{}`, and CUDA ID `{}` .... [OK]"
                    .format(patch_transform_config.name,
                            patch_splitter_conf.name, nbr_kernels, use_gpu,
                            gpu_id))

            else:
                raise ValueError(
                    "Unsupported patch transform `{}`  .... [NOT OK]".format(
                        patch_transform_config.name))
        else:
            print("Proceeding WITHOUT any patch transform  ..... [OK]")

        if patch_transform:
            patch_transform = [patch_transform]

        padding_mode = patch_splitter_conf.padding_mode
        assert hasattr(random_cropper, "make_cropped_perfect_for_split"), \
            "The random cropper `{}` does not have the attribute " \
            "`make_cropped_perfect_for_split` which we expect " \
            ".... [NOT OK]".format(random_cropper.name)
        if random_cropper.make_cropped_perfect_for_split and not deterministic:
            padding_mode = None
        patch_splitter = loader.__dict__["PatchSplitter"](
            h,
            w,
            h_,
            w_,
            padding_mode,
            patch_transforms=patch_transform,
            keep=keep)

        print("Patch splitter `{}` was successfully instantiated .... [OK]".
              format(patch_splitter_conf.name))

    else:
        raise ValueError(
            "Unsupported patch splitter `{}` .... [NOT OK]".format(
                patch_splitter_conf.name))

    return patch_splitter
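A config sketch that exercises the simplest path through the function above (no patch transform; deterministic=False, so `keep` and the padding-mode override apply). The patch sizes and keep ratio are illustrative assumptions:

args = Dict2Obj({
    "patch_splitter": {
        "name": "PatchSplitter",
        "h": 512, "w": 512,        # assumed patch size
        "h_": 480, "w_": 480,      # assumed inner size
        "keep": 0.5,               # drop half the patches when training
        "patch_transform": None,
        "padding_mode": "reflect",
    },
    "random_cropper": {"name": "RandomCropper",
                       "make_cropped_perfect_for_split": True},
})
splitter = instantiate_patch_splitter(args, deterministic=False)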
Example #20
def instantiate_optimizer(args, model, verbose=True):
    """Instantiate an optimizer.
    Input:
        args: object. Contains the configuration of the exp that has been
        read from the yaml file.
        model: a pytorch model with parameters.

    Output:
        optimizer: a pytorch optimizer.
        lrate_scheduler: a pytorch learning rate scheduler (or None).
    """
    params = copy.deepcopy(args.optimizer)
    params = standardize_otpmizers_params(params)
    params = Dict2Obj(params)

    if params.name_optimizer == "sgd":
        optimizer = SGD(model.parameters(),
                        lr=params.lr,
                        momentum=params.momentum,
                        dampening=params.dampening,
                        weight_decay=params.weight_decay,
                        nesterov=params.nesterov)
    elif params.name_optimizer == "adam":
        optimizer = Adam(params=model.parameters(),
                         lr=params.lr,
                         betas=(params.beta1, params.beta2),
                         eps=params.eps_adam,
                         weight_decay=params.weight_decay,
                         amsgrad=params.amsgrad)
    else:
        raise ValueError("Unsupported optimizer `{}` .... "
                         "[NOT OK]".format(args.optimizer["name"]))

    if verbose:
        print("Optimizer `{}` was successfully instantiated .... "
              "[OK]".format([
                  key + ":" + str(args.optimizer[key])
                  for key in args.optimizer.keys()
              ]))

    if params.lr_scheduler:
        if params.name_lr_scheduler == "step":
            lrate_scheduler = lr_scheduler.StepLR(optimizer,
                                                  step_size=params.step_size,
                                                  gamma=params.gamma,
                                                  last_epoch=params.last_epoch)

        elif params.name_lr_scheduler == "cosine":
            lrate_scheduler = lr_scheduler.CosineAnnealingLR(
                optimizer,
                T_max=params.t_max,
                eta_min=params.min_lr,
                last_epoch=params.last_epoch)

        elif params.name_lr_scheduler == "mystep":
            lrate_scheduler = my_lr_scheduler.MyStepLR(
                optimizer,
                step_size=params.step_size,
                gamma=params.gamma,
                last_epoch=params.last_epoch,
                min_lr=params.min_lr)

        elif params.name_lr_scheduler == "mycosine":
            lrate_scheduler = my_lr_scheduler.MyCosineLR(
                optimizer,
                coef=params.coef,
                max_epochs=params.max_epochs,
                min_lr=params.min_lr,
                last_epoch=params.last_epoch)

        elif params.name_lr_scheduler == "multistep":
            lrate_scheduler = lr_scheduler.MultiStepLR(
                optimizer,
                milestones=params.milestones,
                gamma=params.gamma,
                last_epoch=params.last_epoch)

        else:
            raise ValueError("Unsupported learning rate scheduler `{}` .... "
                             "[NOT OK]".format(params.name_lr_scheduler))

        if verbose:
            print("Learning scheduler `{}` was successfully "
                  "instantiated "
                  ".... [OK]".format(params.name_lr_scheduler))
    else:
        lrate_scheduler = None

    return optimizer, lrate_scheduler
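For reference, a minimal SGD-plus-step-scheduler config in the standardized key names that the body above reads (assumption: standardize_otpmizers_params passes already-standardized keys through unchanged; the raw YAML key names are not shown in these examples):

import torch.nn as nn

model = nn.Linear(10, 2)
args = Dict2Obj({"optimizer": {
    "name_optimizer": "sgd", "lr": 0.01, "momentum": 0.9,
    "dampening": 0.0, "weight_decay": 1e-4, "nesterov": True,
    "lr_scheduler": True, "name_lr_scheduler": "step",
    "step_size": 30, "gamma": 0.1, "last_epoch": -1,
}})
optimizer, lrate_scheduler = instantiate_optimizer(args, model)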