Code example #1
    def call(self, inputs, targets, *args, **kwargs):
        r = np.random.rand(1)
        batch_size, C, H, W = inputs.size()
        self.device = ifnone(self.device, inputs.device)
        lam_a = torch.ones(batch_size, device=self.device)

        target_a = targets
        target_b = targets

        if r < self.conf_prob:
            self._is_active = True

            if self.alpha > 0.0:
                lam = np.random.beta(self.alpha, self.alpha)
            else:
                lam = 1

            rand_index = torch.randperm(batch_size, device=self.device)
            lam = torch.tensor(lam, device=self.device)
            target_a, target_b = targets, targets[rand_index]

            x1, y1, x2, y2 = self.rand_bbox(W, H, lam)
            inputs[:, :, x1:x2, y1:y2] = inputs[rand_index, :, x1:x2, y1:y2]

            # adjust lambda to exactly match pixel ratio
            lam = (1 - ((x2 - x1) * (y2 - y1)) / float(W * H)).item()
            lam_a = lam_a * lam

        else:
            self._is_active = False

        lam_b = 1 - lam_a
        return inputs, lam_a, lam_b, target_a, target_b
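Note: `rand_bbox` is called above but not shown. A minimal standalone sketch in the style of the original CutMix paper, assuming the same `(W, H, lam)` argument order and `(x1, y1, x2, y2)` return order:

import numpy as np

def rand_bbox(W, H, lam):
    # the cut patch covers roughly a (1 - lam) fraction of the image area
    cut_rat = np.sqrt(1.0 - lam)
    cut_w, cut_h = int(W * cut_rat), int(H * cut_rat)
    # sample the patch centre uniformly, then clip the box to the image
    cx, cy = np.random.randint(W), np.random.randint(H)
    x1 = np.clip(cx - cut_w // 2, 0, W)
    x2 = np.clip(cx + cut_w // 2, 0, W)
    y1 = np.clip(cy - cut_h // 2, 0, H)
    y2 = np.clip(cy + cut_h // 2, 0, H)
    return x1, y1, x2, y2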
Code example #2
    def call(self, inputs, targets, *args, **kwargs):
        r = np.random.rand(1)
        batch_size = inputs.size()[0]

        self.device = ifnone(self.device, inputs.device)

        lam_a = torch.ones(batch_size, device=self.device)

        target_a = targets
        target_b = targets

        if r < self.conf_prob:
            self._is_active = True
            rand_index = torch.randperm(batch_size, device=self.device)

            lam = self.distrib

            lam = torch.tensor(lam, device=self.device)
            inputs = lam * inputs + (1 - lam) * inputs[rand_index, :]
            target_a, target_b = targets, targets[rand_index]
            lam_a = lam_a * lam

        else:
            self._is_active = False

        lam_b = 1 - lam_a

        return inputs, lam_a, lam_b, target_a, target_b
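The `lam_a`/`lam_b` values returned by these `call` methods are per-sample mixing weights. A hedged sketch of how a training loop might consume them, assuming a cross-entropy criterion with `reduction="none"`:

import torch.nn.functional as F

def mixed_loss(logits, target_a, target_b, lam_a, lam_b):
    # weight each sample's two label sets by its mixing coefficients
    loss_a = F.cross_entropy(logits, target_a, reduction="none")
    loss_b = F.cross_entropy(logits, target_b, reduction="none")
    return (lam_a * loss_a + lam_b * loss_b).mean()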
Code example #3
File: display.py Project: benihime91/gale
def show_title(o, ax=None, ctx=None, label=None, color="black", **kwargs):
    "Set title of `ax` to `o`, or print `o` if `ax` is `None`"
    ax = ifnone(ax, ctx)
    if ax is None:
        print(o)
    elif hasattr(ax, "set_title"):
        t = ax.title.get_text()
        if len(t) > 0:
            o = t + "\n" + str(o)
        ax.set_title(o, color=color)
    return ax
Code example #4
File: heads.py Project: benihime91/gale
    def __init__(
        self,
        input_shape: ShapeSpec,
        num_classes: int,
        act: str = "ReLU",
        lin_ftrs: Optional[List] = None,
        ps: Union[List, int] = 0.5,
        concat_pool: bool = True,
        first_bn: bool = True,
        bn_final: bool = False,
        lr: float = 2e-03,
        wd: float = 0,
        filter_wd: bool = False,
    ):
        super(FastaiHead, self).__init__()
        in_planes = input_shape.channels
        pool = "catavgmax" if concat_pool else "avg"
        pool, nf = _create_pool(in_planes, num_classes, pool, use_conv=False)

        # fmt: off
        lin_ftrs = [nf, 512, num_classes
                    ] if lin_ftrs is None else [nf] + lin_ftrs + [num_classes]
        # fmt: on

        bns = [first_bn] + [True] * len(lin_ftrs[1:])

        ps = L(ps)

        if len(ps) == 1:
            ps = [ps[0] / 2] * (len(lin_ftrs) - 2) + ps

        act = ifnone(act, "ReLU")
        # fmt: off
        actns = [ACTIVATION_REGISTRY.get(act)
                 (inplace=True)] * (len(lin_ftrs) - 2) + [None]
        if bn_final:
            actns[-1] = ACTIVATION_REGISTRY.get(act)(inplace=True)
        # fmt: on

        self.layers = [pool]

        for ni, no, bn, p, actn in zip(lin_ftrs[:-1], lin_ftrs[1:], bns, ps,
                                       actns):
            self.layers += nn.Sequential(nn.BatchNorm1d(ni), nn.Dropout(p),
                                         nn.Linear(ni, no, bias=not bn), actn)

        if bn_final:
            self.layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
        self.layers = nn.Sequential(*[l for l in self.layers if l is not None])

        store_attr("lr, wd, filter_wd")
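A hedged construction sketch, assuming `ShapeSpec` exposes a `channels` field as the signature above implies:

# head on top of a 2048-channel backbone output, one hidden linear layer
head = FastaiHead(input_shape=ShapeSpec(channels=2048), num_classes=10,
                  lin_ftrs=[512], ps=0.5, concat_pool=True)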
Code example #5
def register_dataset_from_folders(name: str,
                                  image_root: str,
                                  class_map: Optional[str] = "",
                                  mapper: Optional[Union[ClassificationMapper,
                                                         Callable]] = None,
                                  **kwargs):
    """
    Register a dataset present in folders (see `FolderParser`) to DatasetCatalog.
    `name` is a `str` that identifies a dataset, e.g. "coco_2014_train".
    """
    parser = FolderParser(root=image_root, class_map=class_map)
    mapper = ifnone(mapper, ClassificationMapper(**kwargs))
    DatasetCatalog.register(
        name, lambda: ClassificationDataset(mapper=mapper, parser=parser))
    _logger.info("Dataset: {} registered to DatasetCatalog".format(name))
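A usage sketch; the dataset name and path below are hypothetical, and a detectron2-style `DatasetCatalog.get` is assumed:

# register a folder dataset; the lambda above makes construction lazy
register_dataset_from_folders("pets_train", image_root="/data/pets/train")
dataset = DatasetCatalog.get("pets_train")  # built on first access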
Code example #6
    def __init__(self, pypinm, deps=None, import_nm=None, path=None):
        store_attr('pypinm,deps')
        self.import_nm = ifnone(import_nm, pypinm)
        try:
            self.ver = str(latest_pypi(pypinm))
        except Exception:
            raise ValueError(f'package name: {pypinm} not found on pypi.')
        self.info = pypi_json(f'{pypinm}/{self.ver}')['info']
        self.path = _mkdir(ifnone(path, self.pypinm))
        self.meta = {
            'package': {
                'name': self.pypinm,
                'version': self.ver
            },
            'build': {
                'number': 0,
                'binary_relocation': False,
                'detect_binary_files_with_prefix': False
            },
            'requirements': {
                'host': ['pip', 'python'],
                'run': ['python'] + list(L(self.deps))
            },
            'test': {
                'imports': [self.import_nm],
                'requires': ['pip']
            },
            'about': {
                'home': self.info['home_page'],
                'summary': self.info['summary'],
                'license': self.info['license']
            },
            'extra': {
                'recipe-maintainers': ['jph00']
            }
        }
Code example #7
def seed_everything(seed: Optional[int] = None) -> int:
    """Sets a seed for `pytorch`, `numpy` and `python.random`, and sets the
       `PYTHONHASHSEED` environment variable. Defaults to `42` if `seed` is None.
    """
    seed = ifnone(seed, 42)
    np.random.seed(seed)
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)

    # pytorch-seeds
    torch.manual_seed(seed)

    # cuda seeds
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    return seed
Code example #8
def register_dataset_from_df(name: str,
                             df: pd.DataFrame,
                             path_column: str,
                             label_column: str,
                             class_map: Optional[str] = " ",
                             mapper: Optional[Union[ClassificationMapper,
                                                    Callable]] = None,
                             **kwargs):
    """
    Register a dataset present in a pandas dataframe (see `PandasParser`) to DatasetCatalog.
    `name` is a `str` that identifies a dataset, e.g. "coco_2014_train".
    """
    parser = PandasParser(df, path_column, label_column)
    mapper = ifnone(mapper, ClassificationMapper(**kwargs))
    DatasetCatalog.register(
        name, lambda: ClassificationDataset(mapper=mapper, parser=parser))

    _logger.info("Dataset: {} registered to DatasetCatalog".format(name))
Code example #9
    def __init__(
        self,
        cfg: DictConfig,
        trainer: Optional[pl.Trainer] = None,
        metrics: Union[torchmetrics.Metric, Mapping, Sequence, None] = None,
    ):
        """
        Base class from which all PyTorch Lightning Tasks in Gale should inherit.
        Provides a few helper functions primarily for optimization.

        Arguments:
        1. `cfg` `(DictConfig)`: configuration object; it should inherit from `BaseGaleConfig`.
        2. `trainer` `(Optional, pl.Trainer)`: PyTorch Lightning Trainer instance
        3. `metrics` `(Optional)`: Metrics to compute for training and evaluation.
        """
        super().__init__()
        self._cfg = OmegaConf.structured(cfg)

        if trainer is not None and not isinstance(trainer, pl.Trainer):
            raise ValueError(
                f"Trainer constructor argument must be either None or pl.Trainer.But got {type(trainer)} instead."
            )

        self._train_dl = noop
        self._validation_dl = noop
        self._test_dl = noop

        self._optimizer = noop
        self._scheduler = noop

        self._trainer = ifnone(trainer, noop)
        self._metrics = setup_metrics(metrics)
        self._model = noop

        self.save_hyperparameters(self._cfg)

        # if trainer is not passed then the model is being restored
        if trainer is not None:
            self.is_restored = False
        else:
            self.is_restored = True
Code example #10
def register_torchvision_dataset(name: str,
                                 dataset: Dataset,
                                 mapper: Optional[Union[ClassificationMapper,
                                                        Callable]] = None,
                                 **kwargs):
    """
    Register a dataset in the gale `DatasetCatalog` from an
    existing torchvision dataset. You can pass in `augmentations` to
    specify the transformations you want to apply to the images
    in the dataset. Optionally, `mapper` can also be passed in, or else
    the default `ClassificationMapper` will be used to map the dataset into
    the gale `ClassificationDataset` format.

    `name` is a `str` that identifies a dataset, e.g. "coco_2014_train".
    """
    mapper = ifnone(mapper, ClassificationMapper(**kwargs))

    DatasetCatalog.register(
        name, lambda: ClassificationDataset(mapper=mapper, parser=dataset))
    _logger.info("Dataset: {} registered to DatasetCatalog".format(name))
Code example #11
def split_dataframe_into_stratified_folds(
    dataframe: pd.DataFrame,
    label_column: str,
    fold_column: Optional[str] = None,
    **kwargs,
) -> pd.DataFrame:
    """
    Makes stratified folds in `dataframe`. `label_column` is the column used for the split,
    and the fold id is written to `fold_column`. Extra keyword arguments (e.g. `random_state`
    for reproducibility) are forwarded to `StratifiedKFold`.
    """
    # preserve the original copy of the dataframe
    data = dataframe.copy()
    skf = StratifiedKFold(**kwargs)
    fold_column = ifnone(fold_column, "kfold")

    ys = data[label_column]
    data[fold_column] = -1

    for i, (train_index, test_index) in enumerate(skf.split(X=data, y=ys)):
        data.loc[test_index, fold_column] = i

    return data
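Since the extra keyword arguments are forwarded to `StratifiedKFold`, options like `n_splits`, `shuffle` and `random_state` can be passed directly. A small usage sketch:

import pandas as pd

df = pd.DataFrame({"path": [f"img_{i}.jpg" for i in range(10)],
                   "label": [0, 1] * 5})
folds = split_dataframe_into_stratified_folds(
    df, label_column="label", n_splits=5, shuffle=True, random_state=42)
# each fold id 0..4 appears twice, one row per class
print(folds["kfold"].value_counts())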
Code example #12
File: display.py Project: benihime91/gale
def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):
    "Show a PIL or PyTorch image on `ax`."
    # Handle pytorch axis order
    if hasattrs(im, ("data", "cpu", "permute")):
        im = im.data.cpu()
        if im.shape[0] < 5:
            im = im.permute(1, 2, 0)
    elif not isinstance(im, np.ndarray):
        im = array(im)
    # Handle 1-channel images
    if im.shape[-1] == 1:
        im = im[..., 0]

    ax = ifnone(ax, ctx)
    if figsize is None:
        figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, **kwargs)
    if title is not None:
        ax.set_title(title)
    ax.axis("off")
    return ax
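A quick usage sketch; the helper permutes CHW tensors to HWC before plotting:

import torch

t = torch.rand(3, 64, 64)  # CHW float tensor
ax = show_image(t, title="random noise")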
Code example #13
    def __call__(self, inputs: torch.Tensor, targets: torch.Tensor, epoch: int,
                 *args, **kwargs):
        mask = random.random()
        self.device = ifnone(self.device, inputs.device)
        # mixup-without-hesitation implementation
        if epoch >= self.epochs[0]:
            threshold = (self.total_epochs - epoch) / (self.total_epochs -
                                                       self.epochs[0])
            if mask < threshold:
                inputs, targets_a, targets_b = self.mixup_data(inputs, targets)
            else:
                targets_a, targets_b = targets, targets
                self.lam = 1.0
        elif epoch >= self.epochs[1]:
            if epoch % 2 == 0:
                inputs, targets_a, targets_b = self.mixup_data(inputs, targets)
            else:
                targets_a, targets_b = targets, targets
                self.lam = 1.0
        else:
            inputs, targets_a, targets_b = self.mixup_data(inputs, targets)

        self.target_a, self.target_b = targets_a, targets_b
        return inputs
Code example #14
    def process_optim_config(self, opt_conf: DictConfig) -> DictConfig:
        """
        Prepares an optimizer from a string name and its optional config parameters.
        Preprocess the optimization config and adds some infered values like max_steps, max_epochs, etc.
        This method also fills in the values for `max_iters` & `epochs`, `steps_per_epoch` if
        the values are `-1`
        """
        # some optimizers/schedulers need parameters only known dynamically
        # allow users to override the getter to instantiate them lazily

        opt_conf = copy.deepcopy(opt_conf)

        # Force into DictConfig structure
        opt_conf = OmegaConf.create(opt_conf)

        if self._trainer.max_epochs is None and self._trainer.max_steps is None:
            raise ValueError(
                "Either one of max_epochs or max_epochs must be provided in Trainer"
            )
        else:
            max_steps, steps = self.num_training_steps()
            max_epochs = ifnone(self._trainer.max_epochs, max_steps // steps)

        vals = dict(steps_per_epoch=steps,
                    max_steps=max_steps,
                    max_epochs=max_epochs)

        # Force into native dictionary
        opt_conf = OmegaConf.to_container(opt_conf, resolve=True)

        for key, value in vals.items():
            if opt_conf[key] < 1:
                opt_conf[key] = value

        # populate values in learning rate schedulers initialization arguments
        opt_conf = OmegaConf.create(opt_conf)
        sched_config = OmegaConf.to_container(opt_conf.scheduler.init_args,
                                              resolve=True)

        # Force into DictConfig structure
        opt_conf = OmegaConf.create(opt_conf)

        # @TODO: Find a better way to do this
        if "max_iters" in sched_config:
            if sched_config["max_iters"] == -1:
                OmegaConf.update(opt_conf, "scheduler.init_args.max_iters",
                                 max_steps)
                log_main_process(
                    _logger,
                    logging.DEBUG,
                    f"Set the value of 'max_iters' to be {max_steps}.",
                )

        if "epochs" in sched_config:
            if sched_config["epochs"] == -1:
                OmegaConf.update(opt_conf, "scheduler.init_args.epochs",
                                 max_epochs)
                log_main_process(
                    _logger,
                    logging.DEBUG,
                    f"Set the value of 'epochs' to be {max_epochs}.",
                )

        if "steps_per_epoch" in sched_config:
            if sched_config["steps_per_epoch"] == -1:
                OmegaConf.update(opt_conf,
                                 "scheduler.init_args.steps_per_epoch", steps)
                log_main_process(
                    _logger,
                    logging.DEBUG,
                    f"Set the value of 'steps_per_epoch' to be {steps}.",
                )

        if "max_steps" in sched_config:
            if sched_config["max_steps"] == -1:
                OmegaConf.update(opt_conf, "scheduler.init_args.max_steps",
                                 max_steps)
                log_main_process(
                    _logger,
                    logging.DEBUG,
                    f"Set the value of 'max_steps' to be {max_steps}.",
                )

        return opt_conf
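A hedged sketch of the shape of config this method can process; the layout is inferred from the reads in the method above, and the `-1` values are the ones that get filled in:

from omegaconf import OmegaConf

opt_conf = OmegaConf.create({
    "max_steps": -1,        # replaced by the inferred total step count
    "max_epochs": -1,       # replaced by the inferred epoch count
    "steps_per_epoch": -1,  # replaced by the inferred steps per epoch
    "scheduler": {
        "init_args": {"max_iters": -1, "epochs": -1,
                      "steps_per_epoch": -1, "max_steps": -1},
    },
})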
Code example #15
    def call(self, inputs, targets, model, *args, **kwargs):
        """For `Snapmix` a `model` must be passed & the `model` should have `get_classifier` method
        and a `forward_features` method.
        """
        assert model is not None, "Snapmix is not possible without model"

        r = np.random.rand(1)
        batch_size, C, H, W = inputs.size()
        self.device = ifnone(self.device, inputs.device)

        lam_a = torch.ones(batch_size, device=self.device)
        lam_b = 1 - lam_a

        target_a = targets
        target_b = targets

        if r < self.conf_prob:
            self._is_active = True
            wfmaps, _ = self.get_spm(inputs, targets, model=model)

            if self.alpha > 0.:
                lam = np.random.beta(self.alpha, self.alpha)
                lam1 = np.random.beta(self.alpha, self.alpha)
            else:
                lam = 1
                lam1 = 1

            rand_index = torch.randperm(batch_size, device=self.device)
            lam = torch.tensor(lam, device=self.device)
            lam1 = torch.tensor(lam1, device=self.device)

            wfmaps_b = wfmaps[rand_index, :, :]
            target_b = targets[rand_index]

            same_label = targets == target_b
            bbx1, bby1, bbx2, bby2 = self.rand_bbox(W, H, lam)
            bbx1_1, bby1_1, bbx2_1, bby2_1 = self.rand_bbox(W, H, lam1)

            area = (bby2 - bby1) * (bbx2 - bbx1)
            area1 = (bby2_1 - bby1_1) * (bbx2_1 - bbx1_1)

            if area1 > 0 and area > 0:
                ncont = inputs[rand_index, :, bbx1_1:bbx2_1,
                               bby1_1:bby2_1].clone()
                ncont = F.interpolate(ncont,
                                      size=(bbx2 - bbx1, bby2 - bby1),
                                      mode='bilinear',
                                      align_corners=True)
                inputs[:, :, bbx1:bbx2, bby1:bby2] = ncont
                lam_a = 1 - wfmaps[:, bbx1:bbx2, bby1:bby2].sum(2).sum(1) / (
                    wfmaps.sum(2).sum(1) + 1e-8)
                lam_b = wfmaps_b[:, bbx1_1:bbx2_1, bby1_1:bby2_1].sum(2).sum(
                    1) / (wfmaps_b.sum(2).sum(1) + 1e-8)
                tmp = lam_a.clone()
                lam_a[same_label] += lam_b[same_label]
                lam_b[same_label] += tmp[same_label]
                lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) /
                           (inputs.size()[-1] * inputs.size()[-2]))
                lam_a[torch.isnan(lam_a)] = lam
                lam_b[torch.isnan(lam_b)] = 1 - lam
        else:
            self._is_active = False

        return inputs, lam_a, lam_b, target_a, target_b
Code example #16
    def reload_transforms(self, transform, backend=None):
        "Change the transformations used after `__init__`"
        self.backend = ifnone(backend, self.backend)
        self.transform = transform
        self._setup_loader()