Code Example #1
 def __init__(self,
              line_sep="\n",
              field_sep=r"(?:[\,:\-]\s*)|(?:\s+)",
              ints=True,
              skip_last=True,
              trim_blank=True):
     store_attr()
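
The snippets in this listing all use `store_attr` from fastcore. As a minimal sketch of the no-argument form shown above (assuming `fastcore.basics.store_attr`, which copies the calling `__init__`'s arguments onto `self`; the `Parser` class here is hypothetical):

from fastcore.basics import store_attr

class Parser:
    def __init__(self, line_sep="\n", ints=True):
        # With no arguments, store_attr() stores every argument of the
        # calling __init__ (except self) as an attribute of the same name.
        store_attr()

p = Parser()
print(p.line_sep == "\n", p.ints)  # True True
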
Code Example #2
File: heads.py Project: benihime91/gale
    def __init__(
        self,
        input_shape: ShapeSpec,
        num_classes: int,
        pool_type: str = "avg",
        drop_rate: float = 0.0,
        use_conv: bool = False,
        lr: float = 2e-03,
        wd: float = 0,
        filter_wd: bool = False,
    ):
        super(FullyConnectedHead, self).__init__()
        self.drop_rate = drop_rate
        in_planes = input_shape.channels
        # fmt: off
        self.global_pool, num_pooled_features = _create_pool(in_planes,
                                                             num_classes,
                                                             pool_type,
                                                             use_conv=use_conv)
        # fmt: on
        self.fc = _create_fc(num_pooled_features,
                             num_classes,
                             use_conv=use_conv)
        self.flatten_after_fc = use_conv and pool_type

        store_attr("lr, wd, filter_wd")
Code Example #3
    def __call__(self,
                 inputs: torch.Tensor,
                 targets: torch.Tensor,
                 model: Module = None,
                 *args,
                 **kwargs):
        "calls function"
        store_attr()
        if self.num_iters is not None:
            if self._check_has_ended():
                self._is_active = False
            else:
                if self._curr_iter < self.num_iters:
                    outputs = self.call(inputs, targets=targets, model=model)
                    inputs, self.lam_a, self.lam_b, self.target_a, self.target_b = outputs
                else:
                    self._is_active = False

            if self._check_has_ended():
                if not self._done_logging:
                    self._done_logging = True
                self._is_active = False

        else:
            if self._check_has_ended():
                if not self._done_logging:
                    self._done_logging = True
                self._is_active = False
            else:
                outputs = self.call(inputs, targets=targets, model=model)
                inputs, self.lam_a, self.lam_b, self.target_a, self.target_b = outputs

        return inputs
Code Example #4
 def __init__(self, day, year=2020, reader=read, parser={}):
     self.f_reader = (lambda x: x)
     self.fn_part1 = {}
     self.fn_part2 = {}
     self.f_reader = reader
     self.f_parser = parser
     self.f_transformer = (lambda x: x)
     store_attr(but='reader,parser')
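
Code Example #4 uses the `but=` keyword, which (per fastcore's documentation) stores every argument except the listed names. A minimal sketch with a hypothetical `Day` class:

from fastcore.basics import store_attr

class Day:
    def __init__(self, day, year=2020, reader=None, parser=None):
        # Store day and year, but skip reader and parser; those are kept
        # under different attribute names, as in the example above.
        store_attr(but='reader,parser')
        self.f_reader = reader
        self.f_parser = parser

d = Day(1)
print(d.day, d.year, hasattr(d, 'reader'))  # 1 2020 False
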
Code Example #5
 def __init__(
     self,
     eps: float = 0.1,
     reduction: str = "mean",
     weight: Optional[Tensor] = None,
 ):
     super(LabelSmoothingCrossEntropy, self).__init__()
     store_attr("eps, reduction, weight")
Code Example #6
 def __init__(
     self,
     alpha: float = -1,
     gamma: float = 2,
     reduction: str = "mean",
 ):
     super(BinarySigmoidFocalLoss, self).__init__()
     store_attr("alpha, gamma, reduction")
Code Example #7
 def __init__(self,
              alpha: float = 0.5,
              conf_prob: float = 1.0,
              num_iters: int = None,
              **kwargs):
     store_attr()
     if self.alpha > 0.0:
         self.distrib = np.random.beta(self.alpha, self.alpha)
     else:
         self.distrib = 1
Code Example #8
 def __init__(self,
              df,
              fn_col,
              label_col=None,
              transform=None,
              train: bool = True,
              backend="torchvision"):
     store_attr("df, fn_col, label_col, transform, train, backend")
     self.df = df.copy()
     self._setup_loader()
Code Example #9
    def __init__(
        self,
        alpha: float = 1,
        gamma: float = 2,
        reduction: str = "mean",
        eps: float = 1e-8,
    ):

        super(FocalLoss, self).__init__()
        store_attr("alpha, gamma, reduction, eps")
Code Example #10
File: heads.py Project: benihime91/gale
    def __init__(
        self,
        input_shape: ShapeSpec,
        num_classes: int,
        act: str = "ReLU",
        lin_ftrs: Optional[List] = None,
        ps: Union[List, int] = 0.5,
        concat_pool: bool = True,
        first_bn: bool = True,
        bn_final: bool = False,
        lr: float = 2e-03,
        wd: float = 0,
        filter_wd: bool = False,
    ):
        super(FastaiHead, self).__init__()
        in_planes = input_shape.channels
        pool = "catavgmax" if concat_pool else "avg"
        pool, nf = _create_pool(in_planes, num_classes, pool, use_conv=False)

        # fmt: off
        lin_ftrs = [nf, 512, num_classes
                    ] if lin_ftrs is None else [nf] + lin_ftrs + [num_classes]
        # fmt: on

        bns = [first_bn] + [True] * len(lin_ftrs[1:])

        ps = L(ps)

        if len(ps) == 1:
            ps = [ps[0] / 2] * (len(lin_ftrs) - 2) + ps

        act = ifnone(act, "ReLU")
        # fmt: off
        actns = [ACTIVATION_REGISTRY.get(act)
                 (inplace=True)] * (len(lin_ftrs) - 2) + [None]
        if bn_final:
            actns[-1] = ACTIVATION_REGISTRY.get(act)(inplace=True)
        # fmt: on

        self.layers = [pool]

        for ni, no, bn, p, actn in zip(lin_ftrs[:-1], lin_ftrs[1:], bns, ps,
                                       actns):
            self.layers += nn.Sequential(nn.BatchNorm1d(ni), nn.Dropout(p),
                                         nn.Linear(ni, no, bias=not bn), actn)

        if bn_final:
            self.layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
        self.layers = nn.Sequential(*[l for l in self.layers if l is not None])

        store_attr("lr, wd, filter_wd")
Code Example #11
File: backbones.py Project: benihime91/gale
    def __init__(
        self,
        model_name: str,
        input_shape: ShapeSpec,
        act: str = None,
        lr: float = 1e-03,
        wd: float = 1e-02,
        lr_div: float = 100,
        freeze_at: int = 0,
        freeze_bn: bool = False,
        **kwargs,
    ):
        super(ResNetBackbone, self).__init__()
        store_attr("freeze_at, wd, lr, lr_div, input_shape, freeze_bn")

        if act is not None:
            act = ACTIVATION_REGISTRY.get(act)

        model = timm.create_model(
            model_name,
            act_layer=act,
            global_pool="",
            num_classes=0,
            in_chans=input_shape.channels,
            **kwargs,
        )

        assert isinstance(model, ResNet), "ResNetBackbone supports only ResNet models"
        # save some of the input information from timm models
        self.num_features = model.num_features
        self.timm_model_cfg = model.default_cfg

        # break up the model
        # the stem for the resnet model consists of a convolutional block, norm, act, pool
        stem = nn.Sequential(model.conv1, model.bn1, model.act1, model.maxpool)

        # stages will consist of the remaining 4 layers
        stages = [model.layer1, model.layer2, model.layer3, model.layer4]
        stages = nn.Sequential(*stages)

        # create the module
        self.resnet = nn.Sequential(stem, stages)
        self.prepare_model(self.resnet)
Code Example #12
File: backbones.py Project: benihime91/gale
    def __init__(
        self,
        model_name: str,
        input_shape: ShapeSpec,
        act: str = None,
        lr: float = 1e-03,
        wd: float = 0,
        freeze_bn: bool = False,
        freeze_at: int = False,
        filter_wd: bool = False,
        **kwargs,
    ):
        super(TimmBackboneBase, self).__init__()

        store_attr("lr, wd, filter_wd, input_shape")

        if act is not None:
            act = ACTIVATION_REGISTRY.get(act)

        model = timm.create_model(
            model_name,
            act_layer=act,
            global_pool="",
            num_classes=0,
            in_chans=input_shape.channels,
            **kwargs,
        )

        # save some information from the timm model
        self.num_features = model.num_features
        self.timm_model_cfg = model.default_cfg
        self.model = prepare_backbone(model)

        if not freeze_at:
            self.unfreeze()
        else:
            self.freeze_to(freeze_at)

        if freeze_bn:
            set_bn_eval(self.model)
Code Example #13
 def __init__(self, pypinm, deps=None, import_nm=None, path=None):
     store_attr('pypinm,deps')
     self.import_nm = ifnone(import_nm, pypinm)
     try:
         self.ver = str(latest_pypi(pypinm))
     except:
         raise ValueError(f'package name: {pypinm} not found on pypi.')
     self.info = pypi_json(f'{pypinm}/{self.ver}')['info']
     self.path = _mkdir(ifnone(path, self.pypinm))
     self.meta = {
         'package': {
             'name': self.pypinm,
             'version': self.ver
         },
         'build': {
             'number': 0,
             'binary_relocation': False,
             'detect_binary_files_with_prefix': False
         },
         'requirements': {
             'host': ['pip', 'python'],
             'run': ['python'] + list(L(self.deps))
         },
         'test': {
             'imports': [self.import_nm],
             'requires': ['pip']
         },
         'about': {
             'home': self.info['home_page'],
             'summary': self.info['summary'],
             'license': self.info['license']
         },
         'extra': {
             'recipe-maintainers': ['jph00']
         }
     }
Code Example #14
 def __init__(self, *args, **kwargs): 
     store_attr()
     for k, v in kwargs.items():
         vars(self)[k] = v
Code Example #15
File: ML.py Project: HiiGHoVuTi/pyqol
 def __init__(self, policy, optimizer, hyperparams):
     store_attr()
Code Example #16
 def __init__(self, f): 
     store_attr()
Code Example #17
 def __init__(self, default = _NoMatchError,): 
     store_attr()
     self.cases = L()
     self.preprocesses = L()
Code Example #18
 def __init__(self, alpha: float, epochs: list, total_epochs: int, *args,
              **kwargs):
     super(MixupWH, self).__init__()
     store_attr()
     self.device = None
Code Example #19
File: ML.py Project: HiiGHoVuTi/pyqol
 def __init__(self, new_length):
     super(_Flatten, self).__init__()
     store_attr()
Code Example #20
 def __init__(self, message):
     store_attr()
     self.logs = L()
Code Example #21
File: vision_transformer.py Project: benihime91/gale
    def __init__(
        self,
        model_name: str,
        input_shape: ShapeSpec,
        lr: float = 1e-03,
        wd: float = 1e-05,
        pretrained: bool = True,
        freeze_to: Optional[int] = None,
        finetune: Optional[bool] = None,
        act: Optional[str] = None,
        reset_classifier: bool = True,
        filter_wd: bool = True,
        **kwargs,
    ):
        """
        Arguments:
        1. `input_shape` (ShapeSpec): input image shape. For ViT, `height=width`; check the above link for available model shapes.
        2. `model_name` (str): name of the ViT model; check the above link for available models.
        3. `pretrained` (bool): load weights pretrained on ImageNet.
        4. `act` (str): name of the activation layer. Must be registered in `ACTIVATION_REGISTRY`.
        5. `num_classes` (int): number of output classes.
        6. `drop_rate` (float): dropout rate.
        7. `attn_drop_rate` (float): attention dropout rate.
        8. `drop_path_rate` (float): stochastic depth rate.
        9. `reset_classifier` (bool): resets the weights of the classifier.
        10. `freeze_to` (int): freeze the parameter groups of the model up to `n`.
        11. `finetune` (bool): freeze all the layers and keep only the `classifier` trainable.
        """
        super(VisionTransformer, self).__init__()
        # create model from timm
        assert input_shape.height == input_shape.width
        in_chans = input_shape.channels

        if act is not None:
            act = ACTIVATION_REGISTRY.get(act)

        self.model = timm.create_model(
            model_name, pretrained, in_chans=in_chans, act=act, **kwargs
        )

        if reset_classifier:
            num_cls = kwargs.pop("num_classes")
            self.model.reset_classifier(num_cls)

        if freeze_to is not None:
            self.freeze_to(freeze_to)

        if finetune:
            if freeze_to is not None and isinstance(freeze_to, int):
                msg = "You have sprecified freeze_to along with finetune"
                _logger.warning(msg)
            _logger.info("Freezing all the model parameters except for the classifier")
            self.freeze()

            classifier = ["head", "head_dist"]

            for name, module in self.model.named_children():
                if name in classifier:
                    for p in module.parameters():
                        p.requires_grad_(True)

        store_attr("wd, lr, filter_wd, input_shape")