Example #1
    if hasattr(cannot_be_inferred, name):
        # these were manually filled in (their parameters cannot be inferred automatically).
        subclass = getattr(cannot_be_inferred, name)
    else:
        # this points to the optimizer class in torch.optim (e.g. Adam)
        torch_optimizer_class = getattr(torch.optim, name)

        # dynamically create wrapper class, which has the same name as torch_class
        subclass = type(
            name,
            # must subclass OptimizerConfig to be added to the Registry
            (OptimizerConfig,),
            {},
        )
        # fill in optimizer parameters (except params)
        make_config_class(torch_optimizer_class,
                          blacklist=["params"])(subclass)

    subclass.__hash__ = param_hash
    classes[name] = subclass


@OptimizerConfig.fill_union()
class Optimizer__Union(TaggedUnion):
    @classmethod
    def default(cls, **kwargs):
        """Return default factory for Optimizer (defaulting to Adam)."""
        return (
            cls(Adam=classes["Adam"]())
            if kwargs == {}
            else cls(Adam=classes["Adam"](**kwargs))
        )

    def make_optimizer_scheduler(self, params):
        return self.value.make_optimizer_scheduler(params)
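
A minimal usage sketch (hypothetical model and hyperparameters; it assumes only the Optimizer__Union defined above and that the generated Adam config accepts the same keyword arguments as torch.optim.Adam, minus params):

import torch.nn as nn

# Hypothetical module; any nn.Module's parameters would do here.
model = nn.Linear(16, 4)

# The default union selects Adam; keyword arguments are forwarded to the Adam config.
opt_union = Optimizer__Union.default(lr=1e-3)

# Delegates to the selected config's make_optimizer_scheduler; the exact return
# structure depends on OptimizerConfig, which is not shown in this snippet.
opt_and_scheduler = opt_union.make_optimizer_scheduler(model.parameters())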
Example #2
    # search the manually written config modules for this scheduler name
    cannot_be_inferred_module = None
    for module in cannot_be_inferred_modules:
        if hasattr(module, name):
            cannot_be_inferred_module = module
            break

    if cannot_be_inferred_module is not None:
        # these were manually filled in.
        subclass = getattr(cannot_be_inferred_module, name)
    else:
        # this points to the scheduler class in torch.optim.lr_scheduler (e.g. StepLR)
        torch_lr_scheduler_class = getattr(torch.optim.lr_scheduler, name)
        # dynamically create wrapper class, which has the same name as torch_class
        subclass = type(
            name,
            # must subclass LearningRateSchedulerConfig to be added to the Registry
            (LearningRateSchedulerConfig,),
            {"__module__": __name__},
        )
        # fill in scheduler parameters (except optimizer)
        make_config_class(torch_lr_scheduler_class,
                          blacklist=["optimizer"])(subclass)

    subclass.__hash__ = param_hash
    classes[name] = subclass


@LearningRateSchedulerConfig.fill_union()
class LearningRateScheduler__Union(TaggedUnion):
    def make_from_optimizer(
        self, optimizer: torch.optim.Optimizer
    ) -> torch.optim.lr_scheduler._LRScheduler:
        return self.value.make_from_optimizer(optimizer)
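
A minimal usage sketch (hypothetical optimizer and scheduler settings; it assumes the union is constructed by naming the selected member as a keyword argument, as Optimizer__Union.default does above, and that the generated StepLR config mirrors torch.optim.lr_scheduler.StepLR, minus optimizer):

import torch
import torch.nn as nn

model = nn.Linear(16, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Select the StepLR wrapper from the registry built above and instantiate
# the real torch scheduler from the existing optimizer.
scheduler_union = LearningRateScheduler__Union(
    StepLR=classes["StepLR"](step_size=10, gamma=0.5))
scheduler = scheduler_union.make_from_optimizer(optimizer)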