Example #1
            raw_config = self.config.to_dict()
        raw_config.type = self.config.type
        map_dict = LrSchedulerMappingDict()
        self.map_config = ConfigBackendMapping(
            map_dict.type_mapping_dict, map_dict.params_mapping_dict).backend_mapping(raw_config)
        self._cls = ClassFactory.get_cls(ClassType.LR_SCHEDULER, self.map_config.type)

    def __call__(self, optimizer=None, epochs=None, steps=None):
        """Call lr scheduler class."""
        params = self.map_config.get("params", {})
        logging.debug("Call LrScheduler. name={}, params={}".format(self._cls.__name__, params))

        setattr(self._cls, "by_epoch", True)
        if hasattr(self.config, "by_epoch"):
            setattr(self._cls, "by_epoch", self.config.by_epoch)

        try:
            if params:
                return self._cls(optimizer, **params)
            else:
                return self._cls(optimizer)
        except Exception as ex:
            logging.error("Failed to call LrScheduler name={}, params={}".format(self._cls.__name__, params))
            raise ex


if zeus.is_torch_backend():
    import torch.optim.lr_scheduler as torch_lr

    ClassFactory.register_from_package(torch_lr, ClassType.LR_SCHEDULER)
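
The example above registers every class in torch.optim.lr_scheduler under ClassType.LR_SCHEDULER and later resolves the configured name through ClassFactory.get_cls. The sketch below illustrates the general idea of package-wide registration and name-based lookup; _registry and register_from_package here are illustrative stand-ins, not the zeus ClassFactory API.

# Illustrative sketch only -- not the zeus ClassFactory implementation.
import inspect
import torch.optim.lr_scheduler as torch_lr

_registry = {}  # hypothetical registry: {class_type: {name: cls}}

def register_from_package(package, class_type):
    """Register every public class found in `package` under `class_type`."""
    for name, obj in vars(package).items():
        if inspect.isclass(obj) and not name.startswith("_"):
            _registry.setdefault(class_type, {})[name] = obj

register_from_package(torch_lr, "lr_scheduler")
step_lr_cls = _registry["lr_scheduler"]["StepLR"]  # resolved by the configured type name
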
Example #2
        try:
            if params:
                cls_obj = (self._cls(**params) if isclass(self._cls)
                           else partial(self._cls, **params))
            else:
                cls_obj = (self._cls() if isclass(self._cls)
                           else partial(self._cls))
            if zeus.is_torch_backend() and TrainerConfig().cuda:
                cls_obj = cls_obj.cuda()
            return cls_obj
        except Exception as ex:
            logging.error("Failed to call Loss name={}, params={}".format(
                self._cls.__name__, params))
            raise ex


if zeus.is_torch_backend():
    import torch.nn as torch_nn
    import timm.loss as timm_loss

    ClassFactory.register_from_package(torch_nn, ClassType.LOSS)
    ClassFactory.register_from_package(timm_loss, ClassType.LOSS)
elif zeus.is_tf_backend():
    import tensorflow.compat.v1.losses as tf_loss

    ClassFactory.register_from_package(tf_loss, ClassType.LOSS)
elif zeus.is_ms_backend():
    import mindspore.nn.loss as ms_loss

    ClassFactory.register_from_package(ms_loss, ClassType.LOSS)
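
The loss wrapper above instantiates class-based losses directly and binds parameters onto function-style losses with functools.partial, so both end up as callables. A minimal, self-contained sketch of that dispatch; build_loss is a hypothetical helper, not part of zeus.

from functools import partial
from inspect import isclass

import torch.nn as torch_nn
import torch.nn.functional as F

def build_loss(loss_cls_or_fn, **params):
    """Instantiate a loss class, or bind params onto a plain loss function."""
    if isclass(loss_cls_or_fn):
        return loss_cls_or_fn(**params)       # e.g. torch.nn.CrossEntropyLoss
    return partial(loss_cls_or_fn, **params)  # e.g. torch.nn.functional.nll_loss

ce_loss = build_loss(torch_nn.CrossEntropyLoss, reduction="mean")
nll_loss = build_loss(F.nll_loss, reduction="sum")
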
Example #3
        return pfms

    def reset(self):
        """Reset states for new evaluation after each epoch."""
        for val in self.mdict.values():
            val.reset()

    @property
    def results(self):
        """Return metrics results."""
        res = {}
        for name, metric in self.mdict.items():
            res.update(metric.result)
        return res

    @property
    def objectives(self):
        """Return objectives results."""
        return {name: self.mdict.get(name).objective for name in self.mdict}

    def __getattr__(self, key):
        """Get a metric by key name.

        :param key: metric name
        :type key: str
        """
        return self.mdict[key]


ClassFactory.register_from_package(metrics, ClassType.METRIC)
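
The metrics container above keeps metric objects in self.mdict, resets them after each epoch, merges their individual result dicts in the results property, and forwards attribute access to the dict through __getattr__. Below is a self-contained sketch of that shape; the accuracy metric and its result format are assumptions made for illustration.

class _AccuracyMetric:
    """Toy metric used only to demonstrate the container pattern."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.correct, self.total = 0, 0

    def __call__(self, pred, target):
        self.correct += int(pred == target)
        self.total += 1

    @property
    def result(self):
        return {"accuracy": self.correct / max(self.total, 1)}


class MetricsContainer:
    """Hypothetical mimic of the Metrics container shown above."""

    def __init__(self):
        self.mdict = {"accuracy": _AccuracyMetric()}

    def reset(self):
        for val in self.mdict.values():
            val.reset()

    @property
    def results(self):
        res = {}
        for metric in self.mdict.values():
            res.update(metric.result)
        return res

    def __getattr__(self, key):
        # Reached only when normal attribute lookup fails; delegate to mdict.
        return self.mdict[key]
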
Example #4
                    optimizer = hvd.DistributedOptimizer(optimizer,
                                                         named_parameters=model.named_parameters(),
                                                         compression=hvd.Compression.none)
            elif zeus.is_tf_backend():
                optimizer = dynamic_optimizer(self.optim_cls, **params)
                if distributed:
                    optimizer = hvd.DistributedOptimizer(optimizer) if zeus.is_gpu_device() else \
                        NPUDistributedOptimizer(optimizer)
            elif zeus.is_ms_backend():
                learnable_params = [param for param in model.trainable_params() if param.requires_grad]
                optimizer = self.optim_cls(learnable_params, **params)
            return optimizer
        except Exception as ex:
            logging.error("Failed to call Optimizer name={}, params={}".format(self.optim_cls.__name__, params))
            raise ex


if zeus.is_torch_backend():
    import torch.optim as torch_opt

    ClassFactory.register_from_package(torch_opt, ClassType.OPTIMIZER)
elif zeus.is_tf_backend():
    import tensorflow.compat.v1.train as tf_train

    ClassFactory.register_from_package(tf_train, ClassType.OPTIMIZER)
elif zeus.is_ms_backend():
    import mindspore.nn.optim as ms_opt

    ClassFactory.register_from_package(ms_opt, ClassType.OPTIMIZER)
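
Example #4 builds the optimizer for the active backend and, when distributed training is enabled, wraps it with hvd.DistributedOptimizer (GPU) or NPUDistributedOptimizer (NPU). The sketch below covers only plain PyTorch construction by name, analogous to what ClassFactory.get_cls plus the mapped params provide; the model and hyperparameter values are placeholders.

import torch
import torch.optim as torch_opt

model = torch.nn.Linear(8, 2)                 # placeholder model
params = {"lr": 0.01, "momentum": 0.9}        # would come from the mapped config
optim_cls = getattr(torch_opt, "SGD")         # resolved by name, like ClassFactory
optimizer = optim_cls(model.parameters(), **params)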