Example #1
            map_dict.params_mapping_dict).backend_mapping(raw_config)
        self._cls = ClassFactory.get_cls(ClassType.LR_SCHEDULER,
                                         self.map_config.type)

    def __call__(self, optimizer=None, epochs=None, steps=None):
        """Call lr scheduler class."""
        params = self.map_config.get("params", {})
        logging.debug("Call LrScheduler. name={}, params={}".format(
            self._cls.__name__, params))

        setattr(self._cls, "by_epoch", True)
        if hasattr(self.config, "by_epoch"):
            setattr(self._cls, "by_epoch", self.config.by_epoch)

        try:
            if params:
                return self._cls(optimizer, **params)
            else:
                return self._cls(optimizer)
        except Exception as ex:
            logging.error(
                "Failed to call LrScheduler name={}, params={}".format(
                    self._cls.__name__, params))
            raise ex


if vega.is_torch_backend():
    import torch.optim.lr_scheduler as torch_lr

    ClassFactory.register_from_package(torch_lr, ClassType.LR_SCHEDULER)
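
A hedged usage sketch of what this registration enables: once every class in torch.optim.lr_scheduler is registered under ClassType.LR_SCHEDULER, a scheduler can be resolved by class name and built from config params, just as get_cls does above. The import path vega.common and the name "StepLR" are assumptions for illustration, not taken from the snippet.

# Hypothetical sketch -- the registry import path may differ by Vega version.
import torch
from vega.common import ClassFactory, ClassType  # assumed location

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Resolve a registered torch scheduler by its class name and build it with params.
scheduler_cls = ClassFactory.get_cls(ClassType.LR_SCHEDULER, "StepLR")
scheduler = scheduler_cls(optimizer, step_size=30, gamma=0.1)
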
Example #2
    @property
    def results(self):
        """Return metrics results."""
        res = {}
        for name, metric in self.mdict.items():
            res.update(metric.result)
        return res

    @property
    def objectives(self):
        """Return objectives results."""
        _objs = {}
        for name in self.mdict:
            objective = self.mdict.get(name).objective
            if isinstance(objective, dict):
                _objs = dict(_objs, **objective)
            else:
                _objs[name] = objective
        return _objs

    def __getattr__(self, key):
        """Get a metric by key name.

        :param key: metric name
        :type key: str
        """
        return self.mdict[key]


ClassFactory.register_from_package(metrics, ClassType.METRIC)
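
A minimal standalone illustration of the aggregation these two properties perform, using hypothetical stand-in metric objects (not Vega classes): each metric exposes a result dict and an objective, and the container flattens them.

# Hypothetical stand-ins for registered metric objects.
class _FakeAccuracy:
    result = {"accuracy": 0.91}
    objective = "MAX"

class _FakeFlops:
    result = {"flops": 1.2, "params": 3.4}
    objective = {"flops": "MIN", "params": "MIN"}

mdict = {"accuracy": _FakeAccuracy(), "flops": _FakeFlops()}

# `results`: merge every metric's result dict into one flat dict.
res = {}
for metric in mdict.values():
    res.update(metric.result)
# -> {"accuracy": 0.91, "flops": 1.2, "params": 3.4}

# `objectives`: dict objectives are merged, scalar ones are keyed by metric name.
objs = {}
for name in mdict:
    objective = mdict[name].objective
    if isinstance(objective, dict):
        objs = dict(objs, **objective)
    else:
        objs[name] = objective
# -> {"accuracy": "MAX", "flops": "MIN", "params": "MIN"}
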
Example #3
                    self._cls) else partial(self._cls, **params)
            else:
                cls_obj = self._cls() if isclass(self._cls) else partial(
                    self._cls)
            if vega.is_torch_backend():
                if vega.is_gpu_device():
                    cls_obj = cls_obj.cuda()
                elif vega.is_npu_device():
                    cls_obj = cls_obj.npu()
            return cls_obj
        except Exception as ex:
            logging.error("Failed to call Loss name={}, params={}".format(
                self._cls.__name__, params))
            raise ex


if vega.is_torch_backend():
    import torch.nn as torch_nn
    ClassFactory.register_from_package(torch_nn, ClassType.LOSS)
    try:
        import timm.loss as timm_loss
        ClassFactory.register_from_package(timm_loss, ClassType.LOSS)
    except Exception:
        pass
elif vega.is_tf_backend():
    import tensorflow.compat.v1.losses as tf_loss
    ClassFactory.register_from_package(tf_loss, ClassType.LOSS)
elif vega.is_ms_backend():
    import mindspore.nn.loss as ms_loss
    ClassFactory.register_from_package(ms_loss, ClassType.LOSS)
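
On the PyTorch backend, registering torch.nn (and optionally timm.loss) means a loss can be looked up by class name alone. A rough sketch, assuming the same registry import path as above and "CrossEntropyLoss" as the configured name:

# Hypothetical sketch -- import path assumed, not shown in the snippet.
import torch
from vega.common import ClassFactory, ClassType  # assumed location

loss_cls = ClassFactory.get_cls(ClassType.LOSS, "CrossEntropyLoss")
loss_fn = loss_cls()

logits = torch.randn(8, 10)
target = torch.randint(0, 10, (8,))
loss = loss_fn(logits, target)
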
Example #4
            raise ex

    @classmethod
    def set_distributed(cls, optimizer, model=None):
        """Set distributed optimizer."""
        if vega.is_torch_backend():
            optimizer = hvd.DistributedOptimizer(
                optimizer,
                named_parameters=model.named_parameters(),
                compression=hvd.Compression.none)
        elif vega.is_tf_backend():
            optim_class = hvd.DistributedOptimizer if vega.is_gpu_device(
            ) else NPUDistributedOptimizer
            optimizer = dynamic_distributed_optimizer(optim_class, optimizer)
        return optimizer


if vega.is_torch_backend():
    import torch.optim as torch_opt

    ClassFactory.register_from_package(torch_opt, ClassType.OPTIMIZER)
elif vega.is_tf_backend():
    import tensorflow.compat.v1.train as tf_train

    ClassFactory.register_from_package(tf_train, ClassType.OPTIMIZER)

elif vega.is_ms_backend():
    import mindspore.nn.optim as ms_opt

    ClassFactory.register_from_package(ms_opt, ClassType.OPTIMIZER)
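
A minimal sketch of the torch branch of set_distributed in isolation, using Horovod directly; the model and learning rate here are illustrative only.

# Illustrative model/optimizer; mirrors the hvd.DistributedOptimizer call above.
import torch
import horovod.torch as hvd

hvd.init()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Wrap the base optimizer so gradients are averaged across workers,
# with gradient compression disabled.
optimizer = hvd.DistributedOptimizer(
    optimizer,
    named_parameters=model.named_parameters(),
    compression=hvd.Compression.none)
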