def build(config):
    """Recursively build the object described by *config*.

    Any nested ``config['transforms']`` entries are built first, then the
    top-level object is constructed — first trying the ``A`` namespace and
    falling back to ``pytorch`` when the type is not found there.
    NOTE(review): presumably ``object_from_dict`` instantiates
    ``config['type']`` from the given namespace — verify against its source.
    """
    if "transforms" in config:
        built_children = [build(child) for child in config["transforms"]]
        config["transforms"] = built_children
    try:
        return object_from_dict(config, A)
    except AttributeError:
        # Type not found in A; fall back to the pytorch namespace.
        return object_from_dict(config, pytorch)
def build(config):
    """Recursively build the object described by *config*.

    Nested ``config['transforms']`` entries are built first.  The owning
    namespace is then chosen by checking where ``config['type']`` exists:
    ``A`` first, then ``pytorch``, otherwise ``ALBUMENTATIONS``.
    """
    if "transforms" in config:
        config["transforms"] = [build(child) for child in config["transforms"]]

    type_name = config["type"]
    if hasattr(A, type_name):
        namespace = A
    elif hasattr(pytorch, type_name):
        namespace = pytorch
    else:
        namespace = ALBUMENTATIONS
    return object_from_dict(config, namespace)
def build(config: ConfigDict) -> T:
    """Recursively build the object described by *config*.

    Nested ``config['transforms']`` entries are built first.  Construction
    is attempted against the ``A`` namespace, then ``pytorch``; if both
    raise ``AttributeError`` the default ``object_from_dict(config)`` call
    (no explicit namespace) is used.
    """
    if "transforms" in config:
        config["transforms"] = [build(child) for child in config["transforms"]]

    # Try each namespace in priority order; AttributeError means the
    # requested type does not live there, so move on to the next one.
    for namespace in (A, pytorch):
        try:
            return object_from_dict(config, namespace)
        except AttributeError:
            continue
    return object_from_dict(config)
def metrics(self) -> Optional[Dict[str, T]]:
    """Build the metric objects declared in ``self.config.METRICS``.

    Returns a dict mapping each metric name to its built object, or
    ``None`` when the config declares no ``METRICS`` attribute.
    """
    if not hasattr(self.config, "METRICS"):
        return None
    return {
        name: object_from_dict(spec)
        for name, spec in self.config.METRICS.items()
    }
def dataset(self, mode: str) -> Dataset:
    """Build the dataset configured under ``self.config.DATA[mode]``.

    Injects the mode-specific ``image_transforms`` and ``crop_transforms``
    (built via ``self.transform``) into the dataset arguments before
    construction.

    BUG FIX: the original bound ``arguments`` directly to the mapping stored
    in ``self.config.DATA[mode]`` and mutated it with ``update``, so every
    call permanently wrote built transform objects back into the shared
    config.  We now update a shallow copy, leaving the config untouched.
    """
    # Shallow copy so the shared config mapping is not mutated in place.
    arguments = dict(self.config.DATA[mode])
    arguments["image_transforms"] = self.transform(mode, level="image_transforms")
    arguments["crop_transforms"] = self.transform(mode, level="crop_transforms")
    return object_from_dict(arguments)
def losses(self) -> Dict[str, T]:
    """Build the loss objects declared in ``self.config.LOSSES``.

    When ``self._has_bdp_hook`` is set, each loss is wrapped in
    ``BalancedDataParallelCriterion``; otherwise the raw losses are
    returned as-is.
    """
    built = {
        name: object_from_dict(spec)
        for name, spec in self.config.LOSSES.items()
    }
    if not self._has_bdp_hook:
        return built
    return {name: BalancedDataParallelCriterion(loss) for name, loss in built.items()}
def make_model(
    config: ConfigDict, device: torch.device = torch.device("cpu")
) -> torch.nn.Module:
    """Build the model described by *config* and move it to *device*.

    Defaults to CPU placement when no device is given.
    """
    return object_from_dict(config).to(device)