from deep_privacy.utils import Registry, build_from_cfg HOOK_REGISTRY = Registry("HOOKS") def build_hooks(cfg, trainer): for _hook in cfg.trainer.hooks: if _hook.type == "CheckpointHook": hook = build_from_cfg(_hook, HOOK_REGISTRY, output_dir=cfg.output_dir) else: hook = build_from_cfg(_hook, HOOK_REGISTRY) trainer.register_hook(_hook.type, hook) class HookBase: def before_train(self): pass def after_train(self): pass def before_step(self): pass def after_step(self): pass def after_extend(self): """
from deep_privacy.utils import build_from_cfg, Registry from .utils import NetworkWrapper DISCRIMINATOR_REGISTRY = Registry("DISCRIMINATOR_REGISTRY") GENERATOR_REGISTRY = Registry("GENERATOR_REGISTRY") def build_discriminator(cfg, data_parallel): discriminator = build_from_cfg(cfg.models.discriminator, DISCRIMINATOR_REGISTRY, cfg=cfg, max_imsize=cfg.models.max_imsize, pose_size=cfg.models.pose_size, image_channels=cfg.models.image_channels, conv_size=cfg.models.conv_size) if data_parallel: discriminator = NetworkWrapper(discriminator) discriminator = extend_model(cfg, discriminator) return discriminator def build_generator(cfg, data_parallel): generator = build_from_cfg(cfg.models.generator, GENERATOR_REGISTRY, cfg=cfg, max_imsize=cfg.models.max_imsize, conv_size=cfg.models.conv_size, image_channels=cfg.models.image_channels, pose_size=cfg.models.pose_size) if data_parallel: generator = NetworkWrapper(generator)
from deep_privacy.utils import build_from_cfg, Registry

# Global registry mapping detector type names to detector classes.
# Populated elsewhere via registration decorators (not visible in this file).
DETECTOR_REGISTRY = Registry("DETECTOR_REGISTRY")


def build_detector(cfg, *args, **kwargs):
    """Instantiate a detector from its config node.

    Args:
        cfg: config node for the detector; presumably its ``type`` field
            selects the registered class (same dispatch pattern as the
            other ``build_*`` helpers in this project) — confirm against
            ``build_from_cfg``.
        *args: positional arguments forwarded to the detector constructor.
        **kwargs: keyword arguments forwarded to the detector constructor.

    Returns:
        The constructed detector instance.
    """
    # Fix: removed a stray ``print(cfg)`` debug statement left over from
    # development — it spammed stdout on every detector construction.
    return build_from_cfg(cfg, DETECTOR_REGISTRY, *args, **kwargs)
from deep_privacy.utils import Registry, build_from_cfg from .utils import fast_collate, DataPrefetcher, progressive_decorator from .transforms import build_transforms import torch DATASET_REGISTRY = Registry("DATASET") def get_dataloader(cfg, imsize, get_transition_value, is_train): cfg_data = cfg.data_val if is_train: cfg_data = cfg.data_train if cfg_data.dataset.type == "MNISTDataset": assert cfg.models.pose_size == 0 transform = build_transforms(cfg_data.transforms, imsize=imsize) dataset = build_from_cfg(cfg_data.dataset, DATASET_REGISTRY, imsize=imsize, transform=transform) batch_size = cfg.trainer.batch_size_schedule[imsize] dataloader = torch.utils.data.DataLoader(dataset, pin_memory=False, collate_fn=fast_collate, batch_size=batch_size, **cfg_data.loader) dataloader = DataPrefetcher(dataloader, infinite_loader=is_train) # If progressive growing, perform GPU image interpolation if not cfg.trainer.progressive.enabled: return dataloader if get_transition_value is not None: assert cfg.trainer.progressive.enabled dataloader.next = progressive_decorator(dataloader.next,
from deep_privacy.utils import Registry

# Registry for loss/criterion classes. Presumably populated elsewhere via
# registration decorators, and consumed by a ``build_criterion``-style
# helper — registration and build sites are not visible in this file.
CRITERION_REGISTRY = Registry("CRITERION")
import torchvision
from deep_privacy.utils import Registry, build_from_cfg

# Global registry mapping transform type names to transform classes.
TRANSFORM_REGISTRY = Registry("TRANSFORM")


def build_transforms(transforms, imsize):
    """Build each transform config and chain them into one callable.

    Every entry in ``transforms`` is instantiated through
    ``TRANSFORM_REGISTRY`` with the target ``imsize`` forwarded as a
    keyword argument; the resulting transforms are wrapped in a single
    ``torchvision.transforms.Compose``.

    Args:
        transforms: iterable of transform config nodes.
        imsize: target image size, passed to each transform's constructor.

    Returns:
        A ``torchvision.transforms.Compose`` applying the transforms in order.
    """
    built = []
    for transform_cfg in transforms:
        built.append(
            build_from_cfg(transform_cfg, TRANSFORM_REGISTRY, imsize=imsize))
    return torchvision.transforms.Compose(built)