Example #1
0
def build_iteration_strategy(
    config: DictConfig,
    dataloaders: Dict[str, torch.utils.data.DataLoader],
    *args,
    **kwargs,
) -> IterationStrategy:
    """Build the multitasking iteration strategy for a set of dataloaders.

    Args:
        config: Multitasking config. ``enabled`` (default True) gates the
            feature; when enabled, ``type`` selects the registered strategy
            class and ``params`` (optional) is forwarded to it.
        dataloaders: Mapping of dataset name to its dataloader. All loaders
            are assumed to share the same ``dataset.dataset_type``.
        *args, **kwargs: Passed through to the strategy constructor/factory.

    Returns:
        An ``IterationStrategy`` instance. Non-train splits always get a
        ``SizeProportionalIterationStrategy`` regardless of the configured
        type; a disabled config yields a ``ConstantIterationStrategy``.

    Raises:
        AssertionError: If multitasking is enabled but ``type`` is missing.
    """
    if not config.get("enabled", True):
        return ConstantIterationStrategy.from_params(dataloaders, *args, **kwargs)
    assert (
        "type" in config
    ), "multitasking config must define 'type' attribute if enabled"
    iteration_strategy_class = registry.get_iteration_strategy_class(config.type)
    strategy_params = config.get("params", {})
    # Peek at any one dataloader; this assumes all dataloaders share the
    # same dataset type.
    dataset_type = next(iter(dataloaders.values())).dataset.dataset_type
    if dataset_type != "train":
        logger.info(
            f"{iteration_strategy_class.__name__} updated to size "
            + f"proportional for {dataset_type}"
        )
        return SizeProportionalIterationStrategy.from_params(
            dataloaders, *args, **kwargs
        )
    return iteration_strategy_class(strategy_params, dataloaders, *args, **kwargs)
Example #2
0
def build_iteration_strategy(
    config: DictConfig,
    dataloaders: Dict[str, torch.utils.data.DataLoader],
    *args,
    **kwargs,
) -> IterationStrategy:
    """Construct the configured multitasking iteration strategy.

    When multitasking is disabled (``enabled: false``), a constant strategy
    is returned. Otherwise the strategy class registered under ``config.type``
    is instantiated with the optional ``config.params`` sub-config.

    Raises:
        AssertionError: If multitasking is enabled but ``type`` is missing.
    """
    if not config.get("enabled", True):
        return ConstantIterationStrategy.from_params(dataloaders, *args, **kwargs)

    assert (
        "type" in config
    ), "multitasking config must define 'type' attribute if enabled"
    strategy_cls = registry.get_iteration_strategy_class(config.type)
    strategy_params = config.get("params", {})
    # val and test splits won't be affected as test reporter iterates
    # over the datasets one by one without using any iteration strategy
    return strategy_cls(strategy_params, dataloaders, *args, **kwargs)