Example #1
def make_reproducible(seed=42):
    """ Make the run reproducible by setting the seed """
    torch.set_deterministic(True)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
Example #2
def prepare_device(n_gpu: int, keep_reproducibility=False):
    """
    Choose whether to use CPU or GPU depending on the value of "n_gpu".

    Args:
        n_gpu (int): the number of GPUs used in the experiment. If n_gpu == 0, use the CPU; if n_gpu >= 1, use the GPU.
        keep_reproducibility (bool): set to True if the experiment needs to be reproducible.

    See Also
        Reproducibility: https://pytorch.org/docs/stable/notes/randomness.html
    """
    if n_gpu == 0:
        print("Using CPU in the experiment.")
        device = torch.device("cpu")
    else:
        # possibly at the cost of reduced performance
        if keep_reproducibility:
            print("Using CuDNN deterministic mode in the experiment.")
            torch.backends.cudnn.benchmark = False  # ensures that cuDNN selects the same convolution algorithm each time
            torch.set_deterministic(
                True
            )  # configures PyTorch to use only deterministic implementations
        else:
            # causes cuDNN to benchmark multiple convolution algorithms and select the fastest
            torch.backends.cudnn.benchmark = True

        device = torch.device("cuda:0")

    return device
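
A minimal usage sketch for prepare_device (the n_gpu value and the net variable are illustrative placeholders, not part of the example above):

device = prepare_device(n_gpu=1, keep_reproducibility=True)
net = net.to(device)  # hypothetical model moved onto the selected device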
Example #3
def main(params):
    # reproducibility
    torch.manual_seed(0)
    np.random.seed(0)
    torch.backends.cudnn.deterministic = True  # cuDNN deterministically selects an algorithm, possibly at the cost of reduced performance
    torch.set_deterministic(
        True
    )  # optional; some operations have no deterministic alternative and will raise an error

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('using device', device)

    # prepare dataset
    transform = get_transform()
    trainset = XXXDataset(root='./data/train', transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=2)

    # prepare network
    net = XXXNet().to(device)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=params['lr'],
                          momentum=params['momentum'])

    # train
    train(trainloader, net, criterion, optimizer, params, device)

    # save model
    save_model_path = os.path.join('models', params['model_name'])
    torch.save(net.state_dict(), save_model_path)
Example #4
def process_params(params, dest_dir):
    # Fill params
    params = update_default_dict(params)

    # Restart model
    if params['model_restart']:
        model_file = params['model_restart']
        with open(
                op.join(dest_dir, "models", params['model_restart'] + '.json'),
                "r") as f:
            params = json.load(f)
            params['model_restart'] = model_file

    # Seed
    if params['seed'] is None:
        params['seed'] = np.random.choice(2147483648)  # 2^31

    # Hidden layers
    if isinstance(params['model']['hidden_layers'], str):
        params['model']['hidden_layers'] = [
            int(h) for h in params['model']['hidden_layers'].strip('[]').split(',')
        ]

    # Set random seed
    torch.manual_seed(params['seed'])
    torch.set_deterministic(True)
    np.random.seed(params['seed'])

    # Data normalization is not possible with Bjorck layers because they guarantee a
    # Lipschitz constant of exactly one
    if params['normalize_input'] or params['normalize_output']:
        assert params['model'][
            'linear'] != 'bjorck', "Bjorck layers incompatible with data normalization"
    return params
Example #5
def set_seeds():
    random.seed(42)
    np.random.seed(12345)
    torch.manual_seed(1234)
    torch.set_deterministic(True)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = False
Example #6
    def __init__(
        self,
        learning_rate: float = 1e-3,
        buffer_capacity: int = 200,
        max_epochs_per_task: int = 10,
        weight_decay: float = 1e-6,
        seed: Optional[int] = None,
    ):
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.buffer_capacity = buffer_capacity

        self.net: ResNet
        self.buffer: Optional[Buffer] = None
        self.optim: torch.optim.Optimizer
        self.task: int = 0
        self.rng = np.random.RandomState(seed)
        self.seed = seed
        if seed is not None:
            torch.manual_seed(seed)
            torch.set_deterministic(True)

        self.epochs_per_task: int = max_epochs_per_task
        self.early_stop_patience: int = 2

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Example #7
def set_seed(seed, torch_deterministic=False):
    if seed == -1 and torch_deterministic:
        seed = 42
    elif seed == -1:
        seed = np.random.randint(0, 10000)
    print("Setting seed: {}".format(seed))

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    if torch_deterministic:
        # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.set_deterministic(True)
    else:
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False

    return seed
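
A usage sketch for set_seed, showing the -1 convention implemented above (the return values follow from the code itself, not from any external API):

seed = set_seed(-1, torch_deterministic=True)  # falls back to the fixed seed 42
seed = set_seed(-1)                            # draws a random seed in [0, 10000)
seed = set_seed(1234)                          # uses the given seed as-is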
Example #8
def init_env(config):
    # Set the debug environment variable CUBLAS_WORKSPACE_CONFIG to ":16:8" (may limit
    # overall performance) or ":4096:8" (increases the cuBLAS footprint in GPU memory
    # by approximately 24 MiB).
    # https://docs.nvidia.com/cuda/cublas/index.html
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
    if config.seed is not None:
        if config.seed >= 0:
            np.random.seed(config.seed)
            torch.manual_seed(config.seed)
            torch.set_deterministic(True)
            torch.backends.cudnn.benchmark = False
        else:
            logging.warning(
                'the random seed should be a non-negative integer')

    config.device = None
    if not config.cpu and torch.cuda.is_available():
        config.device = torch.device('cuda')
    else:
        config.device = torch.device('cpu')
        # https://github.com/pytorch/pytorch/issues/11201
        torch.multiprocessing.set_sharing_strategy('file_system')
    logging.info(f'Using device: {config.device}')

    config.run_name = '{}_{}_{}'.format(
        config.data_name,
        Path(config.config).stem if config.config else config.model_name,
        datetime.now().strftime('%Y%m%d%H%M%S'),
    )
    logging.info(f'Run name: {config.run_name}')

    return config
Example #9
    def fit(self, model, dataset):
        # init loggers
        self.tensorboard = SummaryWriter()
        self.save_dir = self.tensorboard.log_dir
        self.logger = self.init_logger()

        self.logger.info(f'seed torch and numpy: {self.seed}')
        torch.manual_seed(self.seed)
        np.random.seed(self.seed)
        torch.set_deterministic(True)

        self.logger.info('setup dataset ...')
        dataset.setup()
        self.logger.info('done.')

        self.logger.info('start training ...')
        self.train_tqdm = tqdm(range(self.max_epochs),
                               desc='epoch',
                               dynamic_ncols=True,
                               postfix=0)
        for i in self.train_tqdm:
            self.num_steps_this_epoch = 0
            self.train_epoch(model, dataset)
            if self.should_stop:
                break

        self.train_tqdm.close()
        self.tensorboard.close()
Example #10
    def __init__(self, args: Namespace, wildcards: dict, descriptions: dict = None):
        super().__init__()

        # args, seed
        self.args = args
        self.save_dir = self._parsed_argument('save_dir', args)
        self.is_test_run = self._parsed_argument('is_test_run', args)
        self.seed = self._parsed_argument('seed', args)
        self.is_deterministic = self._parsed_argument('is_deterministic', args)
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        if self.is_deterministic:
            # see https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
            os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")
            torch.set_deterministic(self.is_deterministic)

        # maybe delete old dir, note arguments, save run_config
        if self._parsed_argument('save_del_old', args):
            shutil.rmtree(self.save_dir, ignore_errors=True)
        os.makedirs(self.save_dir, exist_ok=True)
        save_as_json(args, get_task_config_path(self.save_dir), wildcards)
        dump_system_info(self.save_dir + 'sysinfo.txt')

        # logging
        self.log_file = '%slog_task.txt' % self.save_dir
        LoggerManager().set_logging(default_save_file=self.log_file)
        self.logger = self.new_logger(index=None)
        log_args(self.logger, None, self.args, add_git_hash=True, descriptions=descriptions)
        Register.log_all(self.logger)

        # reset weight strategies so that consecutive tasks do not conflict with each other
        StrategyManager().reset()

        self.methods = []
Example #11
def set_random_seeds(random_seed=0):
    torch.manual_seed(random_seed)
    #torch.backends.cudnn.deterministic = True
    torch.set_deterministic(True)
    #torch.backends.cudnn.benchmark = False
    np.random.seed(random_seed)
    random.seed(random_seed)
Example #12
    def test_linear_transformation(self):
        c, h, w = 3, 24, 32

        tensor, _ = self._create_data(h, w, channels=c, device=self.device)

        matrix = torch.rand(c * h * w, c * h * w, device=self.device)
        mean_vector = torch.rand(c * h * w, device=self.device)

        fn = T.LinearTransformation(matrix, mean_vector)
        scripted_fn = torch.jit.script(fn)

        self._test_transform_vs_scripted(fn, scripted_fn, tensor)

        batch_tensors = torch.rand(4, c, h, w, device=self.device)
        # We skip some tests from _test_transform_vs_scripted_on_batch as
        # results for scripted and non-scripted transformations are not exactly the same
        torch.manual_seed(12)
        torch.set_deterministic(True)
        transformed_batch = fn(batch_tensors)
        torch.manual_seed(12)
        s_transformed_batch = scripted_fn(batch_tensors)
        self.assertTrue(transformed_batch.equal(s_transformed_batch))

        with get_tmp_dir() as tmp_dir:
            scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt"))
Example #13
def force_determinism(is_deterministic=False):
    if not is_deterministic:
        return
    torch.manual_seed(0)
    np.random.seed(0)
    torch.backends.cudnn.benchmark = False
    torch.set_deterministic(True)
Example #14
def set_determinism(
    seed: Optional[int] = NP_MAX,
    use_deterministic_algorithms: Optional[bool] = None,
    additional_settings: Optional[Union[Sequence[Callable[[int], Any]], Callable[[int], Any]]] = None,
) -> None:
    """
    Set random seed for modules to enable or disable deterministic training.

    Args:
        seed: the random seed to use; the default is np.iinfo(np.int32).max.
            It is recommended to set a large seed, i.e., a number that has a good balance
            of 0 and 1 bits. Avoid having many 0 bits in the seed.
            If set to None, deterministic training is disabled.
        use_deterministic_algorithms: Set whether PyTorch operations must use "deterministic" algorithms.
        additional_settings: additional settings that need to set random seed.

    Note:

        This function will not affect the randomizable objects in :py:class:`monai.transforms.Randomizable`, which
        have independent random states. For those objects, the ``set_random_state()`` method should be used to
        ensure the deterministic behavior (alternatively, :py:class:`monai.data.DataLoader` by default sets the seeds
        according to the global random state, please see also: :py:class:`monai.data.utils.worker_init_fn` and
        :py:class:`monai.data.utils.set_rnd`).
    """
    if seed is None:
        # cast to 32 bit seed for CUDA
        seed_ = torch.default_generator.seed() % MAX_SEED
        torch.manual_seed(seed_)
    else:
        seed = int(seed) % MAX_SEED
        torch.manual_seed(seed)

    global _seed
    _seed = seed
    random.seed(seed)
    np.random.seed(seed)

    if additional_settings is not None:
        additional_settings = ensure_tuple(additional_settings)
        for func in additional_settings:
            func(seed)

    if torch.backends.flags_frozen():
        warnings.warn("PyTorch global flag support of backends is disabled, enable it to set global `cudnn` flags.")
        torch.backends.__allow_nonbracketed_mutation_flag = True

    if seed is not None:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:  # restore the original flags
        torch.backends.cudnn.deterministic = _flag_deterministic
        torch.backends.cudnn.benchmark = _flag_cudnn_benchmark
    if use_deterministic_algorithms is not None:
        if hasattr(torch, "use_deterministic_algorithms"):  # `use_deterministic_algorithms` is new in torch 1.8.0
            torch.use_deterministic_algorithms(use_deterministic_algorithms)
        elif hasattr(torch, "set_deterministic"):  # `set_deterministic` is new in torch 1.7.0
            torch.set_deterministic(use_deterministic_algorithms)  # type: ignore
        else:
            warnings.warn("use_deterministic_algorithms=True, but PyTorch version is too old to set the mode.")
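
The Note in the docstring above also applies to DataLoader workers, which carry their own random state. A minimal sketch of the standard PyTorch worker-seeding pattern (seed_worker is a hypothetical name; deriving per-worker seeds from torch.initial_seed() is the approach described in PyTorch's reproducibility notes):

import random

import numpy as np
import torch

def seed_worker(worker_id):
    # Each worker derives its seed from torch's per-worker base seed;
    # reduce it to 32 bits so numpy accepts it.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)

# loader = torch.utils.data.DataLoader(dataset, num_workers=2,
#                                      worker_init_fn=seed_worker)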
Example #15
def set_seed(cfg):
    if cfg.use_seed:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.manual_seed(cfg.seed)
        torch.cuda.manual_seed(cfg.seed)
        torch.cuda.manual_seed_all(cfg.seed)
        torch.set_deterministic(True)
Example #16
def set_seed(seed):
    print(f"setting seed to {seed}")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.set_deterministic(True)
    torch.backends.cudnn.benchmark = False
Example #17
    def __init__(self,
                 text,
                 *,
                 lr=.07,
                 image_size=512,
                 gradient_accumulate_every=1,
                 save_every=50,
                 epochs=20,
                 iterations=1050,
                 save_progress=False,
                 bilinear=False,
                 open_folder=True,
                 seed=None,
                 torch_deterministic=False,
                 max_classes=None,
                 class_temperature=2.,
                 save_date_time=False,
                 save_best=False):
        super().__init__()

        if exists(seed):
            print(f'setting seed of {seed}')
            if seed == 0:
                print(
                    'you can override this with --seed argument in the command line, or --random for a randomly chosen one'
                )
            torch.manual_seed(seed)

        if torch_deterministic:
            assert not bilinear, 'the deterministic (seeded) operation does not work with interpolation (PyTorch 1.7.1)'
            torch.set_deterministic(True)

        self.epochs = epochs
        self.iterations = iterations

        model = BigSleep(image_size=image_size,
                         bilinear=bilinear,
                         max_classes=max_classes,
                         class_temperature=class_temperature).cuda()

        self.model = model

        self.lr = lr
        self.optimizer = Adam(model.model.latents.parameters(), lr)
        self.gradient_accumulate_every = gradient_accumulate_every
        self.save_every = save_every

        self.save_progress = save_progress
        self.save_date_time = save_date_time

        self.save_best = save_best
        self.current_best_score = 0

        self.open_folder = open_folder
        self.total_image_updates = (self.epochs *
                                    self.iterations) / self.save_every

        self.set_text(text)
Example #18
def set_deterministic(deterministic: bool = True):
    # if hasattr(torch, 'backends') and hasattr(torch.backends, 'cudnn'):
    #     torch.backends.cudnn.enabled = not deterministic
    #     torch.backends.cudnn.benchmark = not deterministic
    #     torch.backends.cudnn.deterministic = deterministic
    if hasattr(torch, 'use_deterministic_algorithms'):  # PyTorch 1.8+
        torch.use_deterministic_algorithms(deterministic)
    else:  # `torch.set_deterministic` is the older (PyTorch 1.7) name
        torch.set_deterministic(deterministic)
Example #19
def set_deterministic():
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    if Version(torch.__version__) < Version("1.8"):
        torch.set_deterministic(True)
    else:
        torch.use_deterministic_algorithms(True)
Example #20
def fix_all_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.set_deterministic(True)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
Example #21
def torch_seed(s: int = 72163) -> torch.Generator:
    """
    seeding for reproducibility"""
    generator = torch.manual_seed(s)
    if False:  # Disabled for now
        torch.set_deterministic(True)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(s)
        torch.backends.cudnn.deterministic = True
    return generator
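
Because torch_seed returns the torch.Generator, the seeded generator can drive shuffling explicitly; a hedged usage sketch (dataset is a placeholder):

g = torch_seed(72163)
# loader = torch.utils.data.DataLoader(dataset, shuffle=True, generator=g)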
Example #22
    def test_equalize(self):
        torch.set_deterministic(False)
        self._test_adjust_fn(
            F.equalize,
            F_pil.equalize,
            F_t.equalize,
            [{}],
            tol=1.0,
            agg_method="max",
            dts=(None,)
        )
Example #23
def test_equalize(device):
    torch.set_deterministic(False)
    check_functional_vs_PIL_vs_scripted(
        F.equalize,
        F_pil.equalize,
        F_t.equalize,
        {},
        device,
        dtype=None,
        tol=1.0,
        agg_method="max",
    )
Example #24
def make_deterministic(seed):
    np.random.seed(seed)
    torch.manual_seed(seed)
    # see https://github.com/pytorch/pytorch/issues/47672
    cuda_version = torch.version.cuda
    if cuda_version is not None and float(cuda_version) >= 10.2:
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
    else:
        torch.set_deterministic(True)  # not all operations support this
    # cuDNN determinism only affects convolutions, so this is unproblematic here
    torch.backends.cudnn.deterministic = True
Example #25
    def _init_deterministic(self, deterministic: bool) -> None:
        self.deterministic = deterministic
        if _TORCH_GREATER_EQUAL_1_8:
            torch.use_deterministic_algorithms(deterministic)
        else:
            torch.set_deterministic(deterministic)
        if deterministic:
            # fixing non-deterministic part of horovod
            # https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
            os.environ["HOROVOD_FUSION_THRESHOLD"] = str(0)
            # https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
            os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
Example #26
    def __init__(self, model, seed, **kwargs):
        super().__init__(**kwargs, seed=seed, model=model)
        # set deterministic behavior
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
        seed_everything(seed)

        if re.match('^vgg.*', model.lower()) is not None:
            log.warning("VGG requires non-deterministic implementation, "
                        "disabling pytorch determinism...")
            return

        torch.set_deterministic(True)
        torch.backends.cudnn.deterministic = True
Example #27
    def __init__(self, value: int, cuda: bool = False):
        self.value = value
        self.cuda = cuda

        self.no_side_effect = False
        if self.no_side_effect:
            self._last_seed = torch.initial_seed()
        numpy.random.seed(self.value)
        torch.manual_seed(self.value)
        if False:  # Disabled for now
            torch.set_deterministic(True)
        if self.cuda:
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
Example #28
def setup_cudnn(deterministic: bool = False, use_gpu: bool = torch.cuda.is_available(), seed: Union[int, Tuple[int]] = None):
    """ Setup CUDNN backend and Random Number Generators to given seed(s).  
    Args:
        - deterministic: Set to `True` for deterministic/reproducible results: Makes training procedure reproducible by calling `torch.set_deterministic` (may have small performance impact).  
        - seed: Seed(s) argument which can either be a single seed passed to `deepcv.utils.set_seeds` or a Tuple of seeds which will be passed to `deepcv.utils.set_each_seeds` as positional seed arguments 
    .. See `deepcv.utils.set_seeds` and [`torch.set_deterministic` torch documentation](https://pytorch.org/docs/stable/generated/torch.set_deterministic.html) for more details.  
    """
    torch.set_deterministic(deterministic)  # (PyTorch 1.7+)
    if use_gpu:
        torch.backends.cudnn.benchmark = use_gpu and not torch.backends.cudnn.deterministic
        torch.backends.cudnn.fastest = use_gpu  # Disable this if memory issues
    if isinstance(seed, int):
        set_seeds(seed)
    else:
        # Assume seed contains a tuple of seeds to be given as `deepcv.utils.set_each_seeds` positional arguments
        set_each_seeds(seed)
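
A hedged usage sketch for setup_cudnn, covering both seed forms named in the docstring (the concrete seed values are illustrative):

setup_cudnn(deterministic=True, seed=42)         # single int -> deepcv.utils.set_seeds
setup_cudnn(deterministic=True, seed=(1, 2, 3))  # tuple -> deepcv.utils.set_each_seeds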
Example #29
@contextlib.contextmanager
def set_deterministic(seed: int = None, by_rank: bool = False):
    """ Set the seed of `torch`, `random` and `numpy` to `seed` to make runs deterministic.
    Because of CUDA limitations, however, this may not make everything deterministic.
    """

    with set_seed(seed, by_rank):
        if seed is not None:
            if hasattr(torch, "set_deterministic"):
                torch.set_deterministic(True)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            logger.info(
                "Deterministic mode set. Some GPU computations might still be "
                "non-deterministic, and performance may be affected.")
        yield
    if hasattr(torch, "set_deterministic"):
        torch.set_deterministic(False)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    logger.info("Back to non-deterministic.")
Example #30
def set_all_seeds(seed=42, deterministic_cudnn=True):
    """
    Setting multiple seeds to make runs reproducible.
    Enabling "deterministic_cudnn" gives full reproducibility with CUDA, but might slow down training

    Args:
    param seed: number to use as seed
    type deterministic_cudnn: bool
    return: None
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"  # or ":16:8"
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic_cudnn:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.set_deterministic(True)
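
Several of the examples above (Nos. 14, 18, 19, and 25) guard this call because torch.set_deterministic, added in PyTorch 1.7, was renamed torch.use_deterministic_algorithms in 1.8. A minimal version-robust wrapper distilled from those patterns (enable_determinism is a hypothetical name, not part of any library shown here):

import torch

def enable_determinism(flag: bool = True) -> None:
    # Prefer the current API (PyTorch 1.8+); fall back to the deprecated 1.7 name.
    if hasattr(torch, "use_deterministic_algorithms"):
        torch.use_deterministic_algorithms(flag)
    elif hasattr(torch, "set_deterministic"):
        torch.set_deterministic(flag)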