def make_args_list(n_trials, dataset_names, algorithms, n_hparams_from, n_hparams,
        steps, data_dir, task, holdout_fraction, single_test_envs, hparams):
    args_list = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                if single_test_envs:
                    all_test_envs = [
                        [i] for i in range(datasets.num_environments(dataset))]
                else:
                    all_test_envs = all_test_env_combinations(
                        datasets.num_environments(dataset))
                for test_envs in all_test_envs:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {}
                        train_args['dataset'] = dataset
                        train_args['algorithm'] = algorithm
                        train_args['test_envs'] = test_envs
                        train_args['holdout_fraction'] = holdout_fraction
                        train_args['hparams_seed'] = hparams_seed
                        train_args['data_dir'] = data_dir
                        train_args['task'] = task
                        train_args['trial_seed'] = trial_seed
                        train_args['seed'] = misc.seed_hash(dataset,
                            algorithm, test_envs, hparams_seed, trial_seed)
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
def make_args_list(n_trials, dataset_names, algorithms, n_hparams, steps,
        data_dir, hparams):
    args_list = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                all_test_envs = all_test_env_combinations(
                    datasets.NUM_ENVIRONMENTS[dataset])
                for test_envs in all_test_envs:
                    for hparams_seed in range(n_hparams):
                        train_args = {}
                        train_args['dataset'] = dataset
                        train_args['algorithm'] = algorithm
                        train_args['test_envs'] = test_envs
                        train_args['hparams_seed'] = hparams_seed
                        train_args['data_dir'] = data_dir
                        train_args['trial_seed'] = trial_seed
                        train_args['seed'] = misc.seed_hash(dataset,
                            algorithm, test_envs, hparams_seed, trial_seed)
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
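# Illustrative usage sketch (not part of the original code): how a sweep launcher
# might call the fuller make_args_list variant defined first above. The dataset,
# algorithm, path, and fraction values here are hypothetical placeholders.
example_args_list = make_args_list(
    n_trials=3,
    dataset_names=['RotatedMNIST'],
    algorithms=['ERM'],
    n_hparams_from=0,
    n_hparams=20,
    steps=None,
    data_dir='/path/to/data',
    task='domain_generalization',
    holdout_fraction=0.2,
    single_test_envs=False,
    hparams=None)
for train_args in example_args_list[:3]:
    # each entry fully specifies one training run, including a deterministic seed
    print(train_args['dataset'], train_args['algorithm'],
          train_args['test_envs'], train_args['seed'])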
def _hparam(name, default_val, random_val_fn):
    """Define a hyperparameter. random_val_fn takes a RandomState and
    returns a random hyperparameter value."""
    assert(name not in hparams)
    random_state = np.random.RandomState(
        misc.seed_hash(random_seed, name))
    hparams[name] = (default_val, random_val_fn(random_state))
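# Illustrative only: how _hparam is typically called inside the registry to record a
# (default value, randomly sampled value) pair per hyperparameter. The names and
# ranges below are example values, not necessarily the defaults of this codebase.
_hparam('lr', 5e-5, lambda r: 10 ** r.uniform(-5, -3.5))
_hparam('weight_decay', 0., lambda r: 10 ** r.uniform(-6, -2))
_hparam('batch_size', 32, lambda r: int(2 ** r.uniform(3, 5.5)))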
print("\tCUDA: {}".format(torch.version.cuda)) print("\tCUDNN: {}".format(torch.backends.cudnn.version())) print("\tNumPy: {}".format(np.__version__)) print("\tPIL: {}".format(PIL.__version__)) print('Args:') for k, v in sorted(vars(args).items()): print('\t{}: {}'.format(k, v)) if args.hparams_seed == 0: hparams = hparams_registry.default_hparams(args.algorithm, args.dataset) else: hparams = hparams_registry.random_hparams( args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed, args.trial_seed)) if args.hparams: hparams.update(json.loads(args.hparams)) print('HParams:') for k, v in sorted(hparams.items()): print('\t{}: {}'.format(k, v)) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if torch.cuda.is_available(): device = "cuda"
uda = []
for env_i, env in enumerate(envs):  # tfm_idx is assumed to come from the enclosing augmentation-transform loop
    if env_i in args.test_envs:
        out, in_ = misc_aug.split_dataset(env,
            int(len(env) * args.holdout_fraction),
            misc_aug.seed_hash(args.trial_seed, env_i))
        outs_.append(out)
        ins_.append(in_)
        in_val, in_ = misc_aug.split_dataset(in_,
            int(len(in_) * args.holdout_fraction),
            misc_aug.seed_hash(args.trial_seed, env_i))
        in_vals.append(in_val)
    else:  # train env
        out, in_ = misc.split_dataset(env,
            int(len(env) * args.holdout_fraction),
            misc.seed_hash(args.trial_seed, env_i))
        in_val, in_ = misc_aug.split_dataset(in_,
            int(len(in_) * args.holdout_fraction),
            misc_aug.seed_hash(args.trial_seed, env_i))
        in_vals.append(in_val)
        if tfm_idx == 0:
            # append only the datasets without our out_augs as the in splits
            ins_.append(in_)
            # make sure no augmentation on out for train env val
            outs_.append(out)

    if env_i in args.test_envs:
        uda, in_ = misc_aug.split_dataset(in_,
            int(len(in_) * args.uda_holdout_fraction),
            misc_aug.seed_hash(args.trial_seed, env_i))
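# Illustrative only: a sketch of the split_dataset helper assumed by the code above
# (misc.split_dataset / misc_aug.split_dataset). It shuffles indices with a fixed seed
# and returns (first n examples, remaining examples); the real helpers may wrap the
# subsets differently, e.g. to attach augmentations to one side of the split.
import numpy as np
from torch.utils.data import Subset

def split_dataset(dataset, n, seed=0):
    """Return a deterministic random split: n examples, then the rest."""
    assert n <= len(dataset)
    keys = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(keys)
    return Subset(dataset, keys[:n]), Subset(dataset, keys[n:])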
print("\tCUDA: {}".format(torch.version.cuda)) print("\tCUDNN: {}".format(torch.backends.cudnn.version())) print("\tNumPy: {}".format(np.__version__)) print("\tPIL: {}".format(PIL.__version__)) print('Args:') for k, v in sorted(vars(args).items()): print('\t{}: {}'.format(k, v)) if args.hparams_seed == 0: hparams = hparams_registry.default_hparams(args.algorithm, args.dataset) else: # seed_hash(hparams_seed, 0) is for backwards-compatibility hparams = hparams_registry.random_hparams( args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed, 0)) if args.hparams: hparams.update(json.loads(args.hparams)) print('HParams:') for k, v in sorted(hparams.items()): print('\t{}: {}'.format(k, v)) random.seed(args.trial_seed) np.random.seed(args.trial_seed) torch.manual_seed(args.trial_seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if torch.cuda.is_available(): device = "cuda"