help="rank of attention matrix (default: 5)") args = parser.parse_args() # Set seed for all randomness sources utils.seed(args.seed) # Set device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f"Device: {device}\n") # Load environment env = utils.make_env(args.env, args.seed) for _ in range(args.shift): env.reset() print("Environment loaded\n") # Load agent model_dir = utils.get_model_dir(args.model) agent = utils.Agent(env.observation_space, env.action_space, model_dir, device=device, argmax=args.argmax, use_memory=args.memory, use_text=args.text, hca_returns=args.hcareturns,
wandb_dir = wandb.run.dir if args.wandb is not None else None

# Set seed for all randomness sources

utils.seed(args.seed)

# Set device

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
txt_logger.info(f"Device: {device}\n")

# Load environments

envs = []
for i in range(args.procs):
    envs.append(utils.make_env(args.env, args.seed + 10000 * i))
txt_logger.info("Environments loaded\n")

# Load training status

try:
    status = utils.get_status(model_dir)
except OSError:
    status = {"num_frames": 0, "update": 0}
txt_logger.info("Training status loaded\n")

# Load observations preprocessor

if args.algo in ("attention",):
    from rl_credit.algos.attention import get_obss_preprocessor
    obs_space, preprocess_obss = get_obss_preprocessor(
args = parser.parse_args()

# Set seed for all randomness sources

utils.seed(args.seed)

# Set device

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Device: {device}\n")

# Load environments

envs = []
for i in range(args.procs):
    env = utils.make_env(args.env, args.seed + 10000 * i)
    envs.append(env)
env = ParallelEnv(envs)
print("Environments loaded\n")

# Load agent

model_dir = utils.get_model_dir(args.model)
agent = utils.Agent(env.observation_space, env.action_space, model_dir,
                    device=device, argmax=args.argmax, num_envs=args.procs,
                    use_memory=args.memory, use_text=args.text)
print("Agent loaded\n")

# Initialize logs

logs = {"num_frames_per_episode": [], "return_per_episode": []}