import numpy as np
import torch
import torch_ac


def preprocess_observation(observation_set, device=None):
    selected_frame_stacks = []
    for observation in observation_set:
        # Drop the 5 trailing scalar features, then split the flat vector into
        # (current frame + history queue + breadcrumb frames) grids.
        frames = np.reshape(observation[:-5],
                            (1 + HISTORY_QUEUE_LENGTH + breadcrumb_frames,
                             SIDE_LENGTH * SIDE_LENGTH))
        current_frame = np.reshape(frames[0], (SIDE_LENGTH, SIDE_LENGTH))
        # Remap cell codes to a fixed grey-scale palette, then normalize to [0, 1].
        current_frame[current_frame == 3] = 0
        current_frame[current_frame == 1] = 66
        current_frame[current_frame == 2] = 100
        current_frame[current_frame == 11] = 33
        current_frame = current_frame / 100
        breadcrumbs = np.reshape(frames[-1], (SIDE_LENGTH, SIDE_LENGTH)) / 20
        selected_frame_stacks.append(np.stack((current_frame, breadcrumbs), axis=-1))
    input_array = np.array(selected_frame_stacks)
    # Use the caller-supplied device rather than hard-coding "cpu".
    input_tensor = torch.tensor(input_array, device=device, dtype=torch.float)
    return torch_ac.DictList({
        "input": input_tensor,
    })
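A quick smoke test for preprocess_observation. The constants below are hypothetical stand-ins (the original project defines its own SIDE_LENGTH, HISTORY_QUEUE_LENGTH and breadcrumb_frames), and the dummy observation is just a zero array with the expected layout of flattened grids plus 5 trailing scalars.

# Hypothetical constants for illustration only.
SIDE_LENGTH = 8
HISTORY_QUEUE_LENGTH = 3
breadcrumb_frames = 1

n_cells = (1 + HISTORY_QUEUE_LENGTH + breadcrumb_frames) * SIDE_LENGTH ** 2
dummy_obs = np.zeros(n_cells + 5, dtype=np.float32)
batch = preprocess_observation([dummy_obs, dummy_obs])
print(batch.input.shape)  # torch.Size([2, 8, 8, 2]): frame and breadcrumb channels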
def preprocess_obss(obss, device=None):
    return torch_ac.DictList({
        "image": preprocess_images([obs["image"] for obs in obss], device=device),
        "text": preprocess_texts([obs["mission"] for obs in obss], vocab, device=device)
    })
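preprocess_images is called above but not defined here; a minimal sketch consistent with that call site (batching a list of image arrays into one float tensor on the target device) might look like this.

def preprocess_images(images, device=None):
    # Collapse the list into a single numpy array first; building a tensor
    # straight from a list of arrays is noticeably slower in PyTorch.
    images = np.array(images)
    return torch.tensor(images, device=device, dtype=torch.float)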
def preprocess_obss(obss, device=None):
    return torch_ac.DictList(
        {"image": preprocess_images(obss, device=device)})
def preprocess_obss(obss, device=None):
    return torch_ac.DictList({
        "text": preprocess_texts([obs["text"] for obs in obss], vocab, vocab_space,
                                 gnn=gnn, device=device, ast=tree_builder)
    })
def preprocess_obss(obss, device=None):
    return torch_ac.DictList({
        "progress_info": torch.stack(
            [torch.tensor(obs["progress_info"], dtype=torch.float) for obs in obss],
            dim=0).to(device)
    })
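All of these functions are meant to be handed to torch_ac, whose algorithm classes accept a preprocess_obss callable. A sketch of the hookup, assuming envs (a list of environments) and acmodel (an actor-critic model following torch_ac's interface) already exist:

# envs and acmodel are placeholders; only the preprocess_obss hookup matters here.
algo = torch_ac.PPOAlgo(envs, acmodel, device=device,
                        preprocess_obss=preprocess_obss)
exps, logs = algo.collect_experiences()  # raw obss pass through preprocess_obss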