def to_batch_items(self, data_dict):
    # Stack per-frame images into a single S x C x H x W tensor,
    # optionally applying color jitter during conversion.
    data_dict['images'] = torch.stack([
        tensor_from_numpy_image(i, self.use_color_jitter)
        for i in data_dict['images']
    ])  # S x C x H x W
    # Single-channel frames stack to S x H x W; insert the channel dim.
    if data_dict['images'].dim() == 3:
        data_dict['images'].unsqueeze_(1)
    data_dict['word_indices'] = torch.tensor(
        data_dict['word_indices'], dtype=torch.long)
    return data_dict
def to_batch_items(self, data_dict):
    # Same image handling as above: S x C x H x W, with the channel dim
    # added for single-channel frames.
    data_dict['images'] = torch.stack([
        tensor_from_numpy_image(i, self.use_color_jitter)
        for i in data_dict['images']
    ])  # S x C x H x W
    if data_dict['images'].dim() == 3:
        data_dict['images'].unsqueeze_(1)
    data_dict['word_indices'] = torch.tensor(
        data_dict['word_indices'], dtype=torch.long)
    data_dict['actions'] = torch.tensor(
        data_dict['actions'], dtype=torch.float32)
    # Controls and states are arbitrary objects; stringify them and pack
    # the lists to bytes so they can be carried through batching.
    data_dict['controls'] = packb([str(c) for c in data_dict['controls']])
    data_dict['states'] = packb([str(s) for s in data_dict['states']])
    return data_dict
def to_batch_items(self, data_dict):
    data_dict['images'] = torch.stack([
        tensor_from_numpy_image(i, self.use_color_jitter)
        for i in data_dict['images']
    ])  # S x C x H x W
    if data_dict['images'].dim() == 3:
        data_dict['images'].unsqueeze_(1)
    data_dict['actions'] = torch.tensor(
        data_dict['actions'], dtype=torch.float32)
    # Map the road-option enum to its string name.
    data_dict['type'] = fetch_name_from_road_option(data_dict['type'])
    data_dict['controls'] = packb([str(c) for c in data_dict['controls']])
    data_dict['states'] = packb([str(s) for s in data_dict['states']])
    # Reshape the per-step stop flags to S x 1, aligned with the actions.
    data_dict['stops'] = torch.tensor(
        data_dict['stops'], dtype=torch.float32).view(
            data_dict['actions'].size(0), 1)
    return data_dict
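# Consumers of these batch items presumably reverse the packing with
# msgpack's unpackb (packb above is assumed to be msgpack.packb). A minimal
# round-trip sketch, with hypothetical stringified control values:
from msgpack import packb, unpackb

controls = ['VehicleControl(throttle=0.5)', 'VehicleControl(throttle=0.0)']
controls_bytes = packb(controls)            # bytes, safe to carry through a batch
assert unpackb(controls_bytes) == controls  # recovered as a list of str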
def _tensor_from_numpy_image(image: np.ndarray) -> torch.Tensor:
    # Convert a single H x W x C image to a 1 x 1 x C x H x W tensor
    # (leading batch and sequence dims of 1), without color jitter.
    c = image.shape[2]
    return tensor_from_numpy_image(image, False).view(
        1, 1, c, IMAGE_HEIGHT, IMAGE_WIDTH)
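# For reference, a minimal sketch of what tensor_from_numpy_image is assumed
# to do (the real helper lives elsewhere in this repo and may differ): convert
# an H x W x C uint8 image to a C x H x W float tensor, optionally applying
# torchvision color jitter. The jitter parameters below are placeholders.
import numpy as np
import torch
from torchvision import transforms

_COLOR_JITTER = transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3)

def tensor_from_numpy_image(image: np.ndarray, use_color_jitter: bool) -> torch.Tensor:
    # H x W x C uint8 -> C x H x W float in [0, 1].
    tensor = transforms.functional.to_tensor(image)
    if use_color_jitter:
        tensor = _COLOR_JITTER(tensor)
    return tensor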