def on_batch_end(self, state: RunnerState):
    names = state.input[self.outpath_key]
    images = state.input[self.input_key].cpu()
    # convert the tensor batch to uint8 numpy images before writing to disk
    images = tensor_to_ndimage(images, dtype=np.uint8)

    for image, name in zip(images, names):
        fname = self.get_image_path(state, name, self.filename_suffix)
        imageio.imwrite(fname, image)
def apply(self, img: torch.Tensor, **params) -> np.ndarray:
    """Apply the transform: convert a tensor image to a numpy image"""
    if len(img.shape) == 2:
        # add a channel dimension to grayscale (H, W) images
        img = img.unsqueeze(0)
    return tensor_to_ndimage(
        img,
        denormalize=self.denormalize,
        move_channels_dim=self.move_channels_dim,
    )
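For context, a minimal, self-contained sketch of the kind of transform the `apply` method above could belong to, assuming an albumentations-style `ImageOnlyTransform`; the class name `TensorToImage`, its constructor arguments, and the `catalyst.utils` import path are assumptions, not taken from the original code.

import numpy as np
import torch
from albumentations import ImageOnlyTransform
from catalyst.utils import tensor_to_ndimage  # assumed import path


class TensorToImage(ImageOnlyTransform):
    """Hypothetical wrapper: CHW tensor image -> HWC numpy image."""

    def __init__(self, denormalize: bool = False,
                 move_channels_dim: bool = True, p: float = 1.0):
        super().__init__(p=p)
        self.denormalize = denormalize
        self.move_channels_dim = move_channels_dim

    def apply(self, img: torch.Tensor, **params) -> np.ndarray:
        if len(img.shape) == 2:  # grayscale (H, W) -> (1, H, W)
            img = img.unsqueeze(0)
        return tensor_to_ndimage(
            img,
            denormalize=self.denormalize,
            move_channels_dim=self.move_channels_dim,
        )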
def on_batch_end(self, state: RunnerState):
    if self.valid_only and state.need_backward:
        return

    names = state.input[self.input_image_name_key]
    images = tensor_to_ndimage(
        state.input[self.input_image_key].cpu(), dtype=np.uint8
    )
    in_masks = state.input[self.input_mask_key].cpu().numpy()
    out_masks = state.output[self.output_mask_key]
    in_objects = state.input[self.output_objects_key]
    out_objects = state.output[self.output_objects_key]
    image_metrics = state.output[self.output_image_metrics_key]

    for image_idx, image in enumerate(images):
        name = names[image_idx]
        metrics = image_metrics[image_idx]

        assert self.iou_threshold in metrics
        vis_folders = self._get_categories(metrics[self.iou_threshold])
        if not vis_folders:
            continue

        # original
        self.save_everywhere(state, vis_folders, name, "img", image)

        # masks
        for suffix, mask in zip(
            ("m_in", "m_out"),
            (in_masks[image_idx], out_masks[image_idx]),
        ):
            for d_cls_id, d_tm_id, d_name, d_classified in zip(
                self.target_map_info.d_class_ids,
                self.target_map_info.d_tm_ids,
                self.target_map_info.d_class_names,
                self.target_map_info.d_classified,
            ):
                save_img = Visualizer.draw_hitmap(mask[d_tm_id], image)
                self.save_everywhere(
                    state, vis_folders, name, f"{d_name}.{suffix}", save_img
                )

                if d_classified and not self.detection_only:
                    for c_name, c_tm_id in zip(
                        self.target_map_info.get_c_names(d_cls_id),
                        self.target_map_info.get_c_tm_ids(d_cls_id),
                    ):
                        save_img = Visualizer.draw_hitmap(
                            mask[c_tm_id], image
                        )
                        self.save_everywhere(
                            state,
                            vis_folders,
                            name,
                            f"{c_name}.{suffix}",
                            save_img,
                        )

        # objects
        for suffix, objects in zip(
            ("o_in", "o_out"),
            (in_objects[image_idx], out_objects[image_idx]),
        ):
            objects_mask = self.converter.build_target_map(
                objects,
                image_size=image.shape[:2],
                for_visualization=True,
            )
            save_img = Visualizer.draw_target_on_image(objects_mask, image)
            self.save_everywhere(state, vis_folders, name, suffix, save_img)
def on_batch_end(self, state: RunnerState):
    names = state.input[self.outpath_key]
    images = tensor_to_ndimage(state.input[self.input_key].cpu())
    masks = state.output[self.output_key]

    for name, image, mask in zip(names, images, masks):
        image = mask_to_overlay_image(image, mask, self.mask_strength)
        fname = self.get_image_path(state, name, self.filename_suffix)
        imageio.imwrite(fname, image)
def test_tensor_to_ndimage():
    orig_images = np.random.randint(0, 255, (2, 20, 10, 3), np.uint8)

    torch_images = torch.stack(
        [
            normalize(to_tensor(im), _IMAGENET_MEAN, _IMAGENET_STD)
            for im in orig_images
        ],
        dim=0,
    )

    byte_images = tensor_to_ndimage(torch_images, dtype=np.uint8)
    float_images = tensor_to_ndimage(torch_images, dtype=np.float32)

    assert np.allclose(byte_images, orig_images)
    assert np.allclose(float_images, orig_images / 255, atol=1e-3, rtol=1e-3)

    assert np.allclose(
        tensor_to_ndimage(torch_images[0]),
        orig_images[0] / 255,
        atol=1e-3,
        rtol=1e-3,
    )
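The round trip above works because `tensor_to_ndimage` is expected to undo the mean/std normalization and move channels back to HWC. A minimal sketch of that inverse step (not the library implementation, and assuming the standard ImageNet statistics behind `_IMAGENET_MEAN`/`_IMAGENET_STD`):

import numpy as np
import torch

_IMAGENET_MEAN = (0.485, 0.456, 0.406)  # assumed standard ImageNet statistics
_IMAGENET_STD = (0.229, 0.224, 0.225)


def denormalize_chw_to_hwc(img: torch.Tensor) -> np.ndarray:
    """Hypothetical helper: invert ImageNet normalization, return a float HWC image."""
    mean = torch.tensor(_IMAGENET_MEAN).view(-1, 1, 1)
    std = torch.tensor(_IMAGENET_STD).view(-1, 1, 1)
    img = img * std + mean               # undo (x - mean) / std
    img = img.clamp(0, 1)                # guard against float round-off
    return img.permute(1, 2, 0).numpy()  # CHW -> HWC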
def on_batch_end(self, state: RunnerState):
    names = state.input[self.outpath_key]
    images = tensor_to_ndimage(state.input[self.input_key].cpu())

    # masks can come either from the batch input or from the model output
    if self.mask_in_input:
        masks = state.input[self.mask_key].cpu().squeeze(1).numpy()
    else:
        masks = state.output[self.mask_key]

    for name, image, mask in zip(names, images, masks):
        image = Visualizer.draw_target_on_image(mask, image, self.mask_strength)
        fname = self.get_image_path(state, name, self.filename_suffix)
        imageio.imwrite(fname, image)
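Outside of a runner, the conversion-and-write step shared by the callbacks above boils down to the following self-contained sketch; the torchvision imports, the `catalyst.utils` import path, the ImageNet statistics, and the output file names are assumptions for illustration.

import imageio
import numpy as np
import torch
from torchvision.transforms.functional import normalize, to_tensor
from catalyst.utils import tensor_to_ndimage  # assumed import path

_IMAGENET_MEAN = (0.485, 0.456, 0.406)
_IMAGENET_STD = (0.229, 0.224, 0.225)

# Fake "model input": a normalized NCHW batch built from uint8 HWC images.
raw = np.random.randint(0, 255, (4, 64, 64, 3), np.uint8)
batch = torch.stack(
    [normalize(to_tensor(im), _IMAGENET_MEAN, _IMAGENET_STD) for im in raw]
)

# Back to uint8 HWC images and onto disk, as in the on_batch_end hooks above.
images = tensor_to_ndimage(batch.cpu(), dtype=np.uint8)
for i, image in enumerate(images):
    imageio.imwrite(f"sample_{i}.png", image)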