def __getitem__(self, index):
    """Load one pose sample: (image, heatmap, down-sampled mask, PAF vecmap)."""
    img = ImageHelper.pil_open_rgb(self.img_list[index])

    mask_path = self.mask_list[index]
    if not os.path.exists(mask_path):
        # No mask file on disk: treat every pixel as valid (all ones).
        maskmap = ImageHelper.np2img(np.ones((img.size[1], img.size[0]), dtype=np.uint8))
    else:
        maskmap = ImageHelper.pil_open_p(mask_path)

    kpts, bboxes = self.__read_json_file(self.json_list[index])

    # Augment jointly so image, mask, keypoints and boxes stay aligned.
    if self.aug_transform is not None:
        if len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(
                img, mask=maskmap, kpts=kpts, bboxes=bboxes)
        else:
            img, maskmap, kpts = self.aug_transform(img, mask=maskmap, kpts=kpts)

    # Down-sample the mask to the network's output resolution.
    stride = self.configer.get('network', 'stride')
    width, height = maskmap.size
    maskmap = ImageHelper.resize(
        maskmap, (width // stride, height // stride), Image.NEAREST)
    maskmap = np.expand_dims(np.array(maskmap, dtype=np.float32), axis=2)

    heatmap = self.pose_data_utilizer.generate_heatmap(kpts=kpts, mask=maskmap)
    vecmap = self.pose_data_utilizer.generate_paf(kpts=kpts, mask=maskmap)

    if self.img_transform is not None:
        img = self.img_transform(img)

    if self.label_transform is not None:
        heatmap = self.label_transform(heatmap)
        vecmap = self.label_transform(vecmap)
        maskmap = self.label_transform(maskmap)

    return img, heatmap, maskmap, vecmap
def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Run multi-person pose estimation on a single image.

    Writes the raw BGR image, a visualization with drawn/linked keypoints,
    and a json dict of detected objects; keypoint coordinates are mapped
    back from the resized inference image to the original resolution.

    Args:
        image_path: path of the input image (opened as RGB).
        json_path: output path for the predicted keypoint json.
        raw_path: output path for the unannotated BGR image.
        vis_path: output path for the annotated visualization.
    """
    Log.info('Image Path: {}'.format(image_path))
    ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
    cur_img_rgb = ImageHelper.resize(ori_img_rgb,
                                     self.configer.get('data', 'input_size'),
                                     interpolation=Image.CUBIC)
    # Source array is RGB; convert to BGR for OpenCV drawing/writing.
    # Fix: was ImageHelper.bgr2rgb (same channel swap, but the misnamed
    # direction) — use rgb2bgr as the sibling detector test method does.
    ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)

    paf_avg, heatmap_avg = self.__get_paf_and_heatmap(cur_img_rgb)
    all_peaks = self.__extract_heatmap_info(heatmap_avg)
    special_k, connection_all = self.__extract_paf_info(
        cur_img_rgb, paf_avg, all_peaks)
    subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
    json_dict = self.__get_info_tree(cur_img_rgb, subset, candidate)

    # Rescale keypoints from the resized image back to original resolution.
    scale_x = ori_img_rgb.shape[1] / cur_img_rgb.shape[1]
    scale_y = ori_img_rgb.shape[0] / cur_img_rgb.shape[0]
    for obj in json_dict['objects']:
        for kpt in obj['keypoints']:
            if kpt[2] == -1:
                continue  # invisible keypoint: leave coordinates untouched
            kpt[0] *= scale_x
            kpt[1] *= scale_y

    image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
    image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)

    Log.info('Json Save Path: {}'.format(json_path))
    with open(json_path, 'w') as save_stream:
        save_stream.write(json.dumps(json_dict))
def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Detect objects in one image; save raw/vis images and return the json dict."""
    Log.info('Image Path: {}'.format(image_path))
    ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
    ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)

    # Preprocess: resize to the network input size, tensorize, normalize.
    inputs = ImageHelper.resize(
        ori_img_rgb, tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
    inputs = ToTensor()(inputs)
    inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                       std=self.configer.get('trans_params', 'std'))(inputs)

    with torch.no_grad():
        batch = inputs.unsqueeze(0).to(self.device)
        bbox, cls = self.det_net(batch)
        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data

    boxes, lbls, scores = self.__decode(bbox, cls)
    json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)

    image_canvas = self.det_parser.draw_bboxes(
        ori_img_bgr.copy(), json_dict,
        conf_threshold=self.configer.get('vis', 'conf_threshold'))
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)

    Log.info('Json Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
    return json_dict
def __getitem__(self, index):
    """Return the (image, label) pair at *index*, applying optional transforms."""
    img = ImageHelper.pil_open_rgb(self.img_list[index])

    # Augmentation first, then the tensor/normalization transform.
    for transform in (self.aug_transform, self.img_transform):
        if transform is not None:
            img = transform(img)

    return img, self.label_list[index]
def __getitem__(self, index):
    """Return one detection training sample: (image, box targets, label targets)."""
    image = ImageHelper.pil_open_rgb(self.img_list[index])
    cls_list, box_list = self.__read_json_file(self.json_list[index])

    # Geometric augmentation must see the boxes so they stay aligned.
    if self.aug_transform is not None:
        image, box_list = self.aug_transform(image, bboxes=box_list)

    image, box_list, cls_list = ResizeBoxes()(image, box_list, cls_list)
    box_targets, cls_targets = self.det_data_utilizer.encode(box_list, cls_list)

    if self.img_transform is not None:
        image = self.img_transform(image)

    return image, box_targets, cls_targets
def __getitem__(self, index):
    """Return one pose training sample: (image, keypoint heatmap)."""
    image = ImageHelper.pil_open_rgb(self.img_list[index])
    keypoints, boxes = self.__read_json_file(self.json_list[index])

    # Augment jointly so keypoints and boxes follow the image geometry.
    if self.aug_transform is not None:
        image, keypoints, boxes = self.aug_transform(
            image, kpts=keypoints, bboxes=boxes)

    heatmap = self.pose_data_utilizer.generate_heatmap(kpts=keypoints)

    if self.img_transform is not None:
        image = self.img_transform(image)
    if self.label_transform is not None:
        heatmap = self.label_transform(heatmap)

    return image, heatmap
def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Run multi-person pose estimation on a single image (no input resize).

    Writes the raw BGR image, a visualization with drawn/linked keypoints,
    and the predicted keypoint json.

    Args:
        image_path: path of the input image (opened as RGB).
        json_path: output path for the predicted keypoint json.
        raw_path: output path for the unannotated BGR image.
        vis_path: output path for the annotated visualization.
    """
    Log.info('Image Path: {}'.format(image_path))
    ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
    # Source array is RGB; convert to BGR for OpenCV drawing/writing.
    # Fix: was ImageHelper.bgr2rgb (same channel swap, but the misnamed
    # direction) — use rgb2bgr as the sibling detector test method does.
    ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)

    paf_avg, heatmap_avg = self.__get_paf_and_heatmap(ori_img_rgb)
    all_peaks = self.__extract_heatmap_info(heatmap_avg)
    special_k, connection_all = self.__extract_paf_info(
        ori_img_rgb, paf_avg, all_peaks)
    subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
    json_dict = self.__get_info_tree(ori_img_rgb, subset, candidate)

    image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
    image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)

    Log.info('Json Save Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
def __test_img(self, image_path, save_path):
    """Segment one image and save the label map resized to the original size."""
    image = ImageHelper.pil_open_rgb(image_path)
    ori_width, ori_height = image.size

    # Preprocess: scale to network input size, tensorize, normalize.
    tensor = Scale(size=self.configer.get('data', 'input_size'))(image)
    tensor = ToTensor()(tensor)
    tensor = Normalize(mean=self.configer.get('trans_params', 'mean'),
                       std=self.configer.get('trans_params', 'std'))(tensor)

    with torch.no_grad():
        inputs = tensor.unsqueeze(0).to(self.device)
        results = self.seg_net.forward(inputs)

    # Per-pixel argmax over the class channel yields the label map.
    label_map = results.data.cpu().numpy().argmax(axis=1)[0].squeeze()
    label_img = np.array(label_map, dtype=np.uint8)
    if not self.configer.is_empty('details', 'label_list'):
        label_img = self.__relabel(label_img)

    label_img = Image.fromarray(label_img, 'P')
    label_img = label_img.resize((ori_width, ori_height), Image.NEAREST)
    label_img.save(save_path)