def main(_):
    args = default_argument_parser().parse_args()
    test_dir = args.test_data_dir
    save_dir = args.save_data_dir
    wmlu.create_empty_dir(save_dir, remove_if_exists=True)
    files = glob.glob(os.path.join(test_dir, "*.jpg"))
    m = predm.PredictModel()
    m.restoreVariables()
    m.remove_batch()

    def id_to_text(id):
        return m.trainer.category_index[id]

    for file in files:
        img = wmli.imread(file)
        img = np.expand_dims(img, axis=0)
        m.predictImages(img)
        # Copy the source image and write the predictions next to it as a VOC XML.
        save_path = os.path.join(save_dir, os.path.basename(file))
        xml_path = wmlu.change_suffix(save_path, "xml")
        shutil.copy(file, save_path)
        labels = [id_to_text(id) for id in m.res_data[RD_LABELS]]
        pvt.writeVOCXml(xml_path, m.res_data[RD_BOXES], labels)
        if RD_FULL_SIZE_MASKS in m.res_data:
            # Masks are available: also export a LabelMe JSON annotation.
            annotations = lmt.trans_odresult_to_annotations_list(m.res_data)
            json_path = wmlu.change_suffix(save_path, "json")
            lmt.save_labelme_datav1(json_path, file, img, annotations, label_to_text=id_to_text)
        img_save_path = wmlu.change_suffix(xml_path, "jpg")
        wmli.imwrite(img_save_path, m.res_data[RD_RESULT_IMAGE])
def trans_one_file(xml_file, save_dir, labels, img_suffix):
    img_file = wmlu.change_suffix(xml_file, img_suffix)
    if not osp.exists(img_file):
        print(f"Failed to find {img_file}.")
        return
    shape, bboxes, labels_names, difficult, truncated, probs = read_voc_xml(xml_file, absolute_coord=True)
    _bboxes = []
    _labels_name = []
    remove_nr = 0
    remove_labels = []
    bboxes2remove = []
    for i, l in enumerate(labels_names):
        if l in labels:
            # This label is to be removed; remember its box so the image region
            # can be blanked out as well.
            remove_nr += 1
            bboxes2remove.append(bboxes[i])
            remove_labels.append(l)
            continue
        _bboxes.append(bboxes[i])
        _labels_name.append(l)
    if remove_nr == 0:
        # Nothing removed: keep the original image/annotation pair untouched.
        wmlu.try_link(img_file, save_dir)
        shutil.copy(xml_file, save_dir)
    else:
        print(f"{wmlu.base_name(xml_file)} removed {remove_nr} labels: {remove_labels}")
        img_save_path = osp.join(save_dir, osp.basename(img_file))
        xml_save_path = osp.join(save_dir, osp.basename(xml_file))
        img = wmli.imread(img_file)
        img = wmli.remove_boxes_of_img(img, np.array(bboxes2remove).astype(np.int32))
        wmli.imwrite(img_save_path, img)
        write_voc_xml(xml_save_path, img_save_path, shape, _bboxes, _labels_name, is_relative_coordinate=False)
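# Hedged usage sketch (not part of the original script): a minimal driver that
# applies trans_one_file to every VOC XML under a directory. The function name,
# paths, and label list below are illustrative placeholders; it assumes the same
# wmlu helpers used by the functions above.
def remove_labels_from_dir(src_dir, dst_dir, labels_to_remove, img_suffix="jpg"):
    wmlu.create_empty_dir(dst_dir, remove_if_exists=False)
    for xml_file in wmlu.recurse_get_filepath_in_dir(src_dir, suffix=".xml"):
        trans_one_file(xml_file, dst_dir, labels_to_remove, img_suffix)


# Example call with placeholder paths:
# remove_labels_from_dir("/path/to/voc_src", "/path/to/voc_dst", ["ignore"])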
def writeVOCXmlByImg(img, img_save_path, bboxes, labels, difficult=None, truncated=None, probs=None,
                     is_relative_coordinate=True):
    if isinstance(bboxes, np.ndarray):
        bboxes = bboxes.tolist()
    if isinstance(labels, np.ndarray):
        labels = labels.tolist()
    if isinstance(difficult, np.ndarray):
        difficult = difficult.tolist()
    if isinstance(truncated, np.ndarray):
        truncated = truncated.tolist()
    img_shape = img.shape
    dir_path = os.path.dirname(img_save_path)
    base_name = os.path.basename(img_save_path)
    base_name = base_name[:-4] + ".xml"
    save_path = os.path.join(dir_path, base_name)
    wmli.imwrite(img_save_path, img)
    write_voc_xml(save_path, img_save_path, img_shape, bboxes, labels, difficult, truncated, probs=probs,
                  is_relative_coordinate=is_relative_coordinate)
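# Hedged usage sketch for writeVOCXmlByImg (not from the original scripts):
# save an in-memory image together with a matching .xml in the same directory.
# The path, box coordinates, and label are placeholders, and the coordinate
# ordering is assumed to match whatever write_voc_xml expects for absolute boxes.
example_img = wmli.imread("/path/to/example.jpg")
example_boxes = np.array([[10, 20, 200, 180]], dtype=np.float32)  # one placeholder box
example_labels = ["person"]
writeVOCXmlByImg(example_img, "/path/to/out/example.jpg", example_boxes, example_labels,
                 is_relative_coordinate=False)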
def trans_data(data_dir, save_dir):
    global name_to_id_dict
    wmlu.show_dict(name_to_id_dict)
    wmlu.create_empty_dir(save_dir, remove_if_exists=False)

    def name_to_id(x):
        return name_to_id_dict[x]

    ignored_labels = ["construction--barrier--ambiguous", "construction--barrier--separator"]
    data = LabelMeData(label_text2id=name_to_id, shuffle=False)
    data.read_data(data_dir)
    for i, x in enumerate(data.get_items()):
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        if len(category_ids) == 0:
            print(f"Skip {full_path}")
            continue
        # Collapse the per-instance binary masks into one single-channel label mask.
        new_mask = odm.dense_mask_to_sparse_mask(binary_mask, category_ids, default_label=255)
        # r_base_name = wmlu.base_name(full_path)
        r_base_name = f"IMG_{i+1:05d}"
        base_name = r_base_name + ".png"
        save_path = os.path.join(save_dir, base_name)
        if resize_size is not None:
            new_mask = wmli.resize_img(new_mask, resize_size, keep_aspect_ratio=True,
                                       interpolation=cv2.INTER_NEAREST)
            img = wmli.imread(full_path)
            img = wmli.resize_img(img, resize_size, keep_aspect_ratio=True)
            img_save_path = os.path.join(save_dir, r_base_name + ".jpg")
            wmli.imwrite(img_save_path, img)
        new_mask = new_mask.astype(np.uint8)
        if os.path.exists(save_path):
            print(f"WARNING: File {save_path} exists.")
        cv2.imwrite(save_path, new_mask)
        sys.stdout.write(f"\r{i}")
def main(_):
    is_training = False
    args = default_argument_parser().parse_args()
    cfg = setup(args)
    data_loader = DataLoader(cfg=cfg, is_training=is_training)
    data_args = DATASETS_REGISTRY[cfg.DATASETS.TEST]
    data, num_classes = data_loader.load_data(*data_args, batch_size=1, is_training=False)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
    cfg.MODEL.SSD.NUM_CLASSES = num_classes
    cfg.MODEL.RETINANET.NUM_CLASSES = num_classes
    cfg.MODEL.CENTERNET.NUM_CLASSES = num_classes
    cfg.MODEL.YOLACT.NUM_CLASSES = num_classes
    cfg.MODEL.FCOS.NUM_CLASSES = num_classes
    cfg.MODEL.NUM_CLASSES = num_classes
    cfg.DATASETS.NUM_CLASSES = num_classes
    cfg.freeze()
    config.set_global_cfg(cfg)
    model = PredictModel(cfg=cfg, is_remove_batch=True)
    model.restoreVariables()
    data_path = args.test_data_dir
    if os.path.isdir(data_path):
        files = wmlu.recurse_get_filepath_in_dir(data_path, suffix=".jpg")
    else:
        files = [data_path]
    save_path = args.save_data_dir
    wmlu.create_empty_dir(save_path, remove_if_exists=True)
    for file in files:
        img = wmli.imread(file)
        imgs = np.expand_dims(img, axis=0)
        res = model.predictImages(imgs)
        if RD_MASKS in res:
            r_img = odv.draw_bboxes_and_mask(img, res[RD_LABELS], res[RD_PROBABILITY], res[RD_BOXES],
                                             res[RD_MASKS], show_text=True)
        else:
            r_img = odv.bboxes_draw_on_imgv2(img, res[RD_LABELS], res[RD_PROBABILITY], res[RD_BOXES],
                                             text_fn=text_fn, show_text=True)
        name = wmlu.base_name(file)
        img_save_path = os.path.join(save_path, name + ".png")
        wmli.imwrite(img_save_path, r_img)
            if res[l, 0] < res[r, 0] or res[l, 2] < 0.1 or res[r, 2] < 0.1:
                is_good = False
                break
        head_bbox = self.get_head_pos(mpii_kps)
        if is_good and head_bbox is not None and self.kps_in_bbox(coco_kps[self.coco_idxs], head_bbox):
            res[self.coco_idxs] = coco_kps[self.coco_idxs]
        return res


if __name__ == "__main__":
    file_path = '/home/wj/ai/mldata1/crowd_pose/CrowdPose/crowdpose_train.json'
    images_dir = '/home/wj/ai/mldata1/crowd_pose/images'
    save_dir = '/home/wj/ai/mldata1/crowd_pose/tmp/vis'
    wmlu.create_empty_dir(save_dir, remove_if_exists=False)
    datas = read_crowd_pose(file_path)
    do_vis = True
    for data in datas:
        image_name, kps, bbox = data
        image = osp.join(images_dir, image_name)
        img = wmli.imread(image)
        img = odv.draw_keypoints(img, kps, no_line=True)
        t_bboxes = np.array([bbox])
        t_bboxes = odb.npchangexyorder(t_bboxes)
        img = odv.draw_bboxes(img, bboxes=t_bboxes, is_relative_coordinate=False)
        save_path = osp.join(save_dir, image_name)
        wmli.imwrite(save_path, img)
def view_data(name, save_dir, nr=20):
    print(f"View {name}")
    raw_name = name
    names = name.split("--")
    if names[0] == "void" or "ambiguous" in raw_name:
        return
    if "road" not in raw_name:
        return
    for x in names:
        save_dir = os.path.join(save_dir, x)
    wmlu.create_empty_dir(save_dir, remove_if_exists=False)
    allowed_names = [raw_name]
    NAME2ID = {}
    ID2NAME = {}

    def name_to_id(x):
        global lid
        if x in NAME2ID:
            return NAME2ID[x]
        else:
            NAME2ID[x] = lid
            ID2NAME[lid] = x
            lid += 1
            return NAME2ID[x]

    data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False, ignored_labels=None, label_map=None,
                               allowed_labels_fn=allowed_names)
    data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas"))
    i = 0
    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)

        def text_fn(classes, scores):
            return f"{ID2NAME[classes]}"

        if len(category_ids) == 0:
            continue
        wmlu.show_dict(NAME2ID)
        odv.draw_bboxes_and_maskv2(img=img, classes=category_ids, scores=None, bboxes=boxes, masks=binary_mask,
                                   color_fn=None, text_fn=text_fn, thickness=4, show_text=True, fontScale=0.8)
        base_name = os.path.basename(full_path)
        save_path = os.path.join(save_dir, base_name)
        wmli.imwrite(save_path, img)
        i += 1
        if i >= nr:
            break
def main(_):
    is_training = False
    args = default_argument_parser().parse_args()
    cfg = setup(args)
    data_loader = DataLoader(cfg=cfg, is_training=is_training)
    data_args = DATASETS_REGISTRY[cfg.DATASETS.TEST]
    data, num_classes = data_loader.load_data(*data_args, batch_size=1, is_training=False)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
    cfg.MODEL.SSD.NUM_CLASSES = num_classes
    cfg.MODEL.RETINANET.NUM_CLASSES = num_classes
    cfg.MODEL.CENTERNET.NUM_CLASSES = num_classes
    cfg.MODEL.YOLACT.NUM_CLASSES = num_classes
    cfg.MODEL.FCOS.NUM_CLASSES = num_classes
    cfg.DATASETS.NUM_CLASSES = num_classes
    cfg.freeze()
    config.set_global_cfg(cfg)
    model = PredictModel(cfg=cfg, is_remove_batch=True)
    model.restoreVariables()
    save_path = args.save_data_dir
    wmlu.create_empty_dir(save_path, remove_if_exists=True)
    metrics = COCOEvaluation(num_classes=90)
    items = eval_dataset()
    for data in items:
        full_path, shape, gt_labels, category_names, gt_boxes, binary_masks, area, is_crowd, num_annotations_skipped = data
        img = wmli.imread(full_path)
        imgs = np.expand_dims(img, axis=0)
        res = model.predictImages(imgs)
        if RD_MASKS in res:
            r_img = odv.draw_bboxes_and_mask(img, res[RD_LABELS], res[RD_PROBABILITY], res[RD_BOXES],
                                             res[RD_MASKS], show_text=True)
        else:
            r_img = odv.bboxes_draw_on_imgv2(img, res[RD_LABELS], res[RD_PROBABILITY], res[RD_BOXES],
                                             text_fn=text_fn, show_text=True)
        kwargs = {}
        kwargs['gtboxes'] = gt_boxes
        kwargs['gtlabels'] = gt_labels
        kwargs['boxes'] = res[RD_BOXES]
        kwargs['labels'] = res[RD_LABELS]
        kwargs['probability'] = res[RD_PROBABILITY]
        kwargs['img_size'] = shape
        metrics(**kwargs)
        if model.step % 100 == 0:
            metrics.show()
        name = wmlu.base_name(full_path)
        img_save_path = os.path.join(save_path, name + ".png")
        wmli.imwrite(img_save_path, r_img)
    metrics.show()
dataset.read_data("/home/wj/ai/mldata/boesemantic")
save_dir = wmlu.home_dir("ai/tmp/boe_images2")
wmlu.create_empty_dir(save_dir, remove_if_exists=False)
color_map = fill_colormap_and_names("/home/wj/ai/mldata/mapillary_vistas/config_v2.0.json")


def text_fn(l):
    if l in ID_TO_READABLE_NAME:
        return ID_TO_READABLE_NAME[l]
    else:
        return "NA"


def color_fn(l):
    return color_map[l * 3:l * 3 + 3]


legend_img = draw_legend(list(ID_TO_NAME.keys()), text_fn, img_size=(2448, 300), color_fn=color_fn)
for ifn, img, mask in dataset.get_items():
    base_name = wmlu.base_name(ifn)
    wmlu.safe_copy(ifn, save_dir)
    rgb_mask = convert_semantic_to_rgb(mask, color_map, True)
    if rgb_mask.shape[0] != legend_img.shape[0]:
        legend_img = wmli.resize_img(legend_img, (legend_img.shape[1], rgb_mask.shape[0]))
    rgb_mask = np.concatenate([rgb_mask, legend_img], axis=1)
    mask_path = os.path.join(save_dir, base_name + ".png")
    wmli.imwrite(mask_path, rgb_mask)
    mask_image = draw_semantic_on_image(img, mask, color_map, ignored_label=255)
    mask_image = np.concatenate([mask_image, legend_img], axis=1)
    mask_image_path = os.path.join(save_dir, base_name + "1.png")
    wmli.imwrite(mask_image_path, mask_image)