def draw_final_outputs_blackwhite(img, results):
    """
    Args:
        results: [DetectionResult]
    """
    img_bw = img.mean(axis=2)
    img_bw = np.stack([img_bw] * 3, axis=2)

    if len(results) == 0:
        return img_bw

    boxes = np.asarray([r.box for r in results])

    all_masks = [r.mask for r in results]
    if all_masks[0] is not None:
        m = all_masks[0] > 0
        for m2 in all_masks[1:]:
            m = m | (m2 > 0)
        img_bw[m] = img[m]

    tags = ["{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score) for r in results]
    ret = viz.draw_boxes(img_bw, boxes, tags)
    return ret
def draw_final_outputs(img, results): """ Args: results: [DetectionResult] """ if len(results) == 0: return img # Display in largest to smallest order to reduce occlusion boxes = np.asarray([r.box for r in results]) areas = np_area(boxes) sorted_inds = np.argsort(-areas) ret = img tags = [] for result_id in sorted_inds: r = results[result_id] if r.mask is not None: ret = draw_mask(ret, r.mask) for r in results: tags.append("{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score)) ret = viz.draw_boxes(ret, boxes, tags) return ret
def draw_final_outputs(img, results, objectfile):
    """
    Args:
        results: [DetectionResult]
        objectfile: an open file; one tab-separated record is written per detection
    """
    if len(results) == 0:
        return img

    tags = []
    for r in results:
        tags.append("{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score))
        # One record per detection: class name, two placeholder fields, alpha placeholder,
        # the integer box corners (x1, y1, x2, y2), seven zero placeholders, and the score.
        fields = [
            cfg.DATA.CLASS_NAMES[r.class_id],
            '-1', '-1', '0.0',
            '{:d}'.format(int(r.box[0])),
            '{:d}'.format(int(r.box[1])),
            '{:d}'.format(int(r.box[2])),
            '{:d}'.format(int(r.box[3])),
            '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0',
            '{:.2f}'.format(r.score),
        ]
        objectfile.write('\t'.join(fields) + '\t\n')

    boxes = np.asarray([r.box for r in results])
    ret = viz.draw_boxes(img, boxes, tags)
    # ret = viz.draw_boxes(img, boxes)
    objectfile.close()

    for r in results:
        if r.mask is not None:
            ret = draw_mask(ret, r.mask)
    return ret
def draw_predictions(img, boxes, scores):
    """
    Args:
        boxes: kx4
        scores: kxC
    """
    if len(boxes) == 0:
        return img
    labels = scores.argmax(axis=1)
    scores = scores.max(axis=1)
    tags = ["{},{:.2f}".format(cfg.DATA.CLASS_NAMES[lb], score)
            for lb, score in zip(labels, scores)]
    return viz.draw_boxes(img, boxes, tags)
def draw_predictions(img, boxes, scores):
    """
    Args:
        boxes: kx4
        scores: kxC
    """
    if len(boxes) == 0:
        return img
    labels = scores.argmax(axis=1)
    scores = scores.max(axis=1)
    tags = ["{},{:.2f}".format(COCOMeta.class_names[lb], score)
            for lb, score in zip(labels, scores)]
    return viz.draw_boxes(img, boxes, tags)
def __getitem__(self, imageid):
    filename = self.cocodb.lookup[imageid]['file_name']
    I = cv2.imread(self.prefix + '/' + filename)
    obj = self._d[imageid]
    labels = ['%d:%.2f' % (ll, ss) for ll, ss in zip(obj.label, obj.score)]
    print(obj.bbox)
    # Convert (x, y, w, h) boxes to (x1, y1, x2, y2) for drawing
    boxes = [[bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]] for bb in obj.bbox]
    I = viz.draw_boxes(I, boxes, labels)
    print('total # boxes:%d' % len(boxes))
    return I
def draw_final_outputs(img, results): """ Args: results: [DetectionResult] """ if len(results) == 0: return img tags = [] for label, _, score in results: tags.append( "{},{:.2f}".format(config.CLASS_NAMES[label], score)) boxes = np.asarray([x.box for x in results]) return viz.draw_boxes(img, boxes, tags)
def draw_annotation(img, boxes, klass, is_crowd=None):
    labels = []
    assert len(boxes) == len(klass)
    if is_crowd is not None:
        assert len(boxes) == len(is_crowd)
        for cls, crd in zip(klass, is_crowd):
            clsname = config.CLASS_NAMES[cls]
            if crd == 1:
                clsname += ';Crowd'
            labels.append(clsname)
    else:
        for cls in klass:
            labels.append(config.CLASS_NAMES[cls])
    img = viz.draw_boxes(img, boxes, labels)
    return img
def draw_final_outputs(img, results): """ Args: results: [DetectionResult] """ all_boxes = [] all_tags = [] for class_id, boxes, scores in results: all_boxes.extend(boxes) all_tags.extend( ["{},{:.2f}".format(COCOMeta.class_names[class_id], sc) for sc in scores]) all_boxes = np.asarray(all_boxes) if all_boxes.shape[0] == 0: return img return viz.draw_boxes(img, all_boxes, all_tags)
def draw_mask(im, mask, box, label, alpha=0.5, color=None):
    """
    Overlay a mask on top of the image.

    Args:
        im: a 3-channel uint8 image in BGR
        mask: a binary 1-channel image of the same size
        box: the bounding box of the instance, as (x1, y1, x2, y2)
        label: text tag drawn next to the box
        alpha: blending weight of the mask color
        color: if None, will choose automatically
    """
    if color is None:
        color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]
    im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),
                  im * (1 - alpha) + color * alpha, im)
    im = im.astype('uint8')
    color_tuple = tuple([int(c) for c in color])
    im = viz.draw_boxes(im, box[np.newaxis, :], [label], color=color_tuple)
    return im
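# Hypothetical usage sketch for the draw_mask variant above. The toy image, mask,
# box and label below are made up for illustration; it assumes the same module-level
# imports used throughout these snippets (numpy as np, tensorpack.utils.viz as viz).
# Passing an explicit `color` skips the random PALETTE_RGB choice.
import numpy as np

toy_img = np.zeros((100, 100, 3), dtype='uint8')        # dummy BGR image
toy_mask = np.zeros((100, 100), dtype='uint8')
toy_mask[20:60, 30:70] = 1                               # toy binary instance mask
toy_box = np.array([30, 20, 70, 60], dtype='float32')    # x1, y1, x2, y2
vis = draw_mask(toy_img, toy_mask, toy_box, 'person,0.98', color=np.array([0, 255, 0]))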
def draw_annotation(img, boxes, klass, is_crowd=None): """Will not modify img""" labels = [] assert len(boxes) == len(klass) if is_crowd is not None: assert len(boxes) == len(is_crowd) for cls, crd in zip(klass, is_crowd): clsname = cfg.DATA.CLASS_NAMES[cls] if crd == 1: clsname += ";Crowd" labels.append(clsname) else: for cls in klass: labels.append(cfg.DATA.CLASS_NAMES[cls]) img = viz.draw_boxes(img, boxes, labels) return img
def draw_proposal_recall(img, proposals, proposal_scores, gt_boxes):
    """
    Draw the top-3 proposals for each gt.

    Args:
        proposals: NPx4
        proposal_scores: NP
        gt_boxes: NGx4
    """
    box_ious = np_iou(gt_boxes, proposals)  # NG x NP
    box_ious_argsort = np.argsort(-box_ious, axis=1)
    good_proposals_ind = box_ious_argsort[:, :3]  # for each gt, find the 3 best proposals
    good_proposals_ind = np.unique(good_proposals_ind.ravel())

    proposals = proposals[good_proposals_ind, :]
    tags = list(map(str, proposal_scores[good_proposals_ind]))
    img = viz.draw_boxes(img, proposals, tags)
    return img, good_proposals_ind
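# Minimal sketch of a pairwise IoU matrix for boxes in (x1, y1, x2, y2) format.
# This is an assumption about what the np_iou helper used above computes (it is not
# defined in this file); the function name `pairwise_iou` is made up for illustration.
import numpy as np

def pairwise_iou(boxes_a, boxes_b):
    """boxes_a: Nx4, boxes_b: Mx4 -> NxM IoU matrix."""
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # Intersection rectangle corners, broadcast to N x M
    x1 = np.maximum(boxes_a[:, None, 0], boxes_b[None, :, 0])
    y1 = np.maximum(boxes_a[:, None, 1], boxes_b[None, :, 1])
    x2 = np.minimum(boxes_a[:, None, 2], boxes_b[None, :, 2])
    y2 = np.minimum(boxes_a[:, None, 3], boxes_b[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    return inter / (area_a[:, None] + area_b[None, :] - inter)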
def draw_final_outputs(img, results): """ Args: results: [DetectionResult] """ if len(results) == 0: return img tags = [] for r in results: tags.append( "{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score)) boxes = np.asarray([r.box for r in results]) ret = viz.draw_boxes(img, boxes, tags) for r in results: if r.mask is not None: ret = draw_mask(ret, r.mask) return ret
def draw_final_outputs(img, results): """ Args: results: [DetectionResult] """ if len(results) == 0: return img tags = [] for r in results: tags.append("{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score)) boxes = np.asarray([r.box for r in results]) ret = viz.draw_boxes(img, boxes) #, tags) for r in results: if r.mask is not None: ret = draw_mask(ret, r.mask) return ret
def draw_annotation(img, boxes, klass, polygons=None, is_crowd=None):
    """Will not modify img"""
    labels = []
    assert len(boxes) == len(klass)
    if is_crowd is not None:
        assert len(boxes) == len(is_crowd)
        for cls, crd in zip(klass, is_crowd):
            clsname = cfg.DATA.CLASS_NAMES[cls]
            if crd == 1:
                clsname += ';Crowd'
            labels.append(clsname)
    else:
        for cls in klass:
            labels.append(cfg.DATA.CLASS_NAMES[cls])
    img = viz.draw_boxes(img, boxes, labels)

    if polygons is not None:
        for p in polygons:
            mask = polygons_to_mask(p, img.shape[0], img.shape[1])
            img = draw_mask(img, mask)
    return img
def draw_final_outputs(img, results, tags_on=True, bb_list_input=False):
    """
    Args:
        results: [DetectionResult]
    """
    if len(results) == 0:
        return img

    if tags_on:
        tags = []
        for r in results:
            tags.append("{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score))
    else:
        tags = None

    if bb_list_input:
        boxes = np.asarray(results)
    else:
        boxes = np.asarray([r.box for r in results])
    ret = viz.draw_boxes(img, boxes, tags)
    return ret
def draw_outputs(img, final_boxes, final_scores, final_labels, threshold=0.8):
    """
    Args:
        final_boxes: Nx4 boxes
        final_scores: N scores
        final_labels: N class ids
        threshold: minimum score for a detection to be drawn
    """
    results = [DetectionResult(*args)
               for args in zip(final_boxes, final_scores, final_labels,
                               [None] * len(final_labels))
               if args[1] > threshold]
    if len(results) == 0:
        return img

    tags = []
    for r in results:
        tags.append("{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score))
    boxes = np.asarray([r.box for r in results])
    ret = viz.draw_boxes(img, boxes, tags)
    for r in results:
        if r.mask is not None:
            ret = draw_mask(ret, r.mask)
    return ret
def draw_final_outputs(img, results, show_ids=None):
    """
    Args:
        results: [DetectionResult]
    """
    if len(results) == 0:
        return img
    if show_ids is not None:
        assert isinstance(show_ids, set)
        results = [r for r in results if r.class_id in show_ids]

    tags = []
    for r in results:
        tags.append("{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score))
    boxes = np.asarray([r.box for r in results])
    ret = viz.draw_boxes(img, boxes, tags)
    # for r in results:
    #     if r.mask is not None:
    #         ret = draw_mask(ret, r.mask)
    return ret
        except StopIteration:
            break

        orig_shape = batch_image.shape[:2]
        feed_dict = {image_P: batch_image}
        final_boxes_, final_labels_, final_probs_ = sess.run(
            [final_boxes, final_labels, final_probs], feed_dict)
        # clip_boxes presumably returns a tensor here, hence the extra sess.run
        final_boxes_ = clip_boxes(final_boxes_, orig_shape)
        final_boxes_ = sess.run(final_boxes_)
        final_boxes_ = final_boxes_.astype('int32')

        if np.any(final_boxes_):
            tags = [
                "{},{:.2f}".format(cfg.DATA.CLASS_NAMES[lb], score)
                for lb, score in zip(final_labels_, final_probs_)
            ]
            final_viz = viz.draw_boxes(batch_image, final_boxes_, tags)
            gt_viz = draw_annotation(batch_image, batch_gt_boxes, batch_gt_labels)
            img_out = np.hstack((final_viz, gt_viz))
            imageio.imwrite(os.path.join(save_path, str(iter) + ".jpg"), img_out)

            Detection = []
            for ik in range(final_boxes_.shape[0]):
                Detection.append([
                    cfg.DATA.CLASS_NAMES[final_labels_[ik]],
                    float(final_probs_[ik]),
                    final_boxes_[ik, 0], final_boxes_[ik, 1],
                    final_boxes_[ik, 2] - final_boxes_[ik, 0],
                    final_boxes_[ik, 3] - final_boxes_[ik, 1]
                ])
            Detection = np.array(Detection)
            # np.savetxt(os.path.join(save_folder, 'detections/', str(iter) + '.txt'),
            #            Detection, fmt='%s %1.2f %1.0f %1.0f %1.0f %1.0f')
def visualize_dataflow2(cfg, unlabled2017_used=True, VISPATH="./", maxvis=50):
    """Visualize the dataflow with labeled and unlabeled strong augmentation."""

    def prase_name(x):
        if not unlabled2017_used:
            return x + "-unlabeled"
        else:
            # use the coco2017 unlabeled data
            return "coco_unlabeled2017"

    def remove_no_box_data(_roidbs, filter_fn):
        num = len(_roidbs)
        _roidbs = filter_fn(_roidbs)
        logger.info(
            "Filtered {} images which contain no non-crowd groundtruth boxes. "
            "Total #images for training: {}".format(num - len(_roidbs), len(_roidbs)))
        return _roidbs

    pseudo_path = os.path.join(os.environ["PSEUDO_PATH"], "pseudo_data.npy")
    pseudo_targets = dd.io.load(pseudo_path)

    roidbs = list(
        itertools.chain.from_iterable(
            DatasetRegistry.get(x).training_roidbs() for x in cfg.DATA.TRAIN))
    roidbs_u = list(
        itertools.chain.from_iterable(
            DatasetRegistry.get(prase_name(x)).training_roidbs() for x in cfg.DATA.TRAIN))

    roidbs = remove_no_box_data(
        roidbs,
        lambda x: list(filter(lambda img: len(img["boxes"][img["is_crowd"] == 0]) > 0, x)))
    roidbs_u = remove_no_box_data(
        roidbs_u,
        lambda x: list(filter(
            lambda img: len(pseudo_targets[img["image_id"]]["boxes"]) > 0, x)))

    print_class_histogram(roidbs)
    print_class_histogram(roidbs_u)

    preprocess = TrainingDataPreprocessorSSlAug(
        cfg, confidence=cfg.TRAIN.CONFIDENCE, pseudo_targets=pseudo_targets)

    for jj, (rob, robu) in tqdm(enumerate(zip(roidbs, roidbs_u))):
        data = preprocess((rob, robu))
        nn = len(pseudo_targets[robu["image_id"]]["boxes"])
        if data is None or len(data["gt_boxes_strong"]) == 0:
            print("empty annotation, {} (original {})".format(jj, nn))
            continue

        ims = viz.draw_boxes(data["image"], data["gt_boxes"],
                             [str(a) for a in data["gt_labels"]])
        ims_t = viz.draw_boxes(
            data["image_strong"], data["gt_boxes_strong"],
            [str(a) for a in data["gt_labels_strong"][:len(data["gt_boxes_strong"])]])
        ims = cv2.resize(ims, (ims_t.shape[1], ims_t.shape[0]))
        vis = np.concatenate((ims, ims_t), axis=1)

        out_path = os.path.join(VISPATH, "result_{}.jpeg".format(jj))
        if not os.path.exists(os.path.dirname(out_path)):
            os.makedirs(os.path.dirname(out_path))
        assert cv2.imwrite(out_path, vis)

        if jj > maxvis:
            break
        for _ in tqdm(range(len(imagelist)), 'Doing Predictions:'):
            try:
                _b, _s, _l = self.session.run([self.box, self.score, self.label])
                obj = {'boxes': _b, 'labels': _l, 'scores': _s}
                ret.append(obj)
            except tf.errors.OutOfRangeError:
                break
        return ret


import tensorpack.utils.viz as viz

if __name__ == '__main__':
    with open('../../data/coco.names') as fs:
        names = fs.readlines()
    print(names[0])

    istraining = False
    model_path = '/home/zxk/AI/tensorpack/FRCNN/COCO-R50C4-MaskRCNN-Standard.npz'
    service = FRCnnService(cfg, model_path)
    imagelist = ['../../data/demo_data/611.jpg']
    result = service.predict_imagelist(imagelist)

    im = cv2.imread(imagelist[0])
    for r in result:
        # print(r['boxes'].shape, r['labels'].shape, r['scores'].shape)
        # print(r['scores'])
        labels = ['%s:%.2f' % (names[ll - 1], round(ss, 2))
                  for ll, ss in zip(r['labels'], r['scores'])]
        print(r['labels'])
        print(labels)
        im = viz.draw_boxes(im, r['boxes'], labels)
    viz.interactive_imshow(im)
def draw_final_outputs(img, results): """ Args: results: [DetectionResult] """ # new_results = [] # for r in results: # if r.score <=0.49: # new_results.append(r) # results = new_results if len(results) == 0: return img # Display in largest to smallest order to reduce occlusion boxes = np.asarray([r.box for r in results]) areas = np_area(boxes) sorted_inds = np.argsort(-areas) ret = img tags = [] new_boxes = [] # rm_lst = class_nms(results, sorted_inds) rm_lst = box_class_nms(results, sorted_inds) print("rm_lst = ", rm_lst) for result_id in sorted_inds: if result_id in rm_lst: continue r = results[result_id] # print("r = ", r) if r.mask is not None: level = str(r.class_id).split(" ")[0] if "1" in level: # color = (0, 255, 0) # color = [0.000, 255.000, 0.000] color_id = 23 # color_id = 9 elif "2" in level: color_id = 22 # color_id = 9 # color = [0.000, 255.000, 255.000] # color = (0, 255, 255) elif "3" in level: color_id = 9 # color = [0.000, 0.000, 255.000] # color = (0, 0, 255) else: color = [0.000, 255.000, 0.000] # color = (0, 255, 0) print("error level!") ret = draw_mask(ret, r.mask, color=None, color_id=color_id) for result_id in sorted_inds: if result_id in rm_lst: continue r = results[result_id] new_boxes.append(r.box) tags.append("{}, {:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score)) # for r in results: # tags.append( # "{}, {:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score)) ret = viz.draw_boxes(ret, new_boxes, tags) return ret
        images_and_shape = list(map(common.processImage, imagelist))
        images = np.array([im for im, _ in images_and_shape])
        orgin_shape = [s for _, s in images_and_shape]

        result = self.predict_416(images, batchSize, score_thresh, iou_thresh)
        for r, orgin in zip(result, orgin_shape):
            w, h = orgin
            if len(r['boxes']) == 0:
                continue
            # Rescale boxes from the 416x416 network input back to the original image
            # size, then clip them to the image boundary.
            bb = r['boxes'] * np.array([w, h, w, h]) / 416
            bb[:, [0, 2]] = np.maximum(bb[:, [0, 2]], 0)
            bb[:, [1, 3]] = np.maximum(bb[:, [1, 3]], 0)
            bb[:, [0, 2]] = np.minimum(bb[:, [0, 2]], w)
            bb[:, [1, 3]] = np.minimum(bb[:, [1, 3]], h)
            r['boxes'] = bb
        return result


import tensorpack.utils.viz as viz

if __name__ == '__main__':
    model_path = '/home/zxk/AI/tensorflow-yolov3/checkpoint/yolov3.ckpt'
    service = YoLoService(model_path)
    imagelist = ['/home/zxk/PycharmProjects/deepAI1/daily/8/DeepLearning/myproject/yolo3/data/demo_data/611.jpg']
    result = service.predict_imagelist(imagelist)
    # for r in result:
    #     print(r['boxes'].shape, r['labels'].shape, r['scores'].shape)
    #     print(r['boxes'])

    img = cv2.imread('/home/zxk/AI/coco/val2017/000000579893.jpg')
    bbox = service.predict(img)
    img = viz.draw_boxes(img, bbox['boxes'])
    viz.interactive_imshow(img)