def score_chipmatch_coverage(ibs, qaid, chipmatch, qreq, method=0):
    """Score a chipmatch by reranking a cheap prescore shortlist with coverage.

    A fast 'csum' prescore orders all candidates; the top ``nShortlist``
    indexed annotations are then rescored with the (more expensive)
    match-coverage measure.  Returns aid2_score: a list indexed by aid,
    nonzero only for reranked candidates.
    """
    # THIS IS TERRIBLE AWFUL LOCAL IMPORT
    from ibeis.model.hots import matching_functions as mf
    prescore_method = 'csum'
    nShortlist = 100
    indexed_daids = set(qreq._daids)
    (aid2_fm, aid2_fs, aid2_fk) = chipmatch
    # Cheap prescore over every candidate, best first
    aid2_prescore = mf.score_chipmatch(ibs, qaid, chipmatch, prescore_method, qreq)
    sorted_aids = aid2_prescore.argsort()[::-1]
    # Only allow indexed aids to be in the top results
    candidate_aids = [aid for aid in sorted_aids if aid in indexed_daids]
    nRerank = min(len(candidate_aids), nShortlist)
    aid2_score = [0] * len(aid2_fm)
    mark_progress, end_progress = utool.progress_func(
        nRerank, flush_after=10, lbl='[cov] Compute coverage')
    for rank in range(nRerank):
        mark_progress(rank)
        aid2 = candidate_aids[rank]
        # Rescore this candidate using its feature matches / scores
        aid2_score[aid2] = get_match_coverage_score(
            ibs, qaid, aid2, aid2_fm[aid2], aid2_fs[aid2], method=method)
    end_progress()
    return aid2_score
def warp_srcimg_to_kpts(fx2_kp, srcimg, chip_shape, fx2_score=None, **kwargs):
    """Accumulate a score-weighted warp of ``srcimg`` at every keypoint.

    For each keypoint, ``srcimg`` (presumably a gaussian patch — confirm at
    call site) is scaled by that feature's score, warped into the (scaled)
    chip frame, and combined into a running per-pixel maximum.

    Args:
        fx2_kp: sequence of keypoints (one affine frame per feature)
        srcimg: patch image warped onto each keypoint location
        chip_shape: (height, width) of the chip being covered
        fx2_score: optional per-feature weights; defaults to all ones
        **kwargs: 'scale_factor' (or legacy 'scale_Factor') overrides
            SCALE_FACTOR_DEFAULT

    Returns:
        float32 ndarray of shape (h, w) — the coverage image — or None
        when there are no keypoints.
    """
    if len(fx2_kp) == 0:
        return None
    if fx2_score is None:
        fx2_score = np.ones(len(fx2_kp))
    # FIX: this option was read only under the misspelled key 'scale_Factor',
    # so callers passing the conventional 'scale_factor' were silently
    # ignored.  Accept the correct key first; keep the old key for
    # backward compatibility.
    scale_factor = kwargs.get(
        'scale_factor', kwargs.get('scale_Factor', SCALE_FACTOR_DEFAULT))
    # Build destination image
    (h, w) = list(map(int, (chip_shape[0] * scale_factor,
                            chip_shape[1] * scale_factor)))
    dstimg = np.zeros((h, w), dtype=np.float32)
    dst_copy = dstimg.copy()  # reusable scratch buffer for cv2.warpAffine
    src_shape = srcimg.shape
    # Build keypoint transforms
    fx2_M = build_kpts_transforms(fx2_kp, (h, w), src_shape, scale_factor)
    # cv2 warp flags
    dsize = (w, h)
    flags = cv2.INTER_LINEAR  # cv2.INTER_LANCZOS4
    borderMode = cv2.BORDER_CONSTANT
    # mark progress
    mark_progress, end_progress = utool.progress_func(
        len(fx2_M), flush_after=20, mark_after=1000, lbl='coverage warp ')
    # For each keypoint warp a gaussian scaled by the feature score
    # into the image
    for count, (M, score) in enumerate(zip(fx2_M, fx2_score)):
        # FIX: progress was previously marked twice per iteration
        # (before and after the warp), double-counting; mark once.
        mark_progress(count)
        # NOTE: the old code transposed the warp result and then transposed
        # it back before stacking; the two .T cancel, so both are dropped.
        warped = cv2.warpAffine(srcimg * score, M, dsize, dst=dst_copy,
                                flags=flags, borderMode=borderMode,
                                borderValue=0)
        # Combine via per-pixel max so overlapping keypoints don't oversum
        dstimg = np.dstack((warped, dstimg)).max(axis=2)
    end_progress()
    return dstimg
def detect_species_bboxes(src_gpath_list, species, quick=True, use_chunks=False, **detectkw): """ Generates bounding boxes for each source image For each image yeilds a list of bounding boxes """ nImgs = len(src_gpath_list) print('[detect.rf] Begining %s detection' % (species,)) detect_lbl = 'detect %s ' % species mark_prog, end_prog = utool.progress_func(nImgs, detect_lbl, flush_after=1) detect_config = _get_detect_config(**detectkw) detector, forest = _get_detector(species, quick=quick) detector.set_detect_params(**detect_config) dst_gpath_list = [splitext(gpath)[0] for gpath in src_gpath_list] # FIXME: Doing this in a generator may cause unnecessary page-faults # Maybe there is a better way of doing this, or generating results # in batch. It could be a utool batch serial process chunksize = 8 use_chunks_ = use_chunks and nImgs >= chunksize if use_chunks_: print('[rf] detect in chunks') pathtup_iter = zip(src_gpath_list, dst_gpath_list) for ic, chunk in enumerate(utool.ichunks(pathtup_iter, chunksize)): src_gpath_list = [tup[0] for tup in chunk] dst_gpath_list = [tup[1] for tup in chunk] mark_prog(ic * chunksize) results_list = detector.detect_many(forest, src_gpath_list, dst_gpath_list) for results in results_list: bboxes = [(minx, miny, (maxx - minx), (maxy - miny)) for (centx, centy, minx, miny, maxx, maxy, confidence, supressed) in results if supressed == 0] #x_arr = results[:, 2] #y_arr = results[:, 3] #w_arr = results[:, 4] - results[:, 2] #h_arr = results[:, 5] - results[:, 3] #bboxes = np.hstack((x_arr, y_arr, w_arr, h_arr)) # Unpack unsupressed bounding boxes confidences = [confidence for (centx, centy, minx, miny, maxx, maxy, confidence, supressed) in results if supressed == 0] if len(results) > 0: image_confidence = max([float(result[6]) for result in results]) else: image_confidence = 0.0 yield bboxes, confidences, image_confidence else: print('[rf] detect one image at a time') pathtup_iter = izip(src_gpath_list, dst_gpath_list) for ix, (src_gpath, 
dst_gpath) in enumerate(pathtup_iter): mark_prog(ix) results = detector.detect(forest, src_gpath, dst_gpath) bboxes = [(minx, miny, (maxx - minx), (maxy - miny)) for (centx, centy, minx, miny, maxx, maxy, confidence, supressed) in results if supressed == 0] confidences = [confidence for (centx, centy, minx, miny, maxx, maxy, confidence, supressed) in results if supressed == 0] if len(results) > 0: image_confidence = max([float(result[6]) for result in results]) else: image_confidence = 0.0 yield bboxes, confidences, image_confidence end_prog()