def cx2_(cm, cx_list, *dynargs):
    '''Convenience accessor: fetch several chip properties in one call.

    Each name in dynargs selects one property for cx_list; the results are
    returned as a list in the same order. Unknown names produce the
    placeholder string '__UNFILLED__'.
    '''
    # Validate the requested chip indexes first (cid 0 marks an empty slot)
    cid = cm.cx2_cid[cx_list]
    invalid_x = pylab.find(cid == 0)
    if len(invalid_x) > 0:
        logerr('Requested invalid cxs: ' + str(cx_list[invalid_x]))
    # Property-name -> fetcher dispatch table
    getters = {
        'cx':    lambda: cx_list,
        'cid':   lambda: cm.cx2_cid[cx_list],
        'nid':   lambda: cm.cx2_nid(cx_list),
        'gid':   lambda: cm.cx2_gid(cx_list),
        'chip':  lambda: cm.cx2_chip(cx_list),
        'name':  lambda: cm.cx2_name(cx_list),
        'gname': lambda: cm.cx2_gname(cx_list),
    }
    to_return = []
    for arg in dynargs:
        if arg in getters:
            to_return.append(getters[arg]())
        else:
            to_return.append('__UNFILLED__')  # mark unfilled requests
    return to_return
def draw(uim):
    '''Tells the HotSpotterAPI to draw the current selection in the current
    mode. It will automatically switch tabs to the current view.'''
    cm, gm = uim.hs.get_managers('cm', 'gm')
    state = uim.state
    if state == 'splash_view':
        uim.hs.dm.show_splash()
    elif state == 'annotate':
        # Annotation draws the selected image without switching tabs
        if gm.is_valid(uim.sel_gid):
            uim.hs.dm.show_image(gm.gx(uim.sel_gid))
    elif state == 'chip_view':
        if cm.is_valid(uim.sel_cid):
            uim.hs.dm.show_chip(cm.cx(uim.sel_cid))
            # block_draw prevents the tab-change signal from re-entering draw()
            uim.select_tab('chip', block_draw=True)
    elif state == 'image_view':
        if gm.is_valid(uim.sel_gid):
            uim.hs.dm.show_image(gm.gx(uim.sel_gid))
            uim.select_tab('image', block_draw=True)
    elif state == 'result_view':
        if uim.sel_res != None:
            logdbg('Drawing Query Results')
            uim.hs.dm.show_query(uim.sel_res)
            uim.select_tab('result', block_draw=True)
    else:
        logerr('I dont know how to draw in state: ' + str(uim.state))
def get_result_rank_histogram(em):
    '''Return a histogram of ground-truth match ranks over all run queries.

    Bin r counts the queries whose best ground-truth match appeared at
    rank r in the sorted results (rank 0 is unused; the last bin counts
    queries whose ground truth never appeared at all).
    '''
    if em.cx2_res is None: logerr('You cant get results on unrun experiments')
    cm,nm,am = em.hs.get_managers('cm','nm','am')
    # gt hist shows how often a chip is at rank X
    # opt = optimistic rank (best ground-truth position, precision-like)
    # pes = pessimistic rank (worst position, shifted; recall-like)
    rank_hist_opt = np.zeros(len(cm.get_valid_cxs())+2) # add 2 because we arent using 0
    rank_hist_pes = np.zeros(len(cm.get_valid_cxs())+2) # add 2 because we arent using 0
    for res in em.cx2_res:
        if res == []: continue  # queries that were never run are stored as []
        cx = res.rr.qcx  # NOTE(review): unused local
        qnid = res.rr.qnid
        # Evaluate considering the top returned chips and names
        top_cx = res.cx_sort()
        # 1-based positions of results sharing the query's name id
        gt_pos_chip = (1+pylab.find(qnid == cm.cx2_nid(top_cx)))
        # Overflow, the last position is past the num_top
        if len(gt_pos_chip) == 0:
            rank_hist_opt[-1] += 1
            rank_hist_pes[-1] += 1
        else:
            rank_hist_opt[min(gt_pos_chip)] += 1
            rank_hist_pes[max(gt_pos_chip)-len(gt_pos_chip)+1] += 1
    # NOTE(review): rank_hist_pes is computed but never returned — confirm intent
    return rank_hist_opt
def get_result_rank_histogram(em):
    '''Histogram of ground-truth ranks for every completed query.

    Index r of the returned array counts queries whose best correct match
    sat at rank r (index 0 unused, final index = ground truth not found).
    '''
    if em.cx2_res is None:
        logerr('You cant get results on unrun experiments')
    cm, nm, am = em.hs.get_managers('cm', 'nm', 'am')
    # One bin per possible rank plus an unused 0 bin and an overflow bin
    num_bins = len(cm.get_valid_cxs()) + 2
    hist_opt = np.zeros(num_bins)  # optimistic: best ground-truth rank
    hist_pes = np.zeros(num_bins)  # pessimistic: worst rank, shifted down
    for res in em.cx2_res:
        if res == []:
            continue  # query was never executed
        cx = res.rr.qcx  # query chip index (kept for parity with original)
        qnid = res.rr.qnid
        ranked_cx = res.cx_sort()
        # 1-based positions where the query's name appears in the ranking
        gt_ranks = (1 + pylab.find(qnid == cm.cx2_nid(ranked_cx)))
        if len(gt_ranks) == 0:
            # Ground truth never returned: count in the overflow bin
            hist_opt[-1] += 1
            hist_pes[-1] += 1
        else:
            hist_opt[min(gt_ranks)] += 1
            hist_pes[max(gt_ranks) - len(gt_ranks) + 1] += 1
    return hist_opt
def delete_computed_cid(cm, cid):
    '''Remove all cached/computed files belonging to one chip id.'''
    iom = cm.hs.iom
    # Guard: a vector of cids would silently build a wrong glob pattern
    if np.iterable(cid):
        logerr('this function only works for a single cid')
    logmsg('Removing CID=%d\'s computed files' % cid)
    pattern = iom.get_chip_prefix(cid, []) + '*'
    iom.remove_computed_files_with_pattern(pattern)
def restart(hs, db_dpath=None, autoload=True, save_pref_bit=True):
    '''(Re)open a database directory and rebuild all managers.

    db_dpath=None falls back to the currently open path, then to the
    smart lookup (preferences / user prompt). Optionally persists the
    chosen path and autoloads the tables.
    '''
    hs.data_loaded_bit = False
    # Reuse the already-open database path when the caller gave none
    if db_dpath is None and hs.db_dpath is not None:
        db_dpath = hs.db_dpath
    db_dpath = hs.smartget_db_dpath(db_dpath)
    # --
    hs.db_dpath = None
    if hs.is_valid_db_dpath(db_dpath):
        hs.db_dpath = db_dpath
        if save_pref_bit:
            logdbg('Setting db_dpath = ' + str(db_dpath))
            hs.core_prefs.update('database_dpath', db_dpath)
    if hs.db_dpath is None:
        logerr('Invalid Database. ' +
               'Select an existing HotSpotter, StripeSpotter database. ' +
               'To create a new database, select and empty directory. ')
    # Rebuild every manager against the (possibly new) database
    hs.gm = ImageManager(hs)
    hs.nm = NameManager(hs)
    hs.cm = ChipManager(hs)
    hs.vm = VisualModel(hs)
    hs.qm = QueryManager(hs)
    hs.em = ExperimentManager(hs)
    if autoload == True:
        hs.load_tables()
    else:
        logdbg('autoload is false.')
def draw(uim):
    '''Tells the HotSpotterAPI to draw the current selection in the current
    mode. It will automatically switch tabs to the current view.'''
    cm, gm = uim.hs.get_managers('cm', 'gm')
    #current_tab = uim.hsgui.main_skel.tablesTabWidget.currentIndex
    if uim.state in ['splash_view']:
        # Nothing selected: show the splash screen
        uim.hs.dm.show_splash()
    elif uim.state in ['annotate']:
        # Annotation mode: draw the image under annotation, no tab switch
        if gm.is_valid(uim.sel_gid):
            uim.hs.dm.show_image(gm.gx(uim.sel_gid))
    elif uim.state in ['chip_view']:
        if cm.is_valid(uim.sel_cid):
            uim.hs.dm.show_chip(cm.cx(uim.sel_cid))
            # block_draw=True: avoid re-entering draw() via the tab signal
            uim.select_tab('chip', block_draw=True)
    elif uim.state in ['image_view']:
        if gm.is_valid(uim.sel_gid):
            uim.hs.dm.show_image(gm.gx(uim.sel_gid))
            uim.select_tab('image', block_draw=True)
    elif uim.state in ['result_view']:
        if uim.sel_res != None:
            logdbg('Drawing Query Results')
            uim.hs.dm.show_query(uim.sel_res)
            uim.select_tab('result', block_draw=True)
    else:
        logerr('I dont know how to draw in state: ' + str(uim.state))
def cx2_(cm, cx_list, *dynargs): 'request chip data' 'conviencience function to get many properties' #logdbg('Requested Data: %s of CX= %s' % (str(dynargs), str(cx_list))) to_return = [] cid = cm.cx2_cid[cx_list] invalid_x = pylab.find(cid == 0) if len(invalid_x) > 0: logerr('Requested invalid cxs: ' + str(cx_list[invalid_x])) for arg in dynargs: if arg == 'cx': to_return.append(cx_list) elif arg == 'cid': to_return.append(cm.cx2_cid[cx_list]) elif arg == 'nid': to_return.append(cm.cx2_nid(cx_list)) elif arg == 'gid': to_return.append(cm.cx2_gid(cx_list)) elif arg == 'chip': to_return.append(cm.cx2_chip(cx_list)) elif arg == 'name': to_return.append(cm.cx2_name(cx_list)) elif arg == 'gname': to_return.append(cm.cx2_gname(cx_list)) else: to_return.append('__UNFILLED__') # mark unfilled requests return to_return
def change_roi(cm, cx, new_roi):
    '''Assign a new region of interest to chip cx, logging the change.

    Invalid (None) ROIs are reported through logerr. The redundant
    `assert not new_roi is None` was removed: it fired before the logerr
    check (making it dead code) and asserts are stripped under `python -O`,
    so it could not be relied on for validation anyway.
    '''
    cid = cm.cx2_cid[cx]
    logmsg('Giving cid=%d new roi: %r' % (cid, new_roi))
    if new_roi is None:
        logerr('The ROI is np.empty')
    # Invalidate any data derived from the old ROI before storing the new one
    cm.hs.on_cx_modified(cx)
    cm.cx2_roi[cx] = new_roi
def _annotate(uim, annotate_fn):
    '''Run an annotation callback while the UI sits in the 'annotate' state.

    Saves the current state, switches to 'annotate', redraws, invokes the
    callback, then restores the previous state. Returns whatever the
    callback returned.
    '''
    # An image must be selected before an ROI can be drawn on it
    if not uim.hs.gm.is_valid(uim.sel_gid):
        logerr('Select an Image before you draw an ROI')
    prev_state = uim.update_state('annotate')
    uim.draw()
    result = annotate_fn()
    # Leave annotation mode and put the UI back where it was
    uim.update_state('annotate_done')
    uim.update_state(prev_state)
    return result
def add_user_prop(cm, new_prop):
    '''Register a user-defined (string) chip property column.

    Allocates storage for every chip (initialized to '') and wires the
    property into the label map. Returns True when the property exists
    after the call (newly added or already present), False when the name
    is rejected — names may not contain commas or newlines because they
    would corrupt the saved table. (Previously success returned an
    implicit None while rejection returned False; now both paths return
    an explicit bool.)
    '''
    if new_prop not in cm.user_props.keys():
        if ',' in new_prop or '\n' in new_prop:
            logerr('Properties cannot have commas or newlines')
            return False
        # Allocate data for property
        # TODO: User prop must be a string
        cm.user_props[new_prop] = np.empty(len(cm.cx2_cid), dtype=object)
        # Add property to label map
        cm.x2_lbl[new_prop] = lambda _: cm.user_props[new_prop][_]
        # Initialize every currently-valid chip to the empty string
        for cx in iter(cm.get_valid_cxs()):
            cm.user_props[new_prop][cx] = ''
    return True
def add_user_prop(cm, new_prop):
    '''Register a user-defined (string) chip property column.

    Allocates per-chip storage initialized to '' and wires the property
    into the label map. NOTE(review): returns False when the name is
    rejected but implicitly None on success — callers should not rely on
    the return value.
    '''
    if not new_prop in cm.user_props.keys():
        # Names with commas/newlines would corrupt the saved csv table
        if ',' in new_prop or '\n' in new_prop:
            logerr('Properties cannot have commas or newlines')
            return False
        # Allocate data for property
        # TODO: User prop must be a string
        cm.user_props[new_prop] = np.empty(len(cm.cx2_cid), dtype=object)
        # Add property to label map
        cm.x2_lbl[new_prop] = lambda _: cm.user_props[new_prop][_]
        # Initialize every currently-valid chip to the empty string
        for cx in iter(cm.get_valid_cxs()):
            cm.user_props[new_prop][cx] = ''
def compute_features(am, chip):
    '''Computes features of a chip. Uses settings in AlgorithmManager.

    Returns (kpts, desc): kpts is an (N, 5) float32 array of ellipse
    keypoints (x, y, a, c, d) and desc the matching descriptor rows.

    Two bugs fixed:
      * the OpenCV detector was created from the *extractor* preference
        (`kpts_extractor`) although the log line and intent clearly name
        the detector preference;
      * the keypoint-conversion loop read `cvKp.size` — a stale variable
        left over from the gravity-vector loop — instead of the current
        loop variable `cvKP`, so every keypoint got the scale of the last
        gravity-adjusted keypoint.
    '''
    logdbg('Calling feature detector')
    external_detectors = ['heshesaff', 'heslapaff', 'heslap', 'harlap', 'dense']
    external_descriptors = ['SIFT']
    if am.algo_prefs.chiprep.kpts_detector in external_detectors:
        (kpts, desc) = am.external_feature_computers(chip)
        if am.algo_prefs.chiprep.kpts_extractor in external_descriptors:
            return (kpts, desc)
    else:
        logerr('Only External Keypoint Detectors are implemented: ' + str(external_detectors))
    logerr('Only External Keypoint Descriptors are implemented: ' + str(external_descriptors))
    # http://stackoverflow.com/questions/10764895/opencv-python-sample-error
    # http://stackoverflow.com/questions/12491022/opencv-freak-fast-retina-keypoint-descriptor
    # The following detector types are supported:
    #   FAST, STAR, SIFT (nonfree), SURF (nonfree),
    #   ORB, BRISK, MSER, GFTT (good features to track)
    #   HARRIS, Dense, SimpleBlob
    # Also: Grid, GridFAST, PyramidStar
    # see http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html
    im = cv2.cvtColor(chip, cv2.COLOR_BGR2GRAY)
    logdbg('Making detector: %r' % am.algo_prefs.chiprep.kpts_detector)
    # BUGFIX: was created from am.algo_prefs.chiprep.kpts_extractor
    cvFeatDetector = cv2.FeatureDetector_create(am.algo_prefs.chiprep.kpts_detector)
    #cvFeatDetector = cv2.PyramidAdaptedFeatureDetector(cvFeatDetector_,4)
    logdbg('Made %r, Making extractor: %r' % (cvFeatDetector, am.algo_prefs.chiprep.kpts_extractor))
    cvFeatExtractor = cv2.DescriptorExtractor_create(am.algo_prefs.chiprep.kpts_extractor)
    logdbg('Made %r, Detecting keypoints on image' % cvFeatExtractor)
    cvKpts_ = cvFeatDetector.detect(im)
    # Tinker with Keypoint: zero all orientations when gravity is assumed
    if am.algo_prefs.chiprep['use_gravity_vector']:
        for cvKp in cvKpts_:
            cvKp.angle = 0
    logdbg('Made %r, Keypoint description with %d kpts ' % (cvFeatExtractor, len(cvKpts_)))
    (cvKpts, cvDesc) = cvFeatExtractor.compute(im, cvKpts_)
    logdbg('Detected %d features ' % len(cvKpts))
    kpts = np.zeros((len(cvKpts), 5), dtype=np.float32)
    desc = np.array(cvDesc, dtype=np.uint8)
    # * Convert to representation in: M. Perdoc, O. Chum, and J. Matas. CVPR 2009
    # * Efficient representation of local geometry for large scale object retrieval
    for fx, cvKP in enumerate(cvKpts):
        (x, y) = cvKP.pt
        # BUGFIX: was cvKp.size (stale variable from the gravity loop)
        scale = (float(cvKP.size) ** 2) / 27
        detA = 1. / scale
        (a, c, d) = (detA, 0, detA)
        kpts[fx] = (x, y, a, c, d)
    return (kpts, desc)
def update_state(uim, new_state):
    '''Transition the UI state machine to new_state and return the old state.

    If a modal operation (ROI annotation or an in-flight query) is being
    abandoned, first force the matching "done" state and report the error.
    '''
    old_state = uim.state
    logdbg('State Change: from: ' + str(old_state) + ', to: ' + str(new_state))
    if old_state == 'annotate' and new_state != 'annotate_done':
        uim.state = 'annotate_done'
        logerr('Cannot enter new state while selecting an ROI. Attempting to recover')
    elif old_state == 'querying' and new_state != 'done_querying':
        uim.state = 'done_querying'
        logerr('Cannot enter new state while querying. Attempting to recover')
    uim.state = new_state
    # Keep the GUI's state label in sync
    uim.updateStateLabelSignal.emit(new_state)
    return old_state
def find_hotspotter_root_dir(iom):
    '''Locate the HotSpotter root directory (works for installed packages).

    Walks upward from this file's directory until a __HOTSPOTTER_ROOT__
    landmark file is found; errors out when the filesystem root is reached
    without finding one. Stores the result on iom._hsroot.
    '''
    hsroot = realpath(dirname(__file__))
    while True:
        root_landmark = join(hsroot, "__HOTSPOTTER_ROOT__")
        logdbg("Testing Existence:" + str(root_landmark))
        if os.path.exists(root_landmark):
            logdbg("Found the landmark")
            break
        logdbg("No landmark here")
        parent = dirname(hsroot)
        if parent == hsroot:
            # dirname() is a fixed point only at the filesystem root
            logerr("Cannot Find HotSpotter Root")
        hsroot = parent
    iom._hsroot = hsroot
def find_hotspotter_root_dir(iom):
    '''Walk up from this file's directory to find the HotSpotter root.

    The root is marked by a __HOTSPOTTER_ROOT__ landmark file; hitting the
    filesystem root without finding it is an error. Result is cached on
    iom._hsroot.
    '''
    # Find the HotSpotter root dir even in installed packages
    hsroot = realpath(dirname(__file__))
    while True:
        root_landmark = join(hsroot, '__HOTSPOTTER_ROOT__')
        logdbg('Testing Existence:'+str(root_landmark))
        if not os.path.exists(root_landmark):
            logdbg('No landmark here')
        else:
            logdbg('Found the landmark')
            break
        _newroot = dirname(hsroot)
        # dirname() stops changing only at the filesystem root
        if _newroot == hsroot:
            logerr('Cannot Find HotSpotter Root')
        hsroot = _newroot
    iom._hsroot = hsroot
def update_state(uim, new_state):
    '''Updates the state of the UI; returns the state being replaced.

    Leaving a modal state ('annotate', 'querying') by any transition other
    than its matching "done" state is treated as an interruption: the
    matching done-state is forced first and an error is reported.
    '''
    old_state = uim.state
    logdbg('State Change: from: ' + str(old_state) + ', to: ' + str(new_state))
    if old_state == 'annotate':
        if new_state != 'annotate_done':
            uim.state = 'annotate_done'
            logerr(
                'Cannot enter new state while selecting an ROI. Attempting to recover'
            )
    elif old_state == 'querying':
        if new_state != 'done_querying':
            uim.state = 'done_querying'
            logerr(
                'Cannot enter new state while querying. Attempting to recover'
            )
    uim.state = new_state
    # Keep the GUI's status label in sync with the state machine
    uim.updateStateLabelSignal.emit(new_state)
    return old_state
def smartget_db_dpath(hs, db_dpath):
    ''' Performs a smart update of the db_dpath
    Trys a number of various options to get it right

    None = Read from preferences
    ''   = Prompt the User For database

    Returns the resolved path (possibly '' if the user prompt failed).
    '''
    if db_dpath is None:  # If requested to read prefs
        db_dpath = str(hs.core_prefs.database_dpath)
    if db_dpath in [None, 'None'] or\
       not os.path.exists(db_dpath):  # Check validity
        logwarn('db_dpath='+repr(db_dpath)+' is invalid')
        db_dpath = ''
    if db_dpath == '':  # Prompt The User. TODO Move this to Facade/UIManager
        logmsg('what database should I open?')
        try:
            db_dpath = hs.uim.select_database()
        except Exception:
            # narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
            # are no longer swallowed
            logerr(' Was unable to prompt user with QT')
    return db_dpath
def batch_rename(hs, name1, name2):
    '''Rename every chip currently labeled name1 to name2.

    Errors out (via logerr) when name1 is the reserved UNIDENTIFIED label,
    does not exist, or owns no chips. Returns True on success.
    '''
    logmsg('Batch Renaming %s to %s' % (name1, name2))
    cm, nm = hs.get_managers('cm','nm')
    # Guard clauses: reserved label, unknown name, empty name
    if name1 == nm.UNIDEN_NAME():
        logerr('Cannot batch rename '+str(name1)+'. It is UNIDENTIFIED and has special meaning')
    if name1 not in nm.name2_nx.keys():
        logerr('Cannot batch rename. '+str(name1)+' does not exist')
    # Copy the list: rename_chip mutates the underlying membership list
    chip_cxs = nm.name2_cx_list(name1)[:]
    num_chips = len(chip_cxs)
    if num_chips == 0:
        logerr('Cannot batch rename. '+str(name1)+' has no chips')
    logmsg('Renaming '+str(num_chips)+' chips: '+str(chip_cxs))
    for cx in chip_cxs:
        logdbg('Batch Rename '+str(cx))
        cm.rename_chip(cx, name2)
    return True
def cx(cm, cid):
    '''Map a chip id (cid) to its internal chip index (cx).

    Reports an error through logerr when the id fails cm.is_valid before
    performing the reverse-index lookup.
    '''
    valid = cm.is_valid(cid)
    if not valid:
        logerr('CID=%s is invalid' % str(cid))
    return cm.cid2_cx[cid]
def add_chip(cm, cid, nx, gx, roi, theta, props={}, delete_prev=False):
    '''Insert a chip into the flat tables and all reverse indexes.

    cid = -1 asks the manager to assign the next free id; an already-valid
    cid is rejected. Returns the cid used, or None on rejection.
    NOTE(review): props={} is a mutable default argument (read-only here,
    so harmless, but fragile). The `lambda (a, b): ...` below is Python-2
    only tuple unpacking. Whether `logerr` raises or merely logs determines
    if execution continues past the validity checks — confirm its semantics.
    '''
    nm = cm.hs.nm
    gm = cm.hs.gm
    # Fails if cid is not available; cid = -1 means pick for you
    cx = -1
    if cid < 0:
        cid = cm.next_cid
    else:
        if cm.cid2_valid_bit(cid):  # New CID must be invalid (unused)
            logerr('CID Already in database Chip Not Added')
            logerr(
                'Offending String: (cid, nx, gx, [roi]) = (%d, %d, %d, %s)' %
                (cid, nx, gx, str(roi)))
            cid = 0
            return
    # Manage Memory: take the next free flat-table slot
    cx = cm.next_cx
    logdbg(''' Adding Chip = ( cid, nx, gx, [tl_x tl_y w h ]) ( %4d, %4d, %4d, %s) ''' % (cid, nx, gx, str('[ %4.1f %4.1f %4.1f %4.1f ]' % tuple(roi))))
    # Grow the flat arrays geometrically when the slot is out of range
    if cx >= len(cm.cx2_cid):
        curr_alloc = len(cm.cx2_cid)
        cm.chip_alloc((curr_alloc + 1) * 2 + 1)
    # Add the information to the flat table
    logdbg(' * Adding cx=' + str(cx) + ' to the tables')
    if nx == 0 or gx == 0 or len(roi) != 4:
        logerr('Chip information is invalid. Cannot add.')
    if delete_prev:
        cm.delete_computed_cid(cid)
    cm.cx2_cid[cx] = cid
    cm.cx2_nx[cx] = nx
    cm.cx2_gx[cx] = gx
    cm.cx2_roi[cx] = roi
    cm.cx2_theta[cx] = theta
    # Track the per-component maximum ROI seen so far (Python-2 tuple lambda)
    cm.max_roi = map(lambda (a, b): max(a, b), zip(cm.max_roi, roi))
    # Add This Chip To Reverse Indexing; grow cid2_cx geometrically too
    if cid >= len(cm.cid2_cx):
        idAlloc = max(cid + 1, len(cm.cid2_cx) * 2 + 1)
        logdbg('Allocating: ' + str(idAlloc) + ' more cids')
        cm.cid2_cx = np.append(cm.cid2_cx, np.zeros(idAlloc, dtype=np.uint32))
    cm.cid2_cx[cid] = cx
    nm.nx2_cx_list[nx].append(cx)
    gm.gx2_cx_list[gx].append(cx)
    # Add user props: unset ones default to '', given ones are stored
    for key in cm.user_props.keys():
        if not key in props.keys():
            cm.user_props[key][cx] = ''
    for key, val in props.iteritems():
        cm.add_user_prop(key)
        cm.user_props[key][cx] = val
    # Increment Data Counters
    cm.next_cx = max(cm.next_cx + 1, cx + 1)
    cm.next_cid = max(cm.next_cid + 1, cid + 1)
    cm.max_cx = max(cm.max_cx, cx)
    cm.max_cid = max(cm.max_cid, cid)
    cm.num_c = cm.num_c + 1
    # The visual model must be rebuilt now that the chip set changed
    cm.hs.vm.isDirty = True
    return cid
def cid(cm, cx):
    '''Map a chip index (cx) to its chip id (cid).

    Reports an error through logerr when the index fails cm.iscx_valid
    before performing the table lookup.
    '''
    valid = cm.iscx_valid(cx)
    if not valid:
        logerr('CX=%s is invalid' % str(cx))
    return cm.cx2_cid[cx]
def external_feature_computers(am, chip):
    'Write chip ; call extern executable ; read output ; return (kpts,desc)'
    logdbg('Calling external kpt detector')
    iom = am.hs.iom
    # The external binaries read PPM images from disk
    chip = Image.fromarray(chip)
    tmp_chip_fpath = iom.get_temp_fpath('tmp.ppm')
    chip.save(tmp_chip_fpath,'PPM')
    perdoch_external = ['heshesaff']
    mikolajczyk_external = ['heslapaff','dense']
    # Build the command line for whichever external detector is configured
    if am.algo_prefs.chiprep.kpts_detector in perdoch_external:
        exename = iom.get_hesaff_exec()
        outname = tmp_chip_fpath+'.hesaff.sift'
        args = '"'+tmp_chip_fpath+'"'
    elif am.algo_prefs.chiprep.kpts_detector in mikolajczyk_external:
        exename = iom.get_inria_exec()
        feature_name = am.algo_prefs.chiprep.kpts_detector
        if feature_name == 'heslapaff':
            feature_name = 'hesaff'
            suffix = 'hesaff'
        if feature_name == 'dense':
            feature_name = feature_name+' 6 6'  # dense grid step parameters
            suffix = 'dense'
        outname = tmp_chip_fpath+'.'+suffix+'.sift'
        args = '-'+feature_name+' -sift -i "'+tmp_chip_fpath+'"'
    else:
        logerr('Method %r + %r is invalid in extern_detect_kpts.m'\
            % (am.algo_prefs.chiprep.kpts_detector, am.algo_prefs.chiprep.kpts_extractor))
    cmd = exename+' '+args
    logdbg('External Executing: %r ' % cmd)
    try:
        # shell=True because exename/args are a single prebuilt command string
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        logdbg('External Execution did not throw an error')
        (out, err) = proc.communicate()
        logdbg(str(out)+' '+str(err))
        if proc.returncode != 0:
            logerr('Failed to execute '+cmd+'\n OUTPUT: '+out)
        if not os.path.exists(outname):
            logerr('The output file doesnt exist: '+outname)
        logdbg('External Output:\n'+out[:-1])
    except Exception as ex:
        # Fallback: retry once through os.system before giving up
        logwarn('An Exception occurred while calling the keypoint detector: '+str(ex))
        try:
            ret2 = os.system(cmd)
            if ret2 != 0:
                logerr(str(ex)+'\nThe backup keypoint detector didnt work either!')
        except Exception as ex2:
            logerr(str(ex2))
    # Parse the detector output: header is <ndims> then <nkpts>, then one
    # keypoint per line: 5 ellipse params followed by ndims descriptor bytes
    fid = file(outname,'r')  # NOTE(review): file() is Python-2 only
    ndims = int(fid.readline())
    nkpts = int(fid.readline())
    if ndims != 128:
        raise Exception(' These are not SIFT dexcriptors ')
    kpts = np.zeros((nkpts,5), dtype=np.float32)
    desc = np.zeros((nkpts,ndims),dtype=np.uint8)
    lines = fid.readlines()
    # SIFT descriptors are computed with a radius of r=3*np.sqrt(3*s)
    # s = (det A_i) ^ (-1/2) OR
    # s = sqrtm(inv(det(A_i)))
    for i in range(nkpts):
        nums = lines[i].split(' ')
        kpts[i,:] = np.array(map(lambda _: float(_) , nums[0:5]),dtype=np.float32)
        desc[i,:] = np.array(map(lambda _: np.uint8(_), nums[5:]),dtype=np.uint8)
    fid.close()
    return (kpts, desc)
    # NOTE(review): the original source had a stray ''' after this return,
    # apparently opening a commented-out region — confirm against the full file.
def compute_features(am, chip):
    'Computes features of a chip. Uses settings in AlgorithmManager'
    logdbg('Calling feature detector')
    external_detectors = [
        'heshesaff', 'heslapaff', 'heslap', 'harlap', 'dense'
    ]
    external_descriptors = ['SIFT']
    # Preferred path: delegate to an external detector/descriptor binary
    if am.algo_prefs.chiprep.kpts_detector in external_detectors:
        (kpts, desc) = am.external_feature_computers(chip)
        if am.algo_prefs.chiprep.kpts_extractor in external_descriptors:
            return (kpts, desc)
    else:
        logerr('Only External Keypoint Detectors are implemented: ' +
               str(external_detectors))
    logerr('Only External Keypoint Descriptors are implemented: ' +
           str(external_descriptors))
    # http://stackoverflow.com/questions/10764895/opencv-python-sample-error
    # http://stackoverflow.com/questions/12491022/opencv-freak-fast-retina-keypoint-descriptor
    # The following detector types are supported:
    #   FAST, STAR, SIFT (nonfree), SURF (nonfree),
    #   ORB, BRISK, MSER, GFTT (good features to track)
    #   HARRIS, Dense, SimpleBlob
    # Also: Grid, GridFAST, PyramidStar
    # see http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html
    im = cv2.cvtColor(chip, cv2.COLOR_BGR2GRAY)
    logdbg('Making detector: %r' % am.algo_prefs.chiprep.kpts_detector)
    # NOTE(review): this creates the detector from the *extractor* pref
    # although the log line names the detector pref — suspected bug.
    cvFeatDetector = cv2.FeatureDetector_create(
        am.algo_prefs.chiprep.kpts_extractor)
    #cvFeatDetector = cv2.PyramidAdaptedFeatureDetector(cvFeatDetector_,4)
    logdbg('Made %r, Making extractor: %r' %
           (cvFeatDetector, am.algo_prefs.chiprep.kpts_detector))
    cvFeatExtractor = cv2.DescriptorExtractor_create(
        am.algo_prefs.chiprep.kpts_extractor)
    logdbg('Made %r, Detecting keypoints on image' % cvFeatExtractor)
    cvKpts_ = cvFeatDetector.detect(im)
    # Tinker with Keypoint: zero orientations when gravity is assumed
    if am.algo_prefs.chiprep['use_gravity_vector']:
        for cvKp in cvKpts_:
            cvKp.angle = 0
            r = cvKp.size
            #scale = (r**2)/27
    logdbg('Made %r, Keypoint description with %d kpts ' %
           (cvFeatExtractor, len(cvKpts_)))
    (cvKpts, cvDesc) = cvFeatExtractor.compute(im, cvKpts_)
    logdbg('Detected %d features ' % len(cvKpts))
    kpts = np.zeros((len(cvKpts), 5), dtype=np.float32)
    desc = np.array(cvDesc, dtype=np.uint8)
    fx = 0
    # * Convert to representation in: M. Perdoc, O. Chum, and J. Matas. CVPR 2009
    # * Efficient representation of local geometry for large scale object retrieval
    for cvKP in cvKpts:
        (x, y) = cvKP.pt
        theta = cvKP.angle
        # NOTE(review): cvKp here is the stale loop variable from the gravity
        # loop above, not the current cvKP — suspected bug (every keypoint
        # gets the last gravity-loop keypoint's scale).
        scale = (float(cvKp.size)**2) / 27
        detA = 1. / (scale)
        (a, c, d) = (detA, 0, detA)
        kpts[fx] = (x, y, a, c, d)
        fx += 1
    return (kpts, desc)
def delete_computed_cid(cm, cid):
    '''Remove all cached/computed files belonging to a single chip id.

    Accepts exactly one scalar cid; an iterable would silently produce a
    wrong glob pattern, so it is rejected up front.
    '''
    iom = cm.hs.iom
    if np.iterable(cid):
        logerr('this function only works for a single cid')
    logmsg('Removing CID=%d\'s computed files' % cid)
    # Everything computed for this chip shares the same filename prefix
    cid_fname_pattern = iom.get_chip_prefix(cid, [])+'*'
    iom.remove_computed_files_with_pattern(cid_fname_pattern)
def add_chip(cm, cid, nx, gx, roi, theta, props={}, delete_prev=False):
    '''Insert a chip into the flat tables and every reverse index.

    cid = -1 requests the next free id; a cid that is already valid is
    rejected. Returns the cid used, or None on rejection.
    NOTE(review): props={} is a mutable default (only read here); the
    tuple-unpacking lambda below is Python-2-only syntax; behavior after a
    failed check depends on whether logerr raises — confirm.
    '''
    nm = cm.hs.nm
    gm = cm.hs.gm
    # Fails if cid is not available; cid = -1 means pick for you
    cx = -1
    if cid < 0:
        cid = cm.next_cid
    else:
        if cm.cid2_valid_bit(cid):  # New CID must be invalid (unused)
            logerr('CID Already in database Chip Not Added')
            logerr('Offending String: (cid, nx, gx, [roi]) = (%d, %d, %d, %s)' % (cid, nx, gx, str(roi)))
            cid = 0
            return
    # Manage Memory: claim the next free flat-table slot
    cx = cm.next_cx
    logdbg(''' Adding Chip = ( cid, nx, gx, [tl_x tl_y w h ]) ( %4d, %4d, %4d, %s) '''% (cid, nx, gx, str('[ %4.1f %4.1f %4.1f %4.1f ]' % tuple(roi))))
    # Grow flat arrays geometrically when the slot is out of range
    if cx >= len(cm.cx2_cid):
        curr_alloc = len(cm.cx2_cid)
        cm.chip_alloc((curr_alloc+1)*2+1)
    # Add the information to the flat table
    logdbg(' * Adding cx='+str(cx)+' to the tables')
    if nx == 0 or gx == 0 or len(roi) != 4:
        logerr('Chip information is invalid. Cannot add.')
    if delete_prev:
        cm.delete_computed_cid(cid)
    cm.cx2_cid[cx] = cid
    cm.cx2_nx[cx] = nx
    cm.cx2_gx[cx] = gx
    cm.cx2_roi[cx] = roi
    cm.cx2_theta[cx] = theta
    # Component-wise maximum ROI seen so far (Python 2 tuple lambda)
    cm.max_roi = map(lambda (a,b): max(a,b), zip(cm.max_roi, roi))
    # Add This Chip To Reverse Indexing; grow cid2_cx geometrically too
    if cid >= len(cm.cid2_cx):
        idAlloc = max(cid+1,len(cm.cid2_cx)*2 + 1)
        logdbg('Allocating: '+str(idAlloc)+' more cids')
        cm.cid2_cx = np.append(cm.cid2_cx, np.zeros(idAlloc,dtype=np.uint32))
    cm.cid2_cx[cid] = cx
    nm.nx2_cx_list[nx].append(cx)
    gm.gx2_cx_list[gx].append(cx)
    # Add user props: unspecified ones default to '', given ones stored
    for key in cm.user_props.keys():
        if not key in props.keys():
            cm.user_props[key][cx] = ''
    for key,val in props.iteritems():
        cm.add_user_prop(key)
        cm.user_props[key][cx] = val
    # Increment Data Counters
    cm.next_cx = max(cm.next_cx + 1, cx+1)
    cm.next_cid = max(cm.next_cid+1, cid+1)
    cm.max_cx = max(cm.max_cx, cx)
    cm.max_cid = max(cm.max_cid, cid)
    cm.num_c = cm.num_c + 1
    # Chip set changed: visual model must be rebuilt
    cm.hs.vm.isDirty = True
    return cid
def assign_feature_matches_1vM(rr, hs, K, method, cids_to_remove):
    '''Assigns each query feature to its K nearest database features
    with a similarity-score. Each feature votes for its assigned chip
    with this weight. Results are stored on rr (rr.cx2_fm, rr.cx2_fs_).'''
    logdbg('Assigning feature matches and initial scores')
    # Get managers
    cm = hs.cm
    nm = hs.nm
    vm = hs.vm
    # Get intermediate results
    qcx = rr.qcx
    qcid = rr.qcid
    qfdsc = rr.qfdsc
    qfpts = rr.qfpts
    num_qf = qfpts.shape[0]
    # define: Prefix K = list of K+1 nearest; k = K nearest
    # Everything is done in a flat manner, and reshaped at the end.
    if len(cids_to_remove) > 0:
        # Ask for extra neighbors so removed (self-match) results still
        # leave K usable votes
        K += len(cids_to_remove)
        logdbg('K = %d. Increased by %d to account for removing results' % (K, len(cids_to_remove)))
    # qfx = Query Feature Index
    # Kwxs = the Kth result word index ; Kdists = the Kth result distance
    (qfx2_Kwxs, qfx2_Kdists) = vm.nearest_neighbors(qfdsc, K + 1)
    # ---
    # Candidate score the nearest neighbor matches
    # p - pth nearest ; o - k+1th nearest (normalizer)
    score_fn_dict = {
        'DIFF': lambda p, o: o - p,
        'RAT': lambda p, o: o / p,
        'LNRAT': lambda p, o: np.log2(o / p),
        'COUNT': lambda p, o: 1,
        'NDIST': lambda p, o: 10e16 - p,
        'TFIDF': lambda wx2_tf, wx_idf, wx: wx2_tf[wx] * wx_idf[wx]
    }
    score_fn = score_fn_dict[method]
    if method == 'TFIDF':
        # The wx2_qtf could really be per k or as agged across all K
        # NOTE(review): qfx2_wxs is never defined in this function —
        # this branch raises NameError if method == 'TFIDF'; confirm.
        w_histo = bincount(qfx2_wxs, minlength=vm.numWords())
        wx2_qtf = np.array(w_histo, dtype=np.float32) / num_qf
        qfx2_vweight = score_fn(wx2_qtf, vm.wx2_idf, qfx2_wxs)
    else:
        # Distances to the 0-K results (+1 avoids divide-by-zero)
        p_vote = qfx2_Kdists[:, 0:K] + 1
        # Distance to the K+1th result, tiled as the normalizer column
        o_norm = np.tile(qfx2_Kdists[:, -1].reshape(num_qf, 1) + 1, (1, K))
        # Use score method to get weight
        qfx2_kweight = np.array(
            [score_fn(p, o) for (p, o) in iter(zip(p_vote.flat, o_norm.flat))],
            dtype=np.float32)
        qfx2_kweight.shape = (num_qf, K)
    # ---
    # Use the scores to cast weighted votes for database chips
    # query feature index -> agg descriptor indexes -> cids ->
    # self_query_bit -> clean_axs ; Feature Matches -> Chip Ids
    if len(cids_to_remove) > 0:  # Remove the query from results
        logdbg('Query qcid=%r are being removed from results ' % cids_to_remove)
        qfx2_Kaxs_ = vm.wx2_axs[qfx2_Kwxs]
        qfx2_Kcids_ = [vm.ax2_cid[axs] for axs in qfx2_Kaxs_.flat]
        # Test if each FeatureMatch-ChipId is the Query-ChipId.
        # NOTE(review): `True - np.in1d(...)` is Python-2-era boolean
        # negation; modern numpy wants `~np.in1d(...)`.
        qfx2_Ksqbit_ = [
            True - np.in1d(cids, cids_to_remove) for cids in qfx2_Kcids_
        ]
        # Remove FeatureMatches to the Query-ChipId
        qfx2_Kaxs = [np.array(axs)[sqbit].tolist() for (axs, sqbit) in
                     iter(zip(qfx2_Kaxs_.flat, qfx2_Ksqbit_))]
    else:
        qfx2_Kaxs_ = vm.wx2_axs[qfx2_Kwxs]
        qfx2_Kaxs = [np.array(axs).tolist() for axs in qfx2_Kaxs_.flat]
    # Clean Vote for Info: resolve agg-descriptor indexes to cx/fx/nx
    qfx2_Kcxs = np.array([vm.ax2_cx(axs) for axs in qfx2_Kaxs])
    qfx2_Kfxs = np.array([vm.ax2_fx[axs] for axs in qfx2_Kaxs])
    qfx2_Knxs = np.array([cm.cx2_nx[cxs] for cxs in qfx2_Kcxs])
    if qfx2_Kfxs.size == 0:
        logerr('Cannot query when there is one chip in database')
    # Reshape Vote for Info back to (num_query_features, K+1)
    qfx2_Kcxs = np.array(qfx2_Kcxs).reshape(num_qf, K + 1)
    qfx2_Kfxs = np.array(qfx2_Kfxs).reshape(num_qf, K + 1)
    qfx2_Knxs = np.array(qfx2_Knxs).reshape(num_qf, K + 1)
    # Using the K=K+1 results, make k=K scores
    qfx2_kcxs_vote = qfx2_Kcxs[:, 0:K]  # vote for cx
    qfx2_kfxs_vote = qfx2_Kfxs[:, 0:K]  # vote for fx
    qfx2_knxs_vote = qfx2_Knxs[:, 0:K]  # check with nx
    # Attempt to recover from problems where K is too small
    qfx2_knxs_norm = np.tile(qfx2_Knxs[:, K].reshape(num_qf, 1), (1, K))
    qfx2_knxs_norm[qfx2_knxs_norm == nm.UNIDEN_NX()] = 0  # Remove Unidentifieds from this test
    qfx2_kcxs_norm = np.tile(qfx2_Kcxs[:, K].reshape(num_qf, 1), (1, K))
    # If the normalizer has the same name, but is a different chip, there is
    # a good chance it is a correct match that was penalized by the scoring
    # function. (The compensating reweight below is currently disabled.)
    qfx2_normgood_bit = np.logical_and(qfx2_kcxs_vote != qfx2_kcxs_norm,
                                       qfx2_knxs_vote == qfx2_knxs_norm)
    #qfx2_kweight[qfx2_normgood_bit] = 2
    # -----
    # Build FeatureMatches and FeaturesScores
    cx2_fm = alloc_lists(cm.max_cx + 1)
    cx2_fs_ = alloc_lists(cm.max_cx + 1)
    qfx2_qfx = np.tile(np.arange(0, num_qf).reshape(num_qf, 1), (1, K))
    # Add matches and scores
    for (qfx, qfs, cxs, fxs) in iter(zip(qfx2_qfx.flat,
                                         qfx2_kweight.flat,
                                         qfx2_kcxs_vote.flat,
                                         qfx2_kfxs_vote.flat)):
        if cxs.size == 0:
            continue
        for (vote_cx, vote_fx) in iter(zip(np.nditer(cxs), np.nditer(fxs))):
            cx2_fm[vote_cx].append((qfx, vote_fx))
            cx2_fs_[vote_cx].append(qfs)
    # Convert correspondences to numpy arrays
    for cx in xrange(len(cx2_fs_)):
        num_m = len(cx2_fm[cx])
        cx2_fs_[cx] = np.array(cx2_fs_[cx], dtype=np.float32)
        cx2_fm[cx] = np.array(cx2_fm[cx], dtype=np.uint32).reshape(num_m, 2)
    logdbg('Setting feature assignments')
    rr.cx2_fm = cx2_fm
    rr.cx2_fs_ = cx2_fs_
def assign_feature_matches_1vM(rr, hs, K, method, cids_to_remove):
    '''Assigns each query feature to its K nearest database features
    with a similarity-score. Each feature votes for its assigned chip
    with this weight. Writes rr.cx2_fm and rr.cx2_fs_ in place.'''
    logdbg('Assigning feature matches and initial scores')
    # Get managers
    cm = hs.cm
    nm = hs.nm
    vm = hs.vm
    # Get intermediate results
    qcx = rr.qcx
    qcid = rr.qcid
    qfdsc = rr.qfdsc
    qfpts = rr.qfpts
    num_qf = qfpts.shape[0]
    # define: Prefix K = list of K+1 nearest; k = K nearest
    # Everything is done in a flat manner, and reshaped at the end.
    if len(cids_to_remove) > 0:
        # Over-fetch so removing self-matches still leaves K votes
        K += len(cids_to_remove)
        logdbg('K = %d. Increased by %d to account for removing results' %
               (K, len(cids_to_remove)))
    # qfx = Query Feature Index
    # Kwxs = the Kth result word index ; Kdists = the Kth result distance
    (qfx2_Kwxs, qfx2_Kdists) = vm.nearest_neighbors(qfdsc, K+1)
    # ---
    # Candidate score the nearest neighbor matches
    # p - pth nearest ; o - k+1th nearest (the normalizing distance)
    score_fn_dict = {
        'DIFF'  : lambda p, o: o - p,
        'RAT'   : lambda p, o: o / p,
        'LNRAT' : lambda p, o: np.log2(o / p),
        'COUNT' : lambda p, o: 1,
        'NDIST' : lambda p, o: 10e16 - p,
        'TFIDF' : lambda wx2_tf, wx_idf, wx: wx2_tf[wx] * wx_idf[wx]
    }
    score_fn = score_fn_dict[method]
    if method == 'TFIDF':
        # The wx2_qtf could really be per k or as agged across all K
        # NOTE(review): qfx2_wxs is undefined here — this branch cannot run
        # without raising NameError; verify against the full module.
        w_histo = bincount(qfx2_wxs, minlength=vm.numWords())
        wx2_qtf = np.array(w_histo, dtype=np.float32) / num_qf
        qfx2_vweight = score_fn(wx2_qtf, vm.wx2_idf, qfx2_wxs)
    else:
        # Distances to the 0-K results (+1 guards against zero distance)
        p_vote = qfx2_Kdists[:, 0:K] + 1
        # Distance to the K+1th result, tiled into a normalizer matrix
        o_norm = np.tile(qfx2_Kdists[:, -1].reshape(num_qf, 1) + 1, (1, K))
        # Use score method to get weight
        qfx2_kweight = np.array(
            [score_fn(p, o) for (p, o) in iter(zip(p_vote.flat, o_norm.flat))],
            dtype=np.float32)
        qfx2_kweight.shape = (num_qf, K)
    # ---
    # Use the scores to cast weighted votes for database chips
    # query feature index -> agg descriptor indexes -> cids ->
    # self_query_bit -> clean_axs ; Feature Matches -> Chip Ids
    if len(cids_to_remove) > 0:  # Remove the query from results
        logdbg('Query qcid=%r are being removed from results ' % cids_to_remove)
        qfx2_Kaxs_ = vm.wx2_axs[qfx2_Kwxs]
        qfx2_Kcids_ = [vm.ax2_cid[axs] for axs in qfx2_Kaxs_.flat]
        # Test if each FeatureMatch-ChipId is the Query-ChipId.
        # NOTE(review): `True - np.in1d(...)` is legacy boolean negation;
        # modern numpy spells this `~np.in1d(...)`.
        qfx2_Ksqbit_ = [True - np.in1d(cids, cids_to_remove)
                        for cids in qfx2_Kcids_]
        # Remove FeatureMatches to the Query-ChipId
        qfx2_Kaxs = [np.array(axs)[sqbit].tolist() for (axs, sqbit) in
                     iter(zip(qfx2_Kaxs_.flat, qfx2_Ksqbit_))]
    else:
        qfx2_Kaxs_ = vm.wx2_axs[qfx2_Kwxs]
        qfx2_Kaxs = [np.array(axs).tolist() for axs in qfx2_Kaxs_.flat]
    # Clean Vote for Info: agg-descriptor indexes -> chip/feature/name ids
    qfx2_Kcxs = np.array([vm.ax2_cx(axs) for axs in qfx2_Kaxs])
    qfx2_Kfxs = np.array([vm.ax2_fx[axs] for axs in qfx2_Kaxs])
    qfx2_Knxs = np.array([cm.cx2_nx[cxs] for cxs in qfx2_Kcxs])
    if qfx2_Kfxs.size == 0:
        logerr('Cannot query when there is one chip in database')
    # Reshape Vote for Info back to (num_query_features, K+1)
    qfx2_Kcxs = np.array(qfx2_Kcxs).reshape(num_qf, K+1)
    qfx2_Kfxs = np.array(qfx2_Kfxs).reshape(num_qf, K+1)
    qfx2_Knxs = np.array(qfx2_Knxs).reshape(num_qf, K+1)
    # Using the K=K+1 results, make k=K scores
    qfx2_kcxs_vote = qfx2_Kcxs[:, 0:K]  # vote for cx
    qfx2_kfxs_vote = qfx2_Kfxs[:, 0:K]  # vote for fx
    qfx2_knxs_vote = qfx2_Knxs[:, 0:K]  # check with nx
    # Attempt to recover from problems where K is too small
    qfx2_knxs_norm = np.tile(qfx2_Knxs[:, K].reshape(num_qf, 1), (1, K))
    qfx2_knxs_norm[qfx2_knxs_norm == nm.UNIDEN_NX()] = 0  # Remove Unidentifieds from this test
    qfx2_kcxs_norm = np.tile(qfx2_Kcxs[:, K].reshape(num_qf, 1), (1, K))
    # If the normalizer has the same name, but is a different chip, there is
    # a good chance it is a correct match that got penalized by the scoring
    # function. (Compensating reweight below is currently disabled.)
    qfx2_normgood_bit = np.logical_and(qfx2_kcxs_vote != qfx2_kcxs_norm,
                                       qfx2_knxs_vote == qfx2_knxs_norm)
    #qfx2_kweight[qfx2_normgood_bit] = 2
    # -----
    # Build FeatureMatches and FeaturesScores
    cx2_fm = alloc_lists(cm.max_cx + 1)
    cx2_fs_ = alloc_lists(cm.max_cx + 1)
    qfx2_qfx = np.tile(np.arange(0, num_qf).reshape(num_qf, 1), (1, K))
    # Add matches and scores
    for (qfx, qfs, cxs, fxs) in iter(zip(qfx2_qfx.flat,
                                         qfx2_kweight.flat,
                                         qfx2_kcxs_vote.flat,
                                         qfx2_kfxs_vote.flat)):
        if cxs.size == 0:
            continue
        for (vote_cx, vote_fx) in iter(zip(np.nditer(cxs), np.nditer(fxs))):
            cx2_fm[vote_cx].append((qfx, vote_fx))
            cx2_fs_[vote_cx].append(qfs)
    # Convert correspondences to numpy arrays
    for cx in xrange(len(cx2_fs_)):
        num_m = len(cx2_fm[cx])
        cx2_fs_[cx] = np.array(cx2_fs_[cx], dtype=np.float32)
        cx2_fm[cx] = np.array(cx2_fm[cx], dtype=np.uint32).reshape(num_m, 2)
    logdbg('Setting feature assignments')
    rr.cx2_fm = cx2_fm
    rr.cx2_fs_ = cx2_fs_
def external_feature_computers(am, chip):
    'Write chip ; call extern executable ; read output ; return (kpts,desc)'
    logdbg('Calling external kpt detector')
    iom = am.hs.iom
    # External binaries consume PPM images written to a temp path
    chip = Image.fromarray(chip)
    tmp_chip_fpath = iom.get_temp_fpath('tmp.ppm')
    chip.save(tmp_chip_fpath, 'PPM')
    perdoch_external = ['heshesaff']
    mikolajczyk_external = ['heslapaff', 'dense']
    # Select executable, output filename, and argument string per detector
    if am.algo_prefs.chiprep.kpts_detector in perdoch_external:
        exename = iom.get_hesaff_exec()
        outname = tmp_chip_fpath + '.hesaff.sift'
        args = '"' + tmp_chip_fpath + '"'
    elif am.algo_prefs.chiprep.kpts_detector in mikolajczyk_external:
        exename = iom.get_inria_exec()
        feature_name = am.algo_prefs.chiprep.kpts_detector
        if feature_name == 'heslapaff':
            feature_name = 'hesaff'
            suffix = 'hesaff'
        if feature_name == 'dense':
            feature_name = feature_name + ' 6 6'  # dense sampling step args
            suffix = 'dense'
        outname = tmp_chip_fpath + '.' + suffix + '.sift'
        args = '-' + feature_name + ' -sift -i "' + tmp_chip_fpath + '"'
    else:
        logerr('Method %r + %r is invalid in extern_detect_kpts.m'\
            % (am.algo_prefs.chiprep.kpts_detector, am.algo_prefs.chiprep.kpts_extractor))
    cmd = exename + ' ' + args
    logdbg('External Executing: %r ' % cmd)
    try:
        # shell=True: cmd is a single prebuilt command-line string
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        logdbg('External Execution did not throw an error')
        (out, err) = proc.communicate()
        logdbg(str(out) + ' ' + str(err))
        if proc.returncode != 0:
            logerr('Failed to execute ' + cmd + '\n OUTPUT: ' + out)
        if not os.path.exists(outname):
            logerr('The output file doesnt exist: ' + outname)
        logdbg('External Output:\n' + out[:-1])
    except Exception as ex:
        # Fallback path: retry once through os.system before giving up
        logwarn(
            'An Exception occurred while calling the keypoint detector: ' +
            str(ex))
        try:
            ret2 = os.system(cmd)
            if ret2 != 0:
                logerr(
                    str(ex) +
                    '\nThe backup keypoint detector didnt work either!')
        except Exception as ex2:
            logerr(str(ex2))
    # Output format: <ndims>\n<nkpts>\n then one keypoint per line —
    # 5 ellipse parameters followed by ndims descriptor values
    fid = file(outname, 'r')  # NOTE(review): file() is Python-2 only
    ndims = int(fid.readline())
    nkpts = int(fid.readline())
    if ndims != 128:
        raise Exception(' These are not SIFT dexcriptors ')
    kpts = np.zeros((nkpts, 5), dtype=np.float32)
    desc = np.zeros((nkpts, ndims), dtype=np.uint8)
    lines = fid.readlines()
    # SIFT descriptors are computed with a radius of r=3*np.sqrt(3*s)
    # s = (det A_i) ^ (-1/2) OR
    # s = sqrtm(inv(det(A_i)))
    for i in range(nkpts):
        nums = lines[i].split(' ')
        kpts[i, :] = np.array(map(lambda _: float(_), nums[0:5]),
                              dtype=np.float32)
        desc[i, :] = np.array(map(lambda _: np.uint8(_), nums[5:]),
                              dtype=np.uint8)
    fid.close()
    return (kpts, desc)
    # NOTE(review): the original source had a stray ''' after this return,
    # apparently opening a commented-out region — confirm against the full file.