def characterize(C):
    print >>sys.stderr, "start"
    INFO("matchdir=%s" % C.matchdir)
    start = time.time()
    results = util_cs188.Counter()
    C.initdirs()
    count = 0
    g_count = 0
    y_count = 0
    r_count = 0
    b_count = 0
    o_count = 0
    # truncate pose output files left over from a previous run
    try:
        open(os.path.join(C.pose_param['resultsdir'], C.pose_param['pose_file']), 'w').close()
        open(os.path.join(C.pose_param['resultsdir'], C.pose_param['extras_file']), 'w').close()
    except Exception, e:
        INFO(e)
    for Q in C.iter_queries():
        if C.verbosity > 0:
            print '-- query', Q.name, '--'
        for loc in C.locator_function(C, Q):
            Q.setQueryCoord(*loc)
            count += 1
            try:
                # match() returns five result flags, the top image, its
                # feature matches, and the full ranking
                [g, y, r, b, o], matchedimg, matches, combined = match(C, Q)
            except LocationOutOfRangeError:
                INFO('Exception: location out of cell range')
                continue
            # compile top-n statistics
            for n in C.topnresults:
                result = check_topn_img(C, Q, combined, n)
                results[n] += reduce(lambda x, y: x or y, result)
            if count % C.print_per == 0 and C.verbosity > 0:
                # average seconds per query so far
                INFO('speed is %f' % ((time.time() - start) / count))
                for n in C.topnresults:
                    print "matched {0}\t out of {1}\t in the top {2}\t amb: {3}, ncells:{4}".format(results[n], count, n, C.ambiguity, C.ncells)
            if g:
                g_count += 1
            if C.verbosity > 0 and count % C.print_per == 0:
                print "G match-g:{0} y:{1} r:{2} b:{3} o:{4} out of {5}".format(g_count, y_count, r_count, b_count, o_count, count)
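# match() below raises LocationOutOfRangeError when no candidate cell is in
# range of the query coordinate. Its definition is not shown in this section;
# a minimal sketch consistent with how it is used here would be:
#
#   class LocationOutOfRangeError(Exception):
#       """Raised when a query location falls outside all database cells."""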
def estimate_threads_avail():
    if os.getenv('NUM_THREADS'):
        return int(os.getenv('NUM_THREADS'))
    global t_avg, last_sampled
    # resample free memory at most once every 5 seconds
    if time.time() - last_sampled > 5:
        last_sampled = time.time()
        gb_free = get_free_mem_gb()
        # keep 10 GB of headroom, then smooth the estimate with an
        # exponential moving average to avoid thrashing the thread count
        t = int((gb_free - 10.0) / thread_mem_gb)
        t_avg = t * .3 + t_avg * .7
        INFO("I think we have enough memory for %d threads" % t_avg)
    # clamp to [1, tmax], honoring an optional MAX_THREADS cap
    t = max(1, min(tmax, int(t_avg)))
    if os.getenv('MAX_THREADS'):
        return min(t, int(os.getenv('MAX_THREADS')))
    return t
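# get_free_mem_gb() is referenced above but defined elsewhere. A minimal
# sketch, assuming a Linux host where /proc/meminfo is readable (the real
# implementation may differ):
#
#   def get_free_mem_gb():
#       """Return free + cached memory in GB, parsed from /proc/meminfo."""
#       kb = 0
#       for line in open('/proc/meminfo'):
#           fields = line.split()
#           if fields[0] in ('MemFree:', 'Cached:'):
#               kb += int(fields[1])  # values are reported in kB
#       return kb / 1048576.0  # kB -> GB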
def rerank_ransac(counts, C, Q):
    # consider candidates in descending order of raw match count
    sorted_counts = sorted(counts.iteritems(), key=lambda x: len(x[1]), reverse=True)
    if C.amb_cutoff:
        assert C.amb_cutoff > 1
        old = sorted_counts
        sorted_counts = amb_cutoff(C, Q, sorted_counts)
        print "amb cutoff: %d -> %d" % (len(old), len(sorted_counts))
        if not sorted_counts:
            sorted_counts = old
    # 'reranked' holds (inlier_count, siftfile, inlier_matches) tuples kept
    # in ascending order of inlier_count
    reranked = []
    num_filt = 0
    for siftfile, matches in sorted_counts:
        # the last g entries of 'reranked' are already in their final order:
        # no remaining candidate has enough raw matches to displace them
        g = len(reranked) - bisect.bisect(reranked, (len(matches), None, None))
        if g >= C.ranking_min_consistent or num_filt >= C.ranking_max_considered:
            if C.verbosity > 0:
                INFO('stopped after filtering %d' % num_filt)
            break
        num_filt += 1
        # RANSAC: keep only the geometrically consistent matches
        F, inliers = corr.find_corr(matches)
        if any(inliers):
            m = np.compress(inliers, matches)
            bisect.insort(reranked, (len(m), siftfile, m))
    if not reranked:
        INFO('W: no db matches passed ransac, not filtering')
        return condense2(sorted_counts), False
    else:
        reranked.reverse()
        return condense3(reranked), True
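# Worked illustration of the early exit in rerank_ransac() (not part of the
# pipeline): RANSAC can never yield more inliers than a candidate's raw match
# count, and candidates arrive in descending order of raw match count. So with
# reranked == [(5, ...), (9, ...), (12, ...)] and a candidate holding 8 raw
# matches, bisect.bisect(reranked, (8, None, None)) returns 1 and
# g == 3 - 1 == 2: the entries with 9 and 12 inliers can no longer be
# displaced by this or any later candidate. Once g reaches
# C.ranking_min_consistent, the top of the ranking is settled and the loop
# stops.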
def _test():
    db = TagCollection('testdata/tags.csv')
    import os
    idir = 'testdata/input'
    odir = 'testdata/output'
    distlog = []
    for f in os.listdir(idir):
        if f.endswith('.jpg'):
            output = os.path.join(odir, f[:-4] + '.png')
            jpg = os.path.join(idir, f)
            source = EarthmineImageInfo(jpg, jpg[:-4] + '.info')
            img = TaggedImage(jpg, source, db)
            points = img.map_tags_camera()
            for tag, (dist, point) in points:
                distlog.append(dist)
            img.draw(points, output)
    # mean error over the i smallest distances (assumes len(distlog) >= i)
    for i in [1, 10, 50, 100, len(distlog)]:
        INFO('top %d error is %f at %d samples' % (i, sum(sorted(distlog)[:i]) / i, len(distlog)))
def map_tags_earthmine(self):
    "Returns (tag, (dist, pixel)) pairs using earthmine pixel data."
    THRESHOLD = 10.0  # distance cutoff; tags farther than this from every pixel are dropped
    possible_tags = self.get_frustum()
    locs = self.source.get_pixel_locations(list(self.get_pixels()))
    assert locs is not None
    # best (distance, pixel) seen so far for each candidate tag
    mapping = dict([(tag, (999999, None)) for tag in possible_tags])
    for pixel in self.get_pixels():
        if pixel not in locs:
            continue
        for tag in possible_tags:
            dist = tag.distance(locs[pixel])
            if dist < mapping[tag][0]:
                mapping[tag] = (dist, pixel)
    tags = []
    for tag in possible_tags:
        if mapping[tag][0] < THRESHOLD:
            tags.append((tag, mapping[tag]))
    INFO("mapped %d/%d possible tags" % (len(tags), len(possible_tags)))
    return tags
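# Tag.distance() is defined elsewhere. For intuition only, a hypothetical
# implementation consistent with its use above (tag and pixel location both
# carrying lat/lon/alt) would measure the 3D separation in meters:
#
#   def distance(self, loc):  # requires math; info.distance as used in match()
#       flat = info.distance(self.lat, self.lon, loc['lat'], loc['lon'])
#       return math.sqrt(flat ** 2 + (self.alt - loc['alt']) ** 2)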
def match(C, Q):
    if C.shuffle_cells:
        C._dbdir = None
    if C.override_cells:
        INFO('override cells')
        cells_in_range = [(c, 0) for c in C.override_cells]
    else:
        # compute closest cells
        closest_cells = util.getclosestcells(Q.query_lat, Q.query_lon, C.dbdir)
        if C.restrict_cells:
            closest_cells = filter(lambda c: c[0] in C.restrict_cells, closest_cells)
        cells_in_range = [(cell, dist) for cell, dist in closest_cells[0:C.ncells]
                          if dist < C.cellradius + C.ambiguity + C.matchdistance]
    INFO('Using %d cells' % len(cells_in_range))
    if C.shuffle_cells:
        import reader
        sr = reader.get_reader('sift')
        supercell = sr.get_supercelldir(C.dbdir, [c for (c, d) in cells_in_range], C.overlap_method)
        C._dbdir = supercell
    if not cells_in_range:
        raise LocationOutOfRangeError

    # cache for fuzz runs
    if C.cacheEnable:
        key = derive_key(C, cells_in_range, Q.siftname)
        if key in cache:
            print 'cache hit'
            return cache[key]
        else:
            print 'cache miss'

    # compute output file paths for the cells
    cellpath = [c for c, d in cells_in_range]
    listofimages = []
    if C.one_big_cell:
        INFO('Using 1 big cell (%d union)' % len(cells_in_range))
        outputFilePaths = [os.path.join(C.matchdir, Q.siftname + ',' + getcellid(cellpath) + ".res")]
        #listofimages = lexiconrank.addImagetoList(listofimages, C.dbdir + cellpath)
        cellpath = [cellpath]
    else:
        outputFilePaths = []
        for cell, dist in cells_in_range:
            if ',' in cell:
                latcell, loncell = cell.split(',')
                latcell = float(latcell)
                loncell = float(loncell)
            else:
                latcell, loncell = 0, 0
            actualdist = info.distance(Q.query_lat, Q.query_lon, latcell, loncell)
            outputFilePath = os.path.join(C.matchdir, Q.siftname + ',' + cell + ',' + str(actualdist) + ".res")
            outputFilePaths.append(outputFilePath)
            #listofimages = lexiconrank.addImagetoList(listofimages, C.dbdir + cell)

    # start query
    query.run_parallel(C, Q, cellpath, outputFilePaths, estimate_threads_avail())
    #d, lexiconmatchedimg = lexiconrank.returnTopMatch_random(C.dbdump, listofimages, Q.jpgpath)

    # combine results
    if C.spatial_comb:
        comb_matches = corr.combine_spatial(outputFilePaths)
    else:
        print outputFilePaths
        comb_matches = corr.combine_matches(outputFilePaths)

    # geometric consistency reranking
    if C.disable_filter_step:
        imm = condense2(sorted(comb_matches.iteritems(), key=lambda x: len(x[1]), reverse=True))
        rsc_ok = True
    else:
        imm, rsc_ok = rerank_ransac(comb_matches, C, Q)

    if C.weight_by_coverage:
        ranked = weight_by_coverage(C, Q, imm)
    elif C.weight_by_distance:
        ranked = weight_by_distance(C, Q, imm)
    else:
        ranked = distance_sort(C, Q, imm)

    # top 1
    stats = check_topn_img(C, Q, ranked, 1)

    # return statistics and top result
    matchedimg = ranked[0][0]
    matches = comb_matches[matchedimg + 'sift.txt']
    if C.cacheEnable:
        cache[key] = (stats, matchedimg, matches, ranked)
    if C.match_callback:
        C.match_callback(C, Q, stats, matchedimg, ranked, cells_in_range, rsc_ok)

    # compute homography and draw images maybe
    if MultiprocessExecution.pool:
        MultiprocessExecution.pool.apply_async(compute_hom, [C.pickleable(), Q, ranked, comb_matches])
    else:
        compute_hom(C, Q, ranked, comb_matches)

    ### Query Pose Estimation ###
    is_match = any(check_img(C, Q, ranked[0]))
    if (C.solve_pose and is_match and Q.name not in C.pose_remove) or C.pose_param['solve_bad']:
        #computePose.draw_dbimage(C, Q, matchedimg, is_match)
        if MultiprocessExecution.pool:
            MultiprocessExecution.pool.apply_async(computePose.estimate_pose, [C.pickleable(), Q, matchedimg, is_match])
        else:
            computePose.estimate_pose(C, Q, matchedimg, is_match)

    # done
    return stats, matchedimg, matches, ranked
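# derive_key() above is defined elsewhere; any stable value built from the
# inputs that determine a match result works as a fuzz-run cache key. A
# hypothetical sketch (the real derivation may differ):
#
#   def derive_key(C, cells_in_range, siftname):
#       cellids = ','.join(sorted(cell for cell, dist in cells_in_range))
#       return (C.dbdir, cellids, siftname, C.ncells, C.ambiguity)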
def draw(self, points, output):
    image = self.taggedcopy(points, self.image)
    image.save(output, 'png')
    INFO("saved to %s" % output)
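# Typical use of draw(), mirroring _test() above (paths are illustrative):
#
#   db = TagCollection('testdata/tags.csv')
#   source = EarthmineImageInfo('q.jpg', 'q.info')
#   img = TaggedImage('q.jpg', source, db)
#   points = img.map_tags_camera()
#   img.draw(points, 'q-tagged.png')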