import pickle

import numpy as np

# outline(), display() and controller() are expected to be defined elsewhere in this module.


def run():
    flag = True   # controls whether the program keeps running
    hide = True   # controls superimposition of the captured image
    view = 1      # which view the commands change: 1-Front, 2-Side, 3-Top
    with open('lengths.p', 'rb') as fp:   # load the stored template lengths
        lens = pickle.load(fp)
    img = outline('man_2.jpg')    # captured front outline
    simg = outline('side.jpg')    # captured side outline
    while flag:
        if hide:
            front = np.zeros((500, 480))
            side = np.zeros((500, 480))
        else:
            front = img.copy()
            side = simg.copy()
        top = np.zeros((480, 480))
        display(front, side, top, lens)                         # show the templates
        flag, hide, view = controller(lens, flag, hide, view)   # user interface for input
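# A minimal sketch of how the 'lengths.p' template read by run() could be
# produced. The structure of 'lens' is an assumption here: run() only loads it
# and forwards it to display() and controller(), so any picklable object works.
def save_template(lens, path='lengths.p'):
    # Persist the measured lengths so run() can load them later.
    with open(path, 'wb') as fp:
        pickle.dump(lens, fp)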
def color_segment(self, text_color=RGBPixel(255, 255, 0),
                  instaff_color=RGBPixel(0, 255, 0),
                  other_color=RGBPixel(100, 100, 100),
                  classified_color=None,
                  classified_box=False):
    """Segment the image into three colored parts.

    The parts are:
    - text
    - inside staff
    - others/relevant for the classifier

    Keyword arguments:
    text_color       --- color to use for text ccs
    instaff_color    --- color to use for in-staff ccs
    other_color      --- color for everything else
    classified_color --- if set, classify glyphs in the image and paint them
                         in this color
    classified_box   --- if set, classify glyphs and draw a box around them
                         instead of highlighting them
    """
    ret = self.to_rgb().to_onebit().to_rgb()
    # Only run the classifier when the caller asked for classified output.
    classify = False
    if classified_color is not None or classified_box:
        classify = True
        if classified_color is None:
            classified_color = RGBPixel(255, 0, 0)
    text, instaff, other, classified = self.segment(classify=classify)
    # Paint in-staff ccs (green by default).
    for c in instaff:
        ret.highlight(c, instaff_color)
    # Paint the remaining relevant ccs (grey by default).
    for c in other:
        ret.highlight(c, other_color)
    # Mark classified glyphs, either boxed or highlighted.
    for c in classified:
        if classified_box:
            outline(ret, c, width=2.0, color=classified_color)
        else:
            ret.highlight(c, classified_color)
    # Paint text ccs (yellow by default).
    for c in text:
        ret.highlight(c, text_color)
    return ret
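# Hedged usage sketch for color_segment(): the class exposing it and the way a
# page is loaded are assumptions ('PreomrImage' is an illustrative name); the
# method itself only needs an object providing segment(), to_rgb() and
# to_onebit(), and Gamera RGB images provide save_PNG().
page = PreomrImage("page1.png")
colored = page.color_segment(classified_box=True)   # box classified glyphs
colored.save_PNG("page1_segmented.png")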
def test_outline_contains_elements(self):
    story = outline()
    for s in story:
        if isinstance(s.subject, common.Person):
            break
    else:
        raise AssertionError('Person does not exist in outline')
    for s in story:
        if isinstance(s.subject, common.Stage):
            break
    else:
        raise AssertionError('Stage does not exist in outline')
    for s in story:
        if isinstance(s.subject, common.DayTime):
            break
    else:
        raise AssertionError('DayTime does not exist in outline')
# Gamera helpers (init_gamera, load_image, outline, RGBPixel) and the project's
# remstaves / inout_staff_condition are assumed to be imported above this snippet.
import sys
import re
import time

init_gamera()

progress = 0   # percentage of images processed (tracked, not printed here)
amount = len(sys.argv[1:])
elapsed = 0
for i, imgname in enumerate(sys.argv[1:]):
    progress = ((i + 1) / float(amount)) * 100
    m = re.match(r"^(.*)\.[^\.]+$", imgname)   # strip the file extension
    noend = m.group(1)
    image = load_image(imgname)
    rgbimg = image.to_rgb()
    image = image.to_onebit()
    ms = remstaves(image)                      # remove the staff lines
    ccs = ms.image.cc_analysis()               # connected components
    cond = inout_staff_condition(ms.get_staffpos())
    for c in ccs:
        if cond(c):   # inside a staff
            col = RGBPixel(255, 0, 0)
        else:         # outside all staves
            col = RGBPixel(0, 255, 0)
        outline(rgbimg, c, 2.0, col)
    savename = "insideoutside_%s.png" % noend
    rgbimg.save_PNG(savename)
    print "Saved %s" % savename
    sys.stdout.flush()
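# Hedged sketch of the kind of predicate inout_staff_condition is expected to
# return: a closure that flags connected components whose vertical extent
# overlaps a staff region. The (top, bottom) input format is an assumption;
# the project's real implementation and get_staffpos() output may differ.
def make_inside_staff_predicate(staff_regions):
    def inside(cc):
        # cc is a Gamera connected component; ul_y/lr_y are its bounding-box rows.
        return any(not (cc.lr_y < top or cc.ul_y > bottom)
                   for (top, bottom) in staff_regions)
    return inside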
    else:
        relevant_cc = ccs
    ret = Classified_image(self, mi, relevant_cc)
    self.images.append(ret)
    return ret


if __name__ == '__main__':
    FORMAT = "%(asctime)-15s %(levelname)s [%(name)s.%(funcName)s-%(lineno)s] %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=FORMAT)
    start = time.time()
    init_gamera()
    c = Classifier_with_remove(sys.argv[1], float(sys.argv[2]))
    d_t = c.d_t()
    logging.debug("Loaded Gamera and classifier in %f seconds", time.time() - start)
    logging.debug("count_of_training=%d, k=%d, e_fp=%f, d_t=%f",
                  len(c.stats), c.k, c.e_fp, d_t)
    sys.stdout.flush()
    start = time.time()
    for imgname in sys.argv[3:]:
        m = re.match(r"^(.*)\.[^\.]+$", imgname)   # strip the file extension
        noend = m.group(1)
        ci = c.classify_image(imgname)
        rgbimg = ci.image.to_rgb()
        cg = ci.classified_glyphs(d_t)
        for g in cg:
            outline(rgbimg, g, 3.0, RGBPixel(255, 0, 0))
        rgbimg.save_PNG("class_%s.png" % noend)
        print "Saved class_%s.png: %d glyphs found" % (noend, len(cg))
        sys.stdout.flush()
    print "Parsed %d images in %f seconds" % (len(sys.argv[3:]), time.time() - start)
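# Sketch of an argument check one might place before the main block above; the
# original script assumes a well-formed command line (training data, an e_fp
# value, then image files). The script name in the usage string is a placeholder.
if len(sys.argv) < 4:
    sys.stderr.write("usage: python classify_script.py <training.xml> <e_fp> "
                     "<image> [image ...]\n")
    sys.exit(1)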
def test_outline_all_actions(self):
    actions = outline()
    for a in actions:
        self.assertIsInstance(a, common.Act)
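# Minimal sketch for running the outline tests above standalone; it assumes the
# test methods live in a unittest.TestCase subclass, which is not shown in this
# excerpt.
if __name__ == '__main__':
    import unittest
    unittest.main()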
def test_e_fp(filename, expected_count=10):
    # Requires numpy's arange plus the Gamera helpers (init_gamera, outline,
    # RGBPixel) and find_nearest, imported at module level.
    init_gamera()
    c = Classifier_with_remove()
    c.set_k(1)
    c.change_features(["volume64regions"])
    ci = c.classify_image(filename)
    # files = ["mergedyn2.xml", "mergedyn.xml", "only-dynamics.xml",
    #          "newtrain-dynamic.xml", "preomr.xml"]
    files = ["preomr.xml", "preomr_edited.xml", "preomr_edited_cnn.xml"]
    import os.path
    # Try each training set that is present on disk.
    for dynamic in [d for d in files if os.path.isfile(d)]:
        ci.load_new_training_data(dynamic)
        print "%s - count_of_training=%d, k=%d" % (dynamic, len(c.stats), c.k)
        result = {}   # buckets keyed by the count of found glyphs
        csv = {}
        sys.stdout.flush()
        # Sweep the epsilon for false positives: e_fp.
        for e_fp in arange(0.01, 1.01, 0.01):
            c.e_fp = e_fp
            count = len(ci.classified_glyphs())
            # Initialise the bucket on first use.
            if count not in result:
                result[count] = []
            result[count].append((e_fp, c.d_t()))
            csv[e_fp] = count
        # Find the bucket closest to the wanted count.
        k, res, diff = find_nearest(result, expected_count)
        confid = [(len(v), key, v[0][0], v[0][1]) for key, v in result.iteritems()]
        confid2 = [(key, len(v)) for key, v in result.iteritems()]
        confid.sort(reverse=True)
        confid2.sort()
        print "efp,count"
        for e_fp, cnt in sorted(csv.iteritems()):
            print "%s,%s" % (e_fp, cnt)
        print
        print "count,spansize"
        for count, spansize in confid2:
            print "%s,%s" % (count, spansize)
        return
        # NOTE: everything below is unreachable because of the early return above.
        ret = []
        for i in range(0, min(10, len(confid)) + 1):
            ret.append(confid[i])
        if expected_count not in result:
            print "Never found the desired amount with %s" % dynamic
        print "Found in %d(%d): %s" % (k, diff, [r for r in res])
        rgbimg = ci.image.to_rgb()
        cg = ci.classified_glyphs(res[0][1])   # res entries are (e_fp, d_t) pairs
        for g in cg:
            outline(rgbimg, g, 3.0, RGBPixel(255, 0, 0))
        rgbimg.save_PNG("class_%s_%s.png" % (filename, dynamic))
        print
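# find_nearest is not defined in this excerpt. A hedged sketch of what it is
# assumed to do, based on how its return values are used above: pick the bucket
# key closest to the expected glyph count and return that key, its entries,
# and the absolute difference. Not necessarily the project's actual helper.
def find_nearest(result, expected):
    key = min(result, key=lambda count: abs(count - expected))
    return key, result[key], abs(key - expected)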