def once():
    global depth, rgb

    if not FOR_REAL:
        # Replay the next frame from the recorded dataset
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        # Grab a live frame from the camera
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()
        rgb, _ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    try:
        c, _ = hashalign.find_best_alignment(grid.occ, 0 * grid.occ,
                                             target_model, ~target_model)
    except ValueError:
        pass
    else:
        tm = hashalign.apply_correction(target_model, *c)
        tm = np.ascontiguousarray(tm)

        not_filled = tm & ~grid.occ
        correct = tm & grid.occ
        incorrect = ~tm & grid.occ

        try:
            next_layer = np.min(np.nonzero(not_filled)[1])
        except ValueError:
            # Nothing left to fill: show the whole occupancy grid in green
            blockdraw.show_grid('0', grid.occ,
                                color=np.array([0.2, 1, 0.2, 1]))
        else:
            # Misplaced blocks (yellow)
            blockdraw.show_grid('1', incorrect,
                                color=np.array([1, 1, 0.1, 1]))

            # Blocks still missing from the next unfinished layer (magenta)
            nf = not_filled * 0
            nf[:, next_layer, :] = 1
            nf = nf & not_filled
            blockdraw.show_grid('2', nf, color=np.array([1, 0.2, 1.0, 1]))

            # Correctly placed blocks (dark green)
            blockdraw.show_grid('3', correct,
                                color=np.array([0.1, 0.3, 0.1, 1]))

    window.clearcolor = [0, 0, 0, 0]
    window.flag_drawgrid = False

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
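# Minimal driver sketch (an assumption, not part of the original file): call
# once() repeatedly to keep the display updating. FOR_REAL is expected to be
# defined at module level to choose live capture vs. dataset replay.
if __name__ == '__main__':
    while True:
        once()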
def once():
    global problem

    # Perturb the ground truth by a random offset and quarter-turn rotation
    problem = GT.copy()
    problem = grid.apply_correction(problem,
                                    np.random.randint(-18, 18),
                                    np.random.randint(-18, 18),
                                    np.random.randint(0, 4))

    featureA = hashalign.find_features(GT)
    featureB = hashalign.find_features(problem)
    matches = hashalign.match_features(featureA, featureB, GT.shape[0])
    bestmatch, besterr = hashalign.find_best_alignment(GT, GT * 0,
                                                       problem, problem * 0)

    if 0:
        # Step through every candidate match (disabled by default)
        for match in matches:
            show_align(GT, hashalign.apply_correction(problem, *match))
            window.Refresh()
            pylab.waitforbuttonpress(0.01)

    print besterr, bestmatch
    show_align(GT, hashalign.apply_correction(problem, *bestmatch))
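# show_align() is used above but not defined in this section. A minimal sketch
# of what it might look like, reusing the blockdraw conventions from the
# display code above; the layer names and colors here are assumptions.
def show_align(gt, aligned):
    blockdraw.clear()
    # Blocks in the ground truth that the aligned grid misses (red)
    blockdraw.show_grid('gt_only', gt & ~aligned,
                        color=np.array([1, 0.2, 0.2, 1]))
    # Blocks the two grids agree on (green)
    blockdraw.show_grid('both', gt & aligned,
                        color=np.array([0.2, 1, 0.2, 1]))
    # Blocks only present in the aligned grid (yellow)
    blockdraw.show_grid('extra', ~gt & aligned,
                        color=np.array([1, 1, 0.1, 1]))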
def run_error():
    import re
    datasets = glob.glob('data/sets/study*')
    datasets.sort()

    d = {}
    for name in datasets:
        if 'user4' in name:
            continue
        dataset.load_dataset(name)
        name = os.path.split(name)[1]
        print name

        # Open the ground truth file
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            # Fall back on generic ground truth file
            match = re.match('.*_z(\d)m_(.*)', name)
            if match is None:
                continue
            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number

        # Load the ground truth file
        removed = None
        added = None
        with open(fname) as fp:
            s = fp.read()
            config.GT = grid.gt2grid(s, chars='*')
            removed = grid.gt2grid(s, chars='rR')
            added = grid.gt2grid(s, chars='aA')

        number, kind = re.match('.*_z(\d)m_(.*)$', name).groups()
        number = int(number)

        with open(os.path.join('data/experiments/output/', name,
                               'final_output.txt'), 'r') as f:
            final_output = f.read()

        out = grid.gt2grid(final_output)
        gt = config.GT

        try:
            c, _ = hashalign.find_best_alignment(out, 0 * out, gt, 0 * gt)
        except ValueError:
            red = gt | added
            yellow = 0 * out
            green = 0 * out
            purple = 0 * out
            blue = 0 * out
        else:
            gt = hashalign.apply_correction(gt, *c)
            added = hashalign.apply_correction(added, *c)
            removed = hashalign.apply_correction(removed, *c)

            should_be_there = gt | added
            shouldnot_be_there = (~gt & ~added) | removed

            # Sanity check
            assert np.all(shouldnot_be_there ^ should_be_there)

            red = ~out & should_be_there
            yellow = out & shouldnot_be_there  # Incorrect add
            purple = ~out & removed            # Correct remove
            blue = out & added                 # Correct add
            green = out & gt                   # Correct block place

            # Sanity checks
            assert np.all(red + yellow + purple + blue + green <= 1)
            assert np.sum(red) + np.sum(blue) + np.sum(green) == \
                np.sum(should_be_there)

        err = np.sum(yellow | red)
        totalblocks = np.sum(red | green | blue)
        print err, totalblocks, float(err) / totalblocks

        #_,_,err1,_,_ = grid.xcorr_correction(out, config.GT)
        #assert err == err1/float(gt.sum())

        d[name] = {
            'err': err,
            'tot': (red + green + blue).sum(),
            'rel': err / float(totalblocks),
            'num': number,
            'type': kind,
        }
        d[name].update(dict(green=green, red=red, yellow=yellow,
                            purple=purple, blue=blue))

    with open('data/experiments/error_output.pkl', 'w') as f:
        pickle.dump(d, f)
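# A small usage sketch (not part of the original file): load the pickle
# written by run_error() and print the per-dataset error figures stored above.
def summarize_error(path='data/experiments/error_output.pkl'):
    with open(path, 'r') as f:
        d = pickle.load(f)
    for name in sorted(d):
        entry = d[name]
        print name, entry['num'], entry['type'], \
            '%d/%d (%.1f%%)' % (entry['err'], entry['tot'],
                                100 * entry['rel'])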
def run_grid(clearout=True):
    global ds
    ds = []

    if clearout:
        try:
            shutil.rmtree(out_path)
        except:
            pass
        try:
            os.makedirs(out_path)
        except OSError:
            pass

        for name in ('ThreeCanvas.js', 'Plane.js', 'Cube.js', 'video.js',
                     'video-js.css', 'default.jpg', 'blocksetup.js'):
            shutil.copy('makewww/' + name, out_path)

    with open('makewww/blockviewiframetemplate.html', 'r') as f:
        tmp = template.Template(f.read())

    datasets = glob.glob('data/sets/study*')
    datasets.sort()

    total = len(datasets)
    for x, name in enumerate(datasets):
        dataset.load_dataset(name)
        name = os.path.split(name)[1]

        # Open the ground truth file
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            # Fall back on generic ground truth file
            match = re.match('.*_z(\d)m_.*', name)
            if match is None:
                continue
            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number

        # Load the ground truth file
        removed = None
        added = None
        with open(fname) as fp:
            s = fp.read()
            config.GT = grid.gt2grid(s, chars='*')
            removed = grid.gt2grid(s, chars='rR')
            added = grid.gt2grid(s, chars='aA')

        try:
            with open(os.path.join('data/experiments/output/', name,
                                   'output.pkl'), 'r') as f:
                output = pickle.load(f)
        except IOError:
            continue

        with open(os.path.join('data/experiments/output/', name,
                               'final_output.txt'), 'r') as f:
            final_output = f.read()

        folder = os.path.join(out_path, name)
        os.mkdir(folder)

        print "(%d/%d)" % (x + 1, total), name

        out = grid.gt2grid(final_output)
        gt = config.GT

        try:
            c, _ = hashalign.find_best_alignment(out, 0 * out, gt, 0 * gt)
        except ValueError:
            red = gt | added
            yellow = 0 * out
            green = 0 * out
            purple = 0 * out
            blue = 0 * out
        else:
            gt = hashalign.apply_correction(gt, *c)
            added = hashalign.apply_correction(added, *c)
            removed = hashalign.apply_correction(removed, *c)

            should_be_there = gt | added
            shouldnot_be_there = (~gt & ~added) | removed

            # Sanity check
            assert np.all(shouldnot_be_there ^ should_be_there)

            red = ~out & should_be_there
            yellow = out & shouldnot_be_there  # Incorrect add
            purple = ~out & removed            # Correct remove
            blue = out & added                 # Correct add
            green = out & gt                   # Correct block place

            # Sanity checks
            assert np.all((red + yellow + purple + blue + green) <= 1)
            assert np.sum(red) + np.sum(blue) + np.sum(green) == \
                np.sum(should_be_there)

        #err = np.sum(yellow | red)
        #print err, gt.sum(), err/float(gt.sum())

        # Output display
        with open(os.path.join(out_path, '%s_block.html' % name), 'w') as f:
            f.write(tmp.render(template.Context(dict(
                red=gridstr(red),
                blue=gridstr(blue),
                green=gridstr(green),
                yellow=gridstr(yellow),
                purple=gridstr(purple),
                msg="Output"))))

        # Ground truth display
        with open(os.path.join(out_path, '%s_gt.html' % name), 'w') as f:
            f.write(tmp.render(template.Context(dict(
                blue=gridstr(added),
                green=gridstr(gt),
                purple=gridstr(removed),
                red='[]',
                yellow='[]',
                msg="Ground Truth"))))

        totalblocks = np.sum(green + red + blue)
        incorrect = np.sum(red + yellow)

        with open(os.path.join(out_path, '%s_block.txt' % name), 'w') as f:
            f.write(grid.gt2grid(final_output))

        # Only take the metadata we need from the output
        d = dict([(key, output[key]) for key in ('frames', 'time')])
        d['name'] = name
        d['total'] = totalblocks
        d['incorrect'] = incorrect
        d['error'] = float(incorrect) / totalblocks * 100
        ds.append(d)

    return ds
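# A small usage sketch (not part of the original file): aggregate the
# per-dataset records returned by run_grid() into one overall error figure.
def summarize_grid(records):
    total = sum(d['total'] for d in records)
    incorrect = sum(d['incorrect'] for d in records)
    print '%d incorrect of %d blocks (%.1f%%)' % (
        incorrect, total, 100.0 * incorrect / total)

# Example:
#   ds = run_grid()
#   summarize_grid(ds)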