Example #1
def start(dset=None, frame_num=0):
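    """Initialize the pipeline for a recorded dataset or for live capture.

    When FOR_REAL is false, load the requested dataset (or a random one),
    advance to frame_num, and seed the grid with the matching ground truth;
    otherwise load the newest calibration and set up live capture.
    """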
    main.initialize()

    if not FOR_REAL:
        if dset is None:
            dataset.load_random_dataset()
        else:
            dataset.load_dataset(dset)
        while dataset.frame_num < frame_num:
            dataset.advance()

        name = os.path.split(dset)[1]
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            import re
            # Fall back on generic ground truth file
            match = re.match(r'.*_z(\d)m_(.*)', name)
            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number

        with open(fname) as f:
            GT = grid.gt2grid(f.read())
        grid.initialize_with_groundtruth(GT)

    else:
        config.load('data/newest_calibration')
        opennpy.align_depth_to_rgb()
        dataset.setup_opencl()
Example #2
def run_grid():
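    """Process every 'data/sets/study_*' dataset and save per-dataset results.

    For each dataset: reinitialize the tracker, load the matching ground
    truth grid, run once() on every frame, then write the timing and
    per-frame output (output.pkl) and the final grid (final_output.txt).
    """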

    datasets = glob.glob('data/sets/study_*')
    try:
        shutil.rmtree(out_path)
    except OSError:
        pass
    os.mkdir(out_path)

    for name in datasets:
        #for name in ('data/sets/cube',):
        dataset.load_dataset(name)
        name = os.path.split(name)[1]

        d = dict(name=name)
        folder = os.path.join(out_path, name)
        os.mkdir(folder)

        global modelmat
        modelmat = None
        main.initialize()

        import re
        number = int(re.match(r'.*_z(\d)m_.*', name).groups()[0])
        with open('data/experiments/gt/gt%d.txt' % number) as f:
            GT = grid.gt2grid(f.read())
        grid.initialize_with_groundtruth(GT)

        total = 0
        output = []
        try:
            while 1:
                try:
                    dataset.advance()
                except (IOError, ValueError):
                    break
                if dataset.frame_num % 30 == 0:
                    print name, dataset.frame_num
                t1 = time.time()
                once()
                t2 = time.time()
                total += t2 - t1

                output.append((main.R_correct.copy(), grid.occ.copy()))
        except Exception as e:
            print e

        d['frames'] = dataset.frame_num
        d['time'] = total
        d['output'] = output
        with open(os.path.join(folder, 'output.pkl'), 'wb') as f:
            pickle.dump(d, f)

        with open(os.path.join(folder, 'final_output.txt'), 'w') as f:
            f.write(grid.grid2gt(grid.occ))
Example #3
def run_grid():

    datasets = glob.glob('data/sets/study_*')
    try:
        shutil.rmtree(out_path)
    except OSError:
        pass
    os.mkdir(out_path)

    for name in datasets:
    #for name in ('data/sets/cube',):
        dataset.load_dataset(name)
        name = os.path.split(name)[1]

        d = dict(name=name)
        folder = os.path.join(out_path, name)
        os.mkdir(folder)

        global modelmat
        modelmat = None
        main.initialize()

        import re
        number = int(re.match(r'.*_z(\d)m_.*', name).groups()[0])
        with open('data/experiments/gt/gt%d.txt' % number) as f:
            GT = grid.gt2grid(f.read())
        grid.initialize_with_groundtruth(GT)

        total = 0
        output = []
        try:
            while 1:
                try:
                    dataset.advance()
                except (IOError, ValueError):
                    break
                if dataset.frame_num % 30 == 0:
                    print name, dataset.frame_num
                t1 = time.time()
                once()
                t2 = time.time()
                total += t2-t1

                output.append((main.R_correct.copy(), grid.occ.copy()))
        except Exception as e:
            print e

        d['frames'] = dataset.frame_num
        d['time'] = total
        d['output'] = output
        with open(os.path.join(folder, 'output.pkl'), 'wb') as f:
            pickle.dump(d, f)

        with open(os.path.join(folder, 'final_output.txt'),'w') as f:
            f.write(grid.grid2gt(grid.occ))
Example #4
def run_grid():
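    """Run the tracker over every 'data/sets/study_*' dataset, writing results under out_path.

    Requires that out_path does not already exist; prints a warning and
    returns False if it cannot be created.
    """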
    datasets = glob.glob('data/sets/study_*')
    try:
        os.mkdir(out_path)
    except OSError:
        print "Couldn't make create output directory [%s], it may already exist." % out_path
        print "Remove it and try again."
        return False

    for name in datasets:
        dataset.load_dataset(name)
        name = os.path.split(name)[1]

        d = dict(name=name)
        folder = os.path.join(out_path, name)
        os.mkdir(folder)

        main.initialize()

        import re
        number = int(re.match(r'.*_z(\d)m_.*', name).groups()[0])
        with open('data/experiments/gt/gt%d.txt' % number) as f:
            GT = grid.gt2grid(f.read())
        grid.initialize_with_groundtruth(GT)

        total = 0
        output = []
        try:
            while 1:
                try:
                    dataset.advance()
                except (IOError, ValueError):
                    break
                if dataset.frame_num % 30 == 0:
                    print name, dataset.frame_num
                t1 = time.time()
                once()
                t2 = time.time()
                total += t2-t1

                output.append((main.R_correct.copy(), grid.occ.copy()))
        except Exception as e:
            print e

        d['frames'] = dataset.frame_num
        d['time'] = total
        d['output'] = output
        with open(os.path.join(folder, 'output.pkl'), 'wb') as f:
            pickle.dump(d, f)

        with open(os.path.join(folder, 'final_output.txt'),'w') as f:
            f.write(grid.grid2gt(grid.occ))
Example #5
def start(dset=None, frame_num=0):
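    """Initialize the pipeline and load the target model for the collab experiment.

    Reads the target block layout from data/experiments/collab/block.txt
    into the global target_model, then either replays a recorded dataset
    or sets up live capture.
    """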
    main.initialize()

    #with open('data/experiments/collab/2011.txt') as f:
    global target_model
    with open('data/experiments/collab/block.txt') as f:
        target_model = grid.gt2grid(f.read())
    #grid.initialize_with_groundtruth(GT)

    if not FOR_REAL:
        if dset is None:
            dataset.load_random_dataset()
        else:
            dataset.load_dataset(dset)
        while dataset.frame_num < frame_num:
            dataset.advance()
    else:
        config.load('data/newest_calibration')
        opennpy.align_depth_to_rgb()
        dataset.setup_opencl()
Example #6
def run_error():
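    """Score each processed 'study*' dataset against its ground truth.

    Aligns the final output grid to the ground truth, classifies every
    cell (correct, missing, incorrectly added, correctly removed, ...),
    prints the error rate, and pickles the per-dataset results to
    data/experiments/error_output.pkl.
    """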

    datasets = glob.glob('data/sets/study*')
    datasets.sort()
    d = {}
    for name in datasets:
        if 'user4' in name: continue
        dataset.load_dataset(name)
        name = os.path.split(name)[1]
        print name

        # Open the ground truth file
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            import re
            # Fall back on generic ground truth file
            match = re.match(r'.*_z(\d)m_(.*)', name)
            if match is None:
                continue

            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number

        # Load the ground truth file
        removed = None
        added = None
        with open(fname) as fp:
            s = fp.read()
            config.GT = grid.gt2grid(s, chars='*')
            removed = grid.gt2grid(s, chars='rR')
            added = grid.gt2grid(s, chars='aA')

        import re
        number, kind = re.match(r'.*_z(\d)m_(.*)$', name).groups()
        number = int(number)

        with open(
                os.path.join('data/experiments/output/', name,
                             'final_output.txt'), 'r') as f:
            final_output = f.read()

        out = grid.gt2grid(final_output)
        gt = config.GT

        try:
            c, _ = hashalign.find_best_alignment(out, 0 * out, gt, 0 * gt)
        except ValueError:
            red = gt | added
            yellow = 0 * out
            green = 0 * out
            purple = 0 * out
            blue = 0 * out
        else:
            gt = hashalign.apply_correction(gt, *c)
            added = hashalign.apply_correction(added, *c)
            removed = hashalign.apply_correction(removed, *c)
            should_be_there = gt | added
            shouldnot_be_there = (~gt & ~added) | removed
            # Sanity check
            assert np.all(shouldnot_be_there ^ should_be_there)

            red = ~out & should_be_there
            yellow = out & shouldnot_be_there  # Incorrect add
            purple = ~out & removed  # Correct remove
            blue = out & added  # Correct add
            green = out & gt  # Correct block place

            # Sanity checks
            assert np.all(red + yellow + purple + blue + green <= 1)
            assert np.sum(red) + np.sum(blue) + np.sum(green) == np.sum(
                should_be_there)

        err = np.sum(yellow | red)
        totalblocks = np.sum(red | green | blue)
        print err, totalblocks, float(err) / totalblocks

        #_,_,err1,_,_ = grid.xcorr_correction(out, config.GT)
        #assert err == err1/float(gt.sum())
        d[name] = {
            'err': err,
            'tot': (red + green + blue).sum(),
            'rel': err / float(totalblocks),
            'num': number,
            'type': kind,
        }
        d[name].update(
            dict(green=green, red=red, yellow=yellow, purple=purple,
                 blue=blue))
    with open('data/experiments/error_output.pkl', 'wb') as f:
        pickle.dump(d, f)
Example #7
def load_gt():
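    """Load the generic ground truth grid gt1.txt into the global GT."""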
    global GT
    with open('data/experiments/gt/gt1.txt') as f:
        GT = grid.gt2grid(f.read())
Example #8
def run_grid(clearout=True):
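    """Build an HTML report comparing each dataset's output to its ground truth.

    Copies the viewer assets into out_path, aligns every final output grid
    against its ground truth, writes per-dataset block and ground-truth
    pages, and returns the per-dataset statistics as the list ds.
    """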
    global ds
    ds = []
    
    if clearout:
        try:
            shutil.rmtree(out_path)
        except OSError:
            pass
    try:
        os.makedirs(out_path)
    except OSError:
        pass
    
    for name in ('ThreeCanvas.js','Plane.js','Cube.js','video.js',
            'video-js.css','default.jpg','blocksetup.js'):
        shutil.copy('makewww/' + name, out_path)

    with open('makewww/blockviewiframetemplate.html','r') as f:
        tmp = template.Template(f.read())

    datasets = glob.glob('data/sets/study*')
    datasets.sort()
    total = len(datasets)
    for x,name in enumerate(datasets):
        dataset.load_dataset(name)
        name = os.path.split(name)[1]
        
        # Open the ground truth file
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            # Fall back on generic ground truth file
            match = re.match(r'.*_z(\d)m_.*', name)
            if match is None:
                continue
            
            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number
        
        # Load the ground truth file
        removed = None
        added = None
        with open(fname) as fp:
            s = fp.read()
            config.GT = grid.gt2grid(s, chars='*')
            removed = grid.gt2grid(s, chars='rR')
            added = grid.gt2grid(s, chars='aA')

        try:
            with open(os.path.join('data/experiments/output/', name,
                    'output.pkl'), 'rb') as f:
                output = pickle.load(f)
        except IOError:
            continue
        
        with open(os.path.join('data/experiments/output/',name,
                'final_output.txt'),'r') as f:
            final_output = f.read()

        folder = os.path.join(out_path, name)
        os.mkdir(folder)

        print "(%d/%d)" % (x+1,total), name
        out = grid.gt2grid(final_output)
        gt = config.GT

        try:
            c,_ = hashalign.find_best_alignment(out, 0*out, gt, 0*gt)
        except ValueError:
            red = gt | added
            yellow = 0*out
            green = 0*out
            purple = 0*out
            blue = 0*out
        else:
            gt = hashalign.apply_correction(gt, *c)
            added = hashalign.apply_correction(added, *c)
            removed = hashalign.apply_correction(removed, *c)
            should_be_there = gt | added 
            shouldnot_be_there = (~gt & ~added) | removed
            # Sanity check
            assert np.all(shouldnot_be_there ^ should_be_there)

            red = ~out & should_be_there
            yellow = out & shouldnot_be_there # Incorrect add
            purple = ~out & removed # Correct remove
            blue = out & added # Correct add
            green = out & gt # Correct block place

            # Sanity checks
            assert np.all((red+yellow+purple+blue+green) <= 1)
            assert np.sum(red) + np.sum(blue) + np.sum(green) == np.sum(should_be_there)

            #err = np.sum(yellow | red) 
            #print err, gt.sum(), err/float(gt.sum())
        
        # Output display
        with open(os.path.join(out_path, '%s_block.html' % name),'w') as f:
            f.write(tmp.render(template.Context(dict(
                red=gridstr(red),
                blue=gridstr(blue),
                green=gridstr(green),
                yellow=gridstr(yellow),
                purple=gridstr(purple),
                msg="Output"
            ))))
        
        # Ground truth display
        with open(os.path.join(out_path, '%s_gt.html' % name),'w') as f:
            f.write(tmp.render(template.Context(dict(
                blue=gridstr(added),
                green=gridstr(gt),
                purple=gridstr(removed),
                red='[]',
                yellow='[]',
                msg="Ground Truth"
            ))))
        
        totalblocks = np.sum(green + red + blue)
        incorrect = np.sum(red + yellow)
        
        with open(os.path.join(out_path, '%s_block.txt' % name), 'w') as f:
            f.write(grid.gt2grid(final_output))
        
        # Only take the metadata we need from the output
        d = dict([(key,output[key]) for key in ('frames', 'time')])
        d['name'] = name
        d['total'] = totalblocks
        d['incorrect'] = incorrect
        d['error'] = float(incorrect) / totalblocks * 100
        ds.append(d)
    return ds
Example #9
def run_grid(clearout=True):
    global ds
    ds = []

    if clearout:
        try:
            shutil.rmtree(out_path)
        except OSError:
            pass
    try:
        os.makedirs(out_path)
    except OSError:
        pass

    for name in ('ThreeCanvas.js', 'Plane.js', 'Cube.js', 'video.js',
                 'video-js.css', 'default.jpg', 'blocksetup.js'):
        shutil.copy('makewww/' + name, out_path)

    with open('makewww/blockviewiframetemplate.html', 'r') as f:
        tmp = template.Template(f.read())

    datasets = glob.glob('data/sets/study*')
    datasets.sort()
    total = len(datasets)
    for x, name in enumerate(datasets):
        dataset.load_dataset(name)
        name = os.path.split(name)[1]

        # Open the ground truth file
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            # Fall back on generic ground truth file
            match = re.match(r'.*_z(\d)m_.*', name)
            if match is None:
                continue

            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number

        # Load the ground truth file
        removed = None
        added = None
        with open(fname) as fp:
            s = fp.read()
            config.GT = grid.gt2grid(s, chars='*')
            removed = grid.gt2grid(s, chars='rR')
            added = grid.gt2grid(s, chars='aA')

        try:
            with open(
                    os.path.join('data/experiments/output/', name,
                                 'output.pkl'), 'rb') as f:
                output = pickle.load(f)
        except IOError:
            continue

        with open(
                os.path.join('data/experiments/output/', name,
                             'final_output.txt'), 'r') as f:
            final_output = f.read()

        folder = os.path.join(out_path, name)
        os.mkdir(folder)

        print "(%d/%d)" % (x + 1, total), name
        out = grid.gt2grid(final_output)
        gt = config.GT

        try:
            c, _ = hashalign.find_best_alignment(out, 0 * out, gt, 0 * gt)
        except ValueError:
            red = gt | added
            yellow = 0 * out
            green = 0 * out
            purple = 0 * out
            blue = 0 * out
        else:
            gt = hashalign.apply_correction(gt, *c)
            added = hashalign.apply_correction(added, *c)
            removed = hashalign.apply_correction(removed, *c)
            should_be_there = gt | added
            shouldnot_be_there = (~gt & ~added) | removed
            # Sanity check
            assert np.all(shouldnot_be_there ^ should_be_there)

            red = ~out & should_be_there
            yellow = out & shouldnot_be_there  # Incorrect add
            purple = ~out & removed  # Correct remove
            blue = out & added  # Correct add
            green = out & gt  # Correct block place

            # Sanity checks
            assert np.all((red + yellow + purple + blue + green) <= 1)
            assert np.sum(red) + np.sum(blue) + np.sum(green) == np.sum(
                should_be_there)

            #err = np.sum(yellow | red)
            #print err, gt.sum(), err/float(gt.sum())

        # Output display
        with open(os.path.join(out_path, '%s_block.html' % name), 'w') as f:
            f.write(
                tmp.render(
                    template.Context(
                        dict(red=gridstr(red),
                             blue=gridstr(blue),
                             green=gridstr(green),
                             yellow=gridstr(yellow),
                             purple=gridstr(purple),
                             msg="Output"))))

        # Ground truth display
        with open(os.path.join(out_path, '%s_gt.html' % name), 'w') as f:
            f.write(
                tmp.render(
                    template.Context(
                        dict(blue=gridstr(added),
                             green=gridstr(gt),
                             purple=gridstr(removed),
                             red='[]',
                             yellow='[]',
                             msg="Ground Truth"))))

        totalblocks = np.sum(green + red + blue)
        incorrect = np.sum(red + yellow)

        with open(os.path.join(out_path, '%s_block.txt' % name), 'w') as f:
            f.write(grid.gt2grid(final_output))

        # Only take the metadata we need from the output
        d = dict([(key, output[key]) for key in ('frames', 'time')])
        d['name'] = name
        d['total'] = totalblocks
        d['incorrect'] = incorrect
        d['error'] = float(incorrect) / totalblocks * 100
        ds.append(d)
    return ds
Example #10
def run_error():

    datasets = glob.glob('data/sets/study*')
    datasets.sort()
    d = {}
    for name in datasets:
        dataset.load_dataset(name)
        name = os.path.split(name)[1]
        print name

        # Open the ground truth file
        custom = os.path.join('data/sets/', name, 'gt.txt')
        if os.path.exists(custom):
            # Try dataset directory first
            fname = custom
        else:
            import re
            # Fall back on generic ground truth file
            match = re.match(r'.*_z(\d)m_(.*)', name)
            if match is None:
                continue
            
            number = int(match.groups()[0])
            fname = 'data/experiments/gt/gt%d.txt' % number
        
        # Load the ground truth file
        removed = None
        added = None
        with open(fname) as fp:
            s = fp.read()
            config.GT = grid.gt2grid(s, chars='*')
            removed = grid.gt2grid(s, chars='rR')
            added = grid.gt2grid(s, chars='aA')

        import re
        number, kind = re.match(r'.*_z(\d)m_(.*)$', name).groups()
        number = int(number)

        with open(os.path.join('data/experiments/output/',name,
                               'final_output.txt'),'r') as f:
            final_output = f.read()

        out = grid.gt2grid(final_output)
        gt = config.GT

        try:
            c,_ = hashalign.find_best_alignment(out, 0*out, gt, 0*gt)
        except ValueError:
            red = gt | added
            yellow = 0*out
            green = 0*out
            purple = 0*out
            blue = 0*out
        else:
            gt = hashalign.apply_correction(gt, *c)
            added = hashalign.apply_correction(added, *c)
            removed = hashalign.apply_correction(removed, *c)
            should_be_there = gt | added 
            shouldnot_be_there = (~gt & ~added) | removed

            # Sanity check
            assert np.all(shouldnot_be_there ^ should_be_there)

            red = ~out & should_be_there
            yellow = out & shouldnot_be_there # Incorrect add
            purple = ~out & removed # Correct remove
            blue = out & added # Correct add
            green = out & gt # Correct block place

            # Sanity checks
            assert np.all(red+yellow+purple+blue+green <= 1)
            assert np.sum(red) + np.sum(blue) + np.sum(green) == np.sum(should_be_there)

        err = np.sum(yellow | red) 
        totalblocks = np.sum(red | green | blue)
        print err, totalblocks, float(err)/totalblocks

        #_,_,err1,_,_ = grid.xcorr_correction(out, config.GT)
        #assert err == err1/float(gt.sum())
        d[name] = {'err': err,
                   'tot': (red+green+blue).sum(),
                   'rel': err/float(totalblocks),
                   'num': number,
                   'type': kind,}
        d[name].update(dict(green=green,
                            red=red,
                            yellow=yellow,
                            purple=purple,
                            blue=blue))
    with open('data/experiments/error_output.pkl', 'wb') as f:
        pickle.dump(d, f)