예제 #1
0
def main():
    """Replay sampled puzzle configurations from --samplein as an animation."""
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    args = rlargs.parse()
    assert args.samplein, "--samplein <input dir>"
    puzzle = curiosity.RigidPuzzle(args, 0)

    def iter_qs():
        # Walk <samplein>/0.npz, 1.npz, ... until a file is missing,
        # yielding every configuration stored under key 'Qs'.
        index = 0
        fn = '{}/{}.npz'.format(args.samplein, index)
        print(fn)
        while os.path.exists(fn):
            for q in np.load(fn)['Qs']:
                yield q
            index += 1
            fn = '{}/{}.npz'.format(args.samplein, index)
            print(fn)

    def iter_rgb():
        # Render each configuration and yield the first camera view.
        for q in iter_qs():
            puzzle.qstate = q
            rgb, _ = puzzle.vstate
            yield rgb[0]  # First view

    rlreanimator.reanimate(iter_rgb(), fps=20)
def test_rldriver_main():
    """Build an RLDriver in a fresh graph, hand it to a worker thread, and
    render one multi-view RGB-D image from the master's renderer.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    # NOTE(review): init_state appears unused in this function.
    init_state = np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5],
                          dtype=np.float32)
    g = tf.Graph()
    with g.as_default():
        # Driver = vision networks + renderer for the alpha puzzle, with
        # cameras at several elevations (degrees, count) around the model.
        driver = rldriver.RLDriver(
            ['../res/alpha/env-1.2.obj', '../res/alpha/robot.obj'],
            np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5],
                     dtype=np.float32), [(30.0, 12), (-30.0, 12), (0, 4),
                                         (90, 1), (-90, 1)],
            config.SV_VISCFG,
            config.MV_VISCFG,
            use_rgb=True)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
    # Share display, GL context, driver and graph with the worker thread.
    thread = threading.Thread(target=test_rldriver_worker,
                              args=(dpy, glctx, driver, g))
    thread.start()
    r = driver.renderer
    w = r.pbufferWidth
    h = r.pbufferHeight
    r.state = np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32)
    r.render_mvrgbd()
    # Stack all views vertically into one tall image.
    img = r.mvrgb.reshape(w * r.views.shape[0], h, 3)
    dep = r.mvdepth.reshape(w * r.views.shape[0], h)
    depimg = Image.fromarray(dep)
    imsave('mvrgb-master.png', img)
    depimg.save('mvdepth-master.tiff')
    thread.join()
예제 #3
0
    def __init__(self, env, rob, center=None, res=224, flat_surface=False):
        """Create an off-screen pyosr renderer for an env/robot mesh pair.

        env/rob are mesh file paths; center optionally forces the robot's
        rotation center; res is the square framebuffer resolution.
        """
        pyosr.init()
        self.dpy = pyosr.create_display()
        self.glctx = pyosr.create_gl_context(self.dpy)

        renderer = pyosr.Renderer()
        renderer.avi = True
        renderer.flat_surface = flat_surface
        renderer.pbufferWidth = res
        renderer.pbufferHeight = res
        renderer.setup()

        renderer.loadModelFromFile(env)
        renderer.loadRobotFromFile(rob)
        renderer.scaleToUnit()
        renderer.angleModel(0.0, 0.0)
        renderer.default_depth = 0.0
        if center is not None:
            renderer.enforceRobotCenter(center)
        # Single front-facing camera.
        renderer.views = np.array([[0.0, 0.0]], dtype=np.float32)

        self.r = renderer
        self.res = res
        self.rgb_shape = (res, res, 3)
        self.dep_shape = (res, res, 1)
def render_everything_to(src, dst, label):
    """Render multi-view depth buffers for every mesh found under src.

    Recursively scans `src` for .dae/.obj/.ply meshes, renders each one
    from the view set defined by config.VIEW_CFG, and writes the raw
    float depth bytes to dst/<modelId>.train. When `label` >= 0, an
    int32 label prefix is written before the depth data.

    Parameters:
        src   -- root directory to scan for mesh files
        dst   -- output directory for the .train blobs
        label -- class label; a negative value means "no label prefix"
    """
    pyosr.init()
    pyosr.create_gl_context(pyosr.create_display())
    modelId = 0
    r = pyosr.Renderer()
    r.setup()
    r.default_depth = 0.0
    view_array = []
    for angle, ncam in config.VIEW_CFG:
        # ncam cameras evenly spaced over 360 degrees at this elevation.
        view_array += [[angle, float(i)]
                       for i in np.arange(0.0, 360.0, 360.0 / float(ncam))]
    r.views = np.array(view_array)
    w = r.pbufferWidth
    h = r.pbufferHeight
    for root, dirs, files in os.walk(src):
        for fn in files:
            _, ext = os.path.splitext(fn)
            if ext not in ['.dae', '.obj', '.ply']:
                continue
            ffn = os.path.join(root, fn)
            r.loadModelFromFile(ffn)
            r.scaleToUnit()
            r.angleModel(0.0, 0.0)
            dep = r.render_mvdepth_to_buffer()
            dep = dep.reshape(r.views.shape[0], w, h, 1)
            outfn = os.path.join(dst, '%07d.train' % modelId)
            # BUGFIX: open in binary mode -- we write raw bytes below, and
            # text mode ('w') raises TypeError under Python 3.
            with open(outfn, 'wb') as f:
                if label >= 0:
                    labelbytes = np.array([label], dtype=np.int32).tobytes()
                    f.write(labelbytes)
                f.write(dep.tobytes())
            print('{} -> {}'.format(ffn, outfn))
            modelId += 1
def verifymap(mapfn):
    """Verify a roadmap file against the aniconf puzzle.

    Loads the env/robot pair from aniconf into a renderer, wraps it in a
    pyosr.GTGenerator, and writes the verified roadmap to
    "<mapfn>.verified".
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.setup()
    r.loadModelFromFile(aniconf.env_fn)
    r.loadRobotFromFile(aniconf.rob_fn)
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    # Single front-facing camera.
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    # Disabled manual transit_state_to check, kept for reference.
    '''
    st0 = np.array([21.782108575648873,11.070742691783639,13.072090341969885,0.99496368307688909,-0.050573680994590003,0.08255004745739393,0.025981951687884433], dtype=np.float64)
    st0 = r.translate_to_unit_state(st0.transpose())
    st1 = np.array([24.404447383193428,16.614281021136808,17.241012748680941,0.89856334412742611,-0.42392368380753659,0.035352511370216902,0.10780921499298371], dtype=np.float64)
    st1 = r.translate_to_unit_state(st1.transpose())
    print(r.transit_state_to(st0, st1, 0.00125))
    return
    '''
    gt = pyosr.GTGenerator(r)
    gt.rl_stepping_size = 0.0125
    # gt.verify_magnitude = 0.0125
    gt.load_roadmap_file(mapfn)
    # gt.init_gt()
    gt.save_verified_roadmap_file(mapfn + ".verified")
    return
예제 #6
0
def test():
    """Render the alpha puzzle from 12 azimuths, save RGB/depth images,
    then build a small vision-network stack on the rendered views.

    NOTE: exit() is called mid-function; everything after it is dead
    code kept for reference.
    """
    pyosr.init()
    pyosr.create_gl_context(pyosr.create_display())

    r=pyosr.Renderer()
    r.setup()
    r.loadModelFromFile('../res/alpha/env-1.2.obj')
    r.loadRobotFromFile('../res/alpha/robot.obj')
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    # One camera at 30-degree elevation every 30 degrees of azimuth.
    r.views = np.array([[30.0, float(i)] for i in range(0, 360, 30)], dtype=np.float32)
    print(r.views)
    print(r.views.shape)
    w = r.pbufferWidth
    h = r.pbufferHeight
    r.state = np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32)
    r.render_mvrgbd()
    # Stack all views vertically into single tall images.
    img = r.mvrgb.reshape(w * r.views.shape[0], h, 3)
    dep = r.mvdepth.reshape(w * r.views.shape[0], h)
    depimg = Image.fromarray(dep)
    imsave('mvrgb.png', img)
    depimg.save('mvdepth.tiff')

    # Vision NN

    vis0 = vision.VisionLayerConfig(16)
    vis1 = vision.VisionLayerConfig(16)
    vis1.strides = [1, 3, 3, 1]
    vis1.kernel_size = [5, 5]
    vis2 = vision.VisionLayerConfig(32)
    vis3 = vision.VisionLayerConfig(64)

    # Per-view tensors: (views, w, h, channels).
    imgin = img.reshape(r.views.shape[0], w, h, 3)
    depin = r.mvdepth.reshape(r.views.shape[0], w , h, 1)
    print('imgin shape: {}'.format(imgin.shape))
    mv_color = vision.VisionNetwork(imgin.shape,
            [vis0, vis1, vis2, vis3], 0, 256)
    featvec = mv_color.features
    print('mv_color.featvec.shape = {}'.format(featvec.shape))

    # Reshape the 256-dim feature vector into a 16x16 "image" per view,
    # then move the view axis to channels for the second network.
    sq_featvec = tf.reshape(featvec, [-1, 16, 16, 1]) # Squared feature vector
    chmv_featvec = tf.transpose(sq_featvec, [3, 1, 2, 0])
    print('chmv_featvec {}'.format(chmv_featvec.shape))

    pvis0 = vision.VisionLayerConfig(64)
    color = vision.VisionNetwork(None, [pvis0], 0, 256, chmv_featvec)
    featvec = color.features
    print('featvec.shape = {}'.format(featvec.shape))

    # Terminates the process here; the code below never runs.
    exit()

    mvpix = r.render_mvdepth_to_buffer()
    img = mvpix.reshape(w * r.views.shape[0], h)
    print(img.shape)
    plt.pcolor(img)
    plt.show()
    r.teardown()
    pyosr.shutdown()
예제 #7
0
def eval_classification():
    """Run the classification evaluator over ./cat/eval_data."""
    pyosr.init()
    # FIXME: Each thread has one ctx
    pyosr.create_gl_context(pyosr.create_display())
    init_state = np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5],
                          dtype=np.float32)
    with tf.Graph().as_default():
        step = tf.contrib.framework.get_or_create_global_step()
        evaluator = cat.TrainCat(step, data_dir='./cat/eval_data')
        evaluator.eval()
def torus_master():
    """Master side of multi-threaded A3C training on the torus puzzle.

    Builds the master driver and optimizer, spawns THREAD worker threads
    (each builds its own graph nodes before the next starts), restores
    the latest checkpoint when present, signals workers to begin, waits
    for them, and saves a final checkpoint.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    util.mkdir_p(ckpt_dir)
    with g.as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign_add(global_step,
                                              1,
                                              name='increment_global_step')
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(MODELS,
                                         init_state,
                                         view_config,
                                         config.SV_VISCFG,
                                         config.MV_VISCFG,
                                         use_rgb=True)
        # Only the master's variables (plus global_step) are checkpointed.
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        with tf.Session() as sess:
            threads = []
            for i in range(THREAD):
                thread_args = (i, dpy, glctx, masterdriver, g, grad_applier,
                               learning_rate_input, global_step,
                               increment_global_step, sess, saver)
                thread = threading.Thread(target=torus_worker,
                                          args=thread_args)
                thread.start()
                # Wait until this worker finished adding its graph nodes
                # before starting the next one.
                graph_completes[i].wait()
                threads.append(thread)
            '''
            We need to run the initializer because only master's variables are stored.
            '''
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            epoch = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                epoch = sess.run(global_step)
                print('Restored!, global_step {}'.format(epoch))

            # Unblock the workers, then wait for training to finish.
            init_done.set()
            for thread in threads:
                thread.join()
            print("Saving final checkpoint")
            fn = saver.save(sess,
                            ckpt_dir + ckpt_prefix,
                            global_step=global_step)
            print("Saved checkpoint to {}".format(fn))
예제 #9
0
def test_rldriver_main():
    """Single-threaded A3C training loop with periodic checkpointing.

    Builds a master driver plus one training driver that syncs from it,
    restores the latest checkpoint when present, then runs train_a3c
    until global_step reaches 100k, saving every 1000 epochs or 10 min.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    util.mkdir_p(ckpt_dir)
    with g.as_default():
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(MODELS,
                init_state,
                view_config,
                config.SV_VISCFG,
                config.MV_VISCFG,
                use_rgb=True)
        # Worker driver shares weights with the master via sync/apply ops.
        driver = rldriver.RLDriver(MODELS,
                    init_state,
                    view_config,
                    config.SV_VISCFG,
                    config.MV_VISCFG,
                    use_rgb=True,
                    master_driver=masterdriver,
                    grads_applier=grad_applier)
        driver.get_sync_from_master_op()
        driver.get_apply_grads_op()
        driver.learning_rate_input = learning_rate_input
        driver.a3c_local_t = 32
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign_add(global_step, 1, name='increment_global_step')
        # Only the master's variables (plus global_step) are checkpointed.
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        last_time = time.time()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            epoch = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                epoch = sess.run(global_step)
                print('Restored!, global_step {}'.format(epoch))
            while epoch < 100 * 1000:
                driver.train_a3c(sess)
                epoch += 1
                sess.run(increment_global_step)
                if epoch % 1000 == 0 or time.time() - last_time >= 10 * 60:
                    print("Saving checkpoint")
                    fn = saver.save(sess, ckpt_dir+ckpt_prefix, global_step=global_step)
                    print("Saved checkpoint to {}".format(fn))
                    last_time = time.time()
                print("Epoch {}".format(epoch))
        # NOTE(review): the trailing ''' opens a string literal that
        # appears to continue beyond this excerpt.
        '''
예제 #10
0
def test_2():
    """Apply each action from --actionset in sequence, printing the
    resulting state, termination flag, and progress ratio."""
    pyosr.init()
    args = rlargs.parse()
    renderer = rlutil.create_renderer(args)
    renderer.state = np.array(renderer.translate_to_unit_state(args.istateraw))
    for act in args.actionset:
        next_state, done, ratio = renderer.transit_state(
            renderer.state, act, args.amag, args.vmag)
        print("{} {} {}".format(next_state, done, ratio))
        renderer.state = next_state
예제 #11
0
def _create_renderer(args):
    """Create an EGL-backed pyosr.Renderer with a single front view.

    Honors args._fb_resolution (square framebuffer) when present.
    """
    pyosr.init()
    display = pyosr.create_display()
    context = pyosr.create_gl_context(display)
    renderer = pyosr.Renderer()
    if hasattr(args, '_fb_resolution'):
        resolution = args._fb_resolution
        renderer.pbufferWidth = resolution
        renderer.pbufferHeight = resolution
    renderer.setup()
    renderer.views = np.array([[0.0, 0.0]], dtype=np.float32)
    return renderer
예제 #12
0
def process_main(args, mpqueue=None):
    """Entry point of one training/evaluation process.

    CAVEAT: without TF's allow_growth option the renderer (and hence the
    GL context) must be created before calling any TensorFlow routine.
    """
    pyosr.init()
    display = pyosr.create_display()
    context = pyosr.create_gl_context(display)
    # Build the training/evaluation engine and hand over control.
    engine = curiosity_create_engine(args, mpqueue=mpqueue)
    engine.run()
예제 #13
0
def train_puzzle():
    """Construct an RLDriver for the alpha puzzle inside a fresh TF graph.

    NOTE(review): the driver is built but never trained, returned, or
    stored here -- presumably the training loop lives elsewhere or this
    snippet was truncated.
    """
    pyosr.init()
    pyosr.create_gl_context(pyosr.create_display()) # FIXME: Each thread has one ctx
    # NOTE(review): init_state is unused in the visible code.
    init_state = np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32)
    with tf.Graph().as_default():
        driver = rldriver.RLDriver(['../res/alpha/env-1.2.obj', '../res/alpha/robot.obj'],
                np.array([0.2, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32),
                [(30.0, 12), (-30.0, 12), (0, 4), (90, 1), (-90, 1)],
                config.SV_VISCFG,
                config.MV_VISCFG,
                use_rgb=True)
def test_rldriver_main():
    """Endless single-threaded A3C training on the alpha puzzle.

    Builds a master driver plus a worker driver syncing from it, restores
    the latest checkpoint when present, then loops train_a3c forever,
    checkpointing every 1000 epochs or 10 seconds.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    with g.as_default():
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(
            ['../res/alpha/env-1.2.obj', '../res/alpha/robot.obj'],
            init_state,
            view_config,
            config.SV_VISCFG,
            config.MV_VISCFG,
            use_rgb=True)
        # Worker driver shares weights with the master via sync/apply ops.
        driver = rldriver.RLDriver(
            ['../res/alpha/env-1.2.obj', '../res/alpha/robot.obj'],
            init_state,
            view_config,
            config.SV_VISCFG,
            config.MV_VISCFG,
            use_rgb=True,
            master_driver=masterdriver,
            grads_applier=grad_applier)
        driver.get_sync_from_master_op()
        driver.get_apply_grads_op()
        driver.learning_rate_input = learning_rate_input
        driver.a3c_local_t = 2
        global_step = tf.contrib.framework.get_or_create_global_step()
        # Only the master's variables (plus global_step) are checkpointed.
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        last_time = time.time()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Restored!')
            epoch = 0
            while True:
                driver.train_a3c(sess)
                epoch += 1
                if epoch % 1000 == 0 or time.time() - last_time >= 10:
                    print("Saving checkpoint")
                    saver.save(sess, ckpt_dir, global_step=global_step)
                    last_time = time.time()
                print("Epoch {}".format(epoch))
        # NOTE(review): the trailing ''' opens a string literal that
        # appears to continue beyond this excerpt.
        '''
예제 #15
0
def test():
    """Smoke-test rendering, collision queries, and state transitions on
    the alpha puzzle, writing RGB/depth images along the way."""
    pyosr.init()
    pyosr.create_gl_context(pyosr.create_display())

    r = pyosr.Renderer()
    r.setup()
    r.loadModelFromFile('../res/alpha/env-1.2.obj')
    r.loadRobotFromFile('../res/alpha/robot.obj')
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    # One camera at 30-degree elevation every 30 degrees of azimuth.
    r.views = np.array([[30.0, float(az)] for az in range(0, 360, 30)],
                       dtype=np.float32)
    print(r.views)
    print(r.views.shape)
    w, h = r.pbufferWidth, r.pbufferHeight
    nviews = r.views.shape[0]
    r.state = np.array([0.125, -0.075, 0.0, 0.5, 0.5, 0.5, 0.5],
                       dtype=np.float32)
    r.render_mvrgbd()
    # All views stacked vertically into single tall images.
    img = r.mvrgb.reshape(w * nviews, h, 3)
    dep = r.mvdepth.reshape(w * nviews, h)
    depimg = Image.fromarray(dep)
    imsave('mvrgb.png', img)
    depimg.save('mvdepth.tiff')
    print("Is colliding free? {} {}".format(r.state,
                                            r.is_valid_state(r.state)))
    r.state = np.array([0.04, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32)
    print("Is colliding free? {} {} Expecting True".format(
        r.state, r.is_valid_state(r.state)))
    r.state = np.array([0.7, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32)
    print("Is colliding free? {} {} Expecting True".format(
        r.state, r.is_valid_state(r.state)))
    goal_state = np.array([0.04, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5],
                          dtype=np.float32)
    mags = 0.7 - 0.04
    deltas = 0.0125
    print(r.transit_state(r.state, 1, mags, deltas))
    print(r.transit_state(r.state, 6, mags, deltas))
    # Sweep the robot along the x axis, checking validity per position.
    for x in np.arange(-0.8, 0.8, 0.02):
        r.state = np.array([x, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], dtype=np.float32)
        print("Is colliding free? {} {}".format(r.state,
                                                r.is_valid_state(r.state)))
        r.render_mvrgbd()
        frame = r.mvrgb.reshape(w * nviews, h, 3)
        imsave('mvrgb-x={}.png'.format(x), frame)

    r.teardown()
    pyosr.shutdown()
def _create_r():
    """Build a renderer for the aniconf env/robot pair with one front view."""
    pyosr.init()
    display = pyosr.create_display()
    context = pyosr.create_gl_context(display)
    renderer = pyosr.Renderer()
    renderer.setup()
    renderer.loadModelFromFile(aniconf.env_fn)
    renderer.loadRobotFromFile(aniconf.rob_fn)
    renderer.scaleToUnit()
    renderer.angleModel(0.0, 0.0)
    renderer.default_depth = 0.0
    renderer.views = np.array([[0.0, 0.0]], dtype=np.float32)
    return renderer
예제 #17
0
def create_offscreen_renderer(puzzle_file, resolution=256):
    """Create an off-screen renderer for puzzle_file.

    The EGL display is created once per process (cached in the module
    global _egl_dpy); each call gets its own GL context.
    """
    global _egl_dpy
    import pyosr
    if _egl_dpy is None:
        # One-time process-wide initialization.
        pyosr.init()
        _egl_dpy = pyosr.create_display()
    context = pyosr.create_gl_context(_egl_dpy)
    renderer = pyosr.Renderer()
    renderer.pbufferWidth = resolution
    renderer.pbufferHeight = resolution
    renderer.setup()
    renderer.views = np.array([[0.0, 0.0]], dtype=np.float32)
    _load_unit_world(renderer, puzzle_file)
    return renderer
def gtgen(gtfn, gtdir, gtnumber):
    """Generate ground-truth paths from random valid initial states.

    Loads roadmap data from the npz file gtfn into a GTGenerator, then
    gtnumber times samples a random rigid-body state (uniform quaternion
    via the standard three-uniforms method), keeping only states that are
    collision free and not already disentangled, and asks the generator
    for a GT path from that state.

    NOTE(review): generated paths are never stored and `gtdir` is unused
    in the visible code -- the saving logic may have been removed or may
    live beyond this excerpt.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.setup()
    r.loadModelFromFile(aniconf.env_fn)
    r.loadRobotFromFile(aniconf.rob_fn)
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    gt = pyosr.GTGenerator(r)
    gt.rl_stepping_size = 0.0125
    gtdata = np.load(gtfn)
    gt.install_gtdata(gtdata['V'], gtdata['E'], gtdata['D'], gtdata['N'])
    gt.init_knn_in_batch()

    print(r.scene_matrix)
    print(r.robot_matrix)
    # return

    for i in range(gtnumber):
        # Random state, copied from RLDriver.restart_epoch
        while True:
            # Translation uniform in [-1, 1)^3.
            tr = np.random.rand(3) * 2.0 - 1.0
            # Uniform random unit quaternion (Shoemake's method).
            u1, u2, u3 = np.random.rand(3)
            quat = [
                sqrt(1 - u1) * sin(2 * pi * u2),
                sqrt(1 - u1) * cos(2 * pi * u2),
                sqrt(u1) * sin(2 * pi * u3),
                sqrt(u1) * cos(2 * pi * u3)
            ]
            part1 = np.array(tr, dtype=np.float32)
            part2 = np.array(quat, dtype=np.float32)
            r.state = np.concatenate((part1, part2))
            # Reject states that already solved the puzzle ...
            if r.is_disentangled(r.state):
                continue
            # ... and keep only collision-free ones.
            if r.is_valid_state(r.state):
                break
        gtstats, gtactions, terminated = gt.generate_gt_path(
            r.state, MAX_GT_STATES)
        if not terminated:
            '''
            Try again
            '''
            continue
예제 #19
0
    def __init__(self,
                 env,
                 rob,
                 rob_texfn='',
                 center=None,
                 res=256,
                 flat_surface=False,
                 gen_surface_normal=False):
        """Set up an off-screen renderer for an env/robot mesh pair.

        The EGL display is created lazily on first use (self.dpy is
        presumably a class-level attribute shared by all instances --
        confirm); each instance gets its own GL context.
        """
        if self.dpy is None:
            # First instance in this process: initialize EGL once.
            pyosr.init()
            self.dpy = pyosr.create_display()
        self.glctx = pyosr.create_gl_context(self.dpy)
        self.env_fn = env
        self.rob_fn = rob
        self.name = 'Unnamed'

        renderer = pyosr.Renderer()
        renderer.avi = True
        renderer.flat_surface = flat_surface
        renderer.pbufferWidth = res
        renderer.pbufferHeight = res
        renderer.setup()

        renderer.loadModelFromFile(env)
        renderer.loadRobotFromFile(rob)
        if rob_texfn:
            renderer.loadRobotTextureImage(rob_texfn)
        renderer.scaleToUnit()
        renderer.angleModel(0.0, 0.0)
        renderer.default_depth = 0.0
        if center is not None:
            renderer.enforceRobotCenter(center)
        # Single front-facing camera.
        renderer.views = np.array([[0.0, 0.0]], dtype=np.float32)

        self.r = renderer
        self.res = res
        self.rgb_shape = (res, res, 3)
        self.dep_shape = (res, res, 1)
        self.gen_surface_normal = gen_surface_normal
def _create_uw(aniconf, cmd):
    """Create the unit-world object appropriate for *cmd*.

    Rendering-related commands get a full pyosr.Renderer (with an
    ATLAS_RES framebuffer for atlas commands); other commands get a
    plain pyosr.UnitWorld, since Renderer is not available on HTCondor
    nodes. 'show' needs no unit world at all.
    """
    if cmd == 'show':
        return None  # show command does not need unit world object
    atlas_cmds = ['atlas2prim', 'useuniformatlas']
    if 'render' in cmd or cmd in atlas_cmds:
        pyosr.init()
        dpy = pyosr.create_display()
        glctx = pyosr.create_gl_context(dpy)
        uw = pyosr.Renderer()  # 'project' command requires a Renderer
        if cmd in atlas_cmds:
            uw.pbufferWidth = ATLAS_RES
            uw.pbufferHeight = ATLAS_RES
        uw.setup()
        uw.views = np.array([[0.0, 0.0]], dtype=np.float32)
    else:
        uw = pyosr.UnitWorld()  # pyosr.Renderer is not avaliable in HTCondor

    if aniconf.rob_ompl_center is None:
        # Without an OMPL center, the watertight and UV meshes must be
        # the same files.
        assert aniconf.env_wt_fn == aniconf.env_uv_fn
        assert aniconf.rob_wt_fn == aniconf.rob_uv_fn

    if cmd in ['run', 'isect']:
        uw.loadModelFromFile(aniconf.env_wt_fn)
        uw.loadRobotFromFile(aniconf.rob_wt_fn)
    else:
        # All the remaining commands need UV coordinates.
        uw.loadModelFromFile(aniconf.env_uv_fn)
        uw.loadRobotFromFile(aniconf.rob_uv_fn)
    if aniconf.rob_ompl_center is not None:
        uw.enforceRobotCenter(aniconf.rob_ompl_center)
    uw.scaleToUnit()
    uw.angleModel(0.0, 0.0)

    return uw
예제 #21
0
def aa_train_main(args):
    """Supervised training of the RLDriver from pre-generated GT samples.

    Builds a master/worker driver pair with continuous policy loss,
    restores the latest checkpoint when present, then trains for
    args.iter epochs, each epoch consuming one
    "<path>/<prefix><epoch % gtnumber>.npz" ground-truth file.
    Checkpoints every 1000 epochs or 10 minutes.
    """
    ckpt_dir = args.ckptdir
    ckpt_prefix = args.ckptprefix
    device = args.device
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    util.mkdir_p(ckpt_dir)
    with g.as_default():
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(MODELS,
                                         init_state,
                                         view_config,
                                         config.SV_VISCFG,
                                         config.MV_VISCFG,
                                         output_number=AA_OUTPUT_NUMBER,
                                         use_rgb=True,
                                         continuous_policy_loss=True)
        # Worker driver shares weights with the master via sync/apply ops.
        driver = rldriver.RLDriver(MODELS,
                                   init_state,
                                   view_config,
                                   config.SV_VISCFG,
                                   config.MV_VISCFG,
                                   output_number=AA_OUTPUT_NUMBER,
                                   use_rgb=True,
                                   master_driver=masterdriver,
                                   grads_applier=grad_applier,
                                   continuous_policy_loss=True)
        driver.get_sync_from_master_op()
        driver.get_apply_grads_op()
        driver.learning_rate_input = learning_rate_input
        driver.a3c_local_t = 32
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign_add(global_step,
                                              1,
                                              name='increment_global_step')
        # Only the master's variables (plus global_step) are checkpointed.
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        last_time = time.time()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            epoch = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                epoch = sess.run(global_step)
                print('Restored!, global_step {}'.format(epoch))
            while epoch < args.iter:
                # Cycle through the gtnumber pre-generated sample files.
                fn = "{}/{}{:06d}.npz".format(args.path, args.prefix,
                                              epoch % args.gtnumber)
                dic = np.load(fn)
                driver.train_from_gt(sess, dic['KEYS'], dic['TR'], dic['ROT'],
                                     dic['DIST'])
                epoch += 1
                sess.run(increment_global_step)
                if epoch % 1000 == 0 or time.time() - last_time >= 10 * 60:
                    print("Saving checkpoint")
                    fn = saver.save(sess,
                                    ckpt_dir + ckpt_prefix,
                                    global_step=global_step)
                    print("Saved checkpoint to {}".format(fn))
                    last_time = time.time()
                print("Epoch {}".format(epoch))
예제 #22
0
# Minimal pyosr smoke test: render one depth frame of the robot mesh and
# display it with matplotlib.
import pyosr
import numpy as np
import matplotlib.pyplot as plt

pyosr.init()
pyosr.create_gl_context(pyosr.create_display())

r = pyosr.Renderer()
r.setup()
r.loadModelFromFile('../res/alpha/robot.obj')
r.angleModel(30.0, 30.0)
pix = r.render_depth_to_buffer()
r.teardown()
pyosr.shutdown()

# NOTE(review): pbufferWidth/Height are read after teardown(); this
# works only if the renderer keeps these attributes valid -- confirm.
w = r.pbufferWidth
h = r.pbufferHeight

img = np.array(pix, dtype=np.float32)
img = img.reshape(w, h)

plt.pcolor(img)
plt.show()
예제 #23
0
def main():
    """Parse CLI arguments, build a player, and run the animation."""
    pyosr.init()
    parsed = rlargs.parse()
    print(parsed)
    reanimate(create_player(parsed))
예제 #24
0
def reanimate():
    """Animate a solution path (aniconf.keys_fn) with matplotlib.

    Interpolates between consecutive path keys, rendering each
    intermediate state and stopping early if a state is in collision.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.avi = True
    r.pbufferWidth = r.pbufferHeight = 512
    r.setup()
    # Prefer the watertight meshes when the config provides them.
    if not hasattr(aniconf, 'env_wt_fn'):
        r.loadModelFromFile(aniconf.env_fn)
        r.loadRobotFromFile(aniconf.rob_fn)
    else:
        r.loadModelFromFile(aniconf.env_wt_fn)
        r.loadRobotFromFile(aniconf.rob_wt_fn)
    if hasattr(aniconf, 'rob_ompl_center'):
        r.enforceRobotCenter(aniconf.rob_ompl_center)

    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    # r.views = np.array([[15.0, 110.0]], dtype=np.float32)
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    print(r.scene_matrix)
    print(r.robot_matrix)

    # r.set_perturbation(uw_random.random_state(0.00))
    # r.set_perturbation(uw_random.random_state(0.25))
    # r.set_perturbation(np.array([0,0.0,0,0.5,0.5,0.5,0.5],dtype=np.float32))
    # r.set_perturbation(np.array([0,0.0,0,0,0,1,0],dtype=np.float32))
    # return

    class ReAnimator(object):
        """Frame callback: advances time t by delta per frame and renders
        the state interpolated between keys[floor(t)] and keys[floor(t)+1].
        """
        reaching_terminal = False
        driver = None
        im = None
        keys = None

        def __init__(self, renderer, keys, delta=0.125):
            self.renderer = renderer
            self.keys = keys
            self.t = 0.0
            self.delta = delta
            self.reaching_terminal = False

        def perform(self, framedata):
            """Render one animation frame (called by FuncAnimation)."""
            r = self.renderer
            index = int(math.floor(self.t))
            nindex = index + 1
            if nindex < len(self.keys) and not self.reaching_terminal:
                pkey = self.keys[index]
                nkey = self.keys[nindex]
                # Fractional part of t interpolates between the two keys.
                tau = self.t - index
                state = interpolate(pkey, nkey, tau)
                r.state = r.translate_to_unit_state(state)
                print("tau {} TO NKEY {}".format(
                    tau,
                    pyosr.distance(r.state, r.translate_to_unit_state(nkey))))
                r.render_mvrgbd()
                print(r.mvrgb.shape)
                rgb = r.mvrgb.reshape((r.pbufferWidth, r.pbufferHeight, 3))
                print(r.state)
                valid = r.is_valid_state(r.state)
                disentangled = r.is_disentangled(r.state)
                print('\tNew State {} Collision Free ? {} Disentangled ? {}'.
                      format(r.state, valid, disentangled))
                if not valid:
                    # Stop the animation on the first colliding state.
                    print('\tNOT COLLISION FREE, SAN CHECK FAILED')
                    self.reaching_terminal = True
                if self.im is None:
                    print('rgb {}'.format(rgb.shape))
                    self.im = plt.imshow(rgb)
                else:
                    self.im.set_array(rgb)
                self.t += self.delta

    fig = plt.figure()
    keys = np.loadtxt(aniconf.keys_fn)
    # keys = np.loadtxt('rrt-secondhalf.path')
    if aniconf.keys_w_last:
        # if True:
        # Convert quaternions from xyzw order to wxyz order.
        print('before keys[0] {}'.format(keys[0]))
        keys[:, [3, 4, 5, 6]] = keys[:, [6, 3, 4, 5]]
        print('after keys[0] {}'.format(keys[0]))
    # ra = ReAnimator(r, keys, 1.0)
    ra = ReAnimator(r, keys, 0.0625)
    '''
    st0 = np.array([21.782108575648873,11.070742691783639,13.072090341969885,0.99496368307688909,-0.050573680994590003,0.08255004745739393,0.025981951687884433], dtype=np.float64)
    st1 = np.array([24.404447383193428,16.614281021136808,17.241012748680941,0.89856334412742611,-0.42392368380753659,0.035352511370216902,0.10780921499298371], dtype=np.float64)
    st2 = np.array([25.04292893291256,16.429006629785405,21.742598419634952,-0.080755517119222811,-0.980264716314169,0.167639957003235,0.066756851485538532], dtype=np.float64)
    ra = ReAnimator(r, [st1, st2], 0.0125)
    '''
    '''
    st1 = np.array([21.75988005530629,19.55840991214458,18.407298399116954,0.8747148780179097,-0.40598294544114955,0.21522777832862061,0.15404133737658857], dtype=np.float64)
    st2 = np.array([16.242401343877191,15.371074546390151,23.775856491398514,0.38753152804205182,-0.26626876971877833,-0.75270143169934201,-0.46082622729571437], dtype=np.float64)
    ra = ReAnimator(r, [st1, st2], 0.0125/2.0)
    '''
    FPS = 60
    ani = animation.FuncAnimation(fig, ra.perform, interval=1000 / FPS)
    plt.show()
def show_torus_ring():
    """Restore an RLDriver checkpoint and animate its greedy rollout.

    Despite the name, this builds the master RLDriver network inside a
    fresh graph, restores the latest checkpoint from ``ckpt_dir`` (and
    returns early if none exists), prints the value output before and
    after restoring, and then animates the policy's step-by-step
    decisions with matplotlib.

    Relies on module-level configuration defined elsewhere in this file:
    MODELS, init_state, view_config, ckpt_dir, RMSP_ALPHA, RMSP_EPSILON,
    GRAD_NORM_CLIP, device.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    util.mkdir_p(ckpt_dir)
    with g.as_default():
        # NOTE(review): learning_rate_input and grad_applier are never used
        # below -- presumably kept so this graph matches the training
        # script's variable layout; confirm before removing.
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(MODELS,
                init_state,
                view_config,
                config.SV_VISCFG,
                config.MV_VISCFG,
                use_rgb=True)
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign_add(global_step, 1, name='increment_global_step')
        # Only the driver's NN variables plus global_step are restored.
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        last_time = time.time()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            epoch = 0
            # Evaluate once before restoring so the checkpoint's effect on
            # the value head can be printed below.
            policy_before, value_before, _, _ = masterdriver.evaluate(sess)
            #print("Last b before {}".format(sess.run(masterdriver.get_nn_args()[-2])))
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                epoch = sess.run(global_step)
                print('Restored!, global_step {}'.format(epoch))
            else:
                print('Cannot find checkpoint at {}'.format(ckpt_dir))
                return
            policy_after, value_after, _, _ = masterdriver.evaluate(sess)
            print("Value Before Restoring {} and After {}".format(value_before, value_after))
            # print("Last b {}".format(sess.run(masterdriver.get_nn_args()[-2])))
            driver = masterdriver
            r = masterdriver.renderer
            fig = plt.figure()
            class ReAnimator(object):
                # Animation driver: one RL policy step per matplotlib frame,
                # stopping once the driver reports a terminal state.
                reaching_terminal = False
                driver = None
                im = None
                sess = None

                def __init__(self, driver, sess):
                    self.driver = driver
                    self.sess = sess

                def perform(self, framedata):
                    # FuncAnimation callback: evaluate the policy, take one
                    # action, update the renderer state and redraw the image.
                    driver = self.driver
                    r = driver.renderer
                    sess = self.sess
                    if not self.reaching_terminal:
                        policy, value, img, dep = driver.evaluate(sess)
                        policy = policy.reshape(driver.action_size)
                        action = driver.make_decision(policy, sess)
                        nstate,reward,self.reaching_terminal = driver.get_reward(action)
                        valid = r.is_valid_state(nstate)
                        print('Current Value {} Policy {} Action {} Reward {}'.format(value, policy, action, reward))
                        print('\tNew State {} Collision Free ? {}'.format(nstate, valid))
                        # print('Action {}, New State {}'.format(action, nstate))
                        rgb = np.squeeze(img[0, 0, :, : ,:], axis=[0,1])
                        if self.im is None:
                            print('rgb {}'.format(rgb.shape))
                            self.im = plt.imshow(rgb)
                        else:
                            self.im.set_array(rgb)
                        r.state = nstate
            ra = ReAnimator(driver, sess)
            ani = animation.FuncAnimation(fig, ra.perform)
            plt.show()
def calibrate(gtfn, cfn):
    """Clean up a ground-truth sample file and write a calibrated copy.

    Loads V (states), D (distances), N (neighbor indices) and E (edges)
    from the .npz file ``gtfn``, then:

      1. translates every state into unit coordinates, zeroing the
         distance of states that are already disentangled;
      2. trims samples farther than 0.85 from the origin state (they
         usually show nothing), remapping N and E to the new indices;
      3. appends one random entangled (negative) sample per surviving
         positive one, with sentinel distance -10.0 and no neighbor (-1);
      4. writes the result to ``cfn`` under the same V/E/D/N keys.

    Arguments:
        gtfn -- input .npz path with keys 'V', 'D', 'N', 'E'
        cfn  -- output .npz path
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.setup()
    r.loadModelFromFile(aniconf.env_fn)
    r.loadRobotFromFile(aniconf.rob_fn)
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    origin = np.array([0,0,0,1,0,0,0], dtype=np.float64)
    d = np.load(gtfn)
    V=d['V']
    D=d['D']
    N=d['N']
    pos = []   # surviving (positive) states after trimming
    posd = []  # their distances
    neg = []   # synthesized entangled (negative) states
    negd = []  # their sentinel distances
    for i in range(len(V)):
        V[i] = r.translate_to_unit_state(V[i])
        if r.is_disentangled(V[i]):
            D[i] = 0.0
    assert len(d['N']) == len(V), "N size ({}) does not match V's ({})".format(len(d['N']), len(V))
    print("istate {}  distance {}".format(V[0], pyosr.distance(origin, V[0])))
    print("gstate {}  distance {}".format(V[1], pyosr.distance(origin, V[1])))
    # Trim distant samples, which usually shows nothing
    old_to_new = {}
    NN = []
    for i in range(len(V)):
        if pyosr.distance(origin, V[i]) > 0.85:
            continue
        old_to_new[i] = len(pos)
        pos.append(V[i])
        posd.append(D[i])
    for i in range(len(V)):
        if pyosr.distance(origin, V[i]) > 0.85:
            continue
        if N[i] < 0:
            NN.append(N[i])
        else:
            # NOTE(review): assumes the neighbor of every kept sample was
            # also kept; a trimmed neighbor would raise KeyError here.
            NN.append(old_to_new[N[i]])
    NE = []
    for e in d['E']:
        # Drop edges with a trimmed endpoint.
        if e[0] not in old_to_new or e[1] not in old_to_new:
            continue
        NE.append([old_to_new[e[0]], old_to_new[e[1]]])
    assert len(NN) == len(pos), "NN size ({}) does not match pos' ({})".format(len(NN), len(pos))
    V = np.array(pos)
    D = np.array(posd)
    # One negative sample per positive one: rejection-sample random states
    # until an entangled (not disentangled) one is found.
    for i in range(len(V)):
        while True:
            s = uw_random.random_state()
            if not r.is_disentangled(s):
                break
        neg.append(s)
        negd.append(-10.0)
        NN.append(-1)
    NV = np.array(neg)
    ND = np.array(negd)
    V = np.concatenate((V, NV))
    D = np.concatenate((D, ND))
    E = np.array(NE)
    N = np.array(NN)
    #p = np.random.permutation(len(V))
    #np.savez(cfn, V=V[p], E=E, D=D[p], N=N[p])
    np.savez(cfn, V=V, E=E, D=D, N=N)
def reanimate(gtfn, fn):
    """Replay a key path by casting it into continuous actions and animating.

    Arguments:
        gtfn -- .npz with the ground-truth graph (V/E/D/N) for GTGenerator
        fn   -- .npz cache of continuous actions (keys TR/ROT/KEYS); when
                missing, actions are recomputed from 'blend.path'

    NOTE(review): this definition is shadowed by later ``reanimate``
    definitions in this file, so it is effectively dead code; fixed and
    kept for reference.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.setup()
    r.loadModelFromFile(aniconf.env_fn)
    r.loadRobotFromFile(aniconf.rob_fn)
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    gt = pyosr.GTGenerator(r)
    gt.rl_stepping_size = 0.0125 / 16
    gt.verify_magnitude = 0.0125 / 16 / 8
    # gt.rl_stepping_size = 0.0125 / 16 / 1024
    # gt.verify_magnitude = 0.0125 / 16 / 1024 / 2
    # gt.rl_stepping_size = 0.0125 / 64
    # gt.verify_magnitude = 0.0125 / 64 / 8
    gtdata = np.load(gtfn)
    gt.install_gtdata(gtdata['V'], gtdata['E'], gtdata['D'], gtdata['N'])

    # FIXME: add this back after debugging Dijkstra
    # gt.init_knn_in_batch()

    class ReAnimator(object):
        # Animates STATEFUL continuous actions: each frame advances the
        # fractional time t by delta and interpolates within the current
        # action's transition.
        reaching_terminal = False
        driver = None
        im = None
        cont_actions = None

        def __init__(self,
                     renderer,
                     init_state,
                     cont_tr,
                     cont_rot,
                     delta=0.125):
            self.renderer = renderer
            self.cont_tr = cont_tr
            self.cont_rot = cont_rot
            self.t = 0.0
            assert delta <= 1.0, "delta must be <= 1.0, we are reanimating with stateful ACTIONS"
            self.delta = delta
            self.state = renderer.translate_to_unit_state(init_state)
            self.reaching_terminal = False

        def perform(self, framedata):
            # FuncAnimation callback: apply action floor(t), interpolate to
            # the fractional part, render, and stop on collision.
            r = self.renderer
            index = int(math.floor(self.t))
            if index < len(self.cont_tr) and not self.reaching_terminal:
                pkey = self.state
                nkey, _, _ = r.transit_state_by(pkey, self.cont_tr[index],
                                                self.cont_rot[index],
                                                gt.verify_magnitude)
                tau = self.t - index
                print("tau {}".format(tau))
                state = interpolate(pkey, nkey, tau)
                r.state = state
                r.render_mvrgbd()
                print(r.mvrgb.shape)
                rgb = r.mvrgb.reshape((r.pbufferWidth, r.pbufferHeight, 3))
                print(r.state)
                valid = r.is_valid_state(r.state)
                disentangled = r.is_disentangled(r.state)
                print('\tNew State {} Collision Free ? {} Disentangled ? {}'.
                      format(r.state, valid, disentangled))
                if not valid:
                    print('\tNOT COLLISION FREE, SAN CHECK FAILED')
                    self.reaching_terminal = True
                if self.im is None:
                    print('rgb {}'.format(rgb.shape))
                    self.im = plt.imshow(rgb)
                else:
                    self.im.set_array(rgb)
                self.t += self.delta
                # Commit the action's end state only when t crosses into the
                # next action.
                if index != int(math.floor(self.t)):
                    self.state = nkey

    fig = plt.figure()
    # fn = 'blend-cont-saved.npz'
    if not os.path.isfile(fn):
        # This is a legacy debugging code path
        # BUGFIX: keys must be loaded BEFORE casting to continuous actions;
        # it was previously referenced before assignment (NameError).
        keys = np.loadtxt('blend.path')
        print("Translating into Cont. Actions...")
        cont_tr, cont_rot, _, _ = gt.cast_path_to_cont_actions_in_UW(keys)
        print("Done")
        # BUGFIX: also cache KEYS, since the load branch below expects it.
        # NOTE(review): the cache is written to 'blend-cont.npz', not `fn`;
        # it is only reused when fn == 'blend-cont.npz' -- confirm intent.
        np.savez('blend-cont.npz', TR=cont_tr, ROT=cont_rot, KEYS=keys)
    else:
        print("Loading cacahed Cont. Actions")
        dic = np.load(fn)
        cont_tr = dic['TR']
        cont_rot = dic['ROT']
        keys = dic['KEYS']
    # keys[:, [3,4,5,6]] = keys[:,[6,3,4,5]]
    ra = ReAnimator(r, keys[0], cont_tr, cont_rot, 1.0)
    ani = animation.FuncAnimation(fig, ra.perform)
    plt.show()
def reanimate(gtfn, anifn):
    """Animate a trajectory, either generated by GTGenerator or loaded.

    Arguments:
        gtfn  -- .npz with the ground-truth graph (V/E/D/N) for GTGenerator
        anifn -- text file of states to animate; when None, a path is
                 generated from keys[0] via gt.generate_gt_path and saved
                 to 'data-from-init_state.npz' instead of being animated
                 (given the current STEP_LIMIT)

    NOTE(review): this definition shadows the previous ``reanimate`` and
    is itself shadowed by a later one in this file.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.setup()
    r.loadModelFromFile(aniconf.env_fn)
    r.loadRobotFromFile(aniconf.rob_fn)
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    gt = pyosr.GTGenerator(r)
    # gt.rl_stepping_size = 0.0125 / 64 # it's for animation only if AA is enabled
    gt.verify_magnitude = 0.0125 / 64 / 8
    # print('default rl_stepping_size {}'.format(gt.rl_stepping_size))
    gtdata = np.load(gtfn)
    gt.install_gtdata(gtdata['V'], gtdata['E'], gtdata['D'], gtdata['N'])
    # FIXME: add this back after debugging Dijkstra
    # gt.init_knn_in_batch()

    print(r.scene_matrix)
    print(r.robot_matrix)

    # return

    class ReAnimator(object):
        # Animates a fixed list of key states by interpolating between
        # consecutive keys, delta of fractional time t per frame.
        reaching_terminal = False
        driver = None
        im = None
        keys = None
        prev_state = None

        def __init__(self, renderer, keys, delta=0.125):
            self.renderer = renderer
            self.keys = keys
            self.t = 0.0
            self.delta = delta
            self.reaching_terminal = False
            self.prev_state = self.renderer.translate_to_unit_state(keys[0])

        def perform(self, framedata):
            # FuncAnimation callback: interpolate between keys floor(t) and
            # floor(t)+1, render, and stop on collision.
            r = self.renderer
            index = int(math.floor(self.t))
            nindex = index + 1
            if nindex < len(self.keys) and not self.reaching_terminal:
                pkey = self.keys[index]
                nkey = self.keys[nindex]
                tau = self.t - index
                print("tau {}".format(tau))
                state = interpolate(pkey, nkey, tau)
                r.state = r.translate_to_unit_state(state)
                r.render_mvrgbd()
                # print(r.mvrgb.shape)
                rgb = r.mvrgb.reshape((r.pbufferWidth, r.pbufferHeight, 3))
                # print(r.state)
                valid = r.is_valid_state(r.state)
                print('\tNew State {} Collision Free ? {}'.format(
                    r.state, valid))
                print('\tDistance {}'.format(
                    pyosr.distance(r.state, self.prev_state)))
                if not valid:
                    print(
                        '\tNOT COLLISION FREE, SAN CHECK FAILED AT {}'.format(
                            self.t))
                    self.reaching_terminal = True
                if self.im is None:
                    print('rgb {}'.format(rgb.shape))
                    self.im = plt.imshow(rgb)
                else:
                    self.im.set_array(rgb)
                self.prev_state = r.state
                self.t += self.delta

    fig = plt.figure()
    keys = np.loadtxt(aniconf.keys_fn)
    # Transpose W-Last quaternion data into W-First format.
    keys[:, [3, 4, 5, 6]] = keys[:, [6, 3, 4, 5]]
    init_state = keys[0]
    # init_state = np.array([0.36937377,0.1908864,0.21092376, 0.98570921, -0.12078825, -0.0675864, 0.09601888], dtype=np.float64)
    # gt.init_knn_in_batch()

    if anifn is None:
        STEP_LIMIT = 64 * 1024
        FOR_RL = False
        gtstats, actions, terminated = gt.generate_gt_path(
            init_state, STEP_LIMIT, FOR_RL)
        # With a large STEP_LIMIT, only save the generated data and exit
        # instead of animating.
        if STEP_LIMIT >= 1024:
            np.savez('data-from-init_state', S=gtstats, A=actions)
            return
        print("terminated ? {} Expecting True".format(terminated))
    else:
        '''
        gtdata = np.load(anifn)
        gtstats = gtdata['S']
        '''
        gtstats = np.loadtxt(anifn)
        # gtstats[:, [3,4,5,6]] = gtstats[:,[6,3,4,5]]
        print(gtstats.shape)
    # print('trajectory:\n')
    # print(gtstats)
    ra = ReAnimator(r, gtstats, 0.05)
    ani = animation.FuncAnimation(fig, ra.perform)
    plt.show()
def reanimate(gtfn, pathfn, swap, cachedir, interpthresh=0, in_unit=True):
    """Animate a path by expanding each key pair through GTGenerator.

    Each consecutive pair of keys is projected into a dense trajectory via
    ``gt.project_trajectory`` (cached per-segment under ``cachedir``), and
    the expanded states are rendered one per frame.

    Arguments:
        gtfn         -- .npz with the ground-truth graph (V/E/D/N)
        pathfn       -- text file of key states (np.loadtxt format)
        swap         -- True to transpose W-Last quaternions into W-First
        cachedir     -- directory for per-segment .npz trajectory caches
        interpthresh -- when > 0, insert interpolated nodes between keys
                        via ``partition`` before expansion
        in_unit      -- forwarded to gt.project_trajectory

    NOTE(review): this definition shadows the earlier ``reanimate``
    definitions in this file; only this one is callable.
    """
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    r = pyosr.Renderer()
    r.avi = True
    r.setup()
    r.loadModelFromFile(aniconf.env_fn)
    r.loadRobotFromFile(aniconf.rob_fn)
    r.scaleToUnit()
    r.angleModel(0.0, 0.0)
    r.default_depth = 0.0
    r.views = np.array([[0.0, 0.0]], dtype=np.float32)
    gt = pyosr.GTGenerator(r)
    # gt.rl_stepping_size = 0.0125 / 16
    # gt.verify_magnitude = 0.0125 / 16 / 8
    # gt.rl_stepping_size = 0.0125 / 16 / 1024
    # gt.verify_magnitude = 0.0125 / 16 / 1024 / 2
    gt.rl_stepping_size = 0.0125 * 4
    gt.verify_magnitude = 0.0125 * 4 / 8
    # gt.rl_stepping_size = 0.0125 / 64
    # gt.verify_magnitude = 0.0125 / 64 / 8
    gtdata = np.load(gtfn)
    gt.install_gtdata(gtdata['V'], gtdata['E'], gtdata['D'], gtdata['N'])

    # FIXME: add this back after debugging Dijkstra
    # gt.init_knn_in_batch()

    class ExpandingReAnimator(object):
        # Expands key segments lazily: when the current expanded segment is
        # exhausted, the next key pair is projected (or loaded from cache).
        reaching_terminal = False
        driver = None
        im = None
        keys = None
        prev_state = None
        gt = None
        state = None

        def __init__(self, renderer, gt, keys, delta=1):
            self.renderer = renderer
            self.gt = gt
            self.keys = keys
            self.t = 0.0
            self.delta = delta
            self.reaching_terminal = False
            self.prev_state = self.renderer.translate_to_unit_state(keys[0])
            self.st_index = 0
            self.state = self.keys[0]
            self.expand_states()

        def expand_states(self):
            # Project the trajectory from the current state to the next key,
            # caching the result under cachedir/<segment index>.npz.
            keys = self.keys
            if self.st_index + 1 >= keys.shape[0]:
                self.reaching_terminal = True
                return
            r = self.renderer
            # r.light_position = np.random.rand(3)
            '''
            from_state = r.translate_to_unit_state(keys[self.st_index])
            to_state =  r.translate_to_unit_state(keys[self.st_index + 1])
            print(from_state)
            print(to_state)
            mag = pyosr.distance(from_state, to_state)
            '''
            '''
            for a in range(12):
                tup = r.transit_state(from_state, a, gt.rl_stepping_size, gt.verify_magnitude)
                print(tup)
                print("distance from start: {}".format(pyosr.distance(tup[0], from_state)))
                d = pyosr.distance(tup[0], to_state)
                print("distance to next: {} Closer? {}".format(d, d < mag))
            return
            '''
            fn = '{}/{}.npz'.format(cachedir, self.st_index)
            if not os.path.isfile(fn):
                tup = self.gt.project_trajectory(self.state,
                                                 keys[self.st_index + 1],
                                                 in_unit=in_unit)
                self.exp_st = tup[0]
                if self.exp_st.shape[0] > 0:
                    self.state = self.exp_st[-1]
                print("cache states to {}".format(fn))
                np.savez(fn, S=tup[0], A=tup[1])
            else:
                print("load cached states from {}".format(fn))
                cache = np.load(fn)['S']
                self.exp_st = cache
            # print(self.exp_st)
            self.exp_st_index = 0
            self.st_index += 1

        def perform(self, framedata):
            # FuncAnimation callback: render the next expanded state,
            # expanding further segments on demand.
            r = self.renderer
            while self.exp_st_index >= self.exp_st.shape[0]:
                self.expand_states()
                if self.reaching_terminal == True:
                    print("Reaching Terminal")
                    return
            '''
            if self.exp_st_index > self.keys.shape[0]:
                return
            r.state = r.translate_to_unit_state(self.keys[self.exp_st_index])
            '''
            r.state = r.translate_to_unit_state(self.exp_st[self.exp_st_index])
            r.render_mvrgbd()
            rgb = r.mvrgb.reshape((r.pbufferWidth, r.pbufferHeight, 3))
            if self.im is None:
                self.im = plt.imshow(rgb)
            else:
                self.im.set_array(rgb)
            self.prev_state = r.state
            self.exp_st_index += self.delta
            print('{}: {}'.format(self.exp_st_index, r.state))
            '''
            index = int(math.floor(self.t))
            nindex = index + 1
            if nindex < len(self.keys) and not self.reaching_terminal:
                pkey = self.keys[index]
                nkey = self.keys[nindex]
                tau = self.t - index
                print("tau {}".format(tau))
                state = interpolate(pkey, nkey, tau)
                r.state = r.translate_to_unit_state(state)
                r.render_mvrgbd()
                # print(r.mvrgb.shape)
                rgb = r.mvrgb.reshape((r.pbufferWidth, r.pbufferHeight, 3))
                # print(r.state)
                valid = r.is_valid_state(r.state)
                print('\tNew State {} Collision Free ? {}'.format(r.state, valid))
                print('\tDistance {}'.format(pyosr.distance(r.state, self.prev_state)))
                if not valid:
                    print('\tNOT COLLISION FREE, SAN CHECK FAILED AT {}'.format(self.t))
                    self.reaching_terminal = True
                if self.im is None:
                    print('rgb {}'.format(rgb.shape))
                    self.im = plt.imshow(rgb)
                else:
                    self.im.set_array(rgb)
                self.prev_state = r.state
                self.t += self.delta
            '''

    fig = plt.figure()
    keys = np.loadtxt(pathfn)
    if swap:
        '''
        Transpose W-Last data into W-First format
        '''
        keys[:, [3, 4, 5, 6]] = keys[:, [6, 3, 4, 5]]
    if interpthresh > 0:
        ikeys = [keys[0]]
        for key in keys[1:]:
            print("Partition {}\n{}".format(ikeys[-1], key))
            inodes = partition(ikeys[-1], key, interpthresh)
            ikeys += inodes
            ikeys.append(key)
        ikeys = np.array(ikeys)
        print("Using: {}".format(ikeys))
        print("Total nodes: {}".format(len(ikeys)))
        # BUGFIX: the interpolated key list was computed (and printed as
        # "Using: ...") but never actually passed on; animate it instead
        # of the raw keys.
        keys = ikeys
    ra = ExpandingReAnimator(r, gt, keys)
    ani = animation.FuncAnimation(fig, ra.perform)
    plt.show()