예제 #1
0
def main():
    """Interactively select a controller and run reaching episodes forever.

    Prompts the user to pick one of four controllers (two dynamical-system
    variants, RL, or RRT), builds the matching controller/kernel pair, then
    loops indefinitely: sample a new environment target and controller
    transition from their priors, reset the environment, and roll out a
    trajectory. Runs until interrupted.

    Raises:
        ValueError: if the entered choice is not one of '1a', '1b', '2', '3'.
    """
    idx = input(
        'Please select the controller -- \n1a. Original Dynamical System\n1b. Improved Dynamical System\n'
        +
        '2.  Reinforcement Learning\n3.  Rapidly-Exploring Random Tree\nEnter your choice: '
    )
    # Validate with an explicit raise: `assert` is stripped under `python -O`,
    # so it must not guard user input.
    if idx not in ('1a', '1b', '2', '3'):
        raise ValueError('Invalid input! The input needs to be 1a, 1b, 2, or 3. ')
    env = PandaEnv()
    env.render()
    env_kernel = ReachingEnvKernelNormal()
    # Dispatch table: choice -> (controller factory, kernel factory).
    builders = {
        '1a': (lambda: DSController(typ='original', visualize=True), TransitionKernel),
        '1b': (lambda: DSController(typ='improved', visualize=True), TransitionKernel),
        '2': (lambda: RLController(visualize=True), TransitionKernel),
        '3': (lambda: RRTController(visualize=True), RRTKernelNormal),
    }
    make_controller, make_kernel = builders[idx]
    controller = make_controller()
    controller_kernel = make_kernel()
    while True:
        env_kernel.sample_prior()
        controller_kernel.sample_prior()
        env.reset(target_loc=env_kernel.value)
        controller.get_trajectory(env, controller_kernel)
예제 #2
0
def rrt_max_illegibility():
    """Run `sample` for the RRT controller targeting maximal illegibility.

    Draws 10000 samples (alpha=0.1, 1000 sigma-adaptation steps) from the
    RRT prior and saves the results to a pickle named after this function.
    """
    # Derive the output path from this function's own name.
    out_path = f'samples/{inspect.currentframe().f_code.co_name}.pkl'
    sample(
        N=10000,
        alpha=0.1,
        prior_file='samples/rrt_prior.pkl',
        N_sigma=1000,
        behavior_func=behavior.illegibility_behavior,
        env=PandaEnv(),
        env_kernel=ReachingEnvKernelNormal(),
        controller=RRTController(),
        controller_kernel=RRTKernelNormal(),
        target_type='maximal',
        save=out_path,
    )
예제 #3
0
def rrt_min_ee_distance():
    """Run `sample` for the RRT controller matching zero end-effector distance.

    Draws 10000 samples (alpha=0.1, 1000 sigma-adaptation steps) from the
    RRT prior with target behavior 0, saving to a pickle named after this
    function.
    """
    # Derive the output path from this function's own name.
    out_path = f'samples/{inspect.currentframe().f_code.co_name}.pkl'
    sample(
        N=10000,
        alpha=0.1,
        prior_file='samples/rrt_prior.pkl',
        N_sigma=1000,
        behavior_func=behavior.ee_distance_behavior,
        env=PandaEnv(),
        env_kernel=ReachingEnvKernelNormal(),
        controller=RRTController(),
        controller_kernel=RRTKernelNormal(),
        target_type='match',
        target_behavior=0,
        save=out_path,
    )
예제 #4
0
def ds_improved_min_end_distance():
    """Run `sample` for the improved DS controller matching zero EE distance.

    Draws 10000 samples (alpha=0.1, 1000 sigma-adaptation steps) from the
    improved-DS prior with target behavior 0, saving to a pickle named after
    this function.
    """
    # Derive the output path from this function's own name.
    out_path = f'samples/{inspect.currentframe().f_code.co_name}.pkl'
    sample(
        N=10000,
        alpha=0.1,
        prior_file='samples/ds_improved_prior.pkl',
        N_sigma=1000,
        behavior_func=behavior.ee_distance_behavior,
        env=PandaEnv(),
        env_kernel=ReachingEnvKernelNormal(),
        controller=DSController(typ='improved'),
        controller_kernel=TransitionKernel(),
        target_type='match',
        target_behavior=0,
        save=out_path,
    )
예제 #5
0
파일: sample_prior.py 프로젝트: wx-b/RoCUS
def main():
	"""Interactively select a controller and draw 2000 prior samples.

	Prompts the user to pick one of four controllers, builds the matching
	controller/kernel pair and save path, then calls `sample_prior` to draw
	2000 prior samples into the corresponding pickle file.

	Raises:
		ValueError: if the entered choice is not one of '1a', '1b', '2', '3'.
	"""
	idx = input('Please select the controller -- \n1a. Original Dynamical System\n1b. Improved Dynamical System\n' + 
				'2. Reinforcement Learning\n3. Rapidly-Exploring Random Tree\nEnter your choice: ')
	# Validate with an explicit raise: `assert` is stripped under `python -O`,
	# so it must not guard user input.
	if idx not in ('1a', '1b', '2', '3'):
		raise ValueError('Invalid input! The input needs to be 1a, 1b, 2, or 3. ')
	env = PandaEnv()
	env_kernel = ReachingEnvKernelNormal()
	# Dispatch table: choice -> (controller factory, prior save path).
	configs = {
		'1a': (lambda: DSController(typ='original'), 'samples/ds_original_prior.pkl'),
		'1b': (lambda: DSController(typ='improved'), 'samples/ds_improved_prior.pkl'),
		'2': (RLController, 'samples/rl_prior.pkl'),
		'3': (RRTController, 'samples/rrt_prior.pkl'),
	}
	make_controller, save_fn = configs[idx]
	controller = make_controller()
	# Only the RRT controller uses the RRT-specific kernel.
	controller_kernel = RRTKernelNormal() if idx == '3' else TransitionKernel()
	sample_prior(2000, save_fn, env, controller, env_kernel, controller_kernel)