def _generate_single_workspace(self, workspace_id):
    while True:
        a = datetime.datetime.now()
        workspace_params = self.generator.generate_workspace()
        self.openrave_manager = OpenraveManager(self.config['openrave_rl']['segment_validity_step'],
                                                PotentialPoint.from_config(self.config))
        self.openrave_manager.loaded_params_path = None
        self.openrave_manager.load_params(workspace_params, '')
        successful_trajectories_count = 0
        i = 0
        for i in range(self.test_trajectories):
            # see if there is hope
            trajectories_left = self.test_trajectories - i
            if trajectories_left + successful_trajectories_count < self.trajectories_required_to_pass:
                print('no hope to get the required ratio')
                break
            # try a trajectory
            successful_trajectories_count += self._try_plan(workspace_params, self.openrave_manager) is not None
            # if successful update the status
            if successful_trajectories_count >= self.trajectories_required_to_pass:
                print('workspace found')
                save_path = os.path.join(output_dir, '{}_workspace.pkl'.format(workspace_id))
                workspace_params.save(save_path)
                return
        b = datetime.datetime.now()
        print('trajectories tried {}'.format(i))
        print('success count {}'.format(successful_trajectories_count))
        print('time since start {}'.format(b - a))
        print('')
Example #2
 def _generate_single_workspace(self, workspace_id):
     while True:
         a = datetime.datetime.now()
         workspace_params = self.generator.generate_workspace()
         self.openrave_manager = OpenraveManager(
             self.config["openrave_rl"]["segment_validity_step"],
             PotentialPoint.from_config(self.config),
         )
         self.openrave_manager.loaded_params_path = None
         self.openrave_manager.load_params(workspace_params, "")
         successful_trajectories_count = 0
         i = 0
         for i in range(self.test_trajectories):
             # see if there is hope
             trajectories_left = self.test_trajectories - i
             if (trajectories_left + successful_trajectories_count <
                     self.trajectories_required_to_pass):
                 print("no hope to get the required ratio")
                 break
             # try a trajectory
             successful_trajectories_count += (self._try_plan(
                 workspace_params, self.openrave_manager) is not None)
             # if successful update the status
             if successful_trajectories_count >= self.trajectories_required_to_pass:
                 print("workspace found")
                 save_path = os.path.join(
                     output_dir, "{}_workspace.pkl".format(workspace_id))
                 workspace_params.save(save_path)
                 return
         b = datetime.datetime.now()
         print("trajectories tried {}".format(i))
         print("success count {}".format(successful_trajectories_count))
         print("time since start {}".format(b - a))
         print("")
Example #3
    def __init__(self, config):
        self.action_step_size = config['openrave_rl']['action_step_size']
        self.goal_sensitivity = config['openrave_rl']['goal_sensitivity']
        self.keep_alive_penalty = config['openrave_rl']['keep_alive_penalty']
        self.truncate_penalty = config['openrave_rl']['truncate_penalty']

        self.openrave_manager = OpenraveManager(
            config['openrave_rl']['segment_validity_step'], PotentialPoint.from_config(config))

        self.current_joints = None
        self.goal_joints = None
        self.start_joints = None
        self.traj = None
Example #4
    def __init__(self, config):
        self.action_step_size = config["openrave_rl"]["action_step_size"]
        self.goal_sensitivity = config["openrave_rl"]["goal_sensitivity"]
        self.keep_alive_penalty = config["openrave_rl"]["keep_alive_penalty"]
        self.truncate_penalty = config["openrave_rl"]["truncate_penalty"]

        self.openrave_manager = OpenraveManager(
            config["openrave_rl"]["segment_validity_step"],
            PotentialPoint.from_config(config),
        )

        self.current_joints = None
        self.goal_joints = None
        self.start_joints = None
        self.traj = None
def print_model_stats(pre_trained_reward_network, test_batch_size, sess):
    # read the data
    test = load_data_from(os.path.join("supervised_data", "test"),
                          max_read=10 * test_batch_size)
    print(len(test))

    # shuffle the test data before sampling a batch
    random.shuffle(test)

    openrave_manager = OpenraveManager(
        0.001, PotentialPoint.from_config(pre_trained_reward_network.config))

    sess.run(tf.global_variables_initializer())

    # run test for one (random) batch
    random.shuffle(test)
    test_batch = oversample_batch(test, 0, test_batch_size)
    test_batch, test_rewards, test_status = get_batch_and_labels(
        test_batch, openrave_manager)
    reward_prediction, status_prediction = pre_trained_reward_network.make_prediction(
        *([sess] + test_batch))
    # see what happens for different reward classes:
    (
        goal_rewards_stats,
        collision_rewards_stats,
        other_rewards_stats,
    ) = compute_stats_per_class(test_status, test_rewards, status_prediction,
                                reward_prediction)
    print("before loading weights")
    print("goal mean_error {} max_error {} accuracy {}".format(
        *goal_rewards_stats))
    print("collision mean_error {} max_error {} accuracy {}".format(
        *collision_rewards_stats))
    print("other mean_error {} max_error {} accuracy {}".format(
        *other_rewards_stats))

    # load weights
    pre_trained_reward_network.load_weights(sess)
    # run test for one (random) batch
    random.shuffle(test)

    test_batch = oversample_batch(test, 0, test_batch_size)
    test_batch, test_rewards, test_status = get_batch_and_labels(
        test_batch, openrave_manager)
    reward_prediction, status_prediction = pre_trained_reward_network.make_prediction(
        *([sess] + test_batch))
    # see what happens for different reward classes:
    (
        goal_rewards_stats,
        collision_rewards_stats,
        other_rewards_stats,
    ) = compute_stats_per_class(test_status, test_rewards, status_prediction,
                                reward_prediction)
    print("after loading weights")
    print("goal mean_error {} max_error {} accuracy {}".format(
        *goal_rewards_stats))
    print("collision mean_error {} max_error {} accuracy {}".format(
        *collision_rewards_stats))
    print("other mean_error {} max_error {} accuracy {}".format(
        *other_rewards_stats))
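
# A minimal usage sketch for the function above, assuming the reward network was
# built elsewhere in the default graph (its constructor is not shown in these
# snippets) and exposes the `config`, `make_prediction` and `load_weights`
# members used above; the batch size is illustrative.
with tf.Session() as sess:
    print_model_stats(pre_trained_reward_network, test_batch_size=64, sess=sess)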
Example #6
    def __init__(self, config):
        self.action_step_size = config['openrave_rl']['action_step_size']
        self.goal_sensitivity = config['openrave_rl']['goal_sensitivity']
        self.challenging_trajectories_only = config['openrave_planner'][
            'challenging_trajectories_only']
        self.planner_iterations_start = config['openrave_planner'][
            'planner_iterations_start']
        self.planner_iterations_increase = config['openrave_planner'][
            'planner_iterations_increase']
        self.planner_iterations_decrease = config['openrave_planner'][
            'planner_iterations_decrease']
        self.max_planner_iterations = self.planner_iterations_start

        self.openrave_manager = OpenraveManager(
            config['openrave_rl']['segment_validity_step'],
            PotentialPoint.from_config(config))
    def __init__(self, config):
        self.action_step_size = config["openrave_rl"]["action_step_size"]
        self.goal_sensitivity = config["openrave_rl"]["goal_sensitivity"]
        self.challenging_trajectories_only = config["openrave_planner"][
            "challenging_trajectories_only"]
        self.planner_iterations_start = config["openrave_planner"][
            "planner_iterations_start"]
        self.planner_iterations_increase = config["openrave_planner"][
            "planner_iterations_increase"]
        self.planner_iterations_decrease = config["openrave_planner"][
            "planner_iterations_decrease"]
        self.max_planner_iterations = self.planner_iterations_start

        self.openrave_manager = OpenraveManager(
            config["openrave_rl"]["segment_validity_step"],
            PotentialPoint.from_config(config),
        )
Example #8
def run_motion_planner():
    result = None
    openrave_manager = OpenraveManager(
        config['openrave_rl']['segment_validity_step'],
        PotentialPoint.from_config(config))
    for start_joints, goal_joints, workspace_id, _ in queries:
        params_file_path = image_cache.items[workspace_id].full_filename
        openrave_manager.set_params(params_file_path)
        for i in range(repeat):
            start_time = datetime.datetime.now()
            traj = openrave_manager.plan(start_joints, goal_joints, None)
            # assert traj is not None
            end_time = datetime.datetime.now()
            time_diff = end_time - start_time
            if result is None:
                result = time_diff
            else:
                result += time_diff
    return result
show_collision = False
collision_samples = 10000
# show_close_to_goal = True
show_close_to_goal = False
close_to_goal_samples = 10000
show_pose_action_direction_arrow = True
show_goal_end_effector_pose = True

# load configuration
config_path = os.path.join(os.getcwd(), 'config/config.yml')
with open(config_path, 'r') as yml_file:
    config = yaml.load(yml_file)

# load the workspace
openrave_manager = OpenraveManager(
    config['openrave_rl']['segment_validity_step'],
    PotentialPoint.from_config(config))
params_file = os.path.abspath(
    os.path.expanduser(
        os.path.join('~/ModelBasedDDPG/scenario_params', scenario,
                     'params.pkl')))
openrave_manager.load_params(WorkspaceParams.load_from_file(params_file))
openrave_manager.robot.SetDOFValues([0.0] + goal_joints, [0, 1, 2, 3, 4])

openrave_manager.get_initialized_viewer()
red_color = np.array([1.0, 0.0, 0.0])
yellow_color = np.array([1.0, 1.0, 0.0])
green_color = np.array([0.0, 1.0, 0.0])


def create_sphere(id, radius, openrave_manager):
Example #10
scenario = 'hard'
model_name = '2019_01_25_10_09_04'
number_of_imitation_files = 3
sphere_limitation = 1000

imitation_data_path = os.path.abspath(os.path.expanduser(os.path.join('~/ModelBasedDDPG/imitation_data', scenario)))
rl_trajectories_data_path = os.path.abspath(os.path.expanduser(
    os.path.join('~/ModelBasedDDPG/', scenario, 'trajectories', model_name)))

# load configuration
config_path = os.path.join(os.getcwd(), 'config/config.yml')
with open(config_path, 'r') as yml_file:
    config = yaml.load(yml_file)

# load the workspace
openrave_manager = OpenraveManager(config['openrave_rl']['segment_validity_step'], PotentialPoint.from_config(config))


def process_poses(target_poses, x_coordinate_range=(0.0, 0.13), z_coordinate_range=(0.3, 0.45)):
    return [
        p for p in target_poses
        if x_coordinate_range[0] <= p[0] <= x_coordinate_range[1]
        and z_coordinate_range[0] <= p[1] <= z_coordinate_range[1]
    ]


def process_rl_files(data_dir, trajectory_limitation):
    steps_offset = 40
    steps_increase = 2000
    trajectories_seen = 0
    result = []
    while trajectories_seen < trajectory_limitation:
        global_step_dir = os.path.join(data_dir, '{}'.format(steps_offset))
        steps_offset += steps_increase
        for dirpath, dirnames, filenames in os.walk(global_step_dir):
Example #11
class OpenraveRLInterface:
    # class StepResult(enum.Enum):
    #     free_space = 1
    #     collision = 2
    #     close_to_goal = 3

    def __init__(self, config):
        self.action_step_size = config['openrave_rl']['action_step_size']
        self.goal_sensitivity = config['openrave_rl']['goal_sensitivity']
        self.keep_alive_penalty = config['openrave_rl']['keep_alive_penalty']
        self.truncate_penalty = config['openrave_rl']['truncate_penalty']

        self.openrave_manager = OpenraveManager(
            config['openrave_rl']['segment_validity_step'], PotentialPoint.from_config(config))

        self.current_joints = None
        self.goal_joints = None
        self.start_joints = None
        self.traj = None

    def is_below_goal_sensitivity(self, start_joints, goal_joints):
        start_pose = self.openrave_manager.get_target_pose(start_joints)
        goal_pose = self.openrave_manager.get_target_pose(goal_joints)
        pose_distance = np.linalg.norm(np.array(start_pose) - np.array(goal_pose))
        return pose_distance < self.goal_sensitivity

    def start_specific(self, traj, verify_traj=True):
        self.traj = traj
        start_joints = traj[0]
        goal_joints = traj[-1]
        # assert path is legal
        if verify_traj:
            step_size = self.action_step_size + 0.00001
            for i in range(len(traj)-1):
                step_i_size = np.linalg.norm(np.array(traj[i]) - np.array(traj[i+1]))
                assert step_i_size < step_size, 'step_i_size {}'.format(step_i_size)
        steps_required_for_motion_plan = len(traj)
        self.current_joints = np.array(start_joints)
        self.start_joints = np.array(start_joints)
        self.goal_joints = np.array(goal_joints)
        return start_joints, goal_joints, steps_required_for_motion_plan

    @staticmethod
    def _is_valid_region(start_pose, goal_pose):
        return start_pose[1] > 0.0 and goal_pose[1] > 0.0

    def _is_challenging(self, start_pose, goal_pose):
        workspace_params = self.openrave_manager.loaded_params
        if workspace_params is None or workspace_params.number_of_obstacles == 0:
            return True
        # check if the distance from any obstacle is smaller than the start-goal-distance
        start = np.array(start_pose)
        goal = np.array(goal_pose)
        start_goal_distance = np.linalg.norm(start - goal)
        for i in range(workspace_params.number_of_obstacles):
            obstacle = np.array(
                [workspace_params.centers_position_x[i], workspace_params.centers_position_z[i]]
            )
            start_obstacle_distance = np.linalg.norm(start - obstacle)
            goal_obstacle_distance = np.linalg.norm(goal - obstacle)
            if start_obstacle_distance < start_goal_distance and goal_obstacle_distance < start_goal_distance:
                return True
        # all tests failed
        return False

    def step(self, joints_action):
        # compute next joints
        step = joints_action * self.action_step_size
        next_joints_before_truncate = self.current_joints + step
        next_joints = self.openrave_manager.truncate_joints(next_joints_before_truncate)

        reward = 0.0
        if self.truncate_penalty > 0.0:
            reward -= self.truncate_penalty * np.linalg.norm(next_joints_before_truncate - next_joints) / \
                      self.action_step_size

        # if segment not valid return collision result
        if not self.openrave_manager.check_segment_validity(self.current_joints, next_joints):
            return self._get_step_result(next_joints, -1.0 + reward, True, 2)
        # if close enough to goal, return positive reward
        if self.is_below_goal_sensitivity(next_joints, self.goal_joints):
            return self._get_step_result(next_joints, 1.0 + reward, True, 3)
        # else, just a normal step...
        return self._get_step_result(next_joints, -self.keep_alive_penalty + reward, False, 1)

    def _get_step_result(self, next_joints, reward, is_terminal, enum_res):
        if is_terminal:
            self.current_joints = None
        else:
            self.current_joints = next_joints
        return list(next_joints), reward, is_terminal, enum_res
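
# A minimal sketch of driving the interface above, assuming `config` was loaded
# from config/config.yml as in the other snippets and that `traj` is a valid
# joint trajectory (for example, one produced by
# OpenraveTrajectoryGenerator.find_random_trajectory); the random policy is
# purely illustrative.
import numpy as np

interface = OpenraveRLInterface(config)
start_joints, goal_joints, max_steps = interface.start_specific(traj)
is_terminal = False
while not is_terminal:
    # random unit-norm action; step() scales it by action_step_size internally
    action = np.random.uniform(-1.0, 1.0, len(start_joints))
    action /= np.linalg.norm(action)
    next_joints, reward, is_terminal, status = interface.step(action)
    print('status {} reward {}'.format(status, reward))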
Example #12
number_of_unzippers = config['general']['number_of_unzippers']

train = Oversampler(train_data_dir,
                    batch_size,
                    oversample_goal,
                    oversample_collision,
                    number_of_unzippers=number_of_unzippers)
test = Oversampler(test_data_dir,
                   batch_size,
                   oversample_goal,
                   oversample_collision,
                   number_of_unzippers=number_of_unzippers)

# get openrave manager
openrave_manager = OpenraveManager(0.001, PotentialPoint.from_config(config))

# set summaries and saver dir
summaries_dir = os.path.join('reward', 'tensorboard')
train_summary_writer = tf.summary.FileWriter(
    os.path.join(summaries_dir, 'train_' + model_name))
test_summary_writer = tf.summary.FileWriter(
    os.path.join(summaries_dir, 'test_' + model_name))
saver_dir = os.path.join('reward', 'model', model_name)
if not os.path.exists(saver_dir):
    os.makedirs(saver_dir)

# save the config
config_copy_path = os.path.join(saver_dir, 'config.yml')
yaml.dump(config, open(config_copy_path, 'w'))
Example #13
from potential_point import PotentialPoint

# the scenario
# scenario = 'simple'
# scenario = 'hard'
scenario = "vision"
workspace_id = 35

# load configuration
config_path = os.path.join(os.getcwd(), "config/config.yml")
with open(config_path, "r") as yml_file:
    config = yaml.load(yml_file)

# load the workspace
openrave_manager = OpenraveManager(
    config["openrave_rl"]["segment_validity_step"],
    PotentialPoint.from_config(config))

# load the openrave view
params_file_path = os.path.abspath(
    os.path.expanduser("~/ModelBasedDDPG/scenario_params"))
if scenario == "vision":
    params_file_path = os.path.join(params_file_path, "vision",
                                    "{}_workspace.pkl".format(workspace_id))
else:
    params_file_path = os.path.join(params_file_path, scenario, "params.pkl")
openrave_manager.set_params(params_file_path)
openrave_manager.get_initialized_viewer()

print("here")
Example #14
class WorkerQueue(multiprocessing.Process):
    def __init__(
        self,
        test_trajectories,
        attempts_to_find_single_trajectory,
        trajectories_required_to_pass,
        planner_iterations,
        workspace_generation_queue,
        worker_specific_queue,
        results_queue,
    ):
        multiprocessing.Process.__init__(self)
        # parameters
        self.test_trajectories = test_trajectories
        self.attempts_to_find_single_trajectory = attempts_to_find_single_trajectory
        self.trajectories_required_to_pass = trajectories_required_to_pass
        self.planner_iterations = planner_iterations
        # queues
        self.workspace_generation_queue = workspace_generation_queue
        self.worker_specific_queue = worker_specific_queue
        self.results_queue = results_queue
        # members
        self.generator = WorkspaceGenerator(obstacle_count_probabilities={
            2: 0.05,
            3: 0.5,
            4: 0.4,
            5: 0.05
        })
        config_path = os.path.join(os.getcwd(), "config/config.yml")
        with open(config_path, "r") as yml_file:
            self.config = yaml.load(yml_file)
        self.openrave_manager = None

    def _is_below_goal_sensitivity(self, start_pose, goal_pose):
        pose_distance = np.linalg.norm(
            np.array(start_pose) - np.array(goal_pose))
        return pose_distance < self.config["openrave_rl"]["goal_sensitivity"]

    @staticmethod
    def _is_challenging(start_pose, goal_pose, workspace_params):
        start = np.array(start_pose)
        goal = np.array(goal_pose)
        start_goal_distance = np.linalg.norm(start - goal)
        for i in range(workspace_params.number_of_obstacles):
            obstacle = np.array([
                workspace_params.centers_position_x[i],
                workspace_params.centers_position_z[i],
            ])
            start_obstacle_distance = np.linalg.norm(start - obstacle)
            goal_obstacle_distance = np.linalg.norm(goal - obstacle)
            if (start_obstacle_distance < start_goal_distance
                    and goal_obstacle_distance < start_goal_distance):
                return True
        # all tests failed
        return False

    def _try_plan(self, workspace_params, openrave_manager):
        for i in range(self.attempts_to_find_single_trajectory):
            # select at random
            start_joints = openrave_manager.get_random_joints({0: 0.0})
            goal_joints = openrave_manager.get_random_joints({0: 0.0})
            start_pose = openrave_manager.get_target_pose(start_joints)
            goal_pose = openrave_manager.get_target_pose(goal_joints)
            # if the start and goal are too close, re-sample
            if self._is_below_goal_sensitivity(start_pose, goal_pose):
                continue
            # valid region: skip if either pose is outside it
            if start_pose[1] < 0.0 or goal_pose[1] < 0.0:
                continue
            # keep only trajectories that must cross an obstacle
            if not self._is_challenging(start_pose, goal_pose, workspace_params):
                continue
            traj = openrave_manager.plan(start_joints, goal_joints,
                                         self.planner_iterations)
            # return the trajectory itself so callers can tell a successful plan
            # (not None) from a failed one
            return traj
        return None

    def _generate_single_workspace(self, workspace_id):
        while True:
            a = datetime.datetime.now()
            workspace_params = self.generator.generate_workspace()
            self.openrave_manager = OpenraveManager(
                self.config["openrave_rl"]["segment_validity_step"],
                PotentialPoint.from_config(self.config),
            )
            self.openrave_manager.loaded_params_path = None
            self.openrave_manager.load_params(workspace_params, "")
            successful_trajectories_count = 0
            i = 0
            for i in range(self.test_trajectories):
                # see if there is hope
                trajectories_left = self.test_trajectories - i
                if (trajectories_left + successful_trajectories_count <
                        self.trajectories_required_to_pass):
                    print("no hope to get the required ratio")
                    break
                # try a trajectory
                successful_trajectories_count += (self._try_plan(
                    workspace_params, self.openrave_manager) is not None)
                # if successful update the status
                if successful_trajectories_count >= self.trajectories_required_to_pass:
                    print("workspace found")
                    save_path = os.path.join(
                        output_dir, "{}_workspace.pkl".format(workspace_id))
                    workspace_params.save(save_path)
                    return
            b = datetime.datetime.now()
            print("trajectories tried {}".format(i))
            print("success count {}".format(successful_trajectories_count))
            print("time since start {}".format(b - a))
            print("")

    def run(self):
        while True:
            try:
                # wait 1 second for a workspace generation request
                workspace_id = self.workspace_generation_queue.get(block=True,
                                                                   timeout=1)
                self._generate_single_workspace(workspace_id)
                self.results_queue.put(workspace_id)
                self.workspace_generation_queue.task_done()
            except Queue.Empty:
                pass
            try:
                # need to terminate
                worker_specific_task = self.worker_specific_queue.get(
                    block=True, timeout=0.001)
                self.worker_specific_queue.task_done()
                break
            except Queue.Empty:
                pass
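
# A minimal sketch of wiring the worker above, assuming JoinableQueue for the
# two queues on which task_done() is called and an existing module-level
# output_dir for the saved workspaces; all numeric parameters are illustrative.
import multiprocessing

workspace_generation_queue = multiprocessing.JoinableQueue()
worker_specific_queue = multiprocessing.JoinableQueue()
results_queue = multiprocessing.Queue()
worker = WorkerQueue(
    test_trajectories=20,
    attempts_to_find_single_trajectory=100,
    trajectories_required_to_pass=10,
    planner_iterations=150,
    workspace_generation_queue=workspace_generation_queue,
    worker_specific_queue=worker_specific_queue,
    results_queue=results_queue,
)
worker.start()
for workspace_id in range(5):
    workspace_generation_queue.put(workspace_id)
workspace_generation_queue.join()  # blocks until every workspace was generated
worker_specific_queue.put(None)    # any message tells the worker to terminate
worker.join()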
Example #15
def main():
    config_path = os.path.join(os.getcwd(), 'config/config.yml')
    with open(config_path, 'r') as yml_file:
        config = yaml.load(yml_file)
        print('------------ Config ------------')
        print(yaml.dump(config))
    model_dir = os.path.join(trajectories_dir, model_name)
    potential_points_location = os.path.join(model_dir, 'potential_points.p')
    potential_points = pickle.load(open(potential_points_location))

    # search_key = os.path.join(model_dir, '{}_step_{}_{}.p'.format(global_step, message, path_id))
    search_key = os.path.join(model_dir, global_step,
                              '{}_{}.p'.format(message, path_id))
    trajectory_files = glob.glob(search_key)
    trajectory_file = trajectory_files[0]
    pose_goal, trajectory, workspace_id = pickle.load(open(trajectory_file))
    trajectory = [[0.0] + list(j) for j in trajectory]

    openrave_manager = OpenraveManager(0.001, potential_points)
    # get the parameters
    if scenario == 'vision':
        workspace_params_path = os.path.abspath(
            os.path.expanduser(
                os.path.join('~/ModelBasedDDPG/scenario_params/vision/',
                             workspace_id)))
    else:
        workspace_params_path = os.path.abspath(
            os.path.expanduser(
                os.path.join('~/ModelBasedDDPG/scenario_params', scenario,
                             'params.pkl')))
    if workspace_params_path is not None:
        openrave_manager.set_params(workspace_params_path)

    openrave_manager.get_initialized_viewer()
    openrave_manager.env.GetViewer().SetSize(1200, 800)

    # for link in openrave_manager.robot.GetLinks():
    #     link.SetVisible(False)

    if display_start_goal_end_spheres:
        start = trajectory[0]
        end = trajectory[-1]
        pose_start = openrave_manager.get_target_pose(start)
        pose_start = (pose_start[0], 0.0, pose_start[1])
        pose_goal = (pose_goal[0], 0.0, pose_goal[1])
        pose_end = openrave_manager.get_target_pose(end)
        pose_end = (pose_end[0], 0.0, pose_end[1])
        start_sphere = create_sphere('start', trajectory_spheres_radius,
                                     openrave_manager)
        move_body(start_sphere, pose_start, 0.0)
        goal_sphere = create_sphere('goal', goal_radius, openrave_manager)
        move_body(goal_sphere, pose_goal, 0.0)
        end_sphere = create_sphere('end', trajectory_spheres_radius,
                                   openrave_manager)

        start_sphere.GetLinks()[0].GetGeometries()[0].SetDiffuseColor(
            np.array([0, 0, 204]))
        goal_sphere.GetLinks()[0].GetGeometries()[0].SetDiffuseColor(
            np.array([240, 100, 10]))
        goal_sphere.GetLinks()[0].GetGeometries()[0].SetTransparency(0.3)
        end_sphere.GetLinks()[0].GetGeometries()[0].SetDiffuseColor(
            np.array([100, 204, 204]))
        move_body(end_sphere, pose_end, 0.0)

    print('len(trajectory) {}'.format(len(trajectory)))
    for i in range(len(trajectory)):
        print('i {}'.format(i))
        openrave_manager.robot.SetDOFValues(trajectory[i])
        time.sleep(1 / speed)

    time.sleep(0.2)
    if message == 'collision':
        time.sleep(1.2)
Example #16
import tensorflow as tf
import os
import yaml

from openrave_manager import OpenraveManager
from potential_point import PotentialPoint

is_gpu = tf.test.is_gpu_available()
config_path = os.path.join(os.getcwd(), 'config/config.yml')
with open(config_path, 'r') as yml_file:
    config = yaml.load(yml_file)
potential_points = PotentialPoint.from_config(config)
openrave_manager = OpenraveManager(0.01, potential_points)
random_joints = openrave_manager.get_random_joints()

print('has gpu result {}'.format(is_gpu))
print('random joints result {}'.format(random_joints))
class OpenraveTrajectoryGenerator:
    def __init__(self, config):
        self.action_step_size = config["openrave_rl"]["action_step_size"]
        self.goal_sensitivity = config["openrave_rl"]["goal_sensitivity"]
        self.challenging_trajectories_only = config["openrave_planner"][
            "challenging_trajectories_only"]
        self.planner_iterations_start = config["openrave_planner"][
            "planner_iterations_start"]
        self.planner_iterations_increase = config["openrave_planner"][
            "planner_iterations_increase"]
        self.planner_iterations_decrease = config["openrave_planner"][
            "planner_iterations_decrease"]
        self.max_planner_iterations = self.planner_iterations_start

        self.openrave_manager = OpenraveManager(
            config["openrave_rl"]["segment_validity_step"],
            PotentialPoint.from_config(config),
        )

    def is_below_goal_sensitivity(self, start_joints, goal_joints):
        start_pose = self.openrave_manager.get_target_pose(start_joints)
        goal_pose = self.openrave_manager.get_target_pose(goal_joints)
        pose_distance = np.linalg.norm(
            np.array(start_pose) - np.array(goal_pose))
        return pose_distance < self.goal_sensitivity

    def find_random_trajectory_single_try(self):
        # select at random
        start_joints = self.openrave_manager.get_random_joints({0: 0.0})
        goal_joints = self.openrave_manager.get_random_joints({0: 0.0})
        # if the start and goal are too close, re-sample
        if self.is_below_goal_sensitivity(start_joints, goal_joints):
            return None
        start_pose = self.openrave_manager.get_target_pose(start_joints)
        goal_pose = self.openrave_manager.get_target_pose(goal_joints)
        # valid region:
        if not self._is_valid_region(start_pose, goal_pose):
            return None
        # trajectories that must cross an obstacle
        if self.challenging_trajectories_only and not self._is_challenging(
                start_pose, goal_pose):
            return None
        traj = self.openrave_manager.plan(start_joints, goal_joints,
                                          self.max_planner_iterations)
        return traj

    def find_random_trajectory(self):
        # lower_size = 0.0  # when doing curriculum, this is the lowest possible distance between start and goal
        while True:
            traj = self.find_random_trajectory_single_try()
            if traj is None:
                # if failed to plan, give more power
                self.max_planner_iterations += self.planner_iterations_increase
            else:
                # if a plan was found, return it (and maybe use fewer iterations next time)
                if (self.max_planner_iterations > self.planner_iterations_start +
                        self.planner_iterations_decrease):
                    self.max_planner_iterations -= self.planner_iterations_decrease
                return self.split_trajectory(traj, self.action_step_size)

    @staticmethod
    def _is_valid_region(start_pose, goal_pose):
        return start_pose[1] > 0.0 and goal_pose[1] > 0.0

    def _is_challenging(self, start_pose, goal_pose):
        workspace_params = self.openrave_manager.loaded_params
        if workspace_params is None or workspace_params.number_of_obstacles == 0:
            return True
        # check if the distance from any obstacle is smaller than the start-goal-distance
        start = np.array(start_pose)
        goal = np.array(goal_pose)
        start_goal_distance = np.linalg.norm(start - goal)
        for i in range(workspace_params.number_of_obstacles):
            obstacle = np.array([
                workspace_params.centers_position_x[i],
                workspace_params.centers_position_z[i],
            ])
            start_obstacle_distance = np.linalg.norm(start - obstacle)
            goal_obstacle_distance = np.linalg.norm(goal - obstacle)
            if (start_obstacle_distance < start_goal_distance
                    and goal_obstacle_distance < start_goal_distance):
                return True
        # all tests failed
        return False

    @staticmethod
    def split_trajectory(trajectory, action_step_size):
        res = [tuple(trajectory[0])]
        for i in range(len(trajectory) - 1):
            current_step = np.array(trajectory[i])
            next_step = np.array(trajectory[i + 1])
            difference = next_step - current_step
            difference_norm = np.linalg.norm(difference)
            if difference_norm < action_step_size:
                # if smaller than allowed step just append the next step
                res.append(tuple(trajectory[i + 1]))
                continue
            scaled_step = (action_step_size / difference_norm) * difference
            steps = []
            for alpha in range(
                    int(np.floor(difference_norm / action_step_size))):
                processed_step = current_step + (1 + alpha) * scaled_step
                steps.append(processed_step)
            # we probably have a leftover section, append it to res
            last_step_difference = np.linalg.norm(steps[-1] - next_step)
            if last_step_difference > 0.0:
                steps.append(next_step)
            # append to path
            res += [tuple(s) for s in steps]
        return res
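
# A minimal usage sketch for the generator above, assuming `config` was loaded
# from config/config.yml as in the other snippets; loading workspace params
# beforehand is optional and only gives _is_challenging obstacles to reason about.
import numpy as np

generator = OpenraveTrajectoryGenerator(config)
trajectory = generator.find_random_trajectory()
print('trajectory with {} waypoints'.format(len(trajectory)))
# after split_trajectory, consecutive waypoints are at most action_step_size apart
for current_step, next_step in zip(trajectory[:-1], trajectory[1:]):
    step_norm = np.linalg.norm(np.array(next_step) - np.array(current_step))
    assert step_norm <= generator.action_step_size + 1e-6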