def get_parameter_result(sim_world, task, parameter_fns, evaluation_fn, **kwargs):
    start_time = time.time()
    plan, plan_time = None, 0
    while (plan is None) and (elapsed_time(start_time) < kwargs['max_time']):
        plan, plan_time = simulate_trial(sim_world, task, parameter_fns, **kwargs)
    score = None
    if plan is None:
        print('No plan was found in {} seconds.'.format(kwargs['max_time']))
        control = {'feature': None, 'parameter': None}
    else:
        score = evaluation_fn(plan)
        #_, args = find_unique(lambda a: a[0] == 'pour', plan)
        #control = args[-1]
    elapsed = elapsed_time(start_time)
    result = {
        'success': (plan is not None),
        'score': score,
        'plan-time': plan_time,
        'elapsed-time': elapsed, # Includes execution time
        #'feature': control['feature'],
        #'parameter': control['parameter'],
    }
    return result
def find_closest(x0, curve, t_range=None, max_time=INF, max_iterations=INF,
                 distance_fn=None, verbose=False):
    assert (max_time < INF) or (max_iterations < INF)
    if t_range is None:
        t_range = Interval(curve.x[0], curve.x[-1])
    t_range = Interval(max(t_range[0], curve.x[0]), min(curve.x[-1], t_range[-1]))
    if distance_fn is None:
        distance_fn = get_distance
    start_time = time.time()
    closest_dist, closest_t = INF, None
    for iteration in irange(max_iterations):
        if elapsed_time(start_time) >= max_time:
            break
        t = random.uniform(*t_range) # TODO: halton
        x = curve(t)
        dist = distance_fn(x0, x)
        if dist < closest_dist:
            closest_dist, closest_t = dist, t
            if verbose:
                print('Iteration: {} | Dist: {:.3f} | T: {:.3f} | Time: {:.3f}'.format(
                    iteration, closest_dist, t, elapsed_time(start_time)))
    return closest_dist, closest_t
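# The timing and iteration helpers used throughout (INF, elapsed_time, irange, Interval)
# come from the surrounding utility modules; a minimal sketch of plausible definitions,
# assuming these semantics:
import time
from collections import namedtuple
from itertools import count

INF = float('inf')
Interval = namedtuple('Interval', ['lower', 'upper']) # closed interval [lower, upper]

def elapsed_time(start_time):
    # Wall-clock seconds since start_time (a time.time() timestamp)
    return time.time() - start_time

def irange(stop):
    # Like range(stop), but supports stop=INF for unbounded iteration
    return count() if stop == INF else range(int(stop))

# Hypothetical usage of find_closest above with a SciPy spline:
#   import numpy as np
#   from scipy.interpolate import CubicSpline
#   curve = CubicSpline([0., 1., 2.], [[0., 0.], [1., 2.], [2., 0.]])
#   dist, t = find_closest(np.zeros(2), curve, max_iterations=100,
#                          distance_fn=lambda x, y: np.linalg.norm(y - x))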
def random_restart(belief, args, problem, max_time=INF, max_iterations=INF,
                   max_planner_time=INF, **kwargs):
    domain_pddl, constant_map, _, _, init, goal_formula = problem
    start_time = time.time()
    task = belief.task
    world = task.world
    iterations = 0
    while (elapsed_time(start_time) < max_time) and (iterations < max_iterations):
        iterations += 1
        try:
            stream_pddl, stream_map = get_streams(world, teleport_base=task.teleport_base,
                                                  collisions=not args.cfree, teleport=args.teleport)
            problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map,
                                  init, goal_formula)
            remaining_time = min(max_time - elapsed_time(start_time), max_planner_time)
            plan, plan_cost, certificate = solve_pddlstream(belief, problem, args,
                                                            max_time=remaining_time, **kwargs)
            if plan is not None:
                return plan, plan_cost, certificate
        except KeyboardInterrupt as e:
            raise e
        except MemoryError as e:
            traceback.print_exc()
            if not REPLAN_FAILURES:
                raise e
            break
        except Exception as e:
            traceback.print_exc()
            if not REPLAN_FAILURES:
                raise e # FastDownward translator runs out of memory
    return None, INF, Certificate(all_facts=[], preimage_facts=[])
def optimize_commands(robot, obstacles, element_bodies, extrusion_path, initial_conf, commands,
                      max_iterations=INF, max_time=60, motions=True, collisions=True, **kwargs):
    if commands is None:
        return None
    start_time = time.time()
    element_from_id, node_points, ground_nodes = load_extrusion(extrusion_path)
    print_gen_fn = get_print_gen_fn(robot, obstacles, node_points, element_bodies, ground_nodes,
                                    precompute_collisions=False, collisions=collisions, **kwargs)
    commands = list(commands)
    sequence = [command.elements[0] for command in commands]
    directed_sequence = [command.directed_elements[0] for command in commands]
    indices = list(range(len(sequence)))
    #trajectories = flatten_commands(commands)
    #print(len(trajectories), get_print_distance(trajectories, teleport=False))
    total_distance = sum(get_command_distance(robot, initial_conf, commands, index, command)
                         for index, command in enumerate(commands))
    # TODO: bias sampling towards far apart relative to neighbors
    # TODO: bias IK solutions to be the best of several
    iterations = extrusion_failures = 0
    while (iterations < max_iterations) and (elapsed_time(start_time) < max_time):
        iterations += 1
        index = random.choice(indices)
        printed = sequence[:index] # Elements extruded before this command
        element = sequence[index]
        directed = directed_sequence[index]
        print(commands[index])
        distance = get_command_distance(robot, initial_conf, commands, index, commands[index])
        print('Iteration {} | Failures: {} | Distance: {:.3f} | Index: {}/{} | Time: {:.3f}'.format(
            iterations, extrusion_failures, total_distance, index, len(indices),
            elapsed_time(start_time)))
        new_command, = next(print_gen_fn(directed[0], element, extruded=printed), (None,))
        if new_command is None:
            extrusion_failures += 1
            continue
        new_distance = get_command_distance(robot, initial_conf, commands, index, new_command)
        if new_distance < distance:
            commands.pop(index)
            commands.insert(index, new_command)
            total_distance -= (distance - new_distance)
    #data = {
    #    'extrusion_failures': extrusion_failures,
    #}
    return commands #, data
def create_belief(self):
    t0 = time.time()
    print('Creating initial belief')
    belief = create_surface_belief(self.world, self.prior)
    belief.task = self
    print('Took {:.2f} seconds'.format(elapsed_time(t0)))
    return belief
def compute_component_mst(node_points, ground_nodes, unprinted, initial_position=None):
    # Weighted A*
    start_time = time.time()
    point_from_vertex = dict(enumerate(node_points))
    vertices = {v for e in unprinted for v in e}
    components = get_connected_components(vertices, unprinted)
    entry_nodes = set(ground_nodes)
    if initial_position is not None:
        point_from_vertex[INITIAL_NODE] = initial_position
        components.append([INITIAL_NODE])
        entry_nodes.add(INITIAL_NODE)
    edge_weights = {}
    for c1, c2 in combinations(range(len(components)), r=2):
        # TODO: directed edges from all points to entry nodes
        nodes1 = set(components[c1])
        nodes2 = set(components[c2])
        if (components[c1] == [INITIAL_NODE]) or (components[c2] == [INITIAL_NODE]):
            nodes1 &= entry_nodes
            nodes2 &= entry_nodes
        edge_weights[c1, c2] = min(get_distance(point_from_vertex[n1], point_from_vertex[n2])
                                   for n1, n2 in product(nodes1, nodes2))
    tree = compute_spanning_tree(edge_weights)
    weight = sum(edge_weights[e] for e in tree)
    print('Elements: {} | Components: {} | Tree: {} | Weight: {} | Time: {:.3f}'.format(
        len(unprinted), len(components), tree, weight, elapsed_time(start_time)))
    return weight
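# compute_spanning_tree is defined elsewhere; a minimal Kruskal-style sketch that is
# consistent with how it is called above (a dict mapping component pairs to weights in,
# a set of chosen edges out), assuming the implied union-find behavior:
def compute_spanning_tree_sketch(edge_weights):
    parent = {}
    def find(v):
        parent.setdefault(v, v)
        while parent[v] != v:
            parent[v] = parent[parent[v]] # Path halving
            v = parent[v]
        return v
    tree = set()
    for (c1, c2) in sorted(edge_weights, key=edge_weights.get): # Cheapest edges first
        root1, root2 = find(c1), find(c2)
        if root1 != root2:
            parent[root1] = root2 # Merge the two components
            tree.add((c1, c2))
    return tree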
def evaluate_scores(domain, seed, algorithm, learner, num_trials, serial=False, visualize=False):
    results = []
    if num_trials == 0:
        return results
    start_time = time.time()
    # trials selected by learner; num_trials is the number of contexts that the learner is evaluated on
    trials = get_trials(problem=domain.skill, fn=learner, num_trials=num_trials,
                        seed=seed, visualize=visualize, verbose=serial)
    num_cores = get_num_cores(trials, serial=serial)
    results = run_trials(trials, num_cores=num_cores)
    scored = get_scored_results(results)
    scores = [domain.score_fn(result[FEATURE], result[PARAMETER], result[SCORE])
              for result in scored]
    # TODO: record best predictions as well as average prediction quality
    print('Seed: {} | {} | Results: {} | Scored: {} | Time: {:.3f}'.format(
        seed, algorithm, len(results), len(scored), elapsed_time(start_time)))
    if scores:
        print('Average: {:.3f} | Success: {:.1f}%'.format(
            np.average(scores), 100 * score_accuracy(scores)))
    return results
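# score_accuracy is assumed to report the fraction of successful (nonnegative) scores,
# matching the 'Success: {:.1f}%' printout above; a one-line sketch of that reading:
def score_accuracy_sketch(scores):
    return sum(score >= 0 for score in scores) / len(scores)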
def local_search(extrusion_path, element_from_id, node_points, ground_nodes, checker, sequence,
                 initial_position=None, stiffness=True, max_time=INF):
    start_time = time.time()
    sequence = list(sequence)
    #elements = set(element_from_id.values())
    #indices = list(range(len(sequence)))
    #directions = [True, False]
    while elapsed_time(start_time) < max_time:
        new_sequence = search_neighborhood(extrusion_path, element_from_id, node_points,
                                           ground_nodes, checker, sequence, initial_position,
                                           stiffness=stiffness, max_time=INF)
        if new_sequence is None:
            break
        sequence = new_sequence
    return sequence # Best sequence found within max_time
def create_inverse_reachability(robot, body, table, arm, grasp_type, max_attempts=500, num_samples=500):
    tool_link = get_gripper_link(robot, arm)
    robot_saver = BodySaver(robot)
    gripper_from_base_list = []
    grasps = GET_GRASPS[grasp_type](body)
    start_time = time.time()
    while len(gripper_from_base_list) < num_samples:
        box_pose = sample_placement(body, table)
        set_pose(body, box_pose)
        grasp_pose = random.choice(grasps)
        gripper_pose = multiply(box_pose, invert(grasp_pose))
        for attempt in range(max_attempts):
            robot_saver.restore()
            base_conf = next(uniform_pose_generator(robot, gripper_pose)) #, reachable_range=(0., 1.)))
            #set_base_values(robot, base_conf)
            set_group_conf(robot, 'base', base_conf)
            if pairwise_collision(robot, table):
                continue
            grasp_conf = pr2_inverse_kinematics(robot, arm, gripper_pose) #, nearby_conf=USE_CURRENT)
            #conf = inverse_kinematics(robot, link, gripper_pose)
            if (grasp_conf is None) or pairwise_collision(robot, table):
                continue
            gripper_from_base = multiply(invert(get_link_pose(robot, tool_link)), get_base_pose(robot))
            #wait_if_gui()
            gripper_from_base_list.append(gripper_from_base)
            print('{} / {} | {} attempts | [{:.3f}]'.format(
                len(gripper_from_base_list), num_samples, attempt, elapsed_time(start_time)))
            wait_if_gui()
            break
        else:
            print('Failed to find a kinematic solution after {} attempts'.format(max_attempts))
    return save_inverse_reachability(robot, arm, grasp_type, tool_link, gripper_from_base_list)
def compute_motions(robot, fixed_obstacles, element_bodies, initial_conf, print_trajectories, **kwargs):
    # TODO: reoptimize for the sequence that has the smallest movements given this
    # TODO: sample trajectories
    # TODO: more appropriate distance based on displacement/volume
    if print_trajectories is None:
        return None
    #if any(isinstance(print_traj, MotionTrajectory) for print_traj in print_trajectories):
    #    return print_trajectories
    start_time = time.time()
    printed_elements = []
    all_trajectories = []
    current_conf = initial_conf
    for i, print_traj in enumerate(print_trajectories):
        if not np.allclose(current_conf, print_traj.start_conf, rtol=0, atol=1e-8):
            motion_traj = compute_motion(robot, fixed_obstacles, element_bodies, printed_elements,
                                         current_conf, print_traj.start_conf, **kwargs)
            if motion_traj is None:
                return None
            print('{}) {} | Time: {:.3f}'.format(i, motion_traj, elapsed_time(start_time)))
            all_trajectories.append(motion_traj)
        if isinstance(print_traj, PrintTrajectory):
            printed_elements.append(print_traj.element)
        all_trajectories.append(print_traj)
        current_conf = print_traj.end_conf
    motion_traj = compute_motion(robot, fixed_obstacles, element_bodies, printed_elements,
                                 current_conf, initial_conf, **kwargs)
    if motion_traj is None:
        return None
    return all_trajectories + [motion_traj]
def create_inverse_reachability2(robot, body, table, arm, grasp_type, max_attempts=500, num_samples=500):
    tool_link = get_gripper_link(robot, arm)
    problem = MockProblem(robot, fixed=[table], grasp_types=[grasp_type])
    placement_gen_fn = get_stable_gen(problem)
    grasp_gen_fn = get_grasp_gen(problem, collisions=True)
    ik_ir_fn = get_ik_ir_gen(problem, max_attempts=max_attempts, learned=False, teleport=True)
    placement_gen = placement_gen_fn(body, table)
    grasps = list(grasp_gen_fn(body))
    print('Grasps:', len(grasps))
    # TODO: sample the torso height
    # TODO: consider IK with respect to the torso frame
    start_time = time.time()
    gripper_from_base_list = []
    while len(gripper_from_base_list) < num_samples:
        [(p,)] = next(placement_gen)
        (g,) = random.choice(grasps)
        output = next(ik_ir_fn(arm, body, p, g), None)
        if output is None:
            print('Failed to find a solution after {} attempts'.format(max_attempts))
        else:
            (_, ac) = output
            [at,] = ac.commands
            at.path[-1].assign()
            gripper_from_base = multiply(invert(get_link_pose(robot, tool_link)), get_base_pose(robot))
            gripper_from_base_list.append(gripper_from_base)
            print('{} / {} [{:.3f}]'.format(
                len(gripper_from_base_list), num_samples, elapsed_time(start_time)))
            wait_if_gui()
    return save_inverse_reachability(robot, arm, grasp_type, tool_link, gripper_from_base_list)
def pour_beads(world, cup_name, beads, reset_contained=False, fix_outside=True,
               cup_thickness=0.01, bead_offset=0.01, drop_rate=0.02, **kwargs):
    if not beads:
        return set()
    start_time = time.time()
    # TODO: compute the radius of each bead
    bead_radius = np.average(approximate_as_prism(beads[0])) / 2.
    masses = list(map(get_mass, beads))
    savers = list(map(BodySaver, beads))
    for body in beads:
        set_mass(body, 0)
    cup = world.get_body(cup_name)
    local_center, (diameter, height) = approximate_as_cylinder(cup)
    center = get_point(cup) + local_center
    interior_radius = max(0.0, diameter / 2. - bead_radius - cup_thickness)
    # TODO: fill up to a certain threshold
    ty = get_type(cup_name)
    if ty in SPOON_DIAMETERS:
        # TODO: do this in a more principled way
        interior_radius = 0
        center[1] += (SPOON_LENGTHS[ty] - SPOON_DIAMETERS[ty]) / 2.
    # TODO: some sneak out through the bottom
    # TODO: could reduce gravity while filling
    world.controller.set_gravity()
    for i, bead in enumerate(beads):
        # TODO: halton sequence
        x, y = center[:2] + np.random.uniform(0, interior_radius) * \
            unit_from_theta(np.random.uniform(-np.pi, np.pi))
        new_z = center[2] + height / 2. + bead_radius + bead_offset
        set_point(bead, [x, y, new_z])
        set_mass(bead, masses[i])
        world.controller.rest_for_duration(drop_rate)
    world.controller.rest_for_duration(BEADS_REST)
    print('Simulated {} beads in {:.3f} seconds'.format(len(beads), elapsed_time(start_time)))
    contained_beads = get_contained_beads(cup, beads, **kwargs)
    #wait_for_user()
    for body in beads:
        if fix_outside and (body not in contained_beads):
            set_mass(body, 0)
    for saver in savers:
        if reset_contained or (saver.body not in contained_beads):
            saver.restore()
    #wait_for_user()
    return contained_beads
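# unit_from_theta maps a heading angle to a planar unit vector, so the sampling above
# draws bead positions uniformly in angle within the cup interior; a plausible sketch:
import numpy as np

def unit_from_theta_sketch(theta):
    return np.array([np.cos(theta), np.sin(theta)])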
def optimize_feature(learner, max_time=INF, **kwargs):
    #features = read_json(get_feature_path(learner.func.skill))
    feature_fn, attributes, pairs = generate_candidates(learner.func.skill, **kwargs)
    start_time = time.time()
    world = ROSWorld(use_robot=False, sim_only=True)
    saver = ClientSaver(world.client)
    print('Optimizing over {} feature pairs'.format(len(pairs)))
    best_pair, best_score = None, -INF # maximize
    for pair in randomize(pairs): # islice
        if max_time <= elapsed_time(start_time):
            break
        world.reset(keep_robot=False)
        for name in pair:
            world.perception.add_item(name)
        feature = feature_fn(world, *pair)
        parameter = next(learner.parameter_generator(world, feature, min_score=best_score,
                                                     valid=True, verbose=False), None)
        if parameter is None:
            continue
        context = learner.func.context_from_feature(feature)
        sample = learner.func.sample_from_parameter(parameter)
        x = x_from_context_sample(context, sample)
        #x = learner.func.x_from_feature_parameter(feature, parameter)
        score_fn = learner.get_score_f(context, noise=False, negate=False) # maximize
        score = float(score_fn(x)[0, 0])
        if best_score < score:
            best_pair, best_score = pair, score
    world.stop()
    saver.restore()
    print('Best pair: {} | Best score: {:.3f} | Pairs: {} | Time: {:.3f}'.format(
        best_pair, best_score, len(pairs), elapsed_time(start_time)))
    assert best_pair is not None # Ensure at least one pair was scored
    # TODO: ensure the same parameter is used
    return dict(zip(attributes, best_pair))
def mpc(x0, v0, curve, dt_max=0.5, max_time=INF, max_iterations=INF, v_max=None, **kwargs):
    assert (max_time < INF) or (max_iterations < INF)
    from scipy.interpolate import CubicHermiteSpline
    start_time = time.time()
    best_cost, best_spline = INF, None
    for iteration in irange(max_iterations):
        if elapsed_time(start_time) >= max_time:
            break
        t1 = random.uniform(curve.x[0], curve.x[-1])
        future = (curve.x[-1] - t1) # TODO: weighted
        if future >= best_cost:
            continue
        x1 = curve(t1)
        if (v_max is not None) and (max((x1 - x0) / v_max) > dt_max):
            continue
        #if quickest_inf_accel(x0, x1, v_max=v_max) > dt_max:
        #    continue
        v1 = curve(t1, nu=1)
        #dt = dt_max
        dt = random.uniform(0, dt_max)
        times = [0., dt]
        positions = [x0, x1]
        velocities = [v0, v1]
        spline = CubicHermiteSpline(times, positions, dydx=velocities)
        if not check_spline(spline, **kwargs):
            continue
        # TODO: optimize to find the closest point on the path within a range
        cost = future + (spline.x[-1] - spline.x[0])
        if cost < best_cost:
            best_cost, best_spline = cost, spline
            print('Iteration: {} | Cost: {:.3f} | T: {:.3f} | Time: {:.3f}'.format(
                iteration, cost, t1, elapsed_time(start_time)))
    print(best_cost, t1, elapsed_time(start_time))
    return best_cost, best_spline
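# check_spline is defined elsewhere; a minimal sketch of one plausible contract,
# rejecting candidate splines whose sampled velocities exceed a per-dimension bound:
import numpy as np

def check_spline_sketch(spline, v_max=None, num=10):
    if v_max is None:
        return True
    times = np.linspace(spline.x[0], spline.x[-1], num=num)
    velocities = spline(times, nu=1) # First derivative of the Hermite spline
    return bool(np.all(np.absolute(velocities) <= v_max))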
def wait_until_observation(problem, ros_world, max_time=5):
    ros_world.sync_controllers() # AKA update sim robot joint positions
    iteration = count()
    start_time = time.time()
    ros_world.perception.updated_simulation = False
    while elapsed_time(start_time) < max_time:
        ros_world.perception.wait_for_update()
        names = set(info.name for info in ros_world.perception.surfaces + ros_world.perception.items)
        items = REQUIREMENT_FNS[problem](ros_world)
        print('Iteration: {} | Bodies: {} | Time: {:.3f}'.format(
            next(iteration), sorted(names), elapsed_time(start_time)))
        if items is not None:
            break
    else:
        print('Failed to detect required objects for {}'.format(problem))
        return None
    ros_world.perception.update_simulation()
    # TODO: return the detected items
    return items
def retrain_sklearer(learner, newx=None, newy=None, new_w=None):
    start_time = time.time()
    if (newx is not None) and (newy is not None):
        learner.xx = np.vstack((learner.xx, newx))
        if learner.model_type in CLASSIFIERS:
            newy = (THRESHOLD < newy)
        learner.yy = np.hstack((learner.yy, newy))
        if new_w is not None:
            learner.weights = np.hstack((learner.weights, new_w))
    # TODO: what was this supposed to do?
    #self.xx, self.yy, sample_weight = shuffle(self.xx, self.yy, _get_sample_weight(self))
    if learner.model.__class__ in [MLPClassifier, MLPRegressor]:
        learner.model.fit(learner.xx, learner.yy)
    else:
        # TODO: preprocess to change the mean_function
        # https://scikit-learn.org/stable/modules/preprocessing.html
        xx, yy = learner.xx, learner.yy
        weights = None
        if (learner.transfer_weight is not None) and (2 <= len(Counter(learner.weights))):
            assert 0. <= learner.transfer_weight <= 1.
            #weights = learner.weights
            weights = np.ones(yy.shape)
            total_weight = sum(weights)
            normal_indices = np.argwhere(learner.weights == NORMAL_WEIGHT)
            weights[normal_indices] = total_weight * (1 - learner.transfer_weight) / len(normal_indices)
            transfer_indices = np.argwhere(learner.weights == TRANSFER_WEIGHT)
            weights[transfer_indices] = total_weight * learner.transfer_weight / len(transfer_indices)
            print('Transfer weight: {:.3f} | Normal: {} | Transfer: {} | Other: {}'.format(
                learner.transfer_weight, len(normal_indices), len(transfer_indices),
                len(weights) - len(normal_indices) - len(transfer_indices)))
            #weights = 1./len(yy) * np.ones(len(yy))
        #num_repeat = 0
        #if num_repeat != 0:
        #    xx = np.vstack([xx, xx[:num_repeat]])
        #    yy = np.vstack([yy, yy[:num_repeat]])
        #    weights = np.concatenate([
        #        1./len(yy) * np.ones(len(yy)),
        #        1./num_repeat * np.ones(num_repeat),
        #    ])
        learner.model.fit(xx, yy, sample_weight=weights)
    print('Trained in {:.3f} seconds'.format(elapsed_time(start_time)))
    learner.metric()
    learner.trained = True
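# Worked example of the reweighting above, assuming NORMAL_WEIGHT and TRANSFER_WEIGHT
# tag the two data sources: with 8 normal points, 2 transfer points, and
# transfer_weight=0.25, total_weight=10, so each normal point receives
# 10 * (1 - 0.25) / 8 = 0.9375 and each transfer point 10 * 0.25 / 2 = 1.25;
# the transfer examples collectively contribute exactly transfer_weight of the mass.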
def run_trials(trials, data_path=None, num_cores=False, **kwargs):
    # https://stackoverflow.com/questions/15314189/python-multiprocessing-pool-hangs-at-join
    # https://stackoverflow.com/questions/39884898/large-amount-of-multiprocessing-process-causing-deadlock
    # TODO: multiprocessing still seems to hang on one thread before starting
    assert (get_python_version() == 3)
    results = []
    if not trials:
        return results
    start_time = time.time()
    serial = (num_cores is False) # None is the max number
    failures = 0
    scored = 0
    try:
        for result in map_general(run_trial, trials, serial, num_cores=num_cores, **kwargs):
            num_trials = len(results) + failures
            print('{}\nTrials: {} | Successes: {} | Failures: {} | Scored: {} | Time: {:.3f}'.format(
                SEPARATOR, num_trials, len(results), failures, scored, elapsed_time(start_time)))
            print('Result:', str_from_object(result))
            if result is None:
                failures += 1
                print('Error! Trial resulted in an exception')
                continue
            scored += int(result.get('score', None) is not None)
            results.append(result)
            write_results(data_path, results)
    #except BaseException as e:
    #    traceback.print_exc() # e
    finally:
        print(SEPARATOR)
        safe_rm_dir(TEMP_DIRECTORY)
        write_results(data_path, results)
        print('Hours: {:.3f}'.format(elapsed_time(start_time) / HOURS_TO_SECS))
    # TODO: make a generator version of this
    return results
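# map_general is defined elsewhere; a minimal sketch consistent with the call above,
# dispatching between a serial map and a multiprocessing pool (num_cores=None lets the
# pool use all available cores):
from multiprocessing import Pool

def map_general_sketch(fn, inputs, serial, num_cores=None):
    if serial:
        for value in map(fn, inputs):
            yield value
    else:
        with Pool(processes=num_cores) as pool:
            for value in pool.imap(fn, inputs):
                yield value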
def simulate(controller=None, max_duration=INF, max_steps=INF, print_rate=1., sleep=None):
    enable_gravity()
    dt = get_time_step()
    print('Time step: {:.6f} sec'.format(dt))
    start_time = last_print = time.time()
    for step in irange(max_steps):
        duration = step * dt
        if duration >= max_duration:
            break
        if (controller is not None) and is_empty(controller):
            break
        step_simulation()
        synchronize_viewer()
        if elapsed_time(last_print) >= print_rate:
            print('Sim step: {} | Sim time: {:.3f} sec | Elapsed time: {:.3f} sec'.format(
                step, duration, elapsed_time(start_time)))
            last_print = time.time()
        if sleep is not None:
            time.sleep(sleep)
def search_neighborhood(extrusion_path, element_from_id, node_points, ground_nodes, checker,
                        sequence, initial_position, stiffness=True, max_candidates=1, max_time=INF):
    start_time = time.time()
    best_sequence = None
    best_cost = compute_sequence_distance(node_points, sequence,
                                          start=initial_position, end=initial_position)
    candidates = 1
    for i1, i2 in randomize(combinations_with_replacement(range(len(sequence)), r=2)):
        for directed1 in get_directions(sequence[i1]):
            for directed2 in get_directions(sequence[i2]):
                if implies(best_sequence, (max_candidates <= candidates)) and \
                        (max_time <= elapsed_time(start_time)):
                    return best_sequence
                candidates += 1
                if i1 == i2:
                    new_sequence = sequence[:i1] + [directed1] + sequence[i1+1:]
                else:
                    new_sequence = sequence[:i1] + [directed1] + sequence[i1+1:i2] + \
                        [directed2] + sequence[i2+1:]
                assert len(new_sequence) == len(sequence)
                if count_violations(ground_nodes, new_sequence):
                    continue
                new_cost = compute_sequence_distance(node_points, new_sequence,
                                                     start=initial_position, end=initial_position)
                if best_cost <= new_cost:
                    continue
                print(best_cost, new_cost)
                return new_sequence
                # TODO: eager version of this also
                #if stiffness and not test_stiffness(extrusion_path, element_from_id, printed,
                #                                    checker=checker, verbose=False):
                #    continue # Unfortunately the full structure is affected
    return best_sequence
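# implies is logical implication (not p or q), used in the termination test above so a
# candidate count only forces an early return once a best sequence exists; a sketch:
def implies_sketch(p, q):
    return not p or q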
def solve_pybullet_ik(self, world_from_tool, nearby_tolerance):
    start_time = time.time()
    # Most of the time is spent creating the robot
    # TODO: use the waypoint version that doesn't repeatedly create the robot
    current_conf = get_joint_positions(self.robot, self.arm_joints)
    full_conf = sub_inverse_kinematics(self.robot, self.arm_joints[0], self.tool_link,
                                       world_from_tool,
                                       custom_limits=self.custom_limits) #, max_iterations=1) #, **kwargs)
    conf = get_joint_positions(self.robot, self.arm_joints)
    max_distance = get_distance(current_conf, conf, norm=INF)
    if nearby_tolerance < max_distance:
        return None
    print('Nearby) time: {:.3f} | distance: {:.5f}'.format(
        elapsed_time(start_time), max_distance))
    return full_conf
def evaluate_learner(domain, seed, train_confusion, X_test, Y_test, algorithm, learner,
                     train_size, num_trials, alphas, serial=False, confusion_only=True):
    # The learner selects train_size trials
    start_time = time.time()
    max_cores = get_max_cores(serial=serial)
    test_confusion = None
    if (train_size is not None) and (len(Y_test) != 0):
        # TODO: kernel density estimation
        print('Test examples:', Y_test.shape[0])
        if algorithm.name in NN_MODELS:
            alphas = alphas[:1]
        confusions = []
        for alpha in alphas:
            print('Alpha={}'.format(alpha) if alpha is None else 'Alpha={:.3f}'.format(alpha))
            confusions.append(compute_metrics(Y_test, learner.score_x(
                X_test, alpha=alpha, max_cores=max_cores)))
        test_confusion = confusions[0]
    results = []
    if not confusion_only:
        # trials selected by learner; num_trials is the number of contexts that the learner is evaluated on
        trials = get_trials(problem=domain.skill, fn=learner, num_trials=num_trials,
                            seed=seed, verbose=serial)
        num_cores = get_num_cores(trials, serial=serial)
        results = run_trials(trials, num_cores=num_cores)
        scores = [domain.score_fn(result[FEATURE], result[PARAMETER], result[SCORE])
                  for result in results]
        # TODO: record best predictions as well as average prediction quality
        print('Seed: {} | {} | Results: {} | Time: {:.3f}'.format(
            seed, algorithm, len(results), elapsed_time(start_time)))
        if scores:
            print('Average: {:.3f} | Success: {:.1f}%'.format(
                np.average(scores), 100 * score_accuracy(scores)))
    confusion = {
        'train': train_confusion,
        'test': test_confusion,
    }
    return Experiment(algorithm, train_size, confusion, results)
def update(self, detections, n_samples=25):
    start_time = time.time()
    self.observations.append(detections)
    # Process detected objects first
    # Could simply sample from the set of worlds and update
    # Would need to sample many worlds with name at different poses
    # Instead, let the moving object take on different poses
    with LockRenderer():
        with WorldSaver():
            if REPAIR_DETECTIONS:
                detections = fix_detections(self, detections) # TODO: skip if in sim
            detections = relative_detections(self, detections)
            order = [name for name in detections] # Detected
            order.extend(set(self.pose_dists) - set(order)) # Not detected
            for name in order:
                self.pose_dists[name] = self.pose_dists[name].update(
                    self, detections, n_samples=n_samples)
    self.update_state()
    print('Update time: {:.3f} sec for {} objects and {} samples'.format(
        elapsed_time(start_time), len(order), n_samples))
    return self
def compute_euclidean_tree(node_points, ground_nodes, elements, initial_position=None):
    # remove printed elements from the tree
    # TODO: directed version
    start_time = time.time()
    point_from_vertex, edges = embed_graph(elements, node_points, ground_nodes, initial_position)
    edge_weights = {(n1, n2): get_distance(point_from_vertex[n1], point_from_vertex[n2])
                    for n1, n2 in edges}
    components = get_connected_components(point_from_vertex, edge_weights)
    tree = compute_spanning_tree(edge_weights)
    from extrusion.visualization import draw_model
    draw_model(tree, point_from_vertex, ground_nodes, color=BLUE)
    draw_model(edges - tree, point_from_vertex, ground_nodes, color=RED)
    weight = sum(edge_weights[e] for e in tree)
    print(len(components), len(point_from_vertex), len(tree), weight, elapsed_time(start_time))
    wait_for_user()
    return weight
def follow_trajectory(self, joints, path, times_from_start=None, real_time_step=0.0,
                      waypoint_tolerance=1e-2 * np.pi, goal_tolerance=5e-3 * np.pi,
                      max_sim_duration=1.0, print_sim_frequency=1.0, **kwargs):
    #real_time_step = 1e-1 # Can optionally sleep to slow down visualization
    start_time = time.time()
    if times_from_start is not None:
        assert len(path) == len(times_from_start)
        current_positions = get_joint_positions(self.robot, joints)
        differences = [(np.array(q2) - np.array(q1)) / (t2 - t1) for q1, t1, q2, t2 in
                       zip([current_positions] + path, [0.] + times_from_start,
                           path, times_from_start)]
        velocity_path = differences[1:] + [np.zeros(len(joints))] # Using velocity at endpoints
    else:
        velocity_path = [None] * len(path)

    sim_duration = 0.0
    sim_steps = 0
    last_print_sim_duration = sim_duration
    with ClientSaver(self.client):
        dt = get_time_step()
        # TODO: fit using splines to get velocity info
        for num, positions in enumerate(path):
            if self.execute_motor_control:
                sim_start = sim_duration
                tolerance = goal_tolerance if (num == len(path) - 1) else waypoint_tolerance
                velocities = velocity_path[num]
                for _ in joint_controller_hold2(self.robot, joints, positions, velocities,
                                                tolerance=tolerance, **kwargs):
                    step_simulation()
                    #print(get_joint_velocities(self.robot, joints))
                    #print([get_joint_torque(self.robot, joint) for joint in joints])
                    sim_duration += dt
                    sim_steps += 1
                    time.sleep(real_time_step)
                    if print_sim_frequency <= (sim_duration - last_print_sim_duration):
                        print('Waypoint: {} | Simulation steps: {} | Simulation seconds: {:.3f} | Steps/sec {:.3f}'.format(
                            num, sim_steps, sim_duration, sim_steps / elapsed_time(start_time)))
                        last_print_sim_duration = sim_duration
                    if max_sim_duration <= (sim_duration - sim_start):
                        print('Timeout of {:.3f} simulation seconds exceeded!'.format(max_sim_duration))
                        break
                #control_joints(self.robot, arm_joints, positions)
            else:
                set_joint_positions(self.robot, joints, positions)
    print('Followed {} waypoints in {:.3f} simulation seconds and {} simulation steps'.format(
        len(path), sim_duration, sim_steps))
def collect_place(world, object_name, surface_name, grasp_type, args):
    date = get_date()
    #set_seed(args.seed)
    #dump_body(world.robot)
    surface_pose = get_surface_reference_pose(world.kitchen, surface_name) # TODO: assumes the drawer is open
    stable_gen_fn = get_stable_gen(world, z_offset=Z_EPSILON, visibility=False,
                                   learned=False, collisions=not args.cfree)
    grasp_gen_fn = get_grasp_gen(world)
    ik_ir_gen = get_pick_gen_fn(world, learned=False, collisions=not args.cfree,
                                teleport=args.teleport)
    stable_gen = stable_gen_fn(object_name, surface_name)
    grasps = list(grasp_gen_fn(object_name, grasp_type))

    robot_name = get_body_name(world.robot)
    path = get_place_path(robot_name, surface_name, grasp_type)
    print(SEPARATOR)
    print('Robot name: {} | Object name: {} | Surface name: {} | Grasp type: {} | Filename: {}'.format(
        robot_name, object_name, surface_name, grasp_type, path))

    entries = []
    start_time = time.time()
    failures = 0
    while (len(entries) < args.num_samples) and \
            (elapsed_time(start_time) < args.max_time): #and (failures <= max_failures):
        (rel_pose,) = next(stable_gen)
        if rel_pose is None:
            break
        (grasp,) = random.choice(grasps)
        with LockRenderer(lock=True):
            result = next(ik_ir_gen(object_name, rel_pose, grasp), None)
        if result is None:
            print('Failure! | {} / {} [{:.3f}]'.format(
                len(entries), args.num_samples, elapsed_time(start_time)))
            failures += 1
            continue
        # TODO: ensure an arm motion exists
        bq, aq, at = result
        rel_pose.assign()
        bq.assign()
        aq.assign()
        base_pose = get_link_pose(world.robot, world.base_link)
        object_pose = rel_pose.get_world_from_body()
        tool_pose = multiply(object_pose, invert(grasp.grasp_pose))
        entries.append({
            'tool_from_base': multiply(invert(tool_pose), base_pose),
            'surface_from_object': multiply(invert(surface_pose), object_pose),
            'base_from_object': multiply(invert(base_pose), object_pose),
        })
        print('Success! | {} / {} [{:.3f}]'.format(
            len(entries), args.num_samples, elapsed_time(start_time)))
        if has_gui():
            wait_for_user()
    #visualize_database(tool_from_base_list)
    if not entries:
        safe_remove(path)
        return None

    # Assuming the kitchen is fixed but the objects might be open world
    data = {
        'date': date,
        'robot_name': robot_name, # get_name | get_body_name | get_base_name | world.robot_name
        'base_link': get_link_name(world.robot, world.base_link),
        'tool_link': get_link_name(world.robot, world.tool_link),
        'kitchen_name': get_body_name(world.kitchen),
        'surface_name': surface_name,
        'object_name': object_name,
        'grasp_type': grasp_type,
        'entries': entries,
        'failures': failures,
        'successes': len(entries),
    }
    write_json(path, data)
    print('Saved', path)
    return data
def solve_serialized(robots, obstacles, node_points, element_bodies, ground_nodes, layer_from_n,
                     trajectories=[], post_process=False, collisions=True, disable=False,
                     max_time=INF, **kwargs):
    start_time = time.time()
    saver = WorldSaver()
    elements = set(element_bodies)
    elements_from_layers = compute_elements_from_layer(elements, layer_from_n)
    layers = sorted(elements_from_layers.keys())
    print('Layers:', layers)

    full_plan = []
    makespan = 0.
    removed = set()
    for layer in reversed(layers):
        print(SEPARATOR)
        print('Layer: {}'.format(layer))
        saver.restore()
        remaining = elements_from_layers[layer]
        printed = elements - remaining - removed
        draw_model(remaining, node_points, ground_nodes, color=GREEN)
        draw_model(printed, node_points, ground_nodes, color=RED)
        problem = get_pddlstream(robots, obstacles, node_points, element_bodies, ground_nodes,
                                 layer_from_n, printed=printed, removed=removed, return_home=False,
                                 trajectories=trajectories, collisions=collisions,
                                 disable=disable, **kwargs)
        layer_plan, certificate = solve_pddlstream(problem, node_points, element_bodies,
                                                   max_time=max_time - elapsed_time(start_time))
        remove_all_debug()
        if layer_plan is None:
            return None, None # Match the (plan, certificate) arity of the final return
        if post_process:
            print(SEPARATOR)
            # Allows the planner to continue to check collisions
            problem.init[:] = certificate.all_facts
            #static_facts = extract_static_facts(layer_plan, ...)
            #problem.init.extend(('Order',) + pair for pair in compute_total_orders(layer_plan))
            for fact in [('print',), ('move',)]:
                if fact in problem.init:
                    problem.init.remove(fact)
            new_layer_plan, _ = solve_pddlstream(problem, node_points, element_bodies,
                                                 planner=POSTPROCESS_PLANNER,
                                                 max_time=max_time - elapsed_time(start_time))
            if (new_layer_plan is not None) and \
                    (compute_duration(new_layer_plan) < compute_duration(layer_plan)):
                user_input('{:.3f}->{:.3f}'.format( # Prompt before overwriting so both durations show
                    compute_duration(layer_plan), compute_duration(new_layer_plan)))
                layer_plan = new_layer_plan
            # TODO: replan in a cost sensitive way
        layer_plan = apply_start(layer_plan, makespan)
        duration = compute_duration(layer_plan)
        makespan += duration
        print('\nLength: {} | Start: {:.3f} | End: {:.3f} | Duration: {:.3f} | Makespan: {:.3f}'.format(
            len(layer_plan), compute_start(layer_plan), compute_end(layer_plan), duration, makespan))
        full_plan.extend(layer_plan)
        removed.update(remaining)
    print(SEPARATOR)
    print_plan(full_plan)
    return full_plan, None
def regression(robot, obstacles, element_bodies, extrusion_path, partial_orders=[],
               heuristic='z', max_time=INF, max_memory=INF, backtrack_limit=INF,
               revisit=False, stiffness_attempts=1, collisions=True,
               stiffness=True, motions=True, lazy=LAZY, checker=None, **kwargs):
    # Focused has the benefit of reusing prior work
    # Greedy has the benefit of conditioning on previous choices
    # TODO: max branching factor
    # TODO: be more careful when near the end
    # TODO: max time spent evaluating successors (less expensive when few left)
    # TODO: tree rollouts
    # TODO: best-first search with a minimizing path distance cost
    # TODO: immediately select if becomes more stable
    # TODO: focus branching factor on most stable regions
    start_time = time.time()
    initial_conf = get_configuration(robot)
    initial_position = get_tool_position(robot)
    element_from_id, node_points, ground_nodes = load_extrusion(extrusion_path)
    id_from_element = get_id_from_element(element_from_id)
    all_elements = frozenset(element_bodies)
    ground_elements = get_ground_elements(all_elements, ground_nodes)
    if stiffness and (checker is None):
        checker = create_stiffness_checker(extrusion_path, verbose=False)
    print_gen_fn = get_print_gen_fn(robot, obstacles, node_points, element_bodies, ground_nodes,
                                    precompute_collisions=False, max_directions=MAX_DIRECTIONS,
                                    max_attempts=MAX_ATTEMPTS, collisions=collisions, **kwargs)
    heuristic_fn = get_heuristic_fn(robot, extrusion_path, heuristic, checker=checker, forward=False)

    queue = []
    outgoing_from_element = outgoing_from_edges(partial_orders)
    def add_successors(printed, position, conf):
        only_ground = printed <= ground_elements
        num_remaining = len(printed) - 1
        #assert 0 <= num_remaining
        for element in randomize(printed):
            if not (outgoing_from_element[element] & printed) and \
                    implies(is_ground(element, ground_nodes), only_ground):
                for directed in get_directions(element):
                    visits = 0
                    bias = heuristic_fn(printed, directed, position, conf)
                    priority = (num_remaining, bias, random.random())
                    heapq.heappush(queue, (visits, priority, printed, directed, conf))

    final_conf = initial_conf # TODO: allow choice of final config
    final_position = initial_position
    final_printed = all_elements
    visited = {final_printed: Node(None, None)}
    if check_connected(ground_nodes, final_printed) and \
            (not stiffness or test_stiffness(extrusion_path, element_from_id, final_printed,
                                             checker=checker)):
        add_successors(final_printed, final_position, final_conf)

    #if has_gui():
    #    sequence = sorted(final_printed, key=lambda e: heuristic_fn(final_printed, e, conf=None),
    #                      reverse=True)
    #    remove_all_debug()
    #    draw_ordered(sequence, node_points)
    #    wait_for_user()

    plan = None
    min_remaining = len(all_elements)
    num_evaluated = max_backtrack = extrusion_failures = transit_failures = stiffness_failures = 0
    while queue and (elapsed_time(start_time) < max_time) and check_memory(): #max_memory):
        visits, priority, printed, directed, current_conf = heapq.heappop(queue)
        element = get_undirected(all_elements, directed)
        num_remaining = len(printed)
        backtrack = num_remaining - min_remaining
        max_backtrack = max(max_backtrack, backtrack)
        if backtrack_limit < backtrack:
            break # continue
        num_evaluated += 1
        print('Iteration: {} | Best: {} | Printed: {} | Element: {} | Index: {} | Time: {:.3f}'.format(
            num_evaluated, min_remaining, len(printed), element, id_from_element[element],
            elapsed_time(start_time)))
        next_printed = printed - {element}
        next_nodes = compute_printed_nodes(ground_nodes, next_printed)

        #draw_action(node_points, next_printed, element)
        #if 3 < backtrack + 1:
        #    remove_all_debug()
        #    set_renderer(enable=True)
        #    draw_model(next_printed, node_points, ground_nodes)
        #    wait_for_user()

        node1, node2 = directed
        if (next_printed in visited) or (node1 not in next_nodes) or \
                not check_connected(ground_nodes, next_printed):
            continue
        # TODO: stiffness plan lazily here possibly with reuse
        if stiffness and not test_stiffness(extrusion_path, element_from_id, next_printed,
                                            checker=checker):
            stiffness_failures += 1
            continue
        # TODO: stronger condition for this procedure
        #if plan_stiffness(extrusion_path, element_from_id, node_points, ground_nodes, next_printed,
        #                  checker=checker, max_backtrack=0) is None:
        #    # TODO: cache and reuse prior stiffness plans
        #    print('Failed stiffness plan') # TODO: require just a short horizon
        #    continue
        if revisit:
            heapq.heappush(queue, (visits + 1, priority, printed, directed, current_conf))

        command, = next(print_gen_fn(node1, element, extruded=next_printed), (None,))
        if command is None:
            extrusion_failures += 1
            continue
        if motions and not lazy:
            motion_traj = compute_motion(robot, obstacles, element_bodies, printed,
                                         command.end_conf, current_conf, collisions=collisions,
                                         max_time=max_time - elapsed_time(start_time)) # TODO: smooth=...)
            if motion_traj is None:
                transit_failures += 1
                continue
            command.trajectories.append(motion_traj)

        if num_remaining < min_remaining:
            min_remaining = num_remaining
            #print('New best: {}'.format(num_remaining))
            #if has_gui():
            #    # TODO: change link transparency
            #    remove_all_debug()
            #    draw_model(next_printed, node_points, ground_nodes)
            #    wait_for_duration(0.5)

        visited[next_printed] = Node(command, printed) # TODO: be careful when multiple trajs
        if not next_printed:
            min_remaining = 0
            commands = retrace_commands(visited, next_printed, reverse=True)
            if OPTIMIZE:
                commands = optimize_commands(robot, obstacles, element_bodies, extrusion_path,
                                             initial_conf, commands,
                                             motions=motions, collisions=collisions)
            plan = flatten_commands(commands)
            if motions and not lazy:
                motion_traj = compute_motion(robot, obstacles, element_bodies, frozenset(),
                                             initial_conf, plan[0].start_conf,
                                             collisions=collisions,
                                             max_time=max_time - elapsed_time(start_time))
                if motion_traj is None:
                    plan = None
                    transit_failures += 1
                else:
                    plan.insert(0, motion_traj)
            if motions and lazy:
                plan = compute_motions(robot, obstacles, element_bodies, initial_conf, plan,
                                       collisions=collisions,
                                       max_time=max_time - elapsed_time(start_time))
            break
            #if plan is not None:
            #    break
        add_successors(next_printed, node_points[node1], command.start_conf)
    #del checker

    data = {
        #'memory': get_memory_in_kb(), # May need to update instead
        'num_evaluated': num_evaluated,
        'min_remaining': min_remaining,
        'max_backtrack': max_backtrack,
        'stiffness_failures': stiffness_failures,
        'extrusion_failures': extrusion_failures,
        'transit_failures': transit_failures,
    }
    return plan, data
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('paths', nargs='*', help='Paths to the data.')
    #parser.add_argument('-a', '--active', type=int, default=0, # None
    #                    help='The number of active samples to collect')
    parser.add_argument('-l', '--learner', default=None,
                        help='Path to the learner that should be used')
    parser.add_argument('-n', '--num_trials', type=int, default=100,
                        help='The number of samples to collect')
    parser.add_argument('-s', '--save', action='store_true',
                        help='Whether to save the learners')
    parser.add_argument('-r', '--num_rounds', type=int, default=1,
                        help='The number of rounds to collect')
    #parser.add_argument('-t', '--num_train', type=int, default=None,
    #                    help='The size of the training set')
    args = parser.parse_args()

    # TODO: be careful that paging isn't altering the data
    # TODO: don't penalize if the learner identifies that it can't make a good prediction
    # TODO: use a different set of randomized parameters for train and test
    include_none = False
    serial = is_darwin()

    #training_sizes = inclusive_range(50, 500, 25)
    #training_sizes = inclusive_range(25, 100, 5)
    training_sizes = inclusive_range(10, 50, 5)
    #training_sizes = inclusive_range(100, 1000, 100)
    #training_sizes = [20]
    #training_sizes = [1500]

    #kernels = ['RBF', 'Matern52', 'MLP']
    kernels = ['MLP']

    #hyperparameters = [None]
    #hyperparameters = [True]
    hyperparameters = [True, None]

    query_type = BEST # BEST | CONFIDENT | REJECTION | ACTIVE # type of query used to evaluate the learner
    is_adaptive = False
    max_test = 50
    #alphas = np.linspace(0.0, 0.9, num=5, endpoint=True)
    alphas = [0.0, .8, .9, .99]
    #alphas = [None] # Use the default (i.e. GP parameters)

    use_vars = [True]
    binary = False
    split = UNIFORM # BALANCED
    # Omitting failed labels is okay because they will never be executed
    algorithms = []
    #algorithms += [(Algorithm(BATCH_GP, kernel=kernel, hyperparameters=hype, use_var=use_var), [num_train])
    #               for num_train, kernel, hype, use_var in product(training_sizes, kernels,
    #                                                               hyperparameters, use_vars)]
    algorithms += [(Algorithm(STRADDLE_GP, kernel, hype, use_var), training_sizes)
                   for kernel, hype, use_var in product(kernels, hyperparameters, use_vars)]
    #algorithms += [(Algorithm(rf_model, p_explore=None, use_var=use_var), [num_train])
    #               for rf_model, num_train, use_var in product(RF_MODELS, training_sizes, use_vars)]
    #algorithms += [(Algorithm(nn_model, p_explore=None), [num_train])
    #               for nn_model, num_train in product(NN_MODELS, training_sizes)]
    #algorithms += [(Algorithm(RANDOM), None), (Algorithm(DESIGNED), None)]
    print('Algorithms:', algorithms)
    print('Split:', split)

    trials_per_round = sum(1 if train_sizes is None else
                           (train_sizes[-1] - train_sizes[0] + len(train_sizes))
                           for _, train_sizes in algorithms)
    num_experiments = args.num_rounds * trials_per_round

    date_name = datetime.datetime.now().strftime(DATE_FORMAT)
    size_str = '[{},{}]'.format(training_sizes[0], training_sizes[-1])
    #size_str = '-'.join(map(str, training_sizes))
    experiments_name = '{}_r={}_t={}_n={}'.format(date_name, args.num_rounds,
                                                  size_str, args.num_trials)
    #experiments_name = '19-08-09_21-44-58_r=5_t=[10,150]_n=1'
    #experiments_name = 't={}'.format(args.num_rounds)
    # TODO: could include OS and username if desired

    domain = load_data(args.paths)
    print()
    print(domain)
    X, Y, W = domain.get_data(include_none=include_none)
    print('Total number of examples:', len(X))
    if binary:
        # NN can fit perfectly when binary
        # Binary seems to be outperforming w/o
        Y = threshold_scores(Y)

    max_train = len(X) - max_test
    #max_train = min(max([0] + [active_sizes[0] for _, active_sizes in algorithms
    #                           if active_sizes is not None]), len(X))

    #parameters = {
    #    'include None': include_none,
    #    'binary': binary,
    #    'split': split,
    #}
    print('Name:', experiments_name)
    print('Experiments:', num_experiments)
    print('Max train:', max_train)
    print('Include None:', include_none)
    print('Examples: n={}, d={}'.format(*X.shape))
    print('Binary:', binary)
    print('Estimated hours:', num_experiments * SEC_PER_EXPERIMENT / HOURS_TO_SECS)
    user_input('Begin?')

    # TODO: residual learning for sim to real transfer
    # TODO: can always be conservative and add sim negative examples
    # TODO: combine all data to write in one folder
    data_dir = os.path.join(DATA_DIRECTORY, domain.name) # EXPERIMENT_DIRECTORY
    experiments_dir = os.path.join(data_dir, experiments_name)
    mkdir(experiments_dir)
    start_time = time.time()
    experiments = []
    for round_idx in range(args.num_rounds):
        round_dir = os.path.join(data_dir, experiments_name, str(round_idx))
        mkdir(round_dir)
        seed = hash(time.time())
        train_test_file = os.path.join(round_dir, 'data.pk3')
        if not os.path.exists(train_test_file):
            X_train, Y_train, X_test, Y_test = split_data(X, Y, split, max_train)
            X_test, Y_test = X_test[:max_test], Y_test[:max_test]
            write_pickle(train_test_file, (X_train, Y_train, X_test, Y_test))
        else:
            X_train, Y_train, X_test, Y_test = read_pickle(train_test_file)
        print('Train examples:', X_train.shape)
        print('Test examples:', X_test.shape)
        # TODO: need to be super careful when running with multiple contexts

        for algorithm, active_sizes in algorithms:
            # active_sizes = [first #trainingdata selected from X_train, #active exploration + #trainingdata]
            print(SEPARATOR)
            print('Round: {} | {} | Seed: {} | Sizes: {}'.format(
                round_idx, algorithm, seed, active_sizes))
            # TODO: allow keyboard interrupt
            if active_sizes is None:
                learner = algorithm.name
                active_size = None
                train_confusion = None
                experiments.append(evaluate_learner(domain, seed, train_confusion, X_test, Y_test,
                                                    algorithm, learner, active_size,
                                                    args.num_trials, alphas, serial))
            else:
                # [10 20 25] take first 10 samples from X_train to train the model, 10 samples chosen
                # actively sequentially + evaluate model, 5 samples chosen actively sequentially + evaluate model
                # Could always keep around all the examples and retrain
                # TODO: segfaults when this runs in parallel
                # TODO: may be able to retrain in parallel if I set OPENBLAS_NUM_THREADS
                learner_prior_nx = 0
                '''
                if algorithm.hyperparameters:
                    if domain.skill == 'pour':
                        learner_file = '/Users/ziw/ltamp_pr2/data/pour_19-06-13_00-59-21/19-08-09_19-30-01_r=10_t=[50,400]_n=1/{}/gp_active_mlp_true_true.pk3'.format(
                            round_idx)
                    elif domain.skill == 'scoop':
                        learner_file = '/Users/ziw/ltamp_pr2/data/scoop_19-06-10_20-16-59_top-diameter/19-08-09_19-34-56_r=10_t=[50,400]_n=1/{}/gp_active_mlp_true_true.pk3'.format(
                            round_idx)
                    learner = read_pickle(learner_file)
                    learner_prior_nx = learner.nx
                    learner.retrain(newx=X_train[:active_sizes[0]], newy=Y_train[:active_sizes[0], None])
                else:
                '''
                learner, train_confusion = create_learner(domain, X_train, Y_train, split, algorithm,
                                                          num_train=active_sizes[0],
                                                          query_type=query_type,
                                                          is_adaptive=is_adaptive)
                if algorithm.name == STRADDLE_GP:
                    X_select, Y_select = X_train[active_sizes[0]:], Y_train[active_sizes[0]:]
                for active_size in active_sizes:
                    num_active = active_size - learner.nx + learner_prior_nx # learner.nx is len(learner.xx)
                    print('\nRound: {} | {} | Seed: {} | Size: {} | Active: {}'.format(
                        round_idx, algorithm, seed, active_size, num_active))
                    if algorithm.name == STRADDLE_GP:
                        X_select, Y_select = active_learning_discrete(learner, num_active,
                                                                      X_select, Y_select)
                    #if args.save:
                    save_learner(round_dir, learner)
                    experiments.append(evaluate_learner(domain, seed, None, X_test, Y_test,
                                                        algorithm, learner, active_size,
                                                        args.num_trials, alphas, serial))
                    save_experiments(experiments_dir, experiments)

    print(SEPARATOR)
    if experiments:
        save_experiments(experiments_dir, experiments)
        plot_experiments(domain, experiments_name, experiments_dir, experiments,
                         include_none=False) #include_none=include_none)
        print('Experiments:', experiments_dir)
        print('Total experiments:', len(experiments))
        print('Total hours:', elapsed_time(start_time) / HOURS_TO_SECS)
def solve_collision_free(door, obstacle, max_iterations=100, step_size=math.radians(5),
                         min_distance=2e-2, draw=True):
    joints = get_movable_joints(door)
    door_link = child_link_from_joint(joints[-1])
    #print(get_com_pose(door, door_link))
    #print(get_link_inertial_pose(door, door_link))
    #print(get_link_pose(door, door_link))
    #draw_pose(get_com_pose(door, door_link))

    handles = []
    success = False
    start_time = time.time()
    for iteration in range(max_iterations):
        current_conf = np.array(get_joint_positions(door, joints))
        collision_infos = get_closest_points(door, obstacle, link1=door_link,
                                             max_distance=min_distance)
        if not collision_infos:
            success = True
            break
        collision_infos = sorted(collision_infos, key=lambda info: info.contactDistance)
        collision_infos = collision_infos[:1] # TODO: average all these
        if draw:
            for collision_info in collision_infos:
                handles.extend(draw_collision_info(collision_info))
            wait_if_gui()
        [collision_info] = collision_infos[:1]
        distance = collision_info.contactDistance
        print('Iteration: {} | Collisions: {} | Distance: {:.3f} | Time: {:.3f}'.format(
            iteration, len(collision_infos), distance, elapsed_time(start_time)))
        if distance >= min_distance:
            success = True
            break
        # TODO: convergence or decay in step size
        direction = step_size * get_unit_vector(collision_info.contactNormalOnB) # B->A (already normalized)
        contact_point = collision_info.positionOnA
        #com_pose = get_com_pose(door, door_link) # TODO: be careful here
        com_pose = get_link_pose(door, door_link)
        local_point = tform_point(invert(com_pose), contact_point)
        #local_point = unit_point()
        translate, rotate = compute_jacobian(door, door_link, point=local_point)
        delta_conf = np.array([np.dot(translate[mj], direction) # + np.dot(rotate[mj], direction)
                               for mj in movable_from_joints(door, joints)])
        new_conf = current_conf + delta_conf
        if violates_limits(door, joints, new_conf):
            break
        set_joint_positions(door, joints, new_conf)
        if draw:
            wait_if_gui()
    remove_handles(handles)
    print('Success: {} | Iteration: {} | Time: {:.3f}'.format(
        success, iteration, elapsed_time(start_time)))
    #quit()
    return success
def main(num_iterations=10):
    # The URDF loader seems robust to package:// and slightly wrong relative paths?
    connect(use_gui=True)
    add_data_path()
    plane = p.loadURDF("plane.urdf")
    side = 0.05
    block = create_box(w=side, l=side, h=side, color=RED)

    start_time = time.time()
    with LockRenderer():
        with HideOutput():
            # TODO: MOVO must be loaded last
            robot = load_model(MOVO_URDF, fixed_base=True)
            #set_all_color(robot, color=MOVO_COLOR)
            assign_link_colors(robot)
            base_joints = joints_from_names(robot, BASE_JOINTS)
            draw_base_limits((get_min_limits(robot, base_joints),
                              get_max_limits(robot, base_joints)), z=1e-2)
    print('Load time: {:.3f}'.format(elapsed_time(start_time)))

    dump_body(robot)
    #print(get_colliding(robot))
    #for arm in ARMS:
    #    test_close_gripper(robot, arm)
    #test_grasps(robot, block)

    arm = RIGHT
    tool_link = link_from_name(robot, TOOL_LINK.format(arm))

    #joint_names = HEAD_JOINTS
    #joints = joints_from_names(robot, joint_names)
    joints = base_joints + get_arm_joints(robot, arm)
    #joints = get_movable_joints(robot)
    print('Joints:', get_joint_names(robot, joints))

    ik_info = MOVO_INFOS[arm]
    print_ik_warning(ik_info)
    ik_joints = get_ik_joints(robot, ik_info, tool_link)
    #fixed_joints = []
    fixed_joints = ik_joints[:1]
    #fixed_joints = ik_joints

    wait_if_gui('Start?')
    sample_fn = get_sample_fn(robot, joints)
    handles = []
    for i in range(num_iterations):
        conf = sample_fn()
        print('Iteration: {}/{} | Conf: {}'.format(i + 1, num_iterations, np.array(conf)))
        set_joint_positions(robot, joints, conf)
        tool_pose = get_link_pose(robot, tool_link)
        remove_handles(handles)
        handles = draw_pose(tool_pose)
        wait_if_gui()
        #conf = next(ikfast_inverse_kinematics(robot, MOVO_INFOS[arm], tool_link, tool_pose,
        #                                      fixed_joints=fixed_joints, max_time=0.1), None)
        #if conf is not None:
        #    set_joint_positions(robot, ik_joints, conf)
        #    wait_if_gui()
        test_retraction(robot, ik_info, tool_link, fixed_joints=fixed_joints,
                        max_time=0.05, max_candidates=100)
    disconnect()