def create_meshes(ty, draw=False, visualize=False):
    """Generate an OBJ mesh and a point cloud for every object prefix of type ``ty``.

    For each entry in ``OBJECT_PROPERTIES[ty]``, approximates the object as a
    surface of revolution, writes the mesh to the models directory, and derives
    a PCD point cloud from it.

    Args:
        ty: object-type key into OBJECT_PROPERTIES (also used to format the
            model directory and file-name suffix).
        draw: if True, draw the curvature of each generated side profile.
        visualize: if True, load each mesh into the simulator for inspection
            (prints its cylinder approximation and waits for the user).

    Raises:
        ValueError: if both ``draw`` and ``visualize`` are requested at once.
            (The original code asserted this with a "# Incompatible?" note;
            a raise survives ``python -O`` whereas ``assert`` does not.)
    """
    if visualize and draw:
        raise ValueError('draw and visualize are mutually exclusive')
    suffix = SUFFIX_TEMPLATE.format(ty)
    models_path = os.path.join(get_models_path(), MODELS_TEMPLATE.format(ty))
    ensure_dir(models_path)
    for prefix, properties in OBJECT_PROPERTIES[ty].items():
        color = normalize_rgb(properties.color)
        side = approximate_bowl(properties, d=2)  # d=1 doesn't seem to really work
        name = prefix + suffix
        print(name, color)
        print(side)
        if draw:
            draw_curvature(side, name=name)
        # Revolve the 2D side profile into a closed 3D mesh.
        chunks = make_revolute_chunks(side, n_theta=60, n_chunks=10,
                                      in_off=properties.thickness / 4.,
                                      scale=SCALE)
        obj_path = os.path.join(models_path, OBJ_TEMPLATE.format(name))
        write_obj(chunks, obj_path)
        if visualize:
            body = create_obj(obj_path, color=color)
            _, dims = approximate_as_cylinder(body)
            print(dims)
            wait_for_user()
            remove_body(body)
        pcd_path = os.path.join(models_path, PCD_TEMPLATE.format(name))
        pcd_from_mesh(obj_path, pcd_path)
def reset(self):
    """Remove every tracked camera and named body, leaving the registries empty."""
    # Destroy camera bodies first, then forget all camera records at once.
    for cam in tuple(self.cameras.values()):
        remove_body(cam.body)
    self.cameras = {}
    # Snapshot the names: self.remove_body() mutates body_from_name while we iterate.
    for body_name in list(self.body_from_name):
        self.remove_body(body_name)
def main():
    """Demo: sample base placements from which a PR2 can see a block, aim the
    head at it with inverse visibility, and show detection/view cones."""
    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(
            "models/drake/pr2_description/urdf/pr2_simplified.urdf")
    # Put the arms and torso into a canonical rest configuration.
    for group, rest_positions in [('left_arm', REST_LEFT_ARM),
                                  ('right_arm', rightarm_from_leftarm(REST_LEFT_ARM)),
                                  ('torso', [0.2])]:
        set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS[group]), rest_positions)
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))

    # head_link = link_from_name(pr2, HEAD_LINK)
    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    head_link = head_joints[-1]  # NOTE(review): joint index used as its child link index

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_pose_gen = visible_base_generator(pr2, target_point, distance_range)
    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for _ in range(5):
        # Teleport the base to the next sampled visible placement.
        set_joint_positions(pr2, base_joints, next(base_pose_gen))
        p.addUserDebugLine(point_from_pose(get_link_pose(pr2, head_link)),
                           target_point, lineColorRGB=(1, 0, 0))  # addUserDebugText
        p.addUserDebugLine(point_from_pose(get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                           target_point, lineColorRGB=(0, 0, 1))  # addUserDebugText

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2, target_point)
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone, get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone, get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_for_user()
        remove_body(detect_cone)
        remove_body(view_cone)
    disconnect()
def remove_body(self, name):
    """Destroy the simulated body registered under ``name`` and drop its record."""
    handle = self.get_body(name)
    # remove_body here resolves to the module-level simulator helper, not this method.
    remove_body(handle)
    self.body_from_name.pop(name)
def compute_motion(robot, fixed_obstacles, element_bodies,
                   printed_elements, start_conf, end_conf,
                   collisions=True, max_time=INF, buffer=0.1, smooth=100):
    """Plan a joint-space motion for ``robot`` from ``start_conf`` to ``end_conf``,
    avoiding fixed obstacles and the bodies of already-printed elements.

    A bounding mesh around the printed structure is used to refine extension
    resolution near the structure: when both endpoints of an extension segment
    are within ``buffer`` of the bounding mesh, the segment is re-extended at
    10x finer resolution.

    Args:
        robot: robot body whose movable joints are planned.
        fixed_obstacles: static bodies always treated as obstacles.
        element_bodies: mapping from element to its simulated body.
        printed_elements: elements already printed (their bodies are obstacles).
        start_conf, end_conf: start/goal joint configurations (must match the
            robot's movable-joint count; asserted below for end_conf).
        collisions: if False, element/fixed obstacles are ignored.
        max_time: planning time budget passed to birrt.
        buffer: proximity distance to the bounding mesh that triggers fine extension.
        smooth: number of smoothing iterations for birrt.

    Returns:
        A MotionTrajectory over the movable joints, or None if planning fails.
    """
    # TODO: can also just plan to initial conf and then shortcut
    joints = get_movable_joints(robot)
    assert len(joints) == len(end_conf)
    weights = JOINT_WEIGHTS
    resolutions = np.divide(RESOLUTION * np.ones(weights.shape), weights)
    # NOTE(review): disabled_collisions and custom_limits are only consumed by
    # the commented-out get_collision_fn call below; currently unused.
    disabled_collisions = get_disabled_collisions(robot)
    custom_limits = {}
    #element_from_body = {b: e for e, b in element_bodies.items()}
    hulls, obstacles = {}, []
    if collisions:
        element_obstacles = {element_bodies[e] for e in printed_elements}
        obstacles = set(fixed_obstacles) | element_obstacles
        #hulls, obstacles = decompose_structure(fixed_obstacles, element_bodies, printed_elements)
    #print(hulls)
    #print(obstacles)
    #wait_for_user()

    #printed_elements = set(element_bodies)
    bounding = None
    if printed_elements:
        # TODO: pass in node_points
        # Conservative hull around the printed structure; used only as a
        # proximity trigger for fine extension, not as a hard obstacle.
        bounding = create_bounding_mesh(printed_elements, element_bodies=element_bodies,
                                        node_points=None, buffer=0.01)  # buffer=buffer)
        #wait_for_user()

    sample_fn = get_sample_fn(robot, joints, custom_limits=custom_limits)
    distance_fn = get_distance_fn(robot, joints, weights=weights)
    extend_fn = get_extend_fn(robot, joints, resolutions=resolutions)
    #collision_fn = get_collision_fn(robot, joints, obstacles, attachments={}, self_collisions=SELF_COLLISIONS,
    #                                disabled_collisions=disabled_collisions, custom_limits=custom_limits, max_distance=0.)
    collision_fn = get_element_collision_fn(robot, obstacles)
    fine_extend_fn = get_extend_fn(robot, joints, resolutions=1e-1*resolutions)  #, norm=INF)

    def test_bounding(q):
        # Returns (q, True) when the robot at q is within `buffer` of the
        # bounding mesh. Side effect: moves the robot to q.
        set_joint_positions(robot, joints, q)
        collision = (bounding is not None) and pairwise_collision(robot, bounding,
                                                                  max_distance=buffer)  # body_collision
        return q, collision

    def dynamic_extend_fn(q_start, q_end):
        # Coarse extension, refined 10x wherever both segment endpoints are
        # near the printed structure.
        # TODO: retime trajectories to be move more slowly around the structure
        for (q1, c1), (q2, c2) in get_pairs(map(test_bounding, extend_fn(q_start, q_end))):
            # print(c1, c2, len(list(fine_extend_fn(q1, q2))))
            # set_joint_positions(robot, joints, q2)
            # wait_for_user()
            if c1 and c2:
                for q in fine_extend_fn(q1, q2):
                    # set_joint_positions(robot, joints, q)
                    # wait_for_user()
                    yield q
            else:
                yield q2

    def element_collision_fn(q):
        # Element obstacles first; then the (currently empty) hull filter:
        # a hull hit only counts if one of its member bodies also collides.
        if collision_fn(q):
            return True
        #for body in get_bodies_in_region(get_aabb(robot)):  # Perform per link?
        #    if (element_from_body.get(body, None) in printed_elements) and pairwise_collision(robot, body):
        #        return True
        for hull, bodies in hulls.items():
            if pairwise_collision(robot, hull) and any(pairwise_collision(robot, body)
                                                       for body in bodies):
                return True
        return False

    path = None
    if check_initial_end(start_conf, end_conf, collision_fn):
        path = birrt(start_conf, end_conf, distance_fn, sample_fn,
                     dynamic_extend_fn, element_collision_fn,
                     restarts=50, iterations=100, smooth=smooth, max_time=max_time)
    # path = plan_joint_motion(robot, joints, end_conf, obstacles=obstacles,
    #                          self_collisions=SELF_COLLISIONS, disabled_collisions=disabled_collisions,
    #                          weights=weights, resolutions=resolutions,
    #                          restarts=50, iterations=100, smooth=100)

    # if (bounding is not None) and (path is not None):
    #     for i, q in enumerate(path):
    #         print('{}/{}'.format(i, len(path)))
    #         set_joint_positions(robot, joints, q)
    #         wait_for_user()
    # Clean up debug geometry regardless of planning outcome.
    if bounding is not None:
        remove_body(bounding)
    for hull in hulls:
        remove_body(hull)
    if path is None:
        print('Failed to find a motion plan!')
        return None
    return MotionTrajectory(robot, joints, path)
def visualize_collected_pours(paths, num=6, save=True):
    """Visualize collected pouring results, one bowl type at a time.

    Loads pour results from ``paths``, groups them by bowl type, and for each
    bowl arranges up to ``num`` cups in a circle around the bowl, colored by
    their (rescaled) pour score.

    Args:
        paths: data files readable by read_data(), each a list of pour results.
        num: maximum number of cups shown per bowl type.
        save: if True, pause (wait_for_user) per bowl — presumably so the user
            can screenshot; the automated capture code below is commented out.
    """
    # Group every result by the bowl type it was collected with.
    result_from_bowl = {}
    for path in randomize(paths):
        results = read_data(path)
        print(path, len(results))
        for result in results:
            result_from_bowl.setdefault(result['feature']['bowl_type'], []).append(result)

    world = create_world()
    environment = get_bodies()  # bodies present before we spawn cups/bowls
    #collector = SKILL_COLLECTORS['pour']
    print(get_camera())
    for bowl_type in sorted(result_from_bowl):  # TODO: visualize same
        # Remove everything spawned for the previous bowl type.
        for body in get_bodies():
            if body not in environment:
                remove_body(body)
        print('Bowl type:', bowl_type)
        bowl_body = load_cup_bowl(bowl_type)
        bowl_pose = get_pose(bowl_body)
        results = result_from_bowl[bowl_type]
        results = randomize(results)
        # Collect up to `num` usable results: (sort key, fraction, score value,
        # cup body, initial cup pose along the pour path).
        score_cup_pose = []
        for i, result in enumerate(results):
            if num <= len(score_cup_pose):
                break
            feature = result['feature']
            parameter = result['parameter']
            score = result['score']
            # Skip unscored, unexecuted, or human-annotated trials.
            if (score is None) or not result['execution'] or result['annotation']:
                continue
            cup_body = load_cup_bowl(feature['cup_type'])
            world.bodies[feature['bowl_name']] = bowl_body
            world.bodies[feature['cup_name']] = cup_body
            fraction = compute_fraction_filled(score)
            #value = collector.score_fn(feature, parameter, score)
            value = pour_score(feature, parameter, score)
            print(i, feature['cup_type'], fraction, value)
            # NOTE: `path` here shadows the file-path loop variable above.
            path, _ = pour_path_from_parameter(world, feature, parameter)
            sort = fraction
            #sort = parameter['pitch']
            #sort = parameter['axis_in_bowl_z']
            score_cup_pose.append((sort, fraction, value, cup_body, path[0]))
        #order = score_cup_pose
        order = randomize(score_cup_pose)
        #order = sorted(score_cup_pose)
        # Evenly spaced headings around the bowl, one per cup.
        angles = np.linspace(0, 2*np.pi, num=len(score_cup_pose), endpoint=False)  # Halton
        for angle, (_, fraction, value, cup_body, pose) in zip(angles, order):
            #fraction = clip(fraction, min_value=0, max_value=1)
            value = clip(value, *DEFAULT_INTERVAL)
            # Deliberately overwrites fraction: color encodes the rescaled
            # score value, not the fill fraction.
            fraction = rescale(value, DEFAULT_INTERVAL, new_interval=(0, 1))
            #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN)
            color = interpolate_hue(fraction)
            set_color(cup_body, apply_alpha(color, alpha=0.25))
            #angle = random.uniform(-np.pi, np.pi)
            #angle = 0
            # Rotate the cup's recorded pose about the bowl axis to its slot.
            rotate_bowl = Pose(euler=Euler(yaw=angle))
            cup_pose = multiply(bowl_pose, invert(rotate_bowl), pose)
            set_pose(cup_body, cup_pose)
            #wait_for_user()
            #remove_body(cup_body)
        if save:
            #filename = os.path.join('workspace', '{}.png'.format(bowl_type))
            #save_image(filename, take_picture())  # [0, 255]
            #wait_for_duration(duration=0.5)
            # TODO: get window location
            #os.system("screencapture -R {},{},{},{} {}".format(
            #    275, 275, 500, 500, filename))  # -R<x,y,w,h> capture screen rect
            wait_for_user()
        remove_body(bowl_body)
def main():
    """Demo: sample base placements from which the PR2 can see a block,
    aim the head at it via inverse visibility, and display the detection
    and view cones for each placement."""
    # TODO: update this example
    connect(use_gui=True)
    with HideOutput():
        pr2 = load_model(DRAKE_PR2_URDF)
    # Canonical rest configuration for arms and torso.
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']), [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))
    draw_point(target_point)

    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    #head_link = child_link_from_joint(head_joints[-1])
    #head_name = get_link_name(pr2, head_link)
    # Camera frame used for aiming the head.
    head_name = 'high_def_optical_frame'  # HEAD_LINK_NAME | high_def_optical_frame | high_def_frame
    head_link = link_from_name(pr2, head_name)

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    base_generator = visible_base_generator(pr2, target_point, distance_range)
    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        # Teleport the base to the next sampled placement with line of sight.
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)
        # Red: aiming frame to target; blue: HEAD_LINK_NAME frame to target.
        handles = [
            add_line(point_from_pose(get_link_pose(pr2, head_link)),
                     target_point, color=RED),
            add_line(point_from_pose(get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                     target_point, color=BLUE),
        ]
        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2, target_point, head_name=head_name,
                                       head_joints=head_joints)
        assert head_conf is not None
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        # Visualize the detection cone for the block and the full view cone,
        # both attached at the head camera frame.
        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        set_pose(detect_cone, get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone, get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_if_gui()
        remove_body(detect_cone)
        remove_body(view_cone)
    disconnect()