Example #1
def color_structure(element_bodies, printed, next_element):
    # TODO: could also do this with debug segments
    element_colors = {}
    for element in printed:
        element_colors[element] = apply_alpha(BLUE, alpha=1)
    element_colors[next_element] = apply_alpha(GREEN, alpha=1)
    remaining = set(element_bodies) - printed - {next_element}
    for element in remaining:
        element_colors[element] = apply_alpha(RED, alpha=0.5)
    for element, color in element_colors.items():
        body = element_bodies[element]
        [shape] = get_visual_data(body)
        if color != shape.rgbaColor:
            set_color(body, color=color)
Example #2
def randomize_body(name,
                   width_range=(1., 1.),
                   height_range=(1., 1.),
                   mass_range=(1., 1.)):
    average_mass = MODEL_MASSES[get_type(name)]
    obj_path, color = load_cup_bowl_obj(get_type(name))
    if obj_path is None:
        obj_path, color = get_body_obj(name), apply_alpha(get_color(name))
    width_scale = np.random.uniform(*width_range)
    height_scale = np.random.uniform(*height_range)
    #if (width_scale == 1.) and (height_scale == 1.):
    #    return load_pybullet(obj_path)
    transform = np.diag([width_scale, width_scale, height_scale])
    transformed_obj_file = transform_obj_file(read(obj_path), transform)
    transformed_dir = os.path.join(os.getcwd(), 'temp_models/')
    ensure_dir(transformed_dir)
    global RANDOMIZED_OBJS
    transformed_obj_path = os.path.join(
        transformed_dir,
        'transformed_{}_{}'.format(len(RANDOMIZED_OBJS),
                                   os.path.basename(obj_path)))
    #RANDOMIZED_OBJS[name].append(transformed_obj_path) # pybullet caches obj files
    write(transformed_obj_path, transformed_obj_file)
    # TODO: could scale mass proportionally
    mass_scale = np.random.uniform(*mass_range)
    mass = mass_scale * average_mass
    body = create_obj(transformed_obj_path, scale=1, mass=mass, color=color)
    RANDOMIZED_OBJS[body] = ObjInfo(width_scale, height_scale, mass_scale)
    return body
Example #3
def create_bounding_mesh(printed_elements, element_bodies=None, node_points=None, buffer=0.):
    # TODO: use bounding boxes instead of points
    # TODO: connected components
    assert printed_elements
    assert element_bodies or node_points
    printed_points = []
    if node_points is not None:
        printed_points.extend(node_points[n] for element in printed_elements for n in element)
    if element_bodies is not None:
        for element in printed_elements:
            body = element_bodies[element]
            printed_points.extend(apply_affine(get_pose(body), vertices_from_link(body, BASE_LINK)))

    if buffer != 0.:
        half_extents = buffer*np.ones(3)
        for point in list(printed_points):
            printed_points.extend(np.array(point) + np.array(corner)
                                  for corner in get_aabb_vertices(AABB(-half_extents, half_extents)))

    rgb = colorsys.hsv_to_rgb(h=random.random(), s=1, v=1)
    #rgb = RED
    try:
        mesh = convex_hull(printed_points)
        # handles = draw_mesh(mesh)
        return create_mesh(mesh, under=True, color=apply_alpha(rgb, 0.5))
    except QhullError as e:
        print(printed_elements)
        raise e
Example #4
def main():
    # The URDF loader seems robust to package:// and slightly wrong relative paths?
    connect(use_gui=True)
    add_data_path()
    plane = p.loadURDF("plane.urdf")
    with LockRenderer():
        with HideOutput():
            robot = load_model(MOVO_URDF, fixed_base=True)
        for link in get_links(robot):
            set_color(robot,
                      color=apply_alpha(0.25 * np.ones(3), 1),
                      link=link)
        base_joints = joints_from_names(robot, BASE_JOINTS)
        draw_base_limits((get_min_limits(robot, base_joints),
                          get_max_limits(robot, base_joints)), z=1e-2)
    dump_body(robot)
    wait_for_user('Start?')

    #for arm in ARMS:
    #    test_close_gripper(robot, arm)

    arm = 'right'
    tool_link = link_from_name(robot, TOOL_LINK.format(arm))
    #joint_names = HEAD_JOINTS
    #joints = joints_from_names(robot, joint_names)
    joints = base_joints + get_arm_joints(robot, arm)
    #joints = get_movable_joints(robot)
    print('Joints:', get_joint_names(robot, joints))

    ik_joints = get_ik_joints(robot, MOVO_INFOS[arm], tool_link)
    #fixed_joints = []
    fixed_joints = ik_joints[:1]
    #fixed_joints = ik_joints

    sample_fn = get_sample_fn(robot, joints)
    handles = []
    for i in range(10):
        print('Iteration:', i)
        conf = sample_fn()
        print(conf)
        set_joint_positions(robot, joints, conf)
        tool_pose = get_link_pose(robot, tool_link)
        remove_handles(handles)
        handles = draw_pose(tool_pose)
        wait_for_user()

        #conf = next(ikfast_inverse_kinematics(robot, MOVO_INFOS[arm], tool_link, tool_pose,
        #                                      fixed_joints=fixed_joints, max_time=0.1), None)
        #if conf is not None:
        #    set_joint_positions(robot, ik_joints, conf)
        #wait_for_user()
        test_retraction(robot,
                        MOVO_INFOS[arm],
                        tool_link,
                        fixed_joints=fixed_joints,
                        max_time=0.1)
    disconnect()
Example #5
def add_table(world, use_surface=False):
    # Use the table in simulation to ensure no penetration
    if use_surface:
        # TODO: note whether the convex hull image was clipped (only partial)
        table_mesh = rectangular_mesh(width=0.6, length=1.2)
        color = apply_alpha(COLORS['tan'])
        table_body = create_mesh(table_mesh, under=True, color=color)
        set_pose(table_body, TABLE_POSE)
        world.perception.sim_bodies[TABLE_NAME] = table_body
        world.perception.sim_surfaces[TABLE_NAME] = None
    else:
        table_body = world.perception.add_surface(TABLE_NAME, TABLE_POSE)
    return table_body
Example #6
def create_elements_bodies(node_points,
                           elements,
                           color=apply_alpha(RED, alpha=1),
                           diameter=ELEMENT_DIAMETER,
                           shrink=ELEMENT_SHRINK):
    # TODO: could scale the whole environment
    # TODO: create a version without shrinking for transit planning
    # URDF_USE_IMPLICIT_CYLINDER
    element_bodies = []
    for (n1, n2) in elements:
        p1, p2 = node_points[n1], node_points[n2]
        height = max(np.linalg.norm(p2 - p1) - 2 * shrink, 0)
        #if height == 0: # Cannot keep this here
        #    continue
        center = (p1 + p2) / 2
        # extents = (p2 - p1) / 2

        delta = p2 - p1
        x, y, z = delta
        phi = np.math.atan2(y, x)
        theta = np.math.acos(z / np.linalg.norm(delta))
        quat = quat_from_euler(Euler(pitch=theta, yaw=phi))
        # p1 is z=-height/2, p2 is z=+height/2

        # Much smaller than cylinder
        # Also faster, C_shape 177.398 vs 400
        body = create_box(diameter,
                          diameter,
                          height,
                          color=color,
                          mass=STATIC_MASS)
        set_color(body, color)
        set_point(body, center)
        set_quat(body, quat)
        #dump_body(body, fixed=True)

        # Visually, smallest diameter is 2e-3
        # The geometries and bounding boxes seem correct though
        # TODO: create_cylinder takes in a radius not diameter
        #body = create_cylinder(ELEMENT_DIAMETER, height, color=color, mass=STATIC_MASS)
        #print('Diameter={:.5f} | Height={:.5f}'.format(ELEMENT_DIAMETER/2., height))
        #print(get_aabb_extent(get_aabb(body)).round(6).tolist())
        #print(get_visual_data(body))
        #print(get_collision_data(body))
        #set_point(body, center)
        #set_quat(body, quat)
        #draw_aabb(get_aabb(body))

        element_bodies.append(body)
        #wait_for_user()
    return element_bodies
Example #7
def create_beads(num, radius, parameters={}, uniform_color=None, init_x=10.0):
    beads = []
    if num <= 0:
        return beads
    #timestep = 1/1200. # 1/350.
    #p.setTimeStep(timestep, physicsClientId=get_client())
    for i in range(num):
        color = apply_alpha(np.random.random(3)) if uniform_color is None else uniform_color
        body = create_sphere(radius, color=color)  # TODO: other shapes
        #set_color(droplet, color)
        #print(get_dynamics_info(droplet))
        set_dynamics(body, **parameters)
        x = init_x + i * (2 * radius + 1e-2)
        set_point(body, Point(x=x))
        beads.append(body)
    return beads
Example #8
def load_world(use_floor=True):
    obstacles = []
    #side, height = 10, 0.01
    robot = load_robot()
    with HideOutput():
        lower, _ = get_aabb(robot)
        if use_floor:
            #floor = load_model('models/short_floor.urdf', fixed_base=True)
            #add_data_path()
            #floor = load_pybullet('plane.urdf', fixed_base=True)
            #set_color(floor, TAN)
            #floor = create_box(w=side, l=side, h=height, color=apply_alpha(GROUND_COLOR))
            floor = create_plane(color=apply_alpha(GROUND_COLOR))
            obstacles.append(floor)
            #set_point(floor, Point(z=lower[2]))
            set_point(floor, Point(x=1.2, z=0.023 - 0.025))  # -0.02
        else:
            floor = None  # TODO: make this an empty list of obstacles
        #set_all_static()
    return obstacles, robot
Example #9
def add_camera(self,
               name,
               pose,
               camera_matrix,
               max_depth=KINECT_DEPTH,
               display=False):
    cone = get_viewcone(depth=max_depth,
                        camera_matrix=camera_matrix,
                        color=apply_alpha(RED, 0.1),
                        mass=0,
                        collision=False)
    set_pose(cone, pose)
    if display:
        kinect = load_pybullet(KINECT_URDF, fixed_base=True)
        set_pose(kinect, pose)
        set_color(kinect, BLACK)
        self.add(name, kinect)
    self.cameras[name] = Camera(cone, camera_matrix, max_depth)
    draw_pose(pose)
    step_simulation()
    return name
Example #10
def validate_trajectories(element_bodies, fixed_obstacles, trajectories):
    if trajectories is None:
        return False
    # TODO: combine all validation procedures
    remove_all_debug()
    for body in element_bodies.values():
        set_color(body, np.zeros(4))

    print('Trajectories:', len(trajectories))
    obstacles = list(fixed_obstacles)
    for i, trajectory in enumerate(trajectories):
        for _ in trajectory.iterate():
            #wait_for_user()
            if any(pairwise_collision(trajectory.robot, body) for body in obstacles):
                if has_gui():
                    print('Collision on trajectory {}'.format(i))
                    wait_for_user()
                return False
        if isinstance(trajectory, PrintTrajectory):
            body = element_bodies[trajectory.element]
            set_color(body, apply_alpha(RED))
            obstacles.append(body)
    return True
Example #11
def load_body(name):
    average_mass = MODEL_MASSES[get_type(name)]
    obj_path, color = load_cup_bowl_obj(get_type(name))
    if obj_path is None:
        obj_path, color = get_body_obj(name), apply_alpha(get_color(name))
    return create_obj(obj_path, scale=1, mass=average_mass, color=color)
Example #12
def visualize_collected_pours(paths, num=6, save=True):
    result_from_bowl = {}
    for path in randomize(paths):
        results = read_data(path)
        print(path, len(results))
        for result in results:
            result_from_bowl.setdefault(result['feature']['bowl_type'], []).append(result)

    world = create_world()
    environment = get_bodies()
    #collector = SKILL_COLLECTORS['pour']
    print(get_camera())

    for bowl_type in sorted(result_from_bowl):
        # TODO: visualize same
        for body in get_bodies():
            if body not in environment:
                remove_body(body)
        print('Bowl type:', bowl_type)
        bowl_body = load_cup_bowl(bowl_type)
        bowl_pose = get_pose(bowl_body)

        results = result_from_bowl[bowl_type]
        results = randomize(results)

        score_cup_pose = []
        for i, result in enumerate(results):
            if num <= len(score_cup_pose):
                break
            feature = result['feature']
            parameter = result['parameter']
            score = result['score']
            if (score is None) or not result['execution'] or result['annotation']:
                continue
            cup_body = load_cup_bowl(feature['cup_type'])
            world.bodies[feature['bowl_name']] = bowl_body
            world.bodies[feature['cup_name']] = cup_body
            fraction = compute_fraction_filled(score)
            #value = collector.score_fn(feature, parameter, score)
            value = pour_score(feature, parameter, score)
            print(i, feature['cup_type'], fraction, value)
            path, _ = pour_path_from_parameter(world, feature, parameter)
            sort = fraction
            #sort = parameter['pitch']
            #sort = parameter['axis_in_bowl_z']
            score_cup_pose.append((sort, fraction, value, cup_body, path[0]))

        #order = score_cup_pose
        order = randomize(score_cup_pose)
        #order = sorted(score_cup_pose)
        angles = np.linspace(0, 2*np.pi, num=len(score_cup_pose), endpoint=False) # Halton
        for angle, (_, fraction, value, cup_body, pose) in zip(angles, order):
            #fraction = clip(fraction, min_value=0, max_value=1)
            value = clip(value, *DEFAULT_INTERVAL)
            fraction = rescale(value, DEFAULT_INTERVAL, new_interval=(0, 1))
            #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN)
            color = interpolate_hue(fraction)
            set_color(cup_body, apply_alpha(color, alpha=0.25))
            #angle = random.uniform(-np.pi, np.pi)
            #angle = 0
            rotate_bowl = Pose(euler=Euler(yaw=angle))
            cup_pose = multiply(bowl_pose, invert(rotate_bowl), pose)
            set_pose(cup_body, cup_pose)
            #wait_for_user()
            #remove_body(cup_body)

        if save:
            #filename = os.path.join('workspace', '{}.png'.format(bowl_type))
            #save_image(filename, take_picture())  # [0, 255]
            #wait_for_duration(duration=0.5)
            # TODO: get window location
            #os.system("screencapture -R {},{},{},{} {}".format(
            #    275, 275, 500, 500, filename)) # -R<x,y,w,h> capture screen rect
            wait_for_user()
        remove_body(bowl_body)
Example #13
def visualize_vector_field(learner, bowl_type='blue_bowl', cup_type='red_cup',
                           num=500, delta=False, alpha=0.5):
    print(learner, len(learner.results))
    xx, yy, ww = learner.xx, learner.yy, learner.weights
    learner.hyperparameters = get_parameters(learner.model)
    print(learner.hyperparameters)
    learner.query_type = REJECTION  # BEST | REJECTION | STRADDLE

    world = create_world()
    world.bodies[bowl_type] = load_cup_bowl(bowl_type)
    world.bodies[cup_type] = load_cup_bowl(cup_type)
    feature = get_pour_feature(world, bowl_type, cup_type)
    set_point(world.bodies[cup_type], np.array([0.2, 0, 0]))

    # TODO: visualize training data as well
    # TODO: artificially limit training data to make a low-confidence region
    # TODO: visualize mean and var at the same time using intensity and hue
    print('Feature:', feature)
    with LockRenderer():
        #for parameter in islice(learner.parameter_generator(world, feature, valid=True), num):
        parameters = []
        while len(parameters) < num:
            parameter = learner.sample_parameter()
            # TODO: special color if invalid
            if is_valid_pour(world, feature, parameter):
                parameters.append(parameter)

    center, _ = approximate_as_prism(world.get_body(cup_type))
    bodies = []
    with LockRenderer():
        for parameter in parameters:
            path, _ = pour_path_from_parameter(world, feature, parameter)
            pose = path[0]
            body = create_cylinder(radius=0.001, height=0.02, color=apply_alpha(GREY, alpha))
            set_pose(body, multiply(pose, Pose(point=center)))
            bodies.append(body)

    #train_sizes = inclusive_range(10, 100, 1)
    #train_sizes = inclusive_range(10, 100, 10)
    #train_sizes = inclusive_range(100, 1000, 100)
    #train_sizes = [1000]
    train_sizes = [None]

    # training_poses = []
    # for result in learner.results[:train_sizes[-1]]:
    #     path, _ = pour_path_from_parameter(world, feature, result['parameter'])
    #     pose = path[0]
    #     training_poses.append(pose)

    # TODO: draw the example as well
    scores_per_size = []
    for train_size in train_sizes:
        if train_size is not None:
            learner.xx, learner.yy, learner.weights = xx[:train_size], yy[:train_size], ww[:train_size]
            learner.retrain()
        X = np.array([learner.func.x_from_feature_parameter(feature, parameter) for parameter in parameters])
        predictions = learner.predict_x(X, noise=False) # Noise is just a constant
        scores_per_size.append([prediction['mean'] for prediction in predictions]) # mean | variance
        #scores_per_size.append([1./np.sqrt(prediction['variance']) for prediction in predictions])
        #scores_per_size.append([prediction['mean'] / np.sqrt(prediction['variance']) for prediction in predictions])
        #plt.hist(scores_per_size[-1])
        #plt.show()
        #scores_per_size.append([scipy.stats.norm.interval(alpha=0.95, loc=prediction['mean'],
        #                                                  scale=np.sqrt(prediction['variance']))[0]
        #                         for prediction in predictions]) # mean | variance
        # score = learner.score(feature, parameter)

    if delta:
        scores_per_size = [np.array(s2) - np.array(s1) for s1, s2 in zip(scores_per_size, scores_per_size[1:])]
        train_sizes = train_sizes[1:]

    all_scores = [score for scores in scores_per_size for score in scores]
    #interval = (min(all_scores), max(all_scores))
    interval = scipy.stats.norm.interval(alpha=0.95, loc=np.mean(all_scores), scale=np.std(all_scores))
    #interval = DEFAULT_INTERVAL
    print('Interval:', interval)
    print('Mean: {:.3f} | Stdev: {:.3f}'.format(np.mean(all_scores), np.std(all_scores)))

    #train_body = create_cylinder(radius=0.002, height=0.02, color=apply_alpha(BLACK, 1.0))
    for i, (train_size, scores) in enumerate(zip(train_sizes, scores_per_size)):
        print('Train size: {} | Average: {:.3f} | Min: {:.3f} | Max: {:.3f}'.format(
            train_size, np.mean(scores), min(scores), max(scores)))
        handles = []
        # TODO: visualize delta
        with LockRenderer():
            #if train_size is None:
            #    train_size = len(learner.results)
            #set_pose(train_body, multiply(training_poses[train_size-1], Pose(point=center)))
            #set_pose(world.bodies[cup_type], training_poses[train_size-1])
            for score, body in zip(scores, bodies):
                score = clip(score, *interval)
                fraction = rescale(score, interval, new_interval=(0, 1))
                color = interpolate_hue(fraction)
                #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN) # TODO: convex combination
                set_color(body, apply_alpha(color, alpha))
                #set_pose(world.bodies[cup_type], pose)
                #draw_pose(pose, length=0.05)
                #handles.extend(draw_point(tform_point(pose, center), color=color))
                #extent = np.array([0, 0, 0.01])
                #start = tform_point(pose, center - extent/2)
                #end = tform_point(pose, center + extent/2)
                #handles.append(add_line(start, end, color=color, width=1))
        wait_for_duration(0.5)
        # TODO: test on boundaries
    wait_for_user()
Example #14
def plan_extrusion(args_list, viewer=False, verify=False, verbose=False, watch=False):
    results = []
    if not args_list:
        return results
    # TODO: setCollisionFilterGroupMask
    if not verbose:
        sys.stdout = open(os.devnull, 'w')

    problems = {args.problem for args in args_list}
    assert len(problems) == 1
    [problem] = problems

    # TODO: change dir for pddlstream
    extrusion_path = get_extrusion_path(problem)
    #extrusion_path = rotate_problem(extrusion_path)
    element_from_id, node_points, ground_nodes = load_extrusion(extrusion_path, verbose=True)
    elements = sorted(element_from_id.values())
    #node_points = transform_model(problem, elements, node_points, ground_nodes)

    connect(use_gui=viewer, shadows=SHADOWS, color=BACKGROUND_COLOR)
    with LockRenderer(lock=True):
        #draw_pose(unit_pose(), length=1.)
        obstacles, robot = load_world()
        #draw_model(elements, node_points, ground_nodes)
        #wait_for_user()
        color = apply_alpha(BLACK, alpha=0) # 0, 1
        #color = None
        element_bodies = dict(zip(elements, create_elements_bodies(node_points, elements, color=color)))
        set_extrusion_camera(node_points)
        #if viewer:
        #    label_nodes(node_points)
        saver = WorldSaver()

    #visualize_stiffness(extrusion_path)
    #debug_elements(robot, node_points, node_order, elements)
    initial_position = point_from_pose(get_link_pose(robot, link_from_name(robot, TOOL_LINK)))

    checker = None
    plan = None
    for args in args_list:
        if args.stiffness and (checker is None):
            checker = create_stiffness_checker(extrusion_path, verbose=False)

        saver.restore()
        #initial_conf = get_joint_positions(robot, get_movable_joints(robot))
        with LockRenderer(lock=not viewer):
            start_time = time.time()
            plan, data = None, {}
            with timeout(args.max_time):
                with Profiler(num=10, cumulative=False):
                    plan, data = solve_extrusion(robot, obstacles, element_from_id, node_points, element_bodies,
                                                 extrusion_path, ground_nodes, args, checker=checker)
            runtime = elapsed_time(start_time)

        sequence = recover_directed_sequence(plan)
        if verify:
            max_translation, max_rotation = compute_plan_deformation(extrusion_path, recover_sequence(plan))
            valid = check_plan(extrusion_path, sequence)
            print('Valid:', valid)
            safe = validate_trajectories(element_bodies, obstacles, plan)
            print('Safe:', safe)
            data.update({
                'safe': safe,
                'valid': valid,
                'max_translation': max_translation,
                'max_rotation': max_rotation,
            })

        data.update({
            'runtime': runtime,
            'num_elements': len(elements),
            'ee_distance': compute_sequence_distance(node_points, sequence, start=initial_position, end=initial_position),
            'print_distance': get_print_distance(plan, teleport=True),
            'distance': get_print_distance(plan, teleport=False),
            'sequence': sequence,
            'parameters': get_global_parameters(),
            'problem': args.problem,
            'algorithm': args.algorithm,
            'heuristic': args.bias,
            'plan_extrusions': not args.disable,
            'use_collisions': not args.cfree,
            'use_stiffness': args.stiffness,
        })
        print(data)
        #plan_path = '{}_solution.json'.format(args.problem)
        #with open(plan_path, 'w') as f:
        #    json.dump(plan_data, f, indent=2, sort_keys=True)
        results.append((args, data))

    reset_simulation()
    disconnect()
    if watch and (plan is not None):
        args = args_list[-1]
        animate = not (args.disable or args.ee_only)
        connect(use_gui=True, shadows=SHADOWS, color=BACKGROUND_COLOR) # TODO: avoid reconnecting
        obstacles, robot = load_world()
        display_trajectories(node_points, ground_nodes, plan, #time_step=None, video=True,
                             animate=animate)
        reset_simulation()
        disconnect()

    if not verbose:
        sys.stdout.close()
    return results
Example #15
def normalize_rgb(color, **kwargs):
    return apply_alpha(np.array(color, dtype=float) / 255.0, **kwargs)
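
Note: every example above calls apply_alpha from the pybullet-planning utilities. The sketch below is only an assumption inferred from how the examples use it, not the library's actual source: it appears to take an RGB or RGBA color and return it with the requested alpha channel.

def apply_alpha_sketch(color, alpha=1.):
    # Hypothetical stand-in for apply_alpha: keep the first three (RGB)
    # channels and append the requested alpha, yielding an (r, g, b, a) tuple.
    if color is None:
        return None
    return tuple(color[:3]) + (alpha,)

# Usage mirroring Example #1: a fully opaque blue and a half-transparent red.
# The (0, 0, 1) and (1, 0, 0) tuples stand in for the BLUE and RED constants.
opaque_blue = apply_alpha_sketch((0, 0, 1), alpha=1)        # (0, 0, 1, 1)
transparent_red = apply_alpha_sketch((1, 0, 0), alpha=0.5)  # (1, 0, 0, 0.5)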