Example #1
0
File: run.py  Project: yijiangh/pychoreo
def display_trajectories(assembly_network,
                         process_trajs,
                         extrusion_time_step=0.075,
                         transition_time_step=0.1):
    """Replay planned extrusion processes in the simulation GUI.

    For each sequenced unit process, first replays its free-space
    transition trajectory (if any), then its extrusion ('print')
    trajectory while tracing the tool-tip path with blue debug lines.

    :param assembly_network: assembly model; only used to aim the camera
        at the first element's end point.
    :param process_trajs: dict mapping seq_id -> {'print': [conf, ...],
        'transition': [conf, ...]}; must be non-empty.
    :param extrusion_time_step: pause (seconds) between extrusion confs.
    :param transition_time_step: pause (seconds) between transition confs.
    :raises ValueError: if *process_trajs* is empty or None.
    """
    disconnect()
    # Explicit validation instead of `assert`, which is stripped under -O.
    if not process_trajs:
        raise ValueError('process_trajs must be a non-empty dict')

    connect(use_gui=True)
    floor, robot = load_world()
    # Aim the camera slightly offset from the first element's start point.
    camera_base_pt = assembly_network.get_end_points(0)[0]
    camera_pt = np.array(camera_base_pt) + np.array([0.1, 0, 0.05])
    set_camera_pose(tuple(camera_pt), camera_base_pt)

    movable_joints = get_movable_joints(robot)
    for seq_id, unit_proc in sorted(process_trajs.items()):
        last_point = None
        handles = []  # debug-line handles for the traced tool path

        # Replay the transition (free-space) trajectory first, if present.
        if 'transition' in unit_proc and unit_proc['transition']:
            for conf in unit_proc['transition']:
                set_joint_positions(robot, movable_joints, conf)
                wait_for_duration(transition_time_step)
        else:
            print(
                'seq #{} does not have transition traj found!'.format(seq_id))

        # Replay the extrusion trajectory, tracing the tool tip in blue.
        for conf in unit_proc['print']:
            set_joint_positions(robot, movable_joints, conf)
            current_point = point_from_pose(
                get_link_pose(robot, link_from_name(robot, TOOL_FRAME)))
            if last_point is not None:
                color = (0, 0, 1)
                handles.append(add_line(last_point, current_point,
                                        color=color))
            last_point = current_point
            wait_for_duration(extrusion_time_step)

    print('simulation done.')
    wait_for_user()
    disconnect()
Example #2
0
def check_trajectory_collision(robot, trajectory, bodies):
    """Return one boolean per body in *bodies*: True if the robot collides
    with that body at any configuration along *trajectory*.

    Once a body has been flagged, further collision queries against it are
    skipped (short-circuited) for the remaining configurations.
    """
    # TODO: each new addition makes collision checking more expensive
    # TODO: batch collision detection
    joints = get_movable_joints(robot)
    hit = [False] * len(bodies)
    for conf in trajectory:
        set_joint_positions(robot, joints, conf)
        # `flag or ...` short-circuits, so already-flagged bodies are not re-checked.
        hit = [flag or pairwise_collision(robot, obstacle)
               for flag, obstacle in zip(hit, bodies)]
    return hit
Example #3
0
    def diagnosis_collision_fn(q):
        """Diagnostic collision check for configuration *q*.

        Highlights the colliding bodies in the GUI, prints a report for
        every pairwise link/body contact, aims the camera at each contact
        point, and pauses for the user. Prints a joint-limit warning when
        *q* is out of bounds (but keeps going so contacts are still shown).
        """
        def _report(u_cr, kind):
            # Shared visualization for one contact record: color the two
            # bodies, label them, draw the contact line, point the camera
            # at the contact, and wait for the user to acknowledge.
            b1 = get_body_from_pb_id(u_cr[1])
            b2 = get_body_from_pb_id(u_cr[2])
            print('pairwise {} collision: body {} - body {}'.format(
                kind, get_name(b1), get_name(b2)))
            set_color(b1, (1, 0, 0, 1))
            set_color(b2, (0, 1, 0, 1))
            add_body_name(b1)
            add_body_name(b2)

            add_line(u_cr[5], u_cr[6])
            camera_base_pt = u_cr[5]
            camera_pt = np.array(camera_base_pt) + np.array([0.1, 0, 0.05])
            set_camera_pose(tuple(camera_pt), camera_base_pt)

            wait_for_user()

        set_all_bodies_color()
        for b in obstacles:
            set_color(b, (0, 0, 1, 0.3))

        if not all_between(lower_limits, q, upper_limits):
            print('Exceeding joint limit!')
        set_joint_positions(body, joints, q)
        for attachment in attachments:
            attachment.assign()

        found = False
        for link1, link2 in check_link_pairs:
            cr = pairwise_link_collision_diagnosis(body, link1, body, link2)
            if cr:
                found = True
                for u_cr in cr:
                    _report(u_cr, 'LINK')

        for pair in check_body_pairs:
            cr = pairwise_collision_diagnosis(*pair, **kwargs)
            if cr:
                found = True
                for u_cr in cr:
                    _report(u_cr, 'BODY')

        # Bug fix: the original printed 'no collision detected!' even after
        # reporting collisions; only print it when nothing was found.
        if not found:
            print('no collision detected!')
Example #4
0
 def collision_fn(q, dynamic_obstacles=None):
     """Return True when configuration *q* is invalid: outside joint
     limits, in self-collision, or colliding with an obstacle.

     :param q: joint configuration for the closed-over *joints*.
     :param dynamic_obstacles: optional extra obstacle bodies; the moving
         bodies are checked against them for this call only.
     """
     if not all_between(lower_limits, q, upper_limits):
         return True
     set_joint_positions(body, joints, q)
     for attachment in attachments:
         attachment.assign()
     # Bug fix: the original extended the closure list `check_body_pairs`,
     # permanently accumulating dynamic pairs across calls (and used a
     # mutable default argument). Build the pair list locally instead.
     body_pairs = list(check_body_pairs)
     if dynamic_obstacles:
         body_pairs.extend(product(moving_bodies, dynamic_obstacles))
     for link1, link2 in check_link_pairs:
         # Self-collisions should not have the max_distance parameter
         if pairwise_link_collision(body, link1, body,
                                    link2):  #, **kwargs):
             return True
     return any(
         pairwise_collision(*pair, **kwargs) for pair in body_pairs)
Example #5
0
File: run.py  Project: yijiangh/pychoreo
def display_trajectories(assembly_network, trajectories, time_step=0.075):
    """Replay a sequence of joint trajectories in the simulation GUI,
    tracing the tool-tip path with blue debug lines.

    Does nothing when *trajectories* is None. Blocks at the end until the
    user interrupts, then disconnects from the simulator.
    """
    disconnect()
    if trajectories is None:
        return
    connect(use_gui=True)
    floor, robot = load_world()

    # Point the camera slightly offset from the first element's start point.
    base_pt = assembly_network.get_end_points(0)[0]
    view_pt = np.array(base_pt) + np.array([0.1, 0, 0.05])
    set_camera_pose(tuple(view_pt), base_pt)

    joints = get_movable_joints(robot)
    for traj in trajectories:
        prev_pt = None
        line_handles = []  # debug-line handles for the traced tool path
        for conf in traj:
            set_joint_positions(robot, joints, conf)
            tool_pt = point_from_pose(
                get_link_pose(robot, link_from_name(robot, TOOL_FRAME)))
            if prev_pt is not None:
                # Trace the executed tool path in blue.
                line_handles.append(
                    add_line(prev_pt, tool_pt, color=(0, 0, 1)))
            prev_pt = tool_pt
            wait_for_duration(time_step)

    wait_for_interrupt()
    disconnect()
Example #6
0
File: run.py  Project: yijiangh/pychoreo
def main(precompute=False):
    """End-to-end extrusion planning demo: load a problem, plan an assembly
    sequence, plan extrusion + transition motions, then visualize the result.

    :param precompute: NOTE(review): accepted but never read in this body —
        confirm whether it is vestigial.
    """
    parser = argparse.ArgumentParser()
    # four-frame | simple_frame | djmm_test_block | mars_bubble | sig_artopt-bunny | topopt-100 | topopt-205 | topopt-310 | voronoi
    parser.add_argument('-p',
                        '--problem',
                        default='simple_frame',
                        help='The name of the problem to solve')
    parser.add_argument(
        '-sm',
        '--search_method',
        default='b',
        help='csp search method, b for backward, f for forward.')
    parser.add_argument(
        '-vom',
        '--value_order_method',
        default='sp',
        help=
        'value ordering method, sp for special heuristic, random for random value ordering'
    )
    parser.add_argument('-l',
                        '--use_layer',
                        action='store_true',
                        help='use layer info in the search.')
    # parser.add_argument('-c', '--cfree', action='store_true', help='Disables collisions with obstacles')
    # parser.add_argument('-m', '--motions', action='store_true', help='Plans motions between each extrusion')
    parser.add_argument('-v',
                        '--viewer',
                        action='store_true',
                        help='Enables the viewer during planning (slow!)')
    parser.add_argument(
        '-s',
        '--parse_seq',
        action='store_true',
        help=
        'parse sequence planning result from a file and proceed to motion planning'
    )
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Debug mode.')
    args = parser.parse_args()
    print('Arguments:', args)

    elements, node_points, ground_nodes, file_path = load_extrusion(
        args.problem, parse_layers=True)
    node_order = list(range(len(node_points)))

    # vert indices sanity check: drop ground nodes / elements that reference
    # vertex ids outside the loaded node list
    ground_nodes = [n for n in ground_nodes if n in node_order]
    elements = [
        element for element in elements
        if all(n in node_order for n in element.node_ids)
    ]

    connect(use_gui=args.viewer)
    floor, robot = load_world()

    # TODO: import other static obstacles
    static_obstacles = [floor]
    movable_joints = get_movable_joints(robot)
    disabled_collisions = get_disabled_collisions(robot)
    initial_conf = get_joint_positions(robot, movable_joints)

    # Camera placement. NOTE(review): node index 10 is hard-coded, which
    # assumes the model has more than 10 nodes — confirm for small problems.
    camera_pt = np.array(node_points[10]) + np.array([0.1, 0, 0.05])
    target_camera_pt = node_points[0]

    # create collision bodies, one per element, and attach them to the elements
    bodies = create_elements(node_points,
                             [tuple(e.node_ids) for e in elements])
    for e, b in zip(elements, bodies):
        e.element_body = b
        # draw_pose(get_pose(b), length=0.004)

    assembly_network = AssemblyNetwork(node_points, elements, ground_nodes)
    assembly_network.compute_traversal_to_ground_dist()

    # stiffness check on the full structure before any sequencing
    sc = stiffness_checker(json_file_path=file_path, verbose=False)
    # sc.set_self_weight_load(True)
    print('test stiffness check on the whole structure: {0}'.format(
        sc.solve()))

    if has_gui():
        pline_handle = draw_model(assembly_network, draw_tags=False)
        set_camera_pose(tuple(camera_pt), target_camera_pt)
        wait_for_user()

    # sequence planning: either solve from scratch (and save the result),
    # or load a previously saved plan when --parse_seq is given
    use_seq_existing_plan = args.parse_seq
    if not use_seq_existing_plan:
        with LockRenderer():
            search_method = SEARCH_METHODS[args.search_method]
            element_seq, seq_poses = plan_sequence(
                robot,
                static_obstacles,
                assembly_network,
                stiffness_checker=sc,
                search_method=search_method,
                value_ordering_method=args.value_order_method,
                use_layer=args.use_layer,
                file_name=args.problem)
        write_seq_json(assembly_network, element_seq, seq_poses, args.problem)
    else:
        element_seq, seq_poses = read_seq_json(args.problem)

    # TODO: sequence direction routing (if real fab)
    ####################
    # sequence planning completed
    if has_gui():
        # wait_for_interrupt('Press a key to visualize the plan...')
        # map(p.removeUserDebugItem, pline_handle)
        remove_all_debug()
        # draw_assembly_sequence(assembly_network, element_seq, seq_poses, time_step=1)

    if USE_MESHCAT:
        print('Visualizing assembly seq in meshcat...')
        vis = meshcat.Visualizer()
        try:
            vis.open()
        except:
            # NOTE(review): bare except — presumably falls back to printing
            # the URL when a browser cannot be opened; consider narrowing.
            vis.url()
        meshcat_visualize_assembly_sequence(vis,
                                            assembly_network,
                                            element_seq,
                                            seq_poses,
                                            scale=3,
                                            time_step=0.5,
                                            direction_len=0.025)

    # motion planning phase
    # assume that the robot's dof is all included in the ikfast model
    print('start sc motion planning.')
    with LockRenderer():
        # tot_traj, graph_sizes = direct_ladder_graph_solve(robot, assembly_network, element_seq, seq_poses, static_obstacles)
        sg = SparseLadderGraph(robot, len(get_movable_joints(robot)),
                               assembly_network, element_seq, seq_poses,
                               static_obstacles)
        sg.find_sparse_path(max_time=SPARSE_LADDER_GRAPH_SOLVE_TIMEOUT)
        tot_traj, graph_sizes = sg.extract_solution()

    # split the flat joint trajectory back into one print trajectory per element
    process_trajs = {}
    for seq_id, print_jt_list in enumerate(
            list(divide_list_chunks(tot_traj, graph_sizes))):
        process_trajs[seq_id] = {}
        process_trajs[seq_id]['print'] = print_jt_list

    # TODO: a separate function
    # transition planning: plan free-space motions linking consecutive prints
    print('start transition planning.')
    moving_obstacles = {}
    for seq_id, e_id in sorted(element_seq.items()):
        print('transition planning # {} - E#{}'.format(seq_id, e_id))
        # print('moving obs: {}'.format([get_name(mo) for mo in moving_obstacles.values()]))
        # print('static obs: {}'.format([get_name(so) for so in static_obstacles]))
        print('---')

        # start from the end of the previous print trajectory
        # (or the initial conf for the first element)
        if seq_id != 0:
            set_joint_positions(robot, movable_joints,
                                process_trajs[seq_id - 1]['print'][-1])
        else:
            set_joint_positions(robot, movable_joints, initial_conf)

        transition_traj = plan_joint_motion(robot,
                                            movable_joints,
                                            process_trajs[seq_id]['print'][0],
                                            obstacles=static_obstacles +
                                            list(moving_obstacles.values()),
                                            self_collisions=SELF_COLLISIONS)

        # planning failed: draw the offending element and run collision
        # diagnosis at both endpoint configurations
        if not transition_traj:
            add_line(*assembly_network.get_end_points(e_id))

            cfn = get_collision_fn_diagnosis(
                robot,
                movable_joints,
                obstacles=static_obstacles + list(moving_obstacles.values()),
                attachments=[],
                self_collisions=SELF_COLLISIONS,
                disabled_collisions=disabled_collisions)

            st_conf = get_joint_positions(robot, movable_joints)
            print('start extrusion pose:')
            cfn(st_conf)

            end_conf = process_trajs[seq_id]['print'][0]
            print('end extrusion pose:')
            cfn(end_conf)

        process_trajs[seq_id]['transition'] = transition_traj

        # the just-printed element becomes an obstacle for later transitions
        e_body = assembly_network.get_element_body(e_id)
        moving_obstacles[seq_id] = e_body

    print('transition planning done! proceed to simulation?')
    wait_for_user()

    display_trajectories(assembly_network,
                         process_trajs,
                         extrusion_time_step=0.15,
                         transition_time_step=0.1)
    print('Quit?')
    if has_gui():
        wait_for_user()
Example #7
0
 def iterate(self):
     """Generator that steps the robot through this trajectory's path,
     skipping the first configuration, and yields once per step."""
     remaining = self.path[1:]
     for waypoint in remaining:
         set_joint_positions(self.robot, self.joints, waypoint)
         yield