Code Example #1
File: collect_push.py Project: lyltc1/LTAMP
    def score_fn(plan):
        assert plan is not None
        initial_distance = get_distance(
            point_from_pose(initial_pose)[:2], goal_pos2d)
        final_pose = world.perception.get_pose(block_name)
        final_distance = get_distance(
            point_from_pose(final_pose)[:2], goal_pos2d)
        quat_distance = quat_angle_between(quat_from_pose(initial_pose),
                                           quat_from_pose(final_pose))
        print(
            'Initial: {:.5f} m | Final: {:.5f} m | Rotation: {:.5f} rads'.format(
                initial_distance, final_distance, quat_distance))
        # TODO: compare orientation to final predicted orientation
        # TODO: record simulation time in the event that the controller gets stuck

        score = {
            'initial_distance': initial_distance,
            'final_distance': final_distance,
            'rotation': quat_distance,
            DYNAMICS: parameters_from_name,
        }

        #_, args = find_unique(lambda a: a[0] == 'push', plan)
        #control = args[-1]
        return score
Code Example #2
File: tsp.py Project: caelan/pb-construction
def compute_component_mst(node_points,
                          ground_nodes,
                          unprinted,
                          initial_position=None):
    # Weighted A*
    start_time = time.time()
    point_from_vertex = dict(enumerate(node_points))
    vertices = {v for e in unprinted for v in e}
    components = get_connected_components(vertices, unprinted)

    entry_nodes = set(ground_nodes)
    if initial_position is not None:
        point_from_vertex[INITIAL_NODE] = initial_position
        components.append([INITIAL_NODE])
        entry_nodes.add(INITIAL_NODE)

    edge_weights = {}
    for c1, c2 in combinations(range(len(components)), r=2):
        # TODO: directed edges from all points to entry nodes
        nodes1 = set(components[c1])
        nodes2 = set(components[c2])
        # c1 and c2 are component indices, so comparing them to [INITIAL_NODE]
        # is always False; check for the initial node in the components instead
        if (INITIAL_NODE in nodes1) or (INITIAL_NODE in nodes2):
            nodes1 &= entry_nodes
            nodes2 &= entry_nodes
        edge_weights[c1, c2] = min(
            get_distance(point_from_vertex[n1], point_from_vertex[n2])
            for n1, n2 in product(nodes1, nodes2))

    tree = compute_spanning_tree(edge_weights)
    weight = sum(edge_weights[e] for e in tree)
    print(
        'Elements: {} | Components: {} | Tree: {} | Weight: {} | Time: {:.3f}'.format(
            len(unprinted), len(components), tree, weight,
            elapsed_time(start_time)))
    return weight
Code Example #3
def sample_tool_ik(robot,
                   tool_pose,
                   max_attempts=10,
                   closest_only=False,
                   get_all=False,
                   prev_free_list=[],
                   **kwargs):
    generator = get_ik_generator(robot,
                                 tool_pose,
                                 prev_free_list=prev_free_list,
                                 **kwargs)
    ik_joints = get_movable_joints(robot)
    for _ in range(max_attempts):
        try:
            solutions = next(generator)
            if closest_only and solutions:
                current_conf = get_joint_positions(robot, ik_joints)
                solutions = [
                    min(solutions,
                        key=lambda conf: get_distance(current_conf, conf))
                ]
            solutions = list(
                filter(
                    lambda conf: not violates_limits(robot, ik_joints, conf),
                    solutions))
            return solutions if get_all else select_solution(
                robot, ik_joints, solutions, **kwargs)
        except StopIteration:
            break
    return None
Code Example #4
 def __init__(self,
              end_effector,
              element_bodies,
              node_points,
              ground_nodes,
              element,
              reverse,
              max_directions=INF):
     # TODO: more directions for ground collisions
     self.end_effector = end_effector
     self.element = element
     self.reverse = reverse
     self.max_directions = max_directions
     self.element_pose = get_pose(element_bodies[element])
     n1, n2 = element
     length = get_distance(node_points[n1], node_points[n2])
     self.translation_path = np.append(
         np.arange(-length / 2, length / 2, STEP_SIZE), [length / 2])
     if ORTHOGONAL_GROUND and is_ground(element, ground_nodes):
         # TODO: orthogonal to the ground
         self.direction_generator = cycle(
             [Pose(euler=Euler(roll=0, pitch=0))])
     else:
         self.direction_generator = get_direction_generator(
             self.end_effector, use_halton=False)
     self.trajectories = []
Code Example #5
def compute_sequence_distance(node_points,
                              directed_elements,
                              start=None,
                              end=None):
    if directed_elements is None:
        return INF
    distance = 0.
    position = start
    for (n1, n2) in directed_elements:
        if position is not None:
            distance += get_distance(position, node_points[n1])
        distance += get_distance(node_points[n1], node_points[n2])
        position = node_points[n2]
    if end is not None:
        distance += get_distance(position, end)
    return distance
Code Example #6
File: tsp.py Project: caelan/pb-construction
def greedily_plan(all_elements, node_points, ground_nodes, remaining,
                  initial_point):
    from extrusion.heuristics import compute_z_distance
    level_from_node, cost_from_edge = compute_layer_from_directed(
        all_elements, node_points, ground_nodes, remaining)
    # sequence = sorted(tree_elements, key=lambda e: cost_from_edge[e])
    if cost_from_edge is None:
        return level_from_node, cost_from_edge, None

    point = initial_point
    sequence = []
    remaining_elements = set(cost_from_edge)
    while remaining_elements:
        # TODO: sample different random seeds
        #key_fn = lambda d: (cost_from_edge[d], random.random())
        #key_fn = lambda d: (cost_from_edge[d], compute_z_distance(node_points, d))
        key_fn = lambda d: (cost_from_edge[d],
                            get_distance(point, node_points[d[0]]))
        directed = min(remaining_elements, key=key_fn)
        remaining_elements.remove(directed)
        sequence.append(directed)
        point = node_points[directed[1]]
    # draw_ordered(sequence, node_points)
    # wait_for_user()
    return level_from_node, cost_from_edge, sequence
Code Example #7
 def solve_inverse_kinematics(self,
                              world_from_tool,
                              nearby_tolerance=INF,
                              **kwargs):
     if self.ik_solver is not None:
         return self.solve_trac_ik(world_from_tool, **kwargs)
     #if nearby_tolerance != INF:
     #    return self.solve_pybullet_ik(world_from_tool, nearby_tolerance=nearby_tolerance)
     current_conf = get_joint_positions(self.robot, self.arm_joints)
     start_time = time.time()
     if nearby_tolerance == INF:
         generator = ikfast_inverse_kinematics(self.robot,
                                               PANDA_INFO,
                                               self.tool_link,
                                               world_from_tool,
                                               max_attempts=10,
                                               use_halton=True)
     else:
         generator = closest_inverse_kinematics(
             self.robot,
             PANDA_INFO,
             self.tool_link,
             world_from_tool,
             max_time=0.01,
             max_distance=nearby_tolerance,
             use_halton=True)
     conf = next(generator, None)
     #conf = sample_tool_ik(self.robot, world_from_tool, max_attempts=100)
     if conf is None:
         return conf
     max_distance = get_distance(current_conf, conf, norm=INF)
     #print('Time: {:.3f} | distance: {:.5f} | max: {:.5f}'.format(
     #    elapsed_time(start_time), max_distance, nearby_tolerance))
     set_joint_positions(self.robot, self.arm_joints, conf)
     return get_configuration(self.robot)
Code Example #8
File: collect_scoop.py Project: lyltc1/LTAMP
    def score_fn(plan):
        assert plan is not None
        final_pose = world.get_pose(bowl_name)
        point_distance = get_distance(point_from_pose(initial_pose),
                                      point_from_pose(final_pose))  #, norm=2)
        quat_distance = quat_angle_between(quat_from_pose(initial_pose),
                                           quat_from_pose(final_pose))
        print('Translation: {:.5f} m | Rotation: {:.5f} rads'.format(
            point_distance, quat_distance))

        with ClientSaver(world.client):
            bowl_beads = get_contained_beads(bowl_body, init_beads)
            fraction_bowl = float(
                len(bowl_beads)) / len(init_beads) if init_beads else 0
            mass_in_bowl = sum(map(get_mass, bowl_beads))
            spoon_beads = get_contained_beads(world.get_body(spoon_name),
                                              init_beads)
            fraction_spoon = float(
                len(spoon_beads)) / len(init_beads) if init_beads else 0
            mass_in_spoon = sum(map(get_mass, spoon_beads))
        print('In Bowl: {:.3f} | In Spoon: {:.3f}'.format(
            fraction_bowl, fraction_spoon))
        # TODO: measure change in roll/pitch

        # TODO: could make latent parameters field
        score = {
            # Displacements
            'bowl_translation': point_distance,
            'bowl_rotation': quat_distance,
            # Masses
            'total_mass': init_mass,
            'mass_in_bowl': mass_in_bowl,
            'mass_in_spoon': mass_in_spoon,
            'spoon_mass_capacity':
            (init_mass / len(init_beads)) * spoon_capacity,
            # Counts
            'num_beads': len(init_beads),
            'bowl_beads': len(bowl_beads),
            'spoon_beads': len(spoon_beads),
            'spoon_capacity': spoon_capacity,
            # Fractions
            'fraction_in_bowl': fraction_bowl,
            'fraction_in_spoon': fraction_spoon,
            # Latent
            'bead_radius': bead_radius,
            DYNAMICS: parameters_from_name
        }

        fraction_filled = float(score['spoon_beads']) / score['spoon_capacity']
        spilled_beads = score['num_beads'] - (score['bowl_beads'] +
                                              score['spoon_beads'])
        fraction_spilled = float(spilled_beads) / score['num_beads']
        print('Fraction Filled: {} | Fraction Spilled: {}'.format(
            fraction_filled, fraction_spilled))

        #_, args = find_unique(lambda a: a[0] == 'scoop', plan)
        #control = args[-1]
        return score
Code Example #9
File: ik.py Project: yijiangh/conrob_pybullet
def sample_tool_ik(robot, tool_pose, closest_only=False, get_all=False, **kwargs):
    generator = get_ik_generator(robot, tool_pose)
    ik_joints = get_movable_joints(robot)
    solutions = next(generator)
    if closest_only and solutions:
        current_conf = get_joint_positions(robot, ik_joints)
        solutions = [min(solutions, key=lambda conf: get_distance(current_conf, conf))]
    solutions = list(filter(lambda conf: not violates_limits(robot, ik_joints, conf), solutions))
    return solutions if get_all else select_solution(robot, ik_joints, solutions, **kwargs)
Code Example #10
def control_state(robot,
                  joints,
                  target_positions,
                  target_velocities=None,
                  position_tol=INF,
                  velocity_tol=INF,
                  max_velocities=None,
                  verbose=True):  # TODO: max_accelerations
    if target_velocities is None:
        target_velocities = np.zeros(len(joints))
    if max_velocities is None:
        max_velocities = get_max_velocities(robot, joints)
    assert (max_velocities > 0).all()
    max_velocity_control_joints(robot,
                                joints,
                                positions=target_positions,
                                velocities=target_velocities,
                                max_velocities=max_velocities)
    for i in irange(INF):
        current_positions = np.array(get_joint_positions(robot, joints))
        position_error = get_distance(current_positions,
                                      target_positions,
                                      norm=INF)
        current_velocities = np.array(get_joint_velocities(robot, joints))
        velocity_error = get_distance(current_velocities,
                                      target_velocities,
                                      norm=INF)
        if verbose:
            # print('Positions: {} | Target positions: {}'.format(current_positions.round(N_DIGITS), target_positions.round(N_DIGITS)))
            # print('Velocities: {} | Target velocities: {}'.format(current_velocities.round(N_DIGITS), target_velocities.round(N_DIGITS)))
            print(
                'Step: {} | Position error: {:.3f}/{:.3f} | Velocity error: {:.3f}/{:.3f}'
                .format(i, position_error, position_tol, velocity_error,
                        velocity_tol))
        # TODO: draw the tolerance interval
        if (position_error <= position_tol) and (velocity_error <=
                                                 velocity_tol):
            return  # TODO: declare success or failure by yielding or throwing an exception
        yield i
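
Note: control_state is a generator; it yields the step index and only returns once both errors fall within tolerance, so the caller is expected to advance the physics between yields. A minimal driver sketch (hypothetical usage; step_simulation and the tolerance values below are assumptions, not taken from this snippet):

    for _ in control_state(robot, joints, target_positions,
                           position_tol=1e-3, velocity_tol=1e-2):
        step_simulation()  # advance the simulation one step; errors are re-checked on the next iteration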
Code Example #11
def compute_distance_from_node(elements, node_points, ground_nodes):
    #incoming_supporters, _ = neighbors_from_orders(get_supported_orders(
    #    element_from_id.values(), node_points))
    nodes = nodes_from_elements(elements)
    neighbors = adjacent_from_edges(elements)
    edge_costs = {edge: get_distance(node_points[edge[0]], node_points[edge[1]])
                  for edge in elements}
    #edge_costs = {edge: np.abs(node_points[edge[1]] - node_points[edge[0]])[2]
    #              for edge in elements}
    edge_costs.update({edge[::-1]: distance for edge, distance in edge_costs.items()})
    successor_fn = lambda v: neighbors[v]
    cost_fn = lambda v1, v2: edge_costs[v1, v2] # TODO: z on cost function
    return dijkstra(ground_nodes & nodes, successor_fn, cost_fn)
Code Example #12
File: collect_stir.py Project: lyltc1/LTAMP
def compute_dispersion(bowl_body, beads_per_color):
    # homogeneous mixture, dispersion
    # Mean absolute difference
    # Distance correlation
    distances = []
    for beads in beads_per_color:
        bowl_beads = get_contained_beads(bowl_body, beads)
        points = list(map(get_point, bowl_beads))
        distances.extend([
            get_distance(p1, p2, norm=2)
            for p1, p2 in combinations(points, r=2)
        ])
    return np.mean(distances)
Code Example #13
def compute_transit_distance(node_points,
                             directed_elements,
                             start=None,
                             end=None):
    if directed_elements is None:
        return INF
    assert directed_elements
    # Could instead compute full distance and subtract
    pairs = []
    if start is not None:
        pairs.append((start, node_points[directed_elements[0][0]]))
    pairs.extend((node_points[directed1[1]], node_points[directed2[0]])
                 for directed1, directed2 in get_pairs(directed_elements))
    if end is not None:
        pairs.append((node_points[directed_elements[-1][1]], end))
    return sum(get_distance(*pair) for pair in pairs)
Code Example #14
    def score_fn(plan):
        assert plan is not None
        final_pose = world.get_pose(bowl_name)
        point_distance = get_distance(point_from_pose(initial_pose),
                                      point_from_pose(final_pose))  #, norm=2)
        quat_distance = quat_angle_between(quat_from_pose(initial_pose),
                                           quat_from_pose(final_pose))
        print('Translation: {:.5f} m | Rotation: {:.5f} rads'.format(
            point_distance, quat_distance))

        with ClientSaver(world.client):
            # TODO: lift the bowl up (with particles around) to prevent scale detections
            final_bowl_beads = get_contained_beads(bowl_body, init_beads)
            fraction_bowl = safe_ratio(len(final_bowl_beads),
                                       len(init_beads),
                                       undefined=0)
            mass_in_bowl = sum(map(get_mass, final_bowl_beads))
            final_cup_beads = get_contained_beads(cup_body, init_beads)
            fraction_cup = safe_ratio(len(final_cup_beads),
                                      len(init_beads),
                                      undefined=0)
            mass_in_cup = sum(map(get_mass, final_cup_beads))
        print('In Bowl: {} | In Cup: {}'.format(fraction_bowl, fraction_cup))

        score = {
            # Displacements
            'bowl_translation': point_distance,
            'bowl_rotation': quat_distance,
            # Masses
            'mass_in_bowl': mass_in_bowl,
            'mass_in_cup': mass_in_cup,
            # Counts
            'bowl_beads': len(final_bowl_beads),
            'cup_beads': len(final_cup_beads),
            # Fractions
            'fraction_in_bowl': fraction_bowl,
            'fraction_in_cup': fraction_cup,
        }
        score.update(latent)
        # TODO: store the cup path length to bias towards shorter paths

        #_, args = find_unique(lambda a: a[0] == 'pour', plan)
        #control = args[-1]
        #feature = control['feature']
        #parameter = control['parameter']
        return score
Code Example #15
 def solve_pybullet_ik(self, world_from_tool, nearby_tolerance):
     start_time = time.time()
     # Most of the time is spent creating the robot
     # TODO: use the waypoint version that doesn't repeatedly create the robot
     current_conf = get_joint_positions(self.robot, self.arm_joints)
     full_conf = sub_inverse_kinematics(
         self.robot,
         self.arm_joints[0],
         self.tool_link,
         world_from_tool,
         custom_limits=self.custom_limits
     )  # , max_iterations=1)  # , **kwargs)
     conf = get_joint_positions(self.robot, self.arm_joints)
     max_distance = get_distance(current_conf, conf, norm=INF)
     if nearby_tolerance < max_distance:
         return None
     print('Nearby) time: {:.3f} | distance: {:.5f}'.format(
         elapsed_time(start_time), max_distance))
     return full_conf
Code Example #16
File: collect_stir.py Project: lyltc1/LTAMP
    def score_fn(plan):
        assert plan is not None
        with ClientSaver(world.client):
            rgb_image = take_image(world, bowl_body, beads_per_color)
            values = score_image(rgb_image, bead_colors, beads_per_color)

        final_pose = perception.get_pose(bowl_name)
        point_distance = get_distance(point_from_pose(initial_pose),
                                      point_from_pose(final_pose))  #, norm=2)
        quat_distance = quat_angle_between(quat_from_pose(initial_pose),
                                           quat_from_pose(final_pose))
        print('Translation: {:.5f} m | Rotation: {:.5f} rads'.format(
            point_distance, quat_distance))

        with ClientSaver(world.client):
            all_beads = list(flatten(beads_per_color))
            bowl_beads = get_contained_beads(bowl_body, all_beads)
            fraction_bowl = float(
                len(bowl_beads)) / len(all_beads) if all_beads else 0
        print('In Bowl: {}'.format(fraction_bowl))

        with ClientSaver(world.client):
            final_dispersion = compute_dispersion(bowl_body, beads_per_color)
        print('Initial Dispersion: {:.3f} | Final Dispersion {:.3f}'.format(
            initial_distance, final_dispersion))

        score = {
            'bowl_translation': point_distance,
            'bowl_rotation': quat_distance,
            'fraction_in_bowl': fraction_bowl,
            'initial_dispersion': initial_distance,
            'final_dispersion': final_dispersion,
            'num_beads': len(all_beads),  # Beads per color
            DYNAMICS: parameters_from_name,
        }
        # TODO: include time required for stirring
        # TODO: change in dispersion

        #wait_for_user()
        #_, args = find_unique(lambda a: a[0] == 'stir', plan)
        #control = args[-1]
        return score
Code Example #17
File: collect_pr2.py Project: lyltc1/LTAMP
def score_poses(problem, task, ros_world):
    cup_name, bowl_name = REQUIREMENT_FNS[problem](ros_world)
    name_from_type = {'cup': cup_name, 'bowl': bowl_name}
    initial_poses = {
        ty: ros_world.get_pose(name)
        for ty, name in name_from_type.items()
    }
    final_stackings = wait_until_observation(problem, ros_world)
    #final_stackings = None
    if final_stackings is None:
        # TODO: only do this for the bad bowl
        point_distances = {ty: INF for ty in name_from_type}
        quat_distances = {ty: 2 * np.pi
                          for ty in name_from_type}  # Max rotation
    else:
        final_poses = {
            ty: ros_world.get_pose(name)
            for ty, name in name_from_type.items()
        }
        point_distances = {
            ty: get_distance(point_from_pose(initial_poses[ty]),
                             point_from_pose(final_poses[ty]))
            for ty in name_from_type
        }
        # TODO: wrap around distance for symmetric things
        quat_distances = {
            ty: quat_angle_between(quat_from_pose(initial_poses[ty]),
                                   quat_from_pose(final_poses[ty]))
            for ty in name_from_type
        }
    score = {}
    for ty in sorted(name_from_type):
        print(
            '{} | translation (m): {:.3f} | rotation (degrees): {:.3f}'.format(
                ty, point_distances[ty], math.degrees(quat_distances[ty])))
        score.update({
            '{}_translation'.format(ty): point_distances[ty],
            '{}_rotation'.format(ty): quat_distances[ty],
        })
    return score
Code Example #18
File: tsp.py Project: caelan/pb-construction
def compute_euclidean_tree(node_points,
                           ground_nodes,
                           elements,
                           initial_position=None):
    # remove printed elements from the tree
    # TODO: directed version
    start_time = time.time()
    point_from_vertex, edges = embed_graph(elements, node_points, ground_nodes,
                                           initial_position)
    edge_weights = {(n1, n2): get_distance(point_from_vertex[n1],
                                           point_from_vertex[n2])
                    for n1, n2 in edges}
    components = get_connected_components(point_from_vertex, edge_weights)
    tree = compute_spanning_tree(edge_weights)
    from extrusion.visualization import draw_model
    draw_model(tree, point_from_vertex, ground_nodes, color=BLUE)
    draw_model(edges - tree, point_from_vertex, ground_nodes, color=RED)

    weight = sum(edge_weights[e] for e in tree)
    print(len(components), len(point_from_vertex), len(tree), weight,
          elapsed_time(start_time))
    wait_for_user()
    return weight
Code Example #19
def optimize_angle(end_effector,
                   element_pose,
                   translation,
                   direction,
                   reverse,
                   candidate_angles,
                   collision_fn,
                   nearby=True,
                   max_error=TRANSLATION_TOLERANCE):
    robot = end_effector.robot
    movable_joints = get_movable_joints(robot)
    best_error, best_angle, best_conf = max_error, None, None
    initial_conf = get_joint_positions(robot, movable_joints)
    for angle in candidate_angles:
        grasp_pose = get_grasp_pose(translation, direction, angle, reverse)
        # Pose_{world,EE} = Pose_{world,element} * Pose_{element,EE}
        #                 = Pose_{world,element} * (Pose_{EE,element})^{-1}
        target_pose = multiply(element_pose, invert(grasp_pose))
        set_pose(end_effector.body,
                 multiply(target_pose, end_effector.tool_from_ee))

        if nearby:
            set_joint_positions(robot, movable_joints, initial_conf)
        conf = solve_ik(end_effector, target_pose, nearby=nearby)
        if (conf is None) or collision_fn(conf):
            continue

        set_joint_positions(robot, movable_joints, conf)
        link_pose = get_link_pose(robot, end_effector.tool_link)
        error = get_distance(point_from_pose(target_pose),
                             point_from_pose(link_pose))
        if error < best_error:  # TODO: error a function of direction as well
            best_error, best_angle, best_conf = error, angle, conf
            #break
    if best_conf is not None:
        set_joint_positions(robot, movable_joints, best_conf)
    return best_angle, best_conf
Code Example #20
File: collect_stir.py Project: lyltc1/LTAMP
def score_image(rgb_image, bead_colors, beads_per_color, max_distance=0.1):
    # TODO: could floodfill to identify bead clusters (and reward more clusters)
    # TODO: ensure the appropriate ratios are visible on top
    # TODO: penalize marbles that have left the bowl
    # TODO: estimate the number of escaped marbles using the size at that distance
    bead_pixels = [[] for _ in bead_colors]
    for r in range(rgb_image.shape[0]):  # height
        for c in range(rgb_image.shape[1]):  # width
            pixel = rgb_image[r, c]
            assert pixel[3] == 255
            rgb = pixel[:3] / 255.
            best_index, best_distance = None, INF
            for index, color in enumerate(bead_colors):
                distance = np.linalg.norm(rgb - color[:3])
                if distance < best_distance:
                    best_index, best_distance = index, distance
            if best_distance <= max_distance:
                bead_pixels[best_index].append((r, c))
    # TODO: discount beads outside
    all_beads = list(flatten(beads_per_color))
    bead_frequencies = np.array([len(beads) for beads in beads_per_color],
                                dtype=float) / len(all_beads)
    all_pixels = list(flatten(bead_pixels))
    image_frequencies = np.array([len(beads) for beads in bead_pixels],
                                 dtype=float) / len(all_pixels)
    print(bead_frequencies, image_frequencies,
          image_frequencies - bead_frequencies)

    distances = []
    for pixels in bead_pixels:
        distances.extend([
            get_distance(p1, p2, norm=2)
            for p1, p2 in combinations(pixels, r=2)
        ])
    dispersion = np.mean(distances)  # TODO: translate into meters?
    print(dispersion)
Code Example #21
def retime_waypoints(waypoints, start_time=0.):
    durations = [start_time] + [
        get_distance(*pair) / TOOL_VELOCITY for pair in get_pairs(waypoints)
    ]
    return np.cumsum(durations)
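
For reference, a small usage sketch of retime_waypoints (the TOOL_VELOCITY value of 0.05 m/s assumed below is for illustration only, not taken from the source):

    # Three waypoints spaced 0.1 m apart; each segment takes 0.1 / 0.05 = 2 s
    waypoints = [np.array([0., 0., 0.]), np.array([0.1, 0., 0.]), np.array([0.2, 0., 0.])]
    retime_waypoints(waypoints)  # -> array([0., 2., 4.])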
Code Example #22
 def fn(*locations):
     return 1. + get_distance(*map(extract_point, locations))
Code Example #23
File: tsp.py Project: caelan/pb-construction
def solve_tsp(all_elements,
              ground_nodes,
              node_points,
              printed,
              initial_point,
              final_point,
              bidirectional=False,
              layers=True,
              max_time=30,
              visualize=True,
              verbose=False):
    # https://developers.google.com/optimization/routing/tsp
    # https://developers.google.com/optimization/reference/constraint_solver/routing/RoutingModel
    # http://www.math.uwaterloo.ca/tsp/concorde.html
    # https://developers.google.com/optimization/reference/python/constraint_solver/pywrapcp
    # AddDisjunction
    # TODO: pick up and delivery
    # TODO: time window for skipping elements
    # TODO: Minimum Spanning Tree (MST) bias
    # TODO: time window constraint to ensure connected
    # TODO: reuse by simply swapping out the first vertex
    # arc-routing
    from ortools.constraint_solver import routing_enums_pb2, pywrapcp
    from extrusion.visualization import draw_ordered, draw_model
    start_time = time.time()
    assert initial_point is not None
    remaining = all_elements - printed
    #printed_nodes = compute_printed_nodes(ground_nodes, printed)
    if not remaining:
        cost = get_distance(initial_point, final_point)
        return [], cost
    # TODO: use as a lower bound
    total_distance = compute_element_distance(node_points, remaining)

    # TODO: some of these are invalid still
    level_from_node, cost_from_edge, sequence = greedily_plan(
        all_elements, node_points, ground_nodes, remaining, initial_point)
    if sequence is None:
        return None, INF
    extrusion_edges = set()
    point_from_vertex = {INITIAL_NODE: initial_point, FINAL_NODE: final_point}
    frame_nodes = nodes_from_elements(remaining)
    min_level = min(level_from_node[n] for n in frame_nodes)
    max_level = max(level_from_node[n] for n in frame_nodes)

    frame_keys = set()
    keys_from_node = defaultdict(set)
    for element in remaining:
        mid = (element, element)
        point_from_vertex[mid] = get_midpoint(node_points, element)
        for node in element:
            key = (element, node)
            point_from_vertex[key] = node_points[node]
            frame_keys.add(key)
            keys_from_node[node].add(key)

        for reverse in [True, False]:
            directed = reverse_element(element) if reverse else element
            node1, node2 = directed
            #delta = node_points[node2] - node_points[node1]
            #pitch = get_pitch(delta)
            #upward = -SUPPORT_THETA <= pitch
            # theta = angle_between(delta, [0, 0, -1])
            # upward = theta < (np.pi / 2 - SUPPORT_THETA)
            #if (directed in tree_elements): # or upward:
            if bidirectional or (directed in cost_from_edge):
                # Add edges from anything that is roughly the correct cost
                start = (element, node1)
                end = (element, node2)
                extrusion_edges.update({(start, mid), (mid, end)})

    for node in keys_from_node:
        for edge in product(keys_from_node[node], repeat=2):
            extrusion_edges.add(edge)
    # Key thing is partial order on buckets of elements to adhere to height

    # Connect v2 to v1 if v2 is the same level
    # Traversing an edge might move to a prior level (but at most one)
    transit_edges = set()
    for directed in product(frame_keys, repeat=2):
        key1, key2 = directed
        element1, node1 = key1
        #level1 = min(level_from_node[n] for n in element1)
        level1 = level_from_node[get_other_node(node1, element1)]
        _, node2 = key2
        level2 = level_from_node[node2]
        if level2 in [level1, level1 + 1]:  # TODO: could bucket more coarsely
            transit_edges.add(directed)
    for key in frame_keys:
        _, node = key
        if level_from_node[node] == min_level:
            transit_edges.add((INITIAL_NODE, key))
        if level_from_node[node] in [max_level, max_level - 1]:
            transit_edges.add((key, FINAL_NODE))
    # TODO: can also remove restriction that elements are printed in a single direction
    if not layers:
        transit_edges.update(
            product(frame_keys | {INITIAL_NODE, FINAL_NODE},
                    repeat=2))  # TODO: apply to greedy as well

    key_from_index = list(
        {k
         for pair in extrusion_edges | transit_edges for k in pair})
    edge_weights = {
        pair: INVALID
        for pair in product(key_from_index, repeat=2)
    }
    for k1, k2 in transit_edges | extrusion_edges:
        p1, p2 = point_from_vertex[k1], point_from_vertex[k2]
        edge_weights[k1, k2] = get_distance(p1, p2)
    #edge_weights.update({e: 0. for e in extrusion_edges}) # frame edges are free
    edge_weights[FINAL_NODE, INITIAL_NODE] = 0.
    edge_weights[INITIAL_NODE,
                 FINAL_NODE] = INVALID  # Otherwise might be backward

    print(
        'Elements: {} | Vertices: {} | Edges: {} | Structure: {:.3f} | Min Level {} | Max Level: {}'
        .format(len(remaining), len(key_from_index), len(edge_weights),
                total_distance, min_level, max_level))
    index_from_key = dict(map(reversed, enumerate(key_from_index)))
    num_vehicles, depot = 1, index_from_key[INITIAL_NODE]
    manager = pywrapcp.RoutingIndexManager(len(key_from_index), num_vehicles,
                                           depot)
    #[depot], [depot])
    solver = pywrapcp.RoutingModel(manager)

    cost_from_index = {}
    for (k1, k2), weight in edge_weights.items():
        i1, i2 = index_from_key[k1], index_from_key[k2]
        cost_from_index[i1, i2] = int(math.ceil(SCALE * weight))
    solver.SetArcCostEvaluatorOfAllVehicles(
        solver.RegisterTransitCallback(
            lambda i1, i2: cost_from_index[manager.IndexToNode(
                i1), manager.IndexToNode(i2)]))  # from -> to

    # sequence = plan_stiffness(None, None, node_points, ground_nodes, elements,
    #                           initial_position=initial_point, stiffness=False, max_backtrack=INF)

    initial_order = []
    #initial_order = [INITIAL_NODE] # Start and end automatically included
    for directed in sequence:
        node1, node2 = directed
        element = get_undirected(remaining, directed)
        initial_order.extend([
            (element, node1),
            (element, element),
            (element, node2),
        ])
    initial_order.append(FINAL_NODE)
    #initial_order.append(INITIAL_NODE)
    initial_route = [index_from_key[key] for key in initial_order]
    #index = initial_route.index(0)
    #initial_route = initial_route[index:] + initial_route[:index] + [0]

    initial_solution = solver.ReadAssignmentFromRoutes(
        [initial_route], ignore_inactive_indices=True)
    assert initial_solution is not None
    #print_solution(manager, solver, initial_solution)
    #print(solver.GetAllDimensionNames())
    #print(solver.ComputeLowerBound())

    objective = initial_solution.ObjectiveValue() / SCALE
    invalid = int(objective / INVALID)
    order = parse_solution(solver, manager, key_from_index,
                           initial_solution)[:-1]
    ordered_pairs = get_pairs(order)
    cost = sum(edge_weights[pair] for pair in ordered_pairs)
    #print('Initial solution | Invalid: {} | Objective: {:.3f} | Cost: {:.3f} | Duration: {:.3f}s'.format(
    #    invalid, objective, cost, elapsed_time(start_time)))
    if False and visualize:  # and invalid
        remove_all_debug()
        draw_model(printed, node_points, None, color=BLACK)
        draw_point(initial_point, color=BLACK)
        draw_point(final_point, color=GREEN)
        for pair in ordered_pairs:
            if edge_weights[pair] == INVALID:
                for key in pair:
                    draw_point(point_from_vertex[key], color=RED)
        draw_ordered(ordered_pairs, point_from_vertex)
        wait_for_user()  # TODO: pause only if viewer
    #sequence = extract_sequence(level_from_node, remaining, ordered_pairs)
    #print(compute_sequence_distance(node_points, sequence, start=initial_point, end=final_point), total_distance+cost)
    #print([cost_from_edge[edge] for edge in sequence])
    #return sequence, cost

    start_time = time.time()
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    #search_parameters.solution_limit = 1
    search_parameters.time_limit.seconds = int(max_time)
    search_parameters.log_search = verbose
    # AUTOMATIC | PATH_CHEAPEST_ARC | LOCAL_CHEAPEST_ARC | GLOBAL_CHEAPEST_ARC | LOCAL_CHEAPEST_INSERTION
    #search_parameters.first_solution_strategy = (
    #    routing_enums_pb2.FirstSolutionStrategy.AUTOMATIC)
    # AUTOMATIC | GREEDY_DESCENT | GUIDED_LOCAL_SEARCH | SIMULATED_ANNEALING | TABU_SEARCH | OBJECTIVE_TABU_SEARCH
    #search_parameters.local_search_metaheuristic = (
    #    routing_enums_pb2.LocalSearchMetaheuristic.GREEDY_DESCENT)
    #solution = solver.SolveWithParameters(search_parameters)
    solution = solver.SolveFromAssignmentWithParameters(
        initial_solution, search_parameters)
    #print_solution(manager, solver, solution)

    print('Status: {} | Text: {}'.format(solver.status(),
                                         STATUS[solver.status()]))
    if not solution:
        print('Failure! Duration: {:.3f}s'.format(elapsed_time(start_time)))
        return None, INF

    objective = solution.ObjectiveValue() / SCALE
    invalid = int(objective / INVALID)
    order = parse_solution(solver, manager, key_from_index, solution)[:-1]
    ordered_pairs = get_pairs(order)  # + [(order[-1], order[0])]
    #cost = compute_element_distance(point_from_vertex, ordered_pairs)
    cost = sum(edge_weights[pair] for pair in ordered_pairs)
    print(
        'Final solution | Invalid: {} | Objective: {:.3f} | Cost: {:.3f} | Duration: {:.3f}s'
        .format(invalid, objective, cost, elapsed_time(start_time)))

    sequence = extract_sequence(level_from_node, remaining, ordered_pairs)
    #print(compute_sequence_distance(node_points, sequence, start=initial_point, end=final_point)) #, total_distance+cost)

    #print(sequence)
    violations = 0
    printed_nodes = compute_printed_nodes(ground_nodes, printed)
    for directed in sequence:
        if directed[0] not in printed_nodes:
            #print(directed)
            violations += 1
        printed_nodes.update(directed)
    print('Violations:', violations)

    if visualize:
        # TODO: visualize by weight
        remove_all_debug()
        draw_model(printed, node_points, None, color=BLACK)
        draw_point(initial_point, color=BLACK)
        draw_point(final_point, color=GREEN)
        #tour_pairs = ordered_pairs + get_pairs(list(reversed(order)))
        #draw_model(extrusion_edges - set(tour_pairs), point_from_vertex, ground_nodes, color=BLACK)
        draw_ordered(ordered_pairs, point_from_vertex)
        wait_for_user()

    if sequence is None:
        return None, INF
    #print([cost_from_edge[edge] for edge in sequence])
    return sequence, cost
Code Example #24
    def fn(printed, directed, position, conf):
        # Queue minimizes the statistic
        element = get_undirected(all_elements, directed)
        n1, n2 = directed

        # forward adds, backward removes
        structure = printed | {element} if forward else printed - {element}
        structure_ids = get_extructed_ids(element_from_id, structure)

        normalizer = 1
        #normalizer = len(structure)
        #normalizer = compute_element_distance(node_points, all_elements)

        reduce_op = sum # sum | max | average
        reaction_fn = force_from_reaction  # force_from_reaction | torque_from_reaction

        first_node, second_node = directed if forward else reverse_element(directed)
        layer = sign * layer_from_edge[element]
        #layer = sign * layer_from_directed.get(directed, INF)

        tool_point = position
        tool_distance = 0.
        if heuristic in COST_HEURISTICS:
            if conf is not None:
                if hash_or_id(conf) not in ee_cache:
                    with BodySaver(robot):
                        set_configuration(robot, conf)
                        ee_cache[hash_or_id(conf)] = get_tool_position(robot)
                tool_point = ee_cache[hash_or_id(conf)]
            tool_distance = get_distance(tool_point, node_points[first_node])

        # TODO: weighted average to balance cost and bias
        if heuristic == 'none':
            return 0
        if heuristic == 'random':
            return random.random()
        elif heuristic == 'degree':
            # TODO: other graph statistics
            #printed_nodes = {n for e in printed for n in e}
            #node = n1 if n2 in printed_nodes else n2
            #if node in ground_nodes:
            #    return 0
            raise NotImplementedError()
        elif heuristic == 'length':
            # Equivalent to mass if uniform density
            return get_element_length(element, node_points)
        elif heuristic == 'distance':
            return tool_distance
        elif heuristic == 'layered-distance':
            return (layer, tool_distance)
        # elif heuristic == 'components':
        #     # Ground nodes intentionally omitted
        #     # TODO: this is broken
        #     remaining = all_elements - printed if forward else printed - {element}
        #     vertices = nodes_from_elements(remaining)
        #     components = get_connected_components(vertices, remaining)
        #     #print('Components: {} | Distance: {:.3f}'.format(len(components), tool_distance))
        #     return (len(components), tool_distance)
        elif heuristic == 'fixed-tsp':
            # TODO: layer_from_edge[element]
            # TODO: score based on current distance from the plan in the tour
            # TODO: factor in the distance to the next element in a more effective way
            if order is None:
                return (INF, tool_distance)
            return (sign*order[element], tool_distance) # Chooses least expensive direction
        elif heuristic == 'tsp':
            if printed not in tsp_cache: # not last_plan and
                # TODO: seed with the previous solution
                #remaining = all_elements - printed if forward else printed
                #assert element in remaining
                #printed_nodes = compute_printed_nodes(ground_nodes, printed) if forward else ground_nodes
                tsp_cache[printed] = solve_tsp(all_elements, ground_nodes, node_points, printed, tool_point, initial_point,
                                               bidirectional=True, layers=True, max_time=30, visualize=False, verbose=True)
                #print(tsp_cache[printed])
                if not last_plan:
                    last_plan[:] = tsp_cache[printed][0]
            plan, cost = tsp_cache[printed]
            #plan = last_plan[len(printed):]
            if plan is None:
                #return tool_distance
                return (layer, INF)
            transit = compute_transit_distance(node_points, plan, start=tool_point, end=initial_point)
            assert forward
            first = plan[0] == directed
            #return not first # No info if the best isn't possible
            index = None
            for i, directed2 in enumerate(plan):
                undirected2 = get_undirected(all_elements, directed2)
                if element == undirected2:
                    assert index is None
                    index = i
            assert index is not None
            # Could also break ties by other in the plan
            # Two plans might have the same cost but the swap might be detrimental
            new_plan = [directed] + plan[:index] + plan[index+1:]
            assert len(plan) == len(new_plan)
            new_transit = compute_transit_distance(node_points, new_plan, start=tool_point, end=initial_point)
            #print(layer, cost, transit + compute_element_distance(node_points, plan),
            #      new_transit + compute_element_distance(node_points, plan))
            #return new_transit
            return (layer, not first, new_transit) # Layer important otherwise it shortcuts
        elif heuristic == 'online-tsp':
            if forward:
                _, tsp_distance = solve_tsp(all_elements-structure, ground_nodes, node_points, printed,
                                            node_points[second_node], initial_point, visualize=False)
            else:
                _, tsp_distance = solve_tsp(structure, ground_nodes, node_points, printed, initial_point,
                                            node_points[second_node], visualize=False)
            total = tool_distance + tsp_distance
            return total
        # elif heuristic == 'mst':
        #     # TODO: this is broken
        #     mst_distance = compute_component_mst(node_points, ground_nodes, remaining,
        #                                          initial_position=node_points[second_node])
        #     return tool_distance + mst_distance
        elif heuristic == 'x':
            return sign * get_midpoint(node_points, element)[0]
        elif heuristic == 'z':
            return sign * compute_z_distance(node_points, element)
        elif heuristic == 'pitch':
            #delta = node_points[second_node] - node_points[first_node]
            delta = node_points[n2] - node_points[n1]
            return get_pitch(delta)
        elif heuristic == 'dijkstra': # offline
            # TODO: sum of all element path distances
            return sign*np.average([distance_from_node[node].cost for node in element]) # min, max, average
        elif heuristic == 'online-dijkstra':
            if printed not in distance_cache:
                distance_cache[printed] = compute_distance_from_node(printed, node_points, ground_nodes)
            return sign*min(distance_cache[printed][node].cost
                            if node in distance_cache[printed] else INF
                            for node in element)
        elif heuristic == 'plan-stiffness':
            if order is None:
                return None
            return (sign*order[element], directed not in order)
        elif heuristic == 'load':
            nodal_loads = checker.get_nodal_loads(existing_ids=structure_ids, dof_flattened=False) # get_self_weight_loads
            return reduce_op(np.linalg.norm(force_from_reaction(reaction)) for reaction in nodal_loads.values())
        elif heuristic == 'fixed-forces':
            #printed = all_elements # disable to use most up-to-date
            # TODO: relative to the load introduced
            if printed not in reaction_cache:
                reaction_cache[printed] = compute_all_reactions(extrusion_path, all_elements, checker=checker)
            force = reduce_op(np.linalg.norm(reaction_fn(reaction)) for reaction in reaction_cache[printed].reactions[element])
            return force / normalizer
        elif heuristic == 'forces':
            reactions_from_nodes = compute_node_reactions(extrusion_path, structure, checker=checker)
            #torque = sum(np.linalg.norm(np.sum([torque_from_reaction(reaction) for reaction in reactions], axis=0))
            #            for reactions in reactions_from_nodes.values())
            #return torque / normalizer
            total = reduce_op(np.linalg.norm(reaction_fn(reaction)) for reactions in reactions_from_nodes.values()
                            for reaction in reactions)
            return total / normalizer
            #return max(sum(np.linalg.norm(reaction_fn(reaction)) for reaction in reactions)
            #               for reactions in reactions_from_nodes.values())
        elif heuristic == 'stiffness':
            # TODO: add different variations
            # TODO: normalize by initial stiffness, length, or degree
            # Most unstable or least unstable first
            # Gets faster with fewer all_elements
            #old_stiffness = score_stiffness(extrusion_path, element_from_id, printed, checker=checker)
            stiffness = score_stiffness(extrusion_path, element_from_id, structure, checker=checker) # lower is better
            return stiffness / normalizer
            #return stiffness / old_stiffness
        elif heuristic == 'fixed-stiffness':
            # TODO: invert the sign for regression/progression?
            # TODO: sort FastDownward by the (fixed) action cost
            return stiffness_cache[element] / normalizer
        elif heuristic == 'relative-stiffness':
            stiffness = score_stiffness(extrusion_path, element_from_id, structure, checker=checker) # lower is better
            if normalizer == 0:
                return 0
            return stiffness / normalizer
            #return stiffness / stiffness_cache[element]
        raise ValueError(heuristic)
Code Example #25
def get_element_length(element, node_points):
    n1, n2 = element
    return get_distance(node_points[n2], node_points[n1])
Code Example #26
def trajectory_cost_fn(t):
    distance = t.distance(
        distance_fn=lambda q1, q2: get_distance(q1[:2], q2[:2]))
    return BASE_CONSTANT + distance / BASE_VELOCITY
Code Example #27
 def get_link_distance(self, **kwargs):
     # TODO: just endpoints
     link_path = list(map(point_from_pose, self.get_link_path(**kwargs)))
     return sum(get_distance(*pair) for pair in get_pairs(link_path))
Code Example #28
def base_cost_fn(q1, q2):
    distance = get_distance(q1.values[:2], q2.values[:2])
    return BASE_CONSTANT + distance / BASE_VELOCITY
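
All of the examples above call get_distance from the pybullet-planning utilities, which is never shown on this page. A minimal sketch consistent with how it is used here (p-norm of the difference between two points or configurations; norm=2 by default, norm=INF for the maximum per-coordinate error) could look like the following. This is an illustrative reconstruction, not the library's exact implementation:

    import numpy as np

    def get_distance(p1, p2, norm=2):
        # Distance between two points/configurations under the given p-norm:
        # norm=2 gives the Euclidean distance, norm=np.inf the max coordinate error.
        return np.linalg.norm(np.array(p2) - np.array(p1), ord=norm)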