Example #1
def main(display='execute'):  # control | execute | step
    connect(use_gui=True)
    disable_real_time()
    robot = load_model(DRAKE_IIWA_URDF)  # KUKA_IIWA_URDF | DRAKE_IIWA_URDF
    floor = load_model('models/short_floor.urdf')
    block = load_model(BLOCK_URDF, fixed_base=False)
    set_pose(block, Pose(Point(y=0.5, z=stable_z(block, floor))))
    set_default_camera()
    dump_world()

    saved_world = WorldSaver()
    command = plan(robot, block, fixed=[floor], teleport=False)
    if (command is None) or (display is None):
        print('Unable to find a plan!')
        return

    saved_world.restore()
    update_state()
    user_input('{}?'.format(display))
    if display == 'control':
        enable_gravity()
        command.control(real_time=False, dt=0)
    elif display == 'execute':
        command.refine(num_steps=10).execute(time_step=0.005)
    elif display == 'step':
        command.step()
    else:
        raise ValueError(display)

    print('Quit?')
    wait_for_interrupt()
    disconnect()
Example #2
def test_arm_control(pr2, left_joints, arm_start):
    user_input('Control Arm?')
    real_time = False
    enable_gravity()
    p.setRealTimeSimulation(real_time)
    for _ in joint_controller(pr2, left_joints, arm_start):
        if not real_time:
            p.stepSimulation()
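
These demos all pause via user_input; a minimal sketch of that helper, assuming it simply wraps the builtin prompt (Python 2's raw_input vs. Python 3's input):

try:
    user_input = raw_input  # Python 2
except NameError:
    user_input = input  # Python 3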
Example #3
def main():
    connect(use_gui=True)

    with HideOutput():
        pr2 = load_model("models/pr2_description/pr2.urdf")
    test_clone_robot(pr2)
    test_clone_arm(pr2)

    user_input('Finish?')
    disconnect()
Example #4
def test_arm_motion(pr2, left_joints, arm_goal):
    disabled_collisions = get_disabled_collisions(pr2)
    user_input('Plan Arm?')
    arm_path = plan_joint_motion(pr2, left_joints, arm_goal, disabled_collisions=disabled_collisions)
    if arm_path is None:
        print('Unable to find an arm path')
        return
    print(len(arm_path))
    for q in arm_path:
        set_joint_positions(pr2, left_joints, q)
        #raw_input('Continue?')
        time.sleep(0.01)
Example #5
def main():
    # collect data in parallel, parameters are generated uniformly randomly in a range
    # data stored to pour_date/trials_n=10000.json
    # TODO: ulimit settings
    # https://ss64.com/bash/ulimit.html
    # https://stackoverflow.com/questions/938733/total-memory-used-by-python-process
    # import psutil
    # TODO: resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    assert (get_python_version() == 3)
    parser = argparse.ArgumentParser()
    parser.add_argument('-f',
                        '--fn',
                        default=TRAINING,
                        help='The parameter function to use.')
    parser.add_argument('-n',
                        '--num',
                        type=int,
                        default=10000,
                        help='The number of samples to collect.')
    parser.add_argument('-p',
                        '--problem',
                        required=True,
                        choices=sorted(SKILL_COLLECTORS.keys()),
                        help='The name of the skill to learn.')
    parser.add_argument('-t',
                        '--time',
                        type=int,
                        default=1 * 60,
                        help='The max planning runtime for each trial.')
    parser.add_argument('-v',
                        '--visualize',
                        action='store_true',
                        help='When enabled, visualizes execution.')
    args = parser.parse_args()
    serial = is_darwin()
    assert implies(args.visualize, serial)

    trials = get_trials(args.problem,
                        args.fn,
                        args.num,
                        max_time=args.time,
                        valid=True,
                        visualize=args.visualize,
                        verbose=serial)
    data_path = None if serial else get_data_path(args.problem, trials)
    num_cores = get_num_cores(trials, serial)
    user_input('Begin?')
    # TODO: store the generating distribution for samples and objects?

    print(SEPARATOR)
    results = run_trials(trials, data_path, num_cores=num_cores)
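
The implies helper in the assert above is presumably ordinary logical implication; a one-line sketch:

def implies(p, q):
    # p -> q is equivalent to (not p) or q
    return (not p) or q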
Example #6
def test_base_motion(pr2, base_start, base_goal):
    #disabled_collisions = get_disabled_collisions(pr2)
    set_base_values(pr2, base_start)
    user_input('Plan Base?')
    base_limits = ((-2.5, -2.5), (2.5, 2.5))
    base_path = plan_base_motion(pr2, base_goal, base_limits)
    if base_path is None:
        print('Unable to find a base path')
        return
    print(len(base_path))
    for bq in base_path:
        set_base_values(pr2, bq)
        user_input('Continue?')
Example #7
    def fn(body, pose, grasp):
        obstacles = [body] + fixed
        gripper_pose = end_effector_from_body(pose.pose, grasp.grasp_pose)
        approach_pose = approach_from_grasp(grasp.approach_pose, gripper_pose)
        draw_pose(gripper_pose, length=0.04)
        draw_pose(approach_pose, length=0.04)

        for _ in range(num_attempts):
            if USE_IKFAST:
                q_approach = sample_tool_ik(robot, approach_pose)
                if q_approach is not None:
                    set_joint_positions(robot, movable_joints, q_approach)
            else:
                set_joint_positions(robot, movable_joints, sample_fn()) # Random seed
                q_approach = inverse_kinematics(robot, grasp.link, approach_pose)
            if (q_approach is None) or any(pairwise_collision(robot, b) for b in obstacles):
                continue
            conf = BodyConf(robot, joints=movable_joints)

            if USE_IKFAST:
                q_grasp = sample_tool_ik(robot, gripper_pose, closest_only=True)
                if q_grasp is not None:
                    set_joint_positions(robot, movable_joints, q_grasp)
            else:
                q_grasp = inverse_kinematics(robot, grasp.link, gripper_pose)
            if (q_grasp is None) or any(pairwise_collision(robot, b) for b in obstacles):
                continue

            if teleport:
                path = [q_approach, q_grasp]
            else:
                conf.assign()
                #direction, _ = grasp.approach_pose
                #path = workspace_trajectory(robot, grasp.link, point_from_pose(approach_pose), -direction,
                #                                   quat_from_pose(approach_pose))
                path = plan_direct_joint_motion(robot, conf.joints, q_grasp, obstacles=obstacles,
                                                self_collisions=self_collisions)
                if path is None:
                    if DEBUG_FAILURE: user_input('Approach motion failed')
                    continue
            command = Command([BodyPath(robot, path, joints=movable_joints),
                               Attach(body, robot, grasp.link),
                               BodyPath(robot, path[::-1], joints=movable_joints, attachments=[grasp])])
            return (conf, command)
            # TODO: holding collisions
        return None
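
The fn above is a closure: robot, fixed, movable_joints, sample_fn, num_attempts, teleport, and self_collisions are bound by an enclosing factory that is not shown here. A hedged sketch of what that wrapper might look like (get_movable_joints and get_sample_fn are assumed pybullet-planning helpers):

def get_ik_fn(robot, fixed=[], teleport=False, num_attempts=10, self_collisions=True):
    movable_joints = get_movable_joints(robot)
    sample_fn = get_sample_fn(robot, movable_joints)
    def fn(body, pose, grasp):
        ...  # body as above
    return fn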
Example #8
def main(use_pr2_drake=False):
    connect(use_gui=True)
    add_data_path()

    plane = p.loadURDF("plane.urdf")
    #table_path = "table/table.urdf"
    # table_path = "models/table_collision/table.urdf"
    # table = p.loadURDF(table_path, 0, 0, 0, 0, 0, 0.707107, 0.707107)
    # table_square/table_square.urdf, cube.urdf, block.urdf, door.urdf

    pr2_urdf = DRAKE_PR2_URDF if use_pr2_drake else PR2_URDF
    with HideOutput():
        pr2 = load_model(pr2_urdf, fixed_base=True)  # TODO: suppress warnings?
    dump_body(pr2)

    base_start = (-2, -2, 0)
    base_goal = (2, 2, 0)
    arm_start = SIDE_HOLDING_LEFT_ARM
    #arm_start = TOP_HOLDING_LEFT_ARM
    #arm_start = REST_LEFT_ARM
    arm_goal = TOP_HOLDING_LEFT_ARM
    #arm_goal = SIDE_HOLDING_LEFT_ARM

    left_joints = joints_from_names(pr2, PR2_GROUPS['left_arm'])
    right_joints = joints_from_names(pr2, PR2_GROUPS['right_arm'])
    torso_joints = joints_from_names(pr2, PR2_GROUPS['torso'])
    set_joint_positions(pr2, left_joints, arm_start)
    set_joint_positions(pr2, right_joints,
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, torso_joints, [0.2])
    open_arm(pr2, 'left')
    # test_ikfast(pr2)

    p.addUserDebugLine(base_start, base_goal,
                       lineColorRGB=(1, 0, 0))  # addUserDebugText
    print(base_start, base_goal)
    if use_pr2_drake:
        test_drake_base_motion(pr2, base_start, base_goal)
    else:
        test_base_motion(pr2, base_start, base_goal)

    test_arm_motion(pr2, left_joints, arm_goal)
    # test_arm_control(pr2, left_joints, arm_start)

    user_input('Finish?')
    disconnect()
Example #9
def test_drake_base_motion(pr2, base_start, base_goal):
    # TODO: combine this with test_arm_motion
    """
    Drake's PR2 URDF has explicit base joints
    """
    disabled_collisions = get_disabled_collisions(pr2)
    base_joints = [joint_from_name(pr2, name) for name in PR2_GROUPS['base']]
    set_joint_positions(pr2, base_joints, base_start)
    user_input('Plan Base?')
    base_path = plan_joint_motion(pr2, base_joints, base_goal, disabled_collisions=disabled_collisions)
    if base_path is None:
        print('Unable to find a base path')
        return
    print(len(base_path))
    for bq in base_path:
        set_joint_positions(pr2, base_joints, bq)
        user_input('Continue?')
Example #10
def main(display='execute'):  # control | execute | step
    connect(use_gui=True)
    disable_real_time()

    with HideOutput():
        root_directory = os.path.dirname(os.path.abspath(__file__))
        robot = load_pybullet(os.path.join(root_directory, IRB6600_TRACK_URDF), fixed_base=True)
    floor = load_model('models/short_floor.urdf')
    block = load_model(BLOCK_URDF, fixed_base=False)
    floor_x = 2
    set_pose(floor, Pose(Point(x=floor_x, z=0.5)))
    set_pose(block, Pose(Point(x=floor_x, y=0, z=stable_z(block, floor))))
    # set_default_camera()
    dump_world()

    saved_world = WorldSaver()
    with LockRenderer():
        command = plan(robot, block, fixed=[floor], teleport=False)
    if (command is None) or (display is None):
        print('Unable to find a plan!')
        print('Quit?')
        wait_for_interrupt()
        disconnect()
        return

    saved_world.restore()
    update_state()
    user_input('{}?'.format(display))
    if display == 'control':
        enable_gravity()
        command.control(real_time=False, dt=0)
    elif display == 'execute':
        command.refine(num_steps=10).execute(time_step=0.002)
    elif display == 'step':
        command.step()
    else:
        raise ValueError(display)

    print('Quit?')
    wait_for_interrupt()
    disconnect()
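
The control/execute/step dispatch recurs verbatim in Examples #1 and #10; a sketch of factoring it into one helper built on the same Command methods:

def show(command, display='execute', time_step=0.005):
    if display == 'control':
        enable_gravity()
        command.control(real_time=False, dt=0)
    elif display == 'execute':
        command.refine(num_steps=10).execute(time_step=time_step)
    elif display == 'step':
        command.step()
    else:
        raise ValueError(display)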
Example #11
    wait_for_duration, enable_gravity, enable_real_time, trajectory_controller, simulate_controller, \
    add_fixed_constraint, remove_fixed_constraint, Pose, Euler, get_collision_fn, LockRenderer, user_input, has_gui, \
    disconnect

from pddlstream.algorithms.focused import solve_focused
from pddlstream.language.constants import And, print_solution
from pddlstream.language.generator import from_gen_fn, from_fn
from pddlstream.utils import read, get_file_path

try:
    from conrob_pybullet.utils.ikfast.kuka_kr6_r900.ik import sample_tool_ik
except ImportError as e:
    print('\x1b[6;30;43m' + '{}, Using pybullet ik fn instead'.format(e) +
          '\x1b[0m')
    USE_IKFAST = False
    user_input("Press Enter to continue...")
else:
    USE_IKFAST = True

PICKNPLACE_DIRECTORY = os.path.join('..', 'assembly_instances', 'picknplace')
PICKNPLACE_FILENAMES = {
    #'choreo_brick_demo': 'choreo_brick_demo.json',
    'choreo_brick_demo': 'json/brick_demo.json',
    'choreo_eth-trees_demo': 'choreo_eth-trees_demo.json',
}
GRASP_NAMES = [
    'pick_grasp_approach_plane', 'pick_grasp_plane', 'pick_grasp_retreat_plane'
]
TOOL_NAME = 'eef_tcp_frame'  # robot_tool0 | eef_base_link | eef_tcp_frame
SELF_COLLISIONS = False
MILLIMETER = 0.001
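
MILLIMETER suggests the assembly JSON files store coordinates in millimeters; a hedged conversion sketch (scale_node_points is a hypothetical helper, not from the source):

import numpy as np

def scale_node_points(points_mm):
    # Convert xyz coordinates from millimeters to meters for pybullet.
    return [MILLIMETER * np.array(p) for p in points_mm]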
Example #12
def solve_serialized(robots,
                     obstacles,
                     node_points,
                     element_bodies,
                     ground_nodes,
                     layer_from_n,
                     trajectories=[],
                     post_process=False,
                     collisions=True,
                     disable=False,
                     max_time=INF,
                     **kwargs):
    start_time = time.time()
    saver = WorldSaver()
    elements = set(element_bodies)
    elements_from_layers = compute_elements_from_layer(elements, layer_from_n)
    layers = sorted(elements_from_layers.keys())
    print('Layers:', layers)

    full_plan = []
    makespan = 0.
    removed = set()
    for layer in reversed(layers):
        print(SEPARATOR)
        print('Layer: {}'.format(layer))
        saver.restore()
        remaining = elements_from_layers[layer]
        printed = elements - remaining - removed
        draw_model(remaining, node_points, ground_nodes, color=GREEN)
        draw_model(printed, node_points, ground_nodes, color=RED)
        problem = get_pddlstream(robots,
                                 obstacles,
                                 node_points,
                                 element_bodies,
                                 ground_nodes,
                                 layer_from_n,
                                 printed=printed,
                                 removed=removed,
                                 return_home=False,
                                 trajectories=trajectories,
                                 collisions=collisions,
                                 disable=disable,
                                 **kwargs)
        layer_plan, certificate = solve_pddlstream(problem,
                                                   node_points,
                                                   element_bodies,
                                                   max_time=max_time -
                                                   elapsed_time(start_time))
        remove_all_debug()
        if layer_plan is None:
            return None

        if post_process:
            print(SEPARATOR)
            # Allows the planner to continue to check collisions
            problem.init[:] = certificate.all_facts
            #static_facts = extract_static_facts(layer_plan, ...)
            #problem.init.extend(('Order',) + pair for pair in compute_total_orders(layer_plan))
            for fact in [('print', ), ('move', )]:
                if fact in problem.init:
                    problem.init.remove(fact)
            new_layer_plan, _ = solve_pddlstream(problem,
                                                 node_points,
                                                 element_bodies,
                                                 planner=POSTPROCESS_PLANNER,
                                                 max_time=max_time -
                                                 elapsed_time(start_time))
            if (new_layer_plan
                    is not None) and (compute_duration(new_layer_plan) <
                                      compute_duration(layer_plan)):
                layer_plan = new_layer_plan
            user_input('{:.3f}->{:.3f}'.format(
                compute_duration(layer_plan),
                compute_duration(new_layer_plan)))

        # TODO: replan in a cost sensitive way
        layer_plan = apply_start(layer_plan, makespan)
        duration = compute_duration(layer_plan)
        makespan += duration
        print(
            '\nLength: {} | Start: {:.3f} | End: {:.3f} | Duration: {:.3f} | Makespan: {:.3f}'
            .format(len(layer_plan), compute_start(layer_plan),
                    compute_end(layer_plan), duration, makespan))
        full_plan.extend(layer_plan)
        removed.update(remaining)
    print(SEPARATOR)
    print_plan(full_plan)
    return full_plan, None
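
compute_elements_from_layer is not shown; one plausible implementation, assuming each element is a pair of node indices and is assigned the highest layer among its endpoints:

from collections import defaultdict

def compute_elements_from_layer(elements, layer_from_n):
    elements_from_layers = defaultdict(set)
    for element in elements:
        elements_from_layers[max(layer_from_n[n] for n in element)].add(element)
    return elements_from_layers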
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('paths', nargs='*', help='Paths to the data.')
    #parser.add_argument('-a', '--active', type=int, default=0, # None
    #                    help='The number of active samples to collect')
    parser.add_argument('-l', '--learner', default=None,
                        help='Path to the learner that should be used')
    parser.add_argument('-n', '--num_trials', type=int, default=100,
                        help='The number of samples to collect')
    parser.add_argument('-s', '--save', action='store_true',
                        help='Whether to save the learners')
    parser.add_argument('-r', '--num_rounds', type=int, default=1,
                        help='The number of rounds to collect')
    #parser.add_argument('-t', '--num_train', type=int, default=None,
    #                    help='The size of the training set')
    args = parser.parse_args()

    # TODO: be careful that paging isn't altering the data
    # TODO: don't penalize if the learner identifies that it can't make a good prediction
    # TODO: use a different set of randomized parameters for train and test

    include_none = False
    serial = is_darwin()

    #training_sizes = inclusive_range(50, 500, 25)
    #training_sizes = inclusive_range(25, 100, 5)
    #training_sizes = inclusive_range(25, 100, 5)
    training_sizes = inclusive_range(10, 50, 5)
    #training_sizes = inclusive_range(100, 1000, 100)
    #training_sizes = [20]
    #training_sizes = [1500]

    #kernels = ['RBF', 'Matern52', 'MLP']
    kernels = ['MLP']

    #hyperparameters = [None]
    #hyperparameters = [True]
    hyperparameters = [True, None] # None,

    query_type = BEST # BEST | CONFIDENT | REJECTION | ACTIVE # type of query used to evaluate the learner
    is_adaptive = False
    max_test = 50 #
    #alphas = np.linspace(0.0, 0.9, num=5, endpoint=True)
    alphas = [0.0, .8, .9, .99]
    #alphas = [None]  # Use the default (i.e. GP parameters)

    use_vars = [True]
    binary = False
    split = UNIFORM # BALANCED

    # Omitting failed labels is okay because they will never be executed
    algorithms = []
    #algorithms += [(Algorithm(BATCH_GP, kernel=kernel, hyperparameters=hype, use_var=use_var), [num_train])
    #               for num_train, kernel, hype, use_var in product(training_sizes, kernels, hyperparameters, use_vars)]
    algorithms += [(Algorithm(STRADDLE_GP, kernel, hype, use_var), training_sizes)
                   for kernel, hype, use_var in product(kernels, hyperparameters, use_vars)]
    #algorithms += [(Algorithm(rf_model, p_explore=None, use_var=use_var), [num_train])
    #               for rf_model, num_train, use_var in product(RF_MODELS, training_sizes, use_vars)]
    #algorithms += [(Algorithm(nn_model, p_explore=None), [num_train])
    #               for nn_model, num_train in product(NN_MODELS, training_sizes)]
    #algorithms += [(Algorithm(RANDOM), None), (Algorithm(DESIGNED), None)]
    print('Algorithms:', algorithms)
    print('Split:', split)

    trials_per_round = sum(1 if train_sizes is None else
                           (train_sizes[-1] - train_sizes[0] + len(train_sizes))
                           for _, train_sizes in algorithms)
    num_experiments = args.num_rounds*trials_per_round

    date_name = datetime.datetime.now().strftime(DATE_FORMAT)
    size_str = '[{},{}]'.format(training_sizes[0], training_sizes[-1])
    #size_str = '-'.join(map(str, training_sizes))
    experiments_name = '{}_r={}_t={}_n={}'.format(date_name, args.num_rounds, size_str, args.num_trials) #'19-08-09_21-44-58_r=5_t=[10,150]_n=1'#
    #experiments_name = 't={}'.format(args.num_rounds)
    # TODO: could include OS and username if desired

    domain = load_data(args.paths)
    print()
    print(domain)
    X, Y, W = domain.get_data(include_none=include_none)
    print('Total number of examples:', len(X))
    if binary:
        # NN can fit perfectly when binary
        # Binary seems to be outperforming w/o
        Y = threshold_scores(Y)

    max_train = len(X) - max_test #min(max([0] + [active_sizes[0] for _, active_sizes in algorithms
                     #          if active_sizes is not None]), len(X))

    #parameters = {
    #    'include None': include_none,
    #    'binary': binary,
    #    'split': split,
    #}

    print('Name:', experiments_name)
    print('Experiments:', num_experiments)
    print('Max train:', max_train)
    print('Include None:', include_none)
    print('Examples: n={}, d={}'.format(*X.shape))
    print('Binary:', binary)
    print('Estimated hours:', num_experiments * SEC_PER_EXPERIMENT / HOURS_TO_SECS)
    user_input('Begin?')
    # TODO: residual learning for sim to real transfer
    # TODO: can always be conservative and add sim negative examples

    # TODO: combine all data to write in one folder
    data_dir = os.path.join(DATA_DIRECTORY, domain.name) # EXPERIMENT_DIRECTORY
    experiments_dir = os.path.join(data_dir, experiments_name)
    mkdir(experiments_dir)
    start_time = time.time()
    experiments = []
    for round_idx in range(args.num_rounds):
        round_dir = os.path.join(data_dir, experiments_name, str(round_idx))
        mkdir(round_dir)
        seed = hash(time.time())
        train_test_file = os.path.join(round_dir, 'data.pk3')
        if not os.path.exists(train_test_file):
            X_train, Y_train, X_test, Y_test = split_data(X, Y, split, max_train)
            X_test, Y_test = X_test[:max_test], Y_test[:max_test]
            write_pickle(train_test_file, (X_train, Y_train, X_test, Y_test))
        else:
            X_train, Y_train, X_test, Y_test = read_pickle(train_test_file)

        print('Train examples:', X_train.shape)
        print('Test examples:', X_test.shape)
        # TODO: need to be super careful when running with multiple contexts

        for algorithm, active_sizes in algorithms:
            # active_sizes = [first #trainingdata selected from X_train, #active exploration + #trainingdata]
            print(SEPARATOR)
            print('Round: {} | {} | Seed: {} | Sizes: {}'.format(round_idx, algorithm, seed, active_sizes))
            # TODO: allow keyboard interrupt
            if active_sizes is None:
                learner = algorithm.name
                active_size = None
                train_confusion = None
                experiments.append(evaluate_learner(domain, seed, train_confusion, X_test, Y_test, algorithm, learner,
                                                    active_size, args.num_trials, alphas,
                                                    serial))
            else:
                # [10 20 25] take first 10 samples from X_train to train the model, 10 samples chosen actively
                # sequentially + evaluate model, 5 samples chosen actively sequentially + evaluate model
                # Could always keep around all the examples and retrain
                # TODO: segfaults when this runs in parallel
                # TODO: may be able to retrain in parallel if I set OPENBLAS_NUM_THREADS
                learner_prior_nx = 0
                '''
                if algorithm.hyperparameters:
                    if domain.skill == 'pour':
                        learner_file = '/Users/ziw/ltamp_pr2/data/pour_19-06-13_00-59-21/19-08-09_19-30-01_r=10_t=[50,400]_n=1/{}/gp_active_mlp_true_true.pk3'.format(
                            round_idx)
                    elif domain.skill == 'scoop':
                        learner_file = '/Users/ziw/ltamp_pr2/data/scoop_19-06-10_20-16-59_top-diameter/19-08-09_19-34-56_r=10_t=[50,400]_n=1/{}/gp_active_mlp_true_true.pk3'.format(
                            round_idx)
                    learner = read_pickle(learner_file)
                    learner_prior_nx = learner.nx
                    learner.retrain(newx=X_train[:active_sizes[0]], newy=Y_train[:active_sizes[0], None])
                else:
                '''
                learner, train_confusion = create_learner(domain, X_train, Y_train, split, algorithm,
                                                          num_train=active_sizes[0], query_type=query_type,
                                                          is_adaptive=is_adaptive)

                if algorithm.name == STRADDLE_GP:
                    X_select, Y_select = X_train[active_sizes[0]:], Y_train[active_sizes[0]:]

                for active_size in active_sizes:
                    num_active = active_size - learner.nx + learner_prior_nx  # learner.nx is len(learner.xx)
                    print('\nRound: {} | {} | Seed: {} | Size: {} | Active: {}'.format(
                        round_idx, algorithm, seed, active_size, num_active))
                    if algorithm.name == STRADDLE_GP:
                        X_select, Y_select = active_learning_discrete(learner, num_active, X_select, Y_select)
                    #if args.save:
                    save_learner(round_dir, learner)
                    experiments.append(evaluate_learner(domain, seed, None, X_test, Y_test,
                                                        algorithm, learner,
                                                        active_size, args.num_trials, alphas,
                                                        serial))
                    save_experiments(experiments_dir, experiments)

    print(SEPARATOR)
    if experiments:
        save_experiments(experiments_dir, experiments)
        plot_experiments(domain, experiments_name, experiments_dir, experiments,
                         include_none=False)
                         #include_none=include_none)
        print('Experiments:', experiments_dir)
    print('Total experiments:', len(experiments))
    print('Total hours:', elapsed_time(start_time) / HOURS_TO_SECS)
Example #14
def main():
    connect(use_gui=True)
    add_data_path()

    set_camera(0, -30, 1)
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    cup = load_model('models/cup.urdf', fixed_base=True)
    #set_point(cup, Point(z=stable_z(cup, plane)))
    set_point(cup, Point(z=.2))
    set_color(cup, (1, 0, 0, .4))
    #wait_for_interrupt()

    num_droplets = 100
    #radius = 0.025
    #radius = 0.005
    radius = 0.0025
    # TODO: more efficient ways to make all of these
    droplets = [create_sphere(radius, mass=0.01)
                for _ in range(num_droplets)]  # kg
    cup_thickness = 0.001

    lower, upper = get_lower_upper(cup)
    print(lower, upper)
    buffer = cup_thickness + radius
    lower = np.array(lower) + buffer * np.ones(len(lower))
    upper = np.array(upper) - buffer * np.ones(len(upper))

    limits = list(zip(lower, upper))  # zip() is a lazy iterator in Python 3
    x_range, y_range = limits[:2]
    z = upper[2] + 0.1
    #x_range = [-1, 1]
    #y_range = [-1, 1]
    #z = 1
    for droplet in droplets:
        x = np.random.uniform(*x_range)
        y = np.random.uniform(*y_range)
        set_point(droplet, Point(x, y, z))

    for i, droplet in enumerate(droplets):
        x, y = np.random.normal(0, 1e-3, 2)
        set_point(droplet, Point(x, y, z + i * (2 * radius + 1e-3)))

    #dump_world()
    wait_for_interrupt()

    #user_input('Start?')
    #dt = 1. / 240
    dt = 0.01
    #dt = 0
    print('dt:', dt)
    enable_gravity()
    simulate_for_duration(5.0, dt=dt)

    # enable_real_time()
    # try:
    #     while True:
    #         enable_gravity() # enable_real_time requires a command
    #         #time.sleep(dt)
    # except KeyboardInterrupt:
    #     pass
    # print()

    #time.sleep(1.0)
    #wait_for_interrupt()
    user_input('Finish?')
    disconnect()
Example #15
def train_parallel(args, n=1):
    from extrusion.run import plan_extrusion
    assert SKIP_PERCENTAGE == 0
    initial_time = time.time()

    problems = sorted(set(enumerate_problems()) - set(EXCLUDE))
    #problems = ['four-frame']
    #problems = ['simple_frame', 'topopt-101_tiny', 'topopt-100_S1_03-14-2019_w_layer']

    algorithms = list(ALGORITHMS)
    if args.disable:
        for algorithm in LOOKAHEAD_ALGORITHMS:
            if algorithm in algorithms:
                algorithms.remove(algorithm)
    #algorithms = ['regression']

    heuristics = HEURISTICS
    #heuristics = DISTANCE_HEURISTICS + COST_HEURISTICS

    seeds = list(range(args.num))
    if n is None:
        n = len(seeds)
    groups = list(chunks(seeds, n=n))

    print('Chunks: {}'.format(len(groups)))
    print('Problems ({}): {}'.format(len(problems), problems))
    #problems = [path for path in problems if 'simple_frame' in path]
    print('Algorithms ({}): {}'.format(len(algorithms), algorithms))
    print('Heuristics ({}): {}'.format(len(heuristics), heuristics))
    jobs = [[
        Configuration(seed, problem, algorithm, heuristic, args.max_time,
                      args.cfree, args.disable, args.stiffness, args.motions,
                      args.ee_only) for seed, algorithm, heuristic in product(
                          group, algorithms, heuristics)
    ] for problem, group in product(problems, groups)]
    # TODO: separate out the algorithms again
    # TODO: print the size per job
    print('Jobs: {}'.format(len(jobs)))

    serial = is_darwin()
    available_cores = cpu_count()
    num_cores = max(1, min(1 if serial else available_cores - 4, len(jobs)))
    print('Max Cores:', available_cores)
    print('Serial:', serial)
    print('Using Cores:', num_cores)
    date = datetime.datetime.now().strftime(DATE_FORMAT)
    filename = '{}.pk{}'.format(date, get_python_version())
    path = os.path.join(EXPERIMENTS_DIR, filename)
    print('Data path:', path)

    user_input('Begin?')
    start_time = time.time()
    timeouts = 0
    pool = Pool(processes=num_cores)  # , initializer=mute)
    generator = pool.imap_unordered(plan_extrusion, jobs, chunksize=1)
    results = []
    while True:
        # TODO: randomly sort instead
        last_time = time.time()
        try:
            for config, data in next(generator):  # timeout=2 * args.max_time
                results.append((config, data))
                print('{}/{} completed | {:.3f} seconds | timeouts: {} | {}'.
                      format(len(results), len(jobs), elapsed_time(start_time),
                             timeouts,
                             datetime.datetime.now().strftime(DATE_FORMAT)))
                print(config, data)
            if results:
                write_pickle(path, results)
                print('Saved', path)
        except StopIteration:
            break
        # except TimeoutError:
        #     # TODO: record this as a failure? Nothing is saved though...
        #     timeouts += 1
        #     #traceback.print_exc()
        #     print('Error! Timed out after {:.3f} seconds'.format(elapsed_time(last_time)))
        #     break # This kills all jobs
        #     #continue # This repeats jobs until success
    print('Total time:', elapsed_time(initial_time))
    return results
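
chunks splits the seed list into fixed-size groups for the worker pool; a minimal sketch, assuming the usual slicing helper:

def chunks(sequence, n=1):
    # Yield successive length-n slices; the final slice may be shorter.
    for start in range(0, len(sequence), n):
        yield sequence[start:start + n]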
Example #16
def main():
    parser = create_parser()
    args = parser.parse_args()
    print(args)

    # https://stackoverflow.com/questions/15314189/python-multiprocessing-pool-hangs-at-join
    # https://stackoverflow.com/questions/39884898/large-amount-of-multiprocessing-process-causing-deadlock
    # TODO: alternatively don't destroy the world
    num_cores = max(1, cpu_count() - SPARE_CORES)
    json_path = os.path.abspath(
        os.path.join(EXPERIMENTS_DIRECTORY, '{}.json'.format(get_date())))

    #memory_per_core = float(MAX_RAM) / num_cores # gigabytes
    #set_soft_limit(resource.RLIMIT_AS, int(BYTES_PER_GIGABYTE * memory_per_core)) # bytes
    #set_soft_limit(resource.RLIMIT_CPU, 2*MAX_TIME) # seconds
    # RLIMIT_MEMLOCK, RLIMIT_STACK, RLIMIT_DATA

    print('Results:', json_path)
    print('Num Cores:', num_cores)
    #print('Memory per Core: {:.2f}'.format(memory_per_core))
    print('Tasks: {} | {}'.format(len(TASK_NAMES), TASK_NAMES))
    print('Policies: {} | {}'.format(len(POLICIES), POLICIES))
    print('Num Trials:', N_TRIALS)
    num_experiments = len(TASK_NAMES) * len(POLICIES) * N_TRIALS
    print('Num Experiments:', num_experiments)
    max_parallel = math.ceil(float(num_experiments) / num_cores)
    print('Estimated duration: {:.2f} hours'.format(
        MEAN_TIME_PER_TRIAL * max_parallel / HOURS_TO_SECS))
    user_input('Begin?')
    print(SEPARATOR)

    print('Creating problems')
    start_time = time.time()
    problems = create_problems(args)
    experiments = [
        {
            'problem': copy.deepcopy(problem),
            'policy': policy
        }  #, 'args': args}
        for problem in problems for policy in POLICIES
    ]
    print('Created {} problems and {} experiments in {:.3f} seconds'.format(
        len(problems), len(experiments), elapsed_time(start_time)))
    print(SEPARATOR)

    ensure_dir(EXPERIMENTS_DIRECTORY)
    safe_rm_dir(TEMP_DIRECTORY)
    ensure_dir(TEMP_DIRECTORY)
    start_time = time.time()
    results = []
    try:
        for result in map_parallel(run_experiment,
                                   experiments,
                                   num_cores=num_cores):
            results.append(result)
            print('{}\nExperiments: {} / {} | Time: {:.3f}'.format(
                SEPARATOR, len(results), len(experiments),
                elapsed_time(start_time)))
            print('Experiment:', str_from_object(result['experiment']))
            print('Outcome:', str_from_object(result['outcome']))
            write_json(json_path, results)
    #except BaseException as e:
    #    traceback.print_exc() # e
    finally:
        if results:
            write_json(json_path, results)
        print(SEPARATOR)
        print('Saved:', json_path)
        print('Results:', len(results))
        print('Duration / experiment: {:.3f}'.format(
            num_cores * elapsed_time(start_time) / len(experiments)))
        print('Duration: {:.2f} hours'.format(
            elapsed_time(start_time) / HOURS_TO_SECS))
        safe_rm_dir(TEMP_DIRECTORY)
        # TODO: dump results automatically?
    return results
Example #17
File: run_pr2.py  Project: lyltc1/LTAMP
def get_input(message, options):
    full_message = '{} [{}]: '.format(message, ','.join(options))
    response = user_input(full_message)
    while response not in options:
        response = user_input(full_message)
    return response
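
Usage: the call blocks until the operator enters one of the allowed options, e.g.:

response = get_input('Execute the plan?', ('y', 'n'))
if response == 'y':
    pass  # proceed with execution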
Example #18
File: run_pr2.py  Project: lyltc1/LTAMP
def main():
    assert (get_python_version() == 2)  # ariadne has ROS with python2
    parser = argparse.ArgumentParser()
    parser.add_argument('-p',
                        '--problem',
                        default='test_pick',
                        help='The name of the problem to solve.')
    parser.add_argument(
        '-e',
        '--execute',
        action='store_true',
        help='When enabled, executes the plan using physics simulation.')
    parser.add_argument(
        '-c',
        '--cfree',
        action='store_true',
        help='When enabled, disables collision checking (for debugging).')
    parser.add_argument(
        '-l',
        '--learning',
        action='store_true',
        help='When enabled, uses learned generators when applicable.')
    parser.add_argument(
        '-v',
        '--visualize_planning',
        action='store_true',
        help=
        'When enabled, visualizes planning rather than the world (for debugging).'
    )
    args = parser.parse_args()

    task_fn = get_task_fn(PROBLEMS, args.problem)
    task = task_fn()
    print('Task:', task.arms)

    ros_world = ROSWorld(sim_only=False, visualize=not args.visualize_planning)
    reset_robot(task, ros_world)
    ros_world.perception.wait_for_update()
    if task.use_kitchen:
        add_table_surfaces(ros_world)
    ros_world.sync_controllers()  # AKA update sim robot joint positions
    ros_world.perception.update_simulation()
    add_holding(task, ros_world)

    print('Surfaces:', ros_world.perception.get_surfaces())
    print('Items:', ros_world.perception.get_items())
    draw_names(ros_world)
    draw_forward_reachability(ros_world, task.arms)

    planning_world = PlanningWorld(task, visualize=args.visualize_planning)
    planning_world.load(ros_world)
    print(planning_world)
    plan = plan_actions(planning_world, collisions=not args.cfree)
    if plan is None:
        print('Failed to find a plan')
        user_input('Finish?')
        return

    if not args.execute and not review_plan(task, ros_world, plan):
        return
    if args.cfree:
        print('Aborting execution')
        return

    #sparsify_plan(plan)
    simulator = ros_world.alternate_controller if args.execute else None
    if simulator is not None:
        simulator.set_gravity()
    finished = execute_plan(ros_world, plan, joint_speed=0.25)
    print('Finished:', finished)
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('paths', nargs='*', help='Paths to the data.')
    #parser.add_argument('-a', '--active', type=int, default=0, # None
    #                    help='The number of active samples to collect')
    parser.add_argument(
        '-d',
        '--deterministic',
        action='store_true',
        help='Whether to deterministically create training splits')
    parser.add_argument('-n',
                        '--num_trials',
                        type=int,
                        default=-1,
                        help='The number of samples to collect')
    parser.add_argument('-s',
                        '--save',
                        action='store_true',
                        help='Whether to save the learners')
    parser.add_argument('-r',
                        '--num_rounds',
                        type=int,
                        default=1,
                        help='The number of rounds to collect')
    parser.add_argument('-t',
                        '--test',
                        action='store_true',
                        help='Whether to save the data')
    parser.add_argument('-v',
                        '--visualize',
                        action='store_true',
                        help='When enabled, visualizes execution.')
    args = parser.parse_args()

    # TODO: be careful that paging isn't altering the data
    # TODO: use a different set of randomized parameters for train and test

    serial = is_darwin()
    visualize = serial and args.visualize
    assert implies(visualize, serial)
    num_trials = get_max_cores(
        serial) if args.num_trials < 0 else args.num_trials

    ##################################################

    #train_sizes = inclusive_range(50, 200, 10) # Best
    #train_sizes = inclusive_range(50, 400, 10) # F1
    #train_sizes = inclusive_range(25, 400, 25)
    #train_sizes = inclusive_range(50, 100, 5) # Real
    #train_sizes = inclusive_range(100, 200, 5)
    #train_sizes = inclusive_range(10, 250, 5)
    #train_sizes = inclusive_range(35, 70, 5)
    #train_sizes = inclusive_range(5, 50, 5)
    #train_sizes = inclusive_range(40, 80, 5)
    #train_sizes = inclusive_range(100, 1000, 100)
    #train_sizes = [50]
    #train_sizes = [250]
    train_sizes = [1000]
    #train_sizes = [327] # train + test
    #train_sizes = inclusive_range(5, 150, 25)
    #train_sizes = [100]

    #kernels = ['RBF', 'Matern52', 'MLP']
    kernels = ['MLP']

    hyperparams = [None]
    #hyperparams = [True]
    #hyperparams = [None, True]

    query_type = BEST  # BEST | CONFIDENT | REJECTION | ACTIVE # type of query used to evaluate the learner

    include_none = False
    binary = False

    # 0 => no transfer
    # 1 => mean transfer
    # 2 => kernel transfer
    # 3 => both transfer
    transfer_weights = [None]
    #transfer_weights = list(range(4))
    #transfer_weights = [0, 1]
    #transfer_weights = [3]
    #transfer_weights = np.around(np.linspace(0.0, 1.0, num=1+5, endpoint=True), decimals=3) # max 10 colors
    #transfer_weights = list(range(1, 1+3))

    #split = UNIFORM # BALANCED
    #print('Split:', split)
    #parameters = {
    #    'include None': include_none,
    #    'binary': binary,
    #    'split': split,
    #}

    # Omitting failed labels is okay because they will never be executed
    algorithms = []
    #algorithms += [(Algorithm(nn_model, label='NN'), [num])
    #              for nn_model, num in product(NN_MODELS, train_sizes)]
    #algorithms += [(Algorithm(RANDOM), None), (Algorithm(DESIGNED), None)]

    #algorithms += [(Algorithm(RF_CLASSIFIER, variance=False, transfer_weight=tw, label='RF'), [num])
    #                for num, tw in product(train_sizes, [None])] # transfer_weights
    #algorithms += [(Algorithm(RF_REGRESSOR, variance=False, transfer_weight=tw, label='RF'), [num])
    #                for num, tw in product(train_sizes, [None])] # transfer_weights
    #algorithms += [(Algorithm(BATCH_RF, variance=True, transfer_weight=tw, label='RF'), [num])
    #                for num, tw in product(train_sizes, [None])] # transfer_weights
    #algorithms += [(Algorithm(BATCH_MAXVAR_RF, variance=True, transfer_weight=tw), train_sizes)
    #                for tw in product(use_vars, [None])] # transfer_weights
    #algorithms += [(Algorithm(BATCH_STRADDLE_RF, variance=True, transfer_weight=tw), train_sizes)
    #                for tw, in product([None])] # transfer_weights

    use_vars = [True]
    # STRADDLE is better than MAXVAR when the learner has a good estimate of uncertainty
    algorithms += [
        (Algorithm(BATCH_GP, kernel, hype, use_var, tw,
                   label='GP'), [num])  # label='GP-{}'.format(kernel)
        for num, kernel, hype, use_var, tw in product(
            train_sizes, kernels, hyperparams, use_vars, transfer_weights)
    ]
    #algorithms += [(Algorithm(BATCH_MAXVAR_GP, kernel, hype, True, tw, label='GP-Var'), train_sizes)
    #                for kernel, hype, tw in product(kernels, hyperparams, transfer_weights)]
    #algorithms += [(Algorithm(BATCH_STRADDLE_GP, kernel, hype, True, tw, label='GP-LSE'), train_sizes)
    #                for kernel, hype, tw in product(kernels, hyperparams, transfer_weights)] # default active
    #algorithms += [(Algorithm(BATCH_STRADDLE_GP, kernel, hype, True, tw, label='GP-LSE2'), train_sizes)
    #                for kernel, hype, tw in product(kernels, hyperparams, transfer_weights)] # active control only

    # algorithms += [(Algorithm(MAXVAR_GP, kernel, hype, use_var), train_sizes)
    #                for kernel, hype, use_var in product(kernels, hyperparams, use_vars)]
    #algorithms += [(Algorithm(STRADDLE_GP, kernel, hype, use_var, tw), train_sizes)
    #                for kernel, hype, use_var, tw in product(kernels, hyperparams, use_vars, transfer_weights)]

    #batch_sizes = inclusive_range(train_sizes[0], 90, 10)
    #step_size = 10 # TODO: extract from train_sizes
    #final_size = train_sizes[-1]
    # Previously didn't have use_var=True
    # algorithms += [(Algorithm(BATCH_STRADDLE_GP, kernel, hyperparameters=batch_size, variance=True, transfer_weight=tw),
    #                 inclusive_range(batch_size, final_size, step_size))
    #                for kernel, tw, batch_size in product(kernels, transfer_weights, batch_sizes)]
    # algorithms += [(Algorithm(BATCH_STRADDLE_RF, hyperparameters=batch_size, variance=True, transfer_weight=tw),
    #                 inclusive_range(batch_size, final_size, step_size))
    #                 for tw, batch_size in product(transfer_weights, batch_sizes)]

    print('Algorithms:', algorithms)

    ##################################################

    real_world = not args.paths
    transfer_domain = load_data(TRANSFER_DATASETS, verbose=False)
    transfer_algorithm = None
    if real_world and transfer_weights != [None]:
        #assert transfer_weights[0] is not None
        transfer_data = transfer_domain.create_dataset(
            include_none=include_none, binary=binary)
        transfer_algorithm = Algorithm(BATCH_GP,
                                       kernel=kernels[0],
                                       variance=use_vars[0])

    validity_learner = None
    #validity_learner = create_validity_classifier(transfer_domain)

    ##################################################

    train_paths = args.paths
    if real_world:
        train_paths = SCOOP_TRAIN_DATASETS  # TRAIN_DATASETS
        #train_paths = TRANSFER_DATASETS
        #train_paths = TRAIN_DATASETS + TRANSFER_DATASETS # Train before transfer
    #scale_paths = TRAIN_DATASETS + TEST_DATASETS
    scale_paths = None
    print(SEPARATOR)
    print('Train paths:', train_paths)
    domain = load_data(train_paths)
    print()
    print(domain)
    all_data = domain.create_dataset(include_none=include_none,
                                     binary=binary,
                                     scale_paths=scale_paths)
    #all_data.results = all_data.results[:1000]

    num_failed = 0
    #num_failed = 100
    failed_domain = transfer_domain if real_world else domain
    failed_results = randomize(
        result for result in failed_domain.results
        if not result.get('success', False))[:num_failed]
    #failed_data = Dataset(domain, failed_results, **all_data.kwargs)

    test_paths = SCOOP_TEST_DATASETS  # TEST_DATASETS | SCOOP_TEST_DATASETS
    #test_paths = None
    if real_world and not (set(train_paths) & set(test_paths)):
        #assert not set(train_paths) & set(test_paths)
        #max_test = 0
        test_data = load_data(test_paths).create_dataset(
            include_none=False, binary=binary, scale_paths=scale_paths)
    else:
        #assert scale_paths is None # TODO: max_train will be too small otherwise
        test_paths = test_data = None
    print(SEPARATOR)
    print('Test paths:', test_paths)

    all_active_data = None
    #if real_world:
    #    all_active_data = load_data(ACTIVE_DATASETS).create_dataset(include_none=True, binary=binary, scale_paths=scale_paths)

    # TODO: could include OS and username if desired
    date_name = datetime.datetime.now().strftime(DATE_FORMAT)
    size_str = '[{},{}]'.format(train_sizes[0], train_sizes[-1])
    #size_str = '-'.join(map(str, train_sizes))
    experiments_name = '{}_r={}_t={}_n={}'.format(date_name, args.num_rounds,
                                                  size_str, num_trials)

    trials_per_round = sum(
        1 if train_sizes is None else (train_sizes[-1] - train_sizes[0] +
                                       len(train_sizes))
        for _, train_sizes in algorithms)
    num_experiments = args.num_rounds * trials_per_round
    max_train = min(
        max([0] + [
            active_sizes[0]
            for _, active_sizes in algorithms if active_sizes is not None
        ]), len(all_data))
    max_test = min(len(all_data) - max_train, 1000)

    ##################################################

    # #features = ['bowl_height']
    # features = ['spoon_height']
    # #features = ['bowl_height', 'spoon_height']
    # X, Y, _ = all_data.get_data()
    # #indices = [domain.inputs.index(feature) for feature in features]
    # #X = X[:,indices]
    # X = [[result[FEATURE][name] for name in features] for result in all_data.results]
    # from sklearn.linear_model import LinearRegression
    # model = LinearRegression(fit_intercept=True, normalize=False)
    # model.fit(X, Y)
    # #print(model.get_params())
    # print(model.coef_.tolist(), model.intercept_)
    # print(model.score(X, Y))

    #data_dir = os.path.join(DATA_DIRECTORY, domain.name) # EXPERIMENT_DIRECTORY
    data_dir = os.path.abspath(os.path.join(domain.name, os.path.pardir))
    experiments_dir, data_path = None, None
    if not args.test or not serial:
        experiments_dir = os.path.join(data_dir, experiments_name)
        data_path = os.path.join(
            experiments_dir, 'experiments.pk{}'.format(get_python_version()))

    ##################################################

    print(SEPARATOR)
    print('Name:', experiments_name)
    print('Experiments:', num_experiments)
    print('Experiment dir:', experiments_dir)
    print('Data path:', data_path)
    print('Examples:', len(all_data))
    print('Valid:',
          sum(result.get('valid', True) for result in all_data.results))
    print('Success:',
          sum(result.get('success', False) for result in all_data.results))
    print(
        'Scored:',
        sum(
            result.get('score', None) is not None
            for result in all_data.results))
    print('Max train:', max_train)
    print('Max test:', max_test)
    print('Include None:', include_none)
    print('Examples: n={}, d={}'.format(len(all_data), domain.dx))
    print('Binary:', binary)
    print('Serial:', serial)
    print('Estimated hours: {:.3f}'.format(num_experiments *
                                           SEC_PER_EXPERIMENT / HOURS_TO_SECS))
    user_input('Begin?')

    ##################################################

    experiments = []
    if experiments_dir is not None:
        mkdir(experiments_dir)
        # if os.path.exists(data_path):
        #     experiments.extend(read_pickle(data_path))

    # TODO: embed in a KeyboardInterrupt to allow early termination
    start_time = time.time()
    for round_idx in range(args.num_rounds):
        seed = round_idx if args.deterministic else hash(
            time.time())  # vs just time.time()?
        random.seed(seed)
        all_data.shuffle()
        if test_paths is None:  # cannot use test_data
            #test_data, train_data = split_data(all_data, max_test)
            train_data = test_data = all_data  # Training performance
        else:
            train_data = all_data

        transfer_learner = None
        if transfer_algorithm is not None:
            round_data, _ = transfer_data.partition(index=1000)
            transfer_learner, _ = create_learner(transfer_domain,
                                                 round_data,
                                                 transfer_algorithm,
                                                 verbose=True)
            transfer_learner.retrain()

        print(SEPARATOR)
        print('Round {} | Train examples: {} | Test examples: {}'.format(
            round_idx, len(train_data), len(test_data)))
        for algorithm, active_sizes in algorithms:
            # active_sizes = [first #trainingdata selected from X_train, #active exploration + #trainingdata]
            print(SEPARATOR)
            print('Round: {} | {} | Seed: {} | Sizes: {}'.format(
                round_idx, algorithm, seed, active_sizes))
            # TODO: allow keyboard interrupt
            if active_sizes is None:
                learner = algorithm.name
                active_size = train_confusion = None
                experiments.append(
                    evaluate_learner(domain, seed, train_confusion, test_data,
                                     algorithm, learner, active_size,
                                     num_trials, serial, args.visualize))
                continue
            # [10 20 25] take first 10 samples from X_train to train the model, 10 samples chosen actively
            # sequentially + evaluate model, 5 samples chosen actively sequentially + evaluate model
            # Could always keep around all the examples and retrain
            # TODO: segfaults when this runs in parallel
            # TODO: may be able to retrain in parallel if I set OPENBLAS_NUM_THREADS
            num_batch = active_sizes[0]
            batch_data, active_data = train_data.partition(num_batch)
            if all_active_data is not None:
                active_data = all_active_data.clone()

            #batch_data.results.extend(failed_results)
            learner, train_confusion = create_learner(
                domain,
                batch_data,
                algorithm,  # alphas,
                query_type=query_type,
                verbose=True)
            learner.validity_learner = validity_learner
            if transfer_learner is not None:
                learner.sim_model = transfer_learner.model
            learner.retrain()
            for active_size in active_sizes:
                num_active = active_size - (learner.nx - len(failed_results))
                print('\nRound: {} | {} | Seed: {} | Size: {} | Active: {}'.
                      format(round_idx, algorithm, seed, active_size,
                             num_active))
                if algorithm.name in CONTINUOUS_ACTIVE_GP:
                    active_learning(learner, num_active, visualize=visualize)
                    #active_learning(learner, num_active, discrete_feature=True, random_feature=False)
                    #active_learning_discrete(learner, active_data, num_active, random_feature=False)
                elif algorithm.name in BATCH_ACTIVE:
                    active_learning_discrete(learner, active_data, num_active)
                    #active_learning(learner, num_active, discrete_feature=True, random_feature=True)
                    #active_learning_discrete(learner, active_data, num_active, random_feature=True)
                #if round_dir is not None:
                #    save_learner(round_dir, learner)
                if args.save:
                    learner.save(data_dir)
                experiments.append(
                    evaluate_learner(domain, seed, train_confusion, test_data,
                                     algorithm, learner, active_size,
                                     num_trials, serial, args.visualize))
                save_experiments(data_path, experiments)

    print(SEPARATOR)
    if experiments:
        save_experiments(data_path, experiments)
        plot_experiments(domain,
                         experiments_name,
                         experiments_dir,
                         experiments,
                         include_none=False)
        print('Experiments: {}'.format(experiments_dir))
    print('Total experiments: {}'.format(len(experiments)))
    print('Total hours: {:.3f}'.format(
        elapsed_time(start_time) / HOURS_TO_SECS))
Example #20
def main(execute='execute'):
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    #parser.add_argument('-display', action='store_true', help='enable viewer.')
    args = parser.parse_args()
    #display = args.display
    display = True

    connect(use_gui=args.viewer)
    robot, block = load_world()

    #robot2 = clone_body(robot)
    #block2 = clone_body(block)
    #dump_world()

    saved_world = WorldSaver()
    dump_world()

    ss_problem = ss_from_problem(robot,
                                 movable=[block],
                                 teleport=False,
                                 movable_collisions=True)
    #ss_problem = ss_problem.debug_problem()
    #print(ss_problem)

    t0 = time.time()
    plan, evaluations = dual_focused(ss_problem, verbose=True)
    # plan, evaluations = incremental(ss_problem, verbose=True)
    print_plan(plan, evaluations)
    print(time.time() - t0)
    if (not display) or (plan is None):
        disconnect()
        return

    paths = []
    for action, params in plan:
        if action.name == 'place':
            paths += params[-1].reverse().body_paths
        elif action.name in ['move_free', 'move_holding', 'pick']:
            paths += params[-1].body_paths
    print(paths)
    command = Command(paths)

    if not args.viewer:  # TODO: how to reenable the viewer
        disconnect()
        connect(use_gui=True)
        load_world()
    saved_world.restore()

    user_input('Execute?')
    if execute == 'control':
        command.control()
    elif execute == 'execute':
        command.refine(num_steps=10).execute(time_step=0.001)
    elif execute == 'step':
        command.step()
    else:
        raise ValueError(execute)

    #dt = 1. / 240 # Bullet default
    #p.setTimeStep(dt)

    wait_for_interrupt()
    disconnect()