Code example #1
 def is_safe(self, elements, element_bodies):
     # TODO: check the end-effector first
     known_elements = set(self.safe_per_element) & set(elements)
     if not all(self.safe_per_element[e] for e in known_elements):
         return False
     unknown_elements = randomize(set(elements) - known_elements)
     if not unknown_elements:
         return True
     for trajectory in randomize(
             self.trajectories
     ):  # TODO: could cache each individual collision
         intersecting = trajectory.get_intersecting()
         for i in randomize(range(len(trajectory))):
             set_joint_positions(trajectory.robot, trajectory.joints,
                                 trajectory.path[i])
             for element in unknown_elements:
                 body = element_bodies[element]
                 #if not pairwise_collision(trajectory.robot, body):
                 #    self.set_unsafe(element)
                 #    return False
                 for robot_link, bodies in intersecting[i].items():
                     #print(robot_link, bodies, len(bodies))
                     if (body in bodies) and pairwise_link_collision(
                             trajectory.robot, robot_link, body,
                             link2=BASE_LINK):
                         self.set_unsafe(element)
                         return False
     self.update_safe(elements)
     return True
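Every example on this page routes its candidates through randomize. None of the snippets define it, but the call sites pin down its contract: it accepts sets, generators, and dict views, and its result is iterated, truthiness-tested, and even sliced ([:num_failed] in code example #16). A minimal sketch consistent with those uses, assuming the obvious shuffle semantics (the implementation below is inferred from the call sites, not taken from the source):

import random

def randomize(iterable):
    # Materialize the iterable so sets, generators, and dict views all
    # become an indexable list, then shuffle it in place.
    sequence = list(iterable)
    random.shuffle(sequence)
    return sequence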
Code example #2
def command_collision(end_effector, command, bodies):
    # TODO: each new addition makes collision checking more expensive
    #offset = 4
    #for robot_conf in trajectory[offset:-offset]:
    collisions = [False for _ in range(len(bodies))]
    # Orientation remains the same for the extrusion trajectory
    idx_from_body = dict(zip(bodies, range(len(bodies))))
    # TODO: separate into another method. Sort paths by tool poses first
    for trajectory in command.trajectories:
        for tool_pose in randomize(trajectory.get_link_path()):  # TODO: bisect
            end_effector.set_pose(tool_pose)
            #for body, _ in get_bodies_in_region(tool_aabb): # TODO
            for body in bodies:
                if body not in idx_from_body:  # Robot
                    continue
                idx = idx_from_body[body]
                if not collisions[idx]:
                    collisions[idx] |= pairwise_collision(
                        end_effector.body, body)
    for trajectory in command.trajectories:
        for robot_conf in randomize(trajectory.path):
            set_joint_positions(trajectory.robot, trajectory.joints,
                                robot_conf)
            for i, body in enumerate(bodies):
                if not collisions[i]:
                    collisions[i] |= pairwise_collision(trajectory.robot, body)
    #for element, unsafe in zip(elements, collisions):
    #    command.safe_per_element[element] = unsafe
    return collisions
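Note the two-pass structure of command_collision: the first pass sweeps only the detached end effector along the tool path, the second replays full robot configurations, and both accumulate into the same collisions list with |=, so a body flagged in either pass stays flagged and is skipped by the `if not collisions[...]` guards.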
Code example #3
    def test(name1, command1, name2, command2):
        robot1 = index_from_name(robots, name1)
        robot2 = index_from_name(robots, name2)
        if (robot1 == robot2) or not collisions:
            return False
        # TODO: check collisions between pairs of inflated adjacent element
        for traj1, traj2 in randomize(
                product(command1.trajectories, command2.trajectories)):
            # TODO: use swept aabbs for element checks
            aabbs1, aabbs2 = traj1.get_aabbs(), traj2.get_aabbs()
            swept_aabbs1 = {
                link: aabb_union(link_aabbs[link] for link_aabbs in aabbs1)
                for link in aabbs1[0]
            }
            swept_aabbs2 = {
                link: aabb_union(link_aabbs[link] for link_aabbs in aabbs2)
                for link in aabbs2[0]
            }
            swept_overlap = [
                (link1, link2)
                for link1, link2 in product(swept_aabbs1, swept_aabbs2)
                if aabb_overlap(swept_aabbs1[link1], swept_aabbs2[link2])
            ]
            if not swept_overlap:
                continue
            # for l1 in set(map(itemgetter(0), swept_overlap)):
            #     draw_aabb(swept_aabbs1[l1], color=RED)
            # for l2 in set(map(itemgetter(1), swept_overlap)):
            #     draw_aabb(swept_aabbs2[l2], color=BLUE)

            for index1, index2 in product(randomize(range(len(traj1.path))),
                                          randomize(range(len(traj2.path)))):
                overlap = [(link1, link2)
                           for link1, link2 in swept_overlap if aabb_overlap(
                               aabbs1[index1][link1], aabbs2[index2][link2])]
                #overlap = list(product(aabbs1[index1], aabbs2[index2]))
                if not overlap:
                    continue
                set_configuration(robot1, traj1.path[index1])
                set_configuration(robot2, traj2.path[index2])
                #wait_if_gui()
                #if pairwise_collision(robot1, robot2):
                #    return True
                for link1, link2 in overlap:
                    if pairwise_link_collision(robot1, link1, robot2, link2):
                        #wait_if_gui()
                        return True
        return False
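The pruning in test is hierarchical: per-link swept AABBs over each whole trajectory eliminate most trajectory pairs up front, per-configuration AABBs then eliminate most (index1, index2) pairs, and only the link pairs that survive both filters reach the exact pairwise_link_collision query.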
Code example #4
File: utils.py  Project: ramonpereira/SS-Replan
def get_grasps(world, name, grasp_types=GRASP_TYPES, pre_distance=APPROACH_DISTANCE, **kwargs):
    use_width = world.robot_name == FRANKA_CARTER
    body = world.get_body(name)
    #fraction = 0.25
    obj_type = type_from_name(name)
    body_pose = REFERENCE_POSE.get(obj_type, unit_pose())
    center, extent = approximate_as_prism(body, body_pose)

    for grasp_type in grasp_types:
        if not implies(world.is_real(), is_valid_grasp_type(name, grasp_type)):
            continue
        #assert is_valid_grasp_type(name, grasp_type)
        if grasp_type == TOP_GRASP:
            grasp_length = 1.5 * FINGER_EXTENT[2]  # fraction = 0.5
            pre_direction = pre_distance * get_unit_vector([0, 0, 1])
            post_direction = unit_point()
            generator = get_top_grasps(body, under=True, tool_pose=TOOL_POSE, body_pose=body_pose,
                                       grasp_length=grasp_length, max_width=np.inf, **kwargs)
        elif grasp_type == SIDE_GRASP:
            # Take max of height and something
            grasp_length = 1.75 * FINGER_EXTENT[2]  # No problem if pushing a little
            x, z = pre_distance * get_unit_vector([3, -1])
            pre_direction = [0, 0, x]
            post_direction = [0, 0, z]
            top_offset = extent[2] / 2 if obj_type in MID_SIDE_GRASPS else 1.0*FINGER_EXTENT[0]
            # Under grasps are actually easier for this robot
            # TODO: bug in under in that it grasps at the bottom
            generator = get_side_grasps(body, under=False, tool_pose=TOOL_POSE, body_pose=body_pose,
                                        grasp_length=grasp_length, top_offset=top_offset, max_width=np.inf, **kwargs)
            # if world.robot_name == FRANKA_CARTER else unit_pose()
            generator = (multiply(Pose(euler=Euler(yaw=yaw)), grasp)
                         for grasp in generator for yaw in [0, np.pi])
        else:
            raise ValueError(grasp_type)
        grasp_poses = randomize(list(generator))
        if obj_type in CYLINDERS:
            # TODO: filter first
            grasp_poses = (multiply(grasp_pose, Pose(euler=Euler(
                yaw=random.uniform(-math.pi, math.pi)))) for grasp_pose in cycle(grasp_poses))
        for i, grasp_pose in enumerate(grasp_poses):
            pregrasp_pose = multiply(Pose(point=pre_direction), grasp_pose,
                                     Pose(point=post_direction))
            grasp = Grasp(world, name, grasp_type, i, grasp_pose, pregrasp_pose)
            with BodySaver(body):
                grasp.get_attachment().assign()
                with BodySaver(world.robot):
                    grasp.grasp_width = close_until_collision(
                        world.robot, world.gripper_joints, bodies=[body])
            #print(get_joint_positions(world.robot, world.arm_joints)[-1])
            #draw_pose(unit_pose(), parent=world.robot, parent_link=world.tool_link)
            #grasp.get_attachment().assign()
            #wait_for_user()
            ##for value in get_joint_limits(world.robot, world.arm_joints[-1]):
            #for value in [-1.8973, 0, +1.8973]:
            #    set_joint_position(world.robot, world.arm_joints[-1], value)
            #    grasp.get_attachment().assign()
            #    wait_for_user()
            if use_width and (grasp.grasp_width is None):
                continue
            yield grasp
Code example #5
def add_successors(queue,
                   all_elements,
                   node_points,
                   ground_nodes,
                   heuristic_fn,
                   printed,
                   position,
                   conf,
                   partial_orders=[],
                   visualize=False):
    incoming_from_element = incoming_from_edges(partial_orders)
    remaining = all_elements - printed
    num_remaining = len(remaining) - 1
    #assert 0 <= num_remaining
    #bias_from_element = {}
    # TODO: print ground first
    for directed in randomize(
            compute_printable_directed(all_elements, ground_nodes, printed)):
        element = get_undirected(all_elements, directed)
        if not (incoming_from_element[element] <= printed):
            continue
        bias = heuristic_fn(printed, directed, position, conf)
        priority = (num_remaining, bias, random.random())
        visits = 0
        heapq.heappush(queue, (visits, priority, printed, directed, conf))
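The heap entry (visits, priority, printed, directed, conf) sorts by visit count first and then by the (num_remaining, bias, random.random()) priority, so among equally-visited entries those with fewer remaining elements and better heuristic bias pop first. The random.random() tiebreaker also ensures heapq virtually never has to compare the trailing printed/directed payloads, which need not be totally ordered.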
Code example #6
 def sample_remaining(printed, next_printed, sample_fn, num=1, **kwargs):
     if num == 0:
         return True
     # all_elements, plan_all, and ground_nodes are closed over from the
     # enclosing scope in the original source
     remaining_elements = (all_elements - next_printed) if plan_all else \
         compute_printable_elements(all_elements, ground_nodes, next_printed)
     # TODO: could just consider nodes in printed (connected=True)
     return all(sample_fn(printed, next_printed, element, connected=False, num=num, **kwargs)
                for element in randomize(remaining_elements))
Code example #7
 def is_safe(self, obstacles):
     checked_bodies = set(self.safe_from_body) & set(obstacles)
     if any(not self.safe_from_body[e] for e in checked_bodies):
         return False
     unchecked_bodies = randomize(set(obstacles) - checked_bodies)
     if not unchecked_bodies:
         return True
     for tool_pose in randomize(self.tool_path):
         self.extrusion.end_effector.set_pose(tool_pose)
         tool_aabb = get_aabb(
             self.extrusion.end_effector.body)  # TODO: cache nearby_bodies
         nearby_bodies = {
             b
             for b, _ in get_bodies_in_region(tool_aabb)
             if b in unchecked_bodies
         }
         for body in nearby_bodies:
             if pairwise_collision(self.extrusion.end_effector.body, body):
                 self.safe_from_body[body] = False
                 return False
     return True
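Unlike code example #1, which records both verdicts (set_unsafe on failure, update_safe on success), this variant caches only failures, at least within this snippet: a body that collides is stored as False in safe_from_body and short-circuits every later call, while bodies that pass are re-examined each time.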
Code example #8
 def add_successors(printed, position, conf):
     # queue, ground_elements, outgoing_from_element, ground_nodes, and
     # heuristic_fn are closed over from the enclosing scope in the original source
     only_ground = printed <= ground_elements
     num_remaining = len(printed) - 1
     #assert 0 <= num_remaining
     for element in randomize(printed):
         if not (outgoing_from_element[element] & printed) and implies(
                 is_ground(element, ground_nodes), only_ground):
             for directed in get_directions(element):
                 visits = 0
                 bias = heuristic_fn(printed, directed, position, conf)
                 priority = (num_remaining, bias, random.random())
                 heapq.heappush(queue,
                                (visits, priority, printed, directed, conf))
Code example #9
File: collect_pr2.py  Project: lyltc1/LTAMP
def create_random_generator(args):
    # Left to right
    if args.problem == 'pour':
        cups, bowls = ((TRAIN_CUPS, TRAIN_BOWLS) if args.train else
                       (TEST_CUPS, TEST_BOWLS))
        print('Cups ({}): {}'.format(len(cups), cups))
        print('Bowls ({}): {}'.format(len(bowls), bowls))
        combinations = randomize(product(cups, bowls))
        if not args.train:
            combinations = POUR_TEST_PAIRS
    elif args.problem == 'scoop':
        bowls = TRAIN_BOWLS if args.train else TEST_BOWLS
        cups = [SCOOP_CUP]
        combinations = randomize(product(bowls, cups))
        #assert args.train
        if not args.train:
            combinations = list(product(SCOOP_TEST, cups))
    else:
        raise NotImplementedError(args.problem)
    print('Combinations ({})'.format(len(combinations)))
    #print(randomize(combinations))
    generator = cycle(combinations)
    #generator = (random.choice(combinations) for _ in inf_generator())
    return generator
Code example #10
 def sample(self, discrete=True):
     # TODO: timeout if unable to find
     while True:
         poses = {}
         for name, pose_dist in randomize(self.pose_dists.items()):
             body = self.world.get_body(name)
             pose = pose_dist.sample_discrete() if discrete else pose_dist.sample()
             pose.assign()
             if any(
                     pairwise_collision(body, self.world.get_body(other))
                     for other in poses):
                 break
             poses[name] = pose
         else:  # no break: every sampled pose was collision-free
             return poses
Code example #11
File: stiffness.py  Project: caelan/pb-construction
def search_neighborhood(extrusion_path,
                        element_from_id,
                        node_points,
                        ground_nodes,
                        checker,
                        sequence,
                        initial_position,
                        stiffness=True,
                        max_candidates=1,
                        max_time=INF):
    start_time = time.time()
    best_sequence = None
    best_cost = compute_sequence_distance(node_points,
                                          sequence,
                                          start=initial_position,
                                          end=initial_position)
    candidates = 1
    for i1, i2 in randomize(
            combinations_with_replacement(range(len(sequence)), r=2)):
        for directed1 in get_directions(sequence[i1]):
            for directed2 in get_directions(sequence[i2]):
                if implies(best_sequence, (max_candidates <= candidates)) and (
                        max_time <= elapsed_time(start_time)):
                    return best_sequence
                candidates += 1
                if i1 == i2:
                    new_sequence = sequence[:i1] + [directed1] + sequence[i1 + 1:]
                else:
                    new_sequence = (sequence[:i1] + [directed1] +
                                    sequence[i1 + 1:i2] + [directed2] +
                                    sequence[i2 + 1:])
                assert len(new_sequence) == len(sequence)
                if count_violations(ground_nodes, new_sequence):
                    continue
                new_cost = compute_sequence_distance(node_points,
                                                     new_sequence,
                                                     start=initial_position,
                                                     end=initial_position)
                if best_cost <= new_cost:
                    continue
                print(best_cost, new_cost)
                return new_sequence
                # TODO: eager version of this also
                # if stiffness and not test_stiffness(extrusion_path, element_from_id, printed, checker=checker, verbose=False):
                #    continue # Unfortunately the full structure is affected
    return best_sequence
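search_neighborhood is a first-improvement local search: although the neighborhood is scanned in randomized order, the function returns the first strictly better sequence it encounters (the early return after the cost comparison) and only falls back to best_sequence when its candidate/time budget is exhausted.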
Code example #12
def optimize_feature(learner, max_time=INF, **kwargs):
    #features = read_json(get_feature_path(learner.func.skill))
    feature_fn, attributes, pairs = generate_candidates(
        learner.func.skill, **kwargs)
    start_time = time.time()
    world = ROSWorld(use_robot=False, sim_only=True)
    saver = ClientSaver(world.client)
    print('Optimizing over {} feature pairs'.format(len(pairs)))

    best_pair, best_score = None, -INF  # maximize
    for pair in randomize(pairs):  # islice
        if max_time <= elapsed_time(start_time):
            break
        world.reset(keep_robot=False)
        for name in pair:
            world.perception.add_item(name)
        feature = feature_fn(world, *pair)
        parameter = next(
            learner.parameter_generator(world,
                                        feature,
                                        min_score=best_score,
                                        valid=True,
                                        verbose=False), None)
        if parameter is None:
            continue
        context = learner.func.context_from_feature(feature)
        sample = learner.func.sample_from_parameter(parameter)
        x = x_from_context_sample(context, sample)
        #x = learner.func.x_from_feature_parameter(feature, parameter)
        score_fn = learner.get_score_f(context, noise=False,
                                       negate=False)  # maximize
        score = float(score_fn(x)[0, 0])
        if best_score < score:
            best_pair, best_score = pair, score
    world.stop()
    saver.restore()
    print(
        'Best pair: {} | Best score: {:.3f} | Pairs: {} | Time: {:.3f}'.format(
            best_pair, best_score, len(pairs), elapsed_time(start_time)))
    assert best_pair is not None  # best_score starts at -INF, so check the pair
    # TODO: ensure the same parameter is used
    return dict(zip(attributes, best_pair))
Code example #13
def tool_path_collision(end_effector, element_pose, translation_path,
                        direction, angle, reverse, obstacles):
    # TODO: allow sampling in the full sphere by checking collision with an element while sliding
    for tool_pose in randomize(
            compute_tool_path(element_pose, translation_path, direction, angle,
                              reverse)):
        end_effector.set_pose(tool_pose)
        #bodies = obstacles
        tool_aabb = get_aabb(end_effector.body)  # TODO: could just translate
        #handles = draw_aabb(tool_aabb)
        bodies = {
            b
            for b, _ in get_bodies_in_region(tool_aabb) if b in obstacles
        }
        #print(bodies)
        #for body, link in bodies:
        #    handles.extend(draw_aabb(get_aabb(body, link)))
        #wait_for_user()
        #remove_handles(handles)
        if any(pairwise_collision(end_effector.body, obst) for obst in bodies):
            # TODO: sort by angle with smallest violation
            return True
    return False
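Code examples #7 and #13 share the same broad-phase idiom: query the spatial index with the moving body's AABB, intersect the result with the obstacle set, and only run exact checks on the survivors. A hypothetical helper distilling that pattern (the name aabb_filtered_collision is invented here; get_aabb, get_bodies_in_region, and pairwise_collision are the calls the examples themselves use):

def aabb_filtered_collision(moving_body, obstacles):
    # Broad phase: cheap AABB region query around the moving body.
    nearby = {b for b, _ in get_bodies_in_region(get_aabb(moving_body))
              if b in obstacles}
    # Narrow phase: exact pairwise checks on the survivors only.
    return any(pairwise_collision(moving_body, body) for body in nearby)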
Code example #14
File: motion.py  Project: caelan/pb-construction
def decompose_structure(fixed_obstacles, element_bodies, printed_elements, resolution=0.25):
    # TODO: precompute this
    frequencies = {}
    for element in printed_elements:
        #key = None
        # node_points is closed over from the enclosing scope in the original source
        midpoint = np.average([node_points[n] for n in element], axis=0)
        #key = int(midpoint[2] / resolution)
        key = tuple((midpoint / resolution).astype(int).tolist()) # round or int?
        frequencies.setdefault(key, []).append(element)
    #print(len(frequencies))
    #print(Counter({key: len(elements) for key, elements in frequencies.items()}))

    # TODO: apply this elsewhere
    hulls = {}
    obstacles = list(fixed_obstacles)
    for elements in frequencies.values():
        element_obstacles = randomize(element_bodies[e] for e in elements)
        if MIN_ELEMENTS <= len(elements):
            hull = create_bounding_mesh(element_bodies, node_points, elements)
            assert hull is not None
            hulls[hull] = element_obstacles
        else:
            obstacles.extend(element_obstacles)
    return hulls, obstacles
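The key = tuple((midpoint / resolution).astype(int).tolist()) line is a coarse spatial hash: element midpoints are quantized into voxels of side resolution, and any voxel holding at least MIN_ELEMENTS elements is replaced by a single convex bounding mesh, trading collision-check precision for far fewer pairwise queries.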
Code example #15
File: visualize_pours.py  Project: lyltc1/LTAMP
def visualize_collected_pours(paths, num=6, save=True):
    result_from_bowl = {}
    for path in randomize(paths):
        results = read_data(path)
        print(path, len(results))
        for result in results:
            result_from_bowl.setdefault(result['feature']['bowl_type'], []).append(result)

    world = create_world()
    environment = get_bodies()
    #collector = SKILL_COLLECTORS['pour']
    print(get_camera())

    for bowl_type in sorted(result_from_bowl):
        # TODO: visualize same
        for body in get_bodies():
            if body not in environment:
                remove_body(body)
        print('Bowl type:', bowl_type)
        bowl_body = load_cup_bowl(bowl_type)
        bowl_pose = get_pose(bowl_body)

        results = result_from_bowl[bowl_type]
        results = randomize(results)

        score_cup_pose = []
        for i, result in enumerate(results):
            if num <= len(score_cup_pose):
                break
            feature = result['feature']
            parameter = result['parameter']
            score = result['score']
            if (score is None) or not result['execution'] or result['annotation']:
                continue
            cup_body = load_cup_bowl(feature['cup_type'])
            world.bodies[feature['bowl_name']] = bowl_body
            world.bodies[feature['cup_name']] = cup_body
            fraction = compute_fraction_filled(score)
            #value = collector.score_fn(feature, parameter, score)
            value = pour_score(feature, parameter, score)
            print(i, feature['cup_type'], fraction, value)
            path, _ = pour_path_from_parameter(world, feature, parameter)
            sort = fraction
            #sort = parameter['pitch']
            #sort = parameter['axis_in_bowl_z']
            score_cup_pose.append((sort, fraction, value, cup_body, path[0]))

        #order = score_cup_pose
        order = randomize(score_cup_pose)
        #order = sorted(score_cup_pose)
        angles = np.linspace(0, 2*np.pi, num=len(score_cup_pose), endpoint=False) # Halton
        for angle, (_, fraction, value, cup_body, pose) in zip(angles, order):
            #fraction = clip(fraction, min_value=0, max_value=1)
            value = clip(value, *DEFAULT_INTERVAL)
            fraction = rescale(value, DEFAULT_INTERVAL, new_interval=(0, 1))
            #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN)
            color = interpolate_hue(fraction)
            set_color(cup_body, apply_alpha(color, alpha=0.25))
            #angle = random.uniform(-np.pi, np.pi)
            #angle = 0
            rotate_bowl = Pose(euler=Euler(yaw=angle))
            cup_pose = multiply(bowl_pose, invert(rotate_bowl), pose)
            set_pose(cup_body, cup_pose)
            #wait_for_user()
            #remove_body(cup_body)

        if save:
            #filename = os.path.join('workspace', '{}.png'.format(bowl_type))
            #save_image(filename, take_picture())  # [0, 255]
            #wait_for_duration(duration=0.5)
            # TODO: get window location
            #os.system("screencapture -R {},{},{},{} {}".format(
            #    275, 275, 500, 500, filename)) # -R<x,y,w,h> capture screen rect
            wait_for_user()
        remove_body(bowl_body)
Code example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('paths', nargs='*', help='Paths to the data.')
    #parser.add_argument('-a', '--active', type=int, default=0, # None
    #                    help='The number of active samples to collect')
    parser.add_argument(
        '-d',
        '--deterministic',
        action='store_true',
        help='Whether to deterministically create training splits')
    parser.add_argument('-n',
                        '--num_trials',
                        type=int,
                        default=-1,
                        help='The number of samples to collect')
    parser.add_argument('-s',
                        '--save',
                        action='store_true',
                        help='Whether to save the learners')
    parser.add_argument('-r',
                        '--num_rounds',
                        type=int,
                        default=1,
                        help='The number of rounds to collect')
    parser.add_argument('-t',
                        '--test',
                        action='store_true',
                        help='Whether to save the data')
    parser.add_argument('-v',
                        '--visualize',
                        action='store_true',
                        help='When enabled, visualizes execution.')
    args = parser.parse_args()

    # TODO: be careful that paging isn't altering the data
    # TODO: use a different set of randomized parameters for train and test

    serial = is_darwin()
    visualize = serial and args.visualize
    assert implies(visualize, serial)
    num_trials = get_max_cores(serial) if args.num_trials < 0 else args.num_trials

    ##################################################

    #train_sizes = inclusive_range(50, 200, 10) # Best
    #train_sizes = inclusive_range(50, 400, 10) # F1
    #train_sizes = inclusive_range(25, 400, 25)
    #train_sizes = inclusive_range(50, 100, 5) # Real
    #train_sizes = inclusive_range(100, 200, 5)
    #train_sizes = inclusive_range(10, 250, 5)
    #train_sizes = inclusive_range(35, 70, 5)
    #train_sizes = inclusive_range(5, 50, 5)
    #train_sizes = inclusive_range(40, 80, 5)
    #train_sizes = inclusive_range(100, 1000, 100)
    #train_sizes = [50]
    #train_sizes = [250]
    train_sizes = [1000]
    #train_sizes = [327] # train + test
    #train_sizes = inclusive_range(5, 150, 25)
    #train_sizes = [100]

    #kernels = ['RBF', 'Matern52', 'MLP']
    kernels = ['MLP']

    hyperparams = [None]
    #hyperparams = [True]
    #hyperparams = [None, True]

    query_type = BEST  # BEST | CONFIDENT | REJECTION | ACTIVE # type of query used to evaluate the learner

    include_none = False
    binary = False

    # 0 => no transfer
    # 1 => mean transfer
    # 2 => kernel transfer
    # 3 => both transfer
    transfer_weights = [None]
    #transfer_weights = list(range(4))
    #transfer_weights = [0, 1]
    #transfer_weights = [3]
    #transfer_weights = np.around(np.linspace(0.0, 1.0, num=1+5, endpoint=True), decimals=3) # max 10 colors
    #transfer_weights = list(range(1, 1+3))

    #split = UNIFORM # BALANCED
    #print('Split:', split)
    #parameters = {
    #    'include None': include_none,
    #    'binary': binary,
    #    'split': split,
    #}

    # Omitting failed labels is okay because they will never be executed
    algorithms = []
    #algorithms += [(Algorithm(nn_model, label='NN'), [num])
    #              for nn_model, num in product(NN_MODELS, train_sizes)]
    #algorithms += [(Algorithm(RANDOM), None), (Algorithm(DESIGNED), None)]

    #algorithms += [(Algorithm(RF_CLASSIFIER, variance=False, transfer_weight=tw, label='RF'), [num])
    #                for num, tw in product(train_sizes, [None])] # transfer_weights
    #algorithms += [(Algorithm(RF_REGRESSOR, variance=False, transfer_weight=tw, label='RF'), [num])
    #                for num, tw in product(train_sizes, [None])] # transfer_weights
    #algorithms += [(Algorithm(BATCH_RF, variance=True, transfer_weight=tw, label='RF'), [num])
    #                for num, tw in product(train_sizes, [None])] # transfer_weights
    #algorithms += [(Algorithm(BATCH_MAXVAR_RF, variance=True, transfer_weight=tw), train_sizes)
    #                for tw in product(use_vars, [None])] # transfer_weights
    #algorithms += [(Algorithm(BATCH_STRADDLE_RF, variance=True, transfer_weight=tw), train_sizes)
    #                for tw, in product([None])] # transfer_weights

    use_vars = [True]
    # STRADDLE is better than MAXVAR when the learner has a good estimate of uncertainty
    algorithms += [
        (Algorithm(BATCH_GP, kernel, hype, use_var, tw,
                   label='GP'), [num])  # label='GP-{}'.format(kernel)
        for num, kernel, hype, use_var, tw in product(
            train_sizes, kernels, hyperparams, use_vars, transfer_weights)
    ]
    #algorithms += [(Algorithm(BATCH_MAXVAR_GP, kernel, hype, True, tw, label='GP-Var'), train_sizes)
    #                for kernel, hype, tw in product(kernels, hyperparams, transfer_weights)]
    #algorithms += [(Algorithm(BATCH_STRADDLE_GP, kernel, hype, True, tw, label='GP-LSE'), train_sizes)
    #                for kernel, hype, tw in product(kernels, hyperparams, transfer_weights)] # default active
    #algorithms += [(Algorithm(BATCH_STRADDLE_GP, kernel, hype, True, tw, label='GP-LSE2'), train_sizes)
    #                for kernel, hype, tw in product(kernels, hyperparams, transfer_weights)] # active control only

    # algorithms += [(Algorithm(MAXVAR_GP, kernel, hype, use_var), train_sizes)
    #                for kernel, hype, use_var in product(kernels, hyperparams, use_vars)]
    #algorithms += [(Algorithm(STRADDLE_GP, kernel, hype, use_var, tw), train_sizes)
    #                for kernel, hype, use_var, tw in product(kernels, hyperparams, use_vars, transfer_weights)]

    #batch_sizes = inclusive_range(train_sizes[0], 90, 10)
    #step_size = 10 # TODO: extract from train_sizes
    #final_size = train_sizes[-1]
    # Previously didn't have use_var=True
    # algorithms += [(Algorithm(BATCH_STRADDLE_GP, kernel, hyperparameters=batch_size, variance=True, transfer_weight=tw),
    #                 inclusive_range(batch_size, final_size, step_size))
    #                for kernel, tw, batch_size in product(kernels, transfer_weights, batch_sizes)]
    # algorithms += [(Algorithm(BATCH_STRADDLE_RF, hyperparameters=batch_size, variance=True, transfer_weight=tw),
    #                 inclusive_range(batch_size, final_size, step_size))
    #                 for tw, batch_size in product(transfer_weights, batch_sizes)]

    print('Algorithms:', algorithms)

    ##################################################

    real_world = not args.paths
    transfer_domain = load_data(TRANSFER_DATASETS, verbose=False)
    transfer_algorithm = None
    if real_world and transfer_weights != [None]:
        #assert transfer_weights[0] is not None
        transfer_data = transfer_domain.create_dataset(
            include_none=include_none, binary=binary)
        transfer_algorithm = Algorithm(BATCH_GP,
                                       kernel=kernels[0],
                                       variance=use_vars[0])

    validity_learner = None
    #validity_learner = create_validity_classifier(transfer_domain)

    ##################################################

    train_paths = args.paths
    if real_world:
        train_paths = SCOOP_TRAIN_DATASETS  # TRAIN_DATASETS
        #train_paths = TRANSFER_DATASETS
        #train_paths = TRAIN_DATASETS + TRANSFER_DATASETS # Train before transfer
    #scale_paths = TRAIN_DATASETS + TEST_DATASETS
    scale_paths = None
    print(SEPARATOR)
    print('Train paths:', train_paths)
    domain = load_data(train_paths)
    print()
    print(domain)
    all_data = domain.create_dataset(include_none=include_none,
                                     binary=binary,
                                     scale_paths=scale_paths)
    #all_data.results = all_data.results[:1000]

    num_failed = 0
    #num_failed = 100
    failed_domain = transfer_domain if real_world else domain
    failed_results = randomize(
        result for result in failed_domain.results
        if not result.get('success', False))[:num_failed]
    #failed_data = Dataset(domain, failed_results, **all_data.kwargs)

    test_paths = SCOOP_TEST_DATASETS  # TEST_DATASETS | SCOOP_TEST_DATASETS
    #test_paths = None
    if real_world and not (set(train_paths) & set(test_paths)):
        #assert not set(train_paths) & set(test_paths)
        #max_test = 0
        test_data = load_data(test_paths).create_dataset(
            include_none=False, binary=binary, scale_paths=scale_paths)
    else:
        #assert scale_paths is None # TODO: max_train will be too small otherwise
        test_paths = test_data = None
    print(SEPARATOR)
    print('Test paths:', test_paths)

    all_active_data = None
    #if real_world:
    #    all_active_data = load_data(ACTIVE_DATASETS).create_dataset(include_none=True, binary=binary, scale_paths=scale_paths)

    # TODO: could include OS and username if desired
    date_name = datetime.datetime.now().strftime(DATE_FORMAT)
    size_str = '[{},{}]'.format(train_sizes[0], train_sizes[-1])
    #size_str = '-'.join(map(str, train_sizes))
    experiments_name = '{}_r={}_t={}_n={}'.format(date_name, args.num_rounds,
                                                  size_str, num_trials)

    trials_per_round = sum(
        1 if train_sizes is None else (train_sizes[-1] - train_sizes[0] +
                                       len(train_sizes))
        for _, train_sizes in algorithms)
    num_experiments = args.num_rounds * trials_per_round
    max_train = min(
        max([0] + [
            active_sizes[0]
            for _, active_sizes in algorithms if active_sizes is not None
        ]), len(all_data))
    max_test = min(len(all_data) - max_train, 1000)

    ##################################################

    # #features = ['bowl_height']
    # features = ['spoon_height']
    # #features = ['bowl_height', 'spoon_height']
    # X, Y, _ = all_data.get_data()
    # #indices = [domain.inputs.index(feature) for feature in features]
    # #X = X[:,indices]
    # X = [[result[FEATURE][name] for name in features] for result in all_data.results]
    # from sklearn.linear_model import LinearRegression
    # model = LinearRegression(fit_intercept=True, normalize=False)
    # model.fit(X, Y)
    # #print(model.get_params())
    # print(model.coef_.tolist(), model.intercept_)
    # print(model.score(X, Y))

    #data_dir = os.path.join(DATA_DIRECTORY, domain.name) # EXPERIMENT_DIRECTORY
    data_dir = os.path.abspath(os.path.join(domain.name, os.path.pardir))
    experiments_dir, data_path = None, None
    if not args.test or not serial:
        experiments_dir = os.path.join(data_dir, experiments_name)
        data_path = os.path.join(
            experiments_dir, 'experiments.pk{}'.format(get_python_version()))

    ##################################################

    print(SEPARATOR)
    print('Name:', experiments_name)
    print('Experiments:', num_experiments)
    print('Experiment dir:', experiments_dir)
    print('Data path:', data_path)
    print('Examples:', len(all_data))
    print('Valid:',
          sum(result.get('valid', True) for result in all_data.results))
    print('Success:',
          sum(result.get('success', False) for result in all_data.results))
    print(
        'Scored:',
        sum(
            result.get('score', None) is not None
            for result in all_data.results))
    print('Max train:', max_train)
    print('Max test:', max_test)
    print('Include None:', include_none)
    print('Examples: n={}, d={}'.format(len(all_data), domain.dx))
    print('Binary:', binary)
    print('Serial:', serial)
    print('Estimated hours: {:.3f}'.format(num_experiments *
                                           SEC_PER_EXPERIMENT / HOURS_TO_SECS))
    user_input('Begin?')

    ##################################################

    experiments = []
    if experiments_dir is not None:
        mkdir(experiments_dir)
        # if os.path.exists(data_path):
        #     experiments.extend(read_pickle(data_path))

    # TODO: embed in a KeyboardInterrupt to allow early termination
    start_time = time.time()
    for round_idx in range(args.num_rounds):
        seed = round_idx if args.deterministic else hash(time.time())  # vs just time.time()?
        random.seed(seed)
        all_data.shuffle()
        if test_paths is None:  # cannot use test_data
            #test_data, train_data = split_data(all_data, max_test)
            train_data = test_data = all_data  # Training performance
        else:
            train_data = all_data

        transfer_learner = None
        if transfer_algorithm is not None:
            round_data, _ = transfer_data.partition(index=1000)
            transfer_learner, _ = create_learner(transfer_domain,
                                                 round_data,
                                                 transfer_algorithm,
                                                 verbose=True)
            transfer_learner.retrain()

        print(SEPARATOR)
        print('Round {} | Train examples: {} | Test examples: {}'.format(
            round_idx, len(train_data), len(test_data)))
        for algorithm, active_sizes in algorithms:
            # active_sizes = [first #trainingdata selected from X_train, #active exploration + #trainingdata]
            print(SEPARATOR)
            print('Round: {} | {} | Seed: {} | Sizes: {}'.format(
                round_idx, algorithm, seed, active_sizes))
            # TODO: allow keyboard interrupt
            if active_sizes is None:
                learner = algorithm.name
                active_size = train_confusion = None
                experiments.append(
                    evaluate_learner(domain, seed, train_confusion, test_data,
                                     algorithm, learner, active_size,
                                     num_trials, serial, args.visualize))
                continue
            # [10 20 25] take first 10 samples from X_train to train the model, 10 samples chosen actively
            # sequentially + evaluate model, 5 samples chosen actively sequentially + evaluate model
            # Could always keep around all the examples and retrain
            # TODO: segfaults when this runs in parallel
            # TODO: may be able to retrain in parallel if I set OPENBLAS_NUM_THREADS
            num_batch = active_sizes[0]
            batch_data, active_data = train_data.partition(num_batch)
            if all_active_data is not None:
                active_data = all_active_data.clone()

            #batch_data.results.extend(failed_results)
            learner, train_confusion = create_learner(
                domain,
                batch_data,
                algorithm,  # alphas,
                query_type=query_type,
                verbose=True)
            learner.validity_learner = validity_learner
            if transfer_learner is not None:
                learner.sim_model = transfer_learner.model
            learner.retrain()
            for active_size in active_sizes:
                num_active = active_size - (learner.nx - len(failed_results))
                print('\nRound: {} | {} | Seed: {} | Size: {} | Active: {}'.
                      format(round_idx, algorithm, seed, active_size,
                             num_active))
                if algorithm.name in CONTINUOUS_ACTIVE_GP:
                    active_learning(learner, num_active, visualize=visualize)
                    #active_learning(learner, num_active, discrete_feature=True, random_feature=False)
                    #active_learning_discrete(learner, active_data, num_active, random_feature=False)
                elif algorithm.name in BATCH_ACTIVE:
                    active_learning_discrete(learner, active_data, num_active)
                    #active_learning(learner, num_active, discrete_feature=True, random_feature=True)
                    #active_learning_discrete(learner, active_data, num_active, random_feature=True)
                #if round_dir is not None:
                #    save_learner(round_dir, learner)
                if args.save:
                    learner.save(data_dir)
                experiments.append(
                    evaluate_learner(domain, seed, train_confusion, test_data,
                                     algorithm, learner, active_size,
                                     num_trials, serial, args.visualize))
                save_experiments(data_path, experiments)

    print(SEPARATOR)
    if experiments:
        save_experiments(data_path, experiments)
        plot_experiments(domain,
                         experiments_name,
                         experiments_dir,
                         experiments,
                         include_none=False)
        print('Experiments: {}'.format(experiments_dir))
    print('Total experiments: {}'.format(len(experiments)))
    print('Total hours: {:.3f}'.format(
        elapsed_time(start_time) / HOURS_TO_SECS))