Code example #1
def read_data(path):
    # TODO: move to pybullet-tools
    _, extension = os.path.splitext(path)
    if extension == '.json':
        return read_json(path)
    if extension in ['.pp2', '.pp3', '.pkl', '.pk2', '.pk3']:
        return read_pickle(path)
    raise NotImplementedError(extension)
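
For orientation, here is a minimal sketch of what the read_json and read_pickle helpers that read_data dispatches to might look like. These are assumptions based on the standard library, not the project's actual utilities.

import json
import pickle

def read_json(path):
    # Hypothetical helper: load a JSON file from disk.
    with open(path, 'r') as f:
        return json.load(f)

def read_pickle(path):
    # Hypothetical helper: load a pickled object; binary mode is required.
    with open(path, 'rb') as f:
        return pickle.load(f)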
Code example #2
File: visualize_pours.py Project: lyltc1/LTAMP
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('paths', nargs='*', help='Paths to the data.')
    args = parser.parse_args()

    if not args.paths:
        visualize_collected_pours(paths=BEST_DATASETS) # TRAIN_DATASETS | ACTIVE_DATASETS | TEST_DATASETS | BEST_DATASETS
    else:
        [path] = args.paths
        learner = read_pickle(path)
        visualize_learned_pours(learner)
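
Example invocations (the learner path below is hypothetical): with no arguments the script visualizes the collected BEST_DATASETS, while a single path is read as a pickled learner.

# python visualize_pours.py                       # collected pours from BEST_DATASETS
# python visualize_pours.py learners/gp_pour.pk3  # a pickled learner (hypothetical path)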
Code example #3
def enumerate_experiments():
    for filename in sorted(os.listdir(EXPERIMENTS_DIR)):
        path = os.path.join(EXPERIMENTS_DIR, filename)
        try:
            data = read_pickle(path)
            configs, results = zip(*data)
            #print(results[0]['sequence'])
            #problems = {config.problem for config in configs}
            algorithms = {config.algorithm for config in configs}
            heuristics = {config.bias for config in configs}
            print()
            print(path, len(data), sorted(algorithms), sorted(heuristics))
            print(configs[0])
        except TypeError:
            print('Unable to load', path)
            continue
Code example #4
def run_trial(args):
    skill, fn, trial, kwargs = args
    if isinstance(fn, str) and os.path.isfile(fn):
        fn = read_pickle(fn)
    if not kwargs['verbose']:
        sys.stdout = open(os.devnull, 'w')
    current_wd, trial_wd = start_task()

    # if not is_darwin():
    #    # When disabled, the pybullet output still occurs across threads
    #    HideOutput.DEFAULT_ENABLE = False
    seed = hash((kwargs.get('seed', time.time()), trial))
    set_seed(seed)
    # time.sleep(1.*random.random()) # Hack to stagger processes

    sim_world, collector, task, feature, evaluation_fn, _ = sample_task(
        skill, **kwargs)
    print('Feature:', feature)
    parameter_fns, parameter, prediction = get_parameter_fns(
        sim_world, collector, fn, feature, kwargs['valid'])
    # TODO: some of the parameters are None when run in parallel
    result = {
        SKILL: skill,
        'date': datetime.datetime.now().strftime(DATE_FORMAT),
        'seed': seed,
        'trial': trial,
        'simulated': True,
        FEATURE: feature,
    }
    if test_validity(result, sim_world, collector, feature, parameter,
                     prediction):
        result.update(
            get_parameter_result(sim_world, task, parameter_fns, evaluation_fn,
                                 **kwargs))
    else:
        print('Invalid parameter! Skipping planning.')
    # result['seed'] = seed
    # except BaseException as e:
    #    traceback.print_exc() # e

    complete_task(sim_world, current_wd, trial_wd)
    if not kwargs['verbose']:
        sys.stdout.close()
    return result
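
run_trial silences per-trial output by rebinding sys.stdout to os.devnull and closing it at the end without restoring the original stream. A sketch of the same idea as a reusable context manager, offered only as an illustration and not part of the project:

import contextlib
import os
import sys

@contextlib.contextmanager
def suppressed_stdout(enabled=True):
    # Temporarily redirect stdout to os.devnull and restore it afterwards.
    if not enabled:
        yield
        return
    original = sys.stdout
    with open(os.devnull, 'w') as devnull:
        sys.stdout = devnull
        try:
            yield
        finally:
            sys.stdout = original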
Code example #5
File: visualize_diverse.py Project: lyltc1/LTAMP
def main(skill='pour'):
    parser = argparse.ArgumentParser()
    parser.add_argument('expid', type=int, help='experiment ID')
    parser.add_argument(
        'beta_lambda',
        type=float,
        default=0.99,
        help='lambda parameter for computing beta from best beta')
    args = parser.parse_args()
    beta_lambda = args.beta_lambda
    expid = args.expid
    n_samples = 20
    trainsize = 1000
    exp_dir = 'sampling_trainsize={}_samples={}_beta_lambdda={}'.format(
        trainsize, n_samples, beta_lambda)
    if skill == 'pour':
        exp_dir = os.path.join('data/pour_19-06-13_00-59-21/', exp_dir)
        domain = load_data(['data/pour_19-06-13_00-59-21/trials_n=10000.json'])
    elif skill == 'scoop':
        exp_dir = os.path.join('data/scoop_19-06-10_20-16-59_top-diameter/',
                               exp_dir)
        domain = load_data(
            ['data/scoop_19-06-10_20-16-59_top-diameter/trials_n=10000.json'])

    data_path = os.path.join(
        exp_dir, 'experiments_{}.pk{}'.format(expid, get_python_version()))
    if not os.path.exists(data_path):
        print('{} does not exist'.format(data_path))
        return
    data, seed = read_pickle(data_path)

    print('sample a new task')
    current_wd, trial_wd = start_task()
    sim_world, collector, task, feature, evalfunc, saver = sample_task_with_seed(
        skill, seed=seed, visualize=True)

    samples = data[UNIFORM].samples
    scores = data[UNIFORM].scores
    good_samples = samples[scores > 0]

    scores, plan_results = evaluate_samples(sim_world, collector, task,
                                            feature, domain, good_samples[:5],
                                            evalfunc, saver)
Code example #6
def get_parameter_fns(sim_world, collector, fn, feature, valid):
    if isinstance(fn, str) and os.path.isfile(fn):
        fn = read_pickle(fn)
    parameter = prediction = None
    if isinstance(fn, ActiveLearner):
        fn.reset_sample()  # Should be redundant
        print('Learner: {} | Query type: {}'.format(fn.name, fn.query_type))
        # parameter_fn = lambda world, feature: iter([fn.query(feature)]) # Need to convert to parameter
        parameter_fn = fn.parameter_generator
        parameter = next(parameter_fn(sim_world, feature, valid=valid), False)
        # prediction = fn.prediction(feature, parameter)
        if parameter is not False:
            parameter['policy'] = (fn.name, fn.query_type)  # fn.algorithm?
    elif fn == TRAINING:
        # Commits to a single parameter
        parameter_fn = collector.parameter_fns[RANDOM]
        for parameter in parameter_fn(sim_world, feature):  # TODO: timeout
            if not valid or collector.validity_test(sim_world, feature,
                                                    parameter):
                break
        else:
            parameter = False
        if parameter is not False:
            parameter['policy'] = fn
    elif fn in collector.parameter_fns:
        # Does not commit to a single parameter
        parameter_fn = collector.parameter_fns[fn]
    else:
        raise ValueError(fn)
    # TODO: could prune failing parameters here
    print('Parameter:', parameter)
    print('Prediction:', prediction)
    if parameter is not None:
        parameter_fn = get_trial_parameter_fn(parameter)
    parameter_fns = {collector.gen_fn: parameter_fn}
    return parameter_fns, parameter, prediction
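
get_trial_parameter_fn is not shown in these excerpts. From its use above it evidently wraps the single committed parameter in a generator with the same (world, feature) signature as the entries of collector.parameter_fns; a minimal sketch consistent with that usage (an assumption, not the project's definition):

def get_trial_parameter_fn(parameter):
    # Hypothetical reconstruction: always yield the one committed parameter.
    def parameter_fn(world, feature):
        yield parameter
    return parameter_fn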
Code example #7
File: run_pr2_active.py Project: lyltc1/LTAMP
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('paths', nargs='*', help='Paths to the data.')
    #parser.add_argument('-a', '--active', type=int, default=0, # None
    #                    help='The number of active samples to collect')
    parser.add_argument('-l', '--learner', default=None,
                        help='Path to the learner that should be used')
    parser.add_argument('-n', '--num_trials', type=int, default=100,
                        help='The number of samples to collect')
    parser.add_argument('-s', '--save', action='store_true',
                        help='Whether to save the learners')
    parser.add_argument('-r', '--num_rounds', type=int, default=1,
                        help='The number of rounds to collect')
    #parser.add_argument('-t', '--num_train', type=int, default=None,
    #                    help='The size of the training set')
    args = parser.parse_args()

    # TODO: be careful that paging isn't altering the data
    # TODO: don't penalize if the learner identifies that it can't make a good prediction
    # TODO: use a different set of randomized parameters for train and test

    include_none = False
    serial = is_darwin()


    #training_sizes = inclusive_range(50, 500, 25)
    #training_sizes = inclusive_range(25, 100, 5)
    #training_sizes = inclusive_range(25, 100, 5)
    training_sizes = inclusive_range(10, 50, 5)
    #training_sizes = inclusive_range(100, 1000, 100)
    #training_sizes = [20]
    #training_sizes = [1500]

    #kernels = ['RBF', 'Matern52', 'MLP']
    kernels = ['MLP']

    #hyperparameters = [None]
    #hyperparameters = [True]
    hyperparameters = [True, None] # None,

    query_type = BEST # BEST | CONFIDENT | REJECTION | ACTIVE # type of query used to evaluate the learner
    is_adaptive = False
    max_test = 50 #
    #alphas = np.linspace(0.0, 0.9, num=5, endpoint=True)
    alphas = [0.0, .8, .9, .99]
    #alphas = [None]  # Use the default (i.e. GP parameters)

    use_vars = [True]
    binary = False
    split = UNIFORM # BALANCED

    # Omitting failed labels is okay because they will never be executed
    algorithms = []
    #algorithms += [(Algorithm(BATCH_GP, kernel=kernel, hyperparameters=hype, use_var=use_var), [num_train])
    #               for num_train, kernel, hype, use_var in product(training_sizes, kernels, hyperparameters, use_vars)]
    algorithms += [(Algorithm(STRADDLE_GP, kernel, hype, use_var), training_sizes)
                   for kernel, hype, use_var in product(kernels, hyperparameters, use_vars)]
    #algorithms += [(Algorithm(rf_model, p_explore=None, use_var=use_var), [num_train])
    #               for rf_model, num_train, use_var in product(RF_MODELS, training_sizes, use_vars)]
    #algorithms += [(Algorithm(nn_model, p_explore=None), [num_train])
    #               for nn_model, num_train in product(NN_MODELS, training_sizes)]
    #algorithms += [(Algorithm(RANDOM), None), (Algorithm(DESIGNED), None)]
    print('Algorithms:', algorithms)
    print('Split:', split)

    trials_per_round = sum(1 if train_sizes is None else
                           (train_sizes[-1] - train_sizes[0] + len(train_sizes))
                           for _, train_sizes in algorithms)
    num_experiments = args.num_rounds*trials_per_round

    date_name = datetime.datetime.now().strftime(DATE_FORMAT)
    size_str = '[{},{}]'.format(training_sizes[0], training_sizes[-1])
    #size_str = '-'.join(map(str, training_sizes))
    experiments_name = '{}_r={}_t={}_n={}'.format(date_name, args.num_rounds, size_str, args.num_trials) #'19-08-09_21-44-58_r=5_t=[10,150]_n=1'#
    #experiments_name = 't={}'.format(args.num_rounds)
    # TODO: could include OS and username if desired

    domain = load_data(args.paths)
    print()
    print(domain)
    X, Y, W = domain.get_data(include_none=include_none)
    print('Total number of examples:', len(X))
    if binary:
        # NN can fit perfectly when binary
        # Binary seems to be outperforming w/o
        Y = threshold_scores(Y)

    max_train = len(X) - max_test #min(max([0] + [active_sizes[0] for _, active_sizes in algorithms
                     #          if active_sizes is not None]), len(X))

    #parameters = {
    #    'include None': include_none,
    #    'binary': binary,
    #    'split': split,
    #}

    print('Name:', experiments_name)
    print('Experiments:', num_experiments)
    print('Max train:', max_train)
    print('Include None:', include_none)
    print('Examples: n={}, d={}'.format(*X.shape))
    print('Binary:', binary)
    print('Estimated hours:', num_experiments * SEC_PER_EXPERIMENT / HOURS_TO_SECS)
    user_input('Begin?')
    # TODO: residual learning for sim to real transfer
    # TODO: can always be conservative and add sim negative examples

    # TODO: combine all data to write in one folder
    data_dir = os.path.join(DATA_DIRECTORY, domain.name) # EXPERIMENT_DIRECTORY
    experiments_dir = os.path.join(data_dir, experiments_name)
    mkdir(experiments_dir)
    start_time = time.time()
    experiments = []
    for round_idx in range(args.num_rounds):
        round_dir = os.path.join(data_dir, experiments_name, str(round_idx))
        mkdir(round_dir)
        seed = hash(time.time())
        train_test_file = os.path.join(round_dir, 'data.pk3')
        if not os.path.exists(train_test_file):
            X_train, Y_train, X_test, Y_test = split_data(X, Y, split, max_train)
            X_test, Y_test = X_test[:max_test], Y_test[:max_test]
            write_pickle(train_test_file, (X_train, Y_train, X_test, Y_test))
        else:
            X_train, Y_train, X_test, Y_test = read_pickle(train_test_file)

        print('Train examples:', X_train.shape)
        print('Test examples:', X_test.shape)
        # TODO: need to be super careful when running with multiple contexts

        for algorithm, active_sizes in algorithms:
            # active_sizes = [first #trainingdata selected from X_train, #active exploration + #trainingdata]
            print(SEPARATOR)
            print('Round: {} | {} | Seed: {} | Sizes: {}'.format(round_idx, algorithm, seed, active_sizes))
            # TODO: allow keyboard interrupt
            if active_sizes is None:
                learner = algorithm.name
                active_size = None
                train_confusion = None
                experiments.append(evaluate_learner(domain, seed, train_confusion, X_test, Y_test, algorithm, learner,
                                                    active_size, args.num_trials, alphas,
                                                    serial))
            else:
                # [10 20 25] take first 10 samples from X_train to train the model, 10 samples chosen actively
                # sequentially + evaluate model, 5 samples chosen actively sequentially + evaluate model
                # Could always keep around all the examples and retrain
                # TODO: segfaults when this runs in parallel
                # TODO: may be able to retrain in parallel if I set OPENBLAS_NUM_THREADS
                learner_prior_nx = 0
                '''
                if algorithm.hyperparameters:
                    if domain.skill == 'pour':
                        learner_file = '/Users/ziw/ltamp_pr2/data/pour_19-06-13_00-59-21/19-08-09_19-30-01_r=10_t=[50,400]_n=1/{}/gp_active_mlp_true_true.pk3'.format(
                            round_idx)
                    elif domain.skill == 'scoop':
                        learner_file = '/Users/ziw/ltamp_pr2/data/scoop_19-06-10_20-16-59_top-diameter/19-08-09_19-34-56_r=10_t=[50,400]_n=1/{}/gp_active_mlp_true_true.pk3'.format(
                            round_idx)
                    learner = read_pickle(learner_file)
                    learner_prior_nx = learner.nx
                    learner.retrain(newx=X_train[:active_sizes[0]], newy=Y_train[:active_sizes[0], None])
                else:
                '''
                learner, train_confusion = create_learner(domain, X_train, Y_train, split, algorithm,
                                                          num_train=active_sizes[0], query_type=query_type,
                                                          is_adaptive=is_adaptive)

                if algorithm.name == STRADDLE_GP:
                    X_select, Y_select = X_train[active_sizes[0]:], Y_train[active_sizes[0]:]

                for active_size in active_sizes:
                    num_active = active_size - learner.nx + learner_prior_nx  # learner.nx is len(learner.xx)
                    print('\nRound: {} | {} | Seed: {} | Size: {} | Active: {}'.format(
                        round_idx, algorithm, seed, active_size, num_active))
                    if algorithm.name == STRADDLE_GP:
                        X_select, Y_select = active_learning_discrete(learner, num_active, X_select, Y_select)
                    #if args.save:
                    save_learner(round_dir, learner)
                    experiments.append(evaluate_learner(domain, seed, None, X_test, Y_test,
                                                        algorithm, learner,
                                                        active_size, args.num_trials, alphas,
                                                        serial))
                    save_experiments(experiments_dir, experiments)

    print(SEPARATOR)
    if experiments:
        save_experiments(experiments_dir, experiments)
        plot_experiments(domain, experiments_name, experiments_dir, experiments,
                         include_none=False)
                         #include_none=include_none)
        print('Experiments:', experiments_dir)
    print('Total experiments:', len(experiments))
    print('Total hours:', elapsed_time(start_time) / HOURS_TO_SECS)
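
The training_sizes above come from inclusive_range, which, judging from size_str indexing training_sizes[0] and training_sizes[-1], behaves like range but includes the endpoint. A plausible sketch under that assumption:

def inclusive_range(start, stop, step=1):
    # Assumed behavior: like range(start, stop, step) but including the endpoint,
    # e.g. inclusive_range(10, 50, 5) -> [10, 15, ..., 50].
    return list(range(start, stop + step, step))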
Code example #8
File: collect_pr2.py Project: lyltc1/LTAMP
def main():
    """
    ./home/demo/catkin_percep/collect_scales.sh (includes scale offset)
    Make sure to start the scales with nothing on them (for calibration)
    """
    assert (get_python_version() == 2)  # ariadne has ROS with python2
    parser = argparse.ArgumentParser()
    parser.add_argument('-a',
                        '--active',
                        action='store_true',
                        help='Uses active learning queries.')
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Disables saving during debugging.')
    parser.add_argument(
        '-f',
        '--fn',
        type=str,
        default=TRAINING,  # DESIGNED | TRAINING
        help='The name of or the path to the policy that generates parameters.'
    )
    parser.add_argument('-m',
                        '--material',
                        required=True,
                        choices=sorted(MATERIALS),
                        help='The name of the material being used.')
    parser.add_argument('-p',
                        '--problem',
                        required=True,
                        choices=sorted(REQUIREMENT_FNS.keys()),
                        help='The name of the skill to learn.')
    parser.add_argument('-s',
                        '--spoon',
                        default=None,
                        choices=SPOONS,
                        help='The name of the spoon being used.')
    parser.add_argument('-r',
                        '--train',
                        action='store_true',
                        help='When enabled, uses the training dataset.')
    parser.add_argument(
        '-v',
        '--visualize_planning',
        action='store_true',
        help=
        'When enabled, visualizes planning rather than the world (for debugging).'
    )
    args = parser.parse_args()
    # TODO: toggle material default based on task

    # TODO: label material on the image
    assert args.material in MODEL_MASSES
    print('Policy:', args.fn)
    assert implies(args.problem in ['scoop'], args.spoon is not None)
    assert implies(args.active, args.train)

    ros_world = ROSWorld(sim_only=False, visualize=not args.visualize_planning)
    classes_pub = rospy.Publisher('~collect_classes', String, queue_size=1)
    with ros_world:
        set_camera_pose(np.array([1.5, -0.5, 1.5]),
                        target_point=np.array([0.75, 0, 0.75]))

    arm, is_open = ACTIVE_ARMS[args.problem]
    open_grippers = {arm: is_open}
    if args.problem == 'scoop':
        ros_world.controller.open_gripper(get_arm_prefix(arm), blocking=True)
        ros_world.controller.speak("{:.0f} seconds to attach {}".format(
            ATTACH_TIME, format_class(args.spoon)))
        rospy.sleep(ATTACH_TIME)  # Sleep to have time to set the spoon
    move_to_initial_config(ros_world, open_grippers)  #get_other_arm
    # TODO: cross validation for measuring performance across a few bowls

    launch = launch_kinect()
    video_time = time.time()

    if args.debug:
        ros_world.controller.speak("Warning! Data will not be saved.")
        time.sleep(1.0)
        data_path = None
    else:
        # TODO: only create directory if examples made
        data_path = get_data_path(args.problem, real=True)

    # TODO: log camera image after the pour
    policy = args.fn
    learner_path = None
    test_data = None
    if isinstance(policy, str) and os.path.isfile(policy):
        policy = read_pickle(policy)
        assert isinstance(policy, ActiveLearner)
        print(policy)
        print(policy.algorithm)
        #policy.transfer_weight = 0

        # print(policy.xx.shape)
        # policy.results = policy.results[:-1]
        # policy.xx = policy.xx[:-1]
        # policy.yy = policy.yy[:-1]
        # policy.weights = policy.weights[:-1]
        # print(policy.xx.shape)
        # write_pickle(args.fn, policy)
        # print('Saved', args.fn)

        if args.active:
            # policy.retrain()
            test_domain = load_data(SCOOP_TEST_DATASETS, verbose=False)
            test_data = test_domain.create_dataset(include_none=False,
                                                   binary=False)
            policy.query_type = STRADDLE  # VARIANCE
            #policy.weights = 0.1*np.ones(policy.yy.shape) # TODO: make this multiplicative
            #policy.retrain()
            evaluate_confusions(test_data, policy)
        else:
            policy.query_type = BEST
        ensure_dir(LEARNER_DIRECTORY)
        date_name = datetime.datetime.now().strftime(DATE_FORMAT)
        filename = '{}_{}.pk{}'.format(get_label(policy.algorithm), date_name,
                                       get_python_version())
        learner_path = os.path.join(LEARNER_DIRECTORY, filename)

    if ACTIVE_FEATURE and args.active:
        assert isinstance(policy, ActiveLearner)
        generator = create_active_generator(args, policy)
    else:
        generator = create_random_generator(args)
    pair = next(generator)
    print('Next pair:', pair)
    classes_pub.publish('{},{}'.format(*pair))
    for phrase in map(format_class, pair):
        ros_world.controller.speak(phrase)
    wait_for_user('Press enter to begin')

    # TODO: change the name of the directory after additional samples
    results = []
    num_trials = num_failures = num_scored = 0
    while True:
        start_time = elapsed_time(video_time)
        result = run_loop(args, ros_world, policy)
        print('Result:', str_from_object(result))
        print('{}\nTrials: {} | Successes: {} | Failures: {} | Time: {:.3f}'.
              format(SEPARATOR, num_trials, len(results), num_failures,
                     elapsed_time(video_time)))
        num_trials += 1
        if result is None:  # TODO: result['execution']
            num_failures += 1
            print('Error! Trial resulted in an exception')
            move_to_initial_config(ros_world, open_grippers)
            continue

        end_time = elapsed_time(video_time)
        print('Elapsed time:', end_time - start_time)
        # TODO: record the type of failure (planning, execution, etc...)
        scored = result['score'] is not None
        num_scored += scored
        # TODO: print the score

        if isinstance(policy, ActiveLearner) and args.active:  # and scored:
            update_learner(policy, learner_path, result)
            evaluate_confusions(test_data, policy)
            # TODO: how to handle failures that require bad annotations?

        pair = next(generator)
        print('Next pair:', pair)
        classes_pub.publish('{},{}'.format(*pair))
        for phrase in map(format_class, pair):
            ros_world.controller.speak(phrase)

        annotation = wait_for_user(
            'Enter annotation and press enter to continue: ')
        result.update({
            # TODO: record the query_type
            'policy': args.fn,
            'active_feature': ACTIVE_FEATURE,
            'trial': num_trials,
            'start_time': start_time,
            'end_time': end_time,
            'annotation': annotation,
        })
        results.append(result)
        if data_path is not None:
            write_results(data_path, results)
        #if annotation in ['q', 'quit']: # TODO: Ctrl-C to quit
        #    break

    ros_world.controller.speak("Finished")
    if launch is not None:
        launch.shutdown()
    print('Total time:', elapsed_time(video_time))
Code example #9
File: run_taskkernel.py Project: lyltc1/LTAMP
def plot():
    # success rate
    skill = 'pour'  # 'scoop'
    paths = [os.path.join(get_data_dir(skill), 'trials_n=10000.json')]
    beta_lambda = 0.99
    success_rate = {}
    plan_score = {}
    trainsize = 3000
    use_obstacle = 1
    strategy_ids = [1, 2, 4, 3]
    planing_time = {}
    exprange = range(5)  # range(5, 10) #
    exp_dir = os.path.join(os.path.dirname(paths[0]), 'default_hyper')
    print('trainsize={}\t beta_lambda={}\t use_obstacle={}'.format(
        trainsize, beta_lambda, use_obstacle))
    for expid in exprange:
        context = None
        for sample_strategy_id in strategy_ids:  # range(1,5):
            exp_file = 'tasklengthscale_sampling_trainsize={}_beta_lambdda={}_strategy_{}_obs_{}_expid_{}.pk3'.format(
                trainsize, beta_lambda, sample_strategy_id, int(use_obstacle),
                expid)
            exp_file = os.path.join(exp_dir, exp_file)

            results = read_pickle(exp_file)

            assert (context is None
                    or (context == results[0][0][1][0].context).all())

            context = results[0][0][1][0].context

            prev_results = results

    for sample_strategy_id in strategy_ids:
        sample_strategy = SAMPLE_STRATEGIES[sample_strategy_id]
        success_rate[sample_strategy] = []
        plan_score[sample_strategy] = []
        minlen = 1 << 31
        planing_time_res = []
        succ_rate_res = []
        none_rate_res = []
        fail_rate_res = []
        for expid in exprange:

            for exp_dir in [
                    os.path.join(os.path.dirname(paths[0]), 'default_hyper'),
                    os.path.join(os.path.dirname(paths[0]),
                                 'shakey_default_hyper')
            ]:
                exp_file = 'tasklengthscale_sampling_trainsize={}_beta_lambdda={}_strategy_{}_obs_{}_expid_{}.pk3'.format(
                    trainsize, beta_lambda, sample_strategy_id,
                    int(use_obstacle), expid)

                exp_file = os.path.join(exp_dir, exp_file)

                results = read_pickle(exp_file)
                res = []
                score = []
                for train, test in results[0]:
                    res.append(sum([t.scores >= 0 for t in test]) / len(test))
                    try:
                        planing_time_res.append(
                            np.mean(
                                [t.plan_results['plan-time'] for t in test]))
                    except Exception:
                        print('no plan results')

                    succ_rate_res.extend([
                        1 if t.scores > 0 and t.plan_results['plan-time'] < 120
                        else 0 for t in test
                    ])
                    #none_rate_res.extend([1 if t.plan_results['score'] is None else 0 for t in test])
                    #fail_rate_res.extend([1 if t.scores < 0 and t.plan_results['score'] is not None else 0 for t in test])
                    score.append(
                        [len(t.samples) for t in test]
                    )  # ([.6**len(t.samples) if t.scores >= 0 else 0 for t in test]) # ([len(t.samples) for t in test])#
                    # if train:
                    #    print(*train.task_lengthscale)
                success_rate[sample_strategy].append(res)
                plan_score[sample_strategy].append(score)

                minlen = min(minlen, len(res))

        tmp = []
        tmpp = []
        for i in range(len(exprange)):
            tmp.append(success_rate[sample_strategy][i][:minlen])
            tmpp.append(plan_score[sample_strategy][i][:minlen])
        tmp = np.array(tmp)
        tmpp = np.array(tmpp)
        success_rate[sample_strategy] = (np.mean(tmp,
                                                 axis=0), np.std(tmp, axis=0))
        plan_score[sample_strategy] = (np.mean(tmpp), np.std(tmpp))
        print(sample_strategy)
        print('plan time', np.mean(planing_time_res), np.std(planing_time_res))
        print('success rate', np.mean(succ_rate_res), np.std(succ_rate_res))
        print('none rate', np.mean(none_rate_res), np.std(none_rate_res))
        print('fail rate', np.mean(fail_rate_res), np.std(fail_rate_res))
    print(success_rate)
    print(plan_score)
    print()
Code example #10
def plot(trainsize,
         n_samples,
         beta_lambda,
         includenone=1,
         ignore_plan_fail=True,
         show=False,
         skill='pour'):
    exp_dir = 'sampling_trainsize={}_samples={}_beta_lambdda={}_includenone_{}'.format(
        trainsize, n_samples, beta_lambda, includenone)
    if skill == 'pour':
        exp_dir = os.path.join(get_data_dir('pour'), exp_dir)
    elif skill == 'scoop':
        exp_dir = os.path.join(get_data_dir('scoop'), exp_dir)
    acc = defaultdict(list)
    div5 = defaultdict(list)
    div = defaultdict(list)
    sample_time = defaultdict(list)
    num_samples_5 = defaultdict(list)
    npr = defaultdict(list)
    for expid in range(50):
        data_path = os.path.join(
            exp_dir, 'experiments_{}.pk{}'.format(expid, get_python_version()))
        if not os.path.exists(data_path):
            print('{} does not exist'.format(data_path))
            continue
        # print('processing {}'.format(data_path))
        data, seed = read_pickle(data_path)
        for strategy in data:
            if data[strategy].diversity_5 is None:
                continue
            none_res = np.sum([
                1 if pr['score'] is None else 0
                for pr in data[strategy].plan_results
            ])
            if data[strategy].precision < 1.:
                print('low precision - score 0:{}, <0:{}, >0:{}'.format(
                    none_res, np.sum(data[strategy].scores < 0),
                    np.sum(data[strategy].scores > 0)))

            if ignore_plan_fail:
                acc[strategy].append(
                    (np.sum(data[strategy].scores > 0) + none_res) /
                    len(data[strategy].scores))
            else:
                acc[strategy].append(data[strategy].precision)
            npr[strategy].append(none_res / len(data[strategy].scores))
            div5[strategy].append(data[strategy].diversity_5)
            div[strategy].append(data[strategy].diversity)
            sample_time[strategy].append(data[strategy].sample_time)
            num_samples_5[strategy].append(data[strategy].num_samples_5)

    sample_time_plot = []
    sample_time_err = []
    strategies = []
    num_samples_5_plot = []
    num_samples_5_err = []

    for strategy in sample_time:
        acc[strategy] = np.array(acc[strategy])
        print(
            '{}     fpr = {:.2f} \pm {:.2f}     npr = {:.2f} \pm {:.2f}     time = {:.2f} \pm {:.2f}     n5 = {:.2f} \pm {:.2f}     div5 = {:.2f} \pm {:.2f}'
            .format(strategy, np.mean(1 - acc[strategy]),
                    np.std(1 - acc[strategy]), np.mean(npr[strategy]),
                    np.std(npr[strategy]), np.mean(sample_time[strategy]),
                    np.std(sample_time[strategy]),
                    np.mean(num_samples_5[strategy]),
                    np.std(num_samples_5[strategy]), np.mean(div5[strategy]),
                    np.std(div5[strategy])))
        sample_time_plot.append(np.mean(sample_time[strategy]))
        sample_time_err.append(np.std(sample_time[strategy]))
        num_samples_5_plot.append(np.mean(num_samples_5[strategy]))
        num_samples_5_err.append(np.std(num_samples_5[strategy]))
        strategies.append(strategy)

    exp_dir = os.path.join(exp_dir, 'figures')
    if not os.path.exists(exp_dir):
        os.mkdir(exp_dir)
    import matplotlib
    import matplotlib.pyplot as plt
    matplotlib.rcParams.update({'font.size': 20, 'legend.fontsize': 15})
    markers = ['o', 'v', '<', '>', '1', '2', '3']
    colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#cab2d6', '#fb9a99']

    # bar plot for sample time
    return  # NOTE: early return; the plotting code below is currently unreachable
    fig, ax = plt.subplots()
    ax.bar(range(3),
           sample_time_plot,
           yerr=sample_time_err,
           align='center',
           alpha=0.5,
           ecolor='black',
           capsize=10)
    ax.set_ylabel('Sample time (seconds)')
    ax.set_xticks(range(3))
    ax.set_xticklabels(strategies)
    ax.yaxis.grid(True)

    # Save the figure and show
    plt.tight_layout()
    plt.savefig(os.path.join(exp_dir, 'sampletime.pdf'))
    if show:
        plt.show()
    else:
        plt.clf()

    # bar plot for num sample 5
    fig, ax = plt.subplots()
    ax.bar(range(3),
           num_samples_5_plot,
           yerr=num_samples_5_err,
           align='center',
           alpha=0.5,
           ecolor='black',
           capsize=10)
    ax.set_ylabel('Number of samples needed to generate 5 good ones')
    ax.set_xticks(range(3))
    ax.set_xticklabels(strategies)
    ax.yaxis.grid(True)

    # Save the figure and show
    # plt.tight_layout()
    plt.savefig(os.path.join(exp_dir, 'numsample5.pdf'))
    if show:
        plt.show()
    else:
        plt.clf()

    # div-acc using all data points
    i = 0
    for strategy in acc:
        # plt.scatter(np.mean(div5[strategy]), np.mean(acc[strategy]), marker=markers[i], color=colors[i], label=strategy)
        plt.scatter(div5[strategy],
                    acc[strategy],
                    marker=markers[i],
                    color=colors[i],
                    label=strategy)
        plt.xlabel('Diversity of the first 5 samples')
        plt.ylabel('Accuracy ')
        i += 1
    plt.legend()
    plt.savefig(
        os.path.join(
            exp_dir,
            'all_data_div5_ignorePlanFail_{}.pdf'.format(ignore_plan_fail)))
    if show:
        plt.show()
    else:
        plt.clf()

    i = 0
    for strategy in acc:
        # plt.scatter(np.mean(div[strategy]), np.mean(acc[strategy]), marker=markers[i], color=colors[i], label=strategy)
        plt.scatter(div[strategy],
                    acc[strategy],
                    marker=markers[i],
                    color=colors[i],
                    label=strategy)
        plt.xlabel('Diversity of all 20 samples')
        plt.ylabel('Accuracy ')
        i += 1
    plt.legend()
    plt.savefig(
        os.path.join(
            exp_dir,
            'all_data_div_ignorePlanFail_{}.pdf'.format(ignore_plan_fail)))
    if show:
        plt.show()
    else:
        plt.clf()

    # plot the average
    i = 0
    fig, ax = plt.subplots()
    for strategy in acc:
        confidence_ellipse(div5[strategy],
                           acc[strategy],
                           ax,
                           alpha=0.5,
                           facecolor=colors[i],
                           edgecolor=colors[i],
                           zorder=0)
        plt.scatter(np.mean(div5[strategy]),
                    np.mean(acc[strategy]),
                    marker='.',
                    s=10,
                    color=colors[i],
                    label=strategy)
        plt.xlabel('Diversity of the first 5 samples')
        plt.ylabel('Accuracy ')
        i += 1
    plt.legend()
    plt.savefig(
        os.path.join(
            exp_dir,
            'mean_div5_ignorePlanFail_{}.pdf'.format(ignore_plan_fail)))
    if show:
        plt.show()
    else:
        plt.clf()

    i = 0
    fig, ax = plt.subplots()
    for strategy in acc:
        confidence_ellipse(div[strategy],
                           acc[strategy],
                           ax,
                           alpha=0.5,
                           facecolor=colors[i],
                           edgecolor=colors[i],
                           zorder=0)
        plt.scatter(np.mean(div[strategy]),
                    np.mean(acc[strategy]),
                    marker='.',
                    s=10,
                    color=colors[i],
                    label=strategy)
        plt.xlabel('Diversity of all 20 samples')
        plt.ylabel('Accuracy ')
        i += 1
    plt.legend()
    plt.savefig(
        os.path.join(
            exp_dir,
            'mean_div_ignorePlanFail_{}.pdf'.format(ignore_plan_fail)))
    if show:
        plt.show()
    else:
        plt.clf()
Code example #11
File: learner.py Project: lyltc1/LTAMP
def load(rel_path):
    return read_pickle(os.path.join(LEARNER_DIRECTORY, rel_path))
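
A complementary save helper would be symmetric; this sketch assumes write_pickle(path, obj) as used in run_pr2_active.py and is not taken from learner.py itself.

def save(learner, rel_path):
    # Hypothetical mirror of load(): pickle the learner under LEARNER_DIRECTORY.
    write_pickle(os.path.join(LEARNER_DIRECTORY, rel_path), learner)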
Code example #12
def load_experiment(filename, overall=False, failed_runtimes=True):
    # TODO: maybe just pass the random seed as a separate arg
    # TODO: aggregate over all problems and score using IPC rules
    # https://ipc2018-classical.bitbucket.io/
    max_time = 0
    data_from_problem = OrderedDict()
    data = read_pickle(filename)
    for config, result in data:
        #config.problem = extrusion_name_from_path(config.problem)
        if config.problem in EXCLUDE:
            continue
        problem = ALL if overall else config.problem
        plan = result.get('sequence', None)
        result[SUCCESS] = (plan is not None)
        result[SUCCESS] *= 100
        result[RUNTIME] = min(result[RUNTIME], config.max_time)
        result['length'] = len(plan) if result[SUCCESS] else INF
        #max_trans, max_rot = max_plan_deformation(config.problem, plan)
        #result['max_trans'] = max_trans
        #result['max_rot'] = max_rot
        result.pop('sequence', None)
        max_time = max(max_time, result[RUNTIME])
        if not result[SUCCESS] and not failed_runtimes:
            result.pop(RUNTIME, None)
        data_from_problem.setdefault(problem, []).append((config, result))

    for p_idx, problem in enumerate(sorted(data_from_problem)):
        print()
        problem_name = os.path.basename(
            os.path.abspath(problem))  # TODO: this isn't a path...
        print('{}) Problem: {}'.format(p_idx, problem_name))
        if problem != ALL:
            extrusion_path = get_extrusion_path(problem)
            element_from_id, node_points, ground_nodes = load_extrusion(
                extrusion_path, verbose=False)
            print('Nodes: {} | Ground: {} | Elements: {}'.format(
                len(node_points), len(ground_nodes), len(element_from_id)))

        data_from_config = OrderedDict()
        value_per_field = {}
        for config, result in data_from_problem[problem]:
            new_config = Configuration(None, None, *config[2:])
            #print(config._asdict()) # config.__dict__
            for field, value in config._asdict().items():
                value_per_field.setdefault(field, set()).add(value)
            data_from_config.setdefault(new_config, []).append(result)

        print('Attributes:', str_from_object(value_per_field))
        print('Configs:', len(data_from_config))

        all_results = {}
        for c_idx, config in enumerate(sorted(data_from_config, key=str)):
            results = data_from_config[config]
            accumulated_result = {}
            for result in results:
                for name, value in result.items():
                    #if result[SUCCESS] or (name == SUCCESS):
                    if is_number(value):
                        accumulated_result.setdefault(name, []).append(value)
            mean_result = {
                name: round(np.average(values), 3)
                for name, values in accumulated_result.items()
            }
            key = {
                field: value
                for field, value in config._asdict().items()
                if (value is not None) and (field in ['algorithm', 'bias'] or
                                            (2 <= len(value_per_field[field])))
            }
            all_results[frozenset(key.items())] = {
                name: values
                for name, values in accumulated_result.items()
                if name in SCORES
            }
            score = score_result(mean_result)
            print('{}) {} ({}): {}'.format(c_idx, str_from_object(key),
                                           len(results),
                                           str_from_object(score)))

        if problem == ALL:
            for attribute in SCORES:
                bar_graph(all_results, attribute)
    scatter_plot(data)
    print('Max time: {:.3f} sec'.format(max_time))
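
The aggregation loop above only averages numeric result fields via is_number. A minimal sketch of such a predicate (an assumption about the helper, not its actual definition):

import numbers

def is_number(value):
    # Treat ints, floats and numpy scalars as numeric, but exclude booleans.
    return isinstance(value, numbers.Number) and not isinstance(value, bool)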