Code example #1
def generate_ca_trackpos(datapath, numsamples, sample_checkpoint_period, i_max, seed=1, iterate_seed=False):
    
    # Generate data by tracking desired end-effector positions, with obstacles and collision avoidance (assuming perfect position control)

    (_, _, _, _, rawdatapaths, _, _, _, _) = CAI_args(datapath)
    (random, randomstate, seed) = CAI_random(seed, iterate_seed=iterate_seed)
    
    generate_forced_bias_data(random, seed, rawdatapaths[1], numsamples, sample_checkpoint_period, i_max, 'perfect_position', 5, exit_criteria=exit_criteria_at_end_waypoint_or_i_max)
    print('Generated ' + str(numsamples) + ' position tracking VIK data in ' + datapath + '.')
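A minimal usage sketch, mirroring the parameter choices that make_extrasets (code example #9) passes to the related _val/_test generators; the data directory layout is an assumption:

import os

datapath = os.getcwd() + '/data'  # assumed repository layout
numsamples = 5000
sample_checkpoint_period = int(numsamples / 2)  # checkpoint halfway through
i_max = 1500
generate_ca_trackpos(datapath, numsamples, sample_checkpoint_period, i_max)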
Code example #2
def generate_ranpos(datapath, numsamples, sample_checkpoint_period, i_max, seed=0, iterate_seed=False):
    
    # Generate data from a random configuration at each timestep, with a single desired end-effector position held for i_max such timesteps

    (_, _, _, _, rawdatapaths, _, _, _, _) = CAI_args(datapath)
    (random, randomstate, seed) = CAI_random(seed, iterate_seed=iterate_seed)
    
    generate_forced_bias_data(random, seed, rawdatapaths[0], numsamples, sample_checkpoint_period, i_max, 'random_position', 0, exit_criteria=exit_criteria_at_i_max_only)
    print('Generated ' + str(numsamples) + '*' + str(i_max) + ' random_position VIK data in ' + datapath + '.')
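Compared with code example #1, this generator writes to rawdatapaths[0] instead of rawdatapaths[1] and uses exit_criteria_at_i_max_only, so every sample runs for the full i_max timesteps and the result holds numsamples * i_max data points, as the print statement reports. A hedged usage sketch under the same assumed layout:

datapath = os.getcwd() + '/data'  # assumed repository layout
# i_max=200 matches the timestep count mentioned in the function's comment.
generate_ranpos(datapath, 5000, 2500, 200)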
Code example #3
def observe_episode(seed,
                    datapath,
                    modelpath,
                    modelname,
                    episode_configuration_init,
                    episode_waypoint_cartesian_positions,
                    episode_cartesian_obstacles,
                    max_timesteps,
                    from_latest_checkpoint=True,
                    exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
                    max_obstacles=max_obstacles,
                    actuators='perfect_position',
                    record=True):

    (random, randomstate, seed) = CAI_random(seed)

    if not modelpath.endswith('/'):
        modelpath = modelpath + '/'

    model = None
    if from_latest_checkpoint:
        model, _ = load_model_and_history_from_latest_checkpoint(
            random, modelpath, modelname)
    else:
        model, _ = load_model_and_history(random,
                                          modelpath + modelname + '.h5')

    model.summary()

    inference_model = Inference_Model(modelname, model, datapath)
    history = pandas_episode_trajectory_initialize(max_timesteps,
                                                   max_obstacles)

    ca_tasks = [0 for _ in episode_cartesian_obstacles]
    for obst_i, obstacle in enumerate(episode_cartesian_obstacles):
        ca_tasks[obst_i] = ca_task(
            vector([0, 0, 0]),
            vector([obstacle.center_x, obstacle.center_y, obstacle.center_z]),
            obstacle.radius, np.inf)

    top_priority_set_based_tasks = ca_tasks

    rtpwrapped = rtpUR5()
    simulate(top_priority_set_based_tasks, (episode_configuration_init, 0),
             episode_waypoint_cartesian_positions,
             rtpwrapped,
             max_timesteps,
             history,
             actuators=actuators,
             exit_criteria=exit_criteria,
             random=random,
             record=record,
             inference_model=inference_model)
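A hypothetical invocation of observe_episode. The Obstacle namedtuple is only a stand-in for the repository's obstacle type (the function reads just center_x, center_y, center_z, and radius), and the joint configuration and waypoint values are illustrative:

from collections import namedtuple
import os

# Stand-in obstacle type exposing only the attributes read above.
Obstacle = namedtuple('Obstacle', ['center_x', 'center_y', 'center_z', 'radius'])

observe_episode(seed=0,
                datapath=os.getcwd() + '/data',
                modelpath=os.getcwd() + '/data/sessions/CAI/checkpoints',
                modelname='VIK_pyramid',
                episode_configuration_init=[0, -1.57, 1.57, 0, 0, 0],  # illustrative UR5 joint angles
                episode_waypoint_cartesian_positions=[[0.4, 0.2, 0.3]],
                episode_cartesian_obstacles=[Obstacle(0.3, 0.0, 0.3, 0.1)],
                max_timesteps=1500)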
Code example #4
def CAVIKee_sphere_dataset(datapath, seed=4, iterate_seed=False):

    # Construct a dataset from collision-avoidance position-tracking data only, to train collision avoidance

    (_, rawdatanames, datasetnames, _, rawdatapaths, datasetpaths, _, _, _) = CAI_args(datapath)
    (random, randomstate, seed) = CAI_random(seed, iterate_seed=iterate_seed)
    
    args_per_selection_method = ((0.55,),)
    selection_methods = (select_random_proportion,)
    split_proportion = (0.7, 0.15, 0.15)
    max_obstacles = 5

    (dataset, filenames) = construct(random, (rawdatapaths[1],), max_obstacles, CAVIKee_sphere_IO_from_rawdata, selection_methods, args_per_selection_method, *split_proportion)
    save_numpy(datasetpaths[3], datasetnames[3], dataset)
    save_numpy(datasetpaths[3], datasetnames[3] + '_filenames', filenames)
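The dataset tuple saved here is the same six-array bundle that code example #6 unpacks explicitly; with the (0.7, 0.15, 0.15) split, roughly 70% of the selected samples land in the training arrays:

(training_inputs, training_outputs,
 validation_inputs, validation_outputs,
 test_inputs, test_outputs) = dataset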
Code example #5
def VIK_dataset(datapath, seed=2, iterate_seed=False):

    # Construct a dataset using all data to train velocity inverse kinematics (VIK) for tracking, assuming zero obstacles

    (_, rawdatanames, datasetnames, _, rawdatapaths, datasetpaths, _, _, _) = CAI_args(datapath)
    (random, randomstate, seed) = CAI_random(seed, iterate_seed=iterate_seed)
    
    args_per_selection_method = ((1,),)
    selection_methods = (select_random_proportion,)
    split_proportion = (0.7, 0.15, 0.15)
    max_obstacles = 0

    (dataset, filenames) = construct(random, (rawdatapaths[0],), max_obstacles, VIK_IO_from_rawdata, selection_methods, args_per_selection_method, *split_proportion)
    save_numpy(datasetpaths[0], datasetnames[0], dataset)
    save_numpy(datasetpaths[0], datasetnames[0] + '_filenames', filenames)
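Each entry of selection_methods is paired positionally with an argument tuple from args_per_selection_method; here the proportion is 1, i.e. keep every sample. The real implementation lives elsewhere in the repository, but a minimal sketch of the assumed contract looks like:

def select_random_proportion(random, samples, proportion):
    # Assumed behavior: return a uniformly random subset containing
    # the given proportion of the available samples.
    k = int(len(samples) * proportion)
    return random.sample(samples, k)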
Code example #6
File: run_extrasets.py Project: daniena/legacy_th
def CAVIKAUGee_slot_dataset_test(datapath, seed=5, iterate_seed=False):

    # Construct a test-only dataset from collision-avoidance position-tracking data, for evaluating collision avoidance (note the (0, 0, 1) split)

    (_, rawdatanames, datasetnames, _, rawdatapaths, datasetpaths, _, _, _) = CAI_args(datapath)
    (random, randomstate, seed) = CAI_random(seed, iterate_seed=iterate_seed)
    
    args_per_selection_method = ((0.20,),)
    selection_methods = (select_random_proportion,)
    split_proportion = (0, 0, 1)
    max_obstacles = 5

    ((training_inputs, training_outputs, validation_inputs, validation_outputs, test_inputs, test_outputs), filenames) = construct(random, (rawdatapaths[1],), max_obstacles, CAVIKAUGee_slots_IO_from_rawdata, selection_methods, args_per_selection_method, *split_proportion)
    path = datasetpaths[2] + '_test'
    name = datasetnames[2]
    make_path(path)
    
    numpy.save(path + '/' + name + '_filenames', filenames)
    numpy.save(path + '/' + name + '_test_inputs', test_inputs)
    numpy.save(path + '/' + name + '_test_outputs', test_outputs)
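make_extrasets (code example #9) reads these arrays back with numpy.load under the matching path convention:

test_inputs = numpy.load(path + '/' + name + '_test_inputs.npy')
test_outputs = numpy.load(path + '/' + name + '_test_outputs.npy')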
Code example #7
def make_plots():
    seed = 100

    datapath = os.getcwd() + '/data'
    (random, randomstate, seed) = CAI_random(seed)
    (_, _, _, _, _, _, checkpointpath, modelspath, _) = CAI_args(datapath)

    plotpath = datapath + '/sessions/CAI/plots/training_history_plots'
    plot_all_most_recent_checkpoints(random,
                                     checkpointpath,
                                     performance_threshold_to_show=0.0,
                                     containing=None,
                                     save=True,
                                     plotpath=plotpath)

    modelnames = [
        'CAVIKAUGee_sphere_correct_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',  #'CAVIKAUGee_sphere_correct_activation12_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
        'CAVIKAUGee_slot_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
        'CAVIKAUGee_no_obst_input_control_experiment_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected'
    ]
    plot_histories_of_models_valfix(modelnames, 'training_history_comparison')
    plot_histories_of_models(('VIK_pyramid', ), 'training_history_trials')
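make_plots takes no arguments and resolves every path from the current working directory, so it is assumed to be run from the repository root where data/ lives:

if __name__ == '__main__':
    make_plots()  # expects ./data/sessions/CAI/... relative to the cwd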
Code example #8
File: run_extrasets.py Project: daniena/legacy_th
def iterate_model_checkpoints(modelname, index):
    random, _, _ = CAI_random(5)
    path = os.getcwd() + '/data/sessions/CAI/checkpoints/'
    model = thesis_load_model(random, path, modelname + '_checkpoint_' + str(index))
    index += 1
    return model, index
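A hypothetical driver loop over successive checkpoints; the exception raised by thesis_load_model for a missing checkpoint index is an assumption:

index = 0
while True:
    try:
        model, index = iterate_model_checkpoints('VIK_pyramid', index)
    except (IOError, OSError):  # assumed failure mode when the checkpoint file is absent
        break
    model.summary()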
Code example #9
File: run_extrasets.py Project: daniena/legacy_th
def make_extrasets():
    datapath = os.getcwd() + '/data'
    random, _, _ = CAI_random(6)

    numsamples = 5000
    sample_checkpoint_period = int(numsamples/2)
    i_max = 1500
    generate_ca_trackpos_val(datapath, numsamples, sample_checkpoint_period, i_max)
    generate_ca_trackpos_test(datapath, numsamples, sample_checkpoint_period, i_max)

    CAVIKAUGee_slot_dataset_val(datapath, seed=4, iterate_seed=False)
    CAVIKAUGee_slot_dataset_test(datapath, seed=5, iterate_seed=False)

    modelnames = ['CAVIKAUGee_sphere_correct_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',#'CAVIKAUGee_sphere_correct_activation12_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
                  'CAVIKAUGee_slot_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
                  'CAVIKAUGee_no_obst_input_control_experiment_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected']

    path = datapath + '/datasets/CAI_CAVIKAUGee_slots_dataset/_val/'
    name = 'CAI_CAVIKAUGee_slots_dataset'
    batch_size = 512
    random, _, _ = CAI_random(5)
    modelpath = os.getcwd() + '/data/sessions/CAI/checkpoints/'

    # Val acc and loss:
    validation_input  = numpy.load(path + name + '_validation_inputs.npy')
    validation_output = numpy.load(path + name + '_validation_outputs.npy')

    validation_input_mean, validation_input_std = normalize_parameters(validation_input)
    validation_generator = DataGenerator(validation_input, normalize(validation_output), CAVIKAUGee_sphere_input_from_CAVIKee_slots_IO, validation_input_mean[0:9], validation_input_std[0:9], batch_size, (CAVIKAUGee_sphere_num_inputs,), random)

    validation_history = history_dataset(modelnames[0], validation_generator, steps=int(validation_input.shape[0]/batch_size))
    save_json(datapath + '/temp', 'sphere_val_history', validation_history)

    #validation_history = history_dataset(modelnames[1], (validation_input, validation_output), 0)
    validation_history = load_json(datapath + '/temp', 'slot_val_history')
    model = thesis_load_model(random, modelpath, modelnames[1] + '_checkpoint_' + str(25))
    val_acc, val_loss = test_dataset(model, (validation_input, validation_output), 0)
    validation_history['loss'] += [val_loss]
    validation_history['acc'] += [val_acc]
    save_json(datapath + '/temp', 'slot_val_history_updated', validation_history)

    validation_generator = DataGenerator(validation_input, normalize(validation_output), CAVIKAUGee_no_obst_control_input_from_CAVIKee_slots_IO, validation_input_mean[0:9], validation_input_std[0:9], batch_size, (CAVIKAUGee_no_obst_control_num_inputs,), random)
    #validation_history = history_dataset(modelnames[2], validation_generator, steps=int(validation_input.shape[0]/batch_size))
    validation_history = load_json(datapath + '/temp', 'control_val_history')
    model = thesis_load_model(random, modelpath, modelnames[2] + '_checkpoint_' + str(25))
    val_acc, val_loss = test_dataset(model, validation_generator, steps=int(validation_input.shape[0]/batch_size))
    validation_history['loss'] += [val_loss]
    validation_history['acc'] += [val_acc]
    save_json(datapath + '/temp', 'control_val_history_updated', validation_history)

    # Test acc and loss:
    path = datapath + '/datasets/CAI_CAVIKAUGee_slots_dataset/_test/'
    test_input  = numpy.load(path + name + '_test_inputs.npy')
    test_output = numpy.load(path + name + '_test_outputs.npy')

    batch_size = 32
    test_input_mean, test_input_std = normalize_parameters(test_input)
    sphere_test_generator = DataGenerator(test_input, normalize(test_output), CAVIKAUGee_sphere_input_from_CAVIKee_slots_IO, test_input_mean[0:9], test_input_std[0:9], batch_size, (CAVIKAUGee_sphere_num_inputs,), random)
    control_test_generator = DataGenerator(test_input, normalize(test_output), CAVIKAUGee_no_obst_control_input_from_CAVIKee_slots_IO, test_input_mean[0:9], test_input_std[0:9], batch_size, (CAVIKAUGee_no_obst_control_num_inputs,), random)
    
    datasetwrapper = (sphere_test_generator, (test_input, test_output), control_test_generator)
    modelinitials = ['sphere', 'slot', 'control']
    for modelname, dataset, modelinitial in zip(modelnames, datasetwrapper, modelinitials):
        model = thesis_load_model(random, datapath + '/sessions/CAI/checkpoints', modelname + '_checkpoint_' + str(25))
        test_acc, test_loss = test_dataset(model, dataset, int(test_input.shape[0]/batch_size))
        test_results = {'test_acc': test_acc, 'test_loss': test_loss}
        save_json(datapath + '/temp', modelinitial + '_test_results', test_results)    
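The [0:9] slices above suggest only the first nine input features are standardized with these parameters. The real helpers live elsewhere in the repository; a minimal sketch of the assumed contract:

def normalize_parameters(data):
    # Assumed contract: per-feature mean and standard deviation over the samples.
    return data.mean(axis=0), data.std(axis=0)

def normalize(data):
    # Assumed contract: standardize each feature to zero mean and unit variance.
    mean, std = normalize_parameters(data)
    return (data - mean) / std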
Code example #10
def training_history(modelname):
    random, _, _ = CAI_random(0)
    _, history = load_model_and_history_from_latest_checkpoint(
        random,
        os.getcwd() + '/data/sessions/CAI/checkpoints', modelname)
    return history
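Hypothetical usage, assuming the returned history is the usual Keras-style dict of per-epoch metric lists (the 'loss' and 'acc' keys used in code example #9 point that way; the 'val_loss' key is an assumption):

history = training_history('VIK_pyramid')
print('best validation loss:', min(history['val_loss']))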
Code example #11
def observe(seed,
            datapath,
            modelpath,
            modelnames,
            modelinitials,
            num_episodes,
            max_timesteps,
            max_obstacles,
            from_episode_num=None,
            from_latest_checkpoint=True,
            from_checkpoint_num=None,
            actuators='position',
            verbose=False):

    (random, randomstate, seed) = CAI_random(seed)

    if from_episode_num is not None:
        num_episodes = 1

    plotpath = datapath + '/sessions/CAI/plots'

    if not modelpath.endswith('/'):
        modelpath = modelpath + '/'

    inference_models = []
    for modelname in modelnames:
        model = None
        if from_latest_checkpoint:
            model, _ = load_model_and_history_from_latest_checkpoint(
                random, modelpath, modelname)
        else:
            model = thesis_load_model(
                random, modelpath,
                modelname + '_checkpoint_' + str(from_checkpoint_num))

        model.summary()

        inference_models += [Inference_Model(modelname, model, datapath)]

    simulation_histories = [[] for _ in range(num_episodes)]
    simulation_waypoints = [[] for _ in range(num_episodes)]

    model_histories = [[] for _ in modelinitials]

    telemetries = []
    for modelinitial in modelinitials:
        telemetries += [
            Telemetry(num_episodes, max_timesteps, max_obstacles, seed,
                      plotpath, modelinitial)
        ]

    rtpwrapped = rtpUR5()
    for i in range(num_episodes):
        if from_episode_num is not None:
            i = from_episode_num

        history_infer = pandas_episode_trajectory_initialize(
            max_timesteps, max_obstacles)
        history_solution = pandas_episode_trajectory_initialize(
            max_timesteps, max_obstacles)
        history_no_ca = pandas_episode_trajectory_initialize(
            max_timesteps, max_obstacles)

        if from_episode_num is None:
            (
                (waypoint_configurations, waypoint_cartesian_positions,
                 cartesian_obstacles, _, _, _, _, _, _), _, _
            ) = generate_and_simulate_forced_bias_pattern_near_trajectory_infer_and_compare(
                random,
                history_infer,
                history_solution,
                history_no_ca,
                inference_models[0],
                None,
                max_timesteps,
                exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
                max_obstacles=max_obstacles,
                actuators='perfect_position',
                record=True)
            ca_tasks = [0 for _ in cartesian_obstacles]
            for obst_i, obstacle in enumerate(cartesian_obstacles):
                ca_tasks[obst_i] = ca_task(
                    vector([0, 0, 0]),
                    vector([
                        obstacle.center_x, obstacle.center_y, obstacle.center_z
                    ]), obstacle.radius, np.inf)

            top_priority_set_based_tasks = ca_tasks
        else:
            waypoint_configurations, waypoint_cartesian_positions, cartesian_obstacles = get_episode_better(
                datapath + '/rawdata/ca_trackpos/', from_episode_num)
            ca_tasks = [0 for _ in cartesian_obstacles]
            for obst_i, obstacle in enumerate(cartesian_obstacles):
                ca_tasks[obst_i] = ca_task(
                    vector([0, 0, 0]),
                    vector([
                        obstacle.center_x, obstacle.center_y, obstacle.center_z
                    ]), obstacle.radius, np.inf)

            top_priority_set_based_tasks = ca_tasks
            simulate(top_priority_set_based_tasks,
                     waypoint_configurations,
                     waypoint_cartesian_positions,
                     rtpwrapped,
                     max_timesteps,
                     history_infer,
                     actuators=actuators,
                     exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
                     random=random,
                     record=True,
                     inference_model=inference_models[0])
            simulate(top_priority_set_based_tasks,
                     waypoint_configurations,
                     waypoint_cartesian_positions,
                     rtpwrapped,
                     max_timesteps,
                     history_solution,
                     actuators=actuators,
                     exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
                     random=random,
                     record=True,
                     inference_model=None)
            simulate([],
                     waypoint_configurations,
                     waypoint_cartesian_positions,
                     rtpwrapped,
                     max_timesteps,
                     history_no_ca,
                     actuators=actuators,
                     exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
                     random=random,
                     record=True,
                     inference_model=None)

        if from_episode_num is None:
            telemetries[0].gather(history_infer, history_solution,
                                  history_no_ca,
                                  waypoint_cartesian_positions[0],
                                  waypoint_cartesian_positions[1],
                                  waypoint_configurations[0],
                                  cartesian_obstacles)
        else:
            telemetries[0].gather(
                history_infer,
                history_solution,
                history_no_ca,
                waypoint_cartesian_positions[0],
                waypoint_cartesian_positions[1],
                waypoint_configurations[0],
                cartesian_obstacles,
                alternate_episode_index_name=from_episode_num)

        model_histories[0] = copy.deepcopy(history_infer)
        model_histories_index = 1
        for telemetry, inference_model in zip(telemetries[1:],
                                              inference_models[1:]):
            history_infer = pandas_episode_trajectory_initialize(
                max_timesteps, max_obstacles)

            simulate(top_priority_set_based_tasks,
                     waypoint_configurations,
                     waypoint_cartesian_positions,
                     rtpwrapped,
                     max_timesteps,
                     history_infer,
                     actuators=actuators,
                     exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
                     random=random,
                     record=True,
                     inference_model=inference_model)

            if from_episode_num is None:
                telemetry.gather(history_infer, history_solution,
                                 history_no_ca,
                                 waypoint_cartesian_positions[0],
                                 waypoint_cartesian_positions[1],
                                 waypoint_configurations[0],
                                 cartesian_obstacles)
            else:
                telemetry.gather(history_infer,
                                 history_solution,
                                 history_no_ca,
                                 waypoint_cartesian_positions[0],
                                 waypoint_cartesian_positions[1],
                                 waypoint_configurations[0],
                                 cartesian_obstacles,
                                 alternate_episode_index_name=from_episode_num)

            model_histories[model_histories_index] = copy.deepcopy(
                history_infer)
            model_histories_index += 1

        fig, _ = plot_ca_compare(model_histories, cartesian_obstacles)
        save_plot(
            fig, plotpath + '/seed' + str(seed) + '_nume' + str(num_episodes) +
            '_numt' + str(max_timesteps) +
            '_border_distance_model_comparisons', 'episode_' + str(i))

        fig, _ = plot_tracking_compare_models(model_histories,
                                              ('sphere', 'slot', 'control'),
                                              waypoint_cartesian_positions[1],
                                              markevery=1000)
        save_plot(
            fig, plotpath + '/seed' + str(seed) + '_nume' + str(num_episodes) +
            '_numt' + str(max_timesteps) + '_position_error_model_comparisons',
            'episode_' + str(i))

    for telemetry in telemetries:
        telemetry.end_and_save()
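An invocation sketch using the model names and initials from code example #9; from_checkpoint_num=25 mirrors the checkpoint index loaded there, and the paths follow the same assumed layout:

import os

observe(seed=5,
        datapath=os.getcwd() + '/data',
        modelpath=os.getcwd() + '/data/sessions/CAI/checkpoints',
        modelnames=modelnames,  # the three CAVIKAUGee_* names from code example #9
        modelinitials=['sphere', 'slot', 'control'],
        num_episodes=10,
        max_timesteps=1500,
        max_obstacles=5,
        from_latest_checkpoint=False,
        from_checkpoint_num=25)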
Code example #12
File: run_simulate.py Project: daniena/legacy_th
from simulation.simulation import generate_and_simulate, generate_and_simulate_forced_bias_pattern_near_trajectory, exit_criteria_at_end_waypoint_or_i_max
from learning.datagen import pandas_episode_trajectory_initialize
from learning.infer import *
from keras.models import load_model
from session import CAI_random
import os

if __name__ == '__main__':

    seed = 0
    (random, randomstate, seed) = CAI_random(seed)

    max_timesteps = 1500  # 16000 for position kp="3"
    max_obstacles = 5
    num_episodes = 10
    history = pandas_episode_trajectory_initialize(max_timesteps,
                                                   max_obstacles)
    for _ in range(num_episodes):
        #generate_and_simulate(random, history, max_timesteps, exit_criteria=exit_criteria_at_end_waypoint_or_i_max, max_obstacles=max_obstacles, actuators='perfect_position', record=False, inference_model=None)
        generate_and_simulate_forced_bias_pattern_near_trajectory(
            random,
            history,
            max_timesteps,
            exit_criteria=exit_criteria_at_end_waypoint_or_i_max,
            max_obstacles=max_obstacles,
            actuators='perfect_position',
            record=False,
            inference_model=None)