Example #1
0
def CAVIKee_sphere_dataset(datapath, seed=4, iterate_seed=False):
    """Build and save the CAVIKee sphere dataset from collision-avoidance
    position-tracking raw data.

    Loads path/name configuration via ``CAI_args``, seeds the RNG via
    ``CAI_random``, constructs the train/validation/test splits, and
    writes the dataset (plus its source filenames) with ``save_numpy``.
    """
    args = CAI_args(datapath)
    dataset_names, rawdata_paths, dataset_paths = args[2], args[4], args[5]
    random, _randomstate, seed = CAI_random(seed, iterate_seed=iterate_seed)

    # Construction settings: select_random_proportion with argument 0.55,
    # a 70/15/15 train/validation/test split, and up to 5 obstacles.
    max_obstacles = 5
    split_proportion = (0.7, 0.15, 0.15)
    selection_methods = (select_random_proportion,)
    args_per_selection_method = ((0.55,),)

    dataset, filenames = construct(
        random,
        (rawdata_paths[1],),
        max_obstacles,
        CAVIKee_sphere_IO_from_rawdata,
        selection_methods,
        args_per_selection_method,
        *split_proportion,
    )
    save_numpy(dataset_paths[3], dataset_names[3], dataset)
    save_numpy(dataset_paths[3], dataset_names[3] + '_filenames', filenames)
Example #2
0
def VIK_dataset(datapath, seed=2, iterate_seed=False):
    """Build and save the VIK dataset from all raw data, for training
    velocity inverse kinematics for tracking with zero obstacles assumed.
    """
    args = CAI_args(datapath)
    dataset_names, rawdata_paths, dataset_paths = args[2], args[4], args[5]
    random, _randomstate, seed = CAI_random(seed, iterate_seed=iterate_seed)

    # Construction settings: select_random_proportion with argument 1
    # (i.e. every sample), a 70/15/15 split, and no obstacles.
    max_obstacles = 0
    split_proportion = (0.7, 0.15, 0.15)
    selection_methods = (select_random_proportion,)
    args_per_selection_method = ((1,),)

    dataset, filenames = construct(
        random,
        (rawdata_paths[0],),
        max_obstacles,
        VIK_IO_from_rawdata,
        selection_methods,
        args_per_selection_method,
        *split_proportion,
    )
    save_numpy(dataset_paths[0], dataset_names[0], dataset)
    save_numpy(dataset_paths[0], dataset_names[0] + '_filenames', filenames)
Example #3
0
    def set_normalization_params(self):
        """Load (or compute and cache) normalization parameters for this
        model's training dataset and store them on ``self``.

        Looks in the dataset directory for a previously saved
        ``<name>_training_normalization_params.npy`` file. If found, its
        two entries become ``self.input_normalization_params`` and
        ``self.output_normalization_params``. Otherwise the parameters are
        computed from the training inputs/outputs, saved for later runs,
        and stored on ``self``.

        Exits the process if ``self.name`` is not a supported model family.
        """
        (_, _, datasetnames, _, _, datasetpaths, _, _,
         _) = CAI_args(self.datapath)

        if self.name[0:10] == 'CAVIKAUGee' or self.name[0:3] == 'VIK':
            datasetpath = datasetpaths[2]
            datasetname = datasetnames[2]
        else:
            print('Inference_Model not yet supported')
            exit(1)

        found_params = False
        for filename in os.listdir(datasetpath):
            if filename.find('_training_normalization_params') > -1:
                params = np.load(datasetpath + '/' + datasetname +
                                 '_training_normalization_params.npy')
                self.input_normalization_params = params[0]
                self.output_normalization_params = params[1]

                found_params = True
                break

        if not found_params:
            print(
                'Could not find normalization parameters, loading, calculating, and storing them for later expedience.'
            )
            # BUG FIX: the fallback loads previously omitted the '/' path
            # separator (datasetpath + datasetname + ...), unlike every
            # other path built in this method, so they pointed at a
            # non-existent file.
            input_nor_params = normalize_parameters(
                np.load(datasetpath + '/' + datasetname +
                        '_training_inputs.npy'))
            output_nor_params = normalize_parameters(
                np.load(datasetpath + '/' + datasetname +
                        '_training_outputs.npy'))

            np.save(
                datasetpath + '/' + datasetname +
                '_training_normalization_params',
                (input_nor_params, output_nor_params))

            self.input_normalization_params = input_nor_params
            self.output_normalization_params = output_nor_params
Example #4
0
def CAVIKAUGee_slot_dataset_test(datapath, seed=5, iterate_seed=False):
    """Build and save a test-only CAVIKAUGee slot dataset from
    collision-avoidance position-tracking raw data.

    Uses a (0, 0, 1) split so the entire selection lands in the test
    partition, then writes the test inputs/outputs and filenames under a
    ``*_test`` directory.
    """
    (_, rawdatanames, datasetnames, _, rawdatapaths, datasetpaths,
     _, _, _) = CAI_args(datapath)
    random, randomstate, seed = CAI_random(seed, iterate_seed=iterate_seed)

    # Construction settings: select_random_proportion with argument 0.20,
    # everything routed to the test split, up to 5 obstacles.
    max_obstacles = 5
    split_proportion = (0, 0, 1)
    selection_methods = (select_random_proportion,)
    args_per_selection_method = ((0.20,),)

    splits, filenames = construct(
        random, (rawdatapaths[1],), max_obstacles,
        CAVIKAUGee_slots_IO_from_rawdata, selection_methods,
        args_per_selection_method, *split_proportion)
    # construct returns (train_in, train_out, val_in, val_out, test_in,
    # test_out); only the test portion is kept here.
    test_inputs, test_outputs = splits[4], splits[5]

    path = datasetpaths[2] + '_test'
    name = datasetnames[2]
    make_path(path)

    numpy.save(path + '/' + name + '_filenames', filenames)
    numpy.save(path + '/' + name + '_test_inputs', test_inputs)
    numpy.save(path + '/' + name + '_test_outputs', test_outputs)
Example #5
0
def make_plots():
    """Render training-history plots: one pass over all most-recent
    checkpoints, plus comparison plots for a fixed list of model names."""
    seed = 100
    datapath = os.getcwd() + '/data'

    random, randomstate, seed = CAI_random(seed)
    (_, _, _, _, _, _, checkpointpath, modelspath, _) = CAI_args(datapath)

    # Plot every most-recent checkpoint, saving the figures to disk.
    plot_all_most_recent_checkpoints(
        random,
        checkpointpath,
        performance_threshold_to_show=0.0,
        containing=None,
        save=True,
        plotpath=datapath + '/sessions/CAI/plots/training_history_plots')

    modelnames = [
        'CAVIKAUGee_sphere_correct_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',  #'CAVIKAUGee_sphere_correct_activation12_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
        'CAVIKAUGee_slot_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
        'CAVIKAUGee_no_obst_input_control_experiment_ReLU_SGDcustom_clipping_no_batchnormalization_simulated_annealing_greater_step_bigger_model_well_connected',
    ]
    plot_histories_of_models_valfix(modelnames, 'training_history_comparison')
    plot_histories_of_models(('VIK_pyramid', ), 'training_history_trials')