Code example #1
'''
This script shows how to use the HDF5 database structure, including saving,
loading, renaming, and deleting data.
'''
import numpy as np

from abr_analyze import DataHandler
from download_examples_db import check_exists as examples_db

examples_db()
save_location = 'data_handling'
recorded_time = np.linspace(0, 1, 100)
recorded_data = np.random.rand(100, 3)
data_dict = {'trajectory': recorded_data, 'time': recorded_time}

# instantiate a database with your desired name
dat = DataHandler(db_name='abr_analyze_examples')

# save our data
dat.save(data=data_dict, save_location=save_location, overwrite=True)

# load our data
# we can specify what parameters to load
data = dat.load(parameters=['trajectory', 'time'], save_location=save_location)
trajectory = data['trajectory']
time = data['time']

# we can rename our save_location as well
new_save_location = 'data_handling_rename'
dat.rename(old_save_location=save_location,
           new_save_location=new_save_location,
           delete_old=True)
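
The docstring above also mentions deleting data, which the snippet stops short
of. Assuming the same DataHandler exposes a delete method keyed by
save_location (true in recent abr_analyze versions, but worth verifying
against yours), the final step would look like:

# remove the renamed entry once it is no longer needed
# (assumes DataHandler.delete(save_location=...) exists -- verify for your version)
dat.delete(save_location=new_save_location)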
Code example #2
import matplotlib.pyplot as plt
import numpy as np

from abr_analyze import DataHandler


def plot_performance(ax=None):
    """Plot each test's mean error, normalized against the PD baselines,
    with confidence bounds, on the given axis (or a new figure)."""
    test_group = "weighted_tests"
    main_db = "abr_neurorobotics2020_adaptation_data"
    test_list = [
        ["pd_no_weight", "", "k", main_db, test_group, "-"],
        ["pd", "", "tab:grey", main_db, test_group, "-"],
        ["pid", "PID", "tab:brown", main_db, test_group, "-"],
        ["nengo_cpu1k", "Adaptive CPU", "g", main_db, test_group, "-"],
        ["nengo_gpu1k", "Adaptive GPU", "r", main_db, test_group, "-"],
        ["nengo_loihi1k", "Adaptive Loihi", "b", main_db, test_group, "-"],
    ]
    pd_dat = DataHandler(db_name=main_db)

    # load in PD mean performance with and without weight for normalizing
    pd_no_weight_mean = pd_dat.load(
        parameters=["mean"],
        save_location="%s/pd_no_weight/statistical_error_0" % (test_group),
    )["mean"]
    pd_weight_mean = pd_dat.load(parameters=["mean"],
                                 save_location="%s/pd/statistical_error_0/" %
                                 (test_group))["mean"]

    n_sessions = 5
    n_runs = 50
    interpolated_samples = 400

    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(5, 5))

    for ii, test in enumerate(test_list):
        print("Processing test %i/%i: %s" % (ii + 1, len(test_list), test[0]))
        save_location = "%s/%s" % (test[4], test[0])
        errors = []
        for session in range(n_sessions):
            session_error = []
            for run in range(n_runs):
                print(
                    "%.3f%% processing complete..." %
                    (100 * ((run + 1) + (session * n_runs)) /
                     (n_sessions * n_runs)),
                    end="\r",
                )
                loc = "%s/session%03d/run%03d" % (save_location, session, run)
                session_error.append(
                    np.mean(
                        pd_dat.load(parameters=["error"],
                                    save_location=loc)["error"]))
            errors.append(session_error)

        n = 3000  # use 3000 sample points
        p = 0.95  # calculate 95% confidence interval
        sample = []
        upper_bound = []
        lower_bound = []
        sets = np.array(errors).shape[0]
        data_pts = np.array(errors).shape[1]
        print("Mean and CI calculation found %i sets of %i data points" %
              (sets, data_pts))
        errors = np.array(errors)
        for i in range(data_pts):
            data = errors[:, i]
            index = int(n * (1 - p) / 2)
            # bootstrap: draw n resamples of this run's session errors
            # (with replacement) and take the mean of each resample
            samples = np.random.choice(data, size=(n, len(data)))
            r = [np.mean(s) for s in samples]
            r.sort()
            # the CI bounds sit (1 - p) / 2 of the way in from either end
            # of the sorted bootstrap means
            ci = r[index], r[-index]
            sample.append(np.mean(data))
            lower_bound.append(ci[0])
            upper_bound.append(ci[1])

        ci_errors = {
            "mean": sample,
            "lower_bound": lower_bound,
            "upper_bound": upper_bound,
        }
        ci_errors["time_derivative"] = 0

        # load the precomputed statistics from the database (note that the
        # bootstrap values computed above are not used further in this excerpt)
        data = pd_dat.load(
            parameters=["mean", "upper_bound", "lower_bound"],
            save_location="%s/statistical_error_0" % save_location,
        )

        # subtract the PD performance with no weight
        data["mean"] = data["mean"] - pd_no_weight_mean
        data["upper_bound"] = data["upper_bound"] - pd_no_weight_mean
        data["lower_bound"] = data["lower_bound"] - pd_no_weight_mean

        # normalize by the PD performance with a weight
        data["mean"] = data["mean"] / (pd_weight_mean - pd_no_weight_mean)
        data["upper_bound"] = data["upper_bound"] / (pd_weight_mean -
                                                     pd_no_weight_mean)
        data["lower_bound"] = data["lower_bound"] / (pd_weight_mean -
                                                     pd_no_weight_mean)

        ax.fill_between(
            range(np.array(data["mean"]).shape[0]),
            data["upper_bound"],
            data["lower_bound"],
            color=test[2],
            alpha=0.5,
        )
        ax.plot(data["mean"], color=test[2], label=test[1], linestyle=test[5])
        ax.set_title("Performance")

    ax.set_ylabel("Percent error")
    ax.set_xlabel("Trial")
    ax.legend()
    ax.grid()
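
The confidence-interval block inside the loop above can be pulled out into a
standalone helper. A minimal sketch of the same bootstrap procedure (my
refactoring, not part of the original script):

import numpy as np

def bootstrap_ci(data, n=3000, p=0.95):
    """Return (mean, lower, upper) bootstrap bounds for a 1D sample."""
    index = int(n * (1 - p) / 2)
    # draw n resamples with replacement, each the size of the original sample
    samples = np.random.choice(data, size=(n, len(data)))
    means = np.sort(samples.mean(axis=1))
    return np.mean(data), means[index], means[-index]

Calling bootstrap_ci(errors[:, i]) for each run reproduces the mean,
lower_bound, and upper_bound values accumulated above.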
Code example #3
# reconstructed preamble -- this excerpt begins mid-script, so the imports and
# argument handling below are assumptions based on the commented-out hints
import sys
from os import listdir
from os.path import isfile, join

from abr_analyze import DataHandler
from abr_analyze.paths import database_dir  # assumed source of database_dir

folder = None
if len(sys.argv) > 2:
    folder = sys.argv[2]

if len(sys.argv) > 1:
    db = sys.argv[1]
else:
    onlyfiles = [
        f for f in listdir(database_dir) if isfile(join(database_dir, f))
    ]
    print(
        "No database passed in; the following are available in the repo "
        f"database directory: {database_dir}"
    )
    for ii, fname in enumerate(onlyfiles):
        print(f"{ii}) {fname}")
    index = input("Which database would you like to view? ")
    db = onlyfiles[int(index)].split(".")[0]

dat = DataHandler(db_name=db, database_dir=folder)


class bcolors:
    HEADER = "\033[95m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    ENDC = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"


# set some constants
LARGE_FONT = ("Verdana", 20)
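
For reference, the ANSI escape codes in bcolors are used by wrapping text
between a color attribute and ENDC. A small illustration (not part of the
original script):

# example usage of the bcolors escape codes (illustrative only)
print(f"{bcolors.GREEN}loaded database: {db}{bcolors.ENDC}")
print(f"{bcolors.BOLD}{bcolors.YELLOW}warning: no folder specified{bcolors.ENDC}")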
Code example #4
File: data_handling.py  Project: abr/abr_analyze
"""
This script shows how to use the HDF5 database structure, including saving,
loading, renaming, and deleting data.
"""
import numpy as np
from download_examples_db import check_exists as examples_db

from abr_analyze import DataHandler

examples_db()
save_location = "data_handling"
recorded_time = np.linspace(0, 1, 100)
recorded_data = np.random.rand(100, 3)
data_dict = {"trajectory": recorded_data, "time": recorded_time}

# instantiate a database with your desired name
dat = DataHandler(db_name="abr_analyze_examples")

# save our data
dat.save(data=data_dict, save_location=save_location, overwrite=True)

# load our data
# we can specify what parameters to load
data = dat.load(parameters=["trajectory", "time"], save_location=save_location)
trajectory = data["trajectory"]
time = data["time"]

# we can rename our save_location as well
new_save_location = "data_handling_rename"
dat.rename(
    old_save_location=save_location,
    new_save_location=new_save_location,
    delete_old=True,
)
Code example #5
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os

from abr_analyze import DataHandler
from abr_analyze.nengo import network_utils
from abr_control.controllers import signals
from abr_analyze.paths import cache_dir, figures_dir
from download_examples_db import check_exists as examples_db

import nengo

examples_db()
dat = DataHandler('abr_analyze_examples')
fig = plt.figure(figsize=(8, 12))
ax_list = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]

runs = 10
# stack the input signal recorded on each run into a single array
input_signals = []
for ii in range(runs):
    data = dat.load(parameters=['input_signal'],
                    save_location='test_1/session000/run%03d' % ii)
    input_signals.append(data['input_signal'])

input_signal = np.squeeze(np.vstack(input_signals))

# specify our network parameters
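
The snippet ends here. As a purely illustrative continuation (these names and
values are assumptions, not the original script's), the network parameters
might be specified along the lines of:

# hypothetical network parameters -- illustrative only
n_input = input_signal.shape[1]  # dimensionality of the recorded signal
n_neurons = 1000
n_ensembles = 1
seed = 0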
Code example #6
import matplotlib
import numpy as np

matplotlib.use("TkAgg")
import os

import matplotlib.pyplot as plt
from abr_control.controllers import signals
from download_examples_db import check_exists as examples_db

from abr_analyze import DataHandler
from abr_analyze.nengo import network_utils
from abr_analyze.paths import cache_dir, figures_dir

examples_db()
dat = DataHandler("abr_analyze_examples")
fig = plt.figure(figsize=(8, 12))
ax_list = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
data = dat.load(
    parameters=[
        "n_input",
        "n_output",
        "n_neurons",
        "n_ensembles",
        "pes",
        "intercepts",
        "seed",
        "encoders",
    ],
    save_location="nengo_data",
)
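
The loaded dictionary holds the parameters needed to rebuild the adaptive
population. A minimal sketch of unpacking it into local names (the variable
names are mine; the keys come from the load call above):

# unpack the loaded parameters (keys match the load() call above)
n_input = int(data["n_input"])
n_output = int(data["n_output"])
n_neurons = int(data["n_neurons"])
n_ensembles = int(data["n_ensembles"])
pes_learning_rate = data["pes"]
intercepts = data["intercepts"]
seed = int(data["seed"])
encoders = data["encoders"]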
Code example #7
"""
Plots the input signal, projected down to 2D and 3D,
against the first dimension of u_adapt.
"""
import matplotlib
import numpy as np

matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from download_examples_db import check_exists as examples_db

import abr_analyze.data_visualizer as vis
from abr_analyze import DataHandler
from abr_analyze.paths import figures_dir
from abr_analyze.plotting import TrajectoryError

examples_db()
dat = DataHandler("abr_analyze_examples")

data = dat.load(
    parameters=["input_signal", "u_adapt"], save_location="test_1/session000/run000"
)
input_signal = data["input_signal"].squeeze()
output_signal = data["u_adapt"][:, 0].squeeze()

fig, axes = plt.subplots(3, 1)

vis.plot_against_projection_2d(
    ax=axes[0], data_project=input_signal, data_plot=output_signal
)
vis.plot_against_projection_3d(
    ax=axes[1], data_project=input_signal, data_plot=output_signal
)
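
The snippet is cut off before the third subplot is filled or the figure is
shown. An assumed wrap-up using the figures_dir imported above (illustrative
only, not from the original script):

# illustrative wrap-up -- assumed, not part of the original snippet
plt.tight_layout()
plt.savefig("%s/projection_example.png" % figures_dir)
plt.show()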
Code example #8
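# Note: searchable_save, gen_parameter_variations, load_results, print_nested,
# and dict_nested2str are helpers defined in the surrounding module (not shown
# in this excerpt); numpy is assumed to be imported as np at module level.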
def _example():
    import matplotlib.pyplot as plt

    # Instantiate database to save results
    db_name = "searchable_results_example"
    db_folder = None
    dat = DataHandler(db_name=db_name, database_dir=db_folder)

    # generate baseline json
    params = {
        "sin_params": {
            "A": 3,
            "shift": 5,
        },
        "time": [0, 5, 100],
        "exp": 2,
    }

    # if loading from json
    # with open(json_fp) as fp:
    #     params = json.load(fp)

    # example function that generates results
    # Needs to accept params dict as input and return dictionary of results
    def example_results(params):
        t = np.linspace(params["time"][0], params["time"][1],
                        params["time"][2])
        y = (params["sin_params"]["A"] *
             np.sin(t - params["sin_params"]["shift"])**params["exp"])
        return {"t": t, "y": y}

    # unique name for script that generates results
    # should update name if something changes in the script that would affect results
    script_name = "example_script"

    # get results
    print("--Getting results for baseline parameters--")
    results = example_results(params)

    # save in searchable format
    print("--Saving baseline results--")
    searchable_save(dat=dat,
                    results=results,
                    params=params,
                    script_name=script_name)

    # helper function to quickly create some variations of our parameter set
    print("--Generating parameter variations--")
    param_variations = gen_parameter_variations(params=params,
                                                variation_dict={
                                                    "sin_params/A": [5, 7, 10],
                                                    "exp": [3, 4]
                                                })

    # get results for each variation and save
    print("--Getting results for parameter variations--")
    for hash_id, varied_params in param_variations.items():
        print(f"\nGetting results for {hash_id}")
        # pretty printing of nested dictionaries
        print_nested(varied_params, indent=0, return_val=False)

        results = example_results(varied_params)
        print("Saving results")
        searchable_save(dat=dat,
                        results=results,
                        params=varied_params,
                        script_name=script_name)

    # now load all results that have these parameter values
    const_params = {
        "exp": 3,
    }
    # result keys to load
    result_keys = ["y"]

    # Load results that have a set of common parameters
    print(f"Loading results with parameters:\n{const_params}")
    results = load_results(
        script_name=script_name,
        const_params=const_params,
        saved_exp_hashes=None,
        result_keys=result_keys,
        dat=dat,
        ignore_keys=None,
    )

    # plot the results
    plt.figure()
    ax = plt.subplot(111)
    for hash_name in results:
        # ignore const and variable params keys
        if "params" in hash_name:
            continue
        # print(dict_nested2str(results[hash_name]))
        ax.plot(results[hash_name]["results"]["y"],
                label=results[hash_name]["name"])

    # print the values that are constant between all tests
    ax.text(
        0,
        -5,
        ("Constant Parameters\n" + "___________________\n" +
         dict_nested2str(results["const_params"])),
        fontsize=8,
    )
    plt.subplots_adjust(right=0.6)

    plt.legend()
    plt.tight_layout()
    plt.show()
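
If the module is run directly, the example can be invoked with a standard
entry-point guard (an assumption about how the original module is driven):

if __name__ == "__main__":
    _example()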