Example #1
from abr_analyze import DataHandler


def gen_lookup_table(db_name, db_folder):
    """Map every parameter to the hash ids that share each of its values."""
    dat = DataHandler(db_name=db_name, database_dir=db_folder)

    hashes = dat.load(save_location="params",
                      parameters=dat.get_keys("params"))
    lookup = {}
    for hash_id in hashes:
        params = dat.load(
            save_location=f"params/{hash_id}",
            parameters=dat.get_keys(f"params/{hash_id}", recursive=True),
        )
        for key, val in params.items():
            val = str(val)
            if key not in lookup:
                lookup[key] = {val: [hash_id]}
            elif val not in lookup[key]:
                lookup[key][val] = [hash_id]
            else:
                lookup[key][val].append(hash_id)
    return lookup
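
# A minimal usage sketch (hypothetical database name, folder, parameter, and
# value; assumes a database whose "params" group holds one sub-group per
# parameter-set hash):
lookup = gen_lookup_table(db_name="my_results", db_folder="data")
hash_ids = lookup.get("learning_rate", {}).get("0.001", [])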
Example #2
import os

import matplotlib.pyplot as plt
import numpy as np

from abr_control._vendor.nengolib.stats import ScatteredHypersphere

from abr_analyze.nengo import network_utils, intercepts_scan
from abr_control.controllers import signals
from abr_analyze.paths import cache_dir
from abr_analyze import DataHandler
from download_examples_db import check_exists as examples_db

examples_db()
runs = 10
dat = DataHandler('abr_analyze_examples')
# stack the input signals recorded across the runs
for ii in range(runs):
    data = dat.load(parameters=['input_signal'],
                    save_location='test_1/session000/run%03d' % ii)
    if ii == 0:
        input_signal = data['input_signal']
    else:
        input_signal = np.vstack((input_signal, data['input_signal']))

input_signal = np.squeeze(input_signal)

# specify our network parameters
seed = 0
n_neurons = 1000
n_ensembles = 1
n_input = 11

# ----------- Create your encoders ---------------
hypersphere = ScatteredHypersphere(surface=True)
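# the example is truncated here; a plausible completion, assuming the vendored
# nengolib Distribution API (ScatteredHypersphere.sample(n, d)); this is a
# sketch, not the original script:
encoders = hypersphere.sample(n_neurons * n_ensembles, n_input)
encoders = encoders.reshape(n_ensembles, n_neurons, n_input)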
Example #3
import numpy as np

from abr_analyze import DataHandler
from download_examples_db import check_exists as examples_db

examples_db()
save_location = 'data_handling'
recorded_time = np.linspace(0, 1, 100)
recorded_data = np.random.rand(100, 3)
data_dict = {'trajectory': recorded_data, 'time': recorded_time}

# instantiate a database with your desired name
dat = DataHandler(db_name='abr_analyze_examples')

# save our data
dat.save(data=data_dict, save_location=save_location, overwrite=True)

# load our data
# we can specify what parameters to load
data = dat.load(parameters=['trajectory', 'time'], save_location=save_location)
trajectory = data['trajectory']
time = data['time']

# we can rename our save_location as well
new_save_location = 'data_handling_rename'
dat.rename(old_save_location=save_location,
           new_save_location=new_save_location,
           delete_old=True)

# if we don't know the parameters, or simply want to load all of them without
# writing out the entire list, we can get all the keys at the save location
keys = dat.get_keys(save_location=new_save_location)
data = dat.load(parameters=keys, save_location=new_save_location)
Example #4
import matplotlib.pyplot as plt
import numpy as np

from abr_analyze import DataHandler


def plot_performance(ax=None):
    test_group = "weighted_tests"
    main_db = "abr_neurorobotics2020_adaptation_data"
    # each entry: [test name, plot label, color, database, test group, linestyle]
    test_list = [
        ["pd_no_weight", "", "k", main_db, test_group, "-"],
        ["pd", "", "tab:grey", main_db, test_group, "-"],
        ["pid", "PID", "tab:brown", main_db, test_group, "-"],
        ["nengo_cpu1k", "Adaptive CPU", "g", main_db, test_group, "-"],
        ["nengo_gpu1k", "Adaptive GPU", "r", main_db, test_group, "-"],
        ["nengo_loihi1k", "Adaptive Loihi", "b", main_db, test_group, "-"],
    ]
    pd_dat = DataHandler(db_name=main_db)

    # load in PD mean performance with and without weight for normalizing
    pd_no_weight_mean = pd_dat.load(
        parameters=["mean"],
        save_location="%s/pd_no_weight/statistical_error_0" % (test_group),
    )["mean"]
    pd_weight_mean = pd_dat.load(
        parameters=["mean"],
        save_location="%s/pd/statistical_error_0/" % test_group,
    )["mean"]

    n_sessions = 5
    n_runs = 50
    interpolated_samples = 400

    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(5, 5))

    for ii, test in enumerate(test_list):
        print("Processing test %i/%i: %s" % (ii + 1, len(test_list), test[0]))
        save_location = "%s/%s" % (test[4], test[0])
        errors = []
        for session in range(n_sessions):
            session_error = []
            for run in range(n_runs):
                print(
                    "%.3f%% processing complete..." %
                    (100 * ((run + 1) + (session * n_runs)) /
                     (n_sessions * n_runs)),
                    end="\r",
                )
                loc = "%s/session%03d/run%03d" % (save_location, session, run)
                session_error.append(
                    np.mean(
                        pd_dat.load(parameters=["error"],
                                    save_location=loc)["error"]))
            errors.append(session_error)

        # bootstrap a 95% confidence interval around the mean error at each run
        n = 3000  # number of bootstrap resamples
        p = 0.95  # confidence level
        sample = []
        upper_bound = []
        lower_bound = []
        sets = np.array(errors).shape[0]
        data_pts = np.array(errors).shape[1]
        print("Mean and CI calculation found %i sets of %i data points" %
              (sets, data_pts))
        errors = np.array(errors)
        for i in range(data_pts):
            data = errors[:, i]
            index = int(n * (1 - p) / 2)
            # resample the session means with replacement, then sort to read
            # off the (1 - p) / 2 and 1 - (1 - p) / 2 percentiles
            samples = np.random.choice(data, size=(n, len(data)))
            r = [np.mean(s) for s in samples]
            r.sort()
            ci = r[index], r[-index]
            sample.append(np.mean(data))
            lower_bound.append(ci[0])
            upper_bound.append(ci[1])

        ci_errors = {
            "mean": sample,
            "lower_bound": lower_bound,
            "upper_bound": upper_bound,
            "time_derivative": 0,
        }

        data = pd_dat.load(
            parameters=["mean", "upper_bound", "lower_bound"],
            save_location="%s/statistical_error_0" % save_location,
        )

        # subtract the PD performance with no weight
        data["mean"] = data["mean"] - pd_no_weight_mean
        data["upper_bound"] = data["upper_bound"] - pd_no_weight_mean
        data["lower_bound"] = data["lower_bound"] - pd_no_weight_mean

        # normalize by the PD performance with a weight
        data["mean"] = data["mean"] / (pd_weight_mean - pd_no_weight_mean)
        data["upper_bound"] = data["upper_bound"] / (pd_weight_mean -
                                                     pd_no_weight_mean)
        data["lower_bound"] = data["lower_bound"] / (pd_weight_mean -
                                                     pd_no_weight_mean)

        ax.fill_between(
            range(np.array(data["mean"]).shape[0]),
            data["upper_bound"],
            data["lower_bound"],
            color=test[2],
            alpha=0.5,
        )
        ax.plot(data["mean"], color=test[2], label=test[1], linestyle=test[5])
        ax.set_title("Performance")

    ax.set_ylabel("Percent error")
    ax.set_xlabel("Trial")
    ax.legend()
    ax.grid()
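
# A minimal usage sketch (assumes the databases referenced above are available
# locally):
plot_performance()
plt.show()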
Example #5
import numpy as np

from abr_analyze import DataHandler
from download_examples_db import check_exists as examples_db

examples_db()
save_location = "data_handling"
recorded_time = np.linspace(0, 1, 100)
recorded_data = np.random.rand(100, 3)
data_dict = {"trajectory": recorded_data, "time": recorded_time}

# instantiate a database with your desired name
dat = DataHandler(db_name="abr_analyze_examples")

# save our data
dat.save(data=data_dict, save_location=save_location, overwrite=True)

# load our data
# we can specify what parameters to load
data = dat.load(parameters=["trajectory", "time"], save_location=save_location)
trajectory = data["trajectory"]
time = data["time"]

# we can rename our save_location as well
new_save_location = "data_handling_rename"
dat.rename(
    old_save_location=save_location,
    new_save_location=new_save_location,
    delete_old=True,
)

# if we don't know the parameters, or simply want to load all of them without
# writing out the entire list, we can get all the keys at the save location
keys = dat.get_keys(save_location=new_save_location)
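# with those keys in hand the full record can be loaded, mirroring Example #3:
data = dat.load(parameters=keys, save_location=new_save_location)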
Example #6
import matplotlib.pyplot as plt
import numpy as np

from abr_analyze import DataHandler
from abr_analyze.nengo import network_utils
from abr_analyze.paths import cache_dir, figures_dir
from download_examples_db import check_exists as examples_db

examples_db()
dat = DataHandler("abr_analyze_examples")
fig = plt.figure(figsize=(8, 12))
ax_list = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
data = dat.load(
    parameters=[
        "n_input",
        "n_output",
        "n_neurons",
        "n_ensembles",
        "pes",
        "intercepts",
        "seed",
        "encoders",
    ],
    save_location="nengo_data",
)

n_input = int(data["n_input"])
n_output = int(data["n_output"])
n_neurons = int(data["n_neurons"])
n_ensembles = int(data["n_ensembles"])
pes_learning_rate = float(data["pes"])
intercepts = data["intercepts"]
intercepts = np.array(intercepts)
intercepts = intercepts.reshape((n_ensembles, n_neurons))
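
# A sketch of how the loaded parameters could recreate one of the adaptive
# ensembles in nengo (assumes nengo is installed; this is not part of the
# original example):
import nengo

encoders = np.array(data["encoders"]).reshape(n_ensembles, n_neurons, n_input)
with nengo.Network(seed=int(data["seed"])) as net:
    ens = nengo.Ensemble(
        n_neurons=n_neurons,
        dimensions=n_input,
        intercepts=intercepts[0],
        encoders=encoders[0],
    )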
"""
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt

from abr_analyze import DataHandler
import abr_analyze.data_visualizer as vis
from abr_analyze.plotting import TrajectoryError
from abr_analyze.paths import figures_dir
from download_examples_db import check_exists as examples_db

examples_db()
dat = DataHandler('abr_analyze_examples')

data = dat.load(parameters=['input_signal', 'u_adapt'],
                save_location='test_1/session000/run000')
input_signal = data['input_signal'].squeeze()
output_signal = data['u_adapt'][:, 0].squeeze()

fig, axes = plt.subplots(3, 1)

vis.plot_against_projection_2d(ax=axes[0],
                               data_project=input_signal,
                               data_plot=output_signal)
vis.plot_against_projection_3d(ax=axes[1],
                               data_project=input_signal,
                               data_plot=output_signal)
vis.plot_against_projection_4d(ax=axes[2],
                               data_project=input_signal,
                               data_plot=output_signal)
Example #8
import matplotlib
import numpy as np

matplotlib.use("TkAgg")
import matplotlib.pyplot as plt

from download_examples_db import check_exists as examples_db

import abr_analyze.data_visualizer as vis
from abr_analyze import DataHandler
from abr_analyze.paths import figures_dir
from abr_analyze.plotting import TrajectoryError

examples_db()
dat = DataHandler("abr_analyze_examples")

data = dat.load(
    parameters=["input_signal", "u_adapt"], save_location="test_1/session000/run000"
)
input_signal = data["input_signal"].squeeze()
output_signal = data["u_adapt"][:, 0].squeeze()

fig, axes = plt.subplots(3, 1)

vis.plot_against_projection_2d(
    ax=axes[0], data_project=input_signal, data_plot=output_signal
)
vis.plot_against_projection_3d(
    ax=axes[1], data_project=input_signal, data_plot=output_signal
)
vis.plot_against_projection_4d(
    ax=axes[2], data_project=input_signal, data_plot=output_signal
)
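
# to display the three projections, one might finish with (not part of the
# original snippet):
plt.tight_layout()
plt.show()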