コード例 #1 (Code example #1)
0
            a = i[e1]
            b = j[e1]
            c = i[e2]
            d = j[e2]
            if a != c and a != d and b != c and b != d:
                break
        R[a, d], R[a, b] = R[a, b], R[a, d]
        R[c, b], R[c, d] = R[c, d], R[c, b]
    return R

def _band_connectivity(dtf, frequencies, freq_min, freq_max):
    """Root-mean-square DTF over the inclusive frequency band [freq_min, freq_max].

    dtf is indexed by frequency bin along axis 0; the mean is taken over the
    bins whose frequency falls inside the band.
    """
    band = np.array([dtf[k] ** 2 for k, freq in enumerate(frequencies)
                     if freq_min <= freq <= freq_max])
    return np.sqrt(np.mean(band, axis=0))


components, _, _ = load_results('S8-mvar.pickle')
components = components[0]
# One (SIGNAL_DIM x SIGNAL_DIM) connectivity matrix per component and band.
alpha_connectivity = np.zeros([NUM_COMPONENTS, SIGNAL_DIM, SIGNAL_DIM])
beta_connectivity = np.zeros_like(alpha_connectivity)
delta_connectivity = np.zeros_like(alpha_connectivity)
theta_connectivity = np.zeros_like(alpha_connectivity)
for j, component_j in enumerate(components):
    transfer_function, frequencies = transfer_function_from_filter(
        unstack_ar_coef(component_j), SAMPLING_RATE, fft_len=FFT_LEN)
    # Zero the diagonal so self-connections do not dominate the normalization.
    transfer_function[:, np.arange(SIGNAL_DIM), np.arange(SIGNAL_DIM)] = 0
    # Directed transfer function: magnitude of H(f), normalized per row.
    dtf = np.abs(transfer_function) / sl.norm(transfer_function, axis=-1, keepdims=True)
    # BUG FIX: the beta band previously used `freq < BETA_MAX` (exclusive upper
    # bound) while delta/theta/alpha were inclusive; all bands are now inclusive.
    for connectivity, (fmin, fmax) in (
            (delta_connectivity, (DELTA_MIN, DELTA_MAX)),
            (theta_connectivity, (THETA_MIN, THETA_MAX)),
            (alpha_connectivity, (ALPHA_MIN, ALPHA_MAX)),
            (beta_connectivity, (BETA_MIN, BETA_MAX))):
        connectivity[j] = keep_significant_edges(
            _band_connectivity(dtf, frequencies, fmin, fmax))
vmax = max(np.max(alpha_connectivity), np.max(beta_connectivity))
fig, axs = plt.subplots(4, NUM_COMPONENTS)
images = []
コード例 #2 (Code example #2)
0
SAMPLING_RATE = 200  # Hz
SAMPLE_LEN = 2       # seconds of signal to synthesize per sleep stage
FFT_LEN = 100
CLASS_LABEL = ['Awake', 'N1', 'N2', 'N3', 'REM']

fig, axs = plt.subplots(5, 2)
subj_components, subj_mixing_coef, subj_labels = load_isruc_results(8, start=2)
images = []
for i, label in enumerate(np.unique(subj_labels)):
    # NOTE(review): assumes np.unique's sorted labels line up with the
    # CLASS_LABEL order — confirm against the label encoding.
    axs[i, 0].set_ylabel(CLASS_LABEL[i], fontsize=12)
    axs[i, 0].set_yticks(np.arange(0, 18, 3))
    axs[i, 0].set_xticklabels([])
    axs[i, 0].set_yticklabels(['F3', 'C3', 'O1', 'F4', 'C4', 'O2'])
    # Average mixing coefficients over all epochs carrying this stage label.
    subj_mixing_coef_i = np.mean(subj_mixing_coef[subj_labels == label],
                                 axis=0)
    ar_coef = unstack_ar_coef(
        np.tensordot(subj_mixing_coef_i, subj_components, axes=1))
    signal = autoregressive_sample(SAMPLE_LEN * SAMPLING_RATE, SIGNAL_DIM,
                                   SIGNAL_DIM**(-1 / 2), ar_coef)
    # Offset each channel vertically so the traces stack instead of overlapping.
    images.append(axs[i, 0].plot(signal + np.arange(0, 18, 3)))
    axs[i, 0].set_facecolor("#f2f3f4")
    # BUG FIX: Axes.grid's `b` keyword was deprecated in Matplotlib 3.5 and
    # removed in 3.7; `visible` is the supported parameter name.
    axs[i, 0].grid(visible=True,
                   which='major',
                   linestyle="-",
                   linewidth=1.5,
                   color="#ffffff",
                   zorder=3)
    axs[i, 0].grid(visible=True,
                   which='minor',
                   linewidth=0.75,
                   color="#ffffff",
                   zorder=3)
コード例 #3 (Code example #3)
0
# Fit the ALM model with the PALM solver, keeping the optimization path and
# the results from every random start.
alm = Alm(solver='palm', verbose=True)
D_palm, C_palm, palm_likelihood, _ = alm.fit(x,
                                             MODEL_ORD,
                                             NUM_COMPS,
                                             PENALTY_PARAM,
                                             num_starts=NUM_STARTS,
                                             initial_comps=D_0,
                                             return_path=True,
                                             return_all=True)
# Component-estimation error along each start's optimization path.
palm_error = []
for path in D_palm:
    path_loss = []
    for estimate in path:
        unstacked = np.zeros([NUM_COMPS, MODEL_ORD, SIG_DIM, SIG_DIM])
        for comp_idx in range(NUM_COMPS):
            unstacked[comp_idx] = unstack_ar_coef(estimate[comp_idx])
        dist, _, _ = ar_comp_dist(D, unstacked)
        path_loss.append(dist)
    palm_error.append(path_loss)

###################
# save results
###################
# save_results([palm_error, palm_likelihood], 'performance.pickle')

###################
# load results
###################
palm_error, palm_likelihood = load_results('performance.pickle')

fig, axs = plt.subplots(1, 2)
コード例 #4 (Code example #4)
0
"""
ALM Copyright (C) 2019  Addison Bohannon
"""

import numpy as np
import matplotlib.pyplot as plt
from alm.utility import unstack_ar_coef
from experiments.utility import load_results, periodogram_from_filter

SAMPLING_RATE = 200  # Hz
FFT_LEN = 100

components, _, _ = load_results('S8-mvar.pickle')
components = components[0]
# One (Pxx, freqs) periodogram per learned AR component.
periodogram = [
    periodogram_from_filter(unstack_ar_coef(component),
                            SAMPLING_RATE,
                            fft_len=FFT_LEN) for component in components
]
f = plt.figure()
# BUG FIX: `f.hold = True` removed — Figure.hold was deprecated in
# Matplotlib 2.0 and removed in 3.0; hold behavior is always on, so the
# assignment only set a dead attribute.
colors = [
    "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c",
    "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"
]
for Pxx, freqs in periodogram:
    # Plot only the first 25 frequency bins of each component's spectrum.
    plt.plot((Pxx[:25]), figure=f, linewidth=3.0)
    ax = plt.gca()
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the drop-in replacement.
    ax.set_xticks(np.arange(0, len(freqs[:25]), 5, dtype=int))
    # NOTE(review): labels come from freqs[::5] over the full array while the
    # ticks cover only the first 25 bins — lengths may disagree; confirm.
    ax.set_xticklabels(freqs[::5])
    ax.set_ylabel('Gain', fontsize=12)
コード例 #5 (Code example #5)
0
"""
ALM Copyright (C) 2019  Addison Bohannon
"""

import numpy as np
import matplotlib.pyplot as plt
from alm.utility import unstack_ar_coef
from experiments.utility import load_results, periodogram_from_filter

SAMPLING_RATE = 200  # Hz
FFT_LEN = 100
colors = ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"]

components, _, _ = load_results('S8-mvar.pickle')
# One list of (Pxx, freqs) periodograms per initialization (outer list) and
# per component (inner list).
periodograms = [[periodogram_from_filter(unstack_ar_coef(component), SAMPLING_RATE, fft_len=FFT_LEN)
                for component in component_k] for component_k in components]
fig, axs = plt.subplots(1, 5)
for i, periodogram in enumerate(periodograms):
    # Plot the first 25 frequency bins of every component's spectrum.
    for Pxx, freqs in periodogram:
        axs[i].plot((Pxx[:25]), linewidth=3.0)
    # Axes decoration depends only on the frequency grid (`freqs` of the last
    # component — presumably shared by all components; confirm), so it is
    # applied once per subplot instead of once per plotted line.
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    axs[i].set_xticks(np.arange(0, len(freqs[:25]), 5, dtype=int))
    axs[i].set_xticklabels(freqs[::5])
    axs[i].set_yscale('log')
    axs[i].set_ylabel('Gain', fontsize=12)
    axs[i].set_xlabel('Frequency (Hz)', fontsize=12)
    axs[i].set_title('Initialization ' + str(i), fontsize=14)
    axs[i].set_facecolor("#f2f3f4")
    # BUG FIX: Axes.grid's `b` keyword was deprecated in Matplotlib 3.5 and
    # removed in 3.7; `visible` is the supported parameter name.
    axs[i].grid(visible=True, which='major', linestyle="-", linewidth=1.5, color="#ffffff", zorder=3)
    axs[i].grid(visible=True, which='minor', linewidth=0.75, color="#ffffff", zorder=3)
    axs[i].legend(('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10'))
コード例 #6 (Code example #6)
0
ファイル: n_vs_m-var.py プロジェクト: addisonbohannon/alm (File: n_vs_m-var.py — Project: addisonbohannon/alm)
OBS_LEN = [2**i for i in range(4, 11)]
SIGNAL_DIM = 5
NUM_COMPONENTS = 10
COEF_SUPPORT = 1
MODEL_ORDER = 2
NUM_STARTS = 10
PENALTY_PARAM = 1e-2
NUM_ITERATIONS = 10

# Least-squares VAR estimation error for every (number of observations,
# observation length) pair, repeated over independent trials.
error = np.zeros([NUM_ITERATIONS, len(NUM_OBS), len(OBS_LEN)])
# nll = np.zeros_like(error)
for trial in range(NUM_ITERATIONS):
    max_obs, max_len = max(NUM_OBS), max(OBS_LEN)
    # Draw one AR component per observation and simulate every observation at
    # the maximal length; smaller settings slice into this data below.
    D = initialize_autoregressive_components(max_obs, MODEL_ORDER, SIGNAL_DIM)
    x = np.zeros([max_obs, max_len, SIGNAL_DIM])
    for idx in range(max_obs):
        x[idx] = autoregressive_sample(max_len, SIGNAL_DIM,
                                       SIGNAL_DIM ** (-1 / 2),
                                       unstack_ar_coef(D[idx]))
    for row, num_obs in enumerate(NUM_OBS):
        for col, obs_len in enumerate(OBS_LEN):
            _, XtY, XtX = package_observations(x[:num_obs, :obs_len, :], MODEL_ORDER)
            # Per-observation least-squares solve (Gram matrix assumed SPD).
            D_ls = np.array([sl.solve(gram, moment, assume_a='pos')
                             for gram, moment in zip(XtX, XtY)])
            # nll[trial, :, row, col] = np.array(L_palm)
            error[trial, row, col] = np.mean(sl.norm(D[:num_obs] - D_ls,
                                                     ord='fro', axis=(1, 2)))

###################
# save results
###################
# save_results(error, 'n_vs_m-var.pickle')

###################
# load results
###################
コード例 #7 (Code example #7)
0
ファイル: connectivity.py プロジェクト: addisonbohannon/alm (File: connectivity.py — Project: addisonbohannon/alm)
            d = j[e2]
            if a != c and a != d and b != c and b != d:
                break
        R[a, d], R[a, b] = R[a, b], R[a, d]
        R[c, b], R[c, d] = R[c, d], R[c, b]
    return R


# Load the learned components for subject 8 and allocate one connectivity
# matrix per component for each frequency band.
components, _, _ = load_isruc_results(8, start=2)
alpha_connectivity = np.zeros([NUM_COMPONENTS, SIGNAL_DIM, SIGNAL_DIM])
beta_connectivity = np.zeros_like(alpha_connectivity)
delta_connectivity = np.zeros_like(alpha_connectivity)
theta_connectivity = np.zeros_like(alpha_connectivity)
for j, component_j in enumerate(components):
    transfer_function, frequencies = transfer_function_from_filter(
        unstack_ar_coef(component_j), SAMPLING_RATE, fft_len=FFT_LEN)
    # for channel in range(SIGNAL_DIM):
    #     transfer_function[:, channel, channel] = 0
    transfer_function[:, np.arange(SIGNAL_DIM), np.arange(SIGNAL_DIM)] = 0
    dtf = np.abs(transfer_function) / sl.norm(
        transfer_function, axis=-1, keepdims=True)
    delta_connectivity[j] = np.sqrt(
        np.mean(np.array([
            dtf[k]**2 for k, freq in enumerate(frequencies)
            if DELTA_MIN <= freq <= DELTA_MAX
        ]),
                axis=0))
    delta_connectivity[j] = keep_significant_edges(delta_connectivity[j])
    theta_connectivity[j] = np.sqrt(
        np.mean(np.array([
            dtf[k]**2 for k, freq in enumerate(frequencies)
コード例 #8 (Code example #8)
0
ファイル: n_vs_m.py プロジェクト: addisonbohannon/alm (File: n_vs_m.py — Project: addisonbohannon/alm)
        for _ in range(NUM_STARTS)
    ]
    for i, n_i in enumerate(NUM_OBS):
        for j, m_i in enumerate(OBS_LEN):
            alm_model = Alm(solver='palm')
            D_palm, _, L_palm, _ = alm_model.fit(x[:(n_i - 1), :(m_i - 1), :],
                                                 MODEL_ORDER,
                                                 NUM_COMPONENTS,
                                                 PENALTY_PARAM,
                                                 num_starts=NUM_STARTS,
                                                 initial_comps=D_0,
                                                 return_all=True)
            nll[iteration, :, i, j] = np.array(L_palm)
            error_palm = []
            for D_k in D_palm:
                D_pred = [unstack_ar_coef(Dj) for Dj in D_k]
                d_loss, _, _ = ar_comp_dist(D, D_pred)
                error_palm.append(d_loss)
            error[iteration, :, i, j] = np.array(error_palm)

###################
# save results
###################
# save_results([nll, error], 'n_vs_m.pickle')

###################
# load results
###################
# Reload the previously saved negative log-likelihoods and component errors.
nll, error = load_results('n_vs_m.pickle')

# Report error per component rather than summed over all components.
error /= NUM_COMPONENTS