Example #1
def plot_training_ex(X, Y_act, Y_pred, plot_title, axh=None):
    """Plot input/output data for flip-flop model."""

    # Create figure if an axis handle is not provided
    if axh is None:
        fh, axh = pt.create_subplot(n_row=1, n_col=1)
        axh = axh[0]
    else:
        fh = None

    # Set axis labels
    axh.set_xlabel('Timestep')
    axh.set_ylabel('Input/Output')
    axh.set_title(plot_title)

    # Loop over input/output pairs and plot
    n_ex = X.shape[0]
    for i in range(n_ex):
        x = X[i, :]
        y_act = Y_act[i, :]
        y_pred = Y_pred[i, :]

        # Scale values so example i is drawn between (i + 1) - 0.4 and (i + 1) + 0.4 on the y-axis
        x = (x * 0.4) + i + 1
        y_act = (y_act * 0.4) + i + 1
        y_pred = (y_pred * 0.4) + i + 1

        axh.plot(x, color='black')
        axh.plot(y_act, color='red')
        axh.plot(y_pred, color='orange')

    axh.set_yticks(np.arange(n_ex) + 1)

    return fh
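# Usage sketch (an assumption, not part of the original source): X, Y_act, and
# Y_pred are 2D arrays of shape (n_examples x n_timesteps); each example is
# drawn on its own horizontal band centered at i + 1. The synthetic data below
# is only a stand-in for real flip-flop inputs/outputs.
n_ex_demo, n_ts_demo = 4, 100
X_demo = np.random.choice([-1.0, 0.0, 1.0], size=(n_ex_demo, n_ts_demo), p=[0.05, 0.9, 0.05])
Y_demo = np.sign(np.cumsum(X_demo, axis=1))  # Stand-in for flip-flop state
fh = plot_training_ex(X_demo, Y_demo, Y_demo, 'Flip-flop training examples')
if fh is not None:
    fh.savefig('results/TrainingExamples.pdf')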
Example #2
def calc_movement_onset(df):
    """Calculate speed and movement onset index for each trial and add them to df in place."""

    import numpy as np
    import plottools as pt
    import matplotlib as mpl
    mpl.use('PDF')
    import matplotlib.pyplot as plt
    import copy

    # Define speed threshold used for movement onset
    s_thresh = 0.2

    # Create figure
    ax_hndl = pt.create_subplot(1, 1)

    # Iterate over trials and calculate speed
    n_trials = df.shape[0]
    s_all = []
    onset_idx = []
    for i in range(n_trials):
        # Get velocity, time, calculate speed
        v = df['vel'][i][0:2, :]
        t = df['time'][i]
        s = np.sqrt(np.sum(v**2, 0))

        # First get timing events -- trajectory onset/offset
        traj_onset = df['trajectoryOnset'][i]
        traj_offset = df['trajectoryOffset'][i]

        # Get time of max speed.  Find this between trajectory onset and offset
        s_temp = copy.copy(s)  # Create a copy of s
        s_temp[t < traj_onset] = 0  # Zero out all speeds pre-trajectory onset (movement onset can't occur here)
        s_temp[t > traj_offset] = 0  # Also zero out all speeds post-trajectory offset
        max_ind = np.argmax(s_temp)  # Index corresponding to the max speed
        s_max = s[max_ind]
        t_max = t[max_ind]

        # Define masks used to find movement onset
        s_mask = s < (s_max * s_thresh)  # All speeds less than the threshold
        t_onset_mask = t > traj_onset
        t_max_mask = t < t_max
        mask = s_mask & t_onset_mask & t_max_mask  # All time points after trajectory onset but before max speed
        valid_idx = np.nonzero(mask)  # Get list of valid indices (speeds less than the threshold)
        # Movement onset is the first time point after the last valid (below-threshold) index
        onset_idx.append(valid_idx[0][-1] + 1)

        # Add speed to list
        s_all.append(s)

    # Add movement onset and speed to dataframe
    df['speed'] = s_all
    df['onset_idx'] = onset_idx

    return None
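# Usage sketch (an assumption, not part of the original source): df is a pandas
# DataFrame with one row per trial and the columns read above -- 'vel' (a
# dims x samples array), 'time' (sample times), 'trajectoryOnset', and
# 'trajectoryOffset'. The toy trial below is purely illustrative.
import numpy as np
import pandas as pd

t_demo = np.arange(0, 1000, 10)  # Sample times (ms)
v_demo = np.vstack([np.hanning(t_demo.size), np.zeros(t_demo.size)])  # Toy 2D velocity
df_demo = pd.DataFrame({'vel': [v_demo],
                        'time': [t_demo],
                        'trajectoryOnset': [100],
                        'trajectoryOffset': [900]})
calc_movement_onset(df_demo)  # Adds 'speed' and 'onset_idx' columns in place
print(df_demo['onset_idx'][0])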
Example #3
def plot_history(history, axh=None):
    """Plot model training history."""

    # Create figure if an axis handle is not provided
    if axh is None:
        fh, axh = pt.create_subplot(n_row=1, n_col=1)
        axh = axh[0]
    else:
        fh = None

    # Plot training error and validation error
    loss_h, = axh.plot(history.history['loss'])
    val_loss_h, = axh.plot(history.history['val_loss'])

    # Add axis labels and legend
    axh.set_xlabel('Epoch')
    axh.set_ylabel('MSE')
    axh.legend((loss_h, val_loss_h), ('Training error', 'Validation error'))
    axh.set_title('Training history')

    return fh
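# Usage sketch (an assumption, not part of the original source): 'history' is
# the object returned by Keras model.fit(); the 'val_loss' key requires that
# validation data (or a validation split) were used during training. The tiny
# model and synthetic data below are only for illustration and assume that
# numpy, plottools, and TensorFlow are available.
import numpy as np
import tensorflow as tf

X_demo = np.random.uniform(-1, 1, (200, 1))
Y_demo = X_demo ** 2
demo_model = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='tanh'),
                                  tf.keras.layers.Dense(1)])
demo_model.compile(optimizer='adam', loss='mse')
demo_history = demo_model.fit(X_demo, Y_demo, epochs=5, validation_split=0.2, verbose=0)
fh = plot_history(demo_history)
if fh is not None:
    fh.savefig('results/TrainingHistory.pdf')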
Example #4
def plot_speed_profile(df):
    """Plot speed profiles for all trials, aligned to movement onset."""

    import numpy as np
    import plottools as pt
    import matplotlib as mpl
    mpl.use('PDF')
    import matplotlib.pyplot as plt
    import copy

    # Create figure
    ax_hndl = pt.create_subplot(1, 1)

    # Iterate over trials and calculate speed
    n_trials = df.shape[0]
    s_all = []
    for i in range(n_trials):
        # Get time, speed
        t = df['time'][i]
        s = df['speed'][i]

        # Align time so t=0 occurs at movement onset
        onset_idx = df['onset_idx'][i]
        t = t - t[onset_idx]

        # Plot trajectory
        plt.plot(t, s, 'k')
        plt.plot(t[onset_idx], s[onset_idx], 'ko')

    # Format figure
    plt.xlim([-500, 1000])
    plt.xlabel('Time (ms)')
    plt.ylabel('Speed')
    plt.suptitle('Reach speed')

    # Save figure
    fig_name = 'SpeedProfile'
    plt.savefig('results/{}.pdf'.format(fig_name))

    return None
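# Usage sketch (an assumption, not part of the original source):
# plot_speed_profile relies on the 'speed' and 'onset_idx' columns that
# calc_movement_onset adds to df, so the two functions are meant to be called
# in sequence on the same trial DataFrame:
#
#     calc_movement_onset(df)   # Adds 'speed' and 'onset_idx' to df
#     plot_speed_profile(df)    # Writes results/SpeedProfile.pdf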
Example #5
import numpy as np
import matplotlib as mpl
mpl.use('PDF')
mpl.rcParams['font.sans-serif'] = ['Helvetica']
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import plottools as pt

# Create some data to plot
x = np.linspace(-1.0, 1.0, 100)
y = x**2

# Sample data
x_samp = np.random.uniform(-1.0, 1.0, 1000)
e_samp = np.random.normal(0, 0.05, 1000)
y_samp = x_samp**2 + e_samp

# Create subplots
ax_hndl = pt.create_subplot(1, 2)

# Set first axis to be active
plot_no = 0
plt.sca(ax_hndl[plot_no])

# Plot -- scatter-style plot without a line
plt.plot(x_samp,
         y_samp,
         color=[0.75, 0.75, 0.75],
         marker='o',
         linestyle='None')
# Plot -- basic line with markers
plt.plot(x, y, color='k', linewidth=2)

# Format plot
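# A minimal formatting sketch following the pattern of the other examples; the
# labels and file name below are assumptions, not from the original source.
plt.xlabel('x')
plt.ylabel('y')
plt.suptitle('Sampled data and true function')

# Save figure
fig_name = 'SampledData'
plt.savefig('results/{}.pdf'.format(fig_name))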
Example #6
def pca_analysis(model, X):
    """Run PCA analysis on model to visualize hidden layer activity.

	To get the values of the intermediate layer, a new model needs to be 
	created. This model takes the normal input from the RNN, and returns the
	output of the rnn layer.  The values of the hidden layer can then be found
	using the 'predict()' method.
	"""

    # --- Get hidden layer activations and run PCA ---

    # Create new model and predict input
    inputs = model.input
    outputs = [model.layers[0].output, model.layers[1].output]
    act_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    activations = act_model.predict(X)

    # Format activations into dimensions x observations
    # Input dimensions: (obs x time x units)
    n_inputs = X.shape[2]
    n_rnn = activations[0].shape[2]
    A = np.transpose(activations[0], (2, 0, 1))  # Now (units x obs x time)
    Y = np.transpose(activations[1], (2, 0, 1))
    A = A.reshape((n_rnn, -1), order='C')  # Now (units x (obs*time))
    Y = Y.reshape((n_inputs, -1), order='C')

    # Run PCA (note that this is actually PPCA)
    from sklearn.decomposition import PCA
    pca = PCA(n_components=20)
    pca.fit(A.T)
    Z = pca.transform(A.T)

    # --- Plot results ---

    # Create colormap for points.  Use an RGB map of the output
    color = np.copy(Y.T)
    color[color < -1] = -1
    color[color > 1] = 1
    color = (color + 1) / 2  # Colors must be between 0 and 1

    # Figure 1: Variance explained and 2D projections

    # Setup figure
    fh, ax_h = pt.create_subplot(n_row=1, n_col=4)

    # Subplot 1: Plot fraction of variance explained
    idx = 0
    dims = np.arange(pca.n_components_) + 1
    ax_h[idx].plot(dims, pca.explained_variance_ratio_, color='k', marker='.')
    ax_h[idx].set_xlabel('PCA dim.')
    ax_h[idx].set_ylabel('Fraction of variance explained')
    ax_h[idx].set_title('Fraction of variance explained')

    # Subplots 2-4: 2D projections
    # Iterate over dimensions and plot
    plot_dim = [[0, 1], [0, 2], [1, 2]]
    for ax, d in zip(ax_h[1:4], plot_dim):
        ax.scatter(Z[:, d[0]], Z[:, d[1]], marker='.', c=color)
        ax.set_xlabel('PC {}'.format(d[0] + 1))
        ax.set_ylabel('PC {}'.format(d[1] + 1))
        ax.set_title('Dim {} vs Dim {}'.format(d[0] + 1, d[1] + 1))

    # Save figure
    fh.savefig('results/FlipFlopRNN_{}.pdf'.format('PCA'))

    # Figure 2: 3d representation
    # Note that this needs to be done separately from the above plots
    # because a separate argument is required when creating a subplot
    # for a 3d plot

    # This import registers the 3D projection, but is otherwise unused.
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import

    # Create figure
    fh = plt.figure()
    ax = fh.add_subplot(111, projection='3d')

    # Plot
    ax.scatter(Z[:, 0], Z[:, 1], Z[:, 2], marker='o', c=color)
    ax.set_xlabel('PC 1')
    ax.set_ylabel('PC 2')
    ax.set_zlabel('PC 3')
    ax.set_title('Top 3 PCs')

    # Save figure
    fh.savefig('results/FlipFlopRNN_{}.pdf'.format('PCA_3D'))

    return None
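# Usage sketch (an assumption, not part of the original source): 'model' is the
# trained Keras flip-flop model whose first layer is the RNN and whose second
# layer is the readout, and X is the input array of shape (obs x time x inputs):
#
#     pca_analysis(model, X_train)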
Example #7
def pca_analysis(model, data):
    """Run PCA analysis on model to visualize hidden layer activity.

    To get the values of the intermediate layer, a new model needs to be 
    created. This model takes the normal input from the RNN, and returns the
    output of the rnn layer.  The values of the hidden layer can then be found
    using the 'predict()' method.
    """

    # Unpack train data dict
    X = data['X']
    Y = data['Y']

    # --- Get hidden layer activations and run PCA ---

    # Create new model and predict input
    inputs = model.input
    outputs = [model.layers[0].output, model.layers[1].output]
    act_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    activations = act_model.predict(X)

    # Format activations into dimensions x observations
    # Input dimensions: (obs x time x units)
    n_inputs = X.shape[2]
    n_obs = X.shape[0]
    n_ts = X.shape[1]
    n_rnn = activations[0].shape[2]
    A = np.transpose(activations[0], (2, 0, 1))  # Now (units x obs x time)
    Y = np.transpose(activations[1], (2, 0, 1))
    A = A.reshape((n_rnn, -1), order='C')  # Now (units x (obs*time))
    Y = Y.reshape((n_inputs, -1), order='C')

    # Run PCA (note that this is actually PPCA)
    from sklearn.decomposition import PCA
    n_pcs = 20
    pca = PCA(n_components=n_pcs)
    pca.fit(A.T)

    # --- Figure 1: Variance explained ---

    # Setup figure
    fh, ax_h = pt.create_subplot(n_row=1, n_col=1)

    # Plot fraction of variance explained
    idx = 0
    dims = np.arange(pca.n_components_) + 1
    ax_h[idx].plot(dims, pca.explained_variance_ratio_, color='k', marker='.')
    ax_h[idx].set_xlabel('PCA dim.')
    ax_h[idx].set_ylabel('Fraction of variance explained')
    ax_h[idx].set_title('Fraction of variance explained')
    fig_name = 'FracVarExplained'
    fh.savefig('./results/BCISimulation_PCAResults_{}.pdf'.format(fig_name))

    # --- Figure 2: 2D projections of the PCA representation ---

    # Setup figure.  Define dimensions to plot
    plot_dim = [[0, 1], [0, 2], [1, 2], [2, 3]]
    n_row = 2
    n_col = len(plot_dim)
    fh, ax_h = pt.create_subplot(n_row=n_row, n_col=n_col)

    # Find indices of specific trials to plot.  Here we want to focus on a
    # pair of diametrically-opposed targets (0 and 180 degrees)
    start_pos = X[:, 0, :]
    start_ang = np.rad2deg(np.arctan2(start_pos[:, 1], start_pos[:, 0]))
    mask = start_ang < 0
    start_ang[mask] = start_ang[mask] + 360
    targ_ang = [0, 180]
    targ_idx = [np.argmin(np.abs(start_ang - ang)) for ang in targ_ang]

    # Iterate over trials
    Z = np.zeros((n_obs, n_ts, n_pcs))
    n_samp = np.zeros((n_obs), dtype=int)
    for i in range(n_obs):
        # Get data from current trial and find PCA representation
        A_trial = activations[0][i, :, :]
        Z_trial = pca.transform(A_trial)
        Z[i, :, :] = Z_trial

        # Limit to valid portion of the trajectory
        Z_trial = Z_trial[data['onset'][i]:data['offset'][i]]
        n_samp[i] = Z_trial.shape[0]

        # Iterate over dimensions and plot
        for ax, d in zip(ax_h[0:n_col], plot_dim):
            plot_trajectory(Z_trial[:, d].T, X[i, 0, :], ax)

        # If trial is to be highlighted, plot in a separate set of axes
        if i in targ_idx:
            for ax, d in zip(ax_h[n_col:], plot_dim):
                plot_trajectory(Z_trial[:, d].T, X[i, 0, :], ax)

    # Set axis labels
    for ax, d in zip(ax_h, plot_dim * 2):
        ax.set_xlabel('PC {}'.format(d[0] + 1))
        ax.set_ylabel('PC {}'.format(d[1] + 1))
        ax.set_title('Dim {} vs Dim {}'.format(d[0] + 1, d[1] + 1))

    # Save figure
    fig_name = '2DProj'
    fh.savefig('./results/BCISimulation_PCAResults_{}.pdf'.format(fig_name))

    # --- Figure 3: Linear mapping of PCA representation ---

    # Get data to fit linear model
    n_samp_all = np.sum(n_samp)
    Z_mat = np.zeros((n_samp_all, n_pcs))
    Y_mat = np.zeros((n_samp_all, n_inputs))
    ctr = 0
    for i in range(n_obs):
        onset_idx = np.arange(data['onset'][i], data['offset'][i])
        ctr_idx = np.arange(ctr, (ctr + len(onset_idx)))
        Z_mat[ctr_idx, :] = Z[i, onset_idx, :]
        Y_mat[ctr_idx, :] = data['Y'][i, onset_idx, :]
        ctr = ctr_idx[-1] + 1

    # Fit linear model -- do this independently for the X and Y dimensions
    from sklearn import linear_model
    reg_mdl = linear_model.LinearRegression(fit_intercept=False)
    reg_mdl.fit(Z_mat, Y_mat)
    r2 = reg_mdl.score(Z_mat, Y_mat)
    print('Linear fit: r2 = {}'.format(r2))
    W = reg_mdl.coef_

    # Plot predicted trajectories
    fh, ax_h = pt.create_subplot(n_row=1, n_col=1)
    for i in range(n_obs):
        # Predict cursor position from hidden unit activity
        Z_temp = Z[i, data['onset'][i]:data['offset'][i], :]
        y_pred = reg_mdl.predict(Z_temp)  # Equivalent to Z_temp @ W.T (no intercept)
        plot_trajectory(y_pred.T, data['X'][i, 0, :], ax_h[0])

    # Format plot axes
    ax_h[0].set_title('Linear model - predicted trajectories')
    ax_h[0].set_xlabel('X position')
    ax_h[0].set_ylabel('Y position')

    # Save figure
    fig_name = 'LinearMapping'
    fh.savefig('./results/BCISimulation_PCAResults_{}.pdf'.format(fig_name))

    return None
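# Usage sketch (an assumption, not part of the original source): 'data' is a
# dict with the keys read by this function -- 'X' (model inputs) and 'Y'
# (target cursor positions) as (obs x time x dims) arrays, plus per-trial
# 'onset' and 'offset' sample indices bounding the valid portion of each
# trajectory:
#
#     data = {'X': X, 'Y': Y, 'onset': onset_idx, 'offset': offset_idx}
#     pca_analysis(model, data)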