Example #1
import numpy as np
import hypertools as hyp
from scipy.sparse.linalg import svds


def procrustes(a, b, all_sessions_firing, all_session_dm):
    # a and b index the two sessions (trials x neurons x time) to align
    data_set_1 = all_sessions_firing[a]
    data_set_2 = all_sessions_firing[b]

    session_a = np.transpose(data_set_1, [1, 0, 2]).reshape(
        data_set_1.shape[1], data_set_1.shape[0] * data_set_1.shape[2])
    session_b = np.transpose(data_set_2, [1, 0, 2]).reshape(
        data_set_2.shape[1], data_set_2.shape[0] * data_set_2.shape[2])

    # PCA (via truncated SVD) on the neuronal data, keeping n dimensions
    n = 8
    u_1, s_1, v_1 = svds(session_a, n)  # u_1 is (neurons x n)
    u_2, s_2, v_2 = svds(session_b, n)

    # project each session onto its n-dimensional manifold
    proj_a = u_1.T @ session_a
    proj_b = u_2.T @ session_b
    data = [proj_a, proj_b]
    aligned_data = hyp.align(data)

    aligned_a = aligned_data[0].reshape(aligned_data[0].shape[0],
                                        data_set_1.shape[0],
                                        data_set_1.shape[2])
    aligned_b = aligned_data[1].reshape(aligned_data[1].shape[0],
                                        data_set_2.shape[0],
                                        data_set_2.shape[2])

    # correlate the aligned vs. unaligned projections (using the 2D
    # n x (trials*time) matrices; np.corrcoef requires at most 2 dimensions)
    aligned_corr = np.corrcoef(aligned_data[0], aligned_data[1])
    misaligned_corr = np.corrcoef(proj_a, proj_b)

    # hyperalignment between tasks (first 40 vs. next 40 trials of session b)
    task_1 = aligned_b[:, :40, :]
    task_2 = aligned_b[:, 40:80, :]
    task_1_reshaped = task_1.reshape(task_1.shape[0],
                                     task_1.shape[1] * task_1.shape[2])
    task_2_reshaped = task_2.reshape(task_2.shape[0],
                                     task_2.shape[1] * task_2.shape[2])
    tasks = [task_1_reshaped, task_2_reshaped]
    aligned_tasks = hyp.align(tasks)
    task_1_aligned = aligned_tasks[0].reshape(task_1.shape[0], task_1.shape[1],
                                              task_1.shape[2])

    original_a = proj_a.reshape(aligned_data[0].shape[0], data_set_1.shape[0],
                                data_set_1.shape[2])
    original_b = proj_b.reshape(aligned_data[1].shape[0], data_set_2.shape[0],
                                data_set_2.shape[2])

    return aligned_a, aligned_b, original_a, original_b, task_1_aligned
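As a usage sketch (the data here are synthetic and the names and shapes are illustrative, not from the original): `all_sessions_firing` is assumed to be a list of (trials x neurons x time) firing-rate arrays with at least 80 trials, so the within-session task split above is valid.

# Hypothetical usage with synthetic data; shapes are illustrative only.
rng = np.random.default_rng(0)
all_sessions_firing = [rng.standard_normal((80, 50, 25)) for _ in range(2)]
aligned_a, aligned_b, original_a, original_b, task_1_aligned = procrustes(
    0, 1, all_sessions_firing, all_session_dm=None)
print(aligned_a.shape)  # (8, 80, 25): components x trials x time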
Example #2
import numpy as np
import hypertools as hyp


# `spiral` (a pair of datasets) and `compare_alignments` are fixtures/helpers
# defined elsewhere in the test module
def spiral_alignment_checker(model, known_rot=True, relax=False, tol=1e-5):
    def get_vals(x):
        if hasattr(x, 'values'):
            return x.values
        else:
            return x

    def test_all_close(unaligned, aligned):
        if not relax:
            return np.allclose(aligned[0], aligned[1], atol=tol)
        else:
            d1 = np.max(np.abs(get_vals(unaligned[0]) - get_vals(unaligned[1])))
            d2 = np.max(np.abs(get_vals(aligned[0]) - get_vals(aligned[1])))
            return d2 - d1 <= tol  # either d1 and d2 are within tol of each other, or d2 < d1

    rot = np.array([[-0.50524616, -0.48383773, -0.71458195],
                    [-0.86275536, 0.26450786, 0.43091621],
                    [-0.01948098, 0.83422817, -0.55107518]])

    aligned_spirals1 = [spiral[0], np.dot(spiral[1], rot)]
    aligned_spirals2 = hyp.align(spiral, model=model)
    aligned_spirals3, fitted_model = hyp.align(spiral,
                                               model=model,
                                               return_model=True)

    # noinspection DuplicatedCode
    assert test_all_close(spiral, aligned_spirals1)
    assert test_all_close(spiral, aligned_spirals2)
    assert all([
        test_all_close(spiral, [a, b])
        for a, b in zip(aligned_spirals2, aligned_spirals3)
    ])

    if known_rot:
        assert all([
            np.allclose(a, b, atol=1e-5)
            for a, b in zip(aligned_spirals1, aligned_spirals3)
        ])
        assert compare_alignments(np.eye(3), fitted_model['model'].proj[0])
        assert compare_alignments(rot, fitted_model['model'].proj[1])
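The `spiral` fixture itself is defined elsewhere in the test module. A hypothetical sketch of how such a fixture could be constructed (the trajectory is illustrative): the key property is that the second dataset is the first one de-rotated, so applying `rot` to it, as in `aligned_spirals1` above, recovers the original.

import numpy as np

rot = np.array([[-0.50524616, -0.48383773, -0.71458195],
                [-0.86275536, 0.26450786, 0.43091621],
                [-0.01948098, 0.83422817, -0.55107518]])

t = np.linspace(0, 3 * np.pi, 200)
base = np.column_stack([np.cos(t), np.sin(t), t / (3 * np.pi)])
spiral = [base, base @ rot.T]  # rot is orthonormal, so rot.T == inv(rot)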
Example #3
import pandas as pd
import hypertools as hyp


def align_dictionary(dicts, n_iter=10):
    keys = list(dicts.keys())
    values = list(dicts.values())

    # repeatedly re-align to iteratively refine the common space
    aligned_values = values.copy()
    for n in range(n_iter):
        aligned_values = [
            pd.DataFrame(data=d, index=values[i].index)
            for i, d in enumerate(hyp.align(aligned_values, align='hyper'))
        ]

    # returns a dictionary in the same format as dicts, but where the
    # trajectories are aligned
    return {k: a for k, a in zip(keys, aligned_values)}
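A minimal, hypothetical usage sketch (keys and shapes are illustrative): each value is a samples-by-features DataFrame, and all values share the same shape.

import numpy as np

rng = np.random.default_rng(0)
trajectories = {f'sub{i}': pd.DataFrame(rng.standard_normal((100, 10)))
                for i in range(3)}
aligned = align_dictionary(trajectories, n_iter=5)
print(aligned['sub0'].shape)  # (100, 10), with the original index preserved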
Example #4
import numpy as np
import hypertools as hyp


# `weights` is a fixture defined elsewhere in the test module: a list of
# subjects' (observations x features) data matrices
def weights_alignment_checker(model):
    def dists(x, y):
        return np.sqrt(np.sum(np.power(x - y, 2), axis=1))

    def get_mean_dists(x):
        dist_sum = 0
        for i, dx in enumerate(x):
            if hasattr(dx, 'values'):
                dx = dx.values
            for dy in x[:i]:
                if hasattr(dy, 'values'):
                    dy = dy.values
                # if shapes are identical, one may be "flipped" relative to the other
                dist_sum += np.min(
                    [np.mean(dists(dx, dy)),
                     np.mean(dists(dx[::-1, :], dy))])
        return dist_sum

    # aligning should reduce the mean pairwise distance between the datasets
    d1 = get_mean_dists(weights)
    d2 = get_mean_dists(hyp.align(weights, model=model))
    assert d1 > d2
Example #5
# -*- coding: utf-8 -*-
"""
=============================
Animated trajectory plotted with multidimensional scaling
=============================

This is a trajectory of brain data, hyperaligned and then plotted in 3D
with multidimensional scaling.
"""

# Code source: Andrew Heusser
# License: MIT

import hypertools as hyp
import numpy as np

data = hyp.load('weights')
aligned_w = hyp.align(data)

# average subjects into two groups (note that subject 17 is skipped here)
w1 = np.mean(aligned_w[:17], 0)
w2 = np.mean(aligned_w[18:], 0)

hyp.plot([w1, w2], animate=True, reduce='MDS')
Example #6
# -*- coding: utf-8 -*-
"""
=============================
Aligning matrices to a common space
=============================

In this example, we plot the trajectory of multivariate brain activity for
two groups of subjects that have been hyperaligned (Haxby et al, 2011).  First,
we use the align tool to project all subjects in the list to a common space.
Then we average the data into two groups, and plot.
"""

# Code source: Andrew Heusser
# License: MIT

# import
import hypertools as hyp
import numpy as np

# load example data
data = hyp.load('weights').get_data()
data = hyp.align(data, align='hyper')

# average into two groups
group1 = np.mean(data[:17], 0)
group2 = np.mean(data[18:], 0)

# plot
hyp.plot([group1[:100, :], group2[:100, :]])
Example #7

import hypertools as hyp
import timecorr as tc


def plot_aligned_ROI_trajectories(data, reduce='UMAP', align='hyper', n_iter=5,
                                  ndims=500, internal_reduce='IncrementalPCA',
                                  **kwargs):
    if isinstance(data, dict):
        for r in data.keys():  # one recursive call per ROI
            plot_aligned_ROI_trajectories(data[r], reduce=reduce, align=align,
                                          n_iter=n_iter, ndims=ndims,
                                          internal_reduce=internal_reduce,
                                          title=r, **kwargs)
    else:
        #step 1: reduce dataset before aligning (runs much faster)
        reduced_data = hyp.reduce([x.data for x in data], reduce=internal_reduce, ndims=ndims)

        #step 2: smooth trajectories so they look prettier
        smoothed_data = tc.smooth(reduced_data, kernel_fun=tc.helpers.gaussian_weights, kernel_params={'var': 500})
        
        #step 3: align trajectories
        aligned_data = smoothed_data
        for i in range(n_iter):
            aligned_data = hyp.align(aligned_data, align=align)

        #now generate a plot
        hyp.plot(aligned_data, reduce=reduce, **kwargs)

plot_aligned_ROI_trajectories(data['Part1'])

We can see strong agreement across people in V1 and A1, whereas precentral gyrus responses are much more variable.  Now let's see if these patterns also hold for the second half of the dataset:

plot_aligned_ROI_trajectories(data['Part2'])

It looks like this pattern holds!  To test this idea formally, we could develop a measure of trajectory consistency across people (e.g. mean squared error between the corresponding timepoints, across all pairs of participants' trajectories).  We could also explore the extent to which different brain regions exhibit consistent patterns across people.
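For example, a minimal sketch of such a consistency measure (assuming `trajectories` is a list of aligned timepoints-by-dimensions arrays, one per participant, all the same shape):

import itertools

import numpy as np


def trajectory_consistency(trajectories):
    # mean squared error between corresponding timepoints, averaged over
    # all pairs of participants' aligned trajectories (lower = more consistent)
    pair_mses = [np.mean((a - b) ** 2)
                 for a, b in itertools.combinations(trajectories, 2)]
    return np.mean(pair_mses)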

### Using different embedding spaces to obtain a more complete sense of high-dimensional space
When we visualize high-dimensional data as 3D shapes, we necessarily lose information.  One strategy for getting a better sense of the "true shape" of the data is to use different projection algorithms for embedding the data into the 3D space (this may be done using the `reduce` keyword).
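As a sketch (assuming `aligned_data` holds the aligned trajectories produced inside the function above), the same data can be rendered under several embeddings by varying the `reduce` keyword:

# Embed the same aligned data with three different projection algorithms;
# each 3D view discards different information about the original space.
for embedding in ['PCA', 'MDS', 'UMAP']:
    hyp.plot(aligned_data, reduce=embedding, title=embedding)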
Example #8
import numpy as np
import hypertools as hyp


# `spiral` and `weights` are fixtures defined elsewhere in the test module
def test_null_align():
    # NullAlign should pass the data through unchanged
    spiral2 = hyp.align(spiral, model='NullAlign')
    weights2 = hyp.align(weights, model='NullAlign')

    assert all([np.allclose(x, y) for x, y in zip(spiral, spiral2)])
    assert all([np.allclose(x, y) for x, y in zip(weights, weights2)])