Example No. 1
def plot_mean_locations():
    ### Plot the sampled locations for a few neurons
    _, _, _, _, Ls = result
    Ls_rot = []
    for L in Ls:
        R = compute_optimal_rotation(L, pfs)
        Ls_rot.append(L.dot(R))
    Ls_rot = np.array(Ls_rot)

    Ls_mean = np.mean(Ls_rot, 0)

    fig = create_figure(figsize=(1.4, 2.5))
    plt.subplot(211, aspect='equal')

    wheel_cmap = gradient_cmap(
        [colors[0], colors[3], colors[2], colors[1], colors[0]])

    for i, k in enumerate(node_perm):
        color = wheel_cmap((np.pi + pfs_th[k]) / (2 * np.pi))
        plt.plot(pfs[k, 0],
                 pfs[k, 1],
                 'o',
                 markerfacecolor=color,
                 markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("True Place Fields")
    plt.xlim(-45, 45)
    # plt.xlabel("$x$")
    plt.xticks([-40, -20, 0, 20, 40], [])
    plt.ylim(-45, 45)
    # plt.ylabel("$y$")
    plt.yticks([-40, -20, 0, 20, 40], [])

    # Now plot the inferred locations

    plt.subplot(212, aspect='equal')

    for i, k in enumerate(node_perm):
        color = wheel_cmap((np.pi + pfs_th[k]) / (2 * np.pi))
        plt.plot(Ls_mean[k, 0],
                 Ls_mean[k, 1],
                 'o',
                 markerfacecolor=color,
                 markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("Mean Locations")
    plt.xlim(-30, 30)
    plt.xticks([])
    plt.ylim(-30, 30)
    plt.yticks([])

    plt.tight_layout()
    plt.savefig(os.path.join(results_dir, "hipp_mean_locations.pdf"))
    plt.show()
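compute_optimal_rotation is used throughout these examples but its definition is not shown. A minimal orthogonal-Procrustes sketch, assuming it returns the matrix R that best aligns L onto pfs (with an optional isotropic scale), might look like the following; the actual helper may differ.

import numpy as np

def compute_optimal_rotation(L, pfs, scale=True):
    # Hypothetical sketch (not the original helper): solve the orthogonal
    # Procrustes problem, i.e. find the rotation R minimizing ||L R - pfs||_F.
    U, s, Vt = np.linalg.svd(L.T.dot(pfs))
    R = U.dot(Vt)
    if np.linalg.det(R) < 0:
        # Keep a proper rotation (det(R) = +1) by flipping the last singular axis
        U[:, -1] *= -1
        R = U.dot(Vt)
    if scale:
        # Optional isotropic scale matching the spread of pfs
        R *= s.sum() / np.trace(L.T.dot(L))
    return R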
Example No. 2
def plot_single_trans_prob(trans_distn,
                           k,
                           ax=None,
                           xlim=(-5, 5),
                           ylim=(-5, 5),
                           n_pts=80):
    XX, YY = np.meshgrid(np.linspace(*xlim, n_pts), np.linspace(*ylim, n_pts))
    XY = np.column_stack((np.ravel(XX), np.ravel(YY)))

    D_reg = trans_distn.D_in
    inputs = np.hstack((np.zeros((n_pts**2, D_reg - 2)), XY))
    test_prs = trans_distn.pi(inputs)

    if ax is None:
        fig = plt.figure(figsize=(12, 3))
        ax = fig.add_subplot(1, K, k + 1)

    cmap = gradient_cmap([np.ones(3), colors[k]])
    im1 = ax.imshow(test_prs[:, k].reshape(*XX.shape),
                    extent=xlim + tuple(reversed(ylim)),
                    vmin=0,
                    vmax=1,
                    cmap=cmap)

    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlabel("$x_{t,1}$")
    ax.set_ylabel("$x_{t,2}$")
    ax.set_title("$\Pr(z_{{t+1}} = {0} \mid x_t)$".format(k + 1))
Example No. 3
def plot_mean_locations():
    ### Plot the sampled locations for a few neurons
    _, _, _, _, Ls = result
    Ls_rot = []
    for L in Ls:
        R = compute_optimal_rotation(L, pfs)
        Ls_rot.append(L.dot(R))
    Ls_rot = np.array(Ls_rot)

    Ls_mean = np.mean(Ls_rot, 0)

    fig = create_figure(figsize=(1.4,2.5))
    plt.subplot(211, aspect='equal')

    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])

    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        plt.plot(pfs[k,0], pfs[k, 1], 'o',
                 markerfacecolor=color, markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("True Place Fields")
    plt.xlim(-45, 45)
    # plt.xlabel("$x$")
    plt.xticks([-40, -20, 0, 20, 40], [])
    plt.ylim(-45, 45)
    # plt.ylabel("$y$")
    plt.yticks([-40, -20, 0, 20, 40], [])

    # Now plot the inferred locations


    plt.subplot(212, aspect='equal')

    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        plt.plot(Ls_mean[k,0], Ls_mean[k, 1], 'o',
                 markerfacecolor=color, markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("Mean Locations")
    plt.xlim(-30, 30)
    plt.xticks([])
    plt.ylim(-30, 30)
    plt.yticks([])

    plt.tight_layout()
    plt.savefig(os.path.join(results_dir, "hipp_mean_locations.pdf"))
    plt.show()
Example No. 4
   along with error bars.

 - take in an axis (or a figure) or create a new figure if not specified
 - take in a color or colormap for plotting


"""
import numpy as np
import matplotlib.pyplot as plt
from hips.plotting.colormaps import gradient_cmap

from pyglm.utils.theano_func_wrapper import seval


rwb_cmap = gradient_cmap([[1,0,0],
                          [1,1,1],
                          [0,0,0]])

class PlotProvider(object):
    """
    Abstract class for plotting a sample or a sequence of samples
    """
    def __init__(self, population):
        """
        Check that the model satisfies whatever criteria are appropriate
        for this model.
        """
        self.population = population

    def plot(self, sample, ax=None):
        """
Example No. 5
                   "medium green",
                   "dusty purple",
                   "orange",
                   "amber",
                   "clay",
                   "pink",
                   "greyish",
                   "light cyan",
                   "steel blue",
                   "forest green",
                   "pastel purple",
                   "mint",
                   "salmon",
                   "dark brown"]
    colors = sns.xkcd_palette(color_names)
    cmap = gradient_cmap(colors)
except:
    from matplotlib.cm import get_cmap
    colors = ['b', 'r', 'y', 'g', 'purple']
    cmap = get_cmap("jet")


from pybasicbayes.util.text import progprint_xrange
from pylds.util import random_rotation
from pyslds.models import DefaultSLDS

npr.seed(0)

# Set parameters
K = 5
D_obs = 100
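These snippets rely on gradient_cmap from hips.plotting.colormaps. If hips is unavailable, a rough stand-in can be built with matplotlib alone (a sketch, not the hips implementation):

from matplotlib.colors import LinearSegmentedColormap

def gradient_cmap(colors, nsteps=256):
    # Linearly interpolate between the listed colors, in order
    return LinearSegmentedColormap.from_list("gradient", list(colors), N=nsteps)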
Example No. 6
The classes should be able to:
 - plot either a single sample or the mean of a sequence of samples
   along with error bars.

 - take in an axis (or a figure) or create a new figure if not specified
 - take in a color or colormap for plotting


"""
import numpy as np
import matplotlib.pyplot as plt
from hips.plotting.colormaps import gradient_cmap

from pyglm.utils.theano_func_wrapper import seval

rwb_cmap = gradient_cmap([[1, 0, 0], [1, 1, 1], [0, 0, 0]])


class PlotProvider(object):
    """
    Abstract class for plotting a sample or a sequence of samples
    """
    def __init__(self, population):
        """
        Check that the model satisfies whatever criteria are appropriate
        for this model.
        """
        self.population = population

    def plot(self, sample, ax=None):
        """
Example No. 7
def plot_mean_and_pca_locations(result):
    ### Plot the sampled locations for a few neurons
    _, _, _, _, Ls = result
    Ls_rot = []
    for L in Ls:
        R = compute_optimal_rotation(L, pfs, scale=False)
        Ls_rot.append(L.dot(R))
    Ls_rot = np.array(Ls_rot)
    Ls_mean = np.mean(Ls_rot, 0)

    # Bin the data
    from pyhawkes.utils.utils import convert_continuous_to_discrete
    S_dt = convert_continuous_to_discrete(S, C, 0.25, 0, T)

    # Smooth the data to get a firing rate
    from scipy.ndimage import gaussian_filter1d
    S_smooth = np.array([gaussian_filter1d(s, 4) for s in S_dt.T]).T

    # Run PCA to get an embedding
    from sklearn.decomposition import PCA
    pca = PCA(n_components=2)
    pca.fit(S_smooth)
    Z = pca.components_.T

    # Rotate
    R = compute_optimal_rotation(Z, pfs, scale=False)
    Z = Z.dot(R)

    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])
    fig = create_figure(figsize=(1.4,2.9))
    # plt.subplot(211, aspect='equal')
    ax = create_axis_at_location(fig, .3, 1.7, 1, 1)


    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        plt.plot(Ls_mean[k,0], Ls_mean[k, 1], 'o',
                 markerfacecolor=color, markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("Mean Locations")
    plt.xlim(-3, 3)
    plt.xticks([-2, 0, 2])
    plt.ylim(-3, 3)
    plt.yticks([-2, 0, 2])


    # plt.subplot(212, aspect='equal')
    ax = create_axis_at_location(fig, .3, .2, 1, 1)

    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        plt.plot(Z[k,0], Z[k, 1], 'o',
                 markerfacecolor=color, markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("PCA Locations")
    plt.xlim(-.5, .5)
    # plt.xlabel("$x$")
    plt.xticks([-.4, 0, .4])
    plt.ylim(-.5, .5)
    # plt.ylabel("$y$")
    plt.yticks([-.4, 0, .4])

    # plt.tight_layout()
    plt.savefig(os.path.join(results_dir, "hipp_mean_pca_locations.pdf"))
    plt.show()
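convert_continuous_to_discrete comes from pyhawkes. A rough stand-in for what the call above appears to do, assuming S holds spike times, C holds neuron indices, and spikes are binned into dt-wide bins on [0, T); the name bin_spikes and the details are assumptions:

def bin_spikes(S, C, dt, T_min, T_max, K=None):
    # Returns a (num_bins x num_neurons) array of spike counts
    K = int(C.max()) + 1 if K is None else K
    bins = np.arange(T_min, T_max + dt, dt)
    S_dt = np.zeros((len(bins) - 1, K))
    for k in range(K):
        S_dt[:, k], _ = np.histogram(S[C == k], bins)
    return S_dt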
Example No. 8
def plot_pca_locations():
    ### Plot the sampled locations for a few neurons

    # Bin the data
    from pyhawkes.utils.utils import convert_continuous_to_discrete
    S_dt = convert_continuous_to_discrete(S, C, 0.25, 0, T)

    # Smooth the data to get a firing rate
    from scipy.ndimage import gaussian_filter1d
    S_smooth = np.array([gaussian_filter1d(s, 4) for s in S_dt.T]).T

    # Run PCA to get an embedding
    from sklearn.decomposition import PCA
    pca = PCA(n_components=2)
    pca.fit(S_smooth)
    Z = pca.components_.T

    # Rotate
    R = compute_optimal_rotation(Z, pfs)
    Z = Z.dot(R)

    fig = create_figure(figsize=(1.4,2.5))
    plt.subplot(211, aspect='equal')

    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])

    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        plt.plot(pfs[k,0], pfs[k, 1], 'o',
                 markerfacecolor=color, markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("True Place Fields")
    plt.xlim(-45, 45)
    # plt.xlabel("$x$")
    plt.xticks([-40, -20, 0, 20, 40], [])
    plt.ylim(-45, 45)
    # plt.ylabel("$y$")
    plt.yticks([-40, -20, 0, 20, 40], [])

    # Now plot the inferred locations


    plt.subplot(212, aspect='equal')

    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        plt.plot(Z[k,0], Z[k, 1], 'o',
                 markerfacecolor=color, markeredgecolor=color,
                 markersize=4 + 4 * pf_size[k],
                 alpha=0.7)

    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("PCA Locations")
    plt.xlim(-25, 25)
    # plt.xlabel("$x$")
    plt.xticks([-20, 0, 20], [])
    plt.ylim(-25, 25)
    # plt.ylabel("$y$")
    plt.yticks([-20, 0, 20], [])

    plt.tight_layout()
    plt.savefig(os.path.join(results_dir, "hipp_pca_locations.pdf"))
    plt.show()
Example No. 9
def plot_locations(result, offset=0):
    ### Plot the sampled locations for a few neurons
    _, _, _, _, Ls = result
    Ls_rot = []
    for L in Ls:
        R = compute_optimal_rotation(L, pfs, scale=False)
        Ls_rot.append(L.dot(R))
    Ls_rot = np.array(Ls_rot)

    fig = create_figure(figsize=(1.4,2.9))
    ax = create_axis_at_location(fig, .3, 1.7, 1, 1)

    # toplot = np.random.choice(np.arange(K), size=4, replace=False)
    toplot = np.linspace(offset, K + offset, 4, endpoint=False).astype(int)
    print(toplot)
    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])
    plot_colors = [wheel_cmap((np.pi+pfs_th[node_perm[j]])/(2*np.pi)) for j in toplot]

    for i,k in enumerate(node_perm):
        # plt.text(pfs[k,0], pfs[k,1], "%d" % i)
        if i not in toplot:
            color = 0.8 * np.ones(3)

            plt.plot(pfs[k,0], pfs[k, 1], 'o',
                     markerfacecolor=color, markeredgecolor=color,
                     markersize=4 + 4 * pf_size[k],
                     alpha=1.0)

    for i,k in enumerate(node_perm):
        # plt.text(pfs[k,0], pfs[k,1], "%d" % i)
        if i in toplot:
            j = np.where(toplot==i)[0][0]
            color = plot_colors[j]

            plt.plot(pfs[k,0], pfs[k, 1], 'o',
                     markerfacecolor=color, markeredgecolor=color,
                     markersize=4 + 4 * pf_size[k])



    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("True Place Fields")
    plt.xlim(-45,45)
    plt.xticks([-40, -20, 0, 20, 40])
    # plt.xlabel("$x$")
    plt.ylim(-45,45)
    plt.yticks([-40, -20, 0, 20, 40])
    # plt.ylabel("$y$")

    # Now plot the inferred locations
    # plt.subplot(212, aspect='equal')
    ax = create_axis_at_location(fig, .3, .2, 1, 1)
    for L in Ls_rot[::2]:
        for j in np.random.permutation(len(toplot)):
            k = node_perm[toplot][j]
            color = plot_colors[j]
            plt.plot(L[k,0], L[k,1], 'o',
                     markerfacecolor=color, markeredgecolor="none",
                     markersize=4, alpha=0.25)

    plt.title("Locations Samples")
    # plt.xlim(-30, 30)
    # plt.xticks([])
    # plt.ylim(-30, 30)
    # plt.yticks([])
    plt.xlim(-3, 3)
    plt.xticks([-2, 0, 2])
    plt.ylim(-3, 3)
    plt.yticks([-2, 0, 2])

    plt.savefig(os.path.join(results_dir, "locations_%d.pdf" % offset))
Example No. 10
def plot_correlation_matrix(Sigma,
                            betas,
                            words,
                            results_dir,
                            outname="corr_matrix.pdf",
                            blockify=False,
                            highlight=[]):

    # Get topic names
    topic_names = [np.array(words)[np.argmax(beta)]  for beta in betas.T]

    # Plot the log likelihood
    sz = 5.25/3.  # Three NIPS panels
    fig = create_figure(figsize=(sz, 2.5), transparent=True)
    fig.set_tight_layout(True)
    ax = fig.add_subplot(111)

    C = corr_matrix(Sigma)
    T = C.shape[0]
    lim = abs(C).max()
    cmap = gradient_cmap([colors[1], np.ones(3), colors[0]])

    if blockify:
        perm = find_blockifying_perm(C, k=4, nclusters=4)
        C = C[np.ix_(perm, perm)]

    im = plt.imshow(np.kron(C, np.ones((50, 50))),
                    interpolation="none", vmin=-lim, vmax=lim,
                    cmap=cmap, extent=(1, T + 1, T + 1, 1))

    from mpl_toolkits.axes_grid1 import make_axes_locatable
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("bottom", size="5%", pad=0.05)
    cbar = plt.colorbar(im, cax=cax,
                        orientation="horizontal",
                        ticks=[-1, -0.5, 0., 0.5, 1.0],
                        label="Topic Correlation")
    # cbar.set_label("Probability", labelpad=10)
    plt.subplots_adjust(left=0.05, bottom=0.1, top=0.9, right=0.85)

    # Highlight some cells
    import string
    from matplotlib.patches import Rectangle
    for i,(j,k) in enumerate(highlight):
        ax.add_patch(Rectangle((k+1, j+1), 1, 1, facecolor="none", edgecolor='k', linewidth=1))

        ax.text(k+1-1.5,j+1+1,string.ascii_lowercase[i], )

        print("")
        print("CC: ", C[j,k])
        print("Topic ", j)
        print(top_k(words, betas[:,j]))
        print("Topic ", k)
        print(top_k(words, betas[:,k]))
        print("")

    # Find the most correlated off diagonal entry
    C_offdiag = np.tril(C,k=-1)
    sorted_pairs = np.argsort(C_offdiag.ravel())
    for i in range(5):
        print("")
        imax, jmax = np.unravel_index(sorted_pairs[-(i + 1)], (T, T))
        print("Correlated Topics (%d, %d): " % (imax, jmax))
        print(top_k(words, betas[:,imax]), "\n and \n", top_k(words, betas[:,jmax]))
        print("correlation coeff: ", C[imax, jmax])
        print("-" * 50)
        print("")

    print("-" * 50)
    print("-" * 50)
    print("-" * 50)

    for i in range(5):
        print("")
        imin,jmin = np.unravel_index(sorted_pairs[i], (T,T))
        print("Anticorrelated Topics (%d, %d): " % (imin, jmin))
        # print topic_names[imin], " and ", topic_names[jmin]
        print(top_k(words, betas[:,imin]), "\n and \n", top_k(words, betas[:,jmin]))
        print("correlation coeff: ", C[imin, jmin])
        print("-" * 50)
        print("")


    # Move main axis ticks to top
    ax.xaxis.tick_top()
    # ax.set_title("Topic Correlation", y=1.1)
    fig.savefig(os.path.join(results_dir, outname))

    plt.show()
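corr_matrix, top_k, and find_blockifying_perm are not defined in this snippet. Plausible minimal versions of the first two (assumptions, not the original implementations):

def corr_matrix(Sigma):
    # Convert a covariance matrix into a correlation matrix
    std = np.sqrt(np.diag(Sigma))
    return Sigma / np.outer(std, std)

def top_k(words, beta, k=5):
    # The k words with the largest weight under the topic vector beta
    return [words[i] for i in np.argsort(beta)[::-1][:k]]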
Example No. 11
def plot_locations(result, offset=0):
    ### Plot the sampled locations for a few neurons
    _, _, _, _, Ls = result
    Ls_rot = []
    for L in Ls:
        R = compute_optimal_rotation(L, pfs, scale=False)
        Ls_rot.append(L.dot(R))
    Ls_rot = np.array(Ls_rot)

    fig = create_figure(figsize=(1.4,2.9))
    ax = create_axis_at_location(fig, .3, 1.7, 1, 1)

    # toplot = np.random.choice(np.arange(K), size=4, replace=False)
    toplot = np.linspace(offset, K + offset, 4, endpoint=False).astype(int)
    print(toplot)
    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])
    plot_colors = [wheel_cmap((np.pi+pfs_th[node_perm[j]])/(2*np.pi)) for j in toplot]

    for i,k in enumerate(node_perm):
        # plt.text(pfs[k,0], pfs[k,1], "%d" % i)
        if i not in toplot:
            color = 0.8 * np.ones(3)

            plt.plot(pfs[k,0], pfs[k, 1], 'o',
                     markerfacecolor=color, markeredgecolor=color,
                     markersize=4 + 4 * pf_size[k],
                     alpha=1.0)

    for i,k in enumerate(node_perm):
        # plt.text(pfs[k,0], pfs[k,1], "%d" % i)
        if i in toplot:
            j = np.where(toplot==i)[0][0]
            color = plot_colors[j]

            plt.plot(pfs[k,0], pfs[k, 1], 'o',
                     markerfacecolor=color, markeredgecolor=color,
                     markersize=4 + 4 * pf_size[k])



    #     plt.gca().add_patch(Circle((0,0), radius=rad, ec='k', fc="none"))
    plt.title("True Place Fields")
    plt.xlim(-45,45)
    plt.xticks([-40, -20, 0, 20, 40])
    # plt.xlabel("$x$")
    plt.ylim(-45,45)
    plt.yticks([-40, -20, 0, 20, 40])
    # plt.ylabel("$y$")

    # Now plot the inferred locations
    # plt.subplot(212, aspect='equal')
    ax = create_axis_at_location(fig, .3, .2, 1, 1)
    for L in Ls_rot[::2]:
        for j in np.random.permutation(len(toplot)):
            k = node_perm[toplot][j]
            color = plot_colors[j]
            plt.plot(L[k,0], L[k,1], 'o',
                     markerfacecolor=color, markeredgecolor="none",
                     markersize=4, alpha=0.25)

    plt.title("Locations Samples")
    # plt.xlim(-30, 30)
    # plt.xticks([])
    # plt.ylim(-30, 30)
    # plt.yticks([])
    plt.xlim(-3, 3)
    plt.xticks([-2, 0, 2])
    plt.ylim(-3, 3)
    plt.yticks([-2, 0, 2])

    plt.savefig(os.path.join(results_dir, "locations_%d.pdf" % offset))
Example No. 12
def plot_results(result):
    lls, plls, Weffs, Ps, Ls = result

    ### Colored locations
    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])
    fig = create_figure(figsize=(1.8, 1.8))
    # ax = create_axis_at_location(fig, .1, .1, 1.5, 1.5, box=False)
    ax = create_axis_at_location(fig, .6, .4, 1.1, 1.1)

    for i,k in enumerate(node_perm):
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        # alpha = pfs_rad[k] / 47
        alpha = 0.7
        ax.add_patch(Circle((pfs[k,0], pfs[k,1]),
                            radius=3+4*pf_size[k],
                            color=color, ec="none",
                            alpha=alpha)
                            )

    plt.title("True place fields")
    # ax.text(0, 45, "True Place Fields",
    #         horizontalalignment="center",
    #         fontdict=dict(size=9))
    plt.xlim(-45,45)
    plt.xticks([-40, -20, 0, 20, 40])
    plt.xlabel("$x$ [cm]")
    plt.ylim(-45,45)
    plt.yticks([-40, -20, 0, 20, 40])
    plt.ylabel("$y$ [cm]")
    plt.savefig(os.path.join(results_dir, "hipp_colored_locations.pdf"))


    # Plot the inferred weighted adjacency matrix
    fig = create_figure(figsize=(1.8, 1.8))
    ax = create_axis_at_location(fig, .4, .4, 1.1, 1.1)

    Weff = np.array(Weffs[N_samples//2:]).mean(0)
    Weff = Weff[np.ix_(node_perm, node_perm)]
    lim = Weff[(1 - np.eye(K)).astype(bool)].max()
    im = ax.imshow(np.kron(Weff, np.ones((20,20))),
                   interpolation="none", cmap="Greys", vmax=lim)
    ax.set_xticks([])
    ax.set_yticks([])

    # node_colors = wheel_cmap()
    node_values = ((np.pi+pfs_th[node_perm])/(2*np.pi))[:,None] *np.ones((K,2))
    yax = create_axis_at_location(fig, .2, .4, .3, 1.1)
    remove_plot_labels(yax)
    yax.imshow(node_values, interpolation="none",
               cmap=wheel_cmap)
    yax.set_xticks([])
    yax.set_yticks([])
    yax.set_ylabel("pre")

    xax = create_axis_at_location(fig, .4, .2, 1.1, .3)
    remove_plot_labels(xax)
    xax.imshow(node_values.T, interpolation="none",
               cmap=wheel_cmap)
    xax.set_xticks([])
    xax.set_yticks([])
    xax.set_xlabel("post")

    cbax = create_axis_at_location(fig, 1.55, .4, .04, 1.1)
    plt.colorbar(im, cax=cbax, ticks=[0, .1, .2,  .3])
    cbax.tick_params(labelsize=8, pad=1)
    cbax.set_ticklabels=["0", ".1", ".2",  ".3"]

    ax.set_title("Inferred Weights")
    plt.savefig(os.path.join(results_dir, "hipp_W.pdf"))

    # # Plot the inferred connection probability
    # plt.figure()
    # plt.imshow(P, interpolation="none", cmap="Greys", vmin=0)
    # plt.colorbar()

    # Plot the inferred connection probability matrix
    fig = create_figure(figsize=(1.8, 1.8))
    ax = create_axis_at_location(fig, .4, .4, 1.1, 1.1)

    P = np.array(Ps[N_samples//2:]).mean(0)
    P = P[np.ix_(node_perm, node_perm)]
    im = ax.imshow(np.kron(P, np.ones((20,20))),
                   interpolation="none", cmap="Greys", vmin=0, vmax=1)
    ax.set_xticks([])
    ax.set_yticks([])

    # node_colors = wheel_cmap()
    node_values = ((np.pi+pfs_th[node_perm])/(2*np.pi))[:,None] *np.ones((K,2))
    yax = create_axis_at_location(fig, .2, .4, .3, 1.1)
    remove_plot_labels(yax)
    yax.imshow(node_values, interpolation="none",
               cmap=wheel_cmap)
    yax.set_xticks([])
    yax.set_yticks([])
    yax.set_ylabel("pre")

    xax = create_axis_at_location(fig, .4, .2, 1.1, .3)
    remove_plot_labels(xax)
    xax.imshow(node_values.T, interpolation="none",
               cmap=wheel_cmap)
    xax.set_xticks([])
    xax.set_yticks([])
    xax.set_xlabel("post")

    cbax = create_axis_at_location(fig, 1.55, .4, .04, 1.1)
    plt.colorbar(im, cax=cbax, ticks=[0, .5, 1])
    cbax.tick_params(labelsize=8, pad=1)
    cbax.set_ticklabels=["0.0", "0.5",  "1.0"]

    ax.set_title("Inferred Probability")
    plt.savefig(os.path.join(results_dir, "hipp_P.pdf"))


    plt.show()
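create_figure, create_axis_at_location, and remove_plot_labels come from hips.plotting. A minimal sketch of create_axis_at_location, assuming its arguments are (left, bottom, width, height) in inches:

def create_axis_at_location(fig, left, bottom, width, height, **kwargs):
    # Convert absolute inches into figure-fraction coordinates for add_axes
    fig_w, fig_h = fig.get_size_inches()
    return fig.add_axes([left / fig_w, bottom / fig_h,
                         width / fig_w, height / fig_h], **kwargs)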
Example No. 13
from sds.utils import permutation

import matplotlib.pyplot as plt
from hips.plotting.colormaps import gradient_cmap

import seaborn as sns

sns.set_style("white")
sns.set_context("talk")

color_names = [
    "windows blue", "red", "amber", "faded green", "dusty purple", "orange"
]

colors = sns.xkcd_palette(color_names)
cmap = gradient_cmap(colors)

true_rarhmm = rARHMM(nb_states=3, dm_obs=2, trans_type='recurrent')

# trajectory lengths
T = [1250, 1150, 1025]

true_z, x = true_rarhmm.sample(horizon=T)
true_ll = true_rarhmm.log_probability(x)

obs_prior = {'mu0': 0., 'sigma0': 1e12, 'nu0': 2, 'psi0': 1.}
trans_kwargs = {'degree': 3}
rarhmm = rARHMM(nb_states=3,
                dm_obs=2,
                trans_type='poly',
                obs_prior=obs_prior,
def identify():
    import matplotlib.pyplot as plt

    from hips.plotting.colormaps import gradient_cmap
    import seaborn as sns

    sns.set_style("white")
    sns.set_context("talk")

    color_names = [
        "windows blue", "red", "amber", "faded green", "dusty purple",
        "orange", "clay", "pink", "greyish", "mint", "light cyan",
        "steel blue", "forest green", "pastel purple", "salmon", "dark brown"
    ]

    colors = sns.xkcd_palette(color_names)
    cmap = gradient_cmap(colors)

    import os
    import random
    import torch

    import gym
    import option_a2c.sds

    seed = 1337

    random.seed(seed)
    npr.seed(seed)
    torch.manual_seed(seed)
    torch.set_num_threads(1)

    env = gym.make('Pendulum-ID-v1')

    env._max_episode_steps = 5000
    env.unwrapped._dt = 0.01
    env.unwrapped._sigma = 1e-4
    env.seed(seed)

    dm_obs = env.observation_space.shape[0]
    dm_act = env.action_space.shape[0]

    nb_train_rollouts, nb_train_steps = 45, 250
    nb_test_rollouts, nb_test_steps = 15, 100

    train_obs, train_act = sample_env(env, nb_train_rollouts, nb_train_steps)
    test_obs, test_act = sample_env(env, nb_test_rollouts, nb_test_steps)
    lele = env.action_space.sample()

    nb_states = 7

    obs_prior = {
        'mu0': 0.,
        'sigma0': 1e64,
        'nu0': (dm_obs + 1) + 23,
        'psi0': 1e-4 * 23
    }
    obs_mstep_kwargs = {'use_prior': True}

    trans_type = 'neural'
    trans_prior = {'l2_penalty': 1e-32, 'alpha': 1, 'kappa': 1}
    trans_kwargs = {
        'hidden_layer_sizes': (24, ),
        'norm': {
            'mean': np.array([0., 0., 0., 0.]),
            'std': np.array([1., 1., 8., 2.5])
        }
    }
    trans_mstep_kwargs = {'nb_iter': 50, 'batch_size': 256, 'lr': 5e-4}

    models, lls, scores = parallel_em(nb_jobs=10,
                                      nb_states=nb_states,
                                      obs=train_obs,
                                      act=train_act,
                                      trans_type=trans_type,
                                      obs_prior=obs_prior,
                                      trans_prior=trans_prior,
                                      trans_kwargs=trans_kwargs,
                                      obs_mstep_kwargs=obs_mstep_kwargs,
                                      trans_mstep_kwargs=trans_mstep_kwargs,
                                      nb_iter=200,
                                      prec=1e-2)
    rarhmm = models[np.argmax(scores)]

    print("rarhmm, stochastic, " + rarhmm.trans_type)
    print(np.c_[lls, scores])

    # rarhmm.em(train_obs, train_act, nb_iter=100,
    #           obs_mstep_kwargs=obs_mstep_kwargs,
    #           trans_mstep_kwargs=trans_mstep_kwargs,
    #           prec=1e-4, verbose=True)

    # plt.figure(figsize=(8, 8))
    # _, state = rarhmm.viterbi(train_obs, train_act)
    # _seq = npr.choice(len(train_obs))
    #
    # plt.subplot(211)
    # plt.plot(train_obs[_seq])
    # plt.xlim(0, len(train_obs[_seq]))
    #
    # plt.subplot(212)
    # plt.imshow(state[_seq][None, :], aspect="auto", cmap=cmap, vmin=0, vmax=len(colors) - 1)
    # plt.xlim(0, len(train_obs[_seq]))
    # plt.ylabel("$z_{\\mathrm{inferred}}$")
    # plt.yticks([])
    #
    # plt.show()

    torch.save(
        rarhmm,
        open(
            os.path.abspath(os.path.join(__file__, '..', '..')) +
            '/envs/hybrid/models/' + rarhmm.trans_type +
            "_rarhmm_pendulum_cart.pkl", "wb"))

    hr = [1, 5, 10, 15, 20, 25, 50]
    for h in hr:
        _mse, _smse, _evar = rarhmm.kstep_mse(test_obs, test_act, horizon=h)
        print(f"MSE: {_mse}, SMSE:{_smse}, EVAR:{_evar}")