Example #1
def tca(X,
        non_neg=True,
        R=10,
        prefix="",
        max_iter=500,
        epoc="all",
        effect="all"):
    """Fit two rank-R CP decompositions of tensor X (nonnegative by default),
    align them, plot the aligned factors, and save the figure to disk."""

    # Fit CP tensor decomposition (two times).
    opti_str = 'ncp_bcd_'
    if non_neg:
        U = tt.ncp_bcd(X, rank=R, verbose=True, max_iter=max_iter, tol=1e-6)
        V = tt.ncp_bcd(X, rank=R, verbose=True, max_iter=max_iter, tol=1e-6)
    else:
        U = tt.cp_als(X, rank=R, verbose=True, max_iter=max_iter, tol=1e-6)
        V = tt.cp_als(X, rank=R, verbose=True, max_iter=max_iter, tol=1e-6)
        opti_str = 'cp_als_'
    # Compare the low-dimensional factors from the two fits.
    # fig, ax, po = tt.plot_factors(U.factors)
    # tt.plot_factors(V.factors, fig=fig)
    # fig.suptitle("raw models")
    # fig.tight_layout()

    # Align the two fits and print a similarity score.
    sim = tt.kruskal_align(U.factors,
                           V.factors,
                           permute_U=True,
                           permute_V=True)
    print(sim)

    # Plot the results again to see alignment.
    fig, ax, po = tt.plot_factors(U.factors,
                                  plots=["scatter", "scatter", "line"])
    tt.plot_factors(V.factors, plots=["scatter", "scatter", "line"], fig=fig)
    for a in ax[:, 2]:
        a.set_xticks([11.5, 15.5, 27.5, 31.5])

    ax[-1, 0].set_xlabel("SU #")
    ax[-1, 1].set_xlabel("Trial #")
    ax[-1, 2].set_xlabel("Time (s)")
    ax[-1, 2].set_xticklabels(["S", "+1", "T", "+1"])

    fig.suptitle("aligned models")
    fig.tight_layout()

    # Show plots.
    plt.show()
    fig.set_size_inches(40, 40)
    fig.set_dpi(300)
    fig.savefig(
        opti_str + prefix + "tca_trial_" + epoc + "_" + effect + "_" +
        str(X.shape[1]) + "_R" + str(R) + ".png",
        dpi=300,
        bbox_inches="tight",
    )
    return (U, V, sim)
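
A minimal usage sketch for tca(), assuming tt is tensortools, plt is matplotlib.pyplot, and the synthetic nonnegative tensor below stands in for real (units x trials x time) data; the shapes, rank, and prefix are illustrative only:

import numpy as np

# Illustrative data only: 30 units, 40 trials, 50 time bins.
X = np.maximum(np.random.randn(30, 40, 50), 0.0)

# Fit, align, and plot two rank-5 nonnegative models; the figure is saved
# under the naming scheme built inside tca().
U, V, sim = tca(X, non_neg=True, R=5, prefix="demo_", max_iter=200)
print("similarity after alignment:", sim)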
Example #2
def plot_similarity_score(X, ranks=[1, 2, 3, 4, 5, 10, 20, 40, 60], n_runs=5):
    """Fit n_runs nonnegative CP models at each rank, align consecutive fits,
    and plot the mean +/- SEM of their similarity scores."""
    print('this may take a while....')
    rank_similarity_scores = []
    for rank in ranks:
        U = []
        for n in range(n_runs):
            U_r = tensortools.ncp_bcd(X, rank=rank, verbose=False)
            U.append(U_r)

        similarity_scores = []
        for n in range(n_runs - 1):
            similarity = tensortools.kruskal_align(U[n].factors,
                                                   U[n + 1].factors,
                                                   permute_U=True,
                                                   permute_V=True)
            similarity_scores.append(similarity)

        rank_similarity_scores.append(similarity_scores)

    rank_similarity_scores = np.array(rank_similarity_scores)

    # plot similarity scores
    fig = plt.figure(figsize=(7, 3))
    x = np.arange(len(ranks))
    sem = rank_similarity_scores.std(axis=1) / np.sqrt(
        rank_similarity_scores.shape[1])
    plt.errorbar(x, rank_similarity_scores.mean(axis=1), yerr=sem)
    plt.xticks(ticks=x, labels=ranks)
    plt.xlabel('N factors')
    plt.ylabel('similarity mean+/-SEM')

    return fig
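
A hedged usage sketch, assuming tensortools, numpy, and matplotlib.pyplot are imported at module level (as tensortools, np, and plt) and that X is a nonnegative 3-D array; the toy tensor and rank list are illustrative:

import numpy as np

# Illustrative tensor only; real data would be e.g. (cells x time x trials).
X = np.maximum(np.random.randn(20, 30, 25), 0.0)

fig = plot_similarity_score(X, ranks=[1, 2, 3, 5], n_runs=3)
fig.savefig('similarity_vs_rank.png', dpi=150, bbox_inches='tight')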
Example #3
def plot_fit_error(X, ranks=[1, 2, 3, 4, 5, 10, 15, 20]):
    """Fit one nonnegative CP model per rank and plot the final objective
    value against rank alongside each model's optimization trace."""
    print('this may take a while....')
    fits = []
    errs = []
    for rank in ranks:
        U_r = tensortools.ncp_bcd(X, rank=rank, verbose=False)
        fits.append(U_r)
        errs.append(U_r.obj)

    fig, ax = plt.subplots(1, 2, figsize=(12, 3))
    ax[0].plot(ranks, errs, '-o')
    ax[0].set_ylabel('Final objective value')
    ax[0].set_xlabel('N factors')

    cmap = plt.cm.winter
    # optimization performance across iterations; map the rank index into
    # [0, 1] so each trace gets a distinct color from the colormap
    for i in range(len(ranks)):
        ax[1].plot(fits[i].obj_hist, '-', color=cmap(i / max(len(ranks) - 1, 1)))
    ax[1].set_xlabel('Optimization iteration')
    ax[1].set_ylabel('Objective (Fit error)')

    return fig
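
A hedged usage sketch under the same assumptions as above (tensortools and matplotlib.pyplot imported at module level, X a nonnegative 3-D array); the data and rank list are illustrative:

import numpy as np

X = np.maximum(np.random.randn(20, 30, 25), 0.0)  # illustrative data only
fig = plot_fit_error(X, ranks=[1, 2, 3, 5])
fig.savefig('fit_error_vs_rank.png', dpi=150, bbox_inches='tight')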
Example #4
def tensor_decomp(self, network_tensor, n_comp):
    """Run a nonnegative CP decomposition of network_tensor with n_comp
    components and store the rebalanced factors and the entity vocabulary."""
    self.ent_vocab = [self.id2voc[i] for i in range(len(self.id2voc))]
    tensors = tt.ncp_bcd(network_tensor, n_comp, verbose=False)
    self.factors = tensors.factors.rebalance()
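
For context, a minimal standalone sketch of the same operation outside the class, assuming tt is tensortools; the toy tensor and component count are illustrative only:

import numpy as np
import tensortools as tt

network_tensor = np.maximum(np.random.randn(15, 15, 10), 0.0)  # illustrative
result = tt.ncp_bcd(network_tensor, 4, verbose=False)
factors = result.factors.rebalance()  # KTensor with rebalanced component norms
approx = factors.full()               # dense reconstruction from the factors
print(approx.shape)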
Example #5
import tensortools as tt
import numpy as np
import matplotlib.pyplot as plt

# Make synthetic dataset.
I, J, K, R = 25, 25, 25, 4  # dimensions and rank
X = tt.randexp_ktensor((I, J, K), rank=R).full()
X += np.random.randn(I, J, K) * .5
X = np.maximum(X, 0.0)

# Fit CP tensor decomposition (two times).
U = tt.ncp_bcd(X, rank=R, verbose=True)
V = tt.ncp_bcd(X, rank=R, verbose=True)

# Compare the low-dimensional factors from the two fits.
fig, ax, po = tt.plot_factors(U.factors)
tt.plot_factors(V.factors, fig=fig)
fig.suptitle("raw models")
fig.tight_layout()

# Align the two fits and print a similarity score.
sim = tt.kruskal_align(U.factors, V.factors, permute_U=True, permute_V=True)
print(sim)

# Plot the results again to see alignment.
fig, ax, po = tt.plot_factors(U.factors)
tt.plot_factors(V.factors, fig=fig)
fig.suptitle("aligned models")
fig.tight_layout()

# Show plots.
plt.show()
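
One way to sanity-check either fit, sketched here as an addition rather than part of the original example, is to compare the dense reconstruction of the factors against X:

# Relative reconstruction error of the first model (lower is better).
err = np.linalg.norm(X - U.factors.full()) / np.linalg.norm(X)
print("relative error:", err)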
Example #6
def run_pipeline(filename):
    """Load one session .mat file, bin spikes into a (cells x position bins x
    trials) tensor normalized by occupancy, fit and align two nonnegative CP
    models, and save the factor and covariance figures."""

    data = lm.loadmat(filename)
    session_name = os.path.basename(filename)[0:-4]
    (good_cells, pos_edges, trial_idx, spikelocations, spike_idx,
     location_vec) = prepareData(data)
    n_trials = 30
    n_cells = len(good_cells)
    shape = (n_cells, len(pos_edges) - 1, n_trials)
    counts = np.zeros(shape, dtype=float)
    _fast_bin(counts, trial_idx, spikelocations, spike_idx)

    occupancy = np.zeros((len(pos_edges) - 1, n_trials), dtype=float)
    _fast_occ(occupancy, data['trial'] - 1, location_vec)

    for iT in range(n_trials):
        tmp = occupancy[:, iT]
        idx_v = np.flatnonzero(tmp)
        idx_n = np.flatnonzero(tmp == 0)
        tmp[idx_n] = np.interp(idx_n, idx_v, tmp[idx_v])
        occupancy[:, iT] = tmp

    spMapN = np.zeros(counts.shape)
    for iC in range(n_cells):
        spMapN[iC, :, :] = np.divide(counts[iC, :, :], occupancy)

    spMapN = spi.gaussian_filter(spMapN, (0, 2, 0))

    n_cells = len(good_cells)
    n_bins = len(pos_edges) - 1
    spFlat = np.zeros((n_cells, n_trials * n_bins))

    for iC in range(n_cells):
        spFlat[iC, :] = spMapN[iC, :, :].ravel(order='F')
    #spFlat = spFlat-spFlat.mean(axis=1)[:,np.newaxis]
    spFlat = normalize(spFlat, axis=0, norm='l2')
    for iC in range(n_cells):
        for iT in range(n_trials):
            start = iT * n_bins
            stop = (iT + 1) * n_bins
            bin_idx = np.arange(start, stop)  # avoid shadowing trial_idx above
            spMapN[iC, :, iT] = spFlat[iC, bin_idx]

    R = 5
    # Fit CP tensor decomposition (two times).
    U = tt.ncp_bcd(spMapN, rank=R, verbose=False)
    V = tt.ncp_bcd(spMapN, rank=R, verbose=False)

    # Align the two fits and print a similarity score.
    sim = tt.kruskal_align(U.factors,
                           V.factors,
                           permute_U=True,
                           permute_V=True)
    #print(sim)

    # Plot the results again to see alignment.
    fig, ax, po = tt.plot_factors(U.factors)
    tt.plot_factors(V.factors, fig=fig)
    fig.suptitle("aligned models")
    fig.tight_layout()
    fig.savefig('C:\\temp\\try3\\' + session_name + '_tca.png')

    ff = np.matmul(np.transpose(spFlat), spFlat)
    plt.figure()
    im = plt.imshow(ff)
    plt.colorbar(im)
    plt.axvline(x=n_bins * 20, color='red', ls='--', linewidth=1)
    plt.axvline(x=n_bins * 21, color='green', ls='--', linewidth=1)
    plt.axhline(y=n_bins * 20, color='red', ls='--', linewidth=1)
    plt.axhline(y=n_bins * 21, color='green', ls='--', linewidth=1)
    plt.savefig('C:\\temp\\try3\\' + session_name + '_cov.png')
    plt.close('all')
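
A hedged usage sketch, assuming the helpers referenced above (lm, prepareData, _fast_bin, _fast_occ, tt, spi, normalize) are importable in the same module; the folder path is a placeholder for illustration only:

import glob

# 'C:\\temp\\try3\\sessions' is a hypothetical location of the .mat files.
for filename in glob.glob('C:\\temp\\try3\\sessions\\*.mat'):
    run_pipeline(filename)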