Example #1
File: signal.py  Project: dibgerge/utkit
    def coherence(self, other, width, overlap=0, **kwargs):
        """
        Compute the short-time coherence of the signal with another signal. Uses the function
        :func:`scipy.signal.coherence` for this computation.

        Parameters
        ----------
        other : Signal
            The other Signal that will be used to compute the short-time coherence.

        width : float
            Window size (in Signal index units) that will be used in computing the short-time
            coherence.

        overlap : float, optional
            Units (index units) of overlap between consecutive computations.

        Returns
        -------
        : Signal
            The computed short-time coherence, indexed by frequency.
        """
        other = other(self.index)
        nperseg = int(width * self.fs)
        nol = int(overlap * self.fs)
        f, cxy = coherence(self.values, other.values, self.fs, nperseg=nperseg, noverlap=nol,
                           **kwargs)
        return Signal(cxy, index=f)
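
A minimal usage sketch for this method, assuming utkit's Signal behaves as the code above implies (constructible as Signal(values, index=...), callable for resampling, and exposing a sampling rate via .fs); the waveforms and window sizes here are illustrative only:

import numpy as np
from utkit import Signal

fs = 1e6                                    # 1 MHz sampling rate
t = np.arange(0, 1e-3, 1 / fs)              # 1 ms record
s1 = Signal(np.sin(2 * np.pi * 5e4 * t), index=t)
s2 = Signal(np.sin(2 * np.pi * 5e4 * t) + 0.1 * np.random.randn(t.size), index=t)

# width and overlap are given in index units (seconds here); the method
# converts them to samples internally using the sampling rate
cxy = s1.coherence(s2, width=1e-4, overlap=5e-5)
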
Example #2
def coherence_plot(df):
  r"""Returns a plot showing the coherence matrix.

  Parameters
  ----------
  df : pandas.DataFrame
    Must have its index set to the timestamps and its columns set to the EEG channel names.
    
  Returns
  -------
  figure
    Plotly figure for the coherence heatmap.

  """
  df = df.iloc[::100]
  currd = df.to_numpy()
  currd = currd.T
  coherearr = []
  for i in range(len(currd)):
    temparr = []
    for j in range(len(currd)):
      temparr.append(np.mean(
          signal.coherence(currd[i], currd[j], 500)[1]
        ))
    coherearr.append(temparr)
  df = pd.DataFrame(data=np.array(coherearr), index=df.columns, columns=df.columns)
  fig = df.iplot(kind='heatmap',
    theme='solar', asFigure=True)
  fig.layout.update(dict(title="Coherence", height=800, width=800))
  colorscale = [[0, '#0000FF'],[.5, '#FFFFFF'], [1, '#FF0000']]  # custom colorscale
  fig.data.update( 
      dict(colorscale=colorscale, showscale = True,
        colorbar=dict(title="coherence")))
  return fig
Example #3
def computeCoherenceFeatures(lfp_data, channel_pairs, Fs, power_bands, event_indices, t_window):
	'''
	Inputs
		- lfp_data: dictionary of data, with one entry for each channel
		- channel_pairs: list of channel pairs
		- Fs: sampling frequency 
		- power_bands: list of power bands 
		- event_indices: N x M array of event indices, where N is the number of trials and M is the number of 
		                 different events 
		- t_window: length M array of time window (in seconds) to compute features over, one element for each feature 
	Outputs
		- features: dictionary with N entries (one per trial), each a C x K matrix where C is the number of channel pairs 
					and K is the number of features (number of power bands times M)
	'''
	nperseg = int(Fs*0.25)
	noverlap = int(Fs*0.1875)
	t_window = [int(Fs*time) for time in t_window]  # changing seconds into samples

	N, M = event_indices.shape
	times = np.ones([N,M], dtype=int)  # window lengths in samples, used as slice offsets below
	for t,time in enumerate(t_window):
		times[:,t] = time*np.ones(N)

	features = dict()

	channels = lfp_data.keys()
	num_channel_pairs = len(channel_pairs)

	for trial in range(0,N):
		events = event_indices[trial,:]  # should be array of length M
		trial_powers = np.zeros([num_channel_pairs,M*len(power_bands)])
		for j, pair in enumerate(channel_pairs):
			chann1 = pair[0]
			chann2 = pair[1]
			chann_data1 = lfp_data[chann1]
			chann_data2 = lfp_data[chann2]
			feat_counter = 0
			for i,ind in enumerate(events):
				data1 = chann_data1[ind:ind + times[trial,i]]
				data1 = np.ravel(data1)
				data2 = chann_data2[ind:ind + times[trial,i]]
				data2 = np.ravel(data2)
				f, Cxy = signal.coherence(data1, data2, nperseg = nperseg, fs=Fs, noverlap=noverlap)
				#Cxy = Cxy/np.sum(Cxy)
				#Cxy = 10*np.log10(Cxy)
				Cxy = np.sqrt(Cxy)
				for k in range(0,len(power_bands)):
					low_band, high_band = power_bands[k]
					freqs = np.ravel(np.nonzero(np.greater(f,low_band)&np.less_equal(f,high_band)))
					tot_power_band = np.sum(Cxy[freqs])
					trial_powers[j,feat_counter] = tot_power_band
					#trial_powers[j,i*len(power_bands) + k] = np.sum(tot_power_band)/float(len(tot_power_band))
					feat_counter += 1
		features[str(trial)] = trial_powers

	return features
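
A minimal sketch of how computeCoherenceFeatures might be driven with synthetic inputs (assuming numpy and scipy.signal are imported as np and signal, as the function body implies); the channel layout, bands, and event indices are illustrative:

import numpy as np
from scipy import signal

Fs = 1000.0                                    # Hz
lfp_data = {1: np.random.randn(int(10 * Fs)),  # 10 s of synthetic LFP per channel
            2: np.random.randn(int(10 * Fs)),
            3: np.random.randn(int(10 * Fs))}
channel_pairs = [(1, 2), (1, 3), (2, 3)]
power_bands = [(8, 12), (13, 30), (30, 55)]    # alpha, beta, low gamma
event_indices = np.array([[1000, 3000],        # N=2 trials, M=2 events per trial
                          [2000, 4000]])
t_window = [0.5, 1.0]                          # seconds of data per event

features = computeCoherenceFeatures(lfp_data, channel_pairs, Fs,
                                    power_bands, event_indices, t_window)
# one entry per trial, each of shape (len(channel_pairs), M * len(power_bands))
print(features['0'].shape)                     # -> (3, 6)
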
Example #4
    def test_identical_input(self):
        x = np.random.randn(20)
        y = np.copy(x)  # So `y is x` -> False

        f = np.linspace(0, 0.5, 6)
        C = np.ones(6)
        f1, C1 = coherence(x, y, nperseg=10)

        assert_allclose(f, f1)
        assert_allclose(C, C1)
Example #5
    def test_phase_shifted_input(self):
        x = np.random.randn(20)
        y = -x

        f = np.linspace(0, 0.5, 6)
        C = np.ones(6)
        f1, C1 = coherence(x, y, nperseg=10)

        assert_allclose(f, f1)
        assert_allclose(C, C1)
Example #6
def get_coherence(s1, s2, sampling=5000):
    """ Gets the coherence between signals s1 and s2

    Input:
        s1,s2: two time series
        sampling: the sampling frequency of the signals [default: 5000]

    Output:
        f, Cxy: frequency array and coherence between s1 and s2
    """
    from scipy.signal import coherence

    # scipy.signal.coherence expects the sampling rate as the `fs` keyword
    return coherence(s1, s2, fs=sampling)
Example #7
def CalcCoherence(x,y):

    fs = 500
    f, Cxy = signal.coherence(x, y, fs, nperseg=256, noverlap = 128, detrend = False)
    plt.semilogy(f, Cxy, alpha = 0.2)
    plt.xlabel('frequency [Hz]')
    plt.ylabel('Coherence')
    plt.show()

    # equivalent degrees of freedom: (length(timeseries)/windowhalfwidth)*mean_coherence
    # calculate 95% confidence level
    edof = (len(x)/(256/2)) * Cxy.mean()
    gamma95 = 1.-(0.05)**(1./(edof-1.))
    print(gamma95)
    return np.array(Cxy)
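
A self-contained sketch of the same significance check on synthetic signals; the edof and gamma95 expressions mirror the comments in CalcCoherence above, while the test signals and sampling rate are illustrative:

import numpy as np
from scipy import signal

fs = 500
t = np.arange(0, 20, 1 / fs)
x = np.sin(2 * np.pi * 10 * t) + np.random.randn(t.size)
y = np.sin(2 * np.pi * 10 * t) + np.random.randn(t.size)

f, Cxy = signal.coherence(x, y, fs, nperseg=256, noverlap=128, detrend=False)

# equivalent degrees of freedom and 95% significance level, as in CalcCoherence
edof = (len(x) / (256 / 2)) * Cxy.mean()
gamma95 = 1. - (0.05) ** (1. / (edof - 1.))
print(gamma95)    # coherence values above this level are significant at the 95% level
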
Example #8
    def _pipe_as_flow(self, signal_packet):
        # Get signal_packet details
        hkey = list(signal_packet.keys())[0]
        ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']
        ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']
        signal = signal_packet[hkey]['data']
        fs = int(np.mean(1./np.diff(ax_0_ix)))

        # Assume undirected connectivity
        triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)

        # Initialize association matrix
        adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))

        # Derive signal segmenting for coherence estimation
        nperseg = int(self.secperseg*fs)
        noverlap = int(self.secperseg*fs*self.pctoverlap)

        freq, Cxy = coherence(signal[:, triu_ix],
                              signal[:, triu_iy],
                              fs=fs, window=self.window,
                              nperseg=nperseg, noverlap=noverlap,
                              axis=0)

        # Find the frequencies inside the desired band (self.cf)
        cf_idx = np.flatnonzero((freq >= self.cf[0]) &
                                (freq <= self.cf[1]))

        # Store coherence in association matrix
        adj[triu_ix, triu_iy] = np.mean(Cxy[cf_idx, :], axis=0)
        adj += adj.T

        new_packet = {}
        new_packet[hkey] = {
            'data': adj,
            'meta': {
                'ax_0': signal_packet[hkey]['meta']['ax_1'],
                'ax_1': signal_packet[hkey]['meta']['ax_1'],
                'time': {
                    'label': 'Time (sec)',
                    'index': float(ax_0_ix[-1])
                }
            }
        }

        return new_packet
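
A stand-alone sketch of the core of _pipe_as_flow above: building a symmetric coherence adjacency matrix over all channel pairs in one scipy.signal.coherence call. The sampling rate, window parameters, and band are placeholders for the instance attributes (self.secperseg, self.pctoverlap, self.window, self.cf) used by the pipe:

import numpy as np
from scipy.signal import coherence

fs = 250                                   # Hz (illustrative)
n_samples, n_chan = 5000, 8
data = np.random.randn(n_samples, n_chan)

secperseg, pctoverlap = 1.0, 0.5           # stand-ins for self.secperseg / self.pctoverlap
cf = (8, 12)                               # stand-in for self.cf (band of interest, Hz)
nperseg = int(secperseg * fs)
noverlap = int(secperseg * fs * pctoverlap)

# undirected connectivity: coherence over all channel pairs at once (axis=0)
triu_ix, triu_iy = np.triu_indices(n_chan, k=1)
freq, Cxy = coherence(data[:, triu_ix], data[:, triu_iy],
                      fs=fs, window='hann', nperseg=nperseg,
                      noverlap=noverlap, axis=0)

# average coherence inside the band and fill a symmetric adjacency matrix
band = np.flatnonzero((freq >= cf[0]) & (freq <= cf[1]))
adj = np.zeros((n_chan, n_chan))
adj[triu_ix, triu_iy] = np.mean(Cxy[band, :], axis=0)
adj += adj.T
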
Example #9
def get_coherence(filename1, filename2):
	f1_data, f1_sample_rate = get_file_data(filename1)
	f2_data, f2_sample_rate = get_file_data(filename2)

	if f1_sample_rate != f2_sample_rate:
		print("Sample rates of the signals differ but must be the same. Exiting...")
		return
	else:
		sample_rate = f1_sample_rate
	f, coherence = signal.coherence(f1_data, f2_data, sample_rate, nperseg = 128)
	plot.gca().set_ylim([0.000001, 100])
	plot.semilogy(f, coherence)
	len_coherence = len(coherence)
	print("coherence vector len: {0}".format(len_coherence))
	print(coherence)
	sum_coherence = sum(coherence)
	print("coherence vector sum: {0}".format(sum_coherence))
	similarity = int(sum_coherence/len_coherence * 100.0)
	print("similarity(%): {0}".format(similarity))
	distortion = 1 - coherence
	distortion_avg = np.mean(distortion)
	distortion_std = np.std(distortion)
	print("distortion(%): {0:.1f} std: {1:.1f}".format(distortion_avg * 100, distortion_std * 100))
	log_coherence = abs(np.log10(coherence)) 
	log_coherence_sum = sum(log_coherence)
	print(log_coherence)

	# these thresholds come from my personal observations
	threshold_mean = 1  # mean of |log10(coherence)| not more than 1
	threshold_std = 0.5  # with std dev not more than 0.5

	coh_sum = log_coherence_sum
	coh_mean = np.mean(log_coherence)
	coh_std =  np.std(log_coherence)

	print(
		"log coherence sum: {0:.1f} "
		"mean: {1:.1f} std: {2:.1f}".
		format(coh_sum, coh_mean, coh_std))

	signal_is = "GOOD"
	if coh_mean > threshold_mean or coh_std > threshold_std:
		signal_is = "BAD"
	print("The signal is {0}".format(signal_is))
		
	plot.show()
Example #10
def plot_coherence():
    casts = np.r_[92:123]
    # 33 comes from nperseg of 64
    coheres = np.zeros((len(casts), 33))
    for i, cast_num in enumerate(casts):
        C, sigma, theta, p, S, T = read_cast(cast_num)
        f, cohere = coherence(C, T, fs=24, nperseg=64)
        coheres[i] = cohere

    cohere_bins = np.r_[0:1:11j]
    cohere_f = np.zeros((f.size, cohere_bins.size - 1))
    for i, cohere_i in enumerate(coheres.T):
        cohere_f[i] = np.histogram(cohere_i, cohere_bins, density=True)[0]

    # Exact details need work. Mostly wanted to see at what frequency
    # coherence clearly starts to decrease
    # I'd say 4Hz
    plt.pcolormesh(f, cohere_bins, cohere_f.T, cmap='afmhot_r', vmax=4)
    return coheres
Example #11
def Coherence(ts1, ts2, fs, dt):

    """Compute magnitude squared coherence (using coherence)"""

    # Gapfill the missing value
    mk1 = np.isnan(ts1)
    mk2 = np.isnan(ts2)
    ts1[mk1] = nanmean(ts1)
    ts1[mk2] = nanmean(ts1)
    ts2[mk1] = nanmean(ts2)
    ts2[mk2] = nanmean(ts2)

    nwindow = 9  # Number of windows to smooth data
    length = math.floor(len(ts1) / nwindow)  # Length obtained by dividing the series into nwindow windows
    nwindow_fl = math.floor(log2(length))  # Largest power-of-two exponent that fits the window length
    window = int(2 ** nwindow_fl)  # segment_length

    f, Cxy = signal.coherence(ts1, ts2, fs, nperseg=window, detrend=dt)

    return [f, Cxy]
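
A minimal sketch of calling Coherence on gappy synthetic series, assuming the snippet's module provides nanmean, log2, math, and scipy.signal under the names it uses (numpy's versions are taken here); the series and sampling rate are illustrative:

import math
import numpy as np
from numpy import log2, nanmean
from scipy import signal

fs = 1.0                                    # one sample per time unit (illustrative)
t = np.arange(2048, dtype=float)
ts1 = np.sin(2 * np.pi * 0.05 * t) + 0.5 * np.random.randn(t.size)
ts2 = np.sin(2 * np.pi * 0.05 * t) + 0.5 * np.random.randn(t.size)
ts1[100:110] = np.nan                       # simulate missing values in each series
ts2[500:505] = np.nan

f, Cxy = Coherence(ts1, ts2, fs, dt='constant')
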
Example #12
 def coh(self, mode):
   t, B, E = self.get('t'), self.get('B' + mode), self.get('E' + mode)
   return signal.coherence(B, E, fs=1/( t[1] - t[0] ), nperseg=t.size//2,
                           noverlap=t.size//2 - 1)[1]
Example #13
    anue_nue_cohspec[r] = np.zeros((howmanyfreqs[r], howmanytimes[r]))
    nue_nux_cohspec[r] = np.zeros((howmanyfreqs[r], howmanytimes[r]))
    beginhere[r] = Nperseg[r] // 2 + 1
    if mod(Nperseg[r], 2) == 0:
        times_cspec[r] = t[r][beginhere[r]:-beginhere[r] + 1]
    else:
        times_cspec[r] = t[r][beginhere[r]:-beginhere[r]]
print('Computing coherence spectrograms now')
for r in rotrates:
    bh = beginhere[r]
    getfreqs = 'yes'
    for time in range(howmanytimes[r]):
        bup = ss.coherence(gwc[r][time:time + Nperseg[r]],
                           anuec[r][time:time + Nperseg[r]],
                           fs=1. / dt[r],
                           window=(Window),
                           nperseg=Nperseg[r] // 2,
                           noverlap=Nperseg[r] // 2 - 1,
                           nfft=Nfft[r])
        gw_anue_cohspec[r][:, time] = bup[1]
        bup = ss.coherence(gwc[r][time:time + Nperseg[r]],
                           nuxc[r][time:time + Nperseg[r]],
                           fs=1. / dt[r],
                           window=(Window),
                           nperseg=Nperseg[r] // 2,
                           noverlap=Nperseg[r] // 2 - 1,
                           nfft=Nfft[r])
        gw_nux_cohspec[r][:, time] = bup[1]
        bup = ss.coherence(gwc[r][time:time + Nperseg[r]],
                           nuec[r][time:time + Nperseg[r]],
                           fs=1. / dt[r],
Example #14
 def parallel_coh(self, first_elect, sec_elect):
     f_loop, Cxy_loop = coherence(self.volt_state[:, first_elect + 2],
                                  self.volt_state[:, sec_elect + 2],
                                  self.downsampling_rate,
                                  nperseg=self.downfreq_ratio)
     return f_loop, Cxy_loop
Example #15
                          fs,
                          nperseg=windowSize,
                          noverlap=0,
                          scaling="spectrum")
fxy, xyPower = signal.csd(x,
                          y,
                          fs,
                          nperseg=windowSize,
                          noverlap=0,
                          scaling="spectrum")

welchCoh1 = np.power(np.absolute(xyPower), 2)
welchCoh2 = xPower * yPower
welchCoh = welchCoh1 / welchCoh2

f, Cxy = signal.coherence(x, y, fs, nperseg=windowSize, noverlap=N // 200)
f2, Cxy2 = signal.coherence(x, y, fs, nperseg=windowSize, noverlap=0)

# my method
powXX = np.zeros(shape=(fftSize), dtype="complex")
powYY = np.zeros(shape=(fftSize), dtype="complex")
powXY = np.zeros(shape=(fftSize), dtype="complex")
# win
winFnc = signal.windows.hann(windowSize)
winFnc = winFnc / winFnc.sum()

# save stacked
stackX = np.zeros(shape=(fftSize), dtype="complex")
stackY = np.zeros(shape=(fftSize), dtype="complex")
arrX = np.zeros(shape=(windows, fftSize), dtype="complex")
arrY = np.zeros(shape=(windows, fftSize), dtype="complex")
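
The start of this snippet is cut off; a self-contained sketch of the same consistency check (magnitude-squared coherence assembled from Welch auto-spectra and the cross-spectral density, compared against scipy.signal.coherence) might look like this, with the signals and window size chosen for illustration:

import numpy as np
from scipy import signal

fs = 128
N = 16384
windowSize = 512
t = np.arange(N) / fs
x = np.sin(2 * np.pi * 4 * t) + np.random.randn(N)
y = 0.5 * x + np.random.randn(N)

# auto- and cross-spectra with identical segmenting
fxx, xPower = signal.welch(x, fs, nperseg=windowSize, noverlap=0, scaling="spectrum")
fyy, yPower = signal.welch(y, fs, nperseg=windowSize, noverlap=0, scaling="spectrum")
fxy, xyPower = signal.csd(x, y, fs, nperseg=windowSize, noverlap=0, scaling="spectrum")

# magnitude-squared coherence assembled by hand: |Pxy|^2 / (Pxx * Pyy)
welchCoh = np.abs(xyPower) ** 2 / (xPower * yPower)

# should agree with the library routine when the segmenting matches
f2, Cxy2 = signal.coherence(x, y, fs, nperseg=windowSize, noverlap=0)
print(np.allclose(welchCoh, Cxy2))
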
Example #16
    # We now have all the data that is either LH or LDO
    if debug:
        print(inv)
        print(st)

    ## Sliding Window ##
    for stT in st.slide(window_length=window, step=stepsize):
        stT.detrend('constant')
        ## Here we grab the pressure and compute the coherence with the two different components ##
        press = stT.select(channel='LDO')[0].data
        if sta == 'WCI':
            press = press*1.43*10**-2 + 8*10**4
        else:
            press = press*0.1
        f,cxy1 = coherence(stT.select(channel='LH1')[0].data, press, nperseg=lenfft)
        f,cxy2 = coherence(stT.select(channel='LH2')[0].data, press, nperseg=lenfft)
        cmean = np.mean(np.array([cxy1[1:],cxy2[1:]]),axis=0)
        f = f[1:]
        cmean = cmean[(f>= fmin) & (f <= fmax)]
        cmean = np.mean(cmean)
        if 'c' not in vars():
            c = cmean
        else:
            c = np.vstack((c, cmean))
        if net != 'XX':
            stT.rotate(method="->ZNE", inventory=inv)
        stT.filter('bandpass',freqmin=fmin, freqmax=fmax)
        stT.taper(0.05)
        if debug:
Example #17
legend(loc=(1.02, 0.2))
ylabel('Variance (prop to)')
gca().set_xticklabels('')
subplot(313)
for ii, nn in enumerate(nomprsvec):
    semilogx(frqall[frqind],
             frqall[frqind] * psd['u'][nn][frqind],
             label=str(nn) + ' db',
             color=cm.YlGnBu(ii * 30 + 80))
xlabel('CPD')
savefig('../figures/VertModes/from_raw_data/VarSpec_' + moorname + '.png',
        bbox_inches='tight')

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(10, 7))
for ii, nn in enumerate(nomprsvec):
    f, Cxy = signal.coherence(pb, adat['v'].sel(nomprs=nn), nperseg=4096)
    fday = f * 48
    fint = fday < 0.8
    ax1.semilogx(fday[fint],
                 Cxy[fint],
                 label=str(nn) + ' db',
                 color=cm.YlGnBu(ii * 30 + 80))
ax1.legend(loc=(1.02, 0.2))
ax1.set_ylabel('with meridional velocity')
for ii, nn in enumerate(nomprsvec):
    f, Cxy = signal.coherence(pb, adat['v'].sel(nomprs=nn), nperseg=4096)
    fday = f * 48
    fint = fday < 0.8
    ax2.plot(fday[fint],
             Cxy[fint],
             label=str(nn) + ' db',
Example #18
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')

# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)
ax.set(xlabel='Frequency (Hz)',
       xlim=f[[0, 99]],
       ylabel='Power (dB)',
       title='Power spectrum of signal 1')

# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.)
ax.set(xlabel='Frequency (Hz)',
       xlim=f[[0, 49]],
       ylabel='Coherence',
       title='Coherence between the timeseries')
fig.tight_layout()

###############################################################################
# Now we put the signals at two locations on the cortex. We construct a
# :class:`mne.SourceEstimate` object to store them in.
#
# The timeseries will have a part where the signal is active and a part where
# it is not. The techniques we'll be using in this tutorial depend on being
# able to contrast data that contains the signal of interest versus data that
# does not (i.e. it contains only noise).
Example #19
    meg_h5 = h5.File(downsamp_meg_file, 'r')
    dset = meg_h5['/' + subj + '/MEG/' + meg_sess[0] + '/resampled_truncated']
    meg_data = dset[()]  # Dataset.value was removed from h5py; index with () to read the array
    meg_h5.close()
    meg_timeseries[:, :, s] = meg_data

    #Version where MEG filtering occurs before downsampling
    #    dset_name = database['/' + subj + '/MEG/' + meg_sess[0] + '/timeseries']
    #    meg_data = pu.read_database(dset_name, rois).values
    #    filt_data = pac.butter_filter(meg_data[:trunc_meg, :], 500, max_f, 'low')
    #    downsamp_data = resample(filt_data, int(meg_con))
    #    meg_timeseries[:, :, s] = downsamp_data

    del fmri_data, meg_data

freq, coh = coherence(meg_timeseries[:, 0, 0], fmri_timeseries[:, 0, 0], fs=fs)
coh = coh - 1
n_iters = 1000
n = 0
coherence_perm = np.ndarray(shape=[n_iters, len(coh)])
while n != n_iters:
    print('%s: Running permutation coherence %d' % (pu.ctime(), n + 1))
    rand_subj = np.random.randint(0, len(subj_overlap))
    rand_roi = np.random.randint(0, len(rois))
    fmri_rand_ts = fmri_timeseries[:, rand_roi, rand_subj]

    rand_subj = np.random.randint(0, len(subj_overlap))
    rand_roi = np.random.randint(0, len(rois))
    meg_rand_ts = meg_timeseries[:, rand_roi, rand_subj]

    _, coh_perm = coherence(fmri_rand_ts, meg_rand_ts, fs=fs)
Example #20
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)',
       title='Signal 1')
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')

# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]],
       ylabel='Power (dB)', title='Power spectrum of signal 1')

# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence',
       title='Coherence between the timeseries')
fig.tight_layout()

###############################################################################
# Now we put the signals at two locations on the cortex. We construct a
# :class:`mne.SourceEstimate` object to store them in.
#
# The timeseries will have a part where the signal is active and a part where
# it is not. The techniques we'll be using in this tutorial depend on being
# able to contrast data that contains the signal of interest versus data that
# does not (i.e. it contains only noise).
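
A hypothetical sketch of the mne.SourceEstimate construction the comment refers to; the signals, vertex numbers, and subject name here are placeholders, not the tutorial's actual values:

import numpy as np
import mne

sfreq = 250.
n_times = 1000
signal1 = 1e-9 * np.random.randn(n_times)    # stand-ins for the tutorial's two signals
signal2 = 1e-9 * np.random.randn(n_times)

data = np.vstack([signal1, signal2])                 # shape (n_sources, n_times)
vertices = [np.array([12345]), np.array([54321])]    # one vertex per hemisphere (placeholder ids)
stc = mne.SourceEstimate(data, vertices=vertices,
                         tmin=0., tstep=1. / sfreq, subject='sample')
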
Example #21
           vmax=abs(cwtmatr).max(),
           vmin=-abs(cwtmatr).max())
plt.show()
# %%

d = read_one_file('/home/tai/Desktop/results_stable.axa')
# %%
x, yf = drawfft(np.abs(np.array(d['Nx'])))
plt.plot(x, yf)
plt.xlim([-5, 5])
plt.ylim([-200, 200])

# %%
y = alll[0]['RVx']
x = alll[0]['RVy']
f, Cxy = signal.coherence(x, y, 100, nperseg=1024)
plt.semilogy(f, Cxy)

# %%
from sklearn.cross_decomposition import CCA
# %%
data = get_trajectory(name='Adam', seq=1)
X = data[0].iloc[:1000, :6]
Y = data[0].iloc[10000:11000, :6]
R = np.random.rand(1000, 6) - 1 / 2

#%%
X = (X - X.mean()) / X.std()
Y = (Y - Y.mean()) / Y.std()

print(X.shape)
Example #22
 def coh(self, mode):
   t, b, e = self.get('t'), self.get('b' + mode), self.get('e' + mode)
   return signal.coherence(b, e, fs=1/( t[1] - t[0] ), nperseg=t.size//2,
                           noverlap=t.size//2 - 1)[1]
Example #23
 def time_coherence(self):
     signal.coherence(self.x, self.y)
Example #24
 def time_coherence(self):
     signal.coherence(self.x, self.y)
Example #25
def eig_centrality(X, fs=1000, connections=None, v0=None, tol=1e-3):
    """
    Given a window of data, X, build a graph G with coherency as edges
    and compute eigenvector centrality.

    Parameters
    ----------
    X: ndarray, shape(n,p)
        data array of n samples, p channels
    connections: list
        list of either (i) integers
            the integers are the channels that will become nodes
            and the graph will be complete
        or (ii) length 2 lists of integers
            the integers correspond to directed edges

    Returns
    -------
    eig_vec: ndarray, shape(p)
        eigenvector of adjacency matrix
    """

    # # build coherence graph
    # if connections == None:
    # 	n,p = X.shape
    # 	connections = range(p)
    # weightType = 'coherence'
    # G = build_network(X, connections, weightType)

    # # get the eigenvector centrality
    # eig_dict = nx.eigenvector_centrality(G, weight='coherence')
    # eig_vec = np.zeros(p)
    # for i in range(p):
    #     eig_vec[i] = eig_dict[i]
    # w = [1.0]

    # get the data shape
    n, p = X.shape

    # initialize the adjcency matrix
    A = np.zeros((p, p))

    # construct adjacency matrix
    for i in range(p):
        for j in range(i + 1, p):
            f, cxy = coherence(X[:, i], X[:, j], fs=fs)
            # cxy, f = cohere(X[:,i], X[:,j], Fs=fs) # agggg using matplotlib because scipy is being dumb
            c = np.mean(cxy)
            A[i, j] = c  # upper triangular part
            A[j, i] = c  # lower triangular part
            if np.isnan(c):
                print('(' + str(i) + ',' + str(j) + ") is nan")

    # EDIT: none of these are working. They return NaN eigenvalues - why??
    # print('Sums are: ', str(np.mean(A,axis=1)))

    # Method 0: Power Iteration
    maxiter = 12
    tol = 1e-5
    if v0 is None:
        v0 = np.ones(p) / np.sqrt(p)
    for i in range(maxiter):

        # multiply and normalize
        v_new = np.dot(A, v0)
        v_new = v_new / np.linalg.norm(v_new)

        # check for tolerance
        diff = np.linalg.norm(v_new - v0)
        if diff < tol:
            break

        # update iterate
        v0 = v_new

    v = v_new
    w = np.array([np.mean(np.dot(A, v) / v)])  # eigenvalue
    eig_vec = v

    # # Method 1: All eigenvectors of symmetric matrix
    # w,v = np.linalg.eigh(A, UPLO='U') # get the eigenvectors
    # eig_vec = v[:,-1]

    # # Method 2: Top 1 eigenvector of matrix
    # w, v = scipy.sparse.linalg.eigs(A, k=1, v0=v0, tol=tol)
    # eig_vec = v[:,0]

    # # Method 3: Top 1 eigenvector of a symmetric matrix
    # w, v = scipy.sparse.linalg.eigsh(A, k=1, v0=v0, tol=tol)
    # eig_vec = v[:,0]

    return eig_vec
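
A minimal sketch of driving eig_centrality on synthetic multichannel data (assuming coherence is imported from scipy.signal, as the function body implies); the channel count and coupling are illustrative:

import numpy as np
from scipy.signal import coherence

fs = 1000
n, p = 4000, 6
rng = np.random.RandomState(0)
common = rng.randn(n)                              # shared component couples the channels
X = np.column_stack([common + 0.5 * rng.randn(n) for _ in range(p)])

centrality = eig_centrality(X, fs=fs)
print(centrality)    # length-p vector; larger entries mark more "central" channels
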