Code Example #1
def __csd_correlation(v, m):
    '''compute correlation coefficient between CSD estimate and CSD
    for a given source diameter'''
    if m == 'delta':
        icsd_input[m].update({'diam': v * pq.m})
        _icsd = icsd.DeltaiCSD(**icsd_input[m])
        corrcoef = pl.corrcoef(
            CSD_filtered.flatten(),
            pl.array(_icsd.filter_csd(_icsd.get_csd()) / pq.m).flatten())
    elif m == 'step':
        icsd_input[m].update({'diam': v * pq.m})
        _icsd = icsd.StepiCSD(**icsd_input[m])
        corrcoef = pl.corrcoef(
            CSD_filtered.flatten(),
            pl.array(_icsd.filter_csd(_icsd.get_csd()) / pq.m).flatten())
    elif m == 'spline':
        icsd_input[m].update({'diam': v * pq.m})
        _icsd = icsd.SplineiCSD(**icsd_input[m])
        corrcoef = pl.corrcoef(
            CSD76ptF.flatten(),
            pl.array(_icsd.filter_csd(_icsd.get_csd()) / pq.m).flatten())
    else:
        raise Exception('m = %s should be one of [delta, step, spline]' % m)

    return corrcoef[0, -1]
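In every branch the scalar score is read off the correlation matrix returned by corrcoef: row 0, last column. A minimal standalone sketch of that pattern with plain numpy (synthetic data, not the CSD arrays used above):

import numpy as np

truth = np.sin(np.linspace(0, 4 * np.pi, 200))     # reference signal
estimate = truth + 0.1 * np.random.randn(200)      # noisy estimate of it
r = np.corrcoef(truth, estimate)[0, -1]            # off-diagonal entry = correlation
print(r)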
Code Example #2
File: stats.py Project: mGolos/OldCodeSamples
def matricesCorrelation(M1,
                        M2,
                        corrFunc=pearsonr,
                        finite=False,
                        posit=False,
                        avg=True,
                        **kwa):
    ''' Return the correlation between the two matrices, defined as the mean of the
    correlations computed for each column. If posit=True, negative entries are set
    to zero before the correlation is computed.
    '''
    if M1.shape != M2.shape:
        print('Functional connectome shapes are not the same')
        return 0

    N = M1.shape[0]
    corr = zeros(N)

    if corrFunc == corrcoef:
        if posit:
            for c in range(N):
                corr[c] = corrcoef(where(M1[c] < 0, 0, M1[c]),
                                   where(M2[c] < 0, 0, M2[c]))[0, 1]
        else:
            for c in range(N):
                corr[c] = corrcoef(M1[c], M2[c])[0, 1]
    elif corrFunc == pearsonr:
        if posit:
            for c in range(N):
                corr[c] = pearsonr(where(M1[c] < 0, 0, M1[c]),
                                   where(M2[c] < 0, 0, M2[c]))[0]
        else:
            for c in range(N):
                corr[c] = pearsonr(M1[c], M2[c])[0]
    else:
        if posit:
            for c in range(N):
                corr[c] = corrFunc(where(M1[c] < 0, 0, M1[c]),
                                   where(M2[c] < 0, 0, M2[c]), **kwa)
        else:
            for c in range(N):
                corr[c] = corrFunc(M1[c], M2[c], **kwa)

    if finite:
        corr = where(isfinite(corr), corr, 0.)

    if avg:
        return corr.mean()
    else:
        return corr
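A hypothetical usage sketch for matricesCorrelation (it relies on the module-level imports of the original stats.py: zeros, where, isfinite and corrcoef from numpy, pearsonr from scipy.stats):

import numpy as np

rng = np.random.default_rng(0)
M1 = rng.random((10, 10))
M2 = M1 + 0.05 * rng.random((10, 10))   # slightly perturbed copy of M1
print(matricesCorrelation(M1, M2))      # mean Pearson correlation, close to 1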
Code Example #3
def whatyouwant(paramin):
    global dir_pri1, dir_pri2, dir_pri3
    dens_moy = arange(0.02, 0.981, 0.03)

    for d1 in range(len(dens_moy)):
        dir_sub1 = dir_pri1 + '/Norm_%.2f_DensMoy_%.2f' % (paramin,
                                                           dens_moy[d1])
        nbpatterns1 = len(os.listdir(dir_sub1))

        for p1 in range(nbpatterns1):
            d_patty1 = dir_sub1 + '/pattern_%.3i.npy' % p1
            patty1 = np_load(d_patty1)

            dir_sub2 = dir_pri2 + '/Norm_0.50_DensMoy_0.02'
            nbpatterns2 = len(os.listdir(dir_sub2)) // 2  # integer count for range()

            for p2 in range(nbpatterns2):
                d_patty2 = dir_sub2 + '/pattern_%.3i.npy' % p2
                patty2 = np_load(d_patty2)
                d_patty0 = dir_sub2 + '/states_%.3i.jpeg' % p2

                if  (corrcoef(patty1, patty2)[0,1] > 0.7) \
                and (abs(norm(patty1) - norm(patty2)) / norm(patty2) < 10.05):
                    direction = dir_pri3 + '/C_W%.2fD%.2fP%.3i_Pica_%.3i' % (
                        paramin, dens_moy[d1], p1, p2)
                    cmd = commands.getoutput('cp ' + d_patty2 + " " +
                                             direction + '.npy')
                    toimage(
                        array(zip(*reversed(patty2.reshape(1, len(
                            patty2))))).T).save(direction + '.jpeg')
                    print(direction, abs(norm(patty1) - norm(patty2)) / norm(patty2))
    return 1
Code Example #4
File: util.py Project: issfangks/milo-lab
def plot_xy(cursor, query, prefix=None, color='b', marker='.', xlog=False, ylog=False, xlabel='', ylabel='', title=''):
    """
        Executes the 'query' which should return two numerical columns.
    """
    cursor.execute(query)
    x_list = []
    y_list = []
    for row in cursor:
        (x, y) = row
        if (x != None and y != None):
            x_list.append(x)
            y_list.append(y)
    
    X = pylab.array(x_list)
    Y = pylab.array(y_list)
    pylab.figure()
    pylab.hold(True)
    pylab.plot(X, Y, color=color, marker=marker, linestyle='None')
    if (xlog):
        pylab.xscale('log')
    if (ylog):
        pylab.yscale('log')
    
    pylab.title(title + " (R^2 = %.2f)" % pylab.corrcoef(X,Y)[0,1]**2)
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    if (prefix != None):
        pylab.savefig('../res/%s.pdf' % prefix, format='pdf')
    pylab.hold(False)
Code Example #5
File: ComparesUs.py Project: mGolos/OldCodeSamples
def whatyouwant(paramin):
    global dir_pri1, dir_pri2, dir_pri3
    normW2 = arange(1.4, 2.601, 0.05).tolist()
    dens_moy = arange(0.02, 0.981, 0.03)

    for d1 in range(len(dens_moy)):
        dir_sub1 = dir_pri1 + '/Norm_%.2f_DensMoy_%.2f' % (paramin,
                                                           dens_moy[d1])
        nbpatterns1 = len(os.listdir(dir_sub1))

        for p1 in range(nbpatterns1):
            d_patty1 = dir_sub1 + '/pattern_%.3i.npy' % p1
            patty1 = np_load(d_patty1)

            for w2 in range(len(normW2)):
                dir_sub2 = dir_pri2 + '/Norm_%.2f' % (normW2[w2])
                nbpatterns2 = len(os.listdir(dir_sub2)) // 2  # integer count for range()

                for p2 in range(nbpatterns2):
                    d_patty2 = dir_sub2 + '/pattern_%.3i.npy' % p2
                    patty2 = np_load(d_patty2)
                    d_patty0 = dir_sub2 + '/states_%.3i.jpeg' % p2

                    if  (corrcoef(patty1, patty2)[0,1] > 0.99) \
                    and (abs(norm(patty1) - norm(patty2)) / norm(patty2) < 0.0005):
                        direction = dir_pri3 + '/C_W%.2fD%.2fP%.3i_D_W%.2fP%.3i' % (
                            paramin, dens_moy[d1], p1, normW2[w2], p2)
                        cmd = commands.getoutput('cp ' + d_patty2 + " " +
                                                 direction + '.npy')
                        cmd = commands.getoutput('cp ' + d_patty0 + " " +
                                                 direction + '.jpeg')
                        print(direction, abs(norm(patty1) - norm(patty2)) / norm(patty2))
    return 1
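Code Examples #3 and #5 accept a pair of patterns when the Pearson correlation is high and the vector norms are nearly equal. A small standalone sketch of that criterion (patterns_match is a hypothetical helper; the thresholds are the ones used in this example):

import numpy as np
from numpy.linalg import norm

def patterns_match(p1, p2, r_min=0.99, rel_norm_max=0.0005):
    # same test as above: high correlation and nearly identical norms
    r = np.corrcoef(p1, p2)[0, 1]
    return r > r_min and abs(norm(p1) - norm(p2)) / norm(p2) < rel_norm_max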
Code Example #6
File: spikes.py Project: kghose/neurapy
  def correl_single_window(sbwA, sbwB):
    """sbwA and sbwB are obtained as subwindowsA[:,n,:,:], with n marching through all the windows.
    Indexes - 0 -> epochs
              1 -> subwindows
              2 -> start/stop

    """
    N0 = sbwA.shape[0]
    N1 = sbwA.shape[1]
    spk_countA = pylab.zeros((N0,N1))
    spk_countB = pylab.zeros((N0,N1))
    for n in range(N0):  # epochs
      for m in range(N1):  # subwindows
        spk_countA[n,m] = sbwA[n,m,1] - sbwA[n,m,0]
        spk_countB[n,m] = sbwB[n,m,1] - sbwB[n,m,0]



    spk_countA = spk_countA.flatten() - spk_countA.mean()
    spk_countB = spk_countB.flatten() - spk_countB.mean()
    R = pylab.corrcoef(spk_countA, spk_countB)
    if R.size:
      r = R[0,1]
    else:
      r = 0
    return r
Code Example #7
File: stats.py Project: mGolos/OldCodeSamples
def triSupPearsonCor(mat1, mat2, finite=False, norm=False, mask=None):
    if mask is None:
        mask = triSup(mat1, ind=True)

    corr = corrcoef(mat1.take(mask), mat2.take(mask))[0, 1]

    if finite:
        return where(isfinite(corr), corr, 0.)
    else:
        return corr
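triSup (defined elsewhere in the same stats.py) apparently returns the flat indices of the upper triangle, so only those entries are correlated. An equivalent standalone sketch using numpy's own index helper:

import numpy as np

def upper_tri_corr(m1, m2):
    iu = np.triu_indices_from(m1, k=1)          # indices strictly above the diagonal
    return np.corrcoef(m1[iu], m2[iu])[0, 1]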
Code Example #8
File: pybmi.py Project: gservers/myopen
def calcCorrcoef(widget, msg):
	global g_frhist, g_behavhist, g_corrcoef
	print("length of history %d\n" % len(g_frhist));
	a = pylab.zeros((len(g_frhist),256+4))
	i = 0
	for v in g_frhist:
		j=0
		for u in v:
			a[i,j] = u
			j = j+1
		i = i+1
	g_corrcoef = pylab.corrcoef(pylab.transpose(a))
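The loop copies each firing-rate history vector into a row of a, and the transpose makes each of the 256+4 columns a variable, so corrcoef returns a channel-by-channel correlation matrix. A more direct numpy sketch of the same step (assuming every entry of g_frhist already has the full length):

import numpy as np

a = np.array([list(v) for v in g_frhist], dtype=float)   # shape (n_samples, n_channels)
g_corrcoef = np.corrcoef(a, rowvar=False)                # (n_channels, n_channels)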
Code Example #9
File: stats.py Project: mGolos/OldCodeSamples
def patternChecking(candidats,
                    patterns,
                    out='patterns',
                    test='equal',
                    areIn=False,
                    opt=0.9):
    ''' Return the candidates that are not already in patterns, or the patterns
    extended with them.
    out - "patterns": patterns extended with the candidates that differ
        - "candidats": the candidates that differ from the existing patterns
    test - "equal": equality
         - "corrcoef": Pearson correlation
         - "fPearson": fPearsonCorrelation.
    '''

    if not len(patterns):
        try:
            candidats[0][0]
            return candidats
        except:
            return array([candidats])

    else:
        if out == 'patterns': nruter = list(patterns)
        elif out == 'candidats': nruter = []

        if test == 'equal': simil = lambda x, y: x in y
        elif test == 'corrcoef':
            simil = lambda x, y: corrcoef(x, y)[0, 1:].max()
        elif test == 'fPearson':
            simil = lambda x, y: fPearsonCorrelation(x, y).max()

        if areIn:
            testFunc = lambda x, y: simil(x, y) >= opt
        else:
            testFunc = lambda x, y: simil(x, y) <= opt

        try:
            candidats[0][0]
            for i in range(len(candidats)):
                if testFunc(candidats[i], patterns):
                    nruter.append(candidats[i])
        except:
            if testFunc(candidats, patterns):
                nruter.append(candidats)

        return nruter
Code Example #10
    def __get_melody_similarity(self, np_audio_1, np_audio_2):

        loud_threshold_1 = max(np_audio_1) * rs.BELOW_LOUDEST_PEAK
        loud_threshold_2 = max(np_audio_2) * rs.BELOW_LOUDEST_PEAK

        silence = rs.SILENCE_BETWEEN_REPEATS * rs.RATE

        print('__get_melody_similarity', len(np_audio_1), len(np_audio_2))

        peaks_in_np_audio_1 = self.__get_peaks(np_audio_1,
                                               loud_threshold=loud_threshold_1,
                                               step=silence)
        peaks_in_np_audio_2 = self.__get_peaks(np_audio_2,
                                               loud_threshold=loud_threshold_2,
                                               step=silence)

        # if quantity of peaks is different -> these are different melodies
        if len(peaks_in_np_audio_1) != len(peaks_in_np_audio_2):
            return 0

        aligned_1, aligned_2 = self.__align_melodies(np_audio_1, np_audio_2)

        # if the melodies' lengths differ a lot -> these are different melodies
        deviation = rs.MELODY_LENGTH_DEVIATION * rs.RATE
        print(
            deviation,
            abs(len(aligned_1) - len(aligned_2)),
        )
        print(len(aligned_1), len(aligned_2), '\n')
        if abs(len(aligned_1) - len(aligned_2)) > deviation:
            return 0

        end = self.__get_min_length_from_audio_couple(aligned_1, aligned_2) - 1

        trimmed_audio_1, trimmed_audio_2 = np_audio_1[:end], np_audio_2[:end]

        frequencies_1 = self.__get_frequency(trimmed_audio_1)
        frequencies_2 = self.__get_frequency(trimmed_audio_2)

        result = pylab.corrcoef(frequencies_1, frequencies_2)
        similarity = abs(float(result[1][0]))

        return round(similarity, 3)
Code Example #11
def plot_xy(cursor,
            query,
            prefix=None,
            color='b',
            marker='.',
            xlog=False,
            ylog=False,
            xlabel='',
            ylabel='',
            title=''):
    """
        Executes the 'query' which should return two numerical columns.
    """
    cursor.execute(query)
    x_list = []
    y_list = []
    for row in cursor:
        (x, y) = row
        if (x != None and y != None):
            x_list.append(x)
            y_list.append(y)

    X = pylab.array(x_list)
    Y = pylab.array(y_list)
    pylab.figure()
    pylab.hold(True)
    pylab.plot(X, Y, color=color, marker=marker, linestyle='None')
    if (xlog):
        pylab.xscale('log')
    if (ylog):
        pylab.yscale('log')

    pylab.title(title + " (R^2 = %.2f)" % pylab.corrcoef(X, Y)[0, 1]**2)
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    if (prefix != None):
        pylab.savefig('../res/%s.pdf' % prefix, format='pdf')
    pylab.hold(False)
Code Example #12
    def correl_single_window(sbwA, sbwB):
        """sbwA and sbwB are obtained as subwindowsA[:,n,:,:], with n marching through all the windows.
    Indexes - 0 -> epochs
              1 -> subwindows
              2 -> start/stop

    """
        N0 = sbwA.shape[0]
        N1 = sbwA.shape[1]
        spk_countA = pylab.zeros((N0, N1))
        spk_countB = pylab.zeros((N0, N1))
        for n in range(N0):  # epochs
            for m in range(N1):  # subwindows
                spk_countA[n, m] = sbwA[n, m, 1] - sbwA[n, m, 0]
                spk_countB[n, m] = sbwB[n, m, 1] - sbwB[n, m, 0]

        spk_countA = spk_countA.flatten() - spk_countA.mean()
        spk_countB = spk_countB.flatten() - spk_countB.mean()
        R = pylab.corrcoef(spk_countA, spk_countB)
        if R.size:
            r = R[0, 1]
        else:
            r = 0
        return r
Code Example #13
File: pop_sizes.py Project: pselvaraj87/covasim-1
for k, key in enumerate(keys):
    label = f'{int(float(key[1:]))/1000}k: {results[k][-1]:0.0f}'
    pl.plot(results[k], label=label, lw=3, color=colors[k])
    print(label)
# pl.legend()
pl.title('Total number of infections')
pl.xlabel('Day')
pl.ylabel('Number of infections')
sc.commaticks(axis='y')

pl.subplot(1, 3, 2)
for k, key in enumerate(keys):
    label = f'{int(float(key[1:]))/1000}k: {results[k][-1]/popsizes[k]*100:0.1f}'
    pl.plot(results[k] / popsizes[k] * 100, label=label, lw=3, color=colors[k])
    print(label)
# pl.legend()
pl.title('Attack rate')
pl.xlabel('Day')
pl.ylabel('Attack rate (%)')

pl.subplot(1, 3, 3)
fres = [res[-1] for res in results]
pl.scatter(popsizes, fres, s=150, c=colors)
pl.title('Correlation')
pl.xlabel('Population size')
pl.ylabel('Number of infections')
sc.commaticks(axis='x')
sc.commaticks(axis='y')

print(pl.corrcoef(popsizes, fres))
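pl.corrcoef on two 1-D sequences returns a 2x2 matrix; the off-diagonal entry is the correlation between population size and the final infection count. A short follow-up sketch using the popsizes and fres computed above:

r = pl.corrcoef(popsizes, fres)[0, 1]
print(f'r = {r:0.3f}, R^2 = {r**2:0.3f}')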
Code Example #14
def whatyouwant(paramin):
    global dir_pri, dir_pri2

    dens_moy = arange(0.02, 0.981, 0.03)  # (33)
    #dens_moy = [0.74] # (33)
    for d in range(len(dens_moy)):
        dir_sub = '/Norm_%.2f_DensMoy_%.2f' % (paramin, dens_moy[d])
        if not os.path.exists(dir_pri + dir_sub):
            print('No subfolder ' + dir_sub)
            return 0
        if not os.path.exists(dir_pri2 + dir_sub):
            os.mkdir(dir_pri2 + dir_sub)
        if not os.path.exists(dir_pri2 + dir_sub + '/ok'):
            nb_patterns = 0
            err = -1
            nb_de_candidats = len(os.listdir(dir_pri + dir_sub)) // 2

            if nb_de_candidats < 250:
                for i in range(nb_de_candidats):
                    candidat = dir_pri + dir_sub + '/pattern_%.3i.npy' % i
                    #destination = '../FilesIN/pattern_000.npy'
                    ##destination = '../FilesIN/initialization.npy'
                    #cmd = commands.getoutput('cp ' + candidat + " " + destination)

                    if np_load(candidat).sum() == 0:  # stop at the first all-zero pattern
                        break

                    #print i,nb_de_candidats,paramin,dens_moy[d]
                    prog = "../evaCure/Main.py"
                    sys.argv = [prog , "-dens_moy",str(dens_moy[d]), \
                                    '-normW',str(paramin), \
                                    '-fileIN', str(candidat), \
                                '-N','66', \
                                '-m','1', \
                                '-dt','0.01', \
                                '-dur','100', \
                                '-a','0.', \
                                '-b','1.', \
                                '-theta','0.5', \
                            '-nper','3000', \
                            '-err','0.00001', \
                                '-GUI','0', \
                                '-save','0', \
                            '-test','2', \

                                '-netw','0', \
                                '-patt','1', \
                            '-init','3', \
                                '-who','0', \
                            #'-fileIN','../FilesIN/initialization.npy', \
                                '-dens','0.5', \
                            '-noise','0.', \
                                '-conn','5', \
                                '-upd','0', \
                                '-Dx','0.1', \
                                '-posit','0', \
                                '-ap0e0','0', \
                                '-mT','0', \
                                '-p_var','0.', \
                            #'-normW','2.', \
                                '-tau_theta','1', \
                                '-theta_ref','0.5', \
                            #'-dens_moy','0.3', \

                                '-priT','0', \
                                '-resh','1', \
                                '-nbcol','1', \

                                '-thre','0', \
                            '-gamma','30', \
                                '-derr','0.2', \
                                '-tau','1', \
                                '-intFDx','0']
                    execfile(prog)

                    #Network.closers()
                    #coooo = corrcoef(np_load(candidat), Network.states)[0,1]
                    #err = 1. - where(isfinite(coooo), coooo, 0)
                    #print i,nb_de_candidats,paramin,dens_moy[d] , coooo, err
                    err = 1 - corrcoef(np_load(candidat), Network.states)[0, 1]
                    if err < 1e-3:
                        #if Network.dpos < 1e-3:
                        destination2 = dir_pri2 + dir_sub + '/pattern_%.3i.npy' % nb_patterns
                        nb_patterns += 1

                        cmd = commands.getoutput('cp ' + candidat + " " +
                                                 destination2)
            #print 'Norm %.2f, DensMoy %.2f, candidats %i, retenu %i, dcorr %.2e' %(paramin,dens_moy[d],nb_de_candidats,nb_patterns,Network.dpos)
            cmd = commands.getoutput('touch ' + dir_pri2 + dir_sub + '/ok')
            print('Norm %.2f, DensMoy %.2f, candidates %i, kept %i, dcorr %.2e' % (
                paramin, dens_moy[d], nb_de_candidats, nb_patterns, err))
    return 1
Code Example #15
    def _generate_histo_plot(self):
        """ This function generates the actual histogram plot"""
        fighist = pylab.figure()

        nr_of_feats = len(self.indexlist)
        # again, number of subplot columns is 8 at most while
        # using as many rows as necessary
        if nr_of_feats <= 8: nr_of_cols = nr_of_feats
        else: nr_of_cols = 8
        nr_of_rows = (len(self.indexlist) - 1) // 8 + 1
        fighist.set_size_inches((5 * nr_of_cols, 3 * nr_of_rows))

        important_features = dict()
        important_feature_names = pylab.array([])
        itercount = 1
        for feat_index in self.indexlist:
            # backgroundcolor for the feature importance text
            bg_color = self.own_colormap(self.normalizer(\
                        self.feature_time_series[tuple(feat_index)]))
            fighist.add_subplot(nr_of_rows, nr_of_cols, itercount)
            itercount += 1
            # plot the actual histogram
            pylab.hist([
                self.time_series_histo[label][:, feat_index[0], feat_index[1]]
                for label in self.mean_time_series.keys()
            ],
                       bins=20,
                       normed=True,
                       histtype='step')
            # write feature importance as fig.text
            cal = pylab.gca().axis()
            pylab.text((.23 * cal[0] + .77 * cal[1]),
                       (.8 * cal[3] + .2 * cal[2]),
                       '%.2f' %
                       self.feature_time_series[feat_index[0], feat_index[1]],
                       fontsize='xx-large',
                       bbox=dict(fc=bg_color, ec=bg_color, alpha=0.6, pad=14))
            # Title uses feature name
            pylab.title(
                'Channel %s at %dms' %
                (self.channel_names[feat_index[1]], float(feat_index[0]) /
                 self.sample_data.sampling_frequency * 1000),
                fontsize='x-large')

            # initialize, if no important features known yet
            if not important_features:
                for label in self.mean_time_series.keys():
                    important_features[label] = \
                        pylab.array(self.time_series_histo[label][:,
                                    feat_index[0], feat_index[1]])
            # stack current important feature with previous
            else:
                for label in self.mean_time_series.keys():
                    important_features[label] = pylab.vstack(
                        (important_features[label],
                         pylab.array(
                             self.time_series_histo[label][:, feat_index[0],
                                                           feat_index[1]])))
            # memorize feature name
            important_feature_names = \
             pylab.append(important_feature_names, \
             [('%s' % self.channel_names[feat_index[1]]).ljust(4, '_')\
             + ('%dms' % (float(feat_index[0]) / \
             self.sample_data.sampling_frequency * 1000)).rjust(6, '_')])

        self.corr_important_feat_names = important_feature_names
        for label in important_features.keys():
            self.corr_important_feats[label] = \
                pylab.corrcoef(important_features[label])
        # Draw the "feature development" plots of the important features
        self._generate_feature_development_plots(important_features)
        return fighist
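Each entry of important_features ends up as a 2-D array with one row per selected feature and one column per trial, so pylab.corrcoef returns a feature-by-feature correlation matrix for every label. A tiny shape sketch with hypothetical sizes:

import numpy as np

feats = np.random.rand(4, 50)   # 4 selected features, 50 trials (hypothetical)
C = np.corrcoef(feats)          # rows are variables -> 4 x 4 correlation matrix
assert C.shape == (4, 4)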
Code Example #16
import sys
from pylab import show, plot, figure, subplot, mean, genfromtxt, corrcoef, legend

x = genfromtxt(fname=sys.argv[1], skip_header = 1, usecols = 0 )
xi = x

for col in range(1, 7):
	figure(1)
	subplot(2,3,col )
	y =  genfromtxt(fname=sys.argv[1], skip_header = 1, usecols = col )
	yi = y
	#slope calculated for b = 0 in y = ax + b
	a = mean(xi*yi)/mean(xi**2)
	corrr = corrcoef(yi, a*xi) 
	#print 'corrr ', corrr[1][0]
	print('slope ' + str(col), a)
	print(corrcoef(yi, a*xi)[1][0])
	#def func(xi, a):
	#	return a*xi
	#popt, pcov = curve_fit(func, xi, yi)	
	#print 'popt', popt
	#print 'pcov', pcov 
	line = a*xi
	plot(xi,line,'r-',xi,yi,'o', label = a)
	legend()
	#legend(['m ', 'r**2 '],[a, corrr])
show()
Code Example #17
xlabel("Time (s)  |  Señal Procesada")

subplot(2, 1, 2)
plot(timeArray, canal1)
plot(peaksNoFiltro / fs, canalNoFiltro[peaksNoFiltro])
ylabel('Amplitude')
xlabel("Time (s)  |  Señal No-Procesada")

plt.show()

plot(peaksFiltrado / fs, picosProcesado_suavizado, color='k')  #,linewidth=3.5)
plot(peaksFiltrado / fs, picosFiltradoEnOriginal)

plt.show()

print(corrcoef(picosFiltradoEnOriginal, picosProcesado_suavizado))

print(corrcoef(canalNoFiltro[peaksNoFiltro], picosProcesado_suavizado))
'''
print("Correlación de ubicación de puntos máximos" + str(np.corrcoef(peaksFiltrado, peaksNoFiltro)))

print("Correlación de valores de puntos máximos" + str(np.corrcoef(valuesFiltrado, valuesNoFiltro)))


posPicosFiltrado = list(peaksFiltrado)
valoresFiltrado = list(valuesFiltrado['peak_heights'])

print(type(posPicosFiltrado))
print(type(valuesFiltrado))
print(type(valoresFiltrado))
print(len(peaksFiltrado))
Code Example #18
def whatyouwant(paramin):
    global dir_pri
    dir_sub = dir_pri + '/Norm_%.2f' %paramin
    os.mkdir(dir_sub)

    multi_dens = arange(0.02,0.981,0.03) # (33)
    nb_patterns = 0
    tendance =[]
    for d in range(len(multi_dens)):
        for i in range(100):
            prog = "../evaCure/Main.py"
            sys.argv = [prog , "-dens",str(multi_dens[d]), \
                               '-normW',str(paramin), \
                        '-N','66', \
                        '-m','1', \
                        '-dt','0.01', \
                        '-dur','100', \
                        '-a','0.', \
                        '-b','1.', \
                        '-theta','0.5', \
                        '-nper','3000', \
                        '-err','0.00001', \
                        '-GUI','0', \
                        '-save','0', \
                        '-test','1', \

                        '-netw','0', \
                        '-patt','1', \
                        '-init','0', \
                        '-who','0', \
                        '-fileIN','../FilesIN/initialization.npy', \
                    #'-dens','0.5', \
                        '-noise','0.', \
                        '-conn','5', \
                        '-upd','0', \
                        '-Dx','0.1', \
                        '-posit','0', \
                        '-ap0e0','0', \
                        '-mT','0', \
                        '-p_var','0.', \
                    #'-normW','2.', \
                        '-tau_theta','1', \
                        '-theta_ref','0.5', \
                    '-dens_moy','0.26', \

                        '-priT','0', \
                        '-resh','1', \
                        '-nbcol','1', \

                        '-thre','0', \
                        '-gamma','30', \
                        '-derr','0.2', \
                        '-tau','1', \
                        '-intFDx','0']
            execfile(prog)

            maybe = True
            for j in range(1,nb_patterns + 1):
                #if corrcoef(np_load(dir_sub+'/pattern_%.3i.npy'%(j-1)), Network.X)[0,1] > 0.90:
                if corrcoef(np_load(dir_sub+'/pattern_%.3i.npy'%(j-1)), Network.states)[0,1] > 0.90:
                    maybe = False
                    tendance[j-1][d] += 1
                    break
            if maybe:
                #saveStates(dir_sub+'/', Network.X, resh=1, opt=nb_patterns)
                saveStates(dir_sub+'/', Network.states, resh=1, opt=nb_patterns)
                tendance.append(zeros(len(multi_dens)))
                tendance[nb_patterns][d] = 1
                nb_patterns += 1
        print('norm: %.2f ; dens: %.3f' % (paramin, multi_dens[d]))

    ofi = open(dir_sub+'/tendance.txt', 'w')
    for i in range(len(multi_dens)):
        ofi.write('%.2f\t' %multi_dens[i])
        for j in range(len(tendance)):
            ofi.write('%.3i\t' %tendance[j][i])
        ofi.write('\n')
    ofi.close()

    return 1
Code Example #19
import sys
from pylab import show, plot, figure, subplot, mean, genfromtxt, corrcoef, legend

x = genfromtxt(fname=sys.argv[1], skip_header=1, usecols=0)
xi = x

for col in range(1, 7):
    figure(1)
    subplot(2, 3, col)
    y = genfromtxt(fname=sys.argv[1], skip_header=1, usecols=col)
    yi = y
    #slope calculated for b = 0 in y = ax + b
    a = mean(xi * yi) / mean(xi**2)
    corrr = corrcoef(yi, a * xi)
    #print 'corrr ', corrr[1][0]
    print('slope ' + str(col), a)
    print(corrcoef(yi, a * xi)[1][0])
    #def func(xi, a):
    #	return a*xi
    #popt, pcov = curve_fit(func, xi, yi)
    #print 'popt', popt
    #print 'pcov', pcov
    line = a * xi
    plot(xi, line, 'r-', xi, yi, 'o', label=a)
    legend()
    #legend(['m ', 'r**2 '],[a, corrr])
show()
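Because correlation is invariant to positive rescaling, corrcoef(yi, a * xi) carries the same information as corrcoef(yi, xi) up to the sign of a; the slope itself is the least-squares estimate for a line through the origin. A compact numpy restatement (a sketch, reusing the xi and yi left over from the loop above):

import numpy as np

a = np.sum(xi * yi) / np.sum(xi * xi)   # least-squares slope for y = a*x with b = 0
r = np.corrcoef(xi, yi)[0, 1]           # same magnitude as corrcoef(yi, a*xi)[0, 1]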
Code Example #20
    def _generate_histo_plot(self):
        """ This function generates the actual histogram plot"""
        fighist = pylab.figure()
        
        nr_of_feats = len(self.indexlist)
        # again, number of subplot columns is 8 at most while
        # using as many rows as necessary
        if nr_of_feats <= 8: nr_of_cols = nr_of_feats
        else: nr_of_cols = 8
        nr_of_rows = (len(self.indexlist) - 1) // 8 + 1
        fighist.set_size_inches((5 * nr_of_cols,  3 * nr_of_rows))

        important_features = dict()
        important_feature_names = pylab.array([])
        itercount = 1
        for feat_index in self.indexlist:
            # backgroundcolor for the feature importance text
            bg_color = self.own_colormap(self.normalizer(\
                        self.feature_time_series[tuple(feat_index)]))
            fighist.add_subplot(nr_of_rows, nr_of_cols, itercount)
            itercount += 1
            # plot the actual histogram
            pylab.hist([self.time_series_histo[label]
                        [:, feat_index[0], feat_index[1]]
                        for label in self.mean_time_series.keys()],
                        bins=20, normed=True , histtype='step')
            # write feature importance as fig.text
            cal = pylab.gca().axis()
            pylab.text((.23 * cal[0] + .77 * cal[1]),
                       (.8 * cal[3] + .2 * cal[2]), '%.2f' %
                       self.feature_time_series[feat_index[0], feat_index[1]],
                       fontsize='xx-large',
                       bbox=dict(fc=bg_color, ec=bg_color, alpha=0.6, pad=14))
            # Title uses feature name
            pylab.title('Channel %s at %dms' %
                        (self.channel_names[feat_index[1]],
                         float(feat_index[0]) / 
                         self.sample_data.sampling_frequency * 1000),
                        fontsize='x-large')
            
            # initialize, if no important features known yet
            if not important_features:
                for label in self.mean_time_series.keys():
                    important_features[label] = \
                        pylab.array(self.time_series_histo[label][:,
                                    feat_index[0], feat_index[1]])
            # stack current important feature with previous
            else:
                for label in self.mean_time_series.keys():
                    important_features[label] = pylab.vstack(
                        (important_features[label],
                         pylab.array(self.time_series_histo[label]
                         [:, feat_index[0], feat_index[1]])))
            # memorize feature name
            important_feature_names = \
             pylab.append(important_feature_names, \
             [('%s' % self.channel_names[feat_index[1]]).ljust(4, '_')\
             + ('%dms' % (float(feat_index[0]) / \
             self.sample_data.sampling_frequency * 1000)).rjust(6, '_')])
        
        self.corr_important_feat_names = important_feature_names
        for label in important_features.keys():
            self.corr_important_feats[label] = \
                pylab.corrcoef(important_features[label])
        # Draw the "feature development" plots of the important features
        self._generate_feature_development_plots(important_features)
        return fighist