Example #1
def data_to_ch(data):
    ch = {}
    for ch_ind in range(1, 97):
        ch[ch_ind] = {}
        ch[ch_ind]["bl"] = data[ch_ind]["blanks"]
        ch[ch_ind]["bl_mu"] = pl.mean(ch[ch_ind]["bl"])
        ch[ch_ind]["bl_sem"] = pl.std(ch[ch_ind]["bl"]) / pl.sqrt(len(ch[ch_ind]["bl"]))
        for ind in sorted(data[ch_ind].keys()):
            if ind != "blanks":
                k = ind[0]
                if k not in ch[ch_ind]:
                    ch[ch_ind][k] = {}
                    ch[ch_ind][k]["fr"] = []
                    ch[ch_ind][k]["fr_mu"] = []
                    ch[ch_ind][k]["fr_sem"] = []
                    ch[ch_ind][k]["pos_y"] = []
                    ch[ch_ind][k]["dprime"] = []
                ch[ch_ind][k]["fr"].append(data[ch_ind][ind]["on"])
                ch[ch_ind][k]["fr_mu"].append(pl.mean(data[ch_ind][ind]["on"]))
                ch[ch_ind][k]["fr_sem"].append(pl.std(data[ch_ind][ind]["on"]) / pl.sqrt(len(data[1][ind]["on"])))
                ch[ch_ind][k]["pos_y"].append(ind[2])
                # print ch[ch_ind][k]['pos_y']
                # print pl.std(data[ch_ind][ind]['on'])
                ch[ch_ind][k]["dprime"].append(
                    (pl.mean(data[ch_ind][ind]["on"]) - ch[ch_ind]["bl_mu"])
                    / ((pl.std(ch[ch_ind]["bl"]) + pl.std(data[ch_ind][ind]["on"])) / 2)
                )
                # print ch[ch_ind]['OSImage_5']['pos_y']
    return ch
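For reference, the discriminability index appended above is d' = (mu_on - mu_blank) / ((sigma_blank + sigma_on) / 2). A toy check of the same formula on synthetic firing rates (all names below are local to this sketch):

import pylab as pl
on, blanks = pl.normal(12, 2, 50), pl.normal(8, 2, 50)
dprime = (pl.mean(on) - pl.mean(blanks)) / ((pl.std(blanks) + pl.std(on)) / 2)
print(dprime)  # roughly 2 for these parameters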
Example #2
def corr_score(file1, file2, delta, bin=1., dur=100., ncell=500):
    """Similarity score by correlation coefficient. The spike trains are convolved with a triangular kernel."""
    d1 = numpy.loadtxt(file1)
    d2 = numpy.loadtxt(file2)
    x = numpy.zeros(int(ncell * dur / bin))
    y = numpy.zeros(int(ncell * dur / bin))
    for j in range(ncell):
        if d1.size == 2:
            s1 = numpy.array(d1[0] * (d1[1] == j))
        else:
            s1 = d1[d1[:, 1] == j, 0]
        if d2.size == 2:
            s2 = numpy.array(d2[0] * (d2[1] == j))
        else:
            s2 = d2[d2[:, 1] == j, 0]
        kern = numpy.append(numpy.arange(delta / bin),
                            numpy.arange(delta / bin, -1, -1))
        ts1, dump = pylab.histogram(s1, numpy.arange(0., dur + bin, bin))
        ts2, dump = pylab.histogram(s2, numpy.arange(0., dur + bin, bin))
        x[int(j * dur / bin):int((j + 1) * dur / bin)] = numpy.convolve(
            ts1, kern, 'same')
        y[int(j * dur / bin):int((j + 1) * dur / bin)] = numpy.convolve(
            ts2, kern, 'same')
    x = x - pylab.mean(x)
    y = y - pylab.mean(y)
    cor = sum(x * y) / (len(x) * pylab.std(x) * pylab.std(y))
    return cor
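A hedged usage sketch: the loading code implies each file holds one (spike_time, cell_id) row per spike, so two identical files should score near 1. The file names and spike counts are invented, and the module is assumed to import numpy and pylab as the body requires:

import numpy
spikes = numpy.column_stack([numpy.random.uniform(0., 100., 200),
                             numpy.random.randint(0, 500, 200)])
numpy.savetxt('train_a.txt', spikes)
numpy.savetxt('train_b.txt', spikes)
print(corr_score('train_a.txt', 'train_b.txt', delta=2.))  # close to 1.0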
Example #3
def scatter_stats(db, s1, s2, f1=None, f2=None, **kwargs):
    if f1 is None:
        f1 = lambda x: x  # identity function

    if f2 is None:
        f2 = f1

    x = []
    xerr = []

    y = []
    yerr = []

    for k in db:
        x_k = [f1(x_ki) for x_ki in db[k].__getattribute__(s1).gettrace()]
        y_k = [f2(y_ki) for y_ki in db[k].__getattribute__(s2).gettrace()]

        x.append(pl.mean(x_k))
        xerr.append(pl.std(x_k))

        y.append(pl.mean(y_k))
        yerr.append(pl.std(y_k))

        pl.text(x[-1], y[-1], " %s" % k, fontsize=8, alpha=0.4, zorder=-1)

    default_args = {"fmt": "o", "ms": 10}
    default_args.update(kwargs)
    pl.errorbar(x, y, xerr=xerr, yerr=yerr, **default_args)
    pl.xlabel(s1)
    pl.ylabel(s2)
Example #4
def compare_models(db, stoch="itn coverage", stat_func=None, plot_type="", **kwargs):
    if stat_func is None:
        stat_func = lambda x: x

    X = {}
    for k in sorted(db.keys()):
        c = k.split("_")[2]
        X[c] = []

    for k in sorted(db.keys()):
        c = k.split("_")[2]
        X[c].append([stat_func(x_ki) for x_ki in db[k].__getattribute__(stoch).gettrace()])

    x = pl.array([pl.mean(xc[0]) for xc in X.values()])
    xerr = pl.array([pl.std(xc[0]) for xc in X.values()])
    y = pl.array([pl.mean(xc[1]) for xc in X.values()])
    yerr = pl.array([pl.std(xc[1]) for xc in X.values()])

    if plot_type == "scatter":
        default_args = {"fmt": "o", "ms": 10}
        default_args.update(kwargs)
        for c in X.keys():
            pl.text(pl.mean(X[c][0]), pl.mean(X[c][1]), " %s" % c, fontsize=8, alpha=0.4, zorder=-1)
        pl.errorbar(x, y, xerr=xerr, yerr=yerr, **default_args)
        pl.xlabel("First Model")
        pl.ylabel("Second Model")
        pl.plot([0, 1], [0, 1], alpha=0.5, linestyle="--", color="k", linewidth=2)

    elif plot_type == "rel_diff":
        d1 = sorted(100 * (x - y) / x)
        d2 = sorted(100 * (xerr - yerr) / xerr)
        pl.subplot(2, 1, 1)
        pl.title("Percent Model 2 deviates from Model 1")

        pl.plot(d1, "o")
        pl.xlabel("Countries sorted by deviation in mean")
        pl.ylabel("deviation in mean (%)")

        pl.subplot(2, 1, 2)
        pl.plot(d2, "o")
        pl.xlabel("Countries sorted by deviation in std err")
        pl.ylabel("deviation in std err (%)")
    elif plot_type == "abs_diff":
        d1 = sorted(x - y)
        d2 = sorted(xerr - yerr)
        pl.subplot(2, 1, 1)
        pl.title("Percent Model 2 deviates from Model 1")

        pl.plot(d1, "o")
        pl.xlabel("Countries sorted by deviation in mean")
        pl.ylabel("deviation in mean")

        pl.subplot(2, 1, 2)
        pl.plot(d2, "o")
        pl.xlabel("Countries sorted by deviation in std err")
        pl.ylabel("deviation in std err")
    else:
        assert 0, "plot_type must be abs_diff, rel_diff, or scatter"

    return pl.array([x, y, xerr, yerr])
Example #6
def plot2():
    import pylab as pl
    hs, ds = [], []
    for event, time in load():
        if event == main_start:
            start_time = time
        elif event == main_end:
            d0, h0 = days_hours(start_time)
            d1, h1 = days_hours(time)
            hs.append((h0, h1))
            ds.append((d0, d1))
            pl.plot([d0, d1], [h0, h1], 'b')
    ihs, fhs = zip(*hs)
    ids, fds = zip(*ds)
    pl.plot(ids, ihs, 'g')
    pl.plot([ids[0], ids[-1]], [pl.mean(ihs)] * 2, 'g--')
    pl.plot(fds, fhs, 'r')
    pl.plot([fds[0], fds[-1]], [pl.mean(fhs)] * 2, 'r--')
    f, i = pl.mean(fhs), pl.mean(ihs)
    pl.plot([fds[0], fds[-1]], [(f + i) / 2] * 2, 'b--')
    print(i, f, f - i, (f + i) / 2)
    std_i, std_f = pl.std(ihs), pl.std(fhs)
    print(std_i, std_f)
    pl.xlim(ids[0], fds[-1])
    pl.ylim(4, 28)
    pl.grid(True)
    pl.xlabel('Time [day]')
    pl.ylabel('Day interval [hours]')
    pl.show()
Example #7
 def stderr(X,Y=None):
     if len(X) <= 1: return 0.0
     stderr_x = pow(pylab.std(X),2)/len(X)
     if Y:
         if len(Y) <= 1: return 0.0
         stderr_y = pow(pylab.std(Y),2)/len(Y)
     else: stderr_y = 0
     return math.sqrt(stderr_x + stderr_y)
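This is the standard error of a mean, extended to a difference of two means: SE = sqrt(s_X^2/n_X + s_Y^2/n_Y). A quick check, assuming pylab and math are imported as the body requires:

X = [1.0, 2.0, 3.0, 4.0]
Y = [2.0, 2.5, 3.5]
print(stderr(X))     # standard error of the mean of X
print(stderr(X, Y))  # standard error of the difference of the two means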
Example #9
File: postprocess.py Project: sth/pyQCD
def Vplot(Ws):
    """Calculate the potential function and plot it"""

    N_bstrp = int(input("Please enter the number of bootstraps: "))
    N_bin = int(input("Please enter the bin size: "))
    style = input("Please enter a linestyle: ")

    Ws = bin(Ws, N_bin)
    aVs = pl.zeros((N_bstrp,) + pl.shape(Ws)[1:])
    bs = pl.zeros((N_bstrp, 3))

    for i in range(N_bstrp):
        W = pl.mean(bootstrap(Ws), axis=0)
        aVs[i] = calcaV(W, method="fit")
        bs[i] = potfit(aVs[i, :, 0])

    r = pl.arange(1, 7)
    aV = pl.mean(aVs, axis=0)
    aVerr = pl.std(aVs, axis=0)
    b = pl.mean(bs, axis=0)

    a_s = 0.5 / pl.sqrt((1.65 + bs[:, 1]) / bs[:, 0])
    sigmas = bs[:, 0] / a_s**2
    Bs = bs[:, 1]
    As = bs[:, 2] / a_s

    a = pl.mean(a_s)
    aerr = pl.std(a_s)
    sigma = pl.mean(sigmas)
    sigmaerr = pl.std(sigmas)
    B = pl.mean(Bs)
    Berr = pl.std(Bs)
    A = pl.mean(As)
    Aerr = pl.std(As)

    print("Fit parameters:")
    print("sigma = %f +/- %f fm^-2 = %f +/- %f MeV^2"
          % (sigma, sigmaerr, sigma * 197**2, sigmaerr * 197**2))
    print("B = %f +/- %f" % (B, Berr))
    print("A = %f +/- %f fm^-1 = %f +/- %f MeV"
          % (A, Aerr, A * 197, Aerr * 197))
    print("Lattice spacing, a = %f +/- %f fm = %f +/- %f MeV^-1"
          % (a, aerr, a / 197, aerr / 197))

    r_fit = pl.arange(0.25, r[-1] + 1, 0.1)
    aV_fit = V(b, r_fit)

    handles = []
    handles.append(pl.errorbar(r, aV[:, 0], yerr=aVerr[:, 0], fmt='o' + style[0]))
    handles.append(pl.plot(r_fit, aV_fit, style))
    pl.ylim([0, pl.nanmax(aV) + 0.25])
    pl.xlim([0, pl.nanmax(r_fit) + 0.25])
    pl.xlabel("$r / a$")
    pl.ylabel("$aV(r)$")

    return aV, handles
Example #10
def calcavesky():
    input = open("noise.dat", 'r')
    aperture = []
    counts = []
    area = []
    #j=0
    for line in input:
        if line.find('#') > -1:  #skip lines with '#' in them
            continue
        if line.find('.fits') > -1:  #reset the line counter when a new image starts
            j = 0
            continue
        j = j + 1
        if (j > 3):
            t = line.split()
            aperture.append(float(t[0]))
            counts.append(float(t[1]))
            area.append(float(t[2]))
    input.close()
    aperture = N.array(aperture, 'f')
    counts = N.array(counts, 'f')
    area = N.array(area, 'f')

    ap = N.zeros(npoints, 'f')

    aparea = N.zeros(nap, 'f')
    aveap = N.zeros(nap, 'f')
    aveaperr = N.zeros(nap, 'f')
    avearea = N.zeros(nap, 'f')
    aveareaerr = N.zeros(nap, 'f')
    #for i in range(len(ap)):
    for i in range(nap):
        #print i, len(ap),aperture[i],aperture[i+1]
        if (i < (nap - 1)):
            ap = N.compress(
                (aperture >= aperture[i]) & (aperture < aperture[i + 1]),
                counts)
            aparea = N.compress(
                (aperture >= aperture[i]) & (aperture < aperture[i + 1]), area)
        else:
            ap = N.compress((aperture >= aperture[i]) & (aperture < 20.),
                            counts)
            aparea = N.compress((aperture >= aperture[i]) & (aperture < 20.),
                                area)

        #print ap
        #aparea=N.compress((aperture >= aperture[i]) & (aperture < aperture[i+1]),area)
        aveap[i] = N.average(ap)
        aveaperr[i] = pylab.std(ap)
        avearea[i] = N.average(aparea)
        aveareaerr[i] = pylab.std(aparea)
        print "ave sky = %8.4f +/- %8.4f" % (N.average(ap), pylab.std(ap))
        print "ave area = %8.4f +/- %8.4f" % (N.average(aparea),
                                              pylab.std(aparea))
    return aveap, aveaperr, avearea, aveareaerr
Example #11
def neuron_type_alphas(df):
    print "-----------------------------------------------------"
    df2 = df.drop_duplicates(subset=['neuron name', 'neuron type'])
    types = []
    alphas = []
    dists = []
    for neuron_type, group in df2.groupby('neuron type'):
        print "------------"
        print neuron_type
        print "mean alpha", pylab.mean(group['alpha']), '+/-',\
                            pylab.std(group['alpha'], ddof=1)
        print "mean distance", pylab.mean(group['dist']), '+/-',\
                               pylab.std(group['dist'], ddof=1)
        types.append(neuron_type)
        alphas.append(pylab.array(group['alpha']))
        dists.append(pylab.array(group['dist']))
    indices = range(len(types))

    def alphas_chisquare(alphas1, alphas2):
        counts1 = defaultdict(int)
        counts2 = defaultdict(int)
        unique_alphas = set()
        for alpha in alphas1:
            counts1[alpha] += 1
            unique_alphas.add(alpha)
        for alpha in alphas2:
            counts2[alpha] += 1
            unique_alphas.add(alpha)

        f_obs = []
        f_exp = []
        for alpha in sorted(unique_alphas):
            f_obs.append(counts1[alpha])
            f_exp.append(counts2[alpha])

        return chisquare(f_obs, f_exp)

    for idx1, idx2 in combinations(indices, 2):
        print "------------"
        type1, type2 = types[idx1], types[idx2]
        alphas1, alphas2 = alphas[idx1], alphas[idx2]
        dists1, dists2 = dists[idx1], dists[idx2]
        print(type1 + ' vs. ' + type2)
        #print ttest_ind(dist1, dist2, equal_var=False)
        #print mannwhitneyu(dist1, dist2, alternative='two-sided')

        print "alphas ks-test", ks_2samp(alphas1, alphas2)
        print "alphas mann-whitney test", mannwhitneyu(alphas1, alphas2)
        print "alphas earth movers distance", wasserstein_distance(
            alphas1, alphas2)
        print "alphas chi square", alphas_chisquare(alphas1, alphas2)
        print "alphas welchs t-test", len(alphas1), len(alphas2), ttest_ind(
            alphas1, alphas2, equal_var=False)
        print "dists welch's t-test", len(dists1), len(dists2), ttest_ind(
            dists1, dists2, equal_var=False)
Example #12
def main(data):
    ave = pylab.mean(data)
    std = pylab.std(data)

    ave_p_std = ave + std
    ave_m_std = ave - std

    pylab.hist(data, bins=range(0, 111, 5))
    ylims = pylab.ylim()
    pylab.plot([ave_m_std] * 2, ylims, 'r')
    pylab.plot([ave_p_std] * 2, ylims, 'r')
    pylab.grid()
    pylab.title('average = %g, std = %g' % (ave, std))  # reuse the values computed above
    pylab.show()
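A usage sketch with synthetic data; the parameters are invented, chosen so the values fall inside the 0-110 bin range used above:

import pylab
main(pylab.normal(55, 15, 1000))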
Example #13
def readDatDirectory(key, directory):
    global stats
    #Don't read data in if it's already read
    if not key in DATA["mean"]:
        data = {}  # membership is checked below, so no default factory is needed

        #Process the dat files
        for datfile in glob.glob(directory + "/*.dat"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Process the div files
        for datfile in glob.glob(directory + "/*.div"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Iterate through the stats and calculate mean/standard deviation
        for aKey in stats:
            if aKey in data:
                DATA["mean"][key][aKey] = mean(data[aKey], axis=0)
                DATA["median"][key][aKey] = median(data[aKey], axis=0)
                DATA["std"][key][aKey] = std(data[aKey], axis=0)
                DATA["ste"][key][aKey] = std(data[aKey], axis=0) / sqrt(
                    len(data[aKey]))
                DATA["min"][key][aKey] = mean(data[aKey], axis=0) - amin(
                    data[aKey], axis=0)
                DATA["max"][key][aKey] = amax(data[aKey], axis=0) - mean(
                    data[aKey], axis=0)
                DATA["actual"][key][aKey] = data[aKey]
Example #14
def collapse_sims(mort_draws):
	med = pl.mean(mort_draws, axis=0)
	lower = pl.array(sp.mquantiles(mort_draws,axis=0,prob=.025))[0]
	upper = pl.array(sp.mquantiles(mort_draws,axis=0,prob=.975))[0]
	std = pl.std(mort_draws,axis=0)

	return pl.np.core.records.fromarrays([med,lower,upper,std], [('med','<f8'),('lower','<f8'),('upper','<f8'),('std','<f8')])
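A usage sketch, assuming sp is scipy.stats.mstats (the module whose mquantiles matches this call signature) and that rows are posterior draws:

import pylab as pl
import scipy.stats.mstats as sp
draws = pl.normal(0.05, 0.01, (1000, 10))  # 1000 draws x 10 groups
summary = collapse_sims(draws)
print(summary.med, summary.lower, summary.upper)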
Example #15
 def generate_normalized_test_data(self,
                                   channels, 
                                   time_points, 
                                   function, 
                                   sampling_frequency, 
                                   initial_phase=0.0):
     """
     A method which generates a normalized (mu = 0, sigma =1) signal for testing, with
     the specified number of "channels" which are all generated using the given function
     """
    
     #Generate an empty ndarray
     data = numpy.zeros((time_points, channels))
     
     #Compute the values for all channels
     for channel_index in range(channels):
         for time_index in range(time_points):
             data[time_index, channel_index] = function(2.0 * numpy.pi * (channel_index + 1) * (time_index / sampling_frequency + initial_phase))
         current_channel = data[:, channel_index]
         current_channel = (current_channel - pylab.mean(current_channel))/pylab.std(current_channel)
         data[:, channel_index] = current_channel
         
     #Generate a time series build out of the data
     test_data = TimeSeries(input_array = data, 
                            channel_names = [("test_channel_%s" % i) for i in range(channels)],
                            sampling_frequency = sampling_frequency,
                            start_time = initial_phase,
                            end_time = float(time_points) / sampling_frequency + initial_phase)
     
     return test_data
Example #16
    def zeroPaddData(self,desiredLength,paddmode='zero',where='end'):
        #zero-pads the time-domain data; padding is possible at the beginning
        #or at the end, with either Gaussian-noise or constant (mean-value)
        #padding. Might not work for gaussian mode!

        desiredLength=int(desiredLength)
        #escape the function        
        if desiredLength<0:
            return 0

        #calculate the paddvectors        
        if paddmode=='gaussian':
            paddvec=py.normal(0,py.std(self.getPreceedingNoise())*0.05,desiredLength)
        else:
            paddvec=py.ones((desiredLength,self.tdData.shape[1]-1))
            paddvec*=py.mean(self.tdData[-20:,1:])
            
        timevec=self.getTimes()
        if where=='end':
            #timeaxis:
            newtimes=py.linspace(timevec[-1],timevec[-1]+desiredLength*self.dt,desiredLength)
            paddvec=py.column_stack((newtimes,paddvec))
            longvec=py.row_stack((self.tdData,paddvec))
        else:
            newtimes=py.linspace(timevec[0]-(desiredLength+1)*self.dt,timevec[0],desiredLength)
            paddvec=py.column_stack((newtimes,paddvec))
            longvec=py.row_stack((paddvec,self.tdData))
            
        self.setTDData(longvec)
Example #17
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    combined_avg_temp = []
    for year in years:
        avgdailytemp = []
        for city in multi_cities:
            temp_tempdata = climate.get_yearly_temp(city,year)
            if len(avgdailytemp) == 0:
                avgdailytemp = temp_tempdata
            else:
                avgdailytemp += temp_tempdata #= pylab.concatenate((avgdailytemp,temp_tempdata))
        
        avgdailytemp = avgdailytemp / len(multi_cities)
        combined_avg_temp.append(pylab.std(avgdailytemp))
    return pylab.array(combined_avg_temp)  # the docstring promises a pylab array
Example #18
File: ps5.py Project: yunzc/triple-o-2
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    std_dev_list = []
    for year in years:
        daily_sum = 0
        #sum up the daily temperatures for each city
        for city in multi_cities:
            daily_sum += climate.get_yearly_temp(city, year)
        daily_avg = daily_sum / len(multi_cities)  #average across the cities
        #standard deviations of the daily averages for the whole year
        std_dev = pylab.std(daily_avg)
        std_dev_list.append(std_dev)  #add to list
    return pylab.array(std_dev_list)  # return array of the list
Example #19
def _CalcMutualNearestNeighbors(hull_points, all_points):
    all_points_list = list(all_points)
    ds = distance.pdist(list(all_points))
    std_d = p.std(ds)
    
    square_ds = distance.squareform(ds)
    nearest_neighbors = {}
    
    for i, point in enumerate(all_points_list):
        if point not in hull_points:
            continue
        
        my_ds = [(d, j) for j, d in enumerate(square_ds[i])
                 if j != i]
        my_ds.sort()
        nearest_neighbors[point] = set([j for d,j in my_ds[:3]])
    
    no_mutual = set()
    for i, point in enumerate(all_points_list):
        if point not in hull_points:
            continue
        
        no_nbrs = True
        for neighbor_index in nearest_neighbors.get(point, []):
            neighbor = all_points_list[neighbor_index]
            neighbor_set = nearest_neighbors.get(neighbor, [])
            if i in neighbor_set:
                no_nbrs = False
        
        if no_nbrs:
            no_mutual.add(point)
                
    return no_mutual
Example #20
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    # TODO
    multi_cities_std_dev = list()
    for year in years:
        daily_temp = list()
        for day in range(1, get_days(year) + 1):
            tmdate = datetime.datetime(year, 1, 1) + datetime.timedelta(day - 1)
            city_temp = list()
            
            for city in multi_cities:
                city_temp.append(climate.get_daily_temp(city, tmdate.month, tmdate.day, year))
            daily_temp.append(pylab.mean(city_temp))
        multi_cities_std_dev.append(pylab.std(daily_temp))
    return pylab.array(multi_cities_std_dev)
Example #21
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    # TODO
    all_std = pylab.array([])
    for year in years:
        days = len(climate.get_yearly_temp('NEW YORK', year))
        each_year_overall = pylab.array([0] * days)
        for city in multi_cities:
            each_year_city = climate.get_yearly_temp(city, year)
            each_year_overall = each_year_city + each_year_overall
        each_year_avg = each_year_overall / len(multi_cities)
        each_year_std = pylab.std(each_year_avg)
        all_std = pylab.append(all_std, each_year_std)
    return all_std
Example #22
def mpcorr_nospk5(filename, maxtime=10000., pre=0., post=50., path='./'):
    data = numpy.loadtxt(path + 'v' + filename)
    cor5 = []
    f = open(path + filename, 'r')
    f.readline()
    f.readline()
    f.readline()
    f.readline()
    issubth = numpy.ones(data[:, 1].size, dtype=bool)  # defined up front so the mask exists even when there are no spikes
    if f.readline() != '':
        datasp = numpy.loadtxt(path + filename)
        for j in range(datasp.size // 2):
            if datasp.size == 2:
                id0 = numpy.argmin((data[:, 1] - (datasp[j] - pre))**2)
            else:
                id0 = numpy.argmin((data[:, 1] - (datasp[j, 0] - pre))**2)
            issubth[id0:id0 +
                    2 * int((pre + post) / 0.1)] = 0  # if stepsize = 0.1
    for j in range(5):
        mp = data[(data[:, 1] >= j * maxtime / 5.) * (data[:, 1] <
                                                      (j + 1) * maxtime / 5.),
                  0:3:2]
        mp = mp[issubth[int(j * maxtime * 10 / 5.):int((j + 1) * maxtime * 10 / 5.)], :]
        cor5.append(
            miscfunc.corrcoef(mp[mp[:, 0] == 1, 1], mp[mp[:, 0] == 2, 1]))
    cor = pylab.mean(cor5)
    sc = pylab.std(cor5)
    return cor, sc
Example #23
 def update(self,dt,val):
     avrg = Wmavrg.update(self,dt,val)
     std = pylab.std(self.samples_nonone)*self.k # this takes long
     if avrg is not None:
         return avrg,avrg+std,avrg-std
     else:
         return (None,None,None)
Example #24
File: perfplots.py Project: huppd/huppy
def get_times(paths, runs, pattern):
    """ extracts times """
    time_min = []
    time_mean = []
    time_std = []
    fails = []
    for path in paths:
        temptime = []
        fails.append(0.)
        for run in runs:
            tempnew = ex.extract(path + 'output' + str(run),
                                 pattern,
                                 isarray=False)
            if isinstance(tempnew, pl.ndarray):
                if tempnew.size:
                    temptime.append(tempnew[0])
                else:
                    fails[-1] += 1.
            else:
                temptime.append(tempnew)
        time_min.append(min(temptime))
        time_mean.append(pl.mean(temptime))
        time_std.append(pl.std(temptime))
        fails[-1] /= len(runs)
    return time_min, fails, time_mean, time_std
Example #25
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    std_devs = []
    for year in years:
        yearly_temp = []
        for month in range(1, 13):
            for day in range(1, 32):
                # for each day of the year
                city_temps = []
                for city in multi_cities:
                    # in each city
                    try:
                        temp = climate.get_daily_temp(city, month, day, year)
                        city_temps.append(temp)
                    except:
                        pass
                if len(city_temps) > 0:
                    daily_temp = sum(city_temps) / len(city_temps)
                    yearly_temp.append(daily_temp)
        std_devs.append(pylab.std(yearly_temp))

    return pylab.array(std_devs)
Example #26
 def post_lecture(self):
     STD = std(self.Y, 1)
     MM = mean(self.Y, 1)
     TT, self.NN = self.Y.shape
     if self.centred:
         for t in range(0, TT):
             self.Y[t, :] = (self.Y[t, :] - MM[t]) / STD[t]
Example #27
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    std_devs = []
    for year in years:
        yearly_temps = []
        for month in range(1, 13):
            for day in range(1, 32):
                avg_temp = 0
                viable = False
                for city in multi_cities:
                    try:
                        avg_temp += climate.get_daily_temp(
                            city, month, day, year)
                        viable = True
                    except:
                        pass
                if viable:
                    avg_temp /= len(multi_cities)
                    yearly_temps += [avg_temp]
        std_devs += [pylab.std(yearly_temps)]
    return pylab.array(std_devs)
    """for year in years:
Example #28
def flow_rate_hist(sheets):
    ant_rates = []
    weights = []
    for sheet in sheets:
        ants, seconds, weight = flow_rate(sheet)
        ant_rate = seconds / ants
        #ant_rate = ants / seconds
        ant_rates.append(ant_rate)
        weights.append(float(weight))
        #weights.append(seconds)

    weights = pylab.array(weights)
    weights /= sum(weights)

    #print "ants per second"
    print "seconds per ant"
    mu = pylab.mean(ant_rates)
    print "mean", pylab.mean(ant_rates)
    wmean = pylab.average(ant_rates, weights=weights)
    print "weighted mean", wmean
    print "median", pylab.median(ant_rates)
    print "std", pylab.std(ant_rates, ddof=1)
    ant_rates = pylab.array(ant_rates)
    werror = (ant_rates - mu) * weights
    print "weighted std", ((sum(werror ** 2))) ** 0.5
    print "weighted std 2", (pylab.average((ant_rates - mu)**2, weights=weights)) ** 0.5
    pylab.figure()
    pylab.hist(ant_rates)
    pylab.savefig('ant_flow_rates.pdf', format='pdf')
    pylab.close()
Example #29
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual
        city temperatures for the given cities in a given year.
    """
    std_dev = []
    for year in years:
        yearly_temps = None
        for city in multi_cities:
            if yearly_temps is None:
                yearly_temps = climate.get_yearly_temp(city, year)
            else:
                yearly_temp = climate.get_yearly_temp(city, year)
                yearly_temps = pylab.vstack((yearly_temps, yearly_temp))
        if yearly_temps.ndim > 1:
            yearly_temps = pylab.average(yearly_temps, axis=0)
        std_dev.append(pylab.std(yearly_temps))
    return pylab.array(std_dev)
Example #30
 def update(self,t,val):
     oldavg = self.avg
     avrg = Ema.update(self,t,val)
     
     self.__samples.append((t,self.lastvalue))
     self.__samples_nonone.append(self.lastvalue)
     
     if oldavg is None:
         self.first_t = t
         return (None,None,None)
     newavg = avrg
     
     # check limits of timeframe 
     while t - self.__samples[0][0] > self.timeframe and len(self.__samples) > 2:
         _, pv = self.__samples.pop(0)
         del self.__samples_nonone[0]
         #self.variance += (val-pv)*(val-newavg+pv-oldavg)/(self.timeframe) # this seems to use constant number of samples
         #std = math.sqrt(self.variance)
         
     std = pylab.std(self.__samples_nonone)*self.k # this takes long
     
     if avrg is not None:
         return avrg,avrg+std,avrg-std
     else:
         return (None,None,None)
Example #31
def pc_pm_std(data, ndim):
    """
    This is a helper function.
    It returns the direction of the ndim-th principal axis of the data,
    scaled by the standard deviation of the scores along that axis.

    Parameters:
    -----------
    data: `array` (*n*-by-*d*)
        the data on which the principal component analysis is performed.
    ndim: `integer`
        the number of the principal axis on which the analysis is performed.
        **NOTE** this is zero-based, i.e. to compute the first principal
        component, ndim=0

    Returns:
    --------
    std_pc: `array` (1-by-*d*)
        the vector that points in the direction of the *ndim*th principal
        axis, and has the length of the standard deviation of the scores
        along this axis.

    """

    u,s,v = svd(data.T, full_matrices = False)
    direction = u[:, ndim : ndim + 1]
    scale = std(dot(direction.T, data.T))
    return scale * direction.T
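A usage sketch; the function body expects svd, std and dot in its module namespace (likely pulled in from pylab in the original), so numpy equivalents stand in for that here:

from numpy import dot, std
from numpy.linalg import svd
from numpy.random import randn
data = randn(500, 2)
data[:, 1] += 0.8 * data[:, 0]  # correlate the two columns
print(pc_pm_std(data, 0))       # first principal axis, scaled to one std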
Example #32
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual
        city temperatures for the given cities in a given year.
    """

    annual_deviations = []

    for year in years:
        cities_temp = pylab.array(
            [climate.get_yearly_temp(city, year) for city in multi_cities])

        daily_mean = cities_temp.mean(axis=0)
        dev = pylab.std(daily_mean)
        annual_deviations.append(dev)

    return pylab.array(annual_deviations)
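A sketch with a stand-in for the course's Climate class; FakeClimate is hypothetical, invented so the function can run in isolation:

import pylab

class FakeClimate:
    def get_yearly_temp(self, city, year):
        # deterministic toy series: an annual sinusoid, offset per city
        days = pylab.arange(365.)
        return 15 + 10 * pylab.sin(2 * pylab.pi * days / 365.) + len(city) % 3

print(gen_std_devs(FakeClimate(), ['BOSTON', 'SEATTLE'], [2000, 2001]))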
Example #33
def null_models_analysis(models_df):
    print "-----------------------------------------------------"
    df2 = models_df[models_df['model'] != 'neural']
    for model, group in df2.groupby('model'):
        print('***%s***' % model)
        trials = len(group['success'])
        successes = sum(group['success'])
        #success_rate = pylab.mean(group['success'])

        ratios = group['ratio']
        '''
        successes = 0
        trials = 0
        ratios = []
        for (neuron_name, neuron_type), group2 in group.groupby(['neuron name', 'neuron type']):
            group2 = group2.head(n=20)
            successes += sum(group2['success'])
            trials += len(group2['success'])
            ratios.append(pylab.mean(group2['ratio']))
        '''

        print "success rate", float(successes) / float(
            trials), "trials", trials
        print "binomial p-value", binom_test(successes, trials)
        print "neural to %s ratio" % model, pylab.mean(
            ratios), "+/-", pylab.std(ratios, ddof=1)
        print "t-test p-value", len(ratios), ttest_1samp(ratios, popmean=1)
Example #34
File: draws.py Project: tazjel/rad2py
def draw_normal_histogram(x, bins, y_label='', x_label='', title="", body=""):
    "Plot a histogram chart"
    # x are matplotlib pylab arrays, body is a StringIO
    import matplotlib
    matplotlib.use('Agg')  # select the backend before any pyplot state is touched
    import pylab
    # clear graph
    matplotlib.pyplot.clf()
    n, bins1, patches = pylab.hist(x,
                                   bins,
                                   histtype='bar',
                                   facecolor='green',
                                   alpha=0.75)
    #pylab.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
    pylab.ylabel(y_label)
    pylab.xlabel(x_label)
    # add a line showing the expected distribution
    mu = pylab.mean(x)
    sigma = pylab.std(x)
    y = pylab.normpdf(bins, mu, sigma)
    l = pylab.plot(bins, y, 'k--', linewidth=1.5)

    pylab.title(title)

    pylab.grid(True)
    pylab.savefig(body)
    return body.getvalue()
Example #35
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities. 

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual 
        city temperatures for the given cities in a given year.
    """
    
   
    annual_stds_container = []
    for year in years:
        daily_avgs = []
        for month in range(1,13):
            for day in range(1,32):
                try:
                    city_day_data = []
                    for city in multi_cities:
                        city_day_data.append(climate.get_daily_temp(city, month, day,
                            year))
                    daily_avg = pylab.array(city_day_data).mean()
                    daily_avgs.append(daily_avg)
                except AssertionError:
                    pass
        annual_stds_container.append(pylab.std(daily_avgs))
    return pylab.array(annual_stds_container)
Example #36
 def getelnNoise(self,tdData):
     #returns the uncertainty due to electronic noise
     
     #signal preceding the pulse (X and Y channel)
     precNoise=self.getPreceedingNoise(tdData)
     #is this normalization really correct?!
     elnNoise = py.std(precNoise, ddof = 1,axis=0)/py.sqrt(precNoise.shape[0])
     return elnNoise
Example #38
  def __init__(self, fp, delimiter="\t", require_header=False):
    """Load matrix of floats into self.

    File Format:
    ===========
    First line: Column titles. 
    -------
      First column is row variable name, all other columns are sample IDs
      e.g.:
        miRNA_ID	sample_id_1	sample_id_2...
        
    Next lines: rows of float data. Each row represents a single variable.
    -----
      e.g. 
        hsa-let-7e	332.0	690.0...

    Args:
      fp: [*str] like open filepointer to matrix of floats.
      delimiter: str of column delimiter
      require_header: bool if to require first line file header from input
    """
    self.rows = {}

    for line in fp:
      # Skip header comments
      if line[0] == "#": continue
      first_row = line.strip('\n').split(delimiter)
      
      # Verify that the first line looks like column headers.
      if require_header and first_row[0] != "miRNA_ID":
        # Headers are required but the first row seems malformed. Error.
        raise ValueError("Line not valid matrix column table header.")
      elif first_row[0] == "miRNA_ID":
        # This looks a list of sample_IDs as column titles. Set samples.
        self.samples = first_row[1:]
      else:
        # This looks like a data row. Add it.
        self.samples = None
        self._add_row(first_row)
      break # Exit loop after parsing first non-comment line

    # Parse all other lines as rows of floats named by the first column entry.
    for line in fp:
      # WARNING: DO NOT STRIP TRAILING TABS
      row = line.strip('\n').split(delimiter)
      self._add_row(row)

    # Set matrix dimensions from last row.
    self.n = len(row) -1 # first entry is variable name
    self.m = len(self.rows)

    # Compute and store all variable standard deviations.
    self.stds = {}
    for name, values in self.rows.items():
      # Remove None's from values; DO NOT remove zeros!
      std = pylab.std(list(filter(lambda x: x is not None, values)))
      self.stds[name] = std
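A usage sketch with an in-memory file. The enclosing class and its _add_row helper are not shown, so FloatMatrix below is a hypothetical stand-in name:

import io
fp = io.StringIO("miRNA_ID\ts1\ts2\nhsa-let-7e\t332.0\t690.0\n")
m = FloatMatrix(fp)  # hypothetical name for the class this __init__ belongs to
print(m.samples, m.n, m.m)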
Example #39
def xyamb(xytab,qu,xyout=''):

    mytb=taskinit.tbtool()

    if not isinstance(qu,tuple):
        raise Exception('qu must be a tuple: (Q,U)')

    if xyout=='':
        xyout=xytab
    if xyout!=xytab:
        os.system('cp -r '+xytab+' '+xyout)

    QUexp=complex(qu[0],qu[1])
    print('Expected QU = ', qu)   # , '  (',pl.angle(QUexp)*180/pi,')'

    mytb.open(xyout,nomodify=False)

    QU=mytb.getkeyword('QU')['QU']
    P=pl.sqrt(QU[0,:]**2+QU[1,:]**2)

    nspw=P.shape[0]
    for ispw in range(nspw):
        st=mytb.query('SPECTRAL_WINDOW_ID=='+str(ispw))
        if (st.nrows()>0):
            q=QU[0,ispw]
            u=QU[1,ispw]
            qufound=complex(q,u)
            c=st.getcol('CPARAM')
            fl=st.getcol('FLAG')
            xyph0=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
            print('Spw = '+str(ispw)+': Found QU = '+str(QU[:,ispw]))  # +'   ('+str(pl.angle(qufound)*180/pi)+')'
            #if ( (abs(q)>0.0 and abs(qu[0])>0.0 and (q/qu[0])<0.0) or
            #     (abs(u)>0.0 and abs(qu[1])>0.0 and (u/qu[1])<0.0) ):
            if ( pl.absolute(pl.angle(qufound/QUexp)*180/pi)>90.0 ):
                c[0,:,:]*=-1.0
                xyph1=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
                st.putcol('CPARAM',c)
                QU[:,ispw]*=-1
                print('   ...CONVERTING X-Y phase from '+str(xyph0)+' to '+str(xyph1)+' deg')
            else:
                print('      ...KEEPING X-Y phase '+str(xyph0)+' deg')
            st.close()
    QUr={}
    QUr['QU']=QU
    mytb.putkeyword('QU',QUr)
    mytb.close()
    QUm=pl.mean(QU[:,P>0],1)
    QUe=pl.std(QU[:,P>0],1)
    Pm=pl.sqrt(QUm[0]**2+QUm[1]**2)
    Xm=0.5*atan2(QUm[1],QUm[0])*180/pi

    print('Ambiguity resolved (spw mean): Q=',QUm[0],'U=',QUm[1],'(rms=',QUe[0],QUe[1],')','P=',Pm,'X=',Xm)

    stokes=[1.0,QUm[0],QUm[1],0.0]
    print('Returning the following Stokes vector: '+str(stokes))
    
    return stokes
Example #40
def plot_histogram(histogram, html_writer, title='', max_pathway_length=8, xmin=None, xlim=20, error_bars=True, min_to_show=20, legend_loc='upper left'):
    fig = pylab.figure()

    pylab.hold(True)

    reps = 1000
    
    y_offset = 0
    offset_step = 0.007
    colors = {1:'r', 2:'orange', 3:'green', 4:'cyan', 5:'blue', 'Rest':'violet', 'Not first':'k--', 'No known regulation':'grey', 'Activated':'green', 'Inhibited':'r', 'Mixed regulation':'blue'}
    for key, value in histogram.items():
        if len(value) >= min_to_show:
            m = stats.cmedian(value)
            
            sample_std = None
            
            if error_bars:
                sample_vals = []
                i = 0
                while i < reps:
                    samples = []
                    while len(samples) < len(value):
                        samples.append(random.choice(value))
                    sample_vals.append(pylab.median(samples))
                    i += 1
                
                sample_std = pylab.std(sample_vals)
                        
            plotting.cdf(value, label='%s (med=%.1f, N=%d)' % \
                (key, m, len(value)),
                style=colors.get(key, 'grey'), std=sample_std, y_offset=y_offset)
            y_offset += offset_step
            

    xmin = -1 * xlim if xmin is None else xmin
    pylab.xlim(xmin, xlim)
    pylab.xlabel('Irreversability')
    #pylab.xlabel('deltaG')
    pylab.ylabel('Cumulative distribution')
    legendfont = matplotlib.font_manager.FontProperties(size=11)
    pylab.legend(loc=legend_loc, prop=legendfont)
    pylab.title(title)
    pylab.hold(False)
    
    if 'Not first' in histogram:
        print('%s, first vs. non-first ranksum test: ' % title + '(%f, %f)' % stats.ranksums(histogram[1], histogram['Not first']))
    
    if 'Inhibited' in histogram:
        print('%s, inhibited vs. non-regulated ranksum test: ' % title + '(%f, %f)' % stats.ranksums(histogram['Inhibited'], histogram['No known regulation']))
         
    
    #for k1, h1 in histogram.iteritems():
    #    for k2, h2 in histogram.iteritems():
    #        print k1, k2, stats.ranksums(h1, h2)
    
    return fig
Example #41
    def old_bar_graph_WOULD_NEED_FIX(
           img_dst, points, independent, dependent, search):
        from pylab import arange, xticks, xlim, mean, std
        # Lines
        plots = []
        labels = []
        args = {}
        for point in points:
            args[point.get(independent)] = True
        keys = sorted(args)
        ind = {}
        bars = {}
        for k in keys:
            ind[k] = keys.index(k)
            bars[k] = {}
        # Float is necessary
        ticks = arange(len(args), dtype='float')

        for point_type in selected:
            x_val = []
            y_val = []
            for point in selected[point_type]:
                x_val.append(point.get(independent))
                y_val.append(point.get(dependent))

            width = 1./(len(selected)+2)
            for point in selected[point_type]:
                pi = point.get(independent)
                pd = point.get(dependent)
                # bars[independent][screen] = ([points],position,color)
                if point_type not in bars[pi].keys():
                    bars[pi][point_type] = ([pd], \
                                            ind[pi]+width*len(bars[pi]), \
                                            color)
                else:
                    bars[pi][point_type][0].append(pd)
            labels.append(label_type(point_type))

        plotlist = {}
        for pi in bars:
            for pt in bars[pi]:
                if len(bars[pi][pt][0]) > 1:
                    error = std(bars[pi][pt][0])
                else:
                    error = 0
                p = bar(bars[pi][pt][1],mean(bars[pi][pt][0]),width,\
                      color=bars[pi][pt][2], yerr=error)
                plotlist[pt] = p
                ticks[ind[pi]] += width/2
        plots = sorted(plotlist.values())
        keys = sorted(args)
        xticks(ticks, keys)
        xlim(-width,len(ticks))
Example #42
 def __init__(self, v, bin_precision=5):
   self.size = len(v)
   self.mean = pylab.mean(v)
   self.std = pylab.std(v)
   self.min = min(v)
   self.max = max(v)  # was min(v): copy-paste bug
   self.bins = {}
   for x in v:
     key = ("%%.%df" % bin_precision) % x  # was the undefined name `precision`
     self.bins[key] = self.bins.get(key, 0) + 1  # was the undefined name `bins`
Example #43
 def getMeans(self, function, particles):
     m = []
     v = []
     for d in range(function.dim):
         tab = []
         for part in range(len(particles)):
             tab.append(particles[part].pos[d])
         m.append(pl.mean(tab))
         v.append(pl.std(tab))

     return m, v
Example #44
def DFA(data, npoints=None, degree=1, use_median=False):
    """
    computes the detrended fluctuation analysis
    returns the fluctuation F and the corresponding window length L

    :args:
        data (n-by-1 array): the data from which to compute the DFA
        npoints (int): the number of points to evaluate; if omitted the log(n)
            will be used
        degree (int): degree of the polynomial to use for detrending
        use_median (bool): use median instead of mean fluctuation

    :returns:
        F, L: the fluctuation F as function of the window length L

    """
    # max window length: n/4

    #0th: compute integral
    integral = cumsum(data - mean(data))

    #1st: compute different window lengths
    n_samples = npoints if npoints is not None else int(log(len(data)))
    lengths = sort(array(list(set(
            logspace(2,log(len(data)/4.),n_samples,base=exp(1)).astype(int)
             ))))

    all_flucs = []
    used_lengths = []
    for wlen in lengths:
        # compute the fluctuation of residuals from a linear fit
        # according to Kantz&Schreiber, ddof must be the degree of polynomial,
        # i.e. 1 (or 2, if mean also counts? -> see in book)
        curr_fluc = []
        for startIdx in arange(0, len(integral), wlen):
            pt = integral[startIdx:startIdx + wlen]
            if len(pt) > 3 * (degree + 1):
                resids = pt - polyval(polyfit(arange(len(pt)), pt, degree),
                                      arange(len(pt)))
                curr_fluc.append(std(resids, ddof=degree + 1))
        if len(curr_fluc) > 0:
            if use_median:
                all_flucs.append(median(curr_fluc))
            else:
                all_flucs.append(mean(curr_fluc))
            used_lengths.append(wlen)
    return array(all_flucs), array(used_lengths)
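A hedged usage sketch: the scaling exponent is the log-log slope of F(L), about 0.5 for white noise. The function body relies on pylab-style names (cumsum, polyfit, std, ...) being importable in its module:

from numpy import log, polyfit
from numpy.random import randn
F, L = DFA(randn(4096), npoints=12)
print(polyfit(log(L), log(F), 1)[0])  # ~0.5 for uncorrelated noise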
Example #45
 def calcunc(self,tdDatas):
     #not used anymore, older version, should we remove it???
      #tdDatas is a np array of tdData measurements
     if tdDatas.shape[0]==1:
         repeatability=py.zeros((len(tdDatas[0,:,0]),2))
     else:
         repeatability=py.std(py.asarray(tdDatas[:,:,1:3]),axis=0, ddof = 1)/py.sqrt(self.numberOfDataSets)
     #this line is wrong
     elnNoise=tdDatas[0,:,3:]
     uncarray = py.sqrt(repeatability**2 + elnNoise**2)
     
     return uncarray
Example #46
def sigclip(im,nsig):
    # returns min and max values of image inside nsig sigmas
    temp = im.ravel()
    sd = pl.std(temp)
    m = pl.average(temp)
    gt = temp > m-nsig*sd
    lt = temp < m+nsig*sd
    temp = temp[gt*lt]
    mini = min(temp)
    maxi = max(temp)
    
    return mini,maxi
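Usage sketch: clip the display range of a noisy image at 3 sigma (assumes the module imports pylab as pl, as the body requires):

import pylab as pl
im = pl.randn(64, 64)
lo, hi = sigclip(im, 3)
pl.imshow(im, vmin=lo, vmax=hi)
pl.show()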
Example #47
 def print_statistics(self, diffrent_data=None):
     if diffrent_data is None:
         data = self.dictionary
     else:
         data = diffrent_data
     for key in data:
         log.info("Key > {}".format(key))
         data[key] = [float(i) for i in data[key]]
         log.info("\tMax > {}".format(max(data[key])))
         log.info("\tMin > {}".format(min(data[key])))
         log.info("\tAvg > {}".format(pylab.mean(data[key])))
         log.info("\tStd > {}".format(pylab.std(data[key])))
Example #48
def doclength_histogram(path, prefix):
  values = p.array(raw_doc_lengths(prefix).values())
  num_bins = 1000
  bin_upper_limit = p.mean(values) + 3 * p.std(values)
  print "UL: "+ str(bin_upper_limit)
  bins = p.array(range(1,1001)) * (bin_upper_limit/1000.0)
  p.hist(values, bins)
  p.xlabel('Document size (unicode codepoints)')
  p.ylabel('Number of documents')
  p.title('Document Size Histogram for %s' % prefix)
  p.savefig(path, dpi=72)
  p.close()
Example #49
def densityplot(data):
    """
    Plots a histogram of daily returns from data, plus fitted normal density.
    """
    dailyreturns = percent_change(data)
    pylab.hist(dailyreturns, bins=200, normed=True)
    m, M = min(dailyreturns), max(dailyreturns)
    mu = pylab.mean(dailyreturns)
    sigma = pylab.std(dailyreturns)
    grid = pylab.linspace(m, M, 100)
    densityvalues = pylab.normpdf(grid, mu, sigma)
    pylab.plot(grid, densityvalues, 'r-')
    pylab.show()
Example #50
def ar_similar(x):
    """
    returns an ar(1)-process that has the same autocorrelation(1), same
    amplitude (std) and mean as the given 1D vector x

    parameters
    ----------
    x : *array*
        The data to which a similar ar(1)-process is sought

    returns
    -------
    y : *array*
        Result of a random ar(1)-process with same correlation coefficient as x


    """
    dat = x - mean(x)
    alpha = dot(dat[1:], dat[:-1]) / sqrt(dot(dat[1:], dat[1:]) * dot(dat[:-1],
        dat[:-1]))
    res = ar_process(len(dat), alpha).squeeze()
    res = res - mean(res)
    return res / std(res) * std(dat) + mean(x)
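A usage sketch; ar_process is an external helper the function depends on. This only checks that the surrogate reproduces the mean, std and lag-1 autocorrelation of the input:

from numpy import corrcoef, cumsum, mean, std
from numpy.random import randn
x = cumsum(randn(500))  # something with temporal correlation
y = ar_similar(x)
print(mean(y) - mean(x), std(y) - std(x))  # both near 0
print(corrcoef(x[1:], x[:-1])[0, 1], corrcoef(y[1:], y[:-1])[0, 1])  # similar lag-1 correlation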
Example #51
 def printFrameInfo(self):
     """ Prints data about frame times """
     intervalsMS = numpy.array(self.window.frameIntervals)*1000
     self.intervalsms = intervalsMS
     m=pylab.mean(intervalsMS)
     sd=pylab.std(intervalsMS)
     distString= "Mean=%.1fms,    s.d.=%.1f,    99%%CI=%.1f-%.1f" %(m,sd,m-3*sd,m+3*sd)
     nTotal=len(intervalsMS)
     nDropped=sum(intervalsMS>(1.5*m))
     self.droppedframes = ([x for x in intervalsMS if x > (1.5*m)],[x for x in range(len(intervalsMS)) if intervalsMS[x]>(1.5*m)])
     droppedString = "Dropped/Frames = %i/%i = %.3f%%" %(nDropped,nTotal,nDropped/float(nTotal)*100)
     print "Actual vsyncs displayed:",self.vsynccount
     print "Frame interval statistics:", distString
     print "Drop statistics:", droppedString