Example #1
def moving_average(y, window_length):
    """
    Compute the moving average of y with specified window length.

    Args:
        y: a 1-d pylab array with length N, representing the y-coordinates of
            the N sample points
        window_length: an integer indicating the window length for computing
            moving average

    Returns:
        a 1-d pylab array with the same length as y storing the moving average of
        y-coordinates of the N sample points
    """
    mov_avg_list = []  #create list to store moving average
    for i in range(len(y)):
        if i < window_length - 1:  #when we don't have enough previous values
            ith_avg = pylab.average(y[:i + 1])  #average current value with previous values
            mov_avg_list.append(ith_avg)
        else:
            ith_avg = pylab.average(y[i + 1 - window_length:i + 1])  #average of the values in window
            mov_avg_list.append(ith_avg)
    mov_avg = pylab.array(mov_avg_list)  #convert to array
    return mov_avg
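A minimal usage sketch (assuming `import pylab` and the function above): with a window of length 3, the first two outputs average only the samples seen so far.

import pylab

y = pylab.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(moving_average(y, 3))
# expected output, roughly: [1.  1.5 2.  3.  4. ]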
Example #2
def main():
    # read in the data
    args = parseCMD()
    fileName = args.fileN
    mcSteps, Es, Ms, E2 = pl.loadtxt(fileName, unpack=True)

    # get parameters from header of data file
    estFile = open(fileName, 'r')
    estLines = estFile.readlines()
    estFile.close()
    T = float(estLines[0].split()[-1])
    L = float(estLines[1].split()[-1])
    H = float(estLines[2].split()[-1])

    # Compute specific heat
    Cv = (pl.average(E2) - (pl.average(Es))**2)/(L*L*T*T)
    print('C_v: ', Cv)

    # plot energy
    fig1 = pl.figure(1)
    ax = fig1.add_subplot(111)
    pl.ylabel('Energy', fontsize=20)
    pl.xlabel('MC steps', fontsize=20)
    ax.plot(mcSteps,Es, marker='o', linewidth=0,
            markerfacecolor='None', markeredgecolor='Orange')

    # plot macroscopic magnetization
    fig2 = pl.figure(2)
    bx = fig2.add_subplot(111)
    pl.ylabel('Magnetization', fontsize=20)
    pl.xlabel('MC steps', fontsize=20)
    bx.plot(mcSteps,Ms, marker='o', linewidth=0,
            markerfacecolor='None', markeredgecolor='Lime')

    pl.show()
Example #3
def makeplot(filename):
    T0 = 2452525.374416
    P = 0.154525
    
    X = pl.loadtxt(filename)  # pl.load was removed from matplotlib; loadtxt reads the same text format
    x = X[:,0]
    y = X[:,1]
    print(x[0])  # check for HJD faults
    
    #orbital phase
    p = (x-T0)/P
    
    pl.figure(figsize=(6,4))
    pl.subplots_adjust(hspace=0.47,left=0.16)
    
    pl.subplot(211)
    pl.scatter(p,y,marker='o',s=0.1,color='k')
    pl.ylim(-0.06,0.06)
    pl.xlim(pl.average(p)-1.25,pl.average(p)+1.25)
    pl.ylabel('Intensity')
    pl.xlabel('Orbital Phase')
    
    pl.subplot(212)
    f,a = ast.signal.dft(x,y,0,4000,1)  # ast: external astronomy signal package, not the stdlib ast module
    pl.plot(f,a,'k')
    pl.ylabel('Amplitude')
    pl.xlabel('Frequency (c/d)')
    #pl.ylim(yl[0],yl[1])
    
    #pl.vlines(3636,0.002,0.0025,color='k',linestyle='solid')
    #pl.vlines(829,0.002,0.0025,color='k',linestyle='solid')
    #pl.text(3500,0.00255,'DNO',fontsize=11)
    #pl.text(700,0.00255,'lpDNO',fontsize=11)
    pl.ylim(0.0,0.004)
    pl.savefig('%spng'%filename[:-3])
Example #4
def flow_rate_hist(sheets):
    ant_rates = []
    weights = []
    for sheet in sheets:
        ants, seconds, weight = flow_rate(sheet)
        ant_rate = seconds / ants
        #ant_rate = ants / seconds
        ant_rates.append(ant_rate)
        weights.append(float(weight))
        #weights.append(seconds)

    weights = pylab.array(weights)
    weights /= sum(weights)

    #print("ants per second")
    print("seconds per ant")
    mu = pylab.mean(ant_rates)
    print("mean", pylab.mean(ant_rates))
    wmean = pylab.average(ant_rates, weights=weights)
    print("weighted mean", wmean)
    print("median", pylab.median(ant_rates))
    print("std", pylab.std(ant_rates, ddof=1))
    ant_rates = pylab.array(ant_rates)
    werror = (ant_rates - mu) * weights
    print("weighted std", (sum(werror ** 2)) ** 0.5)
    print("weighted std 2", (pylab.average((ant_rates - mu)**2, weights=weights)) ** 0.5)
    pylab.figure()
    pylab.hist(ant_rates)
    pylab.savefig('ant_flow_rates.pdf', format='pdf')
    pylab.close()
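`flow_rate` is defined elsewhere in this project; a driver sketch (assuming `import pylab`, with a hypothetical stand-in that reduces each sheet to an `(ants, seconds)` tuple and weights it by its ant count) shows the expected call shape:

def flow_rate(sheet):
    ants, seconds = sheet  # hypothetical stand-in for the real parser
    return ants, seconds, ants

flow_rate_hist([(120, 60.0), (45, 60.0), (200, 30.0)])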
Example #5
def AT(s_ij=0, s_ik=0, s_jk=0, S=0):
    '''
    Calculates: SEFD = (2k/S) * (s_ij*s_ik)/(s_jk - s_ij*s_ik)
    '''
    kb = 1.38e3  # Boltzmann's constant in Jy m^2 K^-1
    s_ij = pl.average(s_ij)
    s_ik = pl.average(s_ik)
    s_jk = pl.average(s_jk)
    return (2 * kb / S) * (s_ij * s_ik) / (s_jk - s_ij * s_ik)
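A usage sketch (the inputs here are made-up numbers, not from the source): the baseline amplitudes can be arrays, since each one is collapsed with `pl.average` before the SEFD is formed.

import pylab as pl

s_ij = pl.array([0.92, 0.95, 0.93])  # hypothetical baseline amplitudes
s_ik = pl.array([0.88, 0.90, 0.89])
s_jk = pl.array([0.95, 0.97, 0.96])
print(AT(s_ij, s_ik, s_jk, S=14.0))  # SEFD estimate for a 14 Jy calibrator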
Example #7
def visualize():
    sample_rate, snd = load_sample(".\\hh-closed\\dh9.WAV")
    print(snd.dtype)
    data = normalize(snd)
    print(data.shape)
    n = data.shape[0]
    length = float(n)
    print(length / sample_rate, "s")
    timeArray = arange(0, length, 1)
    timeArray = timeArray / sample_rate
    timeArray = timeArray * 1000  #scale to milliseconds
    ion()
    if False:
        plot(timeArray, data, color='k')
        ylabel('Amplitude')
        xlabel('Time (ms)')
        input("press enter")
        exit()
    p = fft(data) # take the fourier transform
    nUniquePts = int(ceil((n+1)/2.0))  # int() so it can be used as a slice index
    print(nUniquePts)
    p = p[0:nUniquePts]
    p = abs(p)
    p = p / float(n) # scale by the number of points so that
                 # the magnitude does not depend on the length
                 # of the signal or on its sampling frequency
    p = p**2  # square it to get the power

    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if n % 2 > 0: # we've got odd number of points fft
        p[1:len(p)] = p[1:len(p)] * 2
    else:
        p[1:len(p) -1] = p[1:len(p) - 1] * 2 # we've got even number of points fft

    print(p)
    freqArray = arange(0, nUniquePts, 1.0) * (sample_rate / n)
    plot(freqArray/1000, 10*log10(p), color='k')
    xlabel('Frequency (kHz)')
    ylabel('Power (dB)')
    input("press enter")

    m = average(freqArray, weights = p)
    v = average((freqArray - m)**2, weights= p)
    r = sqrt(mean(data**2))
    s = var(data**2)
    print("mean freq", m)  #TODO: IMPORTANT: this is currently the mean *power*, not the mean freq.  What we want is mean freq weighted by power
    print("var freq", v)
    print("rms", r)
    print("squared variance", s)
Example #8
def hist_values(parameter, group, strategy, decay_type, label, y_limit=None):
    values = group[parameter]
    values = list(values)
    
    binsize = 0.05
    if 'zoom' in label:
        binsize = 0.01
    
    if y_limit is None:
        y_limit = max(values)
    cutoff = 1
    #weights = np.ones_like(values)/float(len(values))
    
    weights = group['num_lines'] / sum(group['num_lines'])
    weights = np.array(weights)
    
    mu = pylab.average(values, weights=weights)
    sigma2 = pylab.var(values)
    
    pylab.figure()
    pylab.hist(values, weights=weights, bins=np.arange(0, cutoff + binsize, binsize))
    title_items = []
    title_items.append('%s maximum likelihood values %s %s %s' % (parameter, strategy, decay_type, label))
    title_items.append('mean of estimates = %f' % mu)
    title_items.append('variance of estimates = %f' % sigma2)
    title_str = '\n'.join(title_items)
    #pylab.title(parameter + ' maximum likelihood values ' + str(strategy) + ' ' + str(outname))
    #pylab.title(title_str)
    print(title_str)
    pylab.xlabel('%s mle' % parameter, fontsize=20)
    pylab.ylabel('weighted proportion', fontsize=20)
    pylab.xlim((0, 1))
    pylab.ylim((0, y_limit))
    pylab.savefig('repair_ml_hist_%s_%s_%s_%s.pdf' % (parameter, strategy, decay_type, label), format='pdf')
    pylab.close()
Example #9
def show_grey_channels(I):
    K = average(I, axis=2)
    for i in range(3):
        J = zeros_like(I)
        J[:, :, i] = K
        figure(i+10)
        imshow(J)
Example #10
def gen_cities_avg(climate, multi_cities, years):
    """
    Compute the average annual temperature over multiple cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to average over (list of str)
        years: the range of years of the yearly averaged temperature (list of
            int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the average annual temperature over the given
        cities for a given year.
    """

    avg_temp_list = []  #list of the average temperature each year
    for year in years:
        temp_sum_over_cities = 0  #total sum of all the average temperature of each city
        for city in multi_cities:
            #get an array of the temperatures at this city and year
            year_temps = climate.get_yearly_temp(city, year)
            avg_year_temp = pylab.average(year_temps)  #find year average of this city
            temp_sum_over_cities += avg_year_temp
        #average the yearly averages from each city
        avg_over_cities = temp_sum_over_cities / len(multi_cities)
        avg_temp_list.append(avg_over_cities)
    return pylab.array(avg_temp_list)  #return pylab 1-d array
Example #11
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual
        city temperatures for the given cities in a given year.
    """
    std_dev = []
    for year in years:
        yearly_temps = None
        for city in multi_cities:
            if yearly_temps is None:
                yearly_temps = climate.get_yearly_temp(city, year)
            else:
                yearly_temp = climate.get_yearly_temp(city, year)
                yearly_temps = pylab.vstack((yearly_temps, yearly_temp))
        if yearly_temps.ndim > 1:
            yearly_temps = pylab.average(yearly_temps, axis=0)
        std_dev.append(pylab.std(yearly_temps))
    return pylab.array(std_dev)
Example #12
def time_test(trials):
    """
    Finds the average ratio of sine-to-uniform rejection sampling times, using 1000 points per trial.
    """
    ratio = []
    for i in range(int(trials)):
        st = rejection("sine", 1000, 0, pl.pi)[1]
        ut = rejection("uniform", 1000, 0, pl.pi)[1]
        ratio.append(st / ut)
    return pl.average(ratio)
Example #13
def movingaverage(x, L):
    ma = pl.zeros(len(x), dtype='float64')
    # must take the lead-up zone into account (prob slow)
    for i in range(0, L):
        ma[i] = pl.average(x[0:i + 1])

    for i in range(L, len(x)):
        ma[i] = ma[i - 1] + 1.0 / L * (x[i] - x[i - L])

    return ma
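The second loop is a running-sum update, so each step costs O(1) instead of re-averaging the whole window. A quick consistency check (a sketch, assuming `import pylab as pl`) against a direct windowed average:

import pylab as pl

x = pl.rand(50)
L = 5
# direct reference: average of the last L samples (shorter at the start)
ref = pl.array([pl.average(x[max(0, i - L + 1):i + 1]) for i in range(len(x))])
print(pl.allclose(movingaverage(x, L), ref))  # expected: True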
Example #14
    def properties(self, n=100):
        """
        Prints the statistical properties (maximal and average degree) of the results.

        It goes through the labels we have set with `self.set_labels`.

        Parameters:

            `n`: optional
                the number of generated networks used to create the statistics.

        Example::

            >>> r.set_labels("200_020")
            ['200_020']
            >>> r.properties(n=100)
            ====================
            label = 200_020
            number of generated networks = 100
            divs = [0.81104014013270886, 1.0],
            probs=[[ 0.27609856  0.23149258]
             [ 0.23149258  0.26091629]]
               avg max deg =  9.87+-1.0115993937, avg average deg=3.5651+-0.175231419857

        """
        out = Output(os.path.join(self.project_dir, "properties.txt"))
        assert isinstance(n, int) and n > 0
        if not isinstance(self.labels, list) or not self.labels:
            raise NoLabelsError
        for label in self.labels:
            run = self.runs[label]
            doubleline = "=" * 20
            out.write(doubleline)
            out.write("label = %s" % label)

            divs, probs = run["divs"], run["probs"]
            pm = mfng.ProbMeasure(divs, probs)
            K = run.get("K") or run.get("k")  # Older files have k instead of K.
            ipm = pm.iterate(K=K)
            out.write(
                "number of generated networks = %s\ndivs = %s,\nprobs=%s" %
                (n, divs, probs))
            maxdegs = []
            avgdegs = []
            for i in range(n):
                nw = ipm.generate(n=run["n"])
                deg = nw.degree()
                maxdegs.append(max(deg))
                avgdegs.append(pylab.average(deg))
            avgmaxdeg, avgmaxdegsigma = avg_sigma(maxdegs)
            avgavgdeg, avgavgdegsigma = avg_sigma(avgdegs)
            out.write("\navg max deg = %5s+-%5s, avg average deg=%5s+-%5s\n" %
                      (avgmaxdeg, avgmaxdegsigma, avgavgdeg, avgavgdegsigma))
        del out
Example #15
def img2ascii(filename, map_array=None):
    a = imread(filename)

    print("Converting ...")
    # useful only when reading .jpg files.
    # PIL is used for jpegs; converting PIL image to numpy array messes up. 
    # a = a[::-1, :] 

    # convert image to grayscale.
    if len(a.shape) > 2:
        a = 0.21 * a[:,:,0] + 0.71 * a[:,:,1] + 0.07 * a[:,:,2]
    a_r, a_c = a.shape[:2]
    a_max = float(a.max())

    blk_siz = 1 #size of block

    if map_array is None:
        # just linearly map gray level to characters.
        # giving lowest gray level to space character.
        out_file = open(filename + 'lin' + str(blk_siz) + '.txt', 'w')
        print("File %s opened" % out_file.name)
        for i in range(0, a_r, blk_siz*2):
            for j in range(0, a_c, blk_siz):
                b = a[i:i+2*blk_siz, j:j+blk_siz]
                b_char = chr(32+int((1-average(b))*94))
                out_file.write(b_char)
            out_file.write("\n")
        out_file.close()
    else:
        # map based on visual density of characters.
        out_file = open(filename + 'arr' + str(blk_siz) + '.txt', 'w')
        print("File %s opened" % out_file.name)
        for i in range(0, a_r, blk_siz*2):
            for j in range(0, a_c, blk_siz):
                b = a[i:i+2*blk_siz, j:j+blk_siz]
                b_mean = int(average(b)/a_max*(len(map_array)-1))
                b_char = chr(map_array[b_mean])
                out_file.write(b_char)
            out_file.write("\n")
        out_file.close()
        
    print("%s Converted! \nWritten to %s" % (filename, out_file.name))
Example #16
def ScaleDelayError(ScaleNDelay):
    # x1 and x2 must exist in calling namespace
    scale = ScaleNDelay[0]
    delay = ScaleNDelay[1]
    t1 = arange(len(x1))
    t2 = arange(len(x2))
    t1s = scale * t1 + delay
    # resample the scaled x1 onto t2
    x1r = interp(t2, t1s, x1)
    err = x1r - x2
    RMSError = sqrt(average(err**2))
    return RMSError
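Since `x1` and `x2` are looked up in the calling namespace, a typical driver (a sketch, assuming everything lives in one script with `from pylab import *` providing `arange`, `interp`, `average`, and `sqrt`, plus SciPy's `fmin`) defines them at module level and minimizes over `(scale, delay)`:

from pylab import *
from scipy.optimize import fmin

t = arange(400.0)
x2 = sin(0.05 * t)                 # reference signal
x1 = sin(0.05 * (1.02 * t + 3.0))  # same signal on a stretched, shifted clock

scale, delay = fmin(ScaleDelayError, [1.0, 0.0])
print(scale, delay)                # should land near 1.02 and 3.0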
Example #17
def sigclip(im,nsig):
    # returns min and max values of image inside nsig sigmas
    temp = im.ravel()
    sd = pl.std(temp)
    m = pl.average(temp)
    gt = temp > m-nsig*sd
    lt = temp < m+nsig*sd
    temp = temp[gt*lt]
    mini = min(temp)
    maxi = max(temp)
    
    return mini,maxi
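For display scaling this is typically used like the following sketch (assuming `import pylab as pl`); the hot pixel is excluded from the stretch:

import pylab as pl

im = pl.randn(128, 128)          # synthetic image: zero mean, unit sigma
im[0, 0] = 50.0                  # one hot pixel
lo, hi = sigclip(im, 3)
pl.imshow(im, vmin=lo, vmax=hi)  # robust display range, roughly -3..+3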
Example #18
    def plot(self):
        """generate the plot formatting"""
        if self.data is None:
            print("Must load and parse data first")
            sys.exit()
            
        for k, v in self.data.items():
            for type, data in v.items():
                pylab.clf()
                height = int(self.height)
                width = int(self.width)
                pylab.figure()
                ax = pylab.gca()
                ax.set_xlabel('<--- Width = %s wells --->' % str(width))
                ax.set_ylabel('<--- Height = %s wells --->' % str(height))
                ax.set_yticks([0,height/10])
                ax.set_xticks([0,width/10])
                ax.set_yticklabels([0,height])
                ax.set_xticklabels([0,width])
                ax.autoscale_view()
                pylab.jet()
            #color = self.makeColorMap()
            #remove zeros for calculation of average
                flattened = []
                for i in data:
                    for j in i:
                        flattened.append(j)
                flattened = [x for x in flattened if x != 0.0]
                Aver = pylab.average(flattened)
                name = type.replace(" ", "_")
                fave = ("%.2f") % Aver
                pylab.title(k.strip().split(" ")[-1] + " Heat Map (Average = "+fave+"%)")
                ticks = None
                vmax = None
                if type == "Region DR":
                    ticks = [0.0,0.2,0.4,0.6,0.8,1.0]
                    vmax = 1.0
                else:
                    ticks = [0.0,0.4,0.8,1.2,1.6,2.0]
                    vmax = 2.0
                    
                pylab.imshow(data, vmin=0, vmax=vmax, origin='lower')
                pylab.colorbar(format='%.2f %%',ticks=ticks)
                pylab.vmin = 0.0
                pylab.vmax = 2.0
            #pylab.colorbar()     

                if self.savePath is None:
                    save = "%s_heat_map_%s.png" % (name,k)
                else:
                    save = path.join(self.savePath,"%s_heat_map_%s.png" % (name,k))
                pylab.savefig(save)
                pylab.clf()
Example #19
def moving_average(y, window_length):
    """
    Compute the moving average of y with specified window length.

    Args:
        y: a 1-d pylab array with length N, representing the y-coordinates of
            the N sample points
        window_length: an integer indicating the window length for computing
            moving average

    Returns:
        a 1-d pylab array with the same length as y storing the moving average of
        y-coordinates of the N sample points
    """
    out = []
    for i in range(1, len(y) + 1):
        if i < window_length:
            out.append(pylab.average(y[:i]))
        else:
            out.append(pylab.average(y[i - window_length:i]))
    return pylab.array(out)
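An equivalent loop-free formulation (a sketch, not from the source) handles the filling window with a cumulative sum and the steady state with `pylab.convolve`; `pylab.allclose(moving_average(y, w), moving_average_conv(y, w))` should hold for any 1-d `y`.

import pylab

def moving_average_conv(y, window_length):
    # average over all samples seen so far, while the window is still filling
    head = pylab.cumsum(y[:window_length - 1]) / pylab.arange(1, window_length)
    # full-window mean once window_length samples are available
    tail = pylab.convolve(y, pylab.ones(window_length) / window_length, mode='valid')
    return pylab.concatenate([head, tail])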
Example #20
def avg_sigma(numlist):
    """Returns the average and sigma of the listed numbers.

    Parameter:
      `numlist`: list or array

    """
    average = pylab.average(numlist)
    numlist = pylab.array(numlist)
    sum2 = sum((numlist - average)**2)
    sigma = pylab.sqrt(sum2 / (len(numlist) - 1))
    return average, sigma
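Since the denominator is `len(numlist) - 1`, this sigma is the sample standard deviation; a quick check (assuming `import pylab`) is that it matches `pylab.std` with `ddof=1`:

import pylab

nums = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
avg, sigma = avg_sigma(nums)
print(avg, sigma)
print(pylab.std(nums, ddof=1))  # expected to equal sigma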
Example #23
def process_window(sample_rate, data):
    # print "processing window"
    # print data.dtype
    # print data.shape
    n = data.shape[0]
    length = float(n)
    # print length / sample_rate, "s"
    p = fft(data) # take the fourier transform
    nUniquePts = int(ceil((n+1)/2.0))  # int() so it can be used as a slice index
    p = p[0:nUniquePts]
    p = abs(p)
    p = p / float(n) # scale by the number of points so that
                 # the magnitude does not depend on the length
                 # of the signal or on its sampling frequency
    p = p**2  # square it to get the power

    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if n % 2 > 0: # we've got odd number of points fft
        p[1:len(p)] = p[1:len(p)] * 2
    else:
        p[1:len(p) -1] = p[1:len(p) - 1] * 2 # we've got even number of points fft
    freqArray = arange(0, nUniquePts, 1.0) * (sample_rate / n)

    if sum(p) == 0:
        raise Silence
    m = average(freqArray, weights = p)
    v = sqrt(average((freqArray - m)**2, weights= p))
    r = sqrt(mean(data**2))
    s = var(data**2)
    print("mean freq", m)  #TODO: IMPORTANT: this is currently the mean *power*, not the mean freq.  What we want is mean freq weighted by power
    # print freqArray
    # print (freqArray - m)
    # print p
    print("var freq", v)
    print("rms", r)
    print("squared variance", s)
    return [m, v, r, s]
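A driver sketch (assuming everything lives in one script with `from pylab import *` providing `fft`, `ceil`, `average`, `sqrt`, `mean`, `var`, and `arange`, and with a trivial `Silence` exception defined, since neither appears in the snippet): a pure 440 Hz tone should give a weighted mean frequency near 440.

from pylab import *

class Silence(Exception):
    pass

sample_rate = 8000
t = arange(0, 1.0, 1.0 / sample_rate)
m, v, r, s = process_window(sample_rate, sin(2 * pi * 440 * t))
# m should come out close to 440 Hz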
Example #24
 def writeMetricsFile(self, filename):
     '''Writes the lib_cafie.txt file'''
     if self.data is None:
         print("Must load and parse data first")
         sys.exit()
     cafie_out = open(filename,'w')
     for k, v in self.data.items():
         for type, data in v.items():
             flattened = []
             for i in data:
                 for j in i:
                     flattened.append(j)
             flattened = [x for x in flattened if x != 0.0]
             Aver=pylab.average(flattened)
             name = type.replace(" ", "_")
             if 'LIB' in k:
                 if len(flattened)==0:
                     cafie_out.write('%s = %s\n' % (name,0.0))
                     Aver = 0
                 else:
                     cafie_out.write('%s = %s\n' % (name,Aver))
                     Aver=pylab.average(flattened)
     cafie_out.close()
Example #25
def amptime(uv, baseline="0_1", pol="xx", applycal=False):
	'''
	Plots Amp vs Time for a single baseline. 
	'''
	fig = pl.figure()
	ax = fig.add_subplot(111)
	aipy.scripting.uv_selector(uv, baseline, pol)
	for preamble, data, flags in uv.all(raw=True):
		uvw, t, (i, j) = preamble
		ax.plot(t, pl.average(pl.absolute(data)), 'ks', mec='None', alpha=0.2, ms=5)
	hfmt = dates.DateFormatter('%m/%d %H:%M')
	ax.xaxis.set_major_locator(dates.HourLocator())
	ax.xaxis.set_major_formatter(hfmt)
	ax.set_ylim(bottom = 0)
	pl.xticks(rotation='vertical')	
Example #26
 def calcTDData(self,tdDatas):
     #tdDatas is a 3d array of measurements, along with their uncertainties
     #meantdData is the weighted sum of the different measurements
     #meantdData,sumofweights=py.average(tdDatas[:,:,1:3],axis=0,weights=1.0/tdDatas[:,:,3:]**2,returned=True)
     meantdData=py.average(tdDatas[:,:,1:3],axis=0)
     #use error propagation formula
     noise=py.sqrt(py.mean(self.getAllPrecNoise()[0]**2))
     if tdDatas.shape[0]==1:
         rep = py.zeros((len(tdDatas[0,:,0]),2))
     else:
         rep = py.std(tdDatas[:,:,1:3],axis=0, ddof=1)/py.sqrt(self.numberOfDataSets)
     unc = py.sqrt(rep**2+noise**2)
     #unc=py.sqrt(1.0/sumofweights)
     #time axis are all equal
     return py.column_stack((tdDatas[0][:,0],meantdData,unc))       
Example #27
def r_squared(y, estimated):
    """
    Calculate the R-squared error term.

    Args:
        y: 1-d pylab array with length N, representing the y-coordinates of the
            N sample points
        estimated: a 1-d pylab array of values estimated by the regression
            model

    Returns:
        a float for the R-squared error term
    """
    SSres = ((y - estimated)**2).sum()
    SStot = ((y - pylab.average(y))**2).sum()
    return 1 - (SSres / SStot)
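Two sanity checks (a sketch, assuming `import pylab`): a perfect fit gives R-squared = 1.0, and predicting the mean everywhere gives 0.0.

import pylab

y = pylab.array([1.0, 2.0, 3.0, 4.0])
print(r_squared(y, y))                                       # 1.0
print(r_squared(y, pylab.average(y) * pylab.ones(len(y))))   # 0.0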
Example #28
def calculateScores(arr, HEIGHT, WIDTH):
    rowlen, collen = arr.shape
    scores = pylab.zeros((rowlen // INCREMENT, collen // INCREMENT))  # integer division for Python 3
    score = []
    for row in range(rowlen // INCREMENT):
        for column in range(collen // INCREMENT):
            keypassed,size = getAreaScore(row,column,arr, HEIGHT, WIDTH)
            scores[row,column] = round(float(keypassed)/float(size)*100,2)
            if keypassed > 2:
                score.append(round(float(keypassed)/float(size)*100,2))         
            
            #scores[0,0] = 0
            #scores[HEIGHT/INCREMENT -1,WIDTH/INCREMENT -1] = 100
    print(scores)
    
    flattened = []
    for i in score:
        flattened.append(i)
    flattened = [x for x in flattened if x != 0.0]
    average=pylab.average(flattened)
    
    return score, scores, average
Example #29
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
                  robot_type):
    """
    Runs NUM_TRIALS trials of the simulation and returns the mean number of
    time-steps needed to clean the fraction MIN_COVERAGE of the room.

    The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
    speed SPEED, in a room of dimensions WIDTH x HEIGHT.

    num_robots: an int (num_robots > 0)
    speed: a float (speed > 0)
    width: an int (width > 0)
    height: an int (height > 0)
    min_coverage: a float (0 <= min_coverage <= 1.0)
    num_trials: an int (num_trials > 0)
    robot_type: class of robot to be instantiated (e.g. StandardRobot or
                RandomWalkRobot)
    """
    test_result = []

    for t in range(num_trials):
        #         anim = ps2_visualize.RobotVisualization(num_robots, width, height, 0.5)
        test_space = RectangularRoom(width, height)
        list_robots = []
        for item in range(num_robots):
            list_robots.append(robot_type(test_space, speed))
        count_step = 0
        while test_space.getNumCleanedTiles() < (min_coverage *
                                                 test_space.getNumTiles()):
            #             anim.update(test_space, list_robots)
            for item in list_robots:
                item.updatePositionAndClean()
            count_step += 1
#         anim.done()
        test_result.append(count_step)

    return pylab.average(test_result)
Example #30
def plot_effect_num_cashiers_on_cust_wait_time(customersPerMinute=10, numCashiersToTestUpTo=12):
    assert customersPerMinute > 0
    assert numCashiersToTestUpTo > 0
    assert type(customersPerMinute) == type(numCashiersToTestUpTo) == int
    store = Store(customersPerMinute)
    worstCase = []
    averageCase = []
    rangeOfNumCashiers = range(1, numCashiersToTestUpTo + 1)
    for i in rangeOfNumCashiers:
        store.run_simulation(i)
        timeOnLineData = [x.timeOnLine / 60. for x in store.completedCustomers]
        averageCase.append(pylab.average(timeOnLineData))
        worstCase.append(max(timeOnLineData))
        store.reset_store()
    
    pylab.plot(rangeOfNumCashiers, worstCase, label='Longest Time on Line') 
    pylab.plot(rangeOfNumCashiers, averageCase, label = 'Average Time on Line')
    
    pylab.title('Effect of Adding Additional Cashiers \n on Customer Wait Time')
    pylab.xlabel('Number of Cashiers')
    pylab.ylabel('Customer Wait Time in Minutes \n (if store receives {} customers per minute)'.format(store.customersPerMinute))
    pylab.legend()
    pylab.xticks(rangeOfNumCashiers)  
    pylab.show()
Example #31
def gen_cities_avg(climate, multi_cities, years):
    """
    Compute the average annual temperature over multiple cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to average over (list of str)
        years: the range of years of the yearly averaged temperature (list of
            int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the average annual temperature over the given
        cities for a given year.
    """
    yearly_avgs = pylab.array([])
    for year in years:
        cities_temps = pylab.array([])
        for city in multi_cities:
            city_temps = climate.get_yearly_temp(city, year)
            cities_temps = pylab.hstack((cities_temps, city_temps))
        yearly_avg = pylab.average(cities_temps)
        yearly_avgs = pylab.hstack((yearly_avgs, yearly_avg))
    return yearly_avgs
Example #32
if __name__ == '__main__':
    climate_data = Climate('data.csv')  #initialize data
    # Part A.4
    years = pylab.array(TRAINING_INTERVAL)  #the x values of the dataset
    temp_list = []  #use climate class to obtain the temperature on Jan 10th in NYC that year
    for year in TRAINING_INTERVAL:
        temp_list.append(climate_data.get_daily_temp('NEW YORK', 1, 10, year))
    temp = pylab.array(temp_list)  #array of temperature (y) values
    models = generate_models(years, temp, [1])  #fit to degree one
    evaluate_models_on_training(years, temp, models)  #plot AI
    #now want annual temperature
    ann_temp_list = []  #use climate data to obtain annual average temperature
    for year in TRAINING_INTERVAL:
        year_temperatures = climate_data.get_yearly_temp('NEW YORK', year)
        average_year_temp = pylab.average(year_temperatures)
        ann_temp_list.append(average_year_temp)
    ann_temp = pylab.array(ann_temp_list)
    models_ann = generate_models(years, ann_temp, [1])
    evaluate_models_on_training(years, ann_temp, models_ann)  #plot AII
    # Part B
    #national yearly temperatures
    #get national average temperature in the years
    nat_avg_temp = gen_cities_avg(climate_data, CITIES, TRAINING_INTERVAL)
    #generate model
    nat_temp_model = generate_models(years, nat_avg_temp, [1])  #fit with deg 1
    evaluate_models_on_training(years, nat_avg_temp, nat_temp_model)  #plot B
    # Part C
    #take moving average of national average temperature
    mov_nat_temp = moving_average(
        nat_avg_temp, 5)  #window size of 5 on the national temperatures
Example #33
def Froudenumber(flmlname):
    print("\n********** Calculating the Froude number\n")
    # warn user about assumptions
    print(
        "Froude number calculations makes three assumptions: \n i) domain height = 0.1m \n ii) mid point domain is at x = 0.4 \n iii) initial temperature difference is 1.0 degC"
    )
    domainheight = 0.1
    domainmid = 0.4
    rho_zero, T_zero, alpha, g = le_tools.Getconstantsfromflml(flmlname)
    gprime = rho_zero * alpha * g * 1.0  # this has assumed the initial temperature difference is 1.0 degC

    # get list of vtus
    filelist = le_tools.GetFiles('./')
    logs = [
        'diagnostics/logs/time.log', 'diagnostics/logs/X_ns.log',
        'diagnostics/logs/X_fs.log'
    ]
    try:
        # if have extracted information already just use that
        os.stat('diagnostics/logs/time.log')
        os.stat('diagnostics/logs/X_ns.log')
        os.stat('diagnostics/logs/X_fs.log')
        time = le_tools.ReadLog('diagnostics/logs/time.log')
        X_ns = [
            x - domainmid
            for x in le_tools.ReadLog('diagnostics/logs/X_ns.log')
        ]
        X_fs = [
            domainmid - x
            for x in le_tools.ReadLog('diagnostics/logs/X_fs.log')
        ]
    except OSError:
        # otherwise get X_ns and X_fs and t from vtus
        time, X_ns, X_fs = le_tools.GetXandt(filelist)
        f_time = open('./diagnostics/logs/time.log', 'w')
        for t in time:
            f_time.write(str(t) + '\n')
        f_time.close()
        f_X_ns = open('./diagnostics/logs/X_ns.log', 'w')
        for X in X_ns:
            f_X_ns.write(str(X) + '\n')
        f_X_ns.close()
        f_X_fs = open('./diagnostics/logs/X_fs.log', 'w')
        for X in X_fs:
            f_X_fs.write(str(X) + '\n')
        f_X_fs.close()

        # shift so both X_ns and X_fs are the distance of the front
        # from its initial position (mid point of domain)
        X_ns = [x - domainmid for x in X_ns]
        X_fs = [domainmid - x for x in X_fs]

    # Calculate U_ns and U_fs from X_ns, X_fs and t
    U_ns = le_tools.GetU(time, X_ns)
    U_fs = le_tools.GetU(time, X_fs)
    U_average = [[], []]

    # If possible average
    # (if fronts have not travelled far enough then will not average)
    start_val, end_val, average_flag_ns = le_tools.GetAverageRange(
        X_ns, 0.2, domainheight)
    if average_flag_ns == True:
        U_average[0].append(pylab.average(U_ns[start_val:end_val]))

    start_val, end_val, average_flag_fs = le_tools.GetAverageRange(
        X_fs, 0.25, domainheight)
    if average_flag_fs == True:
        U_average[1].append(pylab.average(U_fs[start_val:end_val]))

    # plot
    fs = 18
    pylab.figure(num=1, figsize=(16.5, 11.5))
    pylab.suptitle('Front speed', fontsize=fs)

    pylab.subplot(221)
    pylab.plot(time, X_ns, color='k')
    pylab.axis([0, 45, 0, 0.4])
    pylab.grid('on')
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$X$ (m)', fontsize=fs)
    pylab.title('no-slip', fontsize=fs)

    pylab.subplot(222)
    pylab.plot([x / domainheight for x in X_ns],
               [U / math.sqrt(gprime * domainheight) for U in U_ns],
               color='k')
    pylab.axis([0, 4, 0, 0.6])
    pylab.grid('on')
    pylab.axhline(0.406, color='k')
    pylab.axhline(0.432, color='k')
    pylab.text(3.95,
               0.396,
               'Hartel 2000',
               bbox=dict(facecolor='white', edgecolor='black'),
               va='top',
               ha='right')
    pylab.text(3.95,
               0.442,
               'Simpson 1979',
               bbox=dict(facecolor='white', edgecolor='black'),
               ha='right')
    pylab.xlabel('$X/H$', fontsize=fs)
    pylab.ylabel('$Fr$', fontsize=fs)
    pylab.title('no-slip', fontsize=fs)
    if average_flag_ns == True:
        pylab.axvline(2.0, color='k')
        pylab.axvline(3.0, color='k')
        pylab.text(
            0.05,
            0.01,
            'Average Fr = ' + '{0:.2f}'.format(
                U_average[0][0] / math.sqrt(gprime * domainheight)) +
            '\nvertical lines indicate the range \nover which the average is taken',
            bbox=dict(facecolor='white', edgecolor='black'))

    pylab.subplot(223)
    pylab.plot(time, X_fs, color='k')
    pylab.axis([0, 45, 0, 0.4])
    pylab.grid('on')
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$X$ (m)', fontsize=fs)
    pylab.title('free-slip', fontsize=fs)

    pylab.subplot(224)
    pylab.plot([x / domainheight for x in X_fs],
               [U / math.sqrt(gprime * domainheight) for U in U_fs],
               color='k')
    pylab.axis([0, 4, 0, 0.6])
    pylab.grid('on')
    pylab.axhline(0.477, color='k')
    pylab.text(3.95,
               0.467,
               'Hartel 2000',
               va='top',
               bbox=dict(facecolor='white', edgecolor='black'),
               ha='right')
    pylab.xlabel('$X/H$', fontsize=fs)
    pylab.ylabel('$Fr$', fontsize=fs)
    pylab.title('free-slip', fontsize=fs)
    if average_flag_fs == True:
        pylab.text(
            0.05,
            0.01,
            'Average Fr  = ' + '{0:.2f}'.format(
                U_average[1][0] / math.sqrt(gprime * domainheight)) +
            '\nvertical lines indicate the range \nover which the average is taken',
            bbox=dict(facecolor='white', edgecolor='black'))
        pylab.axvline(2.5, color='k')
        pylab.axvline(3.0, color='k')

    pylab.savefig('diagnostics/plots/front_speed.png')
    return
Example #35
            requestor.start_job(workers)
        worker_rep.append(worker_avg)
        requestor_rep.append(requestor_avg)
        time.append(x + 1)
        # Add new workers
        for y in range(random.randint(1, 20)):
            workers.append(Worker(x + 1, getAverageRep(workers)))
        for y in range(random.randint(1, 5)):
            requestors.append(Requestor(x + 1, getAverageRep(requestors)))

        
    #Create lines for plot of average overall worker and requestor reputation over time
    work_line.set_xdata(time)
    work_line.set_ydata(worker_rep)
    req_line.set_ydata(requestor_rep)
    req_line.set_xdata(time)
    ax.relim()
    ax.autoscale_view()
    plt.draw()
    plt.show()
    
    #Plot histogram of all worker reputations
    P.figure()
    all_reputations = [x.get_reputation() for x in workers]
    sigma = P.std(all_reputations)
    mu = P.average(all_reputations)
    n, bins, patches = P.hist(all_reputations, 20, density=True,
                              histtype='step', cumulative=True)
    # normpdf was removed from matplotlib; compute the normal pdf directly
    y = P.exp(-0.5 * ((bins - mu) / sigma)**2) / (sigma * P.sqrt(2 * P.pi))
    P.plot(bins, y)
    
    
Example #36
#x = (x - T0) / P

ft = []
date = []
peaks = []

f0 = 3000
f1 = 4000

for i in range(0, N, int(fitlength / 2.0)):

    if i + fitlength / 2.0 <= len(x):
        print('somewhere')

        date.append(pl.average(x[i:i + fitlength]))
        f, a = ast.signal.dft(x[i:i + fitlength], y[i:i + fitlength], f0, f1, 1)
        ft.append(a)
        #sort,argsort = sci.fastsort(a)
        #peaks.append(f[argsort[-1]])

        # / len(x[i:i+fitlength]))
        print(i, i + fitlength)
    else:
        print('finally')
        #x = fitwave(y[i:len(t)+1],t[i:len(t)+1],freq)
        f, a = ast.signal.dft(x[i:len(x) + 1], y[i:len(x) + 1], f0, f1, 1)
        ft.append(a)
        #sort,argsort = sci.fastsort(a)
        #peaks.append(f[argsort[-1]])
        date.append(pl.average(x[i:-1]))  # / len(x[i:-1]))
Example #37
 def plot_sigTau_driven(self,th1_cs,slip,figNum=11,plotBekker=False):
     th_m = self._thm
     th2 = self._th2
     r = self._radius
     b = self._width
     k1 = self._k1
     k2 = self._k2
     n = self._n
     phi = self._phi
     K = self._K
     c = self._coh
     
     incr = (th1_cs - th2) / 100.0    # plot increment
     th_arr = py.arange(0,th1_cs, incr) # find sigma, tau at these discrete vals
     sig_arr = py.zeros(len(th_arr))
     tau_arr = py.zeros(len(th_arr))
     slip_arr = py.zeros(len(th_arr))
     
     for idx in range(0,len(th_arr)):
         th = th_arr[idx]
         if(th <= th_m):
             # we're in the bottom region
             sig_curr = sig_2(th,th_m,th1_cs,th2,r,b,n,k1,k2)
             tau_curr = tau_d2(th,th_m,th1_cs,th2,r,b,n,k1,k2,c,K,phi,slip)
             slip_j = jdriven(th,th1_cs,r,slip)
             sig_arr[idx] = sig_curr
             tau_arr[idx] = tau_curr
             slip_arr[idx] = slip_j
             
         else:
             # we're in the top region
             sig_curr = sig_1(th, th1_cs,r,b,n,k1,k2)
             tau_curr = tau_d1(th,th1_cs,r,b,n,k1,k2,c,phi,K,slip)
             slip_j = jdriven(th,th1_cs,r,slip)
             sig_arr[idx] = sig_curr
             tau_arr[idx] = tau_curr
             slip_arr[idx] = slip_j
             
     
     if( self._plots):        
         fig = plt.figure()
         ax = fig.add_subplot(211,title='Fig.'+str(figNum) )
         ax.plot(radToDeg(th_arr),sig_arr,radToDeg(th_arr),tau_arr,linewidth=1.5)
         ax.set_xlabel('theta [deg]')
         ax.set_ylabel('stress [psi]')
         ax.legend((r'$\sigma$',r'$\tau$'))
         ax.grid(True)
         # can also plot Bekker's solution
         if(plotBekker):
             th_bek, sig_bek, tau_bek = self.get_sigTau_Bekker_driven(slip)
             ax.plot(radToDeg(th_bek),sig_bek, radToDeg(th_bek), tau_bek, linewidth=1.5)
             ax.legend((r'$\sigma$',r'$\tau$',r'$\sigma_bek$',r'$\tau_bek$'))
             
         # take a look at what I'm using for slip displacement also
         ax = fig.add_subplot(212)
         ax.plot(radToDeg(th_arr),slip_arr,linewidth=1.5)
         ax.set_xlabel('theta [deg]')
         if( self._units == 'ips'):
             ax.set_ylabel('slip disp.[in]')
         else:
             ax.set_ylabel('slip disp.[m]')
         ax.grid(True)
         
         # polar plots
         fig=plt.figure()
         ax=fig.add_subplot(111,projection='polar')
         ax.plot(th_arr,sig_arr,'b',linewidth=1.5)
         ax.plot(th_arr,tau_arr,'r--',linewidth=1.5)
         # fix the axes
         ax.grid(True)
         if( self._units == 'ips'):
             leg = ax.legend((r'$\sigma$ [psi]',r'$\tau$'))
         else:
             leg = ax.legend((r'$\sigma$ [Pa]',r'$\tau$'))
         leg.draggable()
         ax.set_theta_zero_location('S')
         # also, draw the tire
         polar_r_offset = py.average(sig_arr)
         theta = py.arange(0.,2.*py.pi+0.05,0.05)
         tire_pos = py.zeros(len(theta))
         ax.plot(theta,tire_pos,color='k',linewidth=1.0)
         ax.set_rmin(-polar_r_offset)
         ax.set_title(r'driven wheel stresses, $\theta_1$ = %4.3f [rad]' %th1_cs)
         ax.set_thetagrids([-10,0,10,20,30,40,50,60])
Example #38
#print '\nDone...\n'



# bin spectra together in 0.1 phase bins
im3 = []
klist = []
k = 0
temp = pl.zeros(len(pf.getdata(ff[0])[xx]),dtype=float)

for i in range(0,100):
    for j in range(len(ff)):
        if phase[j] >= i*0.01 and phase[j] < (i+1)*0.01:
            temp += pf.getdata(ff[j])[xx] - pl.median(pf.getdata(ff[j])[xx])  # accumulate spectrum j into this phase bin
            k += 1
    ave = pl.average(temp)
    im3.append(temp)
    temp = pl.zeros(len(pf.getdata(ff[0])[xx]),dtype=float)
    klist.append(k)
    k = 0

#print sum(klist)
#print len(klist)




pl.figure()
pl.gray()
pl.imshow(im3, interpolation='nearest', aspect='auto',cmap=pl.cm.gray_r,extent=(6500,6625,1,0))
#ax = pl.axes()
Example #39
# try to flatten lightcurves using Fourier Filter

import scipy.fft as sci  # fft/ifft live here, not in scipy.signal
import pylab as pl
import os


X = pl.loadtxt('speclc_Ha.dat')  # pl.load was removed from matplotlib
x = X[:,0]
y = X[:,1]



fft = sci.fft(y-pl.average(y))

l = len(fft)
dt = x[1]-x[0]

# kill the negative frequencies
#fft[l/2:] = 0.0

# kill the low frequency bits > ~ 1hour
k = int(24.0*l*dt)
print(k)

fft[0:5] = 0.0
fft[-5:] = 0.0

yy = sci.ifft(fft)

Example #40
    climate = Climate('data.csv')
    pla_training_years = pylab.array(TRAINING_INTERVAL)
    pla_testing_years = pylab.array(TESTING_INTERVAL)

    # Part A.4 I
    temps = []
    for year in TRAINING_INTERVAL:
        temp = climate.get_daily_temp('NEW YORK', 1, 10, year)
        temps.append(temp)
    pla_temps = pylab.array(temps)
    models = generate_models(pla_training_years, pla_temps, [1])
    evaluate_models_on_training(pla_training_years, pla_temps, models)
    # Part A.4 II
    temps = []
    for year in TRAINING_INTERVAL:
        temp = pylab.average(climate.get_yearly_temp('NEW YORK', year))
        temps.append(temp)
    pla_temps = pylab.array(temps)
    nyc_avg_models = generate_models(pla_training_years, pla_temps, [1])
    evaluate_models_on_training(pla_training_years, pla_temps, nyc_avg_models)

    # Part B
    cities_avg = gen_cities_avg(climate, CITIES, pla_training_years)
    avg_models = generate_models(pla_training_years, cities_avg, [1])
    evaluate_models_on_training(pla_training_years, cities_avg, avg_models)
    print('avg_models', avg_models)

    # Part C
    cities_avg = gen_cities_avg(climate, CITIES, pla_training_years)
    mvg_avg = moving_average(cities_avg, 5)
    mvg_avg_models = generate_models(pla_training_years, mvg_avg, [1])
Example #41
outer_pass2.append( pl.arange(len(fib.flux))*0 )
outer_pass2.append( pl.arange(len(fib.flux))*0 )

n = 100
tmp0 = fibers[0]
tmpn = fibers[n]
fibers[0] = tmpn
fibers[n] = tmp0

stack_x = fibers[0].xaxis
stack_f = fibers[0].flux

for f in fibers:

# FINDS THE EMISSION LINES, WHERE FLUX IS ABOVE THE AVERAGE
    flx_above_medi0 = pl.find( f.flux > pl.average(f.flux) )
    flx_above_medi1 = []

# FINDS THE EMISSION THAT HAVE MORE THAN 5 PIXELS ABOVE
# AVERAGE FLUX. THEN RECORDS THE AVERAGE X-COORD FOR THE LINE.
    tmp = [ flx_above_medi0[0] ]
    for i in range(1,len(flx_above_medi0)):
        if flx_above_medi0[i] - tmp[-1] < 2: tmp.append( flx_above_medi0[i] )
        else:
            if len(tmp)>=5: f.xpeaks.append( pl.average(tmp) )
            tmp = [ flx_above_medi0[i] ]

    if f==fibers[0]: continue

# NOW WE NEED TO FIND THE RESCALING FACTOR (s) AND OFFSET (m)
    f.s = pl.average( (pl.array(fibers[0].xpeaks)-fibers[0].xpeaks[0]+1) / (pl.array(f.xpeaks)-f.xpeaks[0]+1) )
Example #42
    def plot_smf_with_fit(sp, phi, elo, ehi, color_i):

        inds = phi > 0

        x_data = smfdata.lmass[inds]
        y_data = phi[inds]
        elo_data = elo[inds]
        ehi_data = ehi[inds]

        sp.errorbar(x_data,
                    y_data,
                    xerr=0.25 / 2,
                    yerr=[elo_data, ehi_data],
                    ls='',
                    marker='o',
                    mew=2,
                    ms=11,
                    mfc='none',
                    mec=color_i,
                    ecolor=color_i,
                    zorder=10)

        ###  fitting toy models
        scale_factors = pylab.zeros(simulation.n_timesteps)
        chi2s = pylab.zeros(simulation.n_timesteps)

        log_errors = (pylab.log10(y_data / (y_data - elo_data)) + pylab.log10(
            (y_data + ehi_data) / y_data)) / 2.

        weights = 1. / ((elo_data + ehi_data) / 2.)**2
        weights = 1. / log_errors**2

        bestfit_models = numpy.zeros((simulation.n_timesteps, len(x_data)))

        for i_model in range(simulation.n_timesteps):

            y_model = simulation.SMFs[i_model]
            y_model = numpy.interp(x_data, simulation.lmassbars, y_model)

            numer = pylab.sum(y_data * weights)
            denom = pylab.sum(y_model * weights)

            scale = numer / denom
            scale_factors[i_model] = scale
            bestfit_models[i_model] = y_model * scale

            chi2s[i_model] = pylab.sum(
                (pylab.log10(y_data) - pylab.log10(y_model * scale))**2 /
                log_errors**2)

        Pchi = 1. / chi2s**2
        Pchi = pylab.exp(-0.5 * chi2s)

        ###  plotting best-fit model SMF
        i_best = pylab.find(Pchi == Pchi.max())[0]
        sp.plot(x_data, bestfit_models[i_best], color='r', lw=1.5)

        ###  plotting marginalized best-fit SMF
        best_smf = pylab.average(bestfit_models, axis=0, weights=Pchi)

        sp.plot(x_data, best_smf, color='k', lw=5, zorder=1)
        sp.plot(x_data, best_smf, color='gray', lw=2, zorder=1)

        ###  plotting stats
        best_f_icl = pylab.average(simulation.get_f_ICL(),
                                   axis=0,
                                   weights=Pchi)

        ###  plotting info text
        t = 'f$_{ICL}$ = %.1f%%' % (best_f_icl * 100)
        t += '\nN$_{initial}$ = %i' % simulation.ngal_initial
        t += '\nN$_{best-fit}$ = %i' % pylab.average(
            simulation.ngal_timesteps, axis=0, weights=Pchi)
        t += ' (%.1f%%)' % (100. * pylab.average(
            simulation.ngal_timesteps, axis=0, weights=Pchi) /
                            simulation.ngal_initial)

        sp.text(0.03,
                0.03,
                t,
                horizontalalignment='left',
                verticalalignment='bottom',
                transform=sp.transAxes)
Example #43
# resample zwift power onto edge
CrossPlotFig = plt.figure()
sc = plt.scatter(edge_power_x, zwift_power_r_r, s=5, c=base_t, \
            cmap=plt.get_cmap('brg'), edgecolors='face' )
plt.colorbar(orientation='horizontal')
plt.title('Infocrank Vs PowerTap P1 Over Time (sec)\n(delay removed)')
plt.xlabel('PowerTap P1 (w)')
plt.ylabel('Infocrank via Zwift (w)')
plt.grid(b=True, which='major', axis='both')
a = plt.axis()
plt.axis([0, a[1], 0, a[3]])
plt.show()

#
#   linear regression
#
from pylab import polyfit, average, ones, where, logical_and, nonzero
ii      = nonzero( logical_and( base_t>=0,      \
                   logical_and(edge_power_x>50,   \
                               edge_power_x<1000) ))
x = edge_power_x[ii]
y = zwift_power_r_r[ii]
coef = polyfit(x, y, deg=1)
slope = coef[0]
offset = coef[1]
print('slope = %5.3f, offset = %i' % (slope, offset))
y_fit = slope * x + offset
color = average(edge_t[ii]) * ones(len(edge_t[ii]))
plt.plot(x, y_fit, 'k-')
plt.show()
Example #44
    def nvnReportPipe(self):  #Executes functions that generate result.html file
        try:
            steps = int(self.paraSteps.get())
            hrr = int(self.paraHrr.get())
        except:
            self.printer(
                "You need to enter integer values for HRR and step parameters.\n"
            )
        if self.listbox.curselection() != ():
            query = self.genelist[int(self.listbox.curselection()[0])][0]

            #### Plot expression profile function
            try:
                plotDatabase = codecs.open(
                    open('dbconf.txt', 'r').read().rstrip().split(".")[0] +
                    ".plt",
                    mode='r',
                    encoding='ASCII',
                    errors='ignore').readlines()
                ticka = [""] + plotDatabase[0].replace(
                    ",", "\n").lstrip().rstrip().split("\t")
                for i in plotDatabase:
                    if query in i:
                        query = i
                data = query.split()[1:]
                temp = []
                for i in range(len(data)):
                    # a list is needed in Python 3: average() cannot consume a map iterator
                    vals = [float(v) for v in data[i].replace("-", "\t").rstrip().split()]
                    temp.append([vals, average(vals)])
                fig = plt.figure(figsize=(12, 7))
                ax = fig.add_subplot(111)
                plt.subplots_adjust(left=0.1, right=0.97, top=0.93, bottom=0.3)
                ax.set_ylabel("Signal value")
                ax.set_title(query.split()[0])
                ax.grid(True)
                plt.xticks(range(len(ticka) + 1),
                           ticka,
                           rotation=90,
                           fontsize="small",
                           horizontalalignment="center")
                ax.plot([0], [0])
                crossX = []
                crossY = []
                for i in range(len(temp)):
                    ax.plot([i + 1] * len(temp[i][0]), temp[i][0], "g.")
                    crossX.append([i + 1])
                    crossY.append(temp[i][1])
                ax.plot(crossX, crossY, "-ro")
                ax.plot([i + 2], [0])
                canvas = FigureCanvasAgg(fig)
                canvas.print_figure("profile.png")
                plt.clf()
            except:
                self.printer(
                    "Failed to generate an expression profile of your gene of interest.\nThe expression matrix used for plotting of expression profiles must be present and named "
                    + open('dbconf.txt', 'r').read().rstrip().split(".")[0] +
                    ".plt!")
            ###Call network creator
            try:
                networkViewer.makeNetwork(query.split()[0], steps, hrr)
            except:
                self.printer(
                    "Failed to generate a co-expression network of your gene of interest.\nThe HRR network file used must be present and named "
                    + open('dbconf.txt', 'r').read().rstrip().split(".")[0] +
                    ".hrr!")
            ### Calculate PCC of a gene to all genes in database
            try:
                query = self.queries[int(
                    self.listbox.curselection()[0])].split("\t")
                expVector = numpy.array([float(v) for v in query[5:]])
                nomi = expVector - (numpy.sum(expVector) / len(expVector))
                denomi = numpy.sqrt(numpy.sum(nomi**2))
                rValues = numpy.dot(self.nominator, nomi) / numpy.dot(
                    self.denominator, denomi)
                displayList = []
                for i in range(len(rValues)):
                    displayList.append([rValues[i], self.annoDict[i]])
                displayList.sort(reverse=True)
            except:
                displayList = []
                self.printer(
                    "Failed to calculate Pearson correlation co-efficient list.\n"
                )

            ###Create html document with results
            header = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">\n<html>\n<head>\n<!-- blarg -->\n</head>\n<body>\n'
            try:
                header += '<big><b>Summary page for: %s</b></big>\n<br><b>Expression profile:</b>\n<IMG SRC="profile.png"><br>\n' % (
                    displayList[0][1].split()[0] + "\t" +
                    displayList[0][1].split()[1])
            except:
                pass
            header += '<b>HRR based co-expression network:</b>\nGreen, orange and red edges represent HRR values of %s, %s and %s, respectively.</b><br>\n' % (
                int(hrr) // 3, (int(hrr) // 3) * 2, hrr)
            header += '<embed src="network.svg" width="1200" height="1200" type="image/svg+xml" pluginspage="http://www.adobe.com/svg/viewer/install/" />\n<br>'
            #header += '<iframe src="network.svg" width="1500" height="1500">\n</iframe>\n'
            header += "<br><b>MapMan ontology analysis of the above network:</b>\n"
            try:
                header += open("mapRes.mapman", "r").read()
            except:
                pass
            header += "\n<br><b>Pearson correlation based co-expression analysis:</b>\n<pre>"

            v = open("result.html", "w")
            v.close()
            v = open("result.html", "a")
            v.write(header)
            for i in range(len(self.annoDict)):
                v.write(
                    str(displayList[i][0])[:6] + "\t" + displayList[i][1] +
                    "\n")
            v.write("</pre>")
            v.close()
            self.printer(
                "Probeset specific result calculated and available in result.html file.\n"
            )
Example #45
def xforce_stat(sf):
  stat = fluidity_tools.stat_parser(sf)
  time_avg_xforce = pylab.average(stat["0"]["Velocity"]["force%1"][-20:])
  return time_avg_xforce
Example #46
sa = []

for f in files:
    print('Reading %s ' % f)
    name, eph = f.split()  # the string module's split() was removed in Python 3
    T0 = ephemeris[eph]
    P = 0.154525
    
    X = pl.load(name)
    x = (X[:,2] - T0)/P
    xx.append(x - int(x[0]))
    aa.append(X[:,0])
    
    # let phase at 0.8 -> 1.0
    tpp = X[:,1]
    tpp -= tpp[0]  # shift so the first phase point sits at zero
    #tpp += 1.0
    
    pp.append(tpp)
    sa.append(X[:,3])
    sp.append(X[:,4])
    

# now sort observations in terms of orbital phase
xx = pl.array([i for i in pl.flatten(xx)])
pp = pl.array([i for i in pl.flatten(pp)])
aa = pl.array([i for i in pl.flatten(aa)])
sa = pl.array([i for i in pl.flatten(sa)])
sp = pl.array([i for i in pl.flatten(sp)])

arg = xx.argsort()
print runlen
runlen = max(runlen)+0.4





##############################################################################################
pl.figure(figsize=(6,4))

pl.subplots_adjust(hspace=0.47,left=0.16)

pl.subplot(211)
pl.scatter(x1,y1,marker='o',s=0.1,color='k')
pl.ylim(-0.06,0.06)
pl.xlim(pl.average(x1)-runlen/2,pl.average(x1)+runlen/2)
pl.ylabel('Intensity')
pl.xlabel('Orbital Phase')

pl.subplot(212)
#f,a = ast.signal.dft(x1,y1,0,4000,1)
pl.plot(f1,a1,'k')
pl.ylabel('Amplitude')
pl.xlabel('Frequency (c/d)')
pl.ylim(0.0,0.007)
pl.vlines(3560,0.002,0.0025,color='k',linestyle='solid')
pl.vlines(950,0.002,0.0025,color='k',linestyle='solid')
pl.text(3425,0.00255,'DNO',fontsize=11)
pl.text(750,0.00255,'lpDNO',fontsize=11)
pl.ylim(0.0,0.007)
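
ast.signal.dft (commented out above) is an external helper; a brute-force sketch of an equivalent amplitude spectrum for unevenly sampled data, assuming the same (t, y, f_lo, f_hi, df) style of arguments:

import pylab as pl

def dft_amplitude(t, y, f_lo, f_hi, df):
    # evaluate the DFT amplitude on a grid of trial frequencies
    freqs = pl.arange(f_lo, f_hi, df)
    amps = pl.zeros(len(freqs))
    for i in range(len(freqs)):
        arg = 2.0 * pl.pi * freqs[i] * t
        re = pl.sum(y * pl.cos(arg))
        im = pl.sum(y * pl.sin(arg))
        amps[i] = 2.0 * pl.sqrt(re**2 + im**2) / len(t)
    return freqs, amps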
Beispiel #48
0
def completeness():  # measure completeness on the final image
    #combinepath=bcdpath+'pbcd/Combine/output_apex_step2'
    combinepath=bcdpath+'pbcd/Combine/apex_1frame_step2'
    os.chdir(combinepath)

    file='mosaic_extract_final.tbl'
    input=open(file,'r')
    xgal=[]#positions of previous detections with snr > 3
    ygal=[]
    fap4gal=[]
    for line in input:
	if line.find('#') > -1: #skip comment lines
	    continue
	if line.find('\\') > -1: #skip header lines containing '\'
	    continue
	if line.find('|') > -1: #skip table-layout lines containing '|'
	    continue
	t=line.split()
	xgal.append(float(t[8]))
	ygal.append(float(t[10]))
	fap4gal.append(float(t[28]))
    input.close()
    xgal=N.array(xgal,'f')
    ygal=N.array(ygal,'f')


    fsimall=[]
    matchflagsimall=[]
    f2all=[]
    f3all=[]
    f4all=[]
    deblendsimall=[]
    snrsimall=[]

    myminmag=24.75
    mymaxmag=27.4
    myfmin=10.**((25.-mymaxmag)/2.5)#ZP=25
    myfmax=10.**((25.-myminmag)/2.5)#ZP=25


    #below is loop to create image w/artificial sources, extract, and compare

    for k in range(100):
	    createflag=1.#create image w/artificial sources?
	    detectflag=1.#detect sources in image?
	    if createflag > 0.1:
		    xnoise=[]
		    ynoise=[]
		    infile=open('noisecoords.dat','r')
		    for line in infile:
			    t=line.split()
			    xnoise.append(float(t[0]))
			    ynoise.append(float(t[1]))
	    infile.close()
	
	
	    nstar=10
    
	    xsim=N.zeros(nstar,'d')
	    ysim=N.zeros(nstar,'d')
	    msim=N.zeros(nstar,'d')
	    outfile=open('stars.coords.dat','w')
	    for i in range(nstar):
	    #j=int(round(1.*len(xnoise)*random.uniform(0,1)))

	    #xsim[i]=xnoise[j]
	    #ysim[i]=ynoise[j]
		    j=0
		    for j in range(10000):
			    xt=int(round(random.uniform(5.,125.)))
			    yt=int(round(random.uniform(5.,140.)))
			    d=pylab.sqrt((xt-xgal)**2+(yt-ygal)**2)#make sure sim galaxies are not near real galaxies
			    if min(d) > -1.:  # NB: always true, so the real-galaxy separation check is effectively disabled
				    d2=pylab.sqrt((xt-xsim)**2+(yt-ysim)**2)#make sure sim points are not on top of each other
				    if min(d2) > 5.:
					    print i,'got a good point after ',j,' tries',xt,yt
					    break
				    j=j+1
		    xsim[i]=xt
		    ysim[i]=yt
		    ftmp=random.uniform(myfmin,myfmax)  # random flux; a separate name avoids shadowing the realization counter k
		    msim[i]=25.-2.5*pylab.log10(ftmp)
	    #print k,msim[i] 
		    s='%8.2f %8.2f %8.2f \n' % (xsim[i],ysim[i],msim[i])
		    outfile.write(s)
	    outfile.close()
	      
	
	#os.system('rm stars.coords.dat')
	#iraf.starlist('stars.coords.dat',nstars=100,spatial='uniform',xmax=130,ymax=145,luminosity='uniform',minmag=22.,maxmag=30.0,mzero=22.0,sseed='INDEF',power=0.6,alpha=0.74,lseed='INDEF')
	
    
	    os.system('rm mosaic-completeness.fits')
        #iraf.mkobjects(input='mosaic_minus_median_extract.fits',output='mosaic-completeness.fits',objects='stars.coords.dat',radius=1.13,magzero=25.,background=0.,gain=5.,rdnoise=0.,poisson='no')#don't convolve w/PRF
	    #os.system('cp ../cal/MIPS24_PRF_HD_center.fits .')#convolve star w/SSC PRF
	    os.system('cp ../cal/mips24_prf_mosaic_2.45_4x.fits .')#convolve star w/SSC PRF
	    iraf.mkobjects(input='mosaic_minus_median_extract.fits',output='mosaic-completeness.fits',objects='stars.coords.dat',radius=14,star='mips24_prf_mosaic_2.45_4x.fits',magzero=25.,background=0.,gain=5.,rdnoise=0.,poisson='no')
        #os.system('cp ../cal/PRF_estimate.fits .')#convolve gaussian w/measured PRF
        #iraf.mkobjects(input='mosaic_minus_median_extract.fits',output='mosaic-completeness.fits',objects='stars.coords.dat',radius=15,star='PRF_estimate.fits',magzero=25.,background=0.,gain=5.,rdnoise=0.,poisson='no')
	    os.system('ls *.fits')
	    os.system('pwd')
	    iraf.display('mosaic_minus_median_extract.fits',1,contrast=0.01)
	    iraf.display('mosaic-completeness.fits',2,contrast=0.01)
	    iraf.tvmark(1,'stars.coords.dat')
	    iraf.tvmark(2,'stars.coords.dat')
	    fsim=10.**((25.-msim)/2.5)#ZP=25

	    if createflag < .1:#read in positions and magnitudes of artdata sources
		    xsim=[]
		    ysim=[]
		    msim=[]
		    infile=open('stars.coords.dat','r')
		    for line in infile:
			    if line.find('#') > -1:
				    continue
			    t=line.split()
			    xsim.append(float(t[0]))
			    ysim.append(float(t[1]))
			    msim.append(float(t[2]))
		    infile.close()
		    xsim=N.array(xsim,'f')
		    ysim=N.array(ysim,'f')
		    msim=N.array(msim,'f')
		    
		    fsim=10.**((25.-msim)/2.5)#ZP=25

	    if detectflag > 0.1:#now run detection on mosaic-completeness.fits
		    combinepath=bcdpath+'pbcd/Combine/'
		    os.chdir(combinepath)
		    print combinepath
		    #os.system('apex_1frame.pl -n apex_1frame_MIPS24_step2.nl -i output_apex_step2/mosaic-completeness.fits')
	
		    #combinepath=bcdpath+'pbcd/Combine/output_apex_step2'
		    os.system('apex_1frame.pl -n apex_1frame_step2all.nl -i apex_1frame_step2/mosaic-completeness.fits')
	
		    combinepath=bcdpath+'pbcd/Combine/apex_1frame_step2'
		    os.chdir(combinepath)
		    print combinepath
		    file='mosaic-completeness_extract_raw.tbl'
		    input=open(file,'r')
		    ngal=0
		    for line in input:
			    if line.find('Conversion') > -1:
				    t=line.split('=')
				    convfactor=float(t[1])#conversion from ADU to uJy
  	    #aperr=aveaperr*convfactor #convert noise in ADU to uJy using conv factor from apex
				    print "Conversion Factor = ",convfactor
	    #print "aveaperr = ",aveaperr
	    #print "aperr = ",aperr
				    continue
			    if line.find('#') > -1: #skip comment lines
				    continue
			    if line.find('\\') > -1: #skip header lines containing '\'
				    continue
			    if line.find('|') > -1: #skip table-layout lines containing '|'
				    continue
			    ngal=ngal+1
		    input.close()
    
	

	    id24 = N.zeros(ngal,'f')
	    imagex24 = N.zeros(ngal,'f')
	    imagey24  = N.zeros(ngal,'f')
	    ra24 = N.zeros(ngal,'f')
	    dec24 = N.zeros(ngal,'f')
	    f24 = N.zeros(ngal,'d')#flux
	    errf24 = N.zeros(ngal,'d')
	    fap1 = N.zeros(ngal,'d')#flux in aperture 1 (1,1.5,2,2.6,3,3.5,4,4.5,5.,5.5) pixels
	    fap2 = N.zeros(ngal,'d')#flux
	    fap3 = N.zeros(ngal,'d')#flux
	    fap4 = N.zeros(ngal,'d')#flux in ap 4 - this is one w/ap cor of 1.67 (Calzetti et al 2007)
	    fap5 = N.zeros(ngal,'d')#flux
	    fap6 = N.zeros(ngal,'d')#flux
	    fap7 = N.zeros(ngal,'d')#flux
	    fap8 = N.zeros(ngal,'d')#flux
	    fap9 = N.zeros(ngal,'d')#flux
	    fap10 = N.zeros(ngal,'d')#flux
	    snr24 = N.zeros(ngal,'d')#SNR calculated by mopex
	    deblend = N.zeros(ngal,'f')#deblend flag from mopex
	    

	    input=open(file,'r')
	    i=0
	    output=open('xy24raw.dat','w')
	    for line in input:
		    if line.find('#') > -1: #skip comment lines
			    continue
		    if line.find('\\') > -1: #skip header lines containing '\'
			    continue
		    if line.find('|') > -1: #skip table-layout lines containing '|'
			    continue
	 
	
		    t=line.split()
	#print "length of t = ",len(t)
	#print (t[8]),(t[10]),(t[13]),(t[14]),(t[18]),(t[2]),(t[23]),(t[24]),(t[25]),(t[26]),(t[27]),(t[28]),(t[29]),(t[30]),(t[31]),(t[32])

		    (imagex24[i],imagey24[i],f24[i],errf24[i],snr24[i],deblend[i],fap1[i],fap2[i],fap3[i],fap4[i],fap5[i],fap6[i],fap7[i],fap8[i],fap9[i],fap10[i])=(float(t[8]),float(t[10]),float(t[13]),float(t[14]),float(t[18]),float(t[2]),float(t[25]),float(t[26]),float(t[27]),float(t[28]),float(t[29]),float(t[30]),float(t[31]),float(t[32]),float(t[33]),float(t[34]))
		    s='%6.2f %6.2f \n'%(imagex24[i],imagey24[i])
		    output.write(s)

		    i=i+1
	    input.close()
	    output.close()
	    iraf.tvmark(1,'xy24raw.dat',color=204,radi=2)
	    iraf.tvmark(2,'xy24raw.dat',color=204,radi=2)
    
	    delta=1.#max number of pixels for a match

	    #get rid of objects that were detected in the original image.  Otherwise, matching will think any object near a sim galaxy is the sim galaxy.  A faint galaxy placed on top of a pre-existing bright galaxy would be detected.

	    newgalflag=N.ones(len(imagex24),'i')
	    for i in range(len(imagex24)):
		    (imatch, matchflag,nmatch)=findnearest(imagex24[i],imagey24[i],xgal,ygal,delta)
		    if matchflag > 0.:
			    dflux=abs(fap4gal[imatch] - fap4[i])/fap4[i]
			    if dflux < .1:#position of real galaxy, flux difference less than 10% -> not a new galaxy
				    newgalflag[i] = 0
	    #keep only galaxies that are new
	    imagex24 = N.compress(newgalflag,imagex24)
	    imagey24  = N.compress(newgalflag,imagey24)
	    fap1 = N.compress(newgalflag,fap1)
	    fap2 = N.compress(newgalflag,fap2)
	    fap3 = N.compress(newgalflag,fap3)
	    fap4 = N.compress(newgalflag,fap4)
	    fap5 = N.compress(newgalflag,fap5)
	    fap6 = N.compress(newgalflag,fap6)
	    fap7 = N.compress(newgalflag,fap7)
	    fap8 = N.compress(newgalflag,fap8)
	    fap9 = N.compress(newgalflag,fap9)
	    fap10 =N.compress(newgalflag,fap10)
	    snr24 =N.compress(newgalflag,snr24)
	    deblend = N.compress(newgalflag,deblend)

	    delta=2.#max number of pixels for a match
	    matchflagsim=N.zeros(len(xsim),'i')
	    fmeas1=N.zeros(len(xsim),'f')
	    fmeas2=N.zeros(len(xsim),'f')
	    fmeas3=N.zeros(len(xsim),'f')
	    fmeas4=N.zeros(len(xsim),'f')
	    fmeas5=N.zeros(len(xsim),'f')
	    fmeas6=N.zeros(len(xsim),'f')
	    fmeas7=N.zeros(len(xsim),'f')
	    fmeas8=N.zeros(len(xsim),'f')
	    fmeas9=N.zeros(len(xsim),'f')
	    fmeas10=N.zeros(len(xsim),'f')
	    fmeas24=N.zeros(len(xsim),'f')
	    deblendsim=N.zeros(len(xsim),'f')
	    snrsim=N.zeros(len(xsim),'f')
	    for i in range(len(xsim)):
		    (imatch, matchflag,nmatch)=findnearest(xsim[i],ysim[i],imagex24,imagey24,delta)
		    matchflagsim[i]=matchflag
		    if matchflag > .1:
			    fmeas1[i]=fap1[int(imatch)]
			    fmeas2[i]=fap2[int(imatch)]
			    fmeas3[i]=fap3[int(imatch)]
			    fmeas4[i]=fap4[int(imatch)]
			    fmeas5[i]=fap5[int(imatch)]
			    fmeas6[i]=fap6[int(imatch)]
			    fmeas7[i]=fap7[int(imatch)]
			    fmeas8[i]=fap8[int(imatch)]
			    fmeas9[i]=fap9[int(imatch)]
			    fmeas10[i]=fap10[int(imatch)]
			    fmeas24[i]=f24[int(imatch)]
			    deblendsim[i]=deblend[int(imatch)]
			    snrsim[i]=snr24[int(imatch)]
			    



	    fsimall=fsimall+list(fsim)
	    matchflagsimall=matchflagsimall+list(matchflagsim)
	    f2all=f2all+list(fmeas2)
	    f3all=f3all+list(fmeas3)
	    f4all=f4all+list(fmeas4)
	    deblendsimall=deblendsimall+list(deblendsim)
	    snrsimall=snrsimall+list(snrsim)


    fsim=N.array(fsimall,'f')
    matchflagsim=N.array(matchflagsimall,'f')
    fmeas2=N.array(f2all,'f')
    fmeas3=N.array(f3all,'f')
    fmeas4=N.array(f4all,'f')
    deblendsim=N.array(deblendsimall,'f')
    snrsim=N.array(snrsimall,'f')


    #make plots using all realizations 
    pylab.cla()
    pylab.clf()
    fsim=fsim*convfactor
    fs=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fsim)
    #f1=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas1)
    f2=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas2)
    f3=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas3)
    f4=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas4)
    #f242=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas24)

    r4=pylab.median(fs/f4)
    r3=pylab.median(fs/f3)
    r2=pylab.median(fs/f2)
    print "average ratios ap 4",pylab.average(fs/f4),r4,pylab.std((fs/f4)/pylab.average(fs/f2))
    print "average ratios ap 3",pylab.average(fs/f3),pylab.median(fs/f3),pylab.std((fs/f3)/pylab.average(fs/f3))
    print "average ratios ap 2",pylab.average(fs/f2),pylab.median(fs/f2),pylab.std((fs/f2)/pylab.average(fs/f2))

    s='f4 w/apcor = %3.2f(%4.2f)'%(r4,pylab.average(abs(fs-f4*r4)/fs))
    pylab.plot(fs,f4*r4,'b.',label=s)
    pylab.plot(fs,f4,'bo',label='f4')
    s='f3 w/apcor = %3.2f(%4.2f)'%(r3,pylab.average(abs(fs-f3*r3)/fs))
    pylab.plot(fs,f3*r3,'g.',label=s)
    pylab.plot(fs,f3,'go',label='f3')
    s='f2 w/apcor = %3.2f(%4.2f)'%(r2,pylab.average(abs(fs-f2*r2)/fs))
    pylab.plot(fs,f2*r2,'r.',label=s)
    pylab.plot(fs,f2,'ro',label='f2')
    #pylab.plot(fs,f1,'co',label='f1')
    #pylab.plot(fs,f242,'k.',label='f24')
    pylab.legend(loc='best')
    x=N.arange(0.,max(fs),10.)
    y=x
    pylab.plot(x,y,'k-')
    #y=2.*x
    #pylab.plot(x,y,'k--')
    #y=3.*x
    #pylab.plot(x,y,'k--')
    #y=4.*x
    #pylab.plot(x,y,'k--')
    #y=5.*x
    #pylab.plot(x,y,'k--')
    pylab.xlabel('F(24) Input')
    pylab.ylabel('F(24) measured')
    #pylab.axis([0.,50.,0.,50.])
    s=str(prefix)+'fluxcomp.eps'
    pylab.savefig(s)

    pylab.cla()
    pylab.clf()


    nbins=20
    fmin=10.#min(fsim)
    fmax=max(fsim)
    df=5.#(fmax-fmin)/(1.*nbins)
    bins=N.arange(fmin,(fmax+df),df)



    (xbin,ybin,ybinerr)=mystuff.completeness(bins,fsim,matchflagsim)
    s=str(prefix)+'FracComplvsFlux.dat'
    outdat=open(s,'w')
    print "Completeness vs Input Flux"
    for i in range(len(xbin)):
	    print i, xbin[i],ybin[i],ybinerr[i]
	    t='%8.2f %8.4f %8.4f\n'%(xbin[i],ybin[i],ybinerr[i])
	    outdat.write(t)
    outdat.close()
    #for i in range(len(fsim)):
	#if snrsim[i] > 3.:
	#    print i, fsim[i],matchflagsim[i],deblendsim[i],abs(fsim[i]-fmeas4[i]*1.67)/fsim[i],snrsim[i]
    #(xbin,ybin2,ybin2err)=mystuff.scipyhist2(bins,fmeas4)
    #pylab.plot(xbin,ybin,'bo')
    #pylab.plot(xbin,ybin2,'ro')
    #s=str(prefix)+'NDetectvsFlux.eps'
    #pylab.savefig(s)

    pylab.cla()
    pylab.clf()
    pylab.plot(xbin,ybin,'ko')
    pylab.errorbar(xbin,ybin,yerr=ybinerr,fmt=None,ecolor='k')
    s=str(prefix)+'FracComplvsFlux.eps'
    pylab.axhline(y=1.0,ls='-')
    pylab.axhline(y=.8,ls='--')
    pylab.axvline(x=80.0,ls=':',color='b')
    pylab.xlabel('Input Flux (uJy)')
    pylab.ylabel('Completeness')
    pylab.axis([0.,max(xbin)+df,-.05,1.05])

    pylab.savefig(s)
    
    os.system('cp *.eps /Users/rfinn/clusters/spitzer/completeness/.')
    os.system('cp *vsFlux.dat /Users/rfinn/clusters/spitzer/completeness/.')
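
mystuff.completeness is a local helper; a sketch of what it plausibly returns -- the detected fraction per input-flux bin with a binomial error bar (the exact contract is an assumption):

import numpy as N

def completeness_fraction(bins, fsim, matchflag):
    xbin, ybin, ybinerr = [], [], []
    for lo, hi in zip(bins[:-1], bins[1:]):
        inbin = (fsim >= lo) & (fsim < hi)
        n = inbin.sum()
        if n == 0:
            continue
        frac = 1.0 * (matchflag[inbin] > 0.1).sum() / n
        xbin.append(0.5 * (lo + hi))
        ybin.append(frac)
        ybinerr.append(N.sqrt(frac * (1.0 - frac) / n))  # binomial error
    return N.array(xbin), N.array(ybin), N.array(ybinerr)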
Beispiel #49
0
 f = string.strip(f)
 
 if f[-4:] == '.dat':
     if f[-8:] != 'FFOP.dat':
         print f
         X = pl.load(f)
         
         x = X[:,2]
         # limit orbital phase between 0.8 and 1.2
         lt = x < 1.2
         gt = x > 0.8
         x = x[lt*gt]
 
         if len(x) != 0:
             a = X[:,0][lt*gt]
             p = X[:,1][lt*gt] - pl.average(X[:,1][lt*gt])
             sa = X[:,3][lt*gt]
             sp = X[:,4][lt*gt]
             
     
             # get outliers to fall in respectable range
             lt = p < -1.0
             p[lt] += 1.0
             gt = p > 0.5
             p[gt] -= 1.0
             
             
             
             
             anew.append(a)
             pnew.append(p)
    
Beispiel #50
0
    #instantiation of pop2 neurons
    pop2.append(pop2n.pop2n(CpES=CpES2, CpCS=CpCS, g_x2x2=g_x2x2, g_x1x2=g_x1x2, I2=I2, g_x2x2_slow=g_x2x2_slow, c2=c2, noise=noise2))
    pop2[j].x2=x2_init[j]
    pop2[j].y2=y2_init[j]
    
#connections between neurons
for i in range(nbn1):
    pop1[i].connect_syn_pop2n(pop2[:])
    pop1[i].connect_gap(pop1[:])
for j in range(nbn2):
    pop2[j].connect_syn_pop1n(pop1[:])
    pop2[j].connect_syn_pop2n(pop2[:])
    pop2[j].connect_gap(pop2[:])

x1bar=pb.average(x1_init)
x2bar=pb.average(x2_init)
zbar=pb.average(z_init)

#get pop2 fixed point coordinates (pop1 needs to be computed at each timestep)


############################
# Integration loop
############################
count_samples=0  
for ti in pb.arange(t_stop/dt):
    for i in range(nbn1):
        #pop1[i].x0=x0_variable[ti]
        #pop1[i].CpES=CpES_variable[ti]
        x1_nsamples[i,count_samples],y1_nsamples[i,count_samples],z_nsamples[i,count_samples]=pop1[i].euler(dt, 0, x1bar,x2bar,zbar, ti) # 4th parameter: x2bar 
Beispiel #51
0
def ami_ensemble_stats(indata, nlat, nlon):
    """generates ensemble mean, max, and min for the ami data generated with get_amiuk.py
	the stats code is incredibly simple iteration with missing value handling, because 
	it used to be fortran code, nothing clever or optimised."""

    import numpy as np
    from pylab import average

    mean = np.zeros((149, nlat, nlon))
    meanobs = np.zeros((61, nlat, nlon))
    minm = np.zeros((149, nlat, nlon))
    minobs = np.zeros((61, nlat, nlon))
    maxm = np.zeros((149, nlat, nlon))
    maxobs = np.zeros((61, nlat, nlon))

    #mean map
    for j in range(0, nlat):
        for k in range(0, nlon):
            for i in range(0, 149):
                mcount = 0
                for h in range(0, 10):
                    if indata[h, i, j, k] != -1.0:
                        mean[i, j, k] = mean[i, j, k] + indata[h, i, j, k]
                        mcount += 1
                if mcount > 0:
                    mean[i, j, k] = mean[i, j, k] / mcount
                else:
                    mean[i, j, k] = np.nan
            # only one observational member (index 11) exists, so the obs
            # "mean" is simply that value when it is not the missing value
            for i in range(0, 61):
                if indata[11, i, j, k] != -1.0:
                    meanobs[i, j, k] = indata[11, i, j, k]
                else:
                    meanobs[i, j, k] = np.nan

    #minimal map
    for j in range(0, nlat):
        for k in range(0, nlon):
            for i in range(0, 149):
                # NB: a -1.0 missing value, if present, will dominate this min
                minm[i, j, k] = min(indata[0:10, i, j, k])
            mac = np.ma.count(indata[11, :, j, k])
            for i in range(0, 61):
                # check to see if any values left after mask, if none, use missing value
                if mac < 2:
                    minobs[i, j, k] = np.nan
                else:
                    minobs[i, j, k] = min(indata[11, :, j, k])

    #maximal map
    for j in range(0, nlat):
        for k in range(0, nlon):
            for i in range(0, 149):
                maxm[i, j, k] = max(indata[0:10, i, j, k])
            mac = np.ma.count(indata[11, :, j, k])
            for i in range(0, 61):
                # check to see if any values left after mask, if none, use missing value
                if mac < 2:
                    maxobs[i, j, k] = np.nan
                else:
                    maxobs[i, j, k] = max(indata[11, :, j, k])

    return (mean, meanobs, maxm, maxobs, minm, minobs)
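
For reference, the ensemble-mean loops above can be written with numpy masked arrays; a sketch assuming -1.0 is the missing-value sentinel:

import numpy as np

def ensemble_mean(indata):
    # mask the -1.0 sentinel, average the 10 model members along axis 0,
    # and turn fully-missing cells into NaN
    members = np.ma.masked_values(indata[0:10], -1.0)
    return members.mean(axis=0).filled(np.nan)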
Beispiel #52
0
def Froudenumber(flmlname):
  print "\n********** Calculating the Froude number\n"
  # warn user about assumptions
  print "Froude number calculations makes three assumptions: \n i) domain height = 0.1m \n ii) mid point domain is at x = 0.4 \n iii) initial temperature difference is 1.0 degC"
  domainheight = 0.1
  domainmid = 0.4
  rho_zero, T_zero, alpha, g = le_tools.Getconstantsfromflml(flmlname)
  gprime = rho_zero*alpha*g*1.0 # this has assumed the initial temperature difference is 1.0 degC

  # get list of vtus
  filelist = le_tools.GetFiles('./')
  logs = ['diagnostics/logs/time.log','diagnostics/logs/X_ns.log','diagnostics/logs/X_fs.log']
  try:
    # if have extracted information already just use that
    os.stat('diagnostics/logs/time.log')
    os.stat('diagnostics/logs/X_ns.log')
    os.stat('diagnostics/logs/X_fs.log')
    time = le_tools.ReadLog('diagnostics/logs/time.log')
    X_ns = [x-domainmid for x in le_tools.ReadLog('diagnostics/logs/X_ns.log')]
    X_fs = [domainmid-x for x in le_tools.ReadLog('diagnostics/logs/X_fs.log')]
  except OSError:
    # otherwise get X_ns and X_fs and t from vtus
    time, X_ns, X_fs = le_tools.GetXandt(filelist)
    f_time = open('./diagnostics/logs/time.log','w')
    for t in time: f_time.write(str(t)+'\n')
    f_time.close()
    f_X_ns = open('./diagnostics/logs/X_ns.log','w')
    for X in X_ns: f_X_ns.write(str(X)+'\n')
    f_X_ns.close()
    f_X_fs = open('./diagnostics/logs/X_fs.log','w')
    for X in X_fs: f_X_fs.write(str(X)+'\n')
    f_X_fs.close()

    # shift so that both X_ns and X_fs measure the distance of the front
    # from its initial position (the mid point of the domain)
    X_ns = [x-domainmid for x in X_ns]
    X_fs = [domainmid-x for x in X_fs]

  # Calculate U_ns and U_fs from X_ns, X_fs and t
  U_ns = le_tools.GetU(time, X_ns)
  U_fs = le_tools.GetU(time, X_fs)
  U_average = [[],[]]

  # If possible, take an average front speed
  # (if the fronts have not travelled far enough, no average is taken)
  start_val, end_val, average_flag_ns = le_tools.GetAverageRange(X_ns, 0.2, domainheight)
  if average_flag_ns: U_average[0].append(pylab.average(U_ns[start_val:end_val]))

  start_val, end_val, average_flag_fs = le_tools.GetAverageRange(X_fs, 0.25, domainheight)
  if average_flag_fs: U_average[1].append(pylab.average(U_fs[start_val:end_val]))
  
  # plot
  fs = 18
  pylab.figure(num=1, figsize = (16.5, 11.5))
  pylab.suptitle('Front speed', fontsize = fs)

  pylab.subplot(221)
  pylab.plot(time,X_ns, color = 'k')
  pylab.axis([0,45,0,0.4])
  pylab.grid('on')
  pylab.xlabel('$t$ (s)', fontsize = fs)
  pylab.ylabel('$X$ (m)', fontsize = fs)
  pylab.title('no-slip', fontsize = fs)
    
  pylab.subplot(222)
  pylab.plot([x/domainheight for x in X_ns],[U/math.sqrt(gprime*domainheight) for U in U_ns], color = 'k')
  pylab.axis([0,4,0,0.6])
  pylab.grid('on')
  pylab.axhline(0.406, color = 'k')
  pylab.axhline(0.432, color = 'k')
  pylab.text(3.95,0.396,'Hartel 2000',bbox=dict(facecolor='white', edgecolor='black'), va = 'top', ha = 'right')
  pylab.text(3.95,0.442,'Simpson 1979',bbox=dict(facecolor='white', edgecolor='black'), ha = 'right')
  pylab.xlabel('$X/H$', fontsize = fs)
  pylab.ylabel('$Fr$', fontsize = fs)
  pylab.title('no-slip', fontsize = fs)
  if average_flag_ns:
    pylab.axvline(2.0, color = 'k')
    pylab.axvline(3.0, color = 'k')
    pylab.text(0.05, 0.01, 'Average Fr = '+'{0:.2f}'.format(U_average[0][0]/math.sqrt(gprime*domainheight))+'\nvertical lines indicate the range \nover which the average is taken', bbox=dict(facecolor='white', edgecolor='black'))
  
  pylab.subplot(223)
  pylab.plot(time,X_fs, color = 'k')
  pylab.axis([0,45,0,0.4])
  pylab.grid('on')
  pylab.xlabel('$t$ (s)', fontsize = fs)
  pylab.ylabel('$X$ (m)', fontsize = fs)
  pylab.title('free-slip', fontsize = fs)
    
  pylab.subplot(224)
  pylab.plot([x/domainheight for x in X_fs],[U/math.sqrt(gprime*domainheight) for U in U_fs], color = 'k')
  pylab.axis([0,4,0,0.6])
  pylab.grid('on')
  pylab.axhline(0.477, color = 'k')
  pylab.text(3.95,0.467,'Hartel 2000', va = 'top',bbox=dict(facecolor='white', edgecolor='black'), ha = 'right')
  pylab.xlabel('$X/H$', fontsize = fs)
  pylab.ylabel('$Fr$', fontsize = fs)
  pylab.title('free-slip', fontsize = fs)
  if average_flag_fs:
    pylab.text(0.05, 0.01, 'Average Fr  = '+'{0:.2f}'.format(U_average[1][0]/math.sqrt(gprime*domainheight))+'\nvertical lines indicate the range \nover which the average is taken', bbox=dict(facecolor='white', edgecolor='black'))
    pylab.axvline(2.5, color = 'k')
    pylab.axvline(3.0, color = 'k')

  pylab.savefig('diagnostics/plots/front_speed.png')
  return
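
le_tools.GetU is a project helper; a centred-difference sketch of a plausible front-speed calculation (the real helper may differ):

import numpy

def get_u(time, X):
    # front speed U = dX/dt on a possibly non-uniform time axis,
    # centred differences inside, one-sided at the ends
    t = numpy.asarray(time, dtype=float)
    x = numpy.asarray(X, dtype=float)
    u = numpy.zeros(len(x))
    u[1:-1] = (x[2:] - x[:-2]) / (t[2:] - t[:-2])
    u[0] = (x[1] - x[0]) / (t[1] - t[0])
    u[-1] = (x[-1] - x[-2]) / (t[-1] - t[-2])
    return list(u)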
Beispiel #53
0
summary(net)

M=net.ecount()
N = net.vcount()
Mmax = N*(N-1)/2
Mmax
M/Mmax
p = 1.*M/Mmax

net.diameter()

net.components()
cc=net.components()  # connected components
ccs = cc.sizes()
max(ccs)
average(ccs)
ccs   # printed as a plain list, this scrolls off the screen
array(ccs)  # looks nicer this way; needs pylab or numpy
len(ccs)   # at the ipython prompt the parentheses can be omitted
sizedist = [ccs.count(i) for i in range(max(ccs)+1)]
array(sizedist)
plot(sizedist, "o")
grid()
"A komponensek méreteloszlása".decode("utf-8")   # unicode sztring lesz.
title("A komponensek méretei".decode("utf-8"))
xlabel("")

title("A komponensek méretgyakorisága (Erdős-Rényi 1000,0.001)".decode("utf-8"))
xlabel("méret (csúcsok száma)".decode("utf-8"))
ylabel("darabszám".decode("utf-8"))
savefig("meretgyakorisag_ER1000_001.pdf")
Beispiel #54
0
def xforce_stat(sf):
    stat = fluidity_tools.stat_parser(sf)
    time_avg_xforce = pylab.average(stat["0"]["Velocity"]["force%1"][-20:])
    return time_avg_xforce
Beispiel #55
0
def main():

    args = parseCMD()
   
    # Check if our data file exists, if not: write one.
    # Otherwise, open the file and plot.
    check = glob.glob('*JackKnifeData_Cv.dat*')
    
    if check == []:
        
        fileNames = args.fileNames
        skip = args.skip
        
        temps,Cvs,CvsErr = pl.array([]),pl.array([]),pl.array([])
        Es, EsErr = pl.array([]),pl.array([])
        ET, ETerr = pl.array([]),pl.array([])

  
        # open new data file, write headers
        fout = open('JackKnifeData_Cv.dat', 'w')
        fout.write('#%15s\t%16s\t%16s\t%16s\t%16s\t%16s\t%16s\n'% (
            'T', 'Ecv', 'EcvErr', 'Et', 'EtErr','Cv', 'CvErr'))

        # perform jackknife analysis of data, writing to disk
        for fileName in fileNames:
            temp = float(fileName[-40:-34])
            temps = pl.append(temps, temp)

            # grab and analyze energy data
            Ecv,Eth = pl.loadtxt(fileName, unpack=True, usecols=(4,-5))
            E = pl.average(Ecv)
            Et = pl.average(Eth)
            EErr = pl.std(Ecv)/pl.sqrt(float(len(Ecv)))
            EtErr = pl.std(Eth)/pl.sqrt(float(len(Eth)))
            Es = pl.append(Es,E)
            ET = pl.append(ET, Et)
            EsErr = pl.append(EsErr, EErr)
            ETerr = pl.append(ETerr, EtErr)
            
            # grab and analyze specific heat data
            EEcv, Ecv, dEdB = pl.loadtxt(fileName, unpack=True, usecols=(11,12,13))
            jkAve, jkErr = jk.jackknife(EEcv[skip:],Ecv[skip:],dEdB[skip:])
            Cvs = pl.append(Cvs,jkAve)
            CvsErr = pl.append(CvsErr,jkErr)
            
            fout.write('%16.8E\t%16.8E\t%16.8E\t%16.8E\t%16.8E\t%16.8E\t%16.8E\n' %(
                temp, E, EErr, Et, EtErr, jkAve, jkErr))
            print 'T = ',str(temp),' complete.'
        
        fout.close()

    else:
        print 'Found existing data file in CWD.'
        temps,Es,EsErr,ET,ETerr,Cvs,CvsErr = pl.loadtxt(
                'JackKnifeData_Cv.dat', unpack=True)

    # plot specific heat for QHO
    tempRange = pl.arange(0.01,1.0,0.01)
    Eanalytic = 0.5/pl.tanh(1.0/(2.0*tempRange))
    CvAnalytic = 1.0/(4.0*(tempRange*pl.sinh(1.0/(2.0*tempRange)))**2)

    pl.figure(1)
    ax1 = pl.subplot(211)
    pl.plot(tempRange,CvAnalytic, label='Exact')
    pl.errorbar(temps,Cvs,CvsErr, label='PIMC',color='Violet',fmt='o')
    pl.ylabel('Specific Heat',fontsize=20)
    pl.title('1D QHO -- 1 boson',fontsize=20)
    pl.legend(loc=2)

    pl.setp(ax1.get_xticklabels(), visible=False)
    ax2 = pl.subplot(212, sharex=ax1)
    pl.plot(tempRange,Eanalytic, label='Exact')
    pl.errorbar(temps,Es,EsErr, label='PIMC virial',color='Lime',fmt='o')
    pl.errorbar(temps,ET,ETerr, label='PIMC therm.',color='k',fmt='o')
    #pl.scatter(temps,Es, label='PIMC')
    pl.xlabel('Temperature [K]',fontsize=20)
    pl.ylabel('Energy [K]',fontsize=20)
    pl.legend(loc=2)

    pl.savefig('1Dqho_largerCOM_800000bins_CvANDenergy.pdf',
            format='pdf', bbox_inches='tight')

    pl.show()
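
jk.jackknife is an external helper; a delete-one jackknife sketch under an assumed combined estimator f (the actual specific-heat estimator lives in that helper):

import numpy as np

def jackknife(EEcv, Ecv, dEdB):
    # f is an assumed placeholder; substitute the real combined estimator
    f = lambda a, b, c: np.mean(a) - np.mean(b)**2 - np.mean(c)
    a, b, c = np.asarray(EEcv), np.asarray(Ecv), np.asarray(dEdB)
    n = len(b)
    full = f(a, b, c)
    partial = np.array([f(np.delete(a, i), np.delete(b, i), np.delete(c, i))
                        for i in range(n)])
    est = n * full - (n - 1) * partial.mean()          # bias-corrected estimate
    err = np.sqrt((n - 1) * np.mean((partial - partial.mean())**2))
    return est, err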
Beispiel #56
0
 def plot_sigTau_towed(self,th1_cs,skid,figNum=11):
     th0 = self._th0
     th2 = self._th2
     r = self._radius
     b = self._width
     k1 = self._k1
     k2 = self._k2
     n = self._n
     phi = self._phi
     K = self._K
     c = self._coh
     
     incr = (th1_cs - th2) / 100.0    # plot increment
     th_arr = py.arange(0,th1_cs + incr, incr) # find sigma, tau at these discrete vals
     sig_arr = py.zeros(len(th_arr))
     tau_arr = py.zeros(len(th_arr))
     slip_arr = py.zeros(len(th_arr))
     
     for idx in range(0,len(th_arr)):
         th = th_arr[idx]
         if(th < th0):
             # we're in the bottom region
             sig_curr = sig_2(th,th0,th1_cs,th2,r,b,n,k1,k2)
             tau_curr = -tau_t2(th,th0,th1_cs,th2,r,b,n,k1,k2,c,K,phi,skid)
             slip_j = j2(th,th0,r,skid)
             sig_arr[idx] = sig_curr
             tau_arr[idx] = tau_curr
             slip_arr[idx] = slip_j
             
         else:
             # we're in the top region
             sig_curr = sig_1(th, th1_cs,r,b,n,k1,k2)
             tau_curr = tau_t1(th,th0,th1_cs,r,b,n,k1,k2,c,K,phi,skid)
             slip_j = j1(th,th0,th1_cs,r,skid)
             sig_arr[idx] = sig_curr
             tau_arr[idx] = tau_curr
             slip_arr[idx] = slip_j
             
     if( self._plots):        
         fig = plt.figure()
         ax = fig.add_subplot(211,title='Fig. ' + str(figNum) +' skid=' + str(skid))
         ax.plot(radToDeg(th_arr),sig_arr,radToDeg(th_arr),tau_arr,linewidth=1.5)
         ax.set_xlabel('theta [deg]')
         if( self._units == 'ips'):
             ax.set_ylabel('stress [psi]')
         else:
             ax.set_ylabel('stress [Pa]')
         ax.legend((r'$\sigma$($\theta$)',r'$\tau$($\theta$)'))
         ax.grid(True)
         # take a look at what I"m using for slip displacement also
         ax = fig.add_subplot(212)
         ax.plot(radToDeg(th_arr),slip_arr,linewidth=1.5)
         ax.set_xlabel('theta [deg]')
         if( self._units == 'ips'):
             ax.set_ylabel('slip disp.[in]')
         else:
             ax.set_ylabel('slip disp.[m]')
         ax.grid(True)
         
          # polar plots
         fig=plt.figure()
         ax=fig.add_subplot(111,projection='polar')
         ax.plot(th_arr,sig_arr/1000.,'b',linewidth=1.5)
         ax.plot(th_arr,tau_arr/1000.,'r--',linewidth=1.5)
         # fix the axes
         ax.grid(True)
         if( self._units == 'ips'):
             leg = ax.legend((r'$\sigma$ [kip]',r'$\tau$'))
         else:
             leg = ax.legend((r'$\sigma$ [kPa]',r'$\tau$'))
         leg.draggable()
         ax.set_theta_zero_location('S')
         # also, draw the tire
         polar_r_offset = py.average(sig_arr)/1000.
         theta = py.arange(0.,2.*py.pi+0.05,0.05)
         tire_pos = py.zeros(len(theta))
         ax.plot(theta,tire_pos,color='k',linewidth=1.0)
         ax.set_rmin(-polar_r_offset)
         ax.set_title(r'towed wheel stresses,  $\theta_1$ = %4.3f [rad]' %th1_cs)
         ax.set_thetagrids([-10,0,10,20,30,40,50,60])
         
     return [sig_arr, tau_arr]
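
sig_1, sig_2 and tau_t1/tau_t2 come from this module's terramechanics model; a sketch of the classic Bekker/Wong-Reece normal stress for the front contact region, which sig_1 plausibly resembles (the mapping of (k1, k2) onto the sinkage moduli is an assumption):

import pylab as py

def sig_1_sketch(th, th1, r, b, n, k1, k2):
    # Bekker pressure-sinkage p = (k1/b + k2) * z**n, with wheel sinkage
    # z = r*(cos(th) - cos(th1)) in the front region
    z = r * (py.cos(th) - py.cos(th1))
    return (k1 / b + k2) * z**n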
 
 
Beispiel #57
0
 # write average subtracted spectrum to new fits file
 #pf.writeto('avesub%s'%i,data=data,header=head)
 
 start = head['CRVAL1']
 step = head['CDELT1']
 length = head['NAXIS1']
 x = start + pl.arange(0,length)*step
 
 # hydrogen alpha
 dl = v/c*6563.0
 w1 = x > 6563 - dl
 w2 = x < 6563 + dl
 
 imHa.append(data[w1*w2]-pl.average(data[w1*w2]))
 #imHa.append((data[w1*w2]))
 #imHa.append((data[w1*w2]-pl.average(data[w1*w2])-(ave[w1*w2]-pl.average(ave[w1*w2]))))
 
 dl = v/c*4860.0
 w1 = x > 4860 - dl
 w2 = x < 4860 + dl
 #data = pf.getdata(i)
 imHb.append(data[w1*w2]-pl.average(data[w1*w2]))
 #imHb.append((data[w1*w2]-pl.average(data[w1*w2])-(ave[w1*w2]-pl.average(ave[w1*w2]))))
 #imHb.append(data[w1*w2])
 
 dl = v/c*4686
 w1 = x > 4686 - dl
 w2 = x < 4686 + dl
 #data = pf.getdata(i)
Beispiel #58
0
def main():

    args = parseCMD()

    # Check if our data file exists, if not: write one.
    # Otherwise, open the file and plot.
    check = glob.glob('*JackKnifeData_Cv.dat*')

    if check == []:

        fileNames = args.fileNames
        skip = args.skip
        temps, Cvs, CvsErr = pl.array([]), pl.array([]), pl.array([])
        timeSteps = pl.array([])

        # open new data file, write headers
        fout = open('JackKnifeData_Cv.dat', 'w')
        fout.write('#%09s\t%10s\t%10s\t%10s\n' % ('T', 'tau', 'Cv', 'CvErr'))

        # perform jackknife analysis of data, writing to disk
        for fileName in fileNames:
            tau = float(fileName[-21:-14])
            temp = float(fileName[-40:-34])
            timeSteps = pl.append(timeSteps, tau)
            temps = pl.append(temps, temp)
            EEcv, Ecv, dEdB = pl.loadtxt(fileName,
                                         unpack=True,
                                         usecols=(11, 12, 13))
            jkAve, jkErr = aTools.jackknife(EEcv[skip:], Ecv[skip:],
                                            dEdB[skip:])
            print '<est> = ', jkAve, ' +/- ', jkErr
            Cvs = pl.append(Cvs, jkAve)
            CvsErr = pl.append(CvsErr, jkErr)
            fout.write('%10s\t%10s\t%10s\t%10s\n' % (temp, tau, jkAve, jkErr))
        fout.close()

    else:
        print 'Found existing data file in CWD.'
        temps, timeSteps, Cvs, CvsErr = pl.loadtxt('JackKnifeData_Cv.dat',
                                                   unpack=True)

    # make array of energies
    Es, EsErr = pl.array([]), pl.array([])
    ET, ETerr = pl.array([]), pl.array([])

    for fileName in args.fileNames:
        Ecv, Eth = pl.loadtxt(fileName, unpack=True, usecols=(4, -5))
        Es = pl.append(Es, pl.average(Ecv))
        ET = pl.append(ET, pl.average(Eth))
        EsErr = pl.append(EsErr, pl.std(Ecv) / pl.sqrt(float(len(Ecv))))
        ETerr = pl.append(ETerr, pl.std(Eth) / pl.sqrt(float(len(Eth))))

    # plot specific heat for QHO
    tempRange = pl.arange(0.01, 1.0, 0.01)
    Eanalytic = 0.5 / pl.tanh(1.0 / (2.0 * tempRange))
    CvAnalytic = 1.0 / (4.0 * (tempRange * pl.sinh(1.0 /
                                                   (2.0 * tempRange)))**2)

    if args.timeStepScaling:
        pl.figure(1)
        pl.plot(timeSteps, 0.5 / pl.tanh(1.0 / (2.0 * (temps))), label='Exact')
        pl.errorbar(timeSteps,
                    Es,
                    EsErr,
                    label='PIMC virial',
                    color='Lime',
                    fmt='o')
        pl.errorbar(timeSteps,
                    ET,
                    ETerr,
                    label='PIMC therm.',
                    color='Navy',
                    fmt='o')
        pl.xlabel('Time Step [1/K]')
        pl.ylabel('Energy [K]')
        pl.title('1D QHO -- 1 boson -- T=%s' % temps[0])
        pl.legend(loc=1)

        pl.figure(2)
        pl.plot(timeSteps,
                1.0 / (4.0 * (temps * pl.sinh(1.0 / (2.0 * temps)))**2),
                label='Exact')
        pl.errorbar(timeSteps,
                    Cvs,
                    CvsErr,
                    label='PIMC virial',
                    color='Navy',
                    fmt='o')
        pl.xlabel('Time Step [1/K]')
        pl.ylabel('Specific Heat [K]')
        pl.title('1D QHO -- 1 boson -- T=%s' % temps[0])
        pl.legend(loc=1)

    else:
        pl.figure(1)
        pl.plot(tempRange, CvAnalytic, label='Exact')
        pl.errorbar(temps, Cvs, CvsErr, label='PIMC', color='Violet', fmt='o')
        pl.xlabel('Temperature [K]')
        pl.ylabel('Specific Heat')
        pl.title('1D QHO -- 1 boson')
        pl.legend(loc=2)

        pl.figure(2)
        pl.plot(tempRange, Eanalytic, label='Exact')
        pl.errorbar(temps,
                    Es,
                    EsErr,
                    label='PIMC virial',
                    color='Lime',
                    fmt='o')
        pl.errorbar(temps, ET, ETerr, label='PIMC therm.', color='k', fmt='o')
        #pl.scatter(temps,Es, label='PIMC')
        pl.xlabel('Temperature [K]')
        pl.ylabel('Energy [K]')
        pl.title('1D QHO -- 1 boson')
        pl.legend(loc=2)

    pl.show()
Beispiel #59
0
def edge_length_stats():
    df = pd.read_csv('neuromorpho_lengths.csv', names=['name', 'points', 'mean_edge_length'])
    print pylab.average(df['mean_edge_length'], weights=df['points'])
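
The weighted average here is just sum(w * x) / sum(w); an equivalent computed directly from the dataframe:

import pandas as pd

def edge_length_stats_direct():
    df = pd.read_csv('neuromorpho_lengths.csv',
                     names=['name', 'points', 'mean_edge_length'])
    w = df['points'].astype(float)
    # weighted mean = sum(w * x) / sum(w)
    print (df['mean_edge_length'] * w).sum() / w.sum()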