def check_sigmaG(a, axis):
    from scipy.special import erfinv
    factor = 1. / (2 * np.sqrt(2) * erfinv(0.5))

    sigmaG1 = sigmaG(a, axis=axis)
    q25, q75 = np.percentile(a, [25, 75], axis=axis)
    sigmaG2 = factor * (q75 - q25)

    assert_array_almost_equal(sigmaG1, sigmaG2)
def test_sigmaG(axis):
    np.random.seed(0)
    a = np.random.random((20, 40, 60))

    from scipy.special import erfinv
    factor = 1. / (2 * np.sqrt(2) * erfinv(0.5))

    sigmaG1 = sigmaG(a, axis=axis)
    q25, q75 = np.percentile(a, [25, 75], axis=axis)
    sigmaG2 = factor * (q75 - q25)

    assert_array_almost_equal(sigmaG1, sigmaG2)
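# A minimal, self-contained sketch of the identity the two tests above check:
# sigmaG is the rank-based Gaussian width estimator 0.7413 * (q75 - q25),
# where 0.7413 = 1 / (2 * sqrt(2) * erfinv(0.5)). Assumes astroML is installed.
import numpy as np
from scipy.special import erfinv
from astroML.stats import sigmaG

rng = np.random.RandomState(0)
x = rng.normal(loc=0.0, scale=2.0, size=100000)

q25, q75 = np.percentile(x, [25, 75])
factor = 1. / (2 * np.sqrt(2) * erfinv(0.5))  # ~0.741301
print(sigmaG(x), factor * (q75 - q25))        # both ~2.0 for Gaussian data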
def create_histogram(filename, xdata, tot_events, THRESH, RATE, data=[]):
    '''
    Takes in the column data from a .out hit file and sets the
    histogram properties of it.
    '''
    # Filename convention: particle_C3F8_LABEL
    # Look behind for C3F8_ and ahead for the literal dot, but don't include it
    filename_convention = r"(?<=C3F8_)[\w]+(?=\.)"
    file_label = re.search(filename_convention, filename).group()

    # Add statistics to the label
    total, mean, threshold_count = get_statistics(xdata, THRESH, data)
    bubble_rate = calculate_bubble_rates(xdata, tot_events, THRESH, RATE,
                                         False, data)
    #file_label += \
    #    "\nNum_hits: " + str(int(round(total))) + \
    #    "\nMean (keV): " + str(round(mean, 2)) + \
    #    "\nHits above threshold: " + str(int(round(threshold_count)))
    file_label += "\nBubble rate (n/Hr): " + str(round(bubble_rate, 2))

    # Now that calculations are done, cut to threshold energy
    trimmed_xdata = trim_threshold(xdata, THRESH)

    # Bins for the trimmed data
    q25 = np.percentile(trimmed_xdata, 25)
    q75 = np.percentile(trimmed_xdata, 75)
    SigmaG = astroMLstats.sigmaG(trimmed_xdata)
    binsize = 2.7 * SigmaG / (tot_events**(1. / 3))
    bins = np.append(
        np.arange(start=THRESH,  # trimmed_xdata.min()
                  stop=trimmed_xdata.max(),
                  step=binsize),
        trimmed_xdata.max())
    bins = np.append(0., bins)

    n, _, _ = plt.hist(x=xdata, bins=bins, density=False,
                       histtype="step", label=file_label)
    #fancyhist(xdata, bins="scott", density=False, histtype="step",
    #          label=file_label)
    return n
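# The binsize above is the Freedman-Diaconis rule written in terms of sigmaG:
# the FD width is 2 * IQR / n**(1/3), and IQR = sigmaG / 0.7413, so the width
# is ~2.7 * sigmaG / n**(1/3). A small sketch of the equivalence, assuming
# astroML is installed:
import numpy as np
from astroML.stats import sigmaG

rng = np.random.RandomState(42)
x = rng.normal(size=10000)

q25, q75 = np.percentile(x, [25, 75])
fd_width = 2.0 * (q75 - q25) / len(x)**(1. / 3)
sg_width = 2.7 * sigmaG(x) / len(x)**(1. / 3)
print(fd_width, sg_width)  # agree to ~0.1%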
def procesar(fichero):
    # Open the FITS file
    f = fits.open(fichero)
    # Get the data array
    im = f[0].data
    # Compute the signal-to-noise ratio of order 42, corresponding to
    # 5500 Angstrom. We use the pixel range 1500-1700, since this range
    # contains only continuum: SNR is the median of the data divided by
    # its (robust) standard deviation
    snr = np.median(im[42, 1500:1700]) / sigmaG(im[42, 1500:1700])
    # Get the exposure time (np.float is deprecated; use the builtin float)
    exptime = float(f[0].header["EXPTIME"])
    # Get the date, parse it and convert it to a Julian date
    date = f[0].header["DATE"]
    dt = parser.parse(date)
    time = astropy.time.Time(dt)
    juldate = time.jd
    # Divide the exposure time by 10 to express the SNR/exposure-time relation
    time_exp10 = exptime / 10
    # Compute snr / time_exp10
    snr_time = snr / time_exp10
    # Get the name of the observed object
    name = f[0].header["OBJECT"]
    # Write to the log file
    if os.path.exists(LOG_SNR):
        file = open(LOG_SNR, "a")
    else:
        file = open(LOG_SNR, "w")
        file.write("@juldate,snr/exptime,object\n")
    # Make sure this same file has not been processed before,
    # comparing by Julian date
    if not existeNoche(str(round(juldate, 6))):
        file.write(
            str(round(juldate, 6)) + "," + str(round(snr_time, 4)) + "," +
            name + "\n")
    file.close()
def Plot1night(night):
    # Working directory and number of files
    DIR = './Rut01_dat'
    nfiles = len(glob.glob('./Rut01_dat/*' + night + '.spot'))

    # Initialize some arrays
    XX = np.zeros((nfiles, 199))
    YY = np.zeros((nfiles, 199))
    dX = np.zeros((nfiles, 199))
    dY = np.zeros((nfiles, 199))
    IN = np.zeros((nfiles, 199))
    JD = np.zeros((nfiles, 199))

    # Read every file created for each arc and store it in the matching arrays
    ii = 0
    for file in glob.glob('./Rut01_dat/*' + night + '.spot'):
        colnames = ('IdSpot', 'posX', 'posY', 'distX', 'distY',
                    'Intensidad', 'jd')
        table = ascii.read(file, format='csv', names=colnames, comment='@')
        jda = np.array(table["jd"])
        x = np.array(table["posX"])
        y = np.array(table["posY"])
        dx = np.array(table["distX"])
        dy = np.array(table["distY"])
        I = np.array(table["Intensidad"])
        if ii == 0:
            today = np.floor(jda[0])
        XX[ii, :] = x
        YY[ii, :] = y
        dX[ii, :] = dx
        dY[ii, :] = dy
        IN[ii, :] = I
        JD[ii, :] = jda - today
        ii = ii + 1

    # Compute the offsets of each spot relative to the median of that spot
    # over all arcs of the night
    nXX = np.zeros((nfiles, 199))
    nYY = np.zeros((nfiles, 199))
    nIN = np.zeros((nfiles, 199))
    for i in range(199):
        nXX[:, i] = (XX[:, i] - np.median(XX[:, i]))  # *0.037517/5500. * 299792458
        nYY[:, i] = (YY[:, i] - np.median(YY[:, i]))  # *0.037517/5500. * 299792458
        nIN[:, i] = IN[:, i] / np.mean(IN[:, i])

    # Initialize the plot
    plt.figure(figsize=(12, 7))
    gs = gridspec.GridSpec(3, 1)
    gs.update(left=0.08, right=0.95, bottom=0.08, top=0.93,
              wspace=0.2, hspace=0.1)

    # Panel for the relative offsets in the X direction
    ax = plt.subplot(gs[0, 0])
    ax.set_ylabel(r'$\Delta x$ (mpix)')
    ax.get_xaxis().set_ticks([])
    ax.set_ylim([-50, 55])
    ax.set_xlim([np.min(JD) * 24. - 0.2, np.max(JD) * 24. + 0.2])
    for i in range(199):
        Xplot = (XX[:, i] - np.median(XX[:, i])) * 1.e3
        plt.plot((JD[:, i]) * 24., Xplot, '+', c='Silver',
                 zorder=-1, alpha=0.6)
    for i in range(nfiles):
        Xplot = nXX[i, :] * 1.e3
        plt.errorbar((JD[i, 0]) * 24., np.median(Xplot), yerr=sigmaG(Xplot),
                     fmt='o', c='b', zorder=1)

    # Panel for the relative offsets in the Y direction
    ax = plt.subplot(gs[1, 0])
    ax.set_ylabel(r'$\Delta y$ (mpix)')
    ax.get_xaxis().set_ticks([])
    ax.set_ylim([-50, 50])
    ax.set_xlim([np.min(JD) * 24. - 0.2, np.max(JD) * 24. + 0.2])
    for i in range(199):
        Xplot = (YY[:, i] - np.median(YY[:, i])) * 1.e3
        plt.plot((JD[:, i]) * 24., Xplot, '+', c='Silver',
                 zorder=-1, alpha=0.6)
    for i in range(nfiles):
        Xplot = nYY[i, :] * 1.e3
        plt.errorbar((JD[i, 0]) * 24., np.median(Xplot), yerr=sigmaG(Xplot),
                     fmt='o', c='r', zorder=1)

    # Panel for the intensity
    ax = plt.subplot(gs[2, 0])
    ax.set_ylabel('Norm. Intensity')
    ax.set_xlabel('JD-2457594 (h)')
    ax.set_ylim([0.95, 1.02])
    ax.set_xlim([np.min(JD) * 24. - 0.2, np.max(JD) * 24. + 0.2])
    for i in range(199):
        plt.plot((JD[:, i]) * 24., nIN[:, i], '+', c='Silver',
                 zorder=-1, alpha=0.6)
    for i in range(nfiles):
        plt.errorbar((JD[i, 0]) * 24., np.median(nIN[i, :]),
                     yerr=sigmaG(nIN[i, :]), fmt='o', c='forestgreen',
                     zorder=1)

    plt.savefig("./Rut01_dat/Rutina01_plot_1night_" + night[0:6] + ".pdf")
def start():
    direktorij = output_folder.get()
    if not os.path.exists(direktorij):
        os.makedirs(direktorij)
    finishing_string = 'Name & Mean & Median\\\\ \n'
    starting_list = ['Name', 'Mean', 'Median']
    finishing_list = []
    f.seek(0)
    textval = [a.get() for a in txtv]
    textu = [a.get() for a in txtu]
    textl = [a.get() for a in txtl]
    labele = OrderedDict([(textval[j], j) for j in range(len(textval))
                          if (len(textval[j]) != 0)])
    labeleu = OrderedDict([(textval[j], float(textu[j]))
                           for j in range(len(textval))
                           if ((len(textu[j]) != 0) and
                               (len(textval[j]) != 0))])
    labelel = OrderedDict([(textval[j], float(textl[j]))
                           for j in range(len(textval))
                           if ((len(textl[j]) != 0) and
                               (len(textval[j]) != 0))])
    X = np.array([[0. for i in range(Number_of_columns)]
                  for j in range(Number_of_rows)])
    i = 0
    for line in f:
        line0 = line.strip()
        line1 = line0.split()
        passing = 1
        for dicts, vals in labele.items():
            if dicts in labelel:
                if (labelel[dicts] > float(line1[vals])):
                    passing = 0
            if dicts in labeleu:
                if (labeleu[dicts] < float(line1[vals])):
                    passing = 0
        if (passing == 1):
            for j in range(Number_of_columns):
                X[i][j] = float(line1[j])
            i = i + 1
    X = X[0:i, :]
    X = np.array([[X[k][l] for l in range(Number_of_columns)]
                  for k in range(i)])
    fig = plt.figure(figsize=(20, 15))
    count = 1
    for ime, val in labele.items():
        ax = fig.add_subplot(2, 3, count)
        ax.set_xlabel(ime, size=30)
        # 'normed' was removed from matplotlib; use 'density' instead
        ax.hist(X[:, val], bins=50, density=False, histtype='stepfilled',
                color='blue', facecolor='blue')
        ax.axvline(np.mean(X[:, val]), color='orange', linestyle='--')
        ax.axvline(np.median(X[:, val]), color='green', linestyle='--')
        ax.xaxis.major.formatter._useMathText = True
        ax.ticklabel_format(style='sci', axis='x', scilimits=(-5, 5))
        text = ('Mean and median:\n' +
                "mean$\\rightarrow$ $ {:.2uL}$\n".format(
                    ufloat(np.mean(X[:, val]), np.std(X[:, val]))) +
                "median$\\rightarrow$ $ {:.2uL}$".format(
                    ufloat(np.median(X[:, val]), sigmaG(X[:, val]))))
        finishing_string = (finishing_string + ime + " & " +
                            "$ {:.2uL}$".format(
                                ufloat(np.mean(X[:, val]),
                                       np.std(X[:, val]))) +
                            " & $ {:.2uL}$".format(
                                ufloat(np.median(X[:, val]),
                                       sigmaG(X[:, val]))) +
                            "\\\\ \n")
        finishing_list.append([ime,
                               "$ {:.2uL}$".format(
                                   ufloat(np.mean(X[:, val]),
                                          np.std(X[:, val]))),
                               " $ {:.2uL}$".format(
                                   ufloat(np.median(X[:, val]),
                                          sigmaG(X[:, val])))])
        ax.text(.65, .9, text, transform=ax.transAxes)
        count = count + 1
    plt.tight_layout()
    plt.savefig(direktorij + '/Histogrami.png')
    plt.close()
    reportwin(starting_list, finishing_string, finishing_list)
    for names0, vals0 in labele.items():
        fig = plt.figure(figsize=(30, 25))
        labele1 = deepcopy(labele)
        del labele1[names0]
        labele2 = deepcopy(labele1)
        nx = ceil(np.sqrt(len(labele1) * (len(labele1) - 1) / 2))
        ny = ceil(len(labele1) * (len(labele1) - 1) / 2 / nx)
        #fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
        counts = 1
        cmap_multicolor = plt.cm.jet
        for names, vals in labele1.items():
            del labele2[names]
            for names1, vals1 in labele2.items():
                N0, xedges0, yedges0 = binned_statistic_2d(
                    X[:, vals], X[:, vals1], X[:, labele[names0]],
                    'mean', bins=100)
                ax = fig.add_subplot(ny, nx, counts)
                im = ax.imshow(N0.T, origin='lower',
                               extent=[xedges0[0], xedges0[-1],
                                       yedges0[0], yedges0[-1]],
                               aspect='auto', interpolation='nearest',
                               cmap=cmap_multicolor)
                plt.xlim(xedges0[0], xedges0[-1])
                plt.ylim(yedges0[0], yedges0[-1])
                plt.xlabel(names, size=30)
                plt.ylabel(names1, size=30)
                ax.xaxis.major.formatter._useMathText = True
                ax.yaxis.major.formatter._useMathText = True
                ax.ticklabel_format(style='sci', axis='x', scilimits=(-5, 5))
                #m_1 = np.linspace(xedges0[0], xedges0[-1], 100)
                #m_2 = np.linspace(yedges0[0], yedges0[-1], 100)
                #MX, MY = np.meshgrid(m_1, m_2)
                #Z = sigmas(MX, np.median(X[:, vals]), sigmaG(X[:, vals]),
                #           MY, np.median(X[:, vals1]), sigmaG(X[:, vals1]))
                H, xbins, ybins = np.histogram2d(X[:, vals], X[:, vals1],
                                                 bins=100)
                Nsigma = convert_to_stdev(np.log(H))
                # Duplicate contour levels raise an error in current
                # matplotlib; draw each sigma level once
                cont = plt.contour(0.5 * (xbins[1:] + xbins[:-1]),
                                   0.5 * (ybins[1:] + ybins[:-1]),
                                   Nsigma.T, levels=[0.6827, 0.9545],
                                   colors=['.25', '0.5'], linewidths=2)
                counts = counts + 1
        cmap_multicolor.set_bad('w', 1.)
        fig.subplots_adjust(bottom=0.1)
        cbar_ax = fig.add_axes([0.1, 0.05, 0.8, 0.025])
        cb = fig.colorbar(im, cax=cbar_ax, format=r'$%.1f$',
                          orientation='horizontal')
        cb.set_label(str('$\\langle ' + names0.replace('$', '') +
                         '\\rangle $'), size=30)
        plt.savefig(direktorij + '/' +
                    ''.join([i for i in names0
                             if (i.isalpha() or i.isdigit())]) +
                    '.png', bbox_inches='tight')
        plt.close()
def limites(y):
    """Return upper and lower clipping limits at +/- 5 sigmaG of the median."""
    median_setlim = np.median(y)
    error_setlim = sigmaG(y)
    lim_sup = median_setlim + 5. * error_setlim
    lim_inf = median_setlim - 5. * error_setlim
    return lim_sup, lim_inf
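# A quick usage sketch for limites() above: because the limits are built from
# the median and sigmaG rather than mean and std, the clipping window barely
# moves when a few wild outliers are added. Assumes numpy and astroML's
# sigmaG are imported as in the snippets above.
rng = np.random.RandomState(1)
clean = rng.normal(0., 1., 1000)
dirty = np.concatenate([clean, [50., -80., 120.]])  # a few wild outliers

print(limites(clean))  # roughly (+5, -5)
print(limites(dirty))  # nearly unchanged, unlike mean +/- 5*std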
for data, label, ls in zip((X, Y, X_sample), labels, linestyles):
    g = data[:, 0]
    gr = data[:, 2]
    ri = data[:, 3]

    r = g - gr
    i = r - ri

    mask = (gr > 0.3) & (gr < 1.0)
    g = g[mask]
    r = r[mask]
    i = i[mask]

    w = -0.227 * g + 0.792 * r - 0.567 * i + 0.05
    sigma = sigmaG(w)

    # 'normed' was removed from matplotlib; use 'density' instead
    ax.hist(w, bins=np.linspace(-0.08, 0.08, 100),
            linestyle=ls, histtype='step',
            label=label + '\n\t' + r'$\sigma_G=%.3f$' % sigma,
            density=True)

ax.legend(loc=2)

ax.text(0.95, 0.95, '$w = -0.227g + 0.792r$\n$ - 0.567i + 0.05$',
        transform=ax.transAxes, ha='right', va='top', size=14)

ax.set_xlim(-0.07, 0.07)
ax.set_ylim(0, 55)

ax.set_xlabel('$w$')
ax.set_ylabel('$N(w)$')
for data, label, ls in zip((X, Y, X_sample), labels, linestyles):
    g = data[:, 0]
    gr = data[:, 2]
    ri = data[:, 3]

    r = g - gr
    i = r - ri

    mask = (gr > 0.3) & (gr < 1.0)
    g = g[mask]
    r = r[mask]
    i = i[mask]

    w = -0.227 * g + 0.792 * r - 0.567 * i + 0.05
    sigma = sigmaG(w)

    # 'normed' was removed from matplotlib; use 'density' instead
    ax.hist(w, bins=np.linspace(-0.08, 0.08, 100),
            linestyle=ls, histtype='step',
            label=label + '\n\t' + r'$\sigma_G=%.3f$' % sigma,
            density=True)

ax.legend(loc=2)

ax.text(0.95, 0.95, '$w = -0.227g + 0.792r$\n$ - 0.567i + 0.05$',
        transform=ax.transAxes, ha='right', va='top')
def runRutina04(directorio):
    # Open the file with the list of bias files
    infile = open(FICH_BIAS, 'r')
    # Open the file where the results will be written
    outfile = open("./Rut04_dat/nivel_bias_" + directorio + ".txt", "w")
    outfile.write(
        "@fichero, bias_medio, bias_mediana, bias_desvTipica, dia_juliano\n")

    # Accumulates the values of all the bias frames of one night
    biasNoche = []

    # Process each line of the file
    for line in infile:
        # Strip the trailing newline (\n)
        line = line.strip()
        # Make sure the line carries information and is not blank
        if len(line) > 0:
            # Open the bias file
            hdulist = fits.open(line)
            # Get the data array
            tbdata = hdulist[0].data
            # Get the Julian date of the bias
            date = hdulist[0].header["DATE"]
            dt = parser.parse(date)
            time = astropy.time.Time(dt)
            juldate = time.jd
            # Close the file
            hdulist.close()

            nombre = line[line.index("/") + 1:]
            media = np.mean(tbdata)
            mediana = np.median(tbdata)
            desviacion = sigmaG(tbdata)
            biasNoche.append(tbdata)
            outfile.write(nombre + "," + str(round(media, 4)) + "," +
                          str(mediana) + "," + str(round(desviacion, 4)) +
                          "," + str(round(juldate, 6)) + "\n")
    outfile.close()
    infile.close()

    # Append the median over all the bias frames of the night to the master
    # file. If the file exists open it in append mode, otherwise create it.
    if os.path.exists(FICH_MASTER):
        file = open(FICH_MASTER, "a")
    else:
        file = open(FICH_MASTER, "w")
        file.write("@juldate,bias_mediana,bias_medio,bias_desvTipica\n")
    mediana_total = np.median(biasNoche)
    media_total = np.mean(biasNoche)
    desvTipica_total = sigmaG(biasNoche)
    # Make sure the entry does not already exist; if not, write a new one
    # (np.int is deprecated; use the builtin int)
    if not existeNoche(int(juldate)):
        file.write(
            str(int(juldate)) + "," + str(round(mediana_total, 4)) + "," +
            str(round(media_total, 4)) + "," +
            str(round(desvTipica_total, 4)) + "\n")
    file.close()

    # Threshold checks: a mean bias level between 810 and 830 ADUs is
    # correct, and a read noise below 6 ADUs is also correct.
    if media_total >= 810 and media_total <= 830:
        print("... Mean BIAS level: %.2f ADUs ... OK" % (media_total))
    else:
        print("... Mean BIAS level: %.2f ADUs ... NO OK! - CHECK" %
              (media_total))
    if desvTipica_total < 6:
        print("... Mean read noise: %.2f ADUs ... OK" %
              (desvTipica_total))
    else:
        print("... Mean read noise: %.2f ADUs ... NO OK! - CHECK" %
              (desvTipica_total))
def plot_cdf(data=None, showplots=False, filename="", xlabel=None,
             plotlabel=""):
    t0 = time.time()
    ax = plt.figure(num=None, figsize=(10.0, 10.0))

    # Determine the cumulative frequency distribution by sorting values;
    # this is faster than the percentile function
    ndata = len(data)
    index = np.argsort(data, axis=None)
    median = data[index[int(ndata / 2.0)]]
    sigma_mad = mymad(data, median=median, sigma=True)
    min = data[index[0]]
    max = data[index[-1]]

    ndata, (dmin, dmax), mean, variance, skewness, kurtosis = \
        stats.describe(data, axis=None)
    sigma = math.sqrt(variance)
    print('min, max, mean, sigma, median, sigma_mad: ')
    print(min, max, mean, sigma, median, sigma_mad)

    sigmaIQ = aml.sigmaG(data)
    print('sigmaIQ: ', aml.sigmaG(data))
    print('Elapsed time(secs): ', time.time() - t0)

    q10, q90 = np.percentile(data, [10.0, 90.0])
    sigma80 = (q90 - q10) * 0.5000 * 0.7803
    print(q10, q90)
    print('sigma80: ', sigma80)

    q25, q50, q75 = np.percentile(data, [25.0, 50.0, 75.0])
    print(q25, q50, q75)

    step_pc = ndata / 100.0
    ipc = 50.0
    ipoint_pc = ipc * step_pc
    print('median: ', int(ipoint_pc), index[int(ipoint_pc)])
    print('median: ', data[index[int(ipoint_pc)]])
    print('Elapsed time(secs): ', time.time() - t0)

    range = np.linspace(0.0, 100.0, 101)
    #print 'range: ', range
    dist = np.zeros(101)
    i = -1
    for pc in range:
        i = i + 1
        dist[i] = mypercentile(data, pc, index, verbose=True, debug=False)
    print('Elapsed time(secs): ', time.time() - t0)

    plotcdf = True
    if plotcdf:
        xdata = dist
        ydata = range / 100.0
        #title = filename + '[' + str(ext) + ']'
        title = filename + ': ' + plotlabel
        ylabel = 'Cumulative frequency'
        plt.plot(xdata, ydata, 'k', color='black', markersize=1)
        plt.xlim([median - (5.0 * sigma_mad), median + (5.0 * sigma_mad)])
        plt.title(title)
        if xlabel != None:
            plt.xlabel(xlabel)
        plt.ylabel(ylabel)

    # Compute the CDF; need to check that the cdf is not off by one step by
    # reversing it back to a pdf by hand. By eye there is an offset when
    # nsteps=100, but it is not visible for nsteps=1000, so the pdf and/or
    # cdf looks shifted by one step.
    nsteps = 1000
    xmin = median - (5.0 * sigma_mad)
    xmax = median + (5.0 * sigma_mad)
    xrange = 10.0 * sigma_mad
    # Use nsteps+1 so that there is a value at the midpoint
    x = np.linspace(xmin, xmax, nsteps + 1)

    # mlab.normpdf was removed from matplotlib; scipy.stats.norm.pdf is the
    # drop-in replacement
    pdf = stats.norm.pdf(x, median, sigma_mad)
    dx = xrange / nsteps
    cdf = np.cumsum(pdf * dx)
    plt.plot(x, cdf)

    pdf = stats.norm.pdf(x, median, sigmaIQ)
    dx = xrange / nsteps
    cdf = np.cumsum(pdf * dx)
    plt.plot(x, cdf, color='green')

    pdf = stats.norm.pdf(x, median, sigma80)
    dx = xrange / nsteps
    cdf = np.cumsum(pdf * dx)
    plt.plot(x, cdf, color='red')

    basename = os.path.basename(filename)
    ext = ""
    plt.savefig(basename + '_' + str(ext) + plotlabel + '_cdf.png')
    if showplots:
        plt.show()
    plt.close()
def plot_band(data=None, colname=None, color=None, normpdf=False,
              xlimit_min=None, xlimit_max=None, xscale=None, xlabel=None,
              filename=None):
    global t0

    if color == None:
        color = 'k'

    # Determine the cumulative frequency distribution by sorting values;
    # this is faster than the percentile function
    ndata = len(data)
    index = np.argsort(data, axis=None)
    median = data[index[int(ndata / 2.0)]]
    sigma_mad = mymad(data, median=median, sigma=True)
    min = data[index[0]]
    max = data[index[-1]]

    ndata, (dmin, dmax), mean, variance, skewness, kurtosis = \
        stats.describe(data, axis=None)
    sigma = math.sqrt(variance)
    print('min, max, mean, sigma, median, sigma_mad: ')
    print(min, max, mean, sigma, median, sigma_mad)

    sigmaIQ = aml.sigmaG(data)
    print('sigmaIQ: ', aml.sigmaG(data))
    print('Elapsed time(secs): ', time.time() - t0)

    q10, q90 = np.percentile(data, [10.0, 90.0])
    sigma80 = (q90 - q10) * 0.5000 * 0.7803
    print(q10, q90)
    print('sigma80: ', sigma80)

    q25, q50, q75 = np.percentile(data, [25.0, 50.0, 75.0])
    print(q25, q50, q75)

    step_pc = ndata / 100.0
    ipc = 50.0
    ipoint_pc = ipc * step_pc
    print('median: ', int(ipoint_pc), index[int(ipoint_pc)])
    print('median: ', data[index[int(ipoint_pc)]])
    print('Elapsed time(secs): ', time.time() - t0)

    range = np.linspace(0.0, 100.0, 101)
    #print 'range: ', range
    dist = np.zeros(101)
    i = -1
    for pc in range:
        i = i + 1
        dist[i] = mypercentile(data, pc, index, verbose=True, debug=False)
    print('Elapsed time(secs): ', time.time() - t0)

    plotcdf = True
    if plotcdf:
        xdata = dist
        ydata = range / 100.0
        #title = filename + '[' + str(ext) + ']'
        title = filename
        ylabel = 'Cumulative frequency'
        plt.plot(xdata, ydata, color=color, markersize=1,
                 linestyle='-', linewidth=2)

        if xlimit_min == None:
            xlimit_min = median - (5.0 * sigma_mad)
        if xlimit_max == None:
            xlimit_max = median + (5.0 * sigma_mad)

        ax = plt.figtext(0.7, 0.4, 'plt.figtext: Hello World')
        print('Default font size ', ax.get_size())
        #ax.set_size(ax.get_size() * 2.0)
        #print('Default font size ', ax.get_size())
        #plt.figtext(0.5, 0.5, 'Font size: ' + str(plt.get_size()))

        plt.xlim([xlimit_min, xlimit_max])
        if xscale:
            plt.xscale('log')
        plt.title(title)
        if xlabel != None:
            plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        #plt.tick_params(axis='both', which='major', labelsize=10)
        #plt.tick_params(axis='both', which='minor', labelsize=8)
        #plt.xlim(plot_xlimits)

    # Compute the CDF; need to check that the cdf is not off by one step by
    # reversing it back to a pdf by hand. By eye there is an offset when
    # nsteps=100, but it is not visible for nsteps=1000, so the pdf and/or
    # cdf looks shifted by one step.
    if normpdf:
        nsteps = 1000
        xmin = median - (5.0 * sigma_mad)
        xmax = median + (5.0 * sigma_mad)
        xrange = 10.0 * sigma_mad
        # Use nsteps+1 so that there is a value at the midpoint
        x = np.linspace(xmin, xmax, nsteps + 1)

        # mlab.normpdf was removed from matplotlib; scipy.stats.norm.pdf is
        # the drop-in replacement
        pdf = stats.norm.pdf(x, median, sigma_mad)
        dx = xrange / nsteps
        cdf = np.cumsum(pdf * dx)
        plt.plot(x, cdf)

        pdf = stats.norm.pdf(x, median, sigmaIQ)
        dx = xrange / nsteps
        cdf = np.cumsum(pdf * dx)
        plt.plot(x, cdf, color='green')

        pdf = stats.norm.pdf(x, median, sigma80)
        dx = xrange / nsteps
        cdf = np.cumsum(pdf * dx)
        plt.plot(x, cdf, color='red')
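# The two plotting routines above overlay Gaussian CDFs built from three
# robust width estimators: sigma from the MAD, sigmaG from the interquartile
# range, and sigma80 from the 10-90 percentile range. A standalone sketch of
# why the constants agree for Gaussian data; mymad is the author's helper, so
# the MAD is computed inline here (an assumption, not the original code), and
# aml is assumed to be astroML.stats:
import numpy as np
from astroML import stats as aml

rng = np.random.RandomState(0)
data = rng.normal(loc=10.0, scale=3.0, size=200000)

mad = np.median(np.abs(data - np.median(data)))
sigma_mad = 1.4826 * mad                       # 1.4826 = 1 / Phi^-1(0.75)
sigmaIQ = aml.sigmaG(data)                     # 0.7413 * (q75 - q25)
q10, q90 = np.percentile(data, [10.0, 90.0])
sigma80 = (q90 - q10) * 0.5 * 0.7803           # 0.7803 = 1 / Phi^-1(0.90)

print(sigma_mad, sigmaIQ, sigma80)             # all ~3.0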