Example #1
def plotBar(data=None,color_id=None,figure_id=None,name=None,flag=False):
	ax = pl.subplot(figure_id)
	width = 0.8
	x=sp.arange(7)
	if not (name=="VaribenchSelected"):
		pl.bar(x-0.4,data,width=width,color=color_t[color_id],hatch="/o/o/")
	else:
		pl.bar(x-0.4,data,width=width,color=color_t[color_id],hatch="ooo")

	tmp = data.copy()
	tmp[1::] = 0
	pl.xticks(x,['All','Pure',']0.0,1.0[','[0.1,0.9]','[0.2,0.8]','[0.3,0.7]','[0.4,0.6]'],fontsize=font_size,rotation=90)
	ln = sp.log10(len(name))
	pl.text(3.5-ln,0.95,name)
	if flag:
		remove_border(left=False)
		pl.yticks([0.5,0.6,0.7,0.8,0.9,1.0])
		pl.grid(axis='y')
		pl.tick_params(axis='y',which="both",labelleft='off',left='off')
	else:
		pl.ylabel("AUC")
		remove_border()
		pl.yticks([0.5,0.6,0.7,0.8,0.9,1.0])
		pl.grid(axis='y')
	pl.ylim(0.5,1)
	pl.xlim(-0.5,7.5)
	return ax
Example #2
def plotPathDistribution(g,outputFolder,shown=False):
	#get the raw histogram, then normalize the data to be a probability distribution
	#hist = g.path_length_hist()
	#print(hist)
	xs, ys = zip(*[(int(left), count) for left, _, count in g.path_length_hist(directed=g.is_directed()).bins()])

	#normalize the y values to make a probability distribution
	total = 0
	for ct in ys:
		total += ct
	normalized = [(float(ys[i]) / float(total)) for i in range(0,len(ys))]
	ys = tuple(normalized)
	#print("normalized ys: ",ys)

	pylab.text(0,0,"SOME TEXT")
	pylab.axis([0,xs[-1]+1,0.0,max(ys)+0.05])
	pylab.bar(xs, ys,width=1.0)
	#pylab.axis([0,xs[-1],0.0,ys[-1]])
	#pylab.xlim(0,max(max(xs),1))
	pylab.title("path-length probability distribution")
	pylab.xlabel("path length")
	pylab.ylabel("Px")
	if outputFolder[-1] != "/":
		outputFolder += "/"
	pylab.savefig(outputFolder+"PathLengthDistribution.png")

	if shown:
		pylab.show()
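The normalization loop above simply divides each bin count by the total; a more compact equivalent using numpy (a sketch, assuming ys holds the raw bin counts as returned above):

import numpy as np
counts = np.asarray(ys, dtype=float)
ys = tuple(counts / counts.sum())  # probabilities summing to 1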
Example #3
def barGraph(data, **kw):
    """Draws a bar graph for the given data"""
    import pylab
    from pylab import bar
    kw.setdefault('barw', 0.5)
    kw.setdefault('log', 0)
    kw.setdefault('color', 'blue')
    xs = [i+1 for i, row in enumerate(data)]
    names, ys = zip(*data)
    names = [n.replace('_', '\n') for n in names]
    #print 'got xs %s, ys %s, names %s' % (xs, ys, names)
    bar(xs, ys, width=kw['barw'], color=kw['color'], align='center', log=kw['log'])
    ax = pylab.gca()
    def f(x, pos=None):
        n = int(x) - 1
        if n+1 != x: return ''
        if n < 0: return ''
        try:
            return names[n]
        except IndexError: return ''

    ax.xaxis.set_major_formatter(pylab.FuncFormatter(f))
    ax.xaxis.set_major_locator(pylab.MultipleLocator(1))
    ax.set_xlim(0.5, len(names)+0.5)
    for l in ax.get_xticklabels():
        pylab.setp(l, rotation=90)
    start = 0.08, 0.18
    pos = (start[0], start[1], 0.99-start[0], 0.95-start[1])
    ax.set_position(pos)
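A hedged usage sketch for barGraph; the sample data below and the module-level pylab import are assumptions, not part of the original example:

import pylab
data = [('spam_eggs', 12), ('ham', 7), ('toast', 3)]  # (name, value) pairs; underscores become line breaks
barGraph(data, color='green')
pylab.show()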
Example #4
def plot_size_and_time(file_list, time_list):
    sizes = []
    times = []
    labels = []
    for key in file_list.keys():
        total_size = 0
        total_time = 0
        for f in file_list[key]:
            total_size += os.path.getsize(f)        
        for t in time_list[key]:
            total_time += t
        sizes.append(total_size)
        times.append(total_time)
        labels.append(key)
        
    pylab.subplot(211)
    width = 0.5
    xlocations = numpy.array(range(len(labels))) + width
    pylab.bar(xlocations, sizes, width=width)
    ticklabels = ['%s (%g MB)' % (labels[ii], sizes[ii]/2**20) for ii in range(len(labels))]
    pylab.xticks(xlocations + width/2.0, ticklabels)
    pylab.title('File sizes')

    pylab.subplot(212)
    pylab.bar(xlocations, times, width=width)
    ticklabels = ['%s (%g s)' % (labels[ii], times[ii]) for ii in range(len(labels))]
    pylab.xticks(xlocations + width/2.0, ticklabels)
    pylab.title('Time to process')

    pylab.show()
Example #5
def plotDegreeDistribution(g,outputFolder,shown=False):
	#get the raw histogram, then normalize the data to be a probability distribution
	dist = g.degree_distribution()
	xs, ys = zip(*[(left, count) for left, _, count in dist.bins()])

	#normalize the y values to make a probability distribution
	total = 0
	for ct in ys:
		total += ct
	normalized = [(float(ys[i]) / float(total)) for i in range(0,len(ys))]
	ys = tuple(normalized)
	#print("normalized ys: ",ys)

	df = open(outputFolder+"/DegreeDistributionHist.txt","w+")
	df.write(str(dist))
	df.close()
	
	print("max degree is: "+str(max(xs)))
	
	pylab.axis([0,xs[-1]+1,0.0,max(ys)+0.05])
	pylab.bar(xs, ys,width=1.0)
	pylab.title("vertex degree probability distribution")
	pylab.xlabel("degree")
	pylab.ylabel("Px")
	if outputFolder[-1] != "/":
		outputFolder += "/"
	pylab.savefig(outputFolder+"DegreeDistribution.png")
	if shown:
		pylab.show()
Example #6
def plot_stable_features(X_train,y_train,featnames,**kwargs):
    from sklearn.linear_model import LassoLarsCV,RandomizedLasso

    n_resampling = kwargs.pop('n_resampling',200)
    n_jobs = kwargs.pop('n_jobs',-1)
    
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        # estimate alphas via xvalidation 
        lars_cv = LassoLarsCV(cv=6,n_jobs=n_jobs).fit(X_train,y_train)        
        alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)

        clf = RandomizedLasso(alpha=alphas, random_state=42, n_jobs=n_jobs,
                              n_resampling=n_resampling)
        clf.fit(X_train,y_train)
        importances = clf.scores_ 
        indices = np.argsort(importances)[::-1]

        pl.bar(range(len(featnames)), importances[indices],
               color="r", align="center")
        pl.xticks(np.arange(len(featnames))+0.5,featnames[indices],
                  rotation=45,horizontalalignment='right')
        pl.xlim(-0.5,len(featnames)-0.5)
        pl.subplots_adjust(bottom=0.2)
        
        pl.ylim(0,np.max(importances)*1.01)
        pl.ylabel('Selection frequency (%) for %d resamplings '%n_resampling)
        pl.title("Stability Selection: Selection Frequencies")
Example #7
def plot_importances(clf,featnames,outfile,**kwargs):

    pl.figure(figsize=(16,4))

    featnames = np.array(featnames)
    importances = clf.feature_importances_
    imp_std = np.std([tree.feature_importances_ for tree in clf.estimators_],
                     axis=0)
    indices = np.argsort(importances)[::-1]

    #for featname in featnames[indices]:
    #    print featname

    trunc_featnames = featnames[indices]
    trunc_featnames = trunc_featnames[0:24]
    trunc_importances = importances[indices]
    trunc_importances = trunc_importances[0:24]
    trunc_imp_std = imp_std[indices]
    trunc_imp_std = trunc_imp_std[0:24]

    pl.bar(range(len(trunc_featnames)), trunc_importances,
           color="r", yerr=trunc_imp_std, align="center")
    pl.xticks(np.arange(len(trunc_featnames))+0.5,trunc_featnames,rotation=45,
              horizontalalignment='right')
    pl.xlim(-0.5,len(trunc_featnames)-0.5)
    pl.ylim(0,np.max(trunc_importances+trunc_imp_std)*1.01)

#    pl.bar(range(len(featnames)), importances[indices],
#           color="r", yerr=imp_std[indices], align="center")
#    pl.xticks(np.arange(len(featnames))+0.5,featnames[indices],rotation=45,
#              horizontalalignment='right')
#    pl.xlim(-0.5,len(featnames)-0.5)
#    pl.ylim(0,np.max(importances+imp_std)*1.01)
    pl.subplots_adjust(bottom=0.2)
    pl.show()
Example #8

def plot(xdata, ydata, std, title, xlabel, ylabel, label, color, alpha, miny, maxy, num=1):
    import matplotlib

    # matplotlib.use('Agg')
    import pylab
    import matplotlib.font_manager

    # all goes to figure num
    pylab.figure(num=num, figsize=(9.5, 9))
    pylab.gca().set_position([0.10, 0.20, 0.85, 0.60])
    # let the plot have fixed y-axis scale
    ywindow = maxy - miny
    # pylab.gca().set_ylim(miny, maxy+ywindow/5.0)
    pylab.gca().set_ylim(miny, maxy)
    # pylab.plot(xdata, ydata, 'b.', label=label, color=color)
    # pylab.plot(xdata, ydata, 'b-', label='_nolegend_', color=color)
    pylab.bar(xdata, ydata, 0.9, label=label, color=color, alpha=alpha)
    t = pylab.title(title)
    # http://old.nabble.com/More-space-between-title-and-secondary-x-axis-td31722298.html
    t.set_y(1.05)
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    prop = matplotlib.font_manager.FontProperties(size=12)
    leg = pylab.legend(loc="upper right", fancybox=True, prop=prop)
    leg.get_frame().set_alpha(0.5)
Example #9
def _make_var_histogram(values, logp, nbins, ci, weights):
    # Produce a histogram
    hist, bins = np.histogram(values, bins=nbins, range=ci,
                              density=True, weights=weights)

    # Find the max likelihood for values in each bin
    edges = np.searchsorted(values, bins)
    histbest = [np.max(logp[edges[i]:edges[i+1]])
                if edges[i] < edges[i+1] else -inf
                for i in range(nbins)]

    # scale to marginalized probability with peak the same height as hist
    histbest = np.exp(np.asarray(histbest) - max(logp)) * np.max(hist)

    import pylab
    # Plot the histogram
    pylab.bar(bins[:-1], hist, width=bins[1]-bins[0])

    # Plot the kernel density estimate
    #density = KDE1D(values)
    #x = linspace(bins[0],bins[-1],100)
    #pylab.plot(x, density(x), '-k', hold=True)

    # Plot the marginal maximum likelihood
    centers = (bins[:-1]+bins[1:])/2
    pylab.plot(centers, histbest, '-g')
Example #10
def show_char_use(uchars, ucount):
	"""
	Plot spread of characters used in different sets:
	- digits
	- lowercase
	- uppercase
	- symbols (not alphanumeric)
	"""

	# Symbols are all printable characters minus alphanumerics
	charsymbols = "".join(set.difference(set(string.printable), set(string.digits+string.ascii_letters)))

	charsets = [string.digits, string.ascii_lowercase, string.ascii_uppercase, charsymbols]
	charsetnames = ['digits', 'lowercase', 'uppercase', 'symbols']

	for idx, (cs, csn) in enumerate(zip(charsets, charsetnames)):
		# Select charset subset
		thischars = [i for i in uchars if i in cs]
		thiscount = [c for i, c in zip(uchars, ucount) if i in cs]
		thiscountn = [t/(1.0*sum(thiscount)) for t in thiscount]
		if (HAVE_PYLAB):
			pylab.figure(100+idx);
			pylab.title("Spread of %s" % csn)
			thisidx = numpy.arange(len(thiscount))
			pylab.bar(thisidx-0.4, thiscountn)
			pylab.xticks(thisidx, thischars)
		else:
			print "Spread of %s" % csn
			for c, n in zip(thischars, thiscountn):
				# There are N=len(thischars) characters in this set,
				# so on average each occurs 1/N times. A terminal window
				# is 80 chars wide, which we equate to 4/N.
				bar = "="*int(round(70.0/4.0*n*len(thischars)))
				print " %s %2.0f %s" % (c, n*100, bar)
Example #11
def plotHousing(impression):
    """假设impression是一个字符串,必须是‘flat’, ‘volatile’或者是‘fair’
       生成房价随时间变化的图表"""
    f = open("midWestHousingPrices.txt", "r")
    # Each line of the file gives year, quarter, and price
    # The data are for the U.S. Midwest region
    labels, prices = ([], [])
    for line in f:
        year, quarter, price = line.split(" ")
        label = year[2:4] + "\n Q" + quarter[1]
        labels.append(label)
        prices.append(float(price) / 1000)
    quarters = pylab.arange(len(labels))
    width = 0.8
    if impression == "flat":
        pylab.semilogy()
    pylab.bar(quarters, prices, width)
    pylab.xticks(quarters + width / 2.0, labels)
    pylab.title("Housing Prices in U.S. Midwest")
    pylab.xlabel("Quarter")
    pylab.ylabel("Average Price($1,000's)")
    if impression == "flat":
        pylab.ylim(10, 10 ** 3)
    elif impression == "volatile":
        pylab.ylim(180, 220)
    elif impression == "fair":
        pylab.ylim(150, 250)
    else:
        raise ValueError
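A minimal usage sketch for plotHousing (it assumes a midWestHousingPrices.txt file whose lines look like "2008 Q2 175000", matching the parsing above, and a module-level pylab import):

plotHousing('fair')
pylab.show()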
Example #12
def reaction_times_first_step(sessions):
    median_reaction_times = np.zeros([len(sessions),4])
    all_reaction_times = []
    for i,session in enumerate(sessions):
        event_times = ut.get_event_times(session.time_stamps, session.event_codes, session.IDs)
        ITI_start_times = event_times['ITI_start']
        center_poke_times = sorted(np.hstack((event_times['high_poke'], event_times['low_poke'])))
        reaction_times = 1000 * _latencies(ITI_start_times,  center_poke_times)[1:-1]
        all_reaction_times.append(reaction_times)
        transitions = (session.blocks['trial_trans_state'] == session.CTSO['transitions'])[:len(reaction_times)] # Transitions common/rare.
        outcomes = session.CTSO['outcomes'][:len(reaction_times)].astype(bool)
        median_reaction_times[i, 0] = np.median(reaction_times[ transitions &  outcomes])  # Common transition, rewarded.
        median_reaction_times[i, 1] = np.median(reaction_times[~transitions &  outcomes])  # Rare transition, rewarded.
        median_reaction_times[i, 2] = np.median(reaction_times[ transitions & ~outcomes])  # Common transition, non-rewarded.
        median_reaction_times[i, 3] = np.median(reaction_times[~transitions & ~outcomes])  # Rare transition, non-rewarded.
    mean_RTs = np.mean(median_reaction_times,0)
    SEM_RTs  = np.sqrt(np.var(median_reaction_times,0)/len(sessions))
    p.figure(1)
    p.clf()
    p.title('First step reaction times')
    p.bar([1,2,3,4], mean_RTs, yerr = SEM_RTs)
    p.ylim(min(mean_RTs) * 0.8, max(mean_RTs) * 1.1)
    p.xticks([1.4, 2.4, 3.4, 4.4], ['Com. Rew.', 'Rare Rew.', 'Com. Non.', 'Rare. Non.'])
    p.xlim(0.8,5)
    p.ylabel('Reaction time (ms)')
    all_reaction_times = np.hstack(all_reaction_times)
    bin_edges = np.arange(0,3001)
    rt_hist = np.histogram(all_reaction_times, bin_edges)[0]
    cum_rt_hist = np.cumsum(rt_hist) / float(len(all_reaction_times))
    p.figure(2)
    p.clf()
    p.plot(bin_edges[:-1],cum_rt_hist)
    p.ylim(0,1)
    p.xlabel('Time from ITI start (ms)')
    p.ylabel('Cumulative fraction of first central pokes.')
Example #13
def IPvis(fName):
	cIP=IPread(fName)
	med=[str(xx) for xx in flatten(cIP)]
	slash=['/24','/23','/22','/21','/20','/19','/18','/17','/16']
	l=len(slash)
	m=[0]*l
	s=[0]*l
	for i,w in enumerate(slash):
		ld=IPnet(cIP,w)
		#~ ld=IPprune(ld,2)
		v=IPquant(ld)
		st,m[i],s[i]=netStat(v)
	wid=.3
	ind=range(l)
	pl.bar(ind,m,color='b',yerr=s,label='213.87.0.0',width=wid,align='center')
	pl.xlabel('Subnet',fontsize=20)
	pl.ylabel('Number of existing subnets',fontsize=20)
	pl.xticks(ind, slash)
	pl.title('/16 network subnets',fontsize=20)
	q1='Total number of unique IPs :'+str(len(set(med)))
	q2='Total number of tests :'+str(len(med))
	pl.text(l/3,max(m)-(max(m)-min(m))/10,q1)
	pl.text(l/3,max(m)-(max(m)-min(m))/7,q2)
	pl.legend()
	pl.show()
Example #14
def plotFeatureImportance(featureImportance, title, originalImage=None, lim=0.06, colorate=None):
    """
    originalImage : the index of the original image. If None, ignore
    """
    indices = featureImportanceIndices(len(featureImportance), originalImage)
    pl.figure()
    pl.title(title)
    if colorate is not None:
        nbType = len(colorate)
        X = [[] for i in range(nbType)]
        Y = [[] for i in range(nbType)]
        for j, f in enumerate(featureImportance):
            X[j % nbType].append(j)
            Y[j % nbType].append(f)
        for i in range(nbType):
            pl.bar(X[i], Y[i], align="center", label=colorate[i][0], color=colorate[i][1])
        pl.legend()
    else:
        pl.bar(range(len(featureImportance)), featureImportance, align="center")
    #pl.xticks(pl.arange(len(indices)), indices, rotation=-90)
    pl.xlim([-1, len(indices)])
    pl.ylabel("Feature importance")
    pl.xlabel("Filter indices")
    pl.ylim(0, lim)
    pl.show()
Example #15
 def histPlot(self,data,name,count,opdir,type=None):
   '''
   Plot a histogram of data
 
   Parameters
   -----------
 
   data : numpy dataset
   name : plot title
   count : plot number 
   opdir : None (no save) or filename to save png image to
   type: unused
 
   '''
   try:
     hist,bins = np.histogram(self.fix(data[data>0],iter=0),bins=50)
     width = 0.7*(bins[1]-bins[0])
     center = (bins[:-1]+bins[1:])/2
     fig = plt.figure(count);plt.clf();count+=1
     plt.bar(center,hist,align='center',width=width)
     plt.title(name)
     if opdir:
       fig.savefig('%s/hist_%s_%d.png'%(opdir,name.replace(' ','_'),count))
     else:
       fig.show()
   except:
     pass
Example #16
 def plotSpectrum(self,spectrum,title):
     fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
     index, bar_width = spectrum.index.values,0.2
     for i in range(spectrum.shape[1]):
         plt.bar(index + i*bar_width, spectrum.iloc[:, i].values, bar_width, color=mpl.cm.jet(1.*i/spectrum.shape[1]), label=spectrum.columns[i])
     plt.xlabel('Allele') ;plt.xticks(index + 3*bar_width, index) ;plt.legend();
     plt.title('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSize); self.pdf.savefig(fig);self.fignumber+=1
Example #17

def plotCoeff(X, y, obj, featureNames, whichReg):
    """ Plot Regression's Coeff
    """
    clf = classifiers[whichReg]
    clf,_,_ = fitAlgo(clf, X,y, opt= True, param_dict = param_dist_dict[whichReg])
    if whichReg == "LogisticRegression":
    	coeff = np.absolute(clf.coef_[0])
    else:
    	coeff = np.absolute(clf.coef_)
    print(coeff)
    indices = np.argsort(coeff)[::-1]
    print(indices)
    print(featureNames)
    featureList = []
    num_features = len(featureNames)
    print("Feature ranking:")
    for f in range(num_features):
        featureList.append(featureNames[indices[f]])
        print("%d. feature %s (%.2f)" % (f, featureNames[indices[f]], coeff[indices[f]]))
    fig = pl.figure(figsize=(8,6),dpi=150)
    pl.title("Feature importances",fontsize=30)
    # pl.bar(range(num_features), coeff[indices],
    #         yerr = std_importance[indices], color=paired[0], align="center",
    #         edgecolor=paired[0],ecolor=paired[1])
    pl.bar(range(num_features), coeff[indices], color=paired[0], align="center",
            edgecolor=paired[0],ecolor=paired[1])
    pl.xticks(range(num_features), featureList, size=15,rotation=90)
    pl.ylabel("Importance",size=30)
    pl.yticks(size=20)
    pl.xlim([-1, num_features])
    # fix_axes()
    pl.tight_layout()
    save_path = 'plots/'+obj+'/'+whichReg+'_feature_importances.pdf'
    fig.savefig(save_path)
Example #18

def editIncreaseProbabilityByBinnedStartingEdits(cleanData):
	"""bear with me on this one.... 
this function plots the chance an article has more edits after being posted vs the starting edits.
code below isn't pretty, but the chart it produces is."""
	cleanData = sort(cleanData, 'editsB')
	editsB = [d['editsB'] for d in cleanData]
	colors = ["#3366FF","#6633FF","#CC33FF","#FF33CC","#FF3366","#FF6633","#FFCC33","#CCFF33","#66FF33","#33FF66","#33FFCC","#33CCFF","#003DF5","#002EB8","#F5B800","#B88A00"]
	numBins = 10
	moreThanBins = {}
	for i in range(1, numBins+1):
		low = len(editsB)*(i-1)//numBins
		high = len(editsB)*i//numBins - 1
		binData = cleanData[low:high]
		CDF = Cdf.MakeCdfFromList([d['deltaNorm'] for d in binData])
		sortedComparison = [d['moreEdits'] for d in binData]
		moreThanBins[editsB[low]] = scipy.mean(sortedComparison)
	pylab.clf()
	for i, (edits, prob) in enumerate(moreThanBins.items()):
		pylab.bar(edits, prob, color=colors[i], width=4)
	pylab.axis([0,200,0,1])
	pylab.xlabel(r'$edits_{beforePosting}$', fontsize=16)
	pylab.ylabel(r'chance of an item having more ${edits}_{afterPosting}$', fontsize=16)
	pylab.title('starting edits vs. edit increase probability', fontsize=24)
	pylab.savefig('editIncreaseProbabilityByBinnedStartingEdits.png')
Example #19
def command(args):
    from pylab import bar, yticks, subplots_adjust, show
    from numpy import arange

    import sr.tools.bom.bom as bom
    import sr.tools.bom.parts_db as parts_db

    db = parts_db.get_db()
    m = bom.MultiBoardBom(db)
    m.load_boards_args(args.arg)
    m.prime_cache()

    prices = []

    for srcode, pg in m.items():
        if srcode == "sr-nothing":
            continue

        prices.append((srcode, pg.get_price()))

    prices.sort(key=lambda x: x[1])

    bar(0, 0.8, bottom=range(0, len(prices)), width=[x[1] for x in prices],
        orientation='horizontal')

    yticks(arange(0, len(prices)) + 0.4, [x[0] for x in prices])

    subplots_adjust(left=0.35)

    show()
Example #20
def plot_cumulative_score(smod,
                          seqs,
                          size=(6, 2),
                          fname=None):
    """plot_cumulative_score."""
    sig = cumulative_score(seqs, smod)
    plt.figure(figsize=size)
    sigp = np.copy(sig)
    sigp[sigp < 0] = 0
    plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g')
    sign = np.copy(sig)
    sign[sign >= 0] = 0
    plt.bar(range(len(sign)), sign, alpha=0.3, color='r')
    plt.grid()
    plt.xlabel('Position')
    plt.ylabel('Importance score')
    if fname:
        plt.draw()
        figname = '%s_importance.png' % (fname)
        plt.savefig(
            figname, bbox_inches='tight', transparent=True, pad_inches=0)
    else:
        figname = None
        plt.show()
    plt.close()
    return figname
Example #21
    def test_varwald_sample(self):
        pars={'A':1.0, 'b':2.0, 'v':.5, 'ter':.1}
        acc=VarWaldAccumulator( theta=pars['ter'],
                                gamma=pars['v'],
                                alpha=pars['b'],
                                A=pars['A'])
        nsamples=100000
        x=np.linspace(0,10, nsamples)

        import pylab as pl
        samp=acc.sample(nsamples)
        #dens=scipy.stats.gaussian_kde(samp[samp<10])
        #pl.hist(acc.sample(nsamples),200, normed=True)
        h,hx=np.histogram(samp, density=True, bins=1000)

        hx=hx[:-1]+(hx[1]-hx[0])/2.
        assert np.all(np.abs(h-acc.pdf(hx))<0.5)

        if True:
            #pl.subplot(2,1,1)
            #pl.hist(samp[samp<10],300, normed=True, alpha=.3)
            pl.bar(hx,h, width=hx[1]-hx[0], alpha=.3)
            pl.title(str(pars))
            #pl.xlim(0,3)

            #pl.subplot(2,1,2)
            pl.plot(x,acc.pdf(x), color='red', label='analytical')
            #pl.plot(x,dens(x),    color='green', label='kde')
            pl.legend()
            self.savefig()
Example #22
    def test_wald_sample(self):
        acc=ShiftedWaldAccumulator(.2, .2, 2.0)
        nsamples=100000
        x=np.linspace(0,10, nsamples)
        
        import pylab as pl
        samp=acc.sample(nsamples)
        #dens=scipy.stats.gaussian_kde(samp[samp<10])

        pl.hist(acc.sample(nsamples), 200, density=True)
        h,hx=np.histogram(samp, density=True, bins=1000)
        hx=hx[:-1]+(hx[1]-hx[0])/2.
        #assert np.all(np.abs(h-acc.pdf(hx))<1.5)

        # kolmogoroff smirnov tests whether samples come from CDF
        D,pv=scipy.stats.kstest(samp, acc.cdf)
        print(D, pv)
        assert pv>.05, "D=%f,p=%f"%(D,pv)
        if True:
            pl.clf()
            #pl.subplot(2,1,1)
            #pl.hist(samp[samp<10],300, normed=True, alpha=.3)


            #pl.subplot(2,1,2)
            pl.bar(hx, h, alpha=.3, width=hx[1]-hx[0])
            pl.plot(x,acc.pdf(x), color='red', label='analytical')
            #pl.plot(x,dens(x),    color='green', label='kde')
            pl.xlim(0,3)
            pl.legend()
            self.savefig()
Example #23

def plot_importances(imp, clfName, obj):
    imp=np.vstack(imp)
    print(imp)
    mean_importance = np.mean(imp,axis=0)
    std_importance = np.std(imp,axis=0)
    indices = np.argsort(mean_importance)[::-1]
    print(indices)
    print(featureNames)
    featureList = []
    num_features = len(featureNames)
    print("Feature ranking:")
    for f in range(num_features):
        featureList.append(featureNames[indices[f]])
        print("%d. feature %s (%.2f)" % (f, featureNames[indices[f]], mean_importance[indices[f]]))
    fig = pl.figure(figsize=(8,6),dpi=150)
    pl.title("Feature importances",fontsize=30)
    pl.bar(range(num_features), mean_importance[indices],
            yerr = std_importance[indices], color=paired[0], align="center",
            edgecolor=paired[0],ecolor=paired[1])
    pl.xticks(range(num_features), featureList, size=15,rotation=90)
    pl.ylabel("Importance",size=30)
    pl.yticks(size=20)
    pl.xlim([-1, num_features])
    # fix_axes()
    pl.tight_layout()
    save_path = 'plots/'+obj+'/'+clfName+'_feature_importances.pdf'
    fig.savefig(save_path)
Example #24
def check_distribution_nball():
    
    dim =3
    n_measure_sets = 2**dim
    measure_sets = [ (k,) for k in range(n_measure_sets) ]
    
    region = lambda x: l2norm(x)<=1.0
    
    a = array([ random.normalvariate(0.5, .2 ) for k in range(dim) ])
    drift = lambda t: array([ random.normalvariate(0.5, .2 ) for k in range(dim) ])
    f = lambda x : sum([(2.0**k)*a for k,a in enumerate([xi>=0 for xi in x])])
    
    dt = 0.03
    x = zeros(dim)
    n_samples = 2000
    
    distribution, distribution_nocom = hitting_value_distribuion( n_samples, measure_sets, x, f, dt, drift, region )
    
    for k in distribution:
        print(k)
    
    print('sum:', sum(distribution))
    
    print('\n', mean(distribution), var(distribution), sig_m(distribution))
    
    import pylab as p
    print('\n\n')
    
    left = range(len(distribution))
    
    p.bar(left, distribution, 1 )
    p.plot(left, distribution_nocom, 'ro' )
    p.show()
Example #25
def plot_question(fname, question_text, data):
    import pylab
    import numpy as np
    from matplotlib.font_manager import FontProperties
    from matplotlib.text import Text
    pylab.figure().clear()
    pylab.title(question_text)
    #pylab.xlabel("Verteilung")
    #pylab.subplot(101)
    if True or len(data) < 3:
        width = 0.95
        pylab.bar(range(len(data)), [max(y, 0.01) for x, y in data], 0.95, color="g")
        pylab.xticks([i+0.5*width for i in range(len(data))], [x for x, y in data])
        pylab.yticks([0, 10, 20, 30, 40, 50])
        #ind = np.arange(len(data))
        #pylab.bar(ind, [y for x, y in data], 0.95, color="g")
        #pylab.ylabel("#")
        #pylab.ylim(ymax=45)
        #pylab.ylabel("Antworten")
        #pylab.xticks(ind+0.5, histo.get_ticks())
        #pylab.legend(loc=3, prop=FontProperties(size="smaller"))
        ##pylab.grid(True)
    else:
        pylab.pie([max(y, 0.1) for x, y in data], labels=[x for x, y in data], autopct="%.0f%%")
    pylab.savefig(fname, format="png", dpi=75)
Example #26
def plotDirections(aabb=(),mask=0,bins=20,numHist=True,noShow=False,sphSph=False):
	"""Plot 3 histograms for distribution of interaction directions, in yz,xz and xy planes and
	(optional but default) histogram of number of interactions per body. If sphSph only sphere-sphere interactions are considered for the 3 directions histograms.

	:returns: If *noShow* is ``False``, displays the figure and returns nothing. If *noShow*, the figure object is returned without being displayed (works the same way as :yref:`yade.plot.plot`).
	"""
	import pylab,math
	from yade import utils
	for axis in [0,1,2]:
		d=utils.interactionAnglesHistogram(axis,mask=mask,bins=bins,aabb=aabb,sphSph=sphSph)
		fc=[0,0,0]; fc[axis]=1.
		subp=pylab.subplot(220+axis+1,polar=True);
		# 1.1 makes small gaps between values (but the column is a bit decentered)
		pylab.bar(d[0],d[1],width=math.pi/(1.1*bins),fc=fc,alpha=.7,label=['yz','xz','xy'][axis])
		#pylab.title(['yz','xz','xy'][axis]+' plane')
		pylab.text(.5,.25,['yz','xz','xy'][axis],horizontalalignment='center',verticalalignment='center',transform=subp.transAxes,fontsize='xx-large')
	if numHist:
		pylab.subplot(224,polar=False)
		nums,counts=utils.bodyNumInteractionsHistogram(aabb if len(aabb)>0 else utils.aabbExtrema())
		avg=sum([nums[i]*counts[i] for i in range(len(nums))])/(1.*sum(counts))
		pylab.bar(nums,counts,fc=[1,1,0],alpha=.7,align='center')
		pylab.xlabel('Interactions per body (avg. %g)'%avg)
		pylab.axvline(x=avg,linewidth=3,color='r')
		pylab.ylabel('Body count')
	if noShow: return pylab.gcf()
	else:
		pylab.ion()
		pylab.show()
Example #27
def check_distribution():
    
    f = lambda x: math.atan2(x[0],x[1])
    region=lambda x: l2norm(x)<=1.0
    
    drift=lambda t: array((0.7, 0.5 ))
    
    n_samples = 10000
    
    n_measure_sets = 16
    measure_sets = [ interval( 2.*pi*k/n_measure_sets-pi, 2.*pi*(k+1)/n_measure_sets-pi ) for k in range(n_measure_sets) ]
    
    
    x = (0.3, 0. )
    dt = 0.02
    
    distribution, distribution_nocom = hitting_value_distribuion( n_samples, measure_sets, x, f, dt, drift, region )
    
    for k in distribution:
        print(k)
    
    print('sum:', sum(distribution))
    
    print('\n', mean(distribution), var(distribution), sig_m(distribution))
    
    
    
    import pylab as p
    print('\n\n')
    
    left = [  (2.*k/n_measure_sets-1)*pi  for k in range(n_measure_sets) ]
    
    p.bar(left, distribution, 2.*pi/n_measure_sets )
    p.plot(left, distribution_nocom, 'ro' )
    p.show()
Example #28
def bar_plot_1(data):
    """Generates bar plot from data"""
    x_labels=[a for (a,b) in data]
    y_data=[b for (a,b) in data]
    # Create chart
    pos=range(1,len(x_labels)+1)
    P.figure(1,figsize=(11,7))
    P.bar(left=pos,height=y_data,log=True,width=.6,color="lightgrey",edgecolor="#8094B6")
    pos2=[a+.3 for a in pos]
    P.xticks(pos2,x_labels)
    P.title("Evolution of network data size over time",fontsize="x-large")
    P.xlabel("Network data sets (year published)",fontsize="large")
    P.ylabel("Number of vertices [log(N)]",fontsize="large")
    text_color="black"
    for i in range(len(y_data)):
        if i<2:
            P.text(pos[i]+0.01,y_data[i]+5,int_to_scinot(y_data[i]),color=text_color)
        elif i==2:
            P.text(pos[i]+0.01,y_data[i]+100,int_to_scinot(y_data[i]),color=text_color)
        elif i==3:
            P.text(pos[i]+0.01,y_data[i]+1000,int_to_scinot(y_data[i]),color=text_color)
        elif i==4:
            P.text(pos[i]+0.01,y_data[i]+100000,int_to_scinot(y_data[i]),color=text_color)
        else:
            P.text(pos[i]+0.01,y_data[i]+1000000,int_to_scinot(y_data[i]),color=text_color)
    P.savefig("../../images/figures/net_size_evo.png",dpi=100,format="png")
Example #29
def reaction_times_second_step(sessions, fig_no = 1):
    'Reaction times for second step pokes as function of common / rare transition.'
    sec_step_IDs = ut.get_IDs(sessions[0].IDs, ['right_active', 'left_active'])
    median_RTs_common = np.zeros(len(sessions))
    median_RTs_rare   = np.zeros(len(sessions))
    for i,session in enumerate(sessions):
        event_times = ut.get_event_times(session.time_stamps, session.event_codes, session.IDs)
        left_active_times = event_times['left_active']
        right_active_times = event_times['right_active']
        left_reaction_times  = _latencies(left_active_times,  event_times['left_poke'])
        right_reaction_times = _latencies(right_active_times, event_times['right_poke'])
        ordered_reaction_times = np.hstack((left_reaction_times,right_reaction_times))\
                                 [np.argsort(np.hstack((left_active_times,right_active_times)))]
        transitions = session.blocks['trial_trans_state'] == session.CTSO['transitions']  # common vs rare.                 
        median_RTs_common[i] = np.median(ordered_reaction_times[ transitions])
        median_RTs_rare[i]    = np.median(ordered_reaction_times[~transitions])
    mean_RT_common = 1000 * np.mean(median_RTs_common)
    mean_RT_rare   = 1000 * np.mean(median_RTs_rare)
    SEM_RT_common = 1000 * np.sqrt(np.var(median_RTs_common) / len(sessions))
    SEM_RT_rare   = 1000 * np.sqrt(np.var(median_RTs_rare)   / len(sessions))
    p.figure(fig_no)
    p.bar([1,2],[mean_RT_common, mean_RT_rare], yerr = [SEM_RT_common,SEM_RT_rare])
    p.xlim(0.8,3)
    p.ylim(mean_RT_common * 0.8, mean_RT_rare * 1.1)
    p.xticks([1.4, 2.4], ['Common', 'Rare'])
    p.title('Second step reaction times')
    p.ylabel('Reaction time (ms)')
    print('Paired t-test P value: {}'.format(ttest_rel(median_RTs_common, median_RTs_rare)[1]))
Example #30
def plotTimeline(df):
    t_resol = "1W"
    ax = pl.figure(1,(20,5))
    color = ['yellow','cyan','green','lime','red','magenta','purple','blue','grey']

    x_old = df.Bounty.resample(t_resol).count().index
    y_old = np.zeros_like(df.Bounty.resample(t_resol).count().values)
    y_others = np.zeros_like(df.Bounty.resample(t_resol).count().values)

    i=0
    for program in df.Program.unique():
        dfprog = df[df.Program == program]
        countBountiesProg = dfprog.Bounty.resample(t_resol).count()
        X = countBountiesProg.index
        i0 = np.argwhere(X[0] == x_old)[0]
        Y = countBountiesProg.values

        if df.Program[df.Program == program].count() < 90:
            y_others[i0:i0+len(X)] += Y
            continue

        pl.bar(X[0],80,width=7,color=color[i],lw=0.0,alpha=0.05)
        iMax = np.argmax(Y)
        #pl.bar(countBountiesProg.index[iMax],80,width=7,color=color[i],lw=0.0,alpha=0.2)
        pl.bar(X,Y,width=7,bottom=y_old[i0:i0+len(X)],lw=0.05,color=color[i],label=program)
        y_old[i0:i0+len(X)] += Y
        i+=1

        #pl.bar(x_old,y_others,width=7,bottom=y_old,lw=0.05,color='lightgrey',label=program)

    #pl.xlabel("Time [weeks]")
    #pl.ylabel("(Cumulative) bounties awarded")
    pl.legend(loc=0)
    pl.savefig(figuredir + "timeline.eps")
Example #31
# dictionary for analysis two variable nation and clubs
dict_na_clubs = {}

nations = list(club_count.index)
clubs = list(club_count.Club)

# total nations are 164
file = open("Nation_Clubs.txt", "w")

count = 0
while count < 164:
    dict_na_clubs.update({nations[count]: clubs[count]})
    file.write(str(nations[count]) + "," + str(clubs[count]) + "\n")
    count += 1

file.close()
value = list(dict_na_clubs.values())
key = list(dict_na_clubs.keys())
g.bar(key[:11], value[:11])

for x in range(0, 11):
    g.text(x - 0.10,
           value[x] + 0.10,
           str(value[x]),
           color="blue",
           fontweight="bold")

g.xticks(rotation=90)
g.title("Top 10 nations with highest clubs")
g.show()
Example #32
# Simple bar graph

import pylab

values = [20, 80, 50, 75]
indices = [i for i in range(len(values))]

# figsize adjusts the entire figure dimensions -- place before other pylab instructions
#pylab.figure(figsize=(10,5))

pylab.title("Simple Bar Graph Demo")
pylab.xlabel('X-axis label')
pylab.ylabel('Y-axis label')

# 1. These next two lines put labels on the x axis, one at each of the indices
#names = ['fred','ming','rose','rich']
#pylab.xticks(indices,names,rotation=90)

# 2. What does ylim do?
#pylab.ylim([0,100])

# simple plot
pylab.bar(indices, values)

# 3. more complex plot especially when combined with xticks line above
#mybarwidth = 0.8    # default is 0.8; you might want it smaller
#pylab.bar(indices,values,mybarwidth,align='center')
Example #33
def fitFluorBrightnessT(colourFilter, metadata, channame='', i=0, rng=None):
    #nPh = (colourFilter['A']*2*math.pi*(colourFilter['sig']/(1e3*metadata.getEntry('voxelsize.x')))**2)
    #nPh = nPh*metadata.getEntry('Camera.ElectronsPerCount')/metadata.getEntry('Camera.TrueEMGain')
    #from mpl_toolkits.mplot3d import Axes3D

    nPh = getPhotonNums(colourFilter, metadata)
    t = (colourFilter['t'].astype('f') - metadata['Protocol.DataStartsAt']
         ) * metadata.getEntry('Camera.CycleTime')
    NEvents = len(t)

    if rng is None:
        rng = nPh.mean() * 3

    Nco = nPh.min()

    n, xbins, ybins = np.histogram2d(
        nPh, t, [np.linspace(0, rng, 50),
                 np.linspace(0, t.max(), 20)])
    bins = xbins[:-1]

    xb = xbins[:-1][:, None] * np.ones([1, ybins.size - 1])
    yb = ybins[:-1][None, :] * np.ones([xbins.size - 1, 1])

    res0 = FitModel(
        fITmod2,
        [n.max() * 3, 1, np.median(nPh), 20, 1e2, 1e2, 100], n, xb, yb, Nco)
    print((res0[0]))

    PL.AddRecord('/Photophysics/FluorBrightness/fITmod2',
                 munge_res(fITmod2, res0))

    A, Ndet, lamb, tauI, a, Acrit, bg = res0[0]
    #Ndet = Ndet**2
    #NDetM = NDetM**2
    #Acrit = Acrit**2
    #a = (1+erf(a))/2

    Ndet = np.sqrt(Ndet**2 + 1) - 1  #+ve
    bg = np.sqrt(bg**2 + 1) - 1
    Acrit = np.sqrt(Acrit**2 + 1) - 1
    a = (1 + erf(a)) / 2  # [0,1]
    #bg = sqrt(bg**2 + 1) - 1
    #k = sqrt(k**2 + 1) - 1

    NDetM = bg

    rr = fITmod2(res0[0], xb, yb, Nco)
    if USE_GUI:

        pylab.figure()
        pylab.subplot(131)
        pylab.imshow(n, interpolation='nearest')
        pylab.colorbar()

        pylab.subplot(132)
        pylab.imshow(rr, interpolation='nearest')
        pylab.colorbar()

        pylab.subplot(133)
        pylab.imshow(n - rr, interpolation='nearest')
        pylab.colorbar()

        pylab.title(channame)

        pylab.figure()

        t_ = np.linspace(t[0], t[-1], 100)

        #sc = (lamb/(ybins[1] - ybins[0]))
        #sc = len(ybins)
        sc = 1. / (1 - np.exp(-(ybins[1] - ybins[0]) / lamb))
        print(('sc = ', sc))
        y1 = sc * A / ((t_ / tauI)**a + 1)
        pylab.plot(t_, y1)
        pylab.plot(t_, sc * (Ndet / ((t_ / tauI)**a + 1) + NDetM))

        pylab.bar(ybins[:-1], n.sum(0), width=ybins[1] - ybins[0], alpha=0.5)
        pylab.plot(ybins[:-1], rr.sum(0), lw=2)

        pylab.title(channame)
        pylab.xlabel('Time [s]')

        pylab.figtext(
            .2,
            .7,
            '$A = %3.0f\\;N_{det} = %3.2f\\;\\lambda = %3.0f\\;\\tau = %3.0f$\n$\\alpha = %3.3f\\;A_{crit} = %3.2f\\;N_{det_0} = %3.2f$'
            % (A, Ndet, lamb, tauI, a, Acrit, NDetM),
            size=18)

    return [channame, lamb, NEvents]
Example #34
def fitDecay(colourFilter, metadata, channame='', i=0):
    #get frames in which events occured and convert into seconds
    t = colourFilter['t'].astype('f') * metadata.getEntry('Camera.CycleTime')

    n, bins = np.histogram(t, 100)

    b1 = bins[:-1]
    Nm = n.max()

    res = FitModel(e2mod, [Nm * 2, 15, -3, Nm * 3, n[1] / 1], n[1:], b1[1:],
                   Nm)
    #mse = (res[2]['fvec']**2).mean()
    #ch2 = chi2(res, n[1:])
    #print ch2
    ch2, mse = chi2_mse(e2mod, n[1:], res, b1[1:], Nm)

    PL.AddRecord('/Photophysics/Decay/e2mod',
                 munge_res(e2mod, res, mse=mse, ch2=ch2))

    res2 = FitModelPoisson(emod, [Nm * 2, 15, -3, Nm * 3, n[1] / 2], n[1:],
                           b1[1:], Nm)  #[0]
    ch2, mse = chi2_mse(hmod, n[1:], res2, b1[1:], Nm)

    PL.AddRecord('/Photophysics/Decay/emod',
                 munge_res(emod, res2, mse=mse, ch2=ch2))

    res3 = FitModelPoisson(hmod, [Nm * 2, 15, -3, Nm * 3, n[1] / 2], n[1:],
                           b1[1:], Nm)  #[0]
    ch2, mse = chi2_mse(hmod, n[1:], res3, b1[1:], Nm)

    PL.AddRecord('/Photophysics/Decay/hmod',
                 munge_res(hmod, res3, mse=mse, ch2=ch2))

    r4 = FitModelPoisson(e2mod, [Nm * 2, 15, -3, Nm * 3, n[1] / 2], n[1:],
                         b1[1:], Nm)
    ch2, mse = chi2_mse(e2mod, n[1:], r4, b1[1:], Nm)
    PL.AddRecord('/Photophysics/Decay/e2mod_p',
                 munge_res(e2mod, r4, mse=mse, ch2=ch2))

    if USE_GUI:
        pylab.bar(b1 / 60,
                  n,
                  width=(b1[1] - b1[0]) / 60,
                  alpha=0.4,
                  fc=colours[i])
        pylab.plot(b1 / 60, e2mod(res[0], b1, Nm), colours[i], lw=3)
        pylab.plot(b1 / 60, emod(res2[0], b1, Nm), colours[i], lw=2, ls='--')
        pylab.plot(b1 / 60, hmod(res3[0], b1, Nm), colours[i], lw=2, ls=':')
        pylab.plot(b1 / 60, e2mod(r4[0], b1, Nm), colours[i], lw=1)
        pylab.ylim(0, 1.2 * n.max())
        pylab.ylabel('Events')
        pylab.xlabel('Acquisition Time [mins]')
        pylab.title('Event Rate')

        b = 0.5 * (1 + erf(res[0][2])) * Nm

        pylab.figtext(.4,
                      .8 - .05 * i,
                      channame + '\t$\\tau = %3.2fs,\\;b = %3.2f$' %
                      (res[0][1], b / res[0][0]),
                      size=18,
                      color=colours[i])

    return 0
Example #35
def bar(pl, x, y):
    pl.figure()
    pl.bar(x,y)
    pl.show()
    pl.close()
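A minimal usage sketch, passing the pylab module in as the pl argument (the sample data is illustrative):

import pylab
bar(pylab, [1, 2, 3], [4, 6, 5])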
Example #36
def plot_calibration(sims, date, do_save=0):

    sim = sims[0] # For having a sim to refer to

    # Draw plots
    fig1_path = f'calibration_{date}_fig1.png'
    fig2_path = f'calibration_{date}_fig2.png'
    fig_args    = sc.mergedicts({'figsize': (16, 14)})
    axis_args   = sc.mergedicts({'left': 0.10, 'bottom': 0.05, 'right': 0.95, 'top': 0.93, 'wspace': 0.25, 'hspace': 0.40})

    # Handle input arguments -- merge user input with defaults
    low_q = 0.1
    high_q = 0.9

    # Figure 1: Calibration
    pl.figure(**fig_args)
    pl.subplots_adjust(**axis_args)
    pl.figtext(0.42, 0.95, 'Model calibration', fontsize=30)


    #%% Figure 1, panel 1
    ax = pl.subplot(4,1,1)
    format_ax(ax, sim)
    plotter('new_tests', sims, ax, calib=True, label='Number of tests per day', ylabel='Tests')
    plotter('new_diagnoses', sims, ax, calib=True, label='Number of diagnoses per day', ylabel='Tests')


    #%% Figure 1, panel 2
    ax = pl.subplot(4,1,2)
    format_ax(ax, sim)
    plotter('cum_diagnoses', sims, ax, calib=True, label='Cumulative diagnoses', ylabel='People')


    #%% Figure 1, panel 3
    ax = pl.subplot(4,1,3)
    format_ax(ax, sim)
    plotter('cum_deaths', sims, ax, calib=True, label='Cumulative deaths', ylabel='Deaths')


    #%% Figure 1, panels 4A and 4B

    agehists = []

    for s,sim in enumerate(sims):
        agehist = sim['analyzers'][0]
        if s == 0:
            age_data = agehist.data
        agehists.append(agehist.hists[-1])

    x = age_data['age'].values
    pos = age_data['cum_diagnoses'].values
    death = age_data['cum_deaths'].values

    # From the model
    mposlist = []
    mdeathlist = []
    for hists in agehists:
        mposlist.append(hists['diagnosed'])
        mdeathlist.append(hists['dead'])
    mposarr = np.array(mposlist)
    mdeatharr = np.array(mdeathlist)

    mpbest = pl.median(mposarr, axis=0)
    mplow  = pl.quantile(mposarr, q=low_q, axis=0)
    mphigh = pl.quantile(mposarr, q=high_q, axis=0)
    mdbest = pl.median(mdeatharr, axis=0)
    mdlow  = pl.quantile(mdeatharr, q=low_q, axis=0)
    mdhigh = pl.quantile(mdeatharr, q=high_q, axis=0)

    # Plotting
    w = 4
    off = 2
    bins = x.tolist() + [100]

    ax = pl.subplot(4,2,7)
    c1 = [0.3,0.3,0.6]
    c2 = [0.6,0.7,0.9]
    xx = x+w-off
    pl.bar(x-off,pos, width=w, label='Data', facecolor=c1)
    pl.bar(xx, mpbest, width=w, label='Model', facecolor=c2)
    for i,ix in enumerate(xx):
        pl.plot([ix,ix], [mplow[i], mphigh[i]], c='k')
    ax.set_xticks(bins[:-1])
    pl.title('Diagnosed cases by age')
    pl.xlabel('Age')
    pl.ylabel('Cases')
    pl.legend()

    ax = pl.subplot(4,2,8)
    c1 = [0.5,0.0,0.0]
    c2 = [0.9,0.4,0.3]
    pl.bar(x-off,death, width=w, label='Data', facecolor=c1)
    pl.bar(x+w-off, mdbest, width=w, label='Model', facecolor=c2)
    for i,ix in enumerate(xx):
        pl.plot([ix,ix], [mdlow[i], mdhigh[i]], c='k')
    ax.set_xticks(bins[:-1])
    pl.title('Deaths by age')
    pl.xlabel('Age')
    pl.ylabel('Deaths')
    pl.legend()

    # Tidy up
    if do_save:
        cv.savefig(fig1_path)


    # Figure 2: Projections
    pl.figure(**fig_args)
    pl.subplots_adjust(**axis_args)
    pl.figtext(0.42, 0.95, 'Model estimates', fontsize=30)

    #%% Figure 2, panel 1
    ax = pl.subplot(4,1,1)
    format_ax(ax, sim)
    plotter('cum_infections', sims, ax,calib=True, label='Cumulative infections', ylabel='People')
    plotter('cum_recoveries', sims, ax,calib=True, label='Cumulative recoveries', ylabel='People')

    #%% Figure 2, panel 2
    ax = pl.subplot(4,1,2)
    format_ax(ax, sim)
    plotter('n_infectious', sims, ax,calib=True, label='Number of active infections', ylabel='People')
    plot_intervs(sim, labels=True)

    #%% Figure 2, panel 3
    ax = pl.subplot(4,1,3)
    format_ax(ax, sim)
    plotter('new_infections', sims, ax,calib=True, label='Infections per day', ylabel='People')
    plotter('new_recoveries', sims, ax,calib=True, label='Recoveries per day', ylabel='People')
    plot_intervs(sim)

    #%% Figure 2, panels 4
    ax = pl.subplot(4,1,4)
    format_ax(ax, sim)
    plotter('r_eff', sims, ax, calib=True, label='Effective reproductive number', ylabel=r'$R_{eff}$')

    ylims = [0,4]
    pl.ylim(ylims)
    xlims = pl.xlim()
    pl.plot(xlims, [1, 1], 'k')
    plot_intervs(sim)

    # Tidy up
    if do_save:
        cv.savefig(fig2_path)

    return
Example #37

    genotypeInt = []
    genotypeStrings = []
    g_int=0
    for gen in all_genotypes:
        g_int +=1 
        genotypeInt.append( g_int )
        genome_label = "".join( ["%d" %x for x in gen] )   
        genotypeStrings.append( genome_label )
    trueFitnesses = []
    for g in genotypeStrings:
        trueFitnesses.append( TRUE_FITNESS[g] )

    plt.clf()
    title = "Ciprofloxacin = "+cipcon+" ng/ml"
    plt.title(title, fontsize=60)
    plt.bar( genotypeInt, trueFitnesses, align='center', color='olivedrab')
    plt.xticks(rotation=70, fontsize=20)
    plt.xticks( genotypeInt, genotypeStrings)
    plt.yticks( fontsize=20 )
    plt.ylabel( "Fitness", fontsize=40 )
    plt.ylim((0,1.2))
    text = "g* = "+bestGeno
    plt.text(15, 1.1, text, fontsize=32)
    #plt.show()
    figure = plt.gcf() # get current figure
    figure.set_size_inches(16, 12)
    # when saving, specify the DPI
    filename2 = path+"/histo_"+cipcon+".png"
    plt.savefig(filename2,dpi=100)

Example #38
    'datacolumns', 'tabulardata', 'searching', 'pipelines', 'libraries'
]
counts = [[0, 0, 0, 0, 1, 0, 3, 1, 0, 0, 0], [2, 1, 0, 5, 6, 0, 2, 1, 5, 1, 0],
          [3, 4, 9, 11, 4, 0, 8, 9, 8, 8, 4],
          [10, 12, 7, 2, 6, 13, 5, 7, 2, 5, 12],
          [3, 1, 1, 1, 2, 5, 1, 1, 3, 2, 2]]

figure()
title('Feedback distribution in TGAC2015 Python course')
xlabel('module')
ylabel('finger count')

x1 = [2.0, 5.0, 8.0, 11.0, 14.0, 17.0, 20.0, 23.0, 26.0, 29.0, 32.0]
x2 = [x - 0.5 for x in x1]
x3 = [x - 1.0 for x in x1]
x4 = [x - 1.5 for x in x1]
x5 = [x - 2.0 for x in x1]

xticks(x2, modules)

bar(x1, counts[4], width=0.5, color="#0000FF", label="5")
bar(x2, counts[3], width=0.5, color="#808080", label="4")
bar(x3, counts[2], width=0.5, color="#FF0000", label="3")
bar(x4, counts[1], width=0.5, color="#CCEEFF", label="2")
bar(x5, counts[0], width=0.5, color="#00FF00", label="1")

legend()
axis([0.0, 35.0, 0, 16])
savefig('barplot.png')
plt.show()
Example #39
from pylab import figure, title, xlabel, ylabel, xticks, bar, legend, axis, savefig

nucleotides = ["A", "G", "C", "U"]
counts = [
    [606, 1024, 759, 398],
    [762, 912, 639, 591],
]
figure()
title('RNA nucleotides in the ribosome')
xlabel('RNA')
ylabel('base count')
x1 = [2.0, 4.0, 6.0, 8.0]
x2 = [x - 0.5 for x in x1]
xticks(x1, nucleotides)
bar(x1, counts[1], width=0.5, color="#cccccc", label="E.coli 23S")
bar(x2, counts[0], width=0.5, color="#808080", label="T.thermophilus 23S")
legend()
axis([1.0, 9.0, 0, 1200])
savefig('barplot.png')
Example #40
import pylab as p
import numpy as np

x = np.array([10, 20, 30, 40, 50])
y = np.array([100, 110, 80, 60, 105])

p.bar(x, y)
p.show()

# p.savefig('barchart.png', format='png')
Example #41
"""
Generate the exercise results on the Gumbell distribution
"""
import numpy as np
from scipy.interpolate import UnivariateSpline
import pylab as pl


def gumbell_dist(arr):
    return -np.log(-np.log(arr))

years_nb = 21
wspeeds = np.load('sprog-windspeeds.npy')
max_speeds = np.array([arr.max() for arr in np.array_split(wspeeds, years_nb)])
sorted_max_speeds = np.sort(max_speeds)

cprob = (np.arange(years_nb, dtype=np.float32) + 1)/(years_nb + 1)
gprob = gumbell_dist(cprob)
speed_spline = UnivariateSpline(gprob, sorted_max_speeds, k=1)
nprob = gumbell_dist(np.linspace(1e-3, 1 - 1e-3, 100))
fitted_max_speeds = speed_spline(nprob)

fifty_prob = gumbell_dist(49./50.)
fifty_wind = speed_spline(fifty_prob)

pl.figure()
pl.bar(np.arange(years_nb) + 1, max_speeds)
pl.axis('tight')
pl.xlabel('Year')
pl.ylabel('Annual wind speed maxima [$m/s$]')
Example #42
    xv1.append(xi + 1 - 0.125)
    xv2.append(xi + 1 + 0.125)

print(xv1, xv2)
print(y1)
print(y2)

test_numpy = numpy.array(test)

if axis == 'top_down_pyr':
    test_numpy = test_numpy * 15.0

elif axis == 'top_down_pv':
    test_numpy = test_numpy * 20.0

pylab.bar(xv1, y1, width=0.25)
pylab.errorbar(xv1, y1, yerr=y1e, fmt='o')
pylab.bar(xv2, y2, width=0.25)
pylab.errorbar(xv2, y2, yerr=y2e, fmt='o')
pylab.xticks(range(1, len(xv1) + 1), test_numpy)

if axis == 'second_lgn':
    from scipy import stats
    d1 = p_conns[str(1.0)]
    d2 = np_conns[str(1.0)]
    t, p = stats.ttest_ind(d1, d2)
    print(d1)
    print(d2)
    print(p)

pylab.savefig('../figs/' + axis + '_' + str(args.top_down_pyr) + '_' +
Example #43
def plot6_1():
    main_columns = ["label", "d_0", "d_1", "d_2", "d_3"]
    feats_df = pd.read_csv(config.ary_feats_file)
    feats_df = feats_df[main_columns]
    plt.figure(1, figsize=(14, 10), dpi=70, facecolor="#FFFFFF")
    plt.title("basic features")
    for subplot_id in range(1, len(main_columns)):
        plt.subplot(220 + subplot_id)
        # bucket the data
        column = [main_columns[subplot_id]]
        feats_df[column[0]] = feats_df[column[0]].replace(np.nan, 0.0)
        max_ = np.max(feats_df[column].values)
        min_ = np.min(feats_df[column].values)
        print(min_, max_)
        xlabel = np.arange(min_, max_, 1.0 * (max_ - min_) / 20)  # split into 20 buckets
        fun = lambda x: data_buckets(x, xlabel)
        # p_y = [ 0.0 for i in range(len(xlabel)) ]
        # n_y = [ 0.0 for i in range(len(xlabel)) ]

        # fun = lambda x: x
        tmp_df = feats_df[column + ["label"]]
        # tmp_df[column[0]] = pd.Series( preprocessing.scale(tmp_df[column[0]].values) )
        tmp_df[column[0]] = tmp_df[column[0]].map(fun)
        p_df = tmp_df[tmp_df["label"] == 1.0].groupby(
            column, as_index=False).count().reset_index(drop=True)
        n_df = tmp_df[tmp_df["label"] == 0.0].groupby(
            column, as_index=False).count().reset_index(drop=True)
        p_x = p_df[column[0]].values
        p_y = p_df["label"].values
        n_x = n_df[column[0]].values
        n_y = n_df["label"].values
        n_y /= np.sum(n_y) / np.sum(p_y)  # there are far more negative samples, so rescale their counts to the positives' scale

        # plt.bar( p_x, p_y, label='pulsers')
        # plt.bar( n_x, n_y, label='RFI' )
        if subplot_id == 1:
            plt.bar(p_x,
                    +p_y,
                    alpha=.7,
                    facecolor='lightskyblue',
                    edgecolor='white',
                    label='pulsers count')
            plt.bar(n_x,
                    +n_y,
                    alpha=.7,
                    facecolor='#ff9999',
                    edgecolor='white',
                    label='RFI count')
        else:
            plt.bar(p_x,
                    +p_y,
                    alpha=.7,
                    facecolor='lightskyblue',
                    edgecolor='white')
            plt.bar(n_x,
                    +n_y,
                    alpha=.7,
                    facecolor='#ff9999',
                    edgecolor='white')
        # plt.ylim(0.2968,0.2969)
        xlabel = ["%.1f" % k for k in xlabel]
        plt.xticks(n_x, xlabel, rotation=45)
        plt.xlabel('%s' % column[0])
        plt.ylabel('counts')
        plt.legend()
    plt.show()
Example #44

mean_host_placement = np.mean(percentages)

initial = [best_type, distinction_level, AVG, *powers, mean_host_placement]

Iterative_rank.loc[0] = initial
Iterative_rank.to_csv("Values of the best histogram for the given statistic.csv", header = ["Best type", "Distinction level", "Rank value for distinction", "a", "b", "c", "d", "Mean placement of host galaxy"])


elapsed_time = timer() - start # in seconds
print('The code took {:.3g} minutes to complete'.format(elapsed_time/60))
            
#%%
plt.close("all")

plt.figure(0)
plt.bar(goodedge[:-1], goodvals, width= (goodedge[1]-goodedge[0]), color='red', alpha=0.5, label = "Outside")
plt.bar(goodedgein[:-1], goodvalsin, width= (goodedgein[1]-goodedgein[0]), color='blue', alpha=0.5, label = "Inside")
plt.axvline(AVG, ls = "--", color = "black", label = "Best distinction point")


plt.legend(loc = "best")
plt.xlabel("Stat values")
plt.ylabel("Number")
plt.title("Histogram of {0} producing the best distinction \n value for {1} simulated SGRBs".format(best_type, N))
plt.savefig("Histogram {}.png".format(N))

#%%
"""
Calculating and plotting a cumulative distribution for the histogram values 
inside and outside.
"""
Example #45
def plot_bars(data,
              labels=None,
              title=None,
              ylim=None,
              ylabel=None,
              width=0.2,
              offset=0.2,
              color='0.6',
              distance=1.0,
              yerr='ste',
              xloc=None,
              **kwargs):
    """Make bar plots with automatically computed error bars.

    A candlestick plot (multiple interleaved barplots) can be produced
    by calling this function multiple times with an appropriately modified
    `offset` argument.

    Parameters
    ----------
    data : array (nbars x nobservations) or other sequence type
      Source data for the barplot. Error measure is computed along the
      second axis.
    labels : list or None
      If not None, a label from this list is placed on each bar.
    title : str
      An optional title of the barplot.
    ylim : 2-tuple
      Y-axis range.
    ylabel : str
      An optional label for the y-axis.
    width : float
      Width of a bar. The value should be in a reasonable relation to
      `distance`.
    offset : float
      Constant offset of all bar along the x-axis. Can be used to create
      candlestick plots.
    color : matplotlib color spec
      Color of the bars.
    distance : float
      Distance of two adjacent bars.
    yerr : {'ste', 'std', None}
      Type of error for the errorbars. If `None` no errorbars are plotted.
    xloc : sequence
      Locations of the bars on the x axis.
    **kwargs
      Any additional arguments are passed to matplotlib's `bar()` function.
    """
    # determine location of bars
    if xloc is None:
        xloc = (np.arange(len(data)) * distance) + offset

    if yerr == 'ste':
        yerr = [np.std(d) / np.sqrt(len(d)) for d in data]
    elif yerr == 'std':
        yerr = [np.std(d) for d in data]
    else:
        # if something that we do not know just pass on
        pass

    # plot bars
    plot = pl.bar(xloc, [np.mean(d) for d in data],
                  yerr=yerr,
                  width=width,
                  color=color,
                  ecolor='black',
                  **kwargs)

    if ylim:
        pl.ylim(*(ylim))
    if title:
        pl.title(title)

    if labels:
        pl.xticks(xloc + width / 2, labels)

    if ylabel:
        pl.ylabel(ylabel)

    # leave some space after last bar
    pl.xlim(0, xloc[-1] + width + offset)

    return plot
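A sketch of the candlestick usage described in the docstring, interleaving two groups of bars via the offset argument (the random data and the pl alias for pylab are assumptions):

import numpy as np
group_a = np.random.randn(5, 20) + 1.0  # 5 bars, 20 observations each
group_b = np.random.randn(5, 20) + 1.5
plot_bars(group_a, labels=list('ABCDE'), offset=0.2, width=0.2, color='0.6')
plot_bars(group_b, offset=0.45, width=0.2, color='0.3')
pl.show()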
Example #46
def _make_plot(ts, ts1, gids, neurons, hist=True, hist_binwidth=5.0,
               grayscale=False, title=None, xlabel=None):
    """Generic plotting routine.

    Constructs a raster plot along with an optional histogram (common part in
    all routines above).

    Parameters
    ----------
    ts : list
        All timestamps
    ts1 : list
        Timestamps corresponding to gids
    gids : list
        Global ids corresponding to ts1
    neurons : list
        GIDs of neurons to plot
    hist : bool, optional
        Display histogram
    hist_binwidth : float, optional
        Width of histogram bins
    grayscale : bool, optional
        Plot in grayscale
    title : str, optional
        Plot title
    xlabel : str, optional
        Label for x-axis
    """
    pylab.figure()

    if grayscale:
        color_marker = ".k"
        color_bar = "gray"
    else:
        color_marker = "."
        color_bar = "blue"

    color_edge = "black"

    if xlabel is None:
        xlabel = "Time (ms)"

    ylabel = "Neuron ID"

    if hist:
        ax1 = pylab.axes([0.1, 0.3, 0.85, 0.6])
        plotid = pylab.plot(ts1, gids, color_marker)
        pylab.ylabel(ylabel)
        pylab.xticks([])
        xlim = pylab.xlim()

        pylab.axes([0.1, 0.1, 0.85, 0.17])
        t_bins = numpy.arange(
            numpy.amin(ts), numpy.amax(ts),
            float(hist_binwidth)
        )
        n, bins = _histogram(ts, bins=t_bins)
        num_neurons = len(numpy.unique(neurons))
        heights = 1000 * n / (hist_binwidth * num_neurons)

        pylab.bar(t_bins, heights, width=hist_binwidth, color=color_bar,
                  edgecolor=color_edge)
        pylab.yticks([
            int(x) for x in
            numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)
        ])
        pylab.ylabel("Rate (Hz)")
        pylab.xlabel(xlabel)
        pylab.xlim(xlim)
        pylab.axes(ax1)
    else:
        plotid = pylab.plot(ts1, gids, color_marker)
        pylab.xlabel(xlabel)
        pylab.ylabel(ylabel)

    if title is None:
        pylab.title("Raster plot")
    else:
        pylab.title(title)

    pylab.draw()

    return plotid
Example #47
0
        else:
            black_picks += 1
    else:
        pick = randint(0, total2)
        if pick <= black2:
            black_picks += 1
            current = 1
        else:
            white_picks += 1

    black_white_ratios.append(white_picks / black_picks)
    if i % 100 == 0:
        # Re-plotting without ever clearing the figure is wasteful, but
        # clearing it only occasionally keeps the animation looking smooth
        pl.clf()
    if i % 10 == 0:
        pl.subplot(2, 1, 1)
        pl.grid(True)
        pl.bar(range(2), [black_picks, white_picks],
               align='center',
               color=['Black', 'Gray'],
               tick_label=['Black', 'White'])
        pl.subplot(2, 1, 2)
        pl.grid(True)
        pl.plot(interval[:i], black_white_ratios, color='Blue', linestyle='-')
        pl.axhline(y=overall_ratio, color='Green', linestyle='-')
        pl.pause(0.0001)
    # pl.show()

pl.waitforbuttonpress()
Example #48
0
    '1999_1313', '1999_1907', '1999_2063', '2001_1099', '2002_1896'
]

# for i in *ann; do echo -ne "$i\t" ; grep HasProperty $i | wc -l ; done
matt_cnt = [43, 12, 0, 0, 23, 35, 8, 16, 7, 1]
raymond_cnt = [44, 11, 1, 0, 35, 30, 7, 14, 6, 0]
# Determined manually; not sure how to do it automatically
overlap = [25, 5, 0, 0, 10, 26, 5, 14, 0, 0]

pl.figure()
width = 0.5
bar_width = width - 0.05
shift = 0.23

xvals = np.arange(len(mpf_docs))
pl.bar(xvals - shift, matt_cnt, width=bar_width, label='Matt')
pl.bar(xvals + shift, raymond_cnt, width=bar_width, label='Raymond')
pl.bar(xvals,
       overlap,
       width=bar_width * 2,
       label='Overlap',
       color='yellow',
       alpha=0.5)
pl.ylabel('Number of properties', fontsize=14)
pl.gca().set_xticks(xvals)
pl.gca().set_xticklabels(mpf_docs, rotation=30)
pl.yticks(fontsize=14)
pl.xticks(fontsize=14)
pl.legend(fontsize=14)

fig_file = 'mpf-property-counts.png'
Example #49
0
def plot_people(people,
                bins=None,
                width=1.0,
                alpha=0.6,
                fig_args=None,
                axis_args=None,
                plot_args=None,
                do_show=None,
                fig=None):
    ''' Plot statistics of a population -- see People.plot() for documentation '''

    # Handle inputs
    if bins is None:
        bins = np.arange(0, 101)

    # Set defaults
    color = [0.1, 0.1, 0.1]  # Color for the age distribution
    n_rows = 4  # Number of rows of plots
    offset = 0.5  # For ensuring the full bars show up
    gridspace = 10  # Spacing of gridlines
    zorder = 10  # So plots appear on top of gridlines

    # Handle other arguments
    fig_args = sc.mergedicts(dict(figsize=(18, 11)), fig_args)
    axis_args = sc.mergedicts(
        dict(left=0.05,
             right=0.95,
             bottom=0.05,
             top=0.95,
             wspace=0.3,
             hspace=0.35), axis_args)
    plot_args = sc.mergedicts(dict(lw=1.5, alpha=0.6, c=color, zorder=10),
                              plot_args)

    # Compute statistics
    min_age = min(bins)
    max_age = max(bins)
    edges = np.append(
        bins, np.inf)  # Add an extra bin to end to turn them into edges
    age_counts = np.histogram(people.age, edges)[0]

    # Create the figure
    if fig is None:
        fig = pl.figure(**fig_args)
    pl.subplots_adjust(**axis_args)

    # Plot age histogram
    pl.subplot(n_rows, 2, 1)
    pl.bar(bins,
           age_counts,
           color=color,
           alpha=alpha,
           width=width,
           zorder=zorder)
    pl.xlim([min_age - offset, max_age + offset])
    pl.xticks(np.arange(0, max_age + 1, gridspace))
    pl.grid(True)
    pl.xlabel('Age')
    pl.ylabel('Number of people')
    pl.title(f'Age distribution ({len(people):n} people total)')

    # Plot cumulative distribution
    pl.subplot(n_rows, 2, 2)
    age_sorted = sorted(people.age)
    y = np.linspace(0, 100, len(age_sorted))  # Percentage, not hard-coded!
    pl.plot(age_sorted, y, '-', **plot_args)
    pl.xlim([0, max_age])
    pl.ylim([0, 100])  # Percentage
    pl.xticks(np.arange(0, max_age + 1, gridspace))
    pl.yticks(np.arange(0, 101, gridspace))  # Percentage
    pl.grid(True)
    pl.xlabel('Age')
    pl.ylabel('Cumulative proportion (%)')
    pl.title(
        f'Cumulative age distribution (mean age: {people.age.mean():0.2f} years)'
    )

    # Calculate contacts
    lkeys = people.layer_keys()
    n_layers = len(lkeys)
    contact_counts = sc.objdict()
    for lk in lkeys:
        layer = people.contacts[lk]
        p1ages = people.age[layer['p1']]
        p2ages = people.age[layer['p2']]
        contact_counts[lk] = np.histogram(p1ages, edges)[0] + np.histogram(
            p2ages, edges)[0]

    # Plot contacts
    layer_colors = sc.gridcolors(n_layers)
    share_ax = None
    for w, w_type in enumerate(['total', 'percapita', 'weighted']):  # Plot contacts in different ways
        for i, lk in enumerate(lkeys):
            if w_type == 'total':
                weight = 1
                total_contacts = 2 * len(
                    people.contacts[lk])  # x2 since each contact is undirected
                ylabel = 'Number of contacts'
                title = f'Total contacts for layer "{lk}": {total_contacts:n}'
            elif w_type == 'percapita':
                weight = np.divide(1.0, age_counts, where=age_counts > 0)
                mean_contacts = 2 * len(people.contacts[lk]) / len(
                    people)  # Factor of 2 since edges are bi-directional
                ylabel = 'Per capita number of contacts'
                title = f'Mean contacts for layer "{lk}": {mean_contacts:0.2f}'
            elif w_type == 'weighted':
                weight = people.pars['beta_layer'][lk] * people.pars['beta']
                total_weight = np.round(weight * 2 * len(people.contacts[lk]))
                ylabel = 'Weighted number of contacts'
                title = f'Total weight for layer "{lk}": {total_weight:n}'

            ax = pl.subplot(n_rows,
                            n_layers,
                            n_layers * (w + 1) + i + 1,
                            sharey=share_ax)
            pl.bar(bins,
                   contact_counts[lk] * weight,
                   color=layer_colors[i],
                   width=width,
                   zorder=zorder,
                   alpha=alpha)
            pl.xlim([min_age - offset, max_age + offset])
            pl.xticks(np.arange(0, max_age + 1, gridspace))
            pl.grid(True)
            pl.xlabel('Age')
            pl.ylabel(ylabel)
            pl.title(title)
            if w_type == 'weighted':
                share_ax = ax  # Update shared axis

    cvset.handle_show(do_show)

    return fig
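A hedged usage sketch for `plot_people`: it appears to expect a Covasim-style `People` object (with `age`, `contacts`, `layer_keys()` and `pars`), so the call below assumes the Covasim package is available and behaves as described.

# Hypothetical usage, assuming this is Covasim's plotting helper and that
# cv.Sim(...).initialize() creates the sim.people object it expects.
import covasim as cv

sim = cv.Sim(pop_size=20000)
sim.initialize()                       # builds sim.people, including contact layers
fig = plot_people(sim.people, do_show=True)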
Example #50
0
import pandas as pd
from fpdf import FPDF
from pylab import title, xlabel, ylabel, xticks, bar, legend, axis, savefig

df = pd.DataFrame()
df['Question'] = ["Q1", "Q2", "Q3", "Q4"]
df['Charles'] = [3, 4, 5, 3]
df['Mike'] = [3, 3, 4, 4]

title("Professor Criss's Ratings by Users")
xlabel('Question Number')
ylabel('Score')

c = [2.0, 4.0, 6.0, 8.0]
m = [x - 0.5 for x in c]

xticks(c, df['Question'])

bar(m, df['Mike'], width=0.5, color="#91eb87", label="Mike")
bar(c, df['Charles'], width=0.5, color="#eb879c", label="Charles")

legend()
axis([0, 10, 0, 8])
savefig('barchart.png')

pdf = FPDF()
pdf.add_page()
pdf.set_xy(0, 0)
pdf.set_font('arial', 'B', 12)
pdf.cell(60)
pdf.cell(
    75, 10,
    "A Tabular and Graphical Report of Professor Criss's Ratings by Users Charles and Mike",
    0, 2, 'C')
Example #51
0
def upper_sco():
    rootDir = '/u/jlu/doc/proposals/irtf/2012A/'

    # Read in a reference table for converting between
    # spectral types and effective temperatures.
    ref = atpy.Table(rootDir + 'Teff_SpT_table.txt', type='ascii')

    sp_type = np.array([ii[0] for ii in ref.col1])
    sp_class = np.array([float(ii[1:4]) for ii in ref.col1])
    sp_teff = ref.col2

    # Read in the upper sco table
    us = atpy.Table(rootDir + 'upper_sco_sample_simbad.txt', type='ascii')

    us_sp_type = np.array([ii[0] for ii in us.spectype])
    us_sp_class = np.zeros(len(us_sp_type), dtype=int)
    us_sp_teff = np.zeros(len(us_sp_type), dtype=int)

    for ii in range(len(us_sp_class)):
        if (us_sp_type[ii] == "~"):
            us_sp_class[ii] = -1
        else:
            if ((len(us.spectype[ii]) < 2)
                    or (us.spectype[ii][1].isdigit() == False)):
                us_sp_class[ii] = 5  # Arbitrarily assigned
            else:
                us_sp_class[ii] = us.spectype[ii][1]

            # Assign effective temperature
            idx = np.where(us_sp_type[ii] == sp_type)[0]
            tdx = np.abs(us_sp_class[ii] - sp_class[idx]).argmin()
            us_sp_teff[ii] = sp_teff[idx[tdx]]

    # Trim out the ones that don't have spectral types and K-band
    # magnitudes for plotting purposes.
    idx = np.where((us_sp_type != "~") & (us.K != "~") & (us.J != "~"))[0]
    print('Keeping %d of %d with spectral types and K mags.' %
          (len(idx), len(us_sp_type)))

    us.add_column('sp_type', us_sp_type)
    us.add_column('sp_class', us_sp_class)
    us.add_column('sp_teff', us_sp_teff)

    us = us.rows([idx])

    J = np.array(us.J[0], dtype=float)
    H = np.array(us.H[0], dtype=float)
    K = np.array(us.K[0], dtype=float)
    JKcolor = J - K

    # Get the unique spectral classes and count how many of each
    # we have in the sample.
    sp_type_uniq = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T'])
    sp_type_count = np.zeros(len(sp_type_uniq), dtype=int)
    sp_type_idx = []

    sp_type_J = np.zeros(len(sp_type_uniq), dtype=float)
    sp_type_JK = np.zeros(len(sp_type_uniq), dtype=float)

    for ii in range(len(sp_type_uniq)):
        idx = np.where(us.sp_type[0] == sp_type_uniq[ii])[0]
        sp_type_count[ii] = len(idx)

        sp_type_idx.append(idx)

        # Calc the mean J and J-K color for each spectral type
        if len(idx) > 2:
            sp_type_J[ii] = J[idx].mean()
            sp_type_JK[ii] = JKcolor[idx].mean()

        print('%s  %3d  J = %4.1f  J-K = %4.1f' %
              (sp_type_uniq[ii], sp_type_count[ii],
               sp_type_J[ii], sp_type_JK[ii]))

    # Plot up the distribution of spectral types
    xloc = np.arange(len(sp_type_uniq)) + 1
    py.figure(2, figsize=(10, 6))
    py.clf()
    py.bar(xloc, sp_type_count, width=0.5)
    py.xticks(xloc + 0.25, sp_type_uniq)
    py.xlim(0.5, xloc.max() + 0.5)
    py.xlabel('Spectral Type')
    py.ylabel('Upper Sco Sample')
    py.savefig(rootDir + 'USco_spec_type_hist.png')

    # Plot Teff vs. J-band mag
    py.figure(1)
    py.clf()
    py.semilogx(us.sp_teff[0], J, 'k.')
    rng = py.axis()
    py.axis([rng[1], rng[0], rng[3], rng[2]])
    py.xlabel('Teff (K, log scale)')
    py.ylabel('J Magnitude')
    py.xlim(40000, 1000)
    py.savefig(rootDir + 'USco_HR.png')

    py.clf()
    py.plot(JKcolor, J, 'kx')

    idx = np.where(sp_type_J != 0)[0]
    py.plot(sp_type_JK[idx], sp_type_J[idx], 'bs')
    for ii in idx:
        py.text(sp_type_JK[ii] + 0.05,
                sp_type_J[ii] - 0.5,
                sp_type_uniq[ii],
                color='blue')
    rng = py.axis()
    py.axis([rng[0], rng[1], rng[3], rng[2]])
    py.xlabel('J - K (mag)')
    py.ylabel('J (mag)')
    py.xlim(-0.25, 1.75)
    py.savefig(rootDir + 'USco_CMD.png')

    idx = np.where(J < 11)[0]
    print('%d stars with J<11 and Teff = [%3d - %4d] K' %
          (len(idx), us.sp_teff[0][idx].min(), us.sp_teff[0][idx].max()))

    return us
Example #52
0
def plot_data(plot_type, data, title):
    '''
        This function plots the data.
            1) Bar plot: Plots the diabetes prevalence of various age groups in
                         a specific region.
            2) Pie chart: Plots the diabetes prevalence by gender.

        Parameters:
            plot_type (string): Indicates what plotting function is used.
            data (dict): Contains the diabetes prevalence of all the countries
                         within a specific region.
            title (string): Plot title

        Returns:
            None

    '''

    plot_type = plot_type.upper()

    categories = data.keys()  # List of age groups
    gender = ['FEMALE', 'MALE']  # List of the genders used in this dataset

    if plot_type == 'BAR':

        # List of population with diabetes per age group and gender
        female = [data[x][gender[0]] for x in categories]
        male = [data[x][gender[1]] for x in categories]

        # Make the bar plots
        width = 0.35
        p1 = pylab.bar([x for x in range(len(categories))],
                       female,
                       width=width)
        p2 = pylab.bar([x + width for x in range(len(categories))],
                       male,
                       width=width)
        pylab.legend((p1[0], p2[0]), gender)

        pylab.title(title)
        pylab.xlabel('Age Group')
        pylab.ylabel('Population with Diabetes')

        # Place the tick between both bar plots
        pylab.xticks([x + width / 2 for x in range(len(categories))],
                     categories,
                     rotation='vertical')
        pylab.show()
        # optionally save the plot to a file; file extension determines
        # file type
        # pylab.savefig("plot_bar.png")

    elif plot_type == 'PIE':

        # total population with diabetes per gender
        male = sum([data[x][gender[1]] for x in categories])
        female = sum([data[x][gender[0]] for x in categories])

        pylab.title(title)
        pylab.pie([female, male], labels=gender, autopct='%1.1f%%')
        pylab.show()
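A small sketch of how `plot_data` might be called; the nested layout (age group -> gender -> count) is inferred from the list comprehensions above, and the numbers are invented.

# Hypothetical input matching the data[age_group][gender] access pattern above.
region_data = {
    '20-39': {'FEMALE': 1200, 'MALE': 1100},
    '40-59': {'FEMALE': 2300, 'MALE': 2500},
    '60-79': {'FEMALE': 1800, 'MALE': 1500},
}
plot_data('bar', region_data, 'Diabetes prevalence by age group (example region)')
plot_data('pie', region_data, 'Diabetes prevalence by gender (example region)')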
Example #53
0
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []

# Plot the BIC scores
spl = pl.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(pl.bar(xpos, bic[i * len(n_components_range):
                                 (i + 1) * len(n_components_range)],
                       width=.2, color=color))
pl.xticks(n_components_range)
pl.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
pl.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
pl.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Plot the winner
splot = pl.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
Example #54
0
 def plotHist(self, pars=None, SHOW=True, SAVE=True):
     skipfits = False
     if pars is not None: params = pars
     else:
         if not np.isnan(np.sum(self.params)): params = self.params
         else: skipfits = True
     xs = []
     for i in range(len(self.x)):
         if float(self.n[i]) / max(self.n) > 0.01:
             xs.append(self.x[i])
     Xdiff = max(xs) - min(xs)
     Xav = 0.5 * (max(xs) + min(xs))
     Xmin = Xav - 0.75 * Xdiff
     Xmax = Xav + 0.75 * Xdiff
     pb.clf()
     pb.bar(self.x,
            self.n,
            float(self.Imax) / self.numbin,
            color='0.8',
            linewidth=0.4,
            align='center')
     if not skipfits:
         for i in range(len(params) // 3):
             pb.plot(np.linspace(0, 100, 500),
                     nGauss(np.linspace(0, 100, 500),
                            *params[i * 3:i * 3 + 3]),
                     color=colours[i],
                     linewidth=1)
         pb.plot(np.linspace(0, 100, 500),
                 nGauss(np.linspace(0, 100, 500), *params),
                 '--k',
                 linewidth=1)
     if self.ranges is not None:
         for i in range(len(self.ranges)):
             rn = self.ranges[i]
             mask = (self.x > rn[0]) & (self.x < rn[1])
             pb.bar(np.array(self.x)[mask],
                    np.array(self.n)[mask],
                    float(self.Imax) / self.numbin,
                    color=colours[i],
                    linewidth=0.4,
                    align='center')
     pb.xlabel('I (pA)', fontsize=20)
     pb.ylabel('Counts', fontsize=20)
     pb.xticks(fontsize=16)
     pb.yticks(fontsize=16)
     pb.xlim([Xmin, Xmax])
     pb.grid(True)
     if SAVE:
         fn = os.path.basename(self.filename)
         pb.savefig(self.figdir + 'hist_' + fn[:-4] + '.eps')
         f = open(self.datdir + fn[:-4] + '.dat', 'w')
         f.write('#hist\n')
         f.write('BinCentre(pA)\tBinMin(pA)\tBinMax(pA)\tCounts\n')
         for i in range(len(self.n)):
             f.write(
                 str(self.x[i]) + '\t' + str(self.bins[i]) + '\t' +
                 str(self.bins[i + 1]) + '\t' + str(self.n[i]) + '\n')
         f.close()
     else:
         self.plotHist(SHOW=SHOW)  # this looks problematic
     #if savename!=None:pb.savefig(savename)
     if SHOW: pb.show()
     return
Example #55
0
def MultiDimPrior_Gen(intable):
    Nrows = len(intable)
    Ncols = len(intable.colnames)

    # Set bin-edges for each column in the table.
    bins = []
    for i in range(Ncols):
        bins.append(
            np.linspace(intable[:, i].min(), intable[:, i].max(), Nbins))

    #multidim histogram
    H, edges = np.histogramdd(intable, bins=bins)
    Hnorm = H / np.sum(H)

    #indices where histogram != 0
    indpdf = np.transpose(np.array((Hnorm).nonzero()))

    ## binmids = np.empty([Ncols, Nbins-1], dtype='float')
    ## for i in range(Ncols): binmids[i,:] = (bins[i][0:Nbins-1]+bins[i][1:Nbins])/2.

    ## pdfflat = pdf.flatten()
    ## print len(pdfflat)
    ## print lkqsl
    Nrows = len(indpdf[:, 0])
    cdf = np.zeros(Nrows, dtype='float')
    pdf = np.zeros(Nrows, dtype='float')

    for i in range(Ncols):
        print(np.transpose(np.nonzero(Hnorm != 0))[i])

    # 1D cdf and pdf using only non-zero bins
    for j in range(Nrows):
        print(str(j) + '/' + str(Nrows))
        cdf[j] = cdf[j - 1] + Hnorm[tuple(indpdf[j, :])]
        pdf[j] = Hnorm[tuple(indpdf[j, :])]
    indmax = np.where(pdf == np.max(pdf))[0]
    print(indpdf[indmax, :])

    plotbins = []
    plotslice = np.empty([Nrows, Nbins - 1])
    indpdftemp = copy.copy(indpdf)
    for i in range(Ncols):
        # plot slice through dimension i
        #keep = [x for x in range(Ncols) if x != i]
        #print keep
        #print indpdf[indmax[0],keep]
        plotbins.append(bins[i][0:-1])
        for j in range(Nbins - 1):
            indpdftemp = copy.copy(indpdf)
            indpdftemp[indmax[0], i] = j
            print(indpdftemp[indmax[0]])
            plotslice[i, j] = H[tuple(indpdftemp[indmax[0]])]
            #plotslice.append(H[tuple(indpdf[indmax[0],keep])])

    py.clf()
    fig3 = py.figure(figsize=[10, 12])
    for i in range(Ncols):
        ax = fig3.add_subplot(3, 2, i + 1)
        py.xticks(fontsize=14)
        py.yticks(fontsize=14)
        py.xlim(plotbins[i].min(), plotbins[i].max())
        py.bar(plotbins[i], plotslice[i, :],
               (plotbins[i][1] - plotbins[i][0]) * 0.8)  #bx = bin edges
    ax = fig3.add_subplot(3, 2, Ncols + 1)
    nx, bx, px = py.hist(np.sum(H) * pdf.flatten(),
                         40,
                         ec='b',
                         histtype='step',
                         linewidth=3)
    py.xticks(fontsize=14)
    py.yticks(fontsize=14)
    #print np.shape(cdf)
    #py.clf()
    #py.plot(range(len(cdf)), cdf)
    py.savefig(rootdir + analysisdir + 'plots/testhist_Nbins' +
               str(int(Nbins)) + '.pdf')
    py.clf()

    savearr = indpdf[:, 0].astype(int)
    for i in range(Ncols - 1):
        savearr = np.column_stack((savearr.astype(int), indpdf[:, i + 1]))
    savearr = np.column_stack((pdf, cdf, savearr))

    # bins = bin edges, savearr columns are normalized pdf, cdf, index[0], index[1], index[2]...
    # where indices are multi-dimensional bin indices
    return bins, savearr
Example #56
0
import pylab as pl
import numpy as np

ax = pl.axes([0.025, 0.025, 0.95, 0.95], polar=True)

N = 20
theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / N)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
bars = pl.bar(theta, radii, width=width, bottom=0.0)

for r, bar in zip(radii, bars):
    bar.set_facecolor(pl.cm.jet(r / 10.))
    bar.set_alpha(0.5)

ax.set_xticklabels([])
ax.set_yticklabels([])
pl.show()
Example #57
0
                     (count_only_2) /
                     (count_both + count_only_1 + count_only_2))
             fractions.append((count_only_1 + count_only_2) /
                              (count_both + count_only_1 + count_only_2))
             fractions_all3.append(
                 (count_only_1 + count_only_2 + count_both) /
                 (count_both + count_only_1 + count_only_2))
             labels.append(s1.rsplit('_', 1)[0])
             print(s1.rsplit('_', 1)[0], fractions[-1], mfractions[-1],
                   fractions[-1] - mfractions[-1])
         except:
             print(s1, s2, len(allowed_gene_i), sum(expr[s1]), sum(expr[s2]))
             continue
 pylab.bar(range(len(fractions)),
           fractions_all3,
           facecolor='#009933',
           linewidth=0,
           width=0.95)
 pylab.bar(range(len(fractions)),
           fractions,
           facecolor='b',
           linewidth=0,
           width=0.95)
 pylab.bar(range(len(fractions)),
           mfractions,
           facecolor='#cc0000',
           linewidth=0,
           width=0.95)
 pylab.xticks([x + 0.95 / 2 for x in range(len(fractions))],
              labels,
              rotation=90,
Example #58
0
    def plot_PARC(self,
                  show=True,
                  normalised=True,
                  savefig=False,
                  num_fig=1,
                  width=1,
                  linewidth=1):
        """plotting dedicated to the :attr:`PARC` attribute.

        where P stands for pipe cost

        :param bool show: if True, display the figure with pylab's ``show()``
        :param bool savefig:
        :param bool normalised: normalise the quantities D, A, R, C by R (total resource)

        .. todo: this doc to be cleaned up

        .. plot::
            :include-source:
            :width: 50%

            from vplants.plantik import *
            options = ConfigParams(get_shared_data('pruning.ini'))
            plant = Plant(1, options)
            for x in range(100):
                plant.DARC.D.append(0.25)
                plant.DARC.A.append(0.25)
                plant.DARC.R.append(1)
                plant.DARC.C.append(0.25)
                plant.DARC.pipe_cost.append(0.5)
                plant._time.append(x)
            plant.plot_PARC()


        """
        from pylab import bar, hold, legend, title, figure, clf, xlabel, plot, ylabel
        import numpy

        T = numpy.array(self.time)
        D = numpy.array(self.DARC.D.values)
        A = numpy.array(self.DARC.A.values)
        Rn = numpy.array(self.DARC.R.values)
        C = numpy.array(self.DARC.C.values)
        Reserve = numpy.array(self.variables.reserve.values)
        if normalised == False:
            R = Rn / Rn
        else:
            R = Rn
        pipe = numpy.array(self.DARC.pipe_cost.values)
        figure(num_fig)
        clf()
        bar(T,
            A / R,
            label='Primary growth, A',
            width=width,
            linewidth=linewidth)
        hold(True)
        bar(T,
            C / R,
            bottom=A / R,
            label='Living cost, C',
            color='r',
            width=width,
            linewidth=linewidth)
        bar(T, (pipe / R),
            bottom=(C + A) / R,
            color='g',
            label='Secondary growth',
            width=width,
            linewidth=linewidth)
        bar(T, (Reserve / R),
            bottom=(C + A + pipe) / R,
            color='y',
            label='Reserve',
            width=width,
            linewidth=linewidth)
        plot(T, Rn, color='k', label='Resource, R', linewidth=2)
        plot(T, D, color='k', label='Demand, D', linewidth=1, linestyle='--')
        legend(loc='best')
        xlabel('Time (days)')
        ylabel('Unit Resource')
        title("Proportion of allocation, pipe cost and living cost")
        if show is True:
            from pylab import show as myshow
            myshow()
Example #59
0
def bar_chart_auroc(file, fout_name, plot_title="performance"):
    """
    plot a bar chart based on the test performance from diff CV
    """

    ## loading the data
    data = data_process(file)

    method = 'individual'
    org_code = 'H_sapiens'
    numb_cvs = len(data[method][0][org_code])

    ## fetching the best performance based on the validation and test
    best_eval = numpy.argmax(data[method][0][org_code], axis=1)
    test_perf = numpy.zeros(numb_cvs)
    for idx, v_idx in enumerate(best_eval):
        test_perf[idx] = data[method][1][org_code][idx][v_idx]

    ## plot settings
    pylab.figure(figsize=(5, 4))
    pylab.rcParams.update({'figure.autolayout': True})

    offset = 0
    width = 0.10
    separator = 0.10

    xlocations = []
    min_max = []
    rects = []

    x_axis_marks = []
    for idk, ele in enumerate(test_perf):
        xlocations.append(offset + 0.05)
        min_max.append(ele)
        rects.append(
            pylab.bar(offset, ele, width, color="#73C6B6", edgecolor="white"))
        offset += width

        idk += 1
        cvname = "fold_%d" % idk
        x_axis_marks.append(cvname)

    ## mean bar
    rects.append(
        pylab.bar(offset,
                  test_perf.mean(),
                  width,
                  color="#EC7063",
                  edgecolor="white"))
    xlocations.append(offset + 0.05)
    offset += width

    x_axis_marks.append('mean')

    ## adjusting the axis range
    min_max.sort()

    ymax = min_max[-1] * 1.03
    ymin = min_max[0] * 0.98

    ## set the ticks
    tick_step = 0.01
    ticks = [tick_step * i for i in range(int(round(ymax / tick_step) + 1))]

    pylab.yticks(ticks, fontsize=8)
    pylab.xticks(xlocations, x_axis_marks, rotation="vertical", fontsize=8)

    pylab.xlim(0, offset)
    pylab.ylim(ymin, ymax)

    pylab.title(plot_title, fontsize=8)

    pylab.gca().get_yaxis().tick_left()
    pylab.gca().get_xaxis().tick_bottom()

    pylab.gca().get_yaxis().grid(True)
    pylab.gca().get_xaxis().grid(False)

    pylab.ylabel("auROC", fontsize=8)

    pylab.savefig(fout_name)
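A hedged call example for `bar_chart_auroc`; the format of the file consumed by `data_process` is not shown here, so the path below is purely illustrative.

# Hypothetical call -- 'cv_performance.pkl' is a placeholder for whatever
# input format data_process() expects.
bar_chart_auroc('cv_performance.pkl', 'auroc_barplot.pdf',
                plot_title='Test auROC across CV folds (H_sapiens)')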
Example #60
0
    def plot_fit_go(self,
                    dat,
                    lims=(0.001, 5),
                    res=100,
                    nbins=20,
                    conditions='all',
                    split_by_response='correct',
                    ylim_fixed=True):
        """
        Plot histogram of GO-trials and the model-solution per condition.

        nbins : int or list
           bins for the data-histogram
        
        res : int
           number of datapoints with which the density plots are resolved
        
        conditions : None, 'all' or list of items or lists
            this is used to collapse over design factors that are not modeled
            e.g., conditions=[ [0,1], [1,2] ] would plot the data of
            [0,1] together and [1,2] together. The density is plotted for
            both conditions on top of each other.

        split_by_response : None, 'correct' or 'response'
           plot separate histograms/densities depending on the response
             'correct'  : split into correct/incorrect responses
             'response' : split into the possible responses

        ylim_fixed : False, True
           should the y-axis be fixed across conditions?
        """
        colors = ['red', 'blue', 'green', 'yellow', 'magenta', 'cyan']
        if lims[0] <= 0:
            lims = (1e-10, lims[1])  # lims may be a tuple, so rebuild it instead of assigning in place

        if isinstance(nbins, int):
            bins = np.linspace(lims[0], lims[1], nbins)
        else:
            bins = nbins

        if conditions == 'all':
            conditions = range(self.design.nconditions())
        elif conditions is None:
            conditions = [range(self.design.nconditions())]

        a = int(np.sqrt(len(conditions)))
        b = int(np.ceil(len(conditions) / float(a)))

        t = np.linspace(lims[0], lims[1], res)
        maxy = []
        for cix, cond in enumerate(conditions):
            pl.subplot(a, b, cix + 1)
            if isinstance(cond, int):
                condidx = (dat.condition == cond)
            if isinstance(cond, Iterable):
                condidx = np.array([dcond in cond for dcond in dat.condition],
                                   dtype=bool)

            goix = ((condidx) & np.isfinite(dat.RT) & np.isnan(dat.SSD))

            ## construct response indices
            if split_by_response not in ['response', 'correct']:
                split_by_response = 'response'

            if split_by_response == 'response':
                respidcs = [(ri, resp, (dat.response == ri))
                            for ri, resp in enumerate(self.design.responses)]
            else:
                con = cond[0] if isinstance(cond, Iterable) else cond
                corr_ri = self.design.correct_response(con, as_index=True)
                inc_ri = ((corr_ri + 1) % self.design.nresponses())
                respidcs = [(ri, resp, dat.correct == rbo) for rbo, resp, ri in
                            zip([True, False], ['correct', 'incorrect'],
                                [corr_ri, inc_ri])]

            for ri, resp, respix in respidcs:
                #resp, respix=resptup
                d = dat.RT[goix & respix]

                if len(d) > 0:
                    h, hx = np.histogram(d, density=True, bins=bins)
                    hx = hx[:-1]
                    h = h * (len(d) / float((len(dat.RT[goix]))))
                    pl.bar(hx,
                           h,
                           width=(hx[1] - hx[0]),
                           alpha=.3,
                           color=colors[ri],
                           label='resp=%s' % (resp))

                if isinstance(cond, Iterable):
                    for con in cond:
                        if split_by_response == 'correct':
                            corr_ri = self.design.correct_response(
                                con, as_index=True)
                            inc_ri = ((corr_ri + 1) % self.design.nresponses())
                            cri = corr_ri if resp == 'correct' else inc_ri
                        else:
                            cri = ri
                        pl.plot(t,
                                self.dens_acc_go(t, con, cri),
                                color=colors[ri],
                                linewidth=3)
                else:
                    pl.plot(t,
                            self.dens_acc_go(t, cond, ri),
                            color=colors[ri],
                            linewidth=3)
            if isinstance(cond, Iterable):
                titlab = (",".join(":".join(self.design.condidx(con))
                                   for con in cond))
            else:
                titlab = (":".join(self.design.condidx(cond)))
            # long title, guaranteed to fit w matplotlib
            titlab = ("\n".join(wrap(titlab, width=70)))
            pl.title(titlab, fontsize='x-small')

            if cix == 0:
                pl.legend()
            pl.xlim(lims)
            maxy.append(pl.ylim()[1])
        if ylim_fixed:
            maxy = np.max(maxy)
            for cix, cond in enumerate(conditions):
                pl.subplot(a, b, cix + 1)
                pl.ylim(0, maxy)