Code example #1
File: PlottingFns.py Project: ajrichards/cytostream
def set_arbitrary_ticks(ax,axis,events,index1,index2,fontsize=10,fontname='sans'):
    """
    if an axis is using an unknown scale or we just with to use the data to scale 
    the axis
    """

    buff = 0.02
    formatter = ScalarFormatter(useMathText=True)
    formatter.set_scientific(True)
    formatter.set_powerlimits((-3,3))

    ## handle data edge buffers
    if axis in ['x','both']:
        bufferX = buff * (events[:,index1].max() - events[:,index1].min())
        ax.set_xlim([events[:,index1].min()-bufferX,events[:,index1].max()+bufferX])
        ax.xaxis.set_major_formatter(formatter)
    if axis in ['y','both']:
        bufferY = buff * (events[:,index2].max() - events[:,index2].min())
        ax.set_ylim([events[:,index2].min()-bufferY,events[:,index2].max()+bufferY])
        ax.yaxis.set_major_formatter(formatter)

    if axis in ['x','both']:
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(fontsize-2) 
            tick.label.set_fontname(fontname)
    if axis in ['y','both']:
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(fontsize-2) 
            tick.label.set_fontname(fontname)
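
A minimal usage sketch for the helper above (illustrative only: the random `events` array and the scatter call are assumptions, and it relies on an older matplotlib where `Tick.label` is still available):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter  # needed by set_arbitrary_ticks

# hypothetical event data: two channels with very different magnitudes
events = np.column_stack([np.random.rand(500) * 1e5, np.random.rand(500) * 1e-3])

fig, ax = plt.subplots()
ax.scatter(events[:, 0], events[:, 1], s=2)
set_arbitrary_ticks(ax, 'both', events, 0, 1)  # data-driven limits + math-text ticks
plt.show()
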
Code example #2
def age_vs_plot(track, infile, ycol='logl', ax=None, annotate=True, xlabels=True,
                save_plot=True, ylabels=True):
    agb_mix = infile.agb_mix
    set_name = infile.set_name

    if ycol == 'logl':
        ydata = track.get_col('L_star')
        majL = MultipleLocator(.2)
        minL = MultipleLocator(.1)
        ylab = r'$\log\ L_{\odot}$'
    elif ycol == 'logt':
        ydata = track.get_col('T_star')
        majL = MultipleLocator(.1)
        minL = MultipleLocator(.05)
        ylab = r'$\log\ Te$'
    elif ycol == 'C/O':
        ydata = track.get_col('CO')
        majL = MaxNLocator(4)
        minL = MaxNLocator(2)
        ylab = '$C/O$'
    else:
        print('logl, logt, C/O only choices for y.')
        return

    age = track.get_col('ageyr')
    addpt = track.addpt
    Qs = list(track.Qs)

    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(age, ydata, color='black')
    ax.plot(age[Qs], ydata[Qs], 'o', color='green')
    if len(addpt) > 0:
        ax.plot(age[addpt], ydata[addpt], 'o', color='purple')
    ax.yaxis.set_major_locator(majL)
    ax.yaxis.set_minor_locator(minL)
    majorFormatter = ScalarFormatter()
    majorFormatter.set_powerlimits((-3, 4))
    ax.xaxis.set_major_formatter(majorFormatter)

    if annotate is True:
        ax.text(0.06, 0.87, '${\\rm %s}$' % agb_mix.replace('_', '\ '),
                transform=ax.transAxes)
        ax.text(0.06, 0.77,'${\\rm %s}$' % set_name.replace('_', '\ '),
                transform=ax.transAxes)
        ax.text(0.06, 0.67, '$M=%.2f$' % track.mass,
                transform=ax.transAxes)
    if ylabels is True:
        ax.set_ylabel(ylab, fontsize=20)
    if xlabels is True:
        ax.set_xlabel(r'$\rm{Age (yr)}$', fontsize=20)

    if save_plot is True:
        plotpath = os.path.join(infile.diagnostic_dir, 'age_v/')
        fileIO.ensure_dir(plotpath)
        fname = os.path.split(track.name)[1].replace('.dat', '')
        fig_name = os.path.join(plotpath, '_'.join(('diag', fname)))
        plt.savefig('%s_age_v.png' % fig_name, dpi=300)
        plt.close()
    return
Code example #3
File: myfunctions.py Project: jzmnd/myfunctions
def quickPlot(filename, path, datalist, xlabel="x", ylabel="y", xrange=["auto", "auto"], yrange=["auto", "auto"], yscale="linear", xscale="linear", col=["r", "b"]):
	"""Plots Data to .pdf File in Plots Folder Using matplotlib"""
	if "plots" not in os.listdir(path):
		os.mkdir(os.path.join(path, "plots"))
	coltab = col*10
	seaborn.set_context("notebook", rc={"lines.linewidth": 1.0})
	formatter = ScalarFormatter(useMathText=True)
	formatter.set_scientific(True)
	formatter.set_powerlimits((-2, 3))
	fig = Figure(figsize=(6, 6))
	ax = fig.add_subplot(111)
	for i, ydata in enumerate(datalist[1:]):
		ax.plot(datalist[0], ydata, c=coltab[i])
	ax.set_title(filename)
	ax.set_yscale(yscale)
	ax.set_xscale(xscale)
	ax.set_xlabel(xlabel)
	ax.set_ylabel(ylabel)
	if xrange[0] != "auto":
		ax.set_xlim(xmin=xrange[0])
	if xrange[1] != "auto":
		ax.set_xlim(xmax=xrange[1])
	if yrange[0] != "auto":
		ax.set_ylim(ymin=yrange[0])
	if yrange[1] != "auto":
		ax.set_ylim(ymax=yrange[1])
	if yscale == "linear":
		ax.yaxis.set_major_formatter(formatter)
	ax.xaxis.set_major_formatter(formatter)
	canvas = FigureCanvasPdf(fig)
	canvas.print_figure(os.path.join(path, "plots", filename+".pdf"))
	return
Code example #4
File: plot.py Project: maxvogel/NetworKit-mirror2
		def funcPlotEnd(fig, ax, theme, width, height, x_showTicks=True, x_showTickLabels=True, y_showTicks=True, y_showTickLabels=True, drawAxis=True, showGrid=True):
			""" set some layout options """
			if not x_showTicks:
				ax.set_xticks([])
			if not x_showTickLabels:
				ax.set_xticklabels([])
			else:
				xfmt = ScalarFormatter(useMathText=True)
				xfmt.set_powerlimits((-2,3))
				ax.xaxis.set_major_formatter(xfmt)
			if not y_showTicks:
				ax.set_yticks([])
			if not y_showTickLabels:
				ax.set_yticklabels([])
			ax.grid(showGrid, which="both", color=theme.getGridColor(), linestyle="-")
			ax.patch.set_facecolor(theme.getBackgroundColor())
			if drawAxis:
				axisColor = theme.getGridColor()
			else:
				axisColor = theme.getBackgroundColor()
			ax.spines["bottom"].set_color(axisColor)
			ax.spines["top"].set_color(axisColor)
			ax.spines["right"].set_color(axisColor)
			ax.spines["left"].set_color(axisColor)
			ax.tick_params(axis="x", colors=theme.getGridColor(), which="both", labelsize=theme.getFontSize())
			ax.tick_params(axis="y", colors=theme.getGridColor(), which="both", labelsize=theme.getFontSize())
			ax.xaxis.label.set_color(theme.getFontColor())
			ax.yaxis.label.set_color(theme.getFontColor())
			[x_ticklabel.set_color(theme.getFontColor()) for x_ticklabel in ax.get_xticklabels()]
			[y_ticklabel.set_color(theme.getFontColor()) for y_ticklabel in ax.get_yticklabels()]
			fig.set_size_inches(width, height)
Code example #5
File: libhooke.py Project: Alwnikrotikz/hooke
 def __init__(self, ndec=None, useOffset=True, useMathText=False):
     ScalarFormatter.__init__(self, useOffset, useMathText)
     if ndec is None or ndec < 0:
         self.format = None
     elif ndec == 0:
         self.format = "%d"
     else:
         self.format = "%%1.%if" % ndec
Code example #6
File: plotutils_base.py Project: flomertens/libwise
class AbsFormatter(object):
    def __init__(self, useMathText=True):
        self._fmt = ScalarFormatter(useMathText=useMathText, useOffset=False)
        self._fmt.create_dummy_axis()

    def __call__(self, direction, factor, values):
        self._fmt.set_locs(values)
        return [self._fmt(abs(v)) for v in values]
Code example #7
File: infovar.py Project: guziy/RPN
def get_colorbar_formatter(varname):
    if varname in ["STFL", "STFA"]:
        return None
    else:
        # format the colorbar tick labels
        sfmt = ScalarFormatter(useMathText=True)
        sfmt.set_powerlimits((-3, 3))
        return sfmt
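
A minimal sketch of attaching such a formatter to a colorbar (the imshow data is made up; only the formatter setup mirrors the function above):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

data = np.random.rand(10, 10) * 1e-4   # hypothetical field with small values

sfmt = ScalarFormatter(useMathText=True)
sfmt.set_powerlimits((-3, 3))          # math-text scientific notation outside 1e-3..1e3

fig, ax = plt.subplots()
im = ax.imshow(data)
fig.colorbar(im, ax=ax, format=sfmt)   # colorbar ticks use the formatter
plt.show()
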
Code example #8
File: tile.py Project: michaelsmit/ocean-navigator
def scale(args):
    dataset_name = args.get('dataset')
    scale = args.get('scale')
    scale = [float(component) for component in scale.split(',')]

    variable = args.get('variable')
    if variable.endswith('_anom'):
        variable = variable[0:-5]
        anom = True
    else:
        anom = False

    variable = variable.split(',')

    with open_dataset(get_dataset_url(dataset_name)) as dataset:
        variable_unit = get_variable_unit(dataset_name,
                                          dataset.variables[variable[0]])
        variable_name = get_variable_name(dataset_name,
                                          dataset.variables[variable[0]])

    if variable_unit.startswith("Kelvin"):
        variable_unit = "Celsius"

    if anom:
        cmap = colormap.colormaps['anomaly']
        variable_name = gettext("%s Anomaly") % variable_name
    else:
        cmap = colormap.find_colormap(variable_name)

    if len(variable) == 2:
        if not anom:
            cmap = colormap.colormaps.get('speed')

        variable_name = re.sub(
            r"(?i)( x | y |zonal |meridional |northward |eastward )", " ",
            variable_name)
        variable_name = re.sub(r" +", " ", variable_name)

    fig = plt.figure(figsize=(2, 5), dpi=75)
    ax = fig.add_axes([0.05, 0.05, 0.25, 0.9])
    norm = matplotlib.colors.Normalize(vmin=scale[0], vmax=scale[1])

    formatter = ScalarFormatter()
    formatter.set_powerlimits((-3, 4))
    bar = ColorbarBase(ax, cmap=cmap, norm=norm, orientation='vertical',
                       format=formatter)
    bar.set_label("%s (%s)" % (variable_name.title(),
                               utils.mathtext(variable_unit)))

    buf = StringIO()
    try:
        plt.savefig(buf, format='png', dpi='figure', transparent=False,
                    bbox_inches='tight', pad_inches=0.05)
        plt.close(fig)
        return buf.getvalue()
    finally:
        buf.close()
Code example #9
def setAxes(ax):
    globalAxesSettings(ax)
    ax.yaxis.set_major_locator(MaxNLocator(4))
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    ax.yaxis.set_minor_locator(AutoMinorLocator(2))
    f = ScalarFormatter(useMathText=True)
    f.set_scientific(True)
    f.set_powerlimits((0, 3))
    ax.yaxis.set_major_formatter(f)
Code example #10
File: plot_bfe_hydrographs.py Project: guziy/RPN
def plot_comparison_hydrographs(basin_name_to_out_indices_map, rea_config=None, gcm_config=None):
    """

    :type basin_name_to_out_indices_map: dict
    """
    assert isinstance(rea_config, RunConfig)
    assert isinstance(gcm_config, RunConfig)

    assert hasattr(rea_config, "data_daily")
    assert hasattr(gcm_config, "data_daily")

    bname_to_indices = OrderedDict([item for item in sorted(basin_name_to_out_indices_map.items(),
                                                            key=lambda item: item[1][1], reverse=True)])

    print(bname_to_indices)

    plot_utils.apply_plot_params(font_size=12, width_pt=None, width_cm=25, height_cm=12)
    fig = plt.figure()
    ncols = 3
    nrows = len(bname_to_indices) // ncols + int(len(bname_to_indices) % ncols != 0)
    gs = GridSpec(nrows, ncols)

    ax_last = None
    for pl_index, (bname, (i, j)) in enumerate(bname_to_indices.items()):
        row = pl_index // ncols
        col = pl_index % ncols
        ax = fig.add_subplot(gs[row, col])

        ax.plot(rea_config.data_daily[0], rea_config.data_daily[1][:, i, j], color="b", lw=2,
                label=rea_config.label)
        ax.plot(gcm_config.data_daily[0], gcm_config.data_daily[1][:, i, j], color="r", lw=2,
                label=gcm_config.label)


        ax.xaxis.set_major_locator(MonthLocator())
        ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
        ax.xaxis.set_minor_formatter(FuncFormatter(lambda d, pos: num2date(d).strftime("%b")[0]))
        plt.setp(ax.xaxis.get_majorticklabels(), visible=False)
        ax.grid()

        sfmt = ScalarFormatter(useMathText=True)
        sfmt.set_powerlimits([-2, 2])
        ax.yaxis.set_major_formatter(sfmt)

        bbox_props = dict(boxstyle="round,pad=0.3", fc="cyan", ec="b", lw=1, alpha=0.5)
        ax.annotate(bname, (0.9, 0.1), xycoords="axes fraction", bbox=bbox_props, zorder=10,
                    alpha=0.5, horizontalalignment="right", verticalalignment="bottom")

        ax_last = ax

    ax_last.legend(loc="upper right", bbox_to_anchor=(1, -0.2), borderaxespad=0, ncol=2)

    img_file = img_folder.joinpath("bfe_hydrographs.eps")
    with img_file.open("wb") as f:
        fig.tight_layout()
        fig.savefig(f, bbox_inches="tight", format="eps")
Code example #11
File: pyPlotSolution.py Project: tomaszhof/scripts
def plotResults(a1, a2, b1, b2, fname, ofname):
  #read data from csv file
  print('Reading data from csv file...')
  a = np.genfromtxt(fname, delimiter=';')
  noRows = a.shape[0]
  noCols = a.shape[1]
  a = a[0:noRows, 0:(noCols-1)]
  deltaX = a2-a1
  deltaY = b2-b1
  stepX = deltaX / (noRows)
  stepY = deltaY / (noCols-1)
  print('done.')

  print('Preparing plot...')
  fig = plt.figure(figsize=(5, 3), dpi=500)
  ax = fig.gca(projection='3d')
  X = np.arange(a1, a2, stepX)
  Y = np.arange(b1, b2, stepY)
  X, Y = np.meshgrid(X, Y)
  Z = a
  vMax=Z.max()
  vMin=Z.min()
  vMax=vMax+0.1*abs(vMax)
  vMin=vMin-0.1*abs(vMin)
  surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.Greys_r,
        linewidth=0, antialiased=True, vmin=vMin, vmax=vMax)
  zAxisFormatter = ScalarFormatter()
  zAxisFormatter.set_scientific(True)
  zAxisFormatter.set_powerlimits((0, 1))
  #ax.zaxis.set_major_formatter(zAxisFormatter)
  print('Drawing...')
  fontSize=8 #set fontsize on plot
  ax.set_xlabel('x', fontsize=fontSize)
  ax.set_ylabel('y', fontsize=fontSize)
  ax.zaxis.set_rotate_label(False)
  ax.set_zlabel('u(x,y)', fontsize=fontSize, rotation=90)
  ax.view_init(27, 35)
  t = ax.zaxis.get_offset_text()
  t.set_size(fontSize-2)
  #t.set_position((0,0))
  t.set_rotation(45)
  t.set_verticalalignment('center')
  #t.set_z(0)
  plt.setp(ax.get_xticklabels(), fontsize=fontSize)
  plt.setp(ax.get_yticklabels(), fontsize=fontSize)
  plt.setp(ax.get_zticklabels(), fontsize=fontSize)
  plt.legend()
  cbar=fig.colorbar(surf, shrink=0.75, aspect=15)
  cbar.ax.tick_params(labelsize=fontSize)
  
  #plt.show()
  plt.savefig(filename=ofname, format='eps')
  plt.close()
Code example #12
File: plotting.py Project: ashishyadavppe/Skater
def tick_formatter(powerlimits=None):
    try:
        from matplotlib.ticker import ScalarFormatter
    except ImportError:
        raise (MatplotlibUnavailableError("Matplotlib is required but unavailable on your system."))
    except RuntimeError:
        raise (MatplotlibDisplayError("Matplotlib unable to open display"))
    if powerlimits is None:
        powerlimits = (3, 3)
    formatter = ScalarFormatter()
    formatter.set_powerlimits(powerlimits)
    return formatter
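
A short usage sketch for the helper above (the plotted values are arbitrary; tick_formatter is the function defined in this example):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(range(5), [v * 1e6 for v in range(5)])          # values around 1e6
ax.yaxis.set_major_formatter(tick_formatter((-3, 4)))   # scientific notation outside 1e-3..1e4
plt.show()
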
Code example #13
File: plot_2d.py Project: reflectometry/osrefl
    def show_slice_overlay(self, x_range, y_range, x, slice_y_data, y, slice_x_data):
        """sum along x and z within the box defined by qX- and qZrange.
        sum along qx is plotted to the right of the data,
        sum along qz is plotted below the data.
        Transparent white rectangle is overlaid on data to show summing region"""
        from matplotlib.ticker import FormatStrFormatter, ScalarFormatter

        if self.fig == None:
            print ("No figure for this dataset is available")
            return

        fig = self.fig
        ax = fig.ax
        extent = fig.im.get_extent()

        if fig.slice_overlay == None:
            fig.slice_overlay = ax.fill(
                [x_range[0], x_range[1], x_range[1], x_range[0]],
                [y_range[0], y_range[0], y_range[1], y_range[1]],
                fc="white",
                alpha=0.3,
            )
            fig.ax.set_ylim(extent[2], extent[3])
        else:
            fig.slice_overlay[0].xy = [
                (x_range[0], y_range[0]),
                (x_range[1], y_range[0]),
                (x_range[1], y_range[1]),
                (x_range[0], y_range[1]),
            ]
        fig.sz.clear()
        default_fmt = ScalarFormatter(useMathText=True)
        default_fmt.set_powerlimits((-2, 4))
        fig.sz.xaxis.set_major_formatter(default_fmt)
        fig.sz.yaxis.set_major_formatter(default_fmt)
        fig.sz.xaxis.set_major_formatter(FormatStrFormatter("%.2g"))
        fig.sz.set_xlim(x[0], x[-1])
        fig.sz.plot(x, slice_y_data)
        fig.sx.clear()
        fig.sx.yaxis.set_major_formatter(default_fmt)
        fig.sx.xaxis.set_major_formatter(default_fmt)
        fig.sx.yaxis.set_ticks_position("right")
        fig.sx.yaxis.set_major_formatter(FormatStrFormatter("%.2g"))
        fig.sx.set_ylim(y[0], y[-1])
        fig.sx.plot(slice_x_data, y)

        fig.im.set_extent(extent)
        fig.canvas.draw()
Code example #14
 def __init__(self, figure, x_label='', y_label=''):
     """
     Initializes a _plot_list, which contains plot_data.
     
     Keyword arguments:
     figure -- The matplotlib figure to which the plots are added.
     x_label -- The x-axis label to use for all plots (default: '')
     y_label -- The y-axis label to use for all plots (default: '')
     """
     self.x_label = x_label
     self.y_label = y_label
     self.figure = figure
     
     self.sub_plots = []
     # set default formatter for the time being
     frmt = ScalarFormatter(useOffset = True)   
     frmt.set_powerlimits((-3,3))
     frmt.set_scientific(True)
     self.default_formatter = (frmt, frmt)
Code example #15
File: learning_plots.py Project: tbekolay/bekolay
def plot_learn_zips_summary(name,control_zip,learn_zips,vals,skip_rows=0,png=False,num_mins=3):
    _,control = read_zip(control_zip,skip_rows=skip_rows)
    baseline = 1
    scale = baseline/numpy.mean(control)
    
    figsize = (5,2.5)
    pylab.figure(figsize=figsize,dpi=300)
    
    means = []
    
    for learn_zip in learn_zips:
        _,learn = read_zip(learn_zip,skip_rows=skip_rows)
        means.append(numpy.mean(learn)*scale)
    
    sorted_means = list(means)
    sorted_means.sort()
    min_means_loc = [vals[means.index(sorted_means[i])] for i in range(num_mins)]
    
    ax = pylab.axes((0.18,0.2,0.8,0.7))
    
    fmt = ScalarFormatter()
    fmt.set_powerlimits((-3,4))
    fmt.set_scientific(True)
    ax.xaxis.set_major_formatter(fmt)
    ax.xaxis.set_minor_locator(MultipleLocator(float(vals[1])-float(vals[0])))
    
    pylab.plot(vals,means,color='k',linewidth=2)
    pylab.plot(min_means_loc,sorted_means[:num_mins],'o',markerfacecolor='None')
    pylab.plot(min_means_loc[0],sorted_means[0],'ko')
    
    pylab.axhline(baseline,linestyle='--',linewidth=1,color='k')
    
    pylab.ylabel('Mean relative error\n(learning vs. analytic)\n\n',ha='center')    
    pylab.xlabel(name)
    pylab.axis('tight')
    
    if png:
        if not os.path.exists('png'):
            os.mkdir('png')
        pylab.savefig('png/'+learn_zips[0].split('-')[0]+'-summary.png',figsize=figsize,dpi=600)
    else:
        pylab.show()
Code example #16
File: plot_static_fields.py Project: guziy/RPN
def _plot_soil_hydraulic_conductivity(ax, basemap, x, y, field, title="", cmap=None):
    ax.set_title(title)
    if cmap is not None:
        levels = np.linspace(field.min(), field.max(), cmap.N + 1)
        levels = np.round(levels, decimals=6)
        bn = BoundaryNorm(levels, cmap.N)
        im = basemap.pcolormesh(x, y, field, ax=ax, cmap=cmap, norm = bn)
        fmt = ScalarFormatter(useMathText=True)
        fmt.set_powerlimits([-2, 3])


        cb = basemap.colorbar(im, ticks=levels, format=fmt)
        cax = cb.ax
        cax.yaxis.get_offset_text().set_position((-3, 5))



    else:
        im = basemap.pcolormesh(x, y, field, ax=ax)
        basemap.colorbar(im, format="%.1f")
Code example #17
File: benchmarkGUI.py Project: vzupanovic/skripte
	def CreatePlot(self):  # just a sample plot; it will be replaced with a real one once data is loaded
		formatter = ScalarFormatter()
		formatter.set_scientific(True)
		formatter.set_powerlimits((0,0)) 
		self.figure = Figure()
		self.figure.set_facecolor('white')
		self.figure.subplots_adjust(bottom=0.3, left=0.25) 
		self.axes = self.figure.add_subplot(111)
		self.axes.xaxis.set_major_formatter(formatter) 
		self.axes.yaxis.set_major_formatter(formatter) 
		x = np.arange(0,6,.01)
		y = np.sin(x**2)*np.exp(-x)
		self.axes.plot(x,y, ls = 'dotted',label = "This is just a sample plot and it will be replaced with\nthe real plot once when you load some data...")
		self.setScales()
	
		handles, labels = self.axes.get_legend_handles_labels()
		self.axes.legend(handles[::-1], labels[::-1], fancybox=True)
		frame=self.axes.get_frame()
		frame.set_alpha(0.4) 
		self.canvas = FigCanvas(self.plotPanel, wx.ID_ANY, self.figure)  # very important step
		return 1
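
A stripped-down sketch isolating the set_powerlimits((0, 0)) trick used above, which switches to offset/scientific notation for essentially any magnitude other than order one (standalone matplotlib, without the wx canvas):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

formatter = ScalarFormatter()
formatter.set_scientific(True)
formatter.set_powerlimits((0, 0))   # use exponent notation as soon as the magnitude is not ~10^0

x = np.arange(0, 6, 0.01)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x**2) * np.exp(-x), ls='dotted')
ax.yaxis.set_major_formatter(formatter)
plt.show()
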
Code example #18
File: plot.py Project: maxvogel/NetworKit-mirror2
	def run(self):
		""" computation """
		(name, nameA, nameB, labelA, labelB, stat_1, stat_2, correlation, theme) = self.getParams()
		plt.ioff()

		def funcHexBin(ax):
			gridsize = correlation["Binning"]["Grid Size"]
			frequencies = correlation["Binning"]["Absolute Frequencies"]
			max  = correlation["Binning"]["Max Frequency"]
			offsets = correlation["Binning"]["Offsets"]
			paths = correlation["Binning"]["Paths"]
			x_min = stat_1["Location"]["Min"]
			x_max = stat_1["Location"]["Max"]
			y_min = stat_2["Location"]["Min"]
			y_max = stat_2["Location"]["Max"]
			for i in range(len(frequencies)):
				color = Theme.RGBA2RGB(
					theme.getPlotColor(),
					math.log(frequencies[i]+1,10)/math.log(max+1,10),
					theme.getBackgroundColor()
				)
				path = paths.transformed(mpl.transforms.Affine2D().translate(
					offsets[i][0],
					offsets[i][1]
				)) 
				ax.add_patch(patches.PathPatch(
					path,
					facecolor = color,
					linestyle = "solid",
					linewidth = 0			
				))
			ax.set_xlim([x_min, x_max])
			ax.set_ylim([y_min, y_max])
			ax.set_xlabel(labelA)
			ax.set_ylabel(labelB)
			ax2 = ax.twinx()
			ax2.set_ylabel(nameB)
			ax2.set_yticks([])
			ax3 = ax.twiny()
			ax3.set_xlabel(nameA)
			ax3.set_xticks([])

		fig = plt.figure()
		ax = fig.gca()

		funcHexBin(ax)
		xfmt = ScalarFormatter(useMathText=True)
		xfmt.set_powerlimits((-1,1))
		ax.xaxis.set_major_formatter(xfmt)
		yfmt = ScalarFormatter(useMathText=True)
		yfmt.set_powerlimits((-1,1))
		ax.yaxis.set_major_formatter(yfmt)

		fig.set_size_inches(4, 3.75)

		return self.save(
			name,
			fig,
			"scatter." + nameA + " - " + nameB
		)
Code example #19
File: style.py Project: matthewwardrop/python-mplkit
	def _polish(self,f):
		# Handle properties of axes directly
		#a = plt.gca() # Current set of axes
		formatter_scalar = ScalarFormatter(useOffset=True,useMathText=False)
		formatter_scalar.set_powerlimits((-3,3))
		formatter_log = LogFormatterMathtext(base=10.0,labelOnlyBase=False)

		# Neaten axes formatters
		for ax in f.get_axes():

			if not isinstance(ax.xaxis.get_major_formatter(),NullFormatter):
				if ax.xaxis.get_scale() == "log":
					ax.xaxis.set_major_locator(LogLocator(base=10.0, subs=[1.0], numdecs=1))
					ax.xaxis.set_major_formatter(formatter_log)
				else:
					ax.xaxis.set_major_formatter(formatter_scalar)
			if not isinstance(ax.yaxis.get_major_formatter(),NullFormatter):
				if ax.yaxis.get_scale() == "log":
					ax.yaxis.set_major_locator(LogLocator(base=10.0, subs=[1.0], numdecs=1))
					ax.yaxis.set_major_formatter(formatter_log)
					#ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=[10], numdecs=1)) # why is this necessary?
				else:
					ax.yaxis.set_major_formatter(formatter_scalar)
Code example #20
File: plot.py Project: nurbldoff/plothole
    def update_ticks(self, draw_canvas=True):

        xMajorFormatter = ScalarFormatter()
        yMajorFormatter = ScalarFormatter()
        xMajorFormatter.set_powerlimits((-3,4))
        yMajorFormatter.set_powerlimits((-3,4))

        xaxis=self.axes.get_xaxis()
        yaxis=self.axes.get_yaxis()

        xaxis.set_major_formatter(xMajorFormatter)
        yaxis.set_major_formatter(yMajorFormatter)

        if self.plot.x_majorticks_enable:
            xMajorLocator = MaxNLocator(self.plot.x_majorticks_maxn)
            xaxis.set_major_locator(xMajorLocator)
        else:
            xaxis.set_major_locator(NullLocator())

        if self.plot.y_majorticks_enable:
            yMajorLocator = MaxNLocator(self.plot.y_majorticks_maxn)
            yaxis.set_major_locator(yMajorLocator)
        else:
            yaxis.set_major_locator(NullLocator())

        if self.plot.x_minorticks_enable:
            xMinorLocator = MaxNLocator(self.plot.x_minorticks_maxn)
            xaxis.set_minor_locator(xMinorLocator)
        else:
            xaxis.set_minor_locator(NullLocator())

        if self.plot.y_minorticks_enable:
            yMinorLocator = MaxNLocator(self.plot.y_minorticks_maxn)
            yaxis.set_minor_locator(yMinorLocator)
        else:
            yaxis.set_minor_locator(NullLocator())

        self.update_margins(draw_canvas=False)

        if draw_canvas:
            self.canvas.draw()
Code example #21
    def __init__(self, diagnostics, filename, ntMax=0, nPlot=1):
        '''
        Constructor
        '''
        
#        matplotlib.rc('text', usetex=True)
        matplotlib.rc('font', family='sans-serif', size='22')
        
        self.prefix = filename
        
        self.ntMax = diagnostics.nt
        
        if self.ntMax > ntMax and ntMax > 0:
            self.ntMax = ntMax
        
        self.nPlot = nPlot
        
        self.diagnostics = diagnostics
        
        
        self.energy      = np.zeros(self.ntMax+1)
        self.helicity    = np.zeros(self.ntMax+1)
        self.magnetic    = np.zeros(self.ntMax+1)
        self.potential   = np.zeros(self.ntMax+1)
        
        
        print("")
        for i in range(0, self.ntMax+1):
            print("Reading timestep %5i" % (i))
            
            self.diagnostics.read_from_hdf5(i)
            self.diagnostics.update_invariants(i)
            
            if self.diagnostics.plot_energy:
                self.energy  [i] = self.diagnostics.energy
            else:
                self.energy  [i] = self.diagnostics.E_error
            
            if self.diagnostics.plot_helicity:
                self.helicity[i] = self.diagnostics.helicity
            else:
                self.helicity[i] = self.diagnostics.H_error
            
            if self.diagnostics.plot_magnetic:
                self.magnetic[i] = self.diagnostics.magnetic
            else:
                self.magnetic[i] = self.diagnostics.M_error
            
            if self.diagnostics.inertial_mhd:
                if self.diagnostics.plot_L2_X:
                    self.potential[i] = self.diagnostics.L2_X
                else:
                    self.potential[i] = self.diagnostics.L2_X_error
            else:
                if self.diagnostics.plot_L2_A:
                    self.potential[i] = self.diagnostics.L2_A
                else:
                    self.potential[i] = self.diagnostics.L2_A_error
            
        
        # set up tick formatter
        majorFormatter = ScalarFormatter(useOffset=False, useMathText=True)
        ## -> limit to 1.1f precision
        majorFormatter.set_powerlimits((-1,+1))
        majorFormatter.set_scientific(True)


        # set up figure for energy plot
        self.figure1 = plt.figure(num=1, figsize=(16,4))
        
        # set up plot margins
        plt.subplots_adjust(hspace=0.25, wspace=0.2)
        plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.25)
        
        axesE = plt.subplot(1,1,1)
        axesE.plot(self.diagnostics.tGrid[0:ntMax+1:self.nPlot], self.energy[0:ntMax+1:self.nPlot])
        
        axesE.set_xlabel('$t$', labelpad=15, fontsize=26)
        axesE.set_xlim(self.diagnostics.tGrid[0], self.diagnostics.tGrid[ntMax])
        
        if self.diagnostics.plot_energy:
            axesE.set_ylabel('$E (t)$', labelpad=15, fontsize=26)
        else:
            axesE.set_ylabel('$(E (t) - E (0)) / E (0)$', labelpad=15, fontsize=26)
        
        axesE.yaxis.set_label_coords(-0.075, 0.5)
        axesE.yaxis.set_major_formatter(majorFormatter)
        
        for tick in axesE.xaxis.get_major_ticks():
            tick.set_pad(12)
        for tick in axesE.yaxis.get_major_ticks():
            tick.set_pad(8)
                
        plt.draw()
        
        filename = self.prefix + str('_energy_%06d' % self.ntMax) + '.png'
        plt.savefig(filename, dpi=300)
        filename = self.prefix + str('_energy_%06d' % self.ntMax) + '.pdf'
        plt.savefig(filename)
        
        
        # set up figure for helicity plot
        self.figure2 = plt.figure(num=2, figsize=(16,4))
        
        # set up plot margins
        plt.subplots_adjust(hspace=0.25, wspace=0.2)
        plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.25)
        
        axesH = plt.subplot(1,1,1)
        axesH.plot(self.diagnostics.tGrid[0:ntMax+1:self.nPlot], self.helicity[0:ntMax+1:self.nPlot])
        axesH.set_xlim(self.diagnostics.tGrid[0], self.diagnostics.tGrid[ntMax])
        
        axesH.set_xlabel('$t$', labelpad=15, fontsize=26)
        
        if self.diagnostics.plot_helicity:
            axesH.set_ylabel('$C_{\mathrm{CH}} (t)$', labelpad=15, fontsize=24)
            axesH.yaxis.set_label_coords(-0.075, 0.5)
        else:
            axesH.set_ylabel('$(C_{\mathrm{CH}} (t) - C_{\mathrm{CH}} (0)) / C_{\mathrm{CH}} (0)$', labelpad=15, fontsize=24)
            axesH.yaxis.set_label_coords(-0.075, 0.38)
        
        axesH.yaxis.set_major_formatter(majorFormatter)
        
        for tick in axesH.xaxis.get_major_ticks():
            tick.set_pad(12)
        for tick in axesH.yaxis.get_major_ticks():
            tick.set_pad(8)
                
        plt.draw()
        
        filename = self.prefix + str('_c_helicity_%06d' % self.ntMax) + '.png'
        plt.savefig(filename, dpi=300)
        filename = self.prefix + str('_c_helicity_%06d' % self.ntMax) + '.pdf'
        plt.savefig(filename)
        
        

        # set up figure for helicity plot
        self.figure3 = plt.figure(num=3, figsize=(16,4))
        
        # set up plot margins
        plt.subplots_adjust(hspace=0.25, wspace=0.2)
        plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.25)
        
        axesM = plt.subplot(1,1,1)
        axesM.plot(self.diagnostics.tGrid[0:ntMax+1:self.nPlot], self.magnetic[0:ntMax+1:self.nPlot])
        
        axesM.set_xlabel('$t$', labelpad=15, fontsize=26)
        axesM.set_xlim(self.diagnostics.tGrid[0], self.diagnostics.tGrid[ntMax])
        
        if self.diagnostics.plot_magnetic:
            axesM.set_ylabel('$C_{\mathrm{MH}} (t)$', labelpad=15, fontsize=24)
            axesM.yaxis.set_label_coords(-0.075, 0.5)
        else:
            axesM.set_ylabel('$(C_{\mathrm{MH}} (t) - C_{\mathrm{MH}} (0)) / C_{\mathrm{MH}} (0)$', labelpad=15, fontsize=24)
            axesM.yaxis.set_label_coords(-0.075, 0.38)
        
        axesM.yaxis.set_major_formatter(majorFormatter)
        
        for tick in axesM.xaxis.get_major_ticks():
            tick.set_pad(12)
        for tick in axesM.yaxis.get_major_ticks():
            tick.set_pad(8)
                
        plt.draw()
        
        filename = self.prefix + str('_m_helicity_%06d' % self.ntMax) + '.png'
        plt.savefig(filename, dpi=300)
        filename = self.prefix + str('_m_helicity_%06d' % self.ntMax) + '.pdf'
        plt.savefig(filename)
        
        
        # set up figure for potential plot
        self.figure4 = plt.figure(num=4, figsize=(16,4))
        
        # set up plot margins
        plt.subplots_adjust(hspace=0.25, wspace=0.2)
        plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.25)
        
        axesL = plt.subplot(1,1,1)
        axesL.plot(self.diagnostics.tGrid[0:ntMax+1:self.nPlot], self.potential[0:ntMax+1:self.nPlot])
        
        axesL.set_xlabel('$t$', labelpad=15, fontsize=26)
        axesL.set_xlim(self.diagnostics.tGrid[0], self.diagnostics.tGrid[ntMax])
        
        
        if self.diagnostics.inertial_mhd:
            if self.diagnostics.plot_L2_X:
                axesL.set_ylabel('$C_{L^2} (t)$', labelpad=15, fontsize=24)
                axesL.yaxis.set_label_coords(-0.075, 0.5)
            else:
                axesL.set_ylabel('$(C_{L^2} (t) - C_{L^2} (0)) / C_{L^2} (0)$', labelpad=15, fontsize=24)
                axesL.yaxis.set_label_coords(-0.075, 0.4)
        else:
            if self.diagnostics.plot_L2_A:
                axesL.set_ylabel('$C_{L^2} (t)$', labelpad=15, fontsize=24)
                axesL.yaxis.set_label_coords(-0.075, 0.5)
            else:
                axesL.set_ylabel('$(C_{L^2} (t) - C_{L^2} (0)) / C_{L^2} (0)$', labelpad=15, fontsize=24)
                axesL.yaxis.set_label_coords(-0.075, 0.4)
        
        axesL.yaxis.set_major_formatter(majorFormatter)
        
        for tick in axesL.xaxis.get_major_ticks():
            tick.set_pad(12)
        for tick in axesL.yaxis.get_major_ticks():
            tick.set_pad(8)
                
        plt.draw()
        
        filename = self.prefix + str('_l2_psi_%06d' % self.ntMax) + '.png'
        plt.savefig(filename, dpi=300)
        filename = self.prefix + str('_l2_psi_%06d' % self.ntMax) + '.pdf'
        plt.savefig(filename)
Code example #22
File: graph.py Project: seann999/graph-cli
def create_graph(graphs):
    import matplotlib
    if graphs[-1].output:
        # disables screen requirement for plotting
        # must be called before importing matplotlib.pyplot
        matplotlib.use('Agg')
    else:
        # sets the interactive backend to Qt5Agg
        # (an explicit backend is required for python2)
        matplotlib.rcParams['backend'] = 'Qt5Agg'
    import matplotlib.pyplot as plt
    from matplotlib.ticker import PercentFormatter, ScalarFormatter

    # set global fontsize if any
    if Graph.fontsize[1]:
        plt.rcParams.update({'font.size': Graph.fontsize[0]})
        # TODO: override individual font settings
        if Graph.label_fontsize[1] is False:
            Graph.xlabel_fontsize = (None, False)
            Graph.ylabel_fontsize = (None, False)
        if Graph.tick_fontsize[1] is False:
            Graph.xtick_fontsize = (None, False)
            Graph.ytick_fontsize = (None, False)

    # make Graph.global = (val, flag) just val
    Graph.remove_global_flags()

    # create figure
    fig, ax = plt.subplots(figsize=(Graph.figsize))

    # iterate over graphs array
    for graph in graphs:
        if graph.bar:
            x = np.arange(len(graph.xcol))
            ax.bar(x + graph.offset, graph.ycol, align='center',
                label=graph.legend, color=graph.color, width=graph.width)
            plt.xticks(x, graph.xcol)
        elif graph.barh:
            x = np.arange(len(graph.xcol))
            ax.barh(x + graph.offset, graph.ycol, align='center',
                label=graph.legend, color=graph.color, height=graph.width)
            plt.yticks(x, graph.xcol)
        elif graph.hist or graph.hist_perc:
            bins = graph.bins
            if bins is None and graph.bin_size is None:
                # default: one bin for each
                bins = int((graph.ycol.max() - graph.ycol.min()))
            elif graph.bin_size:
                _min, _max, _bin = graph.ycol.min(), graph.ycol.max(), graph.bin_size
                bins = np.arange(_min - (_min % _bin),
                        _max + (_bin - (_max % _bin)), _bin)
            weights = np.ones_like(graph.ycol)
            if graph.hist_perc:
                weights = weights * 100.0 / len(graph.ycol)
                ax.yaxis.set_major_formatter(PercentFormatter(xmax=100, decimals=1))
            ax.hist(graph.ycol, bins=bins, weights=weights)
        else:
            l = ax.plot(graph.xcol, graph.ycol, label=graph.legend,
                marker=graph.marker, color=graph.color, linestyle=graph.style,
                linewidth=graph.width, markersize=graph.markersize)[0]
            if not graph.timeseries:
                yformat = ScalarFormatter(useOffset=False, useMathText=True)
                xformat = ScalarFormatter(useOffset=False, useMathText=True)
                yformat.set_powerlimits(Graph.exponent_range)
                xformat.set_powerlimits(Graph.exponent_range)
                ax.yaxis.set_major_formatter(yformat)
                ax.xaxis.set_major_formatter(xformat)

            if graph.fill:
                ax.fill_between(graph.xcol, graph.ycol, alpha=0.1,
                color=l.get_color())
        if graph.output:
            apply_globals(plt, ax, graphs)
            plt.savefig(graph.output)
        elif graph == graphs[-1]:
            apply_globals(plt, ax, graphs)
            plt.show()
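
A stripped-down sketch of the axis-formatting pattern in the non-timeseries branch above (standalone, without graph-cli's Graph object and CLI plumbing):

import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

fig, ax = plt.subplots()
ax.plot([1e5, 2e5, 3e5], [1.5e-4, 2.5e-4, 3.5e-4])

for axis in (ax.xaxis, ax.yaxis):
    fmt = ScalarFormatter(useOffset=False, useMathText=True)
    fmt.set_powerlimits((-3, 4))    # mirrors a typical exponent range
    axis.set_major_formatter(fmt)

plt.show()
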
Code example #23
                data[algorithm] = dict()
            if not core in data[algorithm]:
                data[algorithm][core] = dict()
            if not depth in data[algorithm][core]:
                data[algorithm][core][depth] = list()
            data[algorithm][core][depth].append(duration)
    return (data, depth_set, core_set)


outputfile = sys.argv[1]
data_set, depth_set, core_set = get_dataset(outputfile)
runtime_results = dict()
for depth_val in sorted(depth_set.keys()):
    print "Results for " + str(depth_val) + " depth"
    ax = plt.gca()
    formatter = ScalarFormatter()
    formatter.set_scientific(False)
    ax.yaxis.set_major_formatter(formatter)
    color = iter(cm.rainbow(np.linspace(0, 1, 10)))
    marker = iter([
        "\\", "/", "\\", "/", "\\", "/", "\\", "/", "\\", "/", "\\", "/", "\\",
        "/", "\\", "/", "\\", "/", "\\", "/"
    ])
    m = next(marker)
    c = next(color)
    core_vals = sorted(core_set.keys())
    num_cores = len(core_set.keys())
    num_algorithms = len(data_set.keys())
    bar_width = 1.0 / float(num_algorithms + 1)
    indices = np.arange(num_cores)
    runtime_cnt = 0
Code example #24
File: demo_ribbon_box.py Project: sjl421/ML
    # Fixing random state for reproducibility
    np.random.seed(19680801)

    fig, ax = plt.subplots()

    years = np.arange(2004, 2009)
    box_colors = [(0.8, 0.2, 0.2),
                  (0.2, 0.8, 0.2),
                  (0.2, 0.2, 0.8),
                  (0.7, 0.5, 0.8),
                  (0.3, 0.8, 0.7),
                  ]
    heights = np.random.random(years.shape) * 7000 + 3000

    fmt = ScalarFormatter(useOffset=False)
    ax.xaxis.set_major_formatter(fmt)

    for year, h, bc in zip(years, heights, box_colors):
        bbox0 = Bbox.from_extents(year - 0.4, 0., year + 0.4, h)
        bbox = TransformedBbox(bbox0, ax.transData)
        rb_patch = RibbonBoxImage(bbox, bc, interpolation="bicubic")

        ax.add_artist(rb_patch)

        ax.annotate(r"%d" % (int(h/100.)*100),
                    (year, h), va="bottom", ha="center")

    patch_gradient = BboxImage(ax.bbox, interpolation="bicubic", zorder=0.1)
    gradient = np.zeros((2, 2, 4))
    gradient[:, :, :3] = [1, 1, 0.]
Code example #25
File: plotutils_base.py Project: flomertens/libwise
 def __init__(self, useMathText=True):
     self._fmt = ScalarFormatter(useMathText=useMathText, useOffset=False)
     self._fmt.create_dummy_axis()
Code example #26
def plot_all(data, graph_title, y_axis_label, graph_filename, is_port_data,
             max_y):
    """
    Create a graph from the data given. Each element in the data represents
    a subplot in the graph. The graph will be titled with the param: graph_title
    and label the y axis with the param: y axis label. Finally, it will be saved
    as a png file named with the param: graph_filename.
    """
    #how many rows and columns to use for the subplots
    dimension = generate_subplot_dimension(len(data))
    #generate the subplots. The figsize is the width and height of the whole graph in inches
    fig, axlist = plt.subplots(dimension[0],
                               dimension[1],
                               figsize=(dimension[1] * 4, dimension[0] * 4))
    #the original axlist is a list of lists. This coverts it to just a list
    axlist = axlist.flatten()

    #check that there aren't going to be any empty subplots
    difference = (dimension[0] * dimension[1]) - len(data)
    #if there are empty subplots, reduce the number of subplots
    for _ in range(difference, 0, -1):
        fig.delaxes(axlist[0])

    #update the ax list in case we deleted some subplots
    axlist = fig.axes

    #dont use scientific notation
    y_formatter = ScalarFormatter(useOffset=False)
    y_formatter.set_scientific(False)

    i = 0
    for d in data:
        #get a subplot to draw in
        ax = axlist[i]
        #set y axis to not use scientific notation
        ax.yaxis.set_major_formatter(y_formatter)
        ax.set_xlabel("Time (sec)")
        #The y label gets split into lines of 40 characters for readability
        ax.set_ylabel("\n".join(wrap(y_axis_label, 40)))
        ax.set_title("Attempt {:03d}".format(i))
        #Get the length of a random list in the data (they should all be
        #the same length) and create the x axis tick labels based on this
        x_axis = create_x_axis_tick_labels(len(next(iter(d.values()))))
        #Finally, draw the plot
        if is_port_data:
            #set y axis bounds and pad the top
            ax.set_ylim([0, max_y + (max_y * 0.01)])
            ax.stackplot(x_axis, d.values(), labels=d.keys(), colors=COLOURS)
        else:
            j = 0
            for key in d:
                x_axis = create_x_axis_tick_labels(len(d.get(key)))
                colour = COLOURS[j]
                if MASTER_HOSTNAME not in d:
                    colour = COLOURS[j + 1]
                ax.plot(x_axis, d.get(key), label=key, color=colour)
                j += 1
        i += 1

    #The title gets split into lines of 80 characters for readability
    title = plt.suptitle("\n".join(wrap(graph_title, 80)))
    legend = plt.legend(bbox_to_anchor=(0, -0.4), ncol=4, loc="lower center")
    #ensures that none of the subplots are overlapping
    plt.tight_layout(rect=[0, 0, 1, 0.95])
    fig.savefig(graph_filename,
                dpi=300,
                bbox_extra_artists=(
                    title,
                    legend,
                ),
                bbox_inches='tight')
Code example #27
def hist1d_oneset(label='',
                  dolog=True,
                  val='mvir',
                  lims=[],
                  binnum=20,
                  binsize=0,
                  types=[]):
    # new version of mass_spec function
    # label is the name of the data set (eg 30dor, pcc, etc.)
    # val is the input from the label_physprop_add.txt columns

    # global deltav, avgbeam, dist, freq, axes
    # deltav  = dvkms * u.km / u.s
    # avgbeam = beam * u.arcsec
    # dist    = distpc * u.pc
    # freq    = fghz * u.GHz

    # sanity check on label
    if label == '':
        label = str(
            input('No label [dataset_line] given, enter name to continue: '))

    # default types is all of them, can be specified to only put a few
    # loop below catches types not consistent with parameters
    if len(types) == 0:
        types = ['trunks', 'branches', 'leaves', 'clusters']
    else:
        for t in types:
            if t not in ['trunks', 'branches', 'leaves', 'clusters']:
                print(
                    'Type \'{}\' not recognized from default types, exiting...'
                    .format(t))
                return
            else:
                continue

    # makes directory in which plots will be created if it doesn't already exist
    if os.path.isdir('../prophists') == 0:
        os.mkdir('../prophists')

    # formatting parameters
    params = {'text.usetex': False, 'mathtext.fontset': 'stixsans'}
    plt.rcParams.update(params)

    # read in data
    if os.path.isfile('../props/' + label + '_physprop_add.txt'):
        pcat = Table.read('../props/' + label + '_physprop_add.txt',
                          format='ascii.ecsv')
    else:
        pcat = Table.read('../props/' + label + '_physprop.txt',
                          format='ascii.ecsv')

    # get indicies of types
    idc = [0, 0, 0, 0]
    for i, typ in enumerate(types):
        with open('../props/' + label + '_' + typ + '.txt', 'r') as f:
            reader = csv.reader(f, delimiter=' ')
            a = list(zip(*reader))
        idc[i] = list(map(int, a[0]))

    # data flattening
    pltdata = []
    for i in range(len(types)):
        data = pcat[val][idc[i]]
        xdata = np.log10(data[data > 0])
        pltdata.append(xdata)

    # limit and bin size determination
    if len(lims) == 0:
        limvals = [item for sublist in pltdata for item in sublist]
        limmin = np.around(np.nanmin(limvals), 2)
        limmax = np.around(np.nanmax(limvals), 2)
    elif len(lims) == 1:
        print('Only one limit ({}) specified, exiting...'.format(lims[0]))
        return
    else:
        if len(lims) > 2:
            print('Only first two limits will be used')
        limmin = lims[0]
        limmax = lims[1]

    if binsize == 0:
        limdif = limmax - limmin
        optsize = np.around(limdif / binnum, 3)

        # choosing logic, presets are arbitrary but work well for 30dor and pcc
        if (optsize < .01):
            binsize = .01
        elif (.01 <= optsize < .025):
            binsize = .02
        elif (.025 <= optsize < .06):
            binsize = .04
        elif (.06 <= optsize < .09):
            binsize = .08
        elif (.09 <= optsize < .15):
            binsize = .1
        elif (.15 <= optsize < .25):
            binsize = .2
        elif (.25 <= optsize < .6):
            binsize = .4
        elif (.6 <= optsize < .9):
            binsize = .8
        elif (.9 <= optsize):
            binsize = 1

    # bin spacing, extra space for clarity
    binlist = np.arange((limmin - 2 * binsize), (limmax + 2 * binsize),
                        binsize)

    # plotting section
    fig, axes = plt.subplots()
    n, bins, patches = axes.hist(pltdata,
                                 binlist,
                                 normed=0,
                                 log=dolog,
                                 histtype='bar',
                                 label=types,
                                 rwidth=.8)

    # changes y-axis to linear for small distributions (looks much cleaner)
    nummax = max([np.max(o) for o in n])
    if (nummax < 14):
        plt.cla()
        n, bins, patches = axes.hist(pltdata,
                                     binlist,
                                     normed=0,
                                     log=0,
                                     histtype='bar',
                                     label=types,
                                     rwidth=.8)

    axes.xaxis.set_minor_locator(FixedLocator(binlist[1::2] + binsize / 2))
    axes.xaxis.set_major_locator(FixedLocator(binlist[0::2] + binsize / 2))

    axes.tick_params(labelsize=6)
    axes.set_xlabel('log ' + val + ' [' + str(pcat[val].unit) + ']')
    axes.set_ylabel('Number of objects binned')
    axes.yaxis.set_major_formatter(ScalarFormatter())

    plt.title('{0}_{1}'.format(label, val))
    plt.legend(loc='best', fontsize='medium')
    #plt.show()
    plt.savefig('../prophists/' + label + '_' + val + '_hist.pdf',
                bbox_inches='tight')
    plt.close()

    print('Plot created successfully for {0}_{1}'.format(label, val))

    return
Code example #28
File: drawing.py Project: SebaArn/Ausbildung
def generate_plot(partial_quota, number_of_instances, f, a0, a1, tmp_y2, tmp_x,
                  tmp_y, blocks_x, start_point, Xtics, yearly_quota, x_start,
                  finished, User_t, System_t, y_start2, y_end2, beginning_dt,
                  nutzergraph, fig, x_end, Data, filter_n, months):
    if filter_n:
        f.suptitle(str(filter_n), fontweight="bold")
    else:
        f.suptitle(str(Data[0]['Account'])[2:-1], fontweight="bold")
    global daily_eff_days
    global daily_eff_eff
    fmt = "%Y-%m-%d-%H-%M"  # standard format for Dates, year month, day, hour, minute
    myFmt = mdates.DateFormatter('%b %y')
    nothing = mdates.DateFormatter(' ')
    monthly_cputime = []
    monthly_used = []
    effarray = []
    tmp_y2.append(tmp_y[-1])
    if partial_quota:  #### drawing of the quota####
        for i in range(0, number_of_instances
                       ):  # not possible for the last area, hence skipping it.
            col = colorization(
                D_.find_y_from_given_time(
                    datetime.datetime.fromtimestamp(blocks_x[i * 2 + 1]),
                    tmp_x, tmp_y) - D_.find_y_from_given_time(
                        datetime.datetime.fromtimestamp(blocks_x[i * 2]),
                        tmp_x, tmp_y), partial_quota)
            coordinates_x = (datetime.datetime.fromtimestamp(blocks_x[i * 2]),
                             datetime.datetime.fromtimestamp(blocks_x[i * 2]),
                             datetime.datetime.fromtimestamp(blocks_x[i * 2 +
                                                                      1]))
            coordinates_y = [
                tmp_y2[i * 2], tmp_y2[i * 2 + 1], tmp_y2[i * 2 + 1]
            ]
            a0.fill_between(coordinates_x,
                            0,
                            coordinates_y,
                            color=col,
                            alpha=0.99)
            monthly_cputime.append(tmp_y2[i * 2 + 1] - tmp_y2[i * 2])
        value1 = D_.find_y_from_given_time(
            datetime.datetime.fromtimestamp(blocks_x[-1]), tmp_x, tmp_y)
        value2 = D_.find_y_from_given_time(
            datetime.datetime.fromtimestamp(blocks_x[-2]), tmp_x, tmp_y)
        col = colorization(value1 - value2, partial_quota)
        coordinates_x = (datetime.datetime.fromtimestamp(blocks_x[-2]),
                         datetime.datetime.fromtimestamp(blocks_x[-2]),
                         datetime.datetime.fromtimestamp(blocks_x[-1]))
        coordinates_y = (value2, value2 + partial_quota,
                         value2 + partial_quota)
        a0.fill_between(coordinates_x, 0, coordinates_y, color=col, alpha=0.99)
    # determines the last interval's color and draws it (uses the highest
    # recorded value as the end value of the ongoing time span).
    axis = plt.gca()  # for plotting/saving the plot as its own image
    # Sets the visual borders for the graphs; area of occurring values (main graph) +- 5%.
    if start_point:  # setting the beginning and end of the graph
        beginning = start_point.timestamp()
        end = start_point.timestamp() + 365 * 24 * 3600
        beginning = beginning - 30 * 24 * 3600
        end = end + 30 * 24 * 3600
    extrapolation_x = []
    extrapolation_y = []
    if len(tmp_y2) < 3:
        tmp_y2.append(0)
        tmp_y2.append(0)
    usedmonths = 0
    usedmonths += 12 * (int(x_end.strftime("%Y")) -
                        int(x_start.strftime("%Y")))
    usedmonths += int(x_end.strftime("%m")) - int(x_start.strftime("%m"))
    monthsleft = int(months - usedmonths)

    if yearly_quota and len(tmp_x) >= 1:
        extrapolation_point_x = D_.first_of_month(x_end)
        extrapolation_point_y = D_.find_y_from_given_time(
            tmp_x[-1], tmp_x, tmp_y)
        extrapolation_point_y = max(
            extrapolation_point_y,
            D_.find_y_from_given_time(D_.first_of_month(extrapolation_point_x),
                                      tmp_x, tmp_y) + partial_quota)
        extrapolation_x.append(D_.first_of_month(extrapolation_point_x))
        extrapolation_x.append(D_.first_of_month(extrapolation_point_x))
        extrapolation_x.append(
            D_.first_of_month(
                datetime.datetime.fromtimestamp(
                    extrapolation_point_x.timestamp() + 2851200)))
        extrapolation_y.append(
            D_.find_y_from_given_time(extrapolation_point_x, tmp_x, tmp_y))
        extrapolation_y.append(
            max(extrapolation_y[0] + partial_quota, tmp_y[-1]))
        extrapolation_y.append(extrapolation_y[-1])
        expoint_y = extrapolation_y[-1]

        extrapolation_y[-2] = tmp_y[-1]
        extrapolation_y[-1] = tmp_y[-1]
        expoint_y = extrapolation_y[-1]

        xtr_pt_x = extrapolation_point_x
        xtr_pt_y = extrapolation_point_y
        for i in range(1,
                       monthsleft):  # The three points required for each block
            extrapolation_x.append(
                D_.first_of_month(
                    datetime.datetime.fromtimestamp(xtr_pt_x.timestamp() +
                                                    i * 2851200)))
            extrapolation_x.append(
                D_.first_of_month(
                    datetime.datetime.fromtimestamp(xtr_pt_x.timestamp() +
                                                    i * 2851200)))
            extrapolation_x.append(
                D_.first_of_month(
                    datetime.datetime.fromtimestamp(xtr_pt_x.timestamp() +
                                                    (i + 1) * 2851200)))
            extrapolation_y.append(expoint_y + (i - 1) * partial_quota)
            extrapolation_y.append(expoint_y + i * partial_quota)
            extrapolation_y.append(expoint_y + i * partial_quota)
        if monthsleft:
            a0.plot(extrapolation_x[3:], extrapolation_y[3:], "black")
    if monthsleft:
        extrapolation_y.append(0)
    else:
        extrapolation_y = [0]
    beg_14_months = beginning + 36817200
    fourteen_dt = datetime.datetime.fromtimestamp(beg_14_months)
    # Print statements, to give feedback either onscreen or into a dedicated file to be piped into.
    print('The accumulated TotalCPU time is',
          int((User_t[-1] + System_t[-1]) * 100) / 100, "hours")
    print('and the number of accumulated corehours is',
          int(tmp_y[-1] * 100) / 100)
    efficiency = (User_t[-1] + System_t[-1]) / tmp_y[-1]
    # Added rounding to the efficiency percentage feedback.
    print('Which results in an efficiency of',
          int(efficiency * 10000) / 100 + 0.005, "%")
    if efficiency < 0 or efficiency > 1:
        print(
            "Efficiency is outside of it's boundaries, valid is only between 0 and 1"
        )
    accum_total_time = np.zeros(len(tmp_x))
    for i in range(0, len(accum_total_time)):
        accum_total_time[i] = User_t[i] + System_t[i]
    delta = [0]
    total_time = []
    total_time.append(accum_total_time[0])
    total_time.append(accum_total_time[0])
    difference = [0]
    for i in range(1, len(accum_total_time)):
        total_time.append(accum_total_time[i] - accum_total_time[i - 1])
        delta.append(100 * ((accum_total_time[i] - accum_total_time[i - 1]) /
                            (tmp_y[i] - tmp_y[i - 1])))
        if delta[i] > 100:
            a = 0
    if yearly_quota:  # ensuring that the extrapolated quota is still in frame
        a0.set_ylim([
            y_start2 - (0.05 * y_end2),
            max(tmp_y[-1], max(extrapolation_y), max(coordinates_y)) * 1.2
        ])
    #    print("limit",a0.get_ylim()[1])
    else:  # No quota given, image is focused around occupied and utilized resources.
        print("NO YEARLY DETECTED")
        a0.set_ylim([y_start2 - (0.05 * y_end2), tmp_y[-1] * 1.05])
    #####  Creation of patches for Legend #####
    red_patch = mpatches.Patch(color='#ff0000', alpha=0.7, label='>=150%')
    orange_patch = mpatches.Patch(color='#ffa500',
                                  alpha=0.7,
                                  label='>=110%,<150%')
    green_patch = mpatches.Patch(color='#008000',
                                 alpha=0.8,
                                 label='>=70%,<110%')
    light_green_patch = mpatches.Patch(color='#81c478',
                                       alpha=0.8,
                                       label='<70%')
    grey_patch = mpatches.Patch(color='dimgrey',
                                alpha=0.75,
                                label='Allocated corehours')
    yellow_patch = mpatches.Patch(color='#d9e72e',
                                  alpha=0.49,
                                  label='Utilized corehours')
    black_patch = mpatches.Patch(color='black',
                                 alpha=1,
                                 label='Granted corehours')
    a0.plot(tmp_x, accum_total_time, '#d9e72e')  # plotting the TotatlCPU Graph
    if yearly_quota:  # Legends for if there is a quota, or a shorter Legend in case there isn't.
        a0.legend(handles=[
            red_patch, orange_patch, green_patch, light_green_patch,
            grey_patch, yellow_patch, black_patch
        ])
    else:
        a0.legend(handles=[grey_patch, yellow_patch])
    a0.fill_between(tmp_x, 0, accum_total_time, color='#d9e72e',
                    alpha=0.70)  # plotting the area below TotalCPU graph
    a0.plot(tmp_x, tmp_y, 'dimgrey', fillstyle='bottom',
            alpha=0.75)  # plotting the main graph (cores * hours)
    a0.fill_between(tmp_x, 0, tmp_y, color="grey",
                    alpha=0.45)  # plotting the area below the corehours graph
    for i in range(len(accum_total_time), number_of_instances * 3 +
                   4):  # ensuring that empty months will be accounted for
        accum_total_time = np.append(
            accum_total_time,
            accum_total_time[-1])  # filling accumulated time with most recent
    if yearly_quota:
        for i in range(0, int(number_of_instances)
                       ):  # not possible for the last area, hence skipping it.
            monthly_used.append(accum_total_time[i * 3 + 3] -
                                accum_total_time[i * 3])
    percentages = [0]
    for i in range(len(monthly_cputime)):
        if monthly_used[i] >= 1:
            percentages.append(10 * (monthly_cputime[i] / monthly_used[i]))
    for i in range(len(percentages)):
        effarray.append(percentages[i])
    a0.grid(True)
    axis2 = fig.add_subplot(212)
    a1legend1 = mpatches.Patch(color='Red', alpha=0.8, label="per day")
    a1legend2 = mpatches.Patch(color='purple', alpha=0.8, label='per job')
    a1.plot(tmp_x, delta, '.', color="purple", markersize=5,
            alpha=0.35)  # percentages amplified by the lower bound to be more visible
    a1.legend(handles=[a1legend1, a1legend2])
    plt.ylabel('Efficiency')
    daily = []
    dates = []
    for i in range(int(x_start.timestamp()), int(x_end.timestamp()), 2764800):
        r = D_.gather_efficiencies_for_month(
            datetime.datetime.fromtimestamp(i), Data)
        daily_eff_days = r[-2]
        daily_eff_eff = r[-1]
        r = r[:-2]
        for j in range(len(r[0])):
            if r[1][j] > 0:
                daily.append(100 * r[1][j] / r[0][j])
                dates.append(r[2][j])
    formatteddates = []
    for i in dates:
        if len(str(i)) > 5 and "." not in str(i):
            transp = str(i)[2:18]
            formatteddates.append(datetime.datetime.strptime(transp, fmt))
    eff_days = []
    #for i in dates:
    #    eff_days.append(datetime.datetime.strptime(str(i)[2:18], fmt))
    a1.plot(formatteddates, daily, '.', color="Red", markersize=3, alpha=0.85)
    eff_distance = 0 - axis.get_ylim()[0]
    a1.grid(
        True
    )  # Creates a grid in the image to aid the viewer in visually processing the data.
    a1.set_ylim([-5, 105])
    if nutzergraph:
        a1.set_ylim(
            [0,
             100])  # Usergraphs don't display anything above 100% or below 0%.
    a1.set_yticks(
        np.arange(0, 101, 10),
        minor=True)  # minor tick-lines are much thinner than regular ones
    a1.set_yticks(np.arange(0, 101, 25))
    a1.yaxis.set_major_formatter(mtick.PercentFormatter())
    plt.xlabel(' ')
    a0.xaxis.tick_top()
    a0.set_xlim((beginning_dt, fourteen_dt))
    a1.set_xlim((beginning_dt, fourteen_dt))
    plt.sca(a0)
    a0.yaxis.set_major_formatter(ScalarFormatter(useOffset=True))
    plt.xticks(Xtics)
    plt.ylabel('CPUhours')
    emptylabels = []
    for i in a0.get_xticklabels():
        emptylabels.append("")

    new_ylabels, unit = get_scaled_ylabels(a0.get_yticks())

    plt.ylabel("CPUhours (" + unit + ")")

    a0.set_xticklabels(emptylabels)
    a0.set_yticklabels(new_ylabels)
    plt.sca(a1)
    # dictates gap in height, left border, right border, gap in width, bottom border, top border
    plt.subplots_adjust(hspace=0.03,
                        left=0.1,
                        right=0.925,
                        wspace=0.07,
                        bottom=0.035,
                        top=0.95)
    plt.xlim(beginning_dt, fourteen_dt)
    plt.xticks(Xtics)
    a0.xaxis.set_major_formatter(nothing)  # removes the x-tic notations
    a1.xaxis.set_major_formatter(myFmt)
    a1.grid(which='minor', alpha=0.2)
    a1.grid(which='major', alpha=0.5)
    f.set_size_inches((11, 8.5), forward=False)
    ## for png compression
    #ram = io.BytesIO()
    #plt.savefig(ram, format='png')
    #ram.seek(0)
    #im = Image.open(ram)
    #im2 = im.convert('RGB').convert('P', palette=Image.ADAPTIVE)
    #return im2
    #print(tmp_x)
    #print(tmp_y)
    return f
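
# The call to get_scaled_ylabels(a0.get_yticks()) above relies on a helper that
# is defined elsewhere in this project. A minimal sketch of what such a helper
# could look like; the unit breakpoints below are assumptions, not the original
# project's values:
def get_scaled_ylabels(yticks):
    """Rescale raw CPU-hour tick values; return (labels, unit)."""
    top = max(abs(t) for t in yticks) if len(yticks) else 0
    if top >= 1e6:
        factor, unit = 1e6, 'millions'
    elif top >= 1e3:
        factor, unit = 1e3, 'thousands'
    else:
        factor, unit = 1, 'hours'
    labels = ['{:g}'.format(t / factor) for t in yticks]
    return labels, unit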
Code example #29
0
def minimize():
    # set variables
    cur_f = 0  # current objective function value at the current point
    iters = 0  # iteration counter
    precision = 0.0000001  # stopping tolerance for the algorithm
    iterations = 100000  # maximum number of iterations
    previous_step_size = 1
    ai = []  # list of visited points
    fnval = []  # objective function value at each iteration
    Error_Overflow = False  # flag set when the function value overflows
    Plot = True  # distinguishes plots with and without an overflow error
    lstep = 0.0002  # step size for the gradient algorithm

    # initialize a point with some random value b/w [-5,5]
    a = np.array([np.random.randint(-5, 5), np.random.randint(-5, 5)])
    initial_point = a
    # when the starting point itself is the minimum
    if a[0] == 1.00 and a[1] == 1.00:
        print("\tStarting point itself is the minimum.\n")
        Plot = False

    #loop to minimize objective function
    while previous_step_size > precision and iters < iterations:
        prev_f = cur_f
        #objective function
        f = ((1 - a[0])**2) + (100 * ((a[1] - a[0]**2)**2))
        # stop if the function value has overflowed to infinity
        if abs(f) == float('inf'):
            Error_Overflow = True
            f = fnval
            break
        #append the point and its obj function to ai/fnval lists
        ai.append([a, f])
        fnval.append(f)  #mainly for plot
        #reassign objective function value at point a
        cur_f = f
        # compute its derivative at point a
        fi = np.array(DerivativeRosenbrock(a))
        #reassign step value at point a
        a = a - lstep * fi
        previous_step_size = abs(cur_f - prev_f)
        #increase iteration by 1.
        iters = iters + 1

    # convert ai/fnval into a numpy array and do operations
    ai = np.array(ai)
    minFnVal = min(np.array(fnval))
    minIndex = fnval.index(minFnVal)
    #print the findings
    #print(f'\n\tThe minimum is: {minFnVal} at point: {ai[minIndex,0]} after iterations: {iters}.')
    print(
        f'\t\tStarting Point:   {[initial_point[0],initial_point[1]]}\n\t\tFinal Point:      {ai[minIndex,0]}\
          \n\t\tMinimum Value:    {round(minFnVal,5)}\n\t\tTotal Iterations: {iters}'
    )

    # Plot the decline in function value
    if (Plot == True):
        fig1, ax = plt.subplots()
        ax.set_xscale('log')
        ax.set_yscale('log')
        #Plot with red line if there were an overflow while calculating function value
        #Print appropriate message
        if Error_Overflow:
            ax.plot(range(iters), fnval, color="r")
            print(
                "overflow encountered. Function is NOT going to minimize with starting point: {} and step-size: {}"
                .format(ai[0, 0], lstep))
        else:
            ax.plot(range(iters), fnval, color="b")
        #loop for formatting x/y axis ticks
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
            ax.yaxis.set_major_formatter(
                ticker.FuncFormatter(lambda y, pos: ('{{:.{:1d}f}}'.format(
                    int(np.maximum(-np.log10(y), 0)))).format(y)))
        # draw a dashed red line at the minimum function value reached, for ease of reading
        plt.axhline(y=minFnVal, color="r", linestyle='--')
        #Title and label the graph
        plt.title("Objective Function Value vs Iteartions\n",
                  fontsize=16,
                  fontweight='bold')
        plt.xlabel("Iterations", fontsize=16, fontweight='bold')
        plt.ylabel("Objective Function value", fontsize=16, fontweight='bold')
        plt.show()
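
# minimize() above calls DerivativeRosenbrock, which is defined elsewhere in
# the original script. A minimal sketch of such a helper, assuming it returns
# the standard analytic gradient of the Rosenbrock function used above:
def DerivativeRosenbrock(a):
    """Gradient [df/dx, df/dy] of f(x, y) = (1 - x)^2 + 100 (y - x^2)^2."""
    dfdx = -2 * (1 - a[0]) - 400 * a[0] * (a[1] - a[0]**2)
    dfdy = 200 * (a[1] - a[0]**2)
    return [dfdx, dfdy]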
Code example #30
0
File: IntConCor.py Project: michaelJwilson/Spectre
import matplotlib.pylab as plt
import matplotlib.pyplot
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import ScalarFormatter
from matplotlib.ticker import FixedFormatter
import pylab as pl
import numpy as np
import math, os
import glob, pickle

formatter = ScalarFormatter(useMathText=True)

fig_width_pt = 246.0 * 2  # Get this from LaTex using \the\columnwidth
inches_per_pt = 1.0 / 72.27
golden_mean = (np.sqrt(5) - 1.0) / 2.0
fig_width = fig_width_pt * inches_per_pt  # width in inches
fig_height = fig_width * golden_mean  # height in inches
fig_size = [fig_width, fig_height]
params = {
    'axes.labelsize': 10,
    'text.fontsize': 6,
    'legend.fontsize': 6,
    'xtick.labelsize': 11.0,
    'ytick.labelsize': 11.0,
    'figure.figsize': fig_size,
    'font.family': 'serif'
}

pl.rcParams.update(params)
pl.clf()
pl.figure()
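
# Worked example of the sizing above: with fig_width_pt = 492 pt and
# 72.27 pt per inch, fig_width = 492 / 72.27, roughly 6.81 in, and applying the
# golden-ratio factor of about 0.618 gives a height of roughly 4.21 in.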
Code example #31
0
File: agg_results.py Project: ry-z/badge
def plot_lc(df, y, tag, highlight):
    sns.set()
    sns.set_style("white")
    #sns.set_context("paper")

    y_mean = y + '_mean'
    y_std = y + '_std'

    avg_folds = df.groupby(
        ['Data', 'Model', 'Alg', 'nQuery', 'TrainAug', 'Samples']).agg({
            y: ['mean', pop_std, 'count']
        }).reset_index()
    avg_folds[y_mean] = avg_folds[y]['mean']
    avg_folds[y_std] = avg_folds.apply(
        lambda row: row[y]['pop_std'] / sqrt(row[y]['count']), axis=1)
    avg_folds = avg_folds.drop([y], axis=1)

    print(avg_folds)

    setting_col = ['Data', 'Model', 'nQuery', 'TrainAug']

    for setting, g_setting in avg_folds.groupby(setting_col):
        data, model, nQuery, TrainAug = setting
        setting_t = param_to_str_t(OrderedDict(zip(setting_col, setting)))
        setting_fn = param_to_str_fn(OrderedDict(zip(setting_col, setting)))

        df_rand = g_setting[g_setting['Alg'] == 'rand']

        if nQuery != 10000 and highlight:
            for samp in sorted(list(df_rand['Samples'])):
                acc_vals = g_setting[g_setting['Samples'] ==
                                     samp][y_mean].values
                #print(max(acc_vals) / min(acc_vals))
                if max(acc_vals) / min(acc_vals) > 1.08:
                    break
                st_samp = samp

            stopFrac = 0.99

            endSamp = max(df_rand['Samples'])
            endAcc = np.mean(
                df_rand[df_rand['Samples'] == endSamp][y_mean].values)

            for samp in sorted(list(df_rand['Samples'])):
                acc = np.mean(
                    df_rand[df_rand['Samples'] == samp][y_mean].values)
                if acc / endAcc > stopFrac:
                    cut_samp = samp
                    break

            g_setting = g_setting[(g_setting['Samples'] <= cut_samp)
                                  & (g_setting['Samples'] >= st_samp)]
        else:
            algs = g_setting['Alg'].unique()
            cut_samp = 1e+8
            st_samp = -1

            for a in algs:
                df = g_setting[g_setting['Alg'] == a]
                mx = max(df['Samples'])
                mn = min(df['Samples'])
                cut_samp = min(cut_samp, mx)
                st_samp = max(st_samp, mn)

        plt.figure(figsize=(fig_w, fig_h))
        for alg, g_alg in g_setting.groupby(['Alg']):
            ns = list(g_alg['Samples'])
            accs = list(g_alg[y_mean])
            stds = list(g_alg[y_std])

            if float(setting[2]) < 10.1:
                accs = smooth(accs, 10)[:-10]
                ns = ns[:-10]
                stds = stds[:-10]

            plt.plot(ns,
                     accs,
                     label=str(name_dict[alg]),
                     linewidth=1.0,
                     color=color_dict[alg],
                     zorder=10 - order_dict[alg])
            acc_up = [avg + ci for avg, ci in zip(accs, stds)]
            acc_dn = [avg - ci for avg, ci in zip(accs, stds)]
            plt.fill_between(ns,
                             acc_up,
                             acc_dn,
                             alpha=0.2,
                             color=color_dict[alg],
                             zorder=10 - order_dict[alg])

        plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
        plt.gca().yaxis.get_offset_text().set_fontsize(tick_size)
        if y == 'Time':
            plt.gca().yaxis.set_major_formatter(
                ScalarFormatter(useMathText=True))
            plt.gca().ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
        plt.xticks(fontsize=tick_size)
        plt.yticks(fontsize=tick_size)
        plt.title(setting_t, fontsize=title_size)
        plt.xlabel('#Labels queried', fontsize=label_size)
        plt.subplots_adjust(bottom=0.15)
        plt.ylabel(y, fontsize=label_size)
        plt.gca().set_xlim([st_samp, cut_samp])
        #plt.legend(frameon=False)
        plt.grid(linestyle='--', linewidth=1)
        n_alg = len(g_setting['Alg'].unique())
        save_legend(n_alg)
        plt.savefig(all_dir + '/' + tag + '_' + y + '_' + setting_fn + '_' +
                    '.pdf')
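
# plot_lc relies on a pop_std aggregator defined elsewhere in agg_results.py.
# A minimal sketch, assuming it computes the population standard deviation
# (ddof=0) that the standard-error expression above divides by sqrt(count):
import numpy as np

def pop_std(x):
    """Population standard deviation of a grouped pandas Series."""
    return np.std(np.asarray(x, dtype=float), ddof=0)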
Code example #32
0
def plot_partial_dependence(gbrt,
                            X,
                            features,
                            feature_names=None,
                            label=None,
                            n_cols=3,
                            grid_resolution=100,
                            percentiles=(0.05, 0.95),
                            n_jobs=None,
                            verbose=0,
                            ax=None,
                            line_kw=None,
                            contour_kw=None,
                            **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of ints, strings, or tuples of ints or strings
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
        If feature_names is specified and seq[i] is an int, seq[i]
        must be < len(feature_names).
        If seq[i] is a string, feature_names must be specified, and
        seq[i] must be in feature_names.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    n_jobs : int or None, optional (default=None)
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
        For two-way partial dependence plots.
    **fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter

    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    check_is_fitted(gbrt, 'estimators_')

    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0

    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features_ != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features_')

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features_)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()

    def convert_feature(fx):
        if isinstance(fx, str):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral, str)):
            fxs = (fxs, )
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')

        tmp_features.append(fxs)

    features = tmp_features

    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('All entries of features must be less than '
                         'len(feature_names) = {0}, got {1}.'.format(
                             len(feature_names), i))

    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(delayed(
        partial_dependence
    )(gbrt, fxs, X=X, grid_resolution=grid_resolution, percentiles=percentiles)
                                                         for fxs in features)

    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names, pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(axes) == 1:
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX,
                            YY,
                            Z,
                            levels=Z_level,
                            linewidths=0.5,
                            colors='k')
            ax.contourf(XX,
                        YY,
                        Z,
                        levels=Z_level,
                        vmax=Z_level[-1],
                        vmin=Z_level[0],
                        alpha=0.75,
                        **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(
                ax.transAxes, ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15,
                        top=0.7,
                        left=0.1,
                        right=0.95,
                        wspace=0.4,
                        hspace=0.3)
    return fig, axs
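
# A sketch extending the doctest above to show the keyword hooks this function
# exposes (the specific feature indices and kwarg values here are illustrative
# assumptions):
# fig, axs = plot_partial_dependence(clf, X, [0, 5, (5, 1)],
#                                    grid_resolution=50,
#                                    line_kw={'color': 'red'},
#                                    contour_kw={'cmap': 'viridis'})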
Code example #33
0
def hist1d_multiset(labels=['30dor', 'pcc'],
                    lines=['12'],
                    val='mvir',
                    binnum=20,
                    binsize=0,
                    dolog=True,
                    lims=[],
                    types=[]):
    # function will be able to run from any dataset's pyscript folder given that directory
    # structure is identical between datasets (i.e. parallel directories)

    # default types is all of them, can be specified to only plot a few
    # loop below catches types not consistent with parameters
    if len(types) == 0:
        types = ['trunks', 'branches', 'leaves', 'clusters']
    else:
        for t in types:
            if t not in ['trunks', 'branches', 'leaves', 'clusters']:
                print(
                    'Type \'{}\' not recognized from default types (trunks, branches, leaves, clusters), exiting...'
                    .format(t))
                return
            else:
                continue

    # default output folder will be in parallel to running directory
    if os.path.isdir('../multiprop') == 0:
        os.mkdir('../multiprop')

    # formatting parameters
    params = {'text.usetex': False, 'mathtext.fontset': 'stixsans'}
    plt.rcParams.update(params)

    # dictionaries initialized for looping
    pcats = {}  # dictionary of data from respective physprop_add files
    idcs = {}  # dictionary of indices of data for the different types in physprop_add
    pltdatum = {}  # dictionary of data to plot

    # read in data
    for lb in range(len(labels)):
        for ln in range(len(lines)):
            fname = '../../' + labels[lb] + '/props/' + labels[
                lb] + '_' + lines[ln] + '_physprop_add.txt'
            lname = labels[lb] + '_' + lines[ln]

            if os.path.isfile(fname):
                pcats[lname] = Table.read(fname, format='ascii.ecsv')
            else:
                pcats[lname] = Table.read(fname.replace('prop_add', 'prop'),
                                          format='ascii.ecsv')

            # idc indices are in the order of the types list
            idc = [0, 0, 0, 0]
            for i, typ in enumerate(types):
                with open(fname.replace('physprop_add', typ), 'r') as f:
                    reader = csv.reader(f, delimiter=' ')
                    a = list(zip(*reader))
                idc[i] = list(map(int, a[0]))
            idcs[lname] = idc

            # data flattening
            pltdata = []
            for i in range(len(types)):
                data = pcats[lname][val][idcs[lname][i]]
                xdata = np.log10(data[data > 0])
                pltdata.append(xdata)
            pltdatum[lname] = pltdata

    # begin limit calculation
    # need to create list from all datasets that are being plotted
    if len(lims) == 0:
        limvals = []
        for p in pltdatum:
            #print(pltdatum[p])
            lim_swp = [item for sublist in pltdatum[p] for item in sublist]
            limvals.extend(lim_swp)
        limmin = np.around(np.nanmin(limvals), 2)
        limmax = np.around(np.nanmax(limvals), 2)
    elif len(lims) == 1:
        print('Only one limit ({}) specified, exiting...'.format(lims[0]))
        return
    else:
        if len(lims) > 2:
            print('Only first two limits will be used')
        limmin = lims[0]
        limmax = lims[1]

    if binsize == 0:
        limdif = limmax - limmin
        optsize = np.around(limdif / binnum, 3)

        # heuristic choice of bin size
        if (optsize < .01):
            binsize = .01
        elif (.01 <= optsize < .025):
            binsize = .02
        elif (.025 <= optsize < .06):
            binsize = .04
        elif (.06 <= optsize < .09):
            binsize = .08
        elif (.09 <= optsize < .15):
            binsize = .1
        elif (.15 <= optsize < .25):
            binsize = .2
        elif (.25 <= optsize < .6):
            binsize = .4
        elif (.6 <= optsize < .9):
            binsize = .8
        elif (.9 <= optsize):
            binsize = 1

    # bin spacing
    binlist = np.arange((limmin - 2 * binsize), (limmax + 2 * binsize),
                        binsize)

    # plotting
    fig, axes = plt.subplots()
    for p in pltdatum:
        temp_types = [p + ' ' + t for t in types]
        axes.hist(pltdatum[p],
                  binlist,
                  normed=0,
                  log=dolog,
                  histtype='bar',
                  label=temp_types,
                  rwidth=.8)
    axes.xaxis.set_minor_locator(FixedLocator(binlist[1::2] + binsize / 2))
    axes.xaxis.set_major_locator(FixedLocator(binlist[::2] + binsize / 2))

    axes.tick_params(labelsize=6)
    axes.set_xlabel('log ' + val + ' [' +
                    str(pcats[labels[0] + '_' + lines[0]][val].unit) + ']')
    axes.set_ylabel('Number of objects binned')

    axes.set_yscale('log')
    axes.yaxis.set_major_formatter(ScalarFormatter())
    plt.legend(loc='best', fontsize='medium')

    # automated looping for title, output name, output message
    title = '{} for '.format(val)
    for i in range(len(labels)):
        for j in range(len(lines)):
            title += '{}, '.format(labels[i] + '_' + lines[j])
    title = title[:len(title) - 2]
    plt.title(title)
    #plt.show()

    outname = '../multiprop/'
    msg = 'Plot created successfully for {} '.format(val)

    for i in range(len(labels)):
        outname += '{}_'.format(labels[i])
        msg += '{} '.format(labels[i])
    for j in range(len(lines)):
        outname += '{}CO_'.format(lines[j])
        msg += '{}CO '.format(lines[j])
    msg = msg[:len(msg) - 1]

    outname += '{}.pdf'.format(val)
    plt.savefig(outname, bbox_inches='tight')
    plt.close()

    print(msg)

    return
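
# Example invocation, using only the defaults and values that appear in the
# function above (directory layout as described in its comments):
# hist1d_multiset(labels=['30dor', 'pcc'], lines=['12'], val='mvir',
#                 binnum=20, dolog=True, types=['leaves', 'clusters'])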
Code example #34
0
File: plotCMD-2data.py Project: Mezek/datalock
    df['beta'], 3.) * nanoToMili / htc2) * df['pionFF']
df['deltaCheck'] = 1. / (3. * df['S'] / np.pi / alf / alf / np.power(
    df['beta'], 3.) * nanoToMili / htc2) * df['deltaPionFF']
print(df[['sqrtS', 'S', 'CS', 'check', 'deltaCheck']])
# df.to_csv(out_filename)

fig = plt.figure(figsize=(10, 7))
fig.canvas.manager.set_window_title('Figure name')

ax1 = plt.subplot(111)
ax1.set_title('CMD-2')
ax1.set_xlabel('$\\sqrt{s}$  [GeV]')
ax1.set_ylabel('$\\sigma$  [nb]')
ax1.errorbar(dataX, dataY, yerr=dataEY, marker='o', linestyle='', markersize=6)

# ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))
yfmt = ScalarFormatter(useOffset=False)
yfmt.set_powerlimits((-4, 4))
ax1.yaxis.set_major_formatter(yfmt)
ax1.yaxis.grid()

# fig = plt.figure(figsize=(10, 7))
# ax2 = plt.subplot(111)
# ax2.errorbar(dataX, df['FF'], yerr=df['deltaFF'], marker='o', linestyle='', markersize=6)

fig.tight_layout()
save_file = os.path.join(dev_name, 'plot_CMD-2_CS.png')
fig.savefig(save_file, dpi=150)

plt.show()
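
# Note on the 'check' columns above: they appear to invert the standard
# e+ e- -> pi+ pi- relation sigma = pi * alpha^2 * beta^3 / (3 s) * |F_pi|^2
# (with nanoToMili / htc2 handling the unit conversions), so 'check' should
# reproduce the measured cross section CS printed next to it.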
Code example #35
0
def plot_histograms(
    path="/home/huziy/skynet3_rech1/hdf_store/quebec_0.1_crcm5-hcd-rl-intfl_spinup_ecoclimap.hdf"
):
    fig = plt.figure()
    assert isinstance(fig, Figure)
    gs = gridspec.GridSpec(3, 3)

    lons2d, lats2d, basemap = analysis.get_basemap_from_hdf(file_path=path)

    # slope
    ch_slope = analysis.get_array_from_file(path=path, var_name="slope")
    ch_slope = maskoceans(lons2d, lats2d, ch_slope)
    ch_slope = np.ma.masked_where(ch_slope.mask | (ch_slope < 0), ch_slope)
    ax = fig.add_subplot(gs[0, 0])
    assert isinstance(ax, Axes)
    ch_slope_flat = ch_slope[~ch_slope.mask]
    the_hist, positions = np.histogram(
        ch_slope_flat, bins=25, range=[0, np.percentile(ch_slope_flat, 90)])
    the_hist = the_hist.astype(float)
    the_hist /= the_hist.sum()
    barwidth = (positions[1] - positions[0]) * 0.9
    ax.bar(positions[:-1], the_hist, color="0.75", linewidth=0, width=barwidth)
    ax.set_title(r"$\alpha$")
    ax.grid()
    ax.xaxis.set_major_locator(MaxNLocator(nbins=3))
    ax.yaxis.set_major_locator(MaxNLocator(nbins=5))

    # drainage density
    dd = analysis.get_array_from_file(path=path,
                                      var_name="drainage_density_inv_meters")
    dd *= 1000  # convert to km^-1
    ax = fig.add_subplot(gs[0, 1])
    assert isinstance(ax, Axes)
    dd_flat = dd[~ch_slope.mask]
    the_hist, positions = np.histogram(dd_flat,
                                       bins=25,
                                       range=[0, np.percentile(dd_flat, 90)])
    the_hist = the_hist.astype(float)
    the_hist /= the_hist.sum()
    print(the_hist.max(), the_hist.min())
    barwidth = (positions[1] - positions[0]) * 0.9
    ax.bar(positions[:-1], the_hist, color="0.75", linewidth=0, width=barwidth)
    ax.xaxis.set_major_locator(MaxNLocator(nbins=3))
    ax.yaxis.set_major_locator(MaxNLocator(nbins=5))
    ax.set_title(r"$DD {\rm \left( km^{-1} \right)}$")
    ax.grid()

    # vertical soil hydraulic conductivity
    vshc = analysis.get_array_from_file(
        path=path, var_name=infovar.HDF_VERT_SOIL_HYDR_COND_NAME)
    if vshc is not None:
        # get only on the first layer
        vshc = vshc[0, :, :]
        ax = fig.add_subplot(gs[1, 0])
        assert isinstance(ax, Axes)
        vshc_flat = vshc[~ch_slope.mask]
        the_hist, positions = np.histogram(
            vshc_flat, bins=25, range=[0, np.percentile(vshc_flat, 90)])
        the_hist = the_hist.astype(float)
        the_hist /= the_hist.sum()
        print(the_hist.max(), the_hist.min())
        barwidth = (positions[1] - positions[0]) * 0.9
        ax.bar(positions[:-1],
               the_hist,
               color="0.75",
               linewidth=0,
               width=barwidth)
        ax.xaxis.set_major_locator(MaxNLocator(nbins=3))
        ax.yaxis.set_major_locator(MaxNLocator(nbins=5))

        # set a scalar formatter
        sfmt = ScalarFormatter(useMathText=True)
        sfmt.set_powerlimits([-2, 2])
        ax.xaxis.set_major_formatter(sfmt)
        ax.set_title(r"$ K_{\rm V} {\rm (m/s)}$")
        ax.grid()

        # Kv * slope * DD
        ax = fig.add_subplot(gs[1, 1])
        assert isinstance(ax, Axes)

        interflow_h = 0.2  # Soulis et al 2000
        # 1e-3 is to convert drainage density to m^-1
        the_prod = dd_flat * 1e-3 * vshc_flat * ch_slope_flat * 48 * interflow_h

        print("product median: {0}".format(np.median(the_prod)))
        print("product maximum: {0}".format(the_prod.max()))
        print("product 90-quantile: {0}".format(np.percentile(the_prod, 90)))

        the_hist, positions = np.histogram(
            the_prod, bins=25, range=[0, np.percentile(the_prod, 90)])
        the_hist = the_hist.astype(float)
        the_hist /= the_hist.sum()
        print(the_hist.max(), the_hist.min())
        barwidth = (positions[1] - positions[0]) * 0.9
        ax.bar(positions[:-1],
               the_hist,
               color="0.75",
               linewidth=0,
               width=barwidth)
        ax.xaxis.set_major_locator(MaxNLocator(nbins=3))
        ax.yaxis.set_major_locator(MaxNLocator(nbins=5))

        # set a scalar formatter
        sfmt = ScalarFormatter(useMathText=True)
        sfmt.set_powerlimits([-2, 2])
        ax.xaxis.set_major_formatter(sfmt)
        ax.set_title(
            r"$ \beta_{\rm max}\cdot K_{\rm v} \cdot \alpha \cdot DD \cdot H {\rm (m/s)}$ "
        )
        ax.grid()

        # read flow directions
        flow_directions = analysis.get_array_from_file(
            path=path, var_name=infovar.HDF_FLOW_DIRECTIONS_NAME)
        # read cell areas
        # cell_areas = analysis.get_array_from_file(path=path, var_name=infovar.HDF_CELL_AREA_NAME)
        cell_manager = CellManager(flow_directions)
        acc_index = cell_manager.get_accumulation_index()
        acc_index_flat = acc_index[acc_index > 1]
        print(
            "acc_index: min={0}; max={1}; median={2}; 90-quantile={3}".format(
                acc_index_flat.min(), acc_index_flat.max(),
                np.median(acc_index_flat), np.percentile(acc_index_flat, 90)))

        # plot the range of the accumulation index
        ax = fig.add_subplot(gs[0, 2])
        assert isinstance(ax, Axes)
        the_hist, positions = np.histogram(
            acc_index_flat,
            bins=25,
            range=[0, np.percentile(acc_index_flat, 90)])
        the_hist = the_hist.astype(float)
        the_hist /= the_hist.sum()
        print(the_hist.max(), the_hist.min())
        barwidth = (positions[1] - positions[0]) * 0.9
        ax.bar(positions[:-1],
               the_hist,
               color="0.75",
               linewidth=0,
               width=barwidth)
        ax.xaxis.set_major_locator(MaxNLocator(nbins=3))
        ax.yaxis.set_major_locator(MaxNLocator(nbins=5))

        # set a scalar formatter
        sfmt = ScalarFormatter(useMathText=True)
        sfmt.set_powerlimits([-2, 2])
        ax.xaxis.set_major_formatter(sfmt)
        ax.set_title(r"Accum. index")
        ax.grid()

    # lake fraction

    # sand

    # clay

    fig_path = os.path.join(images_folder, "static_fields_histograms.jpeg")
    fig.tight_layout()
    fig.savefig(fig_path, dpi=cpp.FIG_SAVE_DPI, bbox_inches="tight")
Code example #36
0
              data[i] / pc.percent,
              uncert[i] / pc.percent,
              fmt='o',
              alpha=0.7,
              ms=ms,
              mew=0.25,
              color='b',
              elinewidth=lw,
              capthick=lw,
              zorder=3,
              label='Data')
 ax1.tick_params(labelsize=fs - 1, direction='in', which='both')
 if ax1.get_xlim()[1] > 3:
     ax1.set_xscale('log')
     plt.gca().xaxis.set_minor_formatter(NullFormatter())
     ax1.get_xaxis().set_major_formatter(ScalarFormatter())
     ax1.set_xticks(logxticks)
     ax1.set_xlim(1.0, 5.5)
 else:
     ax1.set_xlim(1.0, 2.0)
 ax1.text(0.997,
          0.89,
          names[i],
          fontsize=fs - 1,
          ha='right',
          transform=ax1.transAxes)
 if sticks[i] is not None:
     ax1.yaxis.set_major_locator(MultipleLocator(sticks[i]))
 plt.ylabel(r'$(R_{\rm p}/R_{\rm s})^2$ (%)', fontsize=fs)
 # The posteriors:
 axes = [
Code example #37
0
##>>>and plot on the screen and output figure file using matplotlib-python

import Tkinter as tk
from Tkinter import *

import matplotlib
matplotlib.use('Agg')

from matplotlib.ticker import MaxNLocator
from matplotlib import cm

from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure

from matplotlib.ticker import ScalarFormatter
yformatter = ScalarFormatter()
yformatter.set_powerlimits((-3, 3))

import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as ticker

from numpy import arange, sin, pi
import numpy as np

import h5py as h5
import math

font = {
    'family': 'sans-serif',
    'weight': 'bold',
Code example #38
0
FillContour1.set_norm(norm)
FillContour2.set_norm(norm)
FillContour3.set_norm(norm)
FillContour4.set_norm(norm)
FillContour5.set_norm(norm)
FillContour6.set_norm(norm)

#plt.suptitle("GridSpec")
make_ticklabels_invisible(plt.gcf())

ax3.locator_params(tight=True, nbins=4)
ax5.locator_params(tight=True, nbins=4)
ax6.locator_params(tight=True, nbins=4)

#myformatter = ScalarFormatter(useMathText=True)
myformatter = ScalarFormatter()
myformatter.set_powerlimits((-1,1))
ax3.xaxis.set_major_formatter(myformatter)
ax5.xaxis.set_major_formatter(myformatter)
ax6.yaxis.set_major_formatter(myformatter)
ax6.xaxis.set_major_formatter(myformatter)

cax = f.add_axes([0.2, 0.06, 0.6, 0.02])
f.colorbar(FillContour1, cax, orientation='horizontal')

plt.tight_layout()

plt.savefig('higuchi_proj.eps')
plt.savefig('higuchi_proj.pdf')

plt.show()
Code example #39
0
    def GlobalSignature(self,
                        ax=None,
                        fig=1,
                        freq_ax=False,
                        time_ax=False,
                        z_ax=True,
                        mask=None,
                        scatter=False,
                        xaxis='nu',
                        ymin=None,
                        ymax=50,
                        zmax=None,
                        rotate_xticks=False,
                        rotate_yticks=False,
                        force_draw=False,
                        zlim=80,
                        temp_unit='mK',
                        yscale='linear',
                        take_abs=False,
                        **kwargs):
        """
        Plot differential brightness temperature vs. redshift (nicely).

        Parameters
        ----------
        ax : matplotlib.axes.AxesSubplot instance
            Axis on which to plot signal.
        fig : int
            Figure number.
        freq_ax : bool
            Add top axis denoting corresponding (observed) 21-cm frequency?
        time_ax : bool
            Add top axis denoting corresponding time since Big Bang?
        z_ax : bool
            Add top axis denoting corresponding redshift? Only applicable
            if xaxis='nu' (see below).
        scatter : bool
            Plot signal as scatter-plot?
        mask : int
            If scatter==True, this defines the sampling "rate" of the data,
            i.e., only every mask'th element is included in the plot.
        xaxis : str
            Determines whether x-axis is redshift or frequency. 
            Options: 'z' or 'nu'
        
        Returns
        -------
        matplotlib.axes.AxesSubplot instance.
        
        """

        if xaxis == 'nu' and freq_ax:
            freq_ax = False
        if xaxis == 'z' and z_ax:
            z_ax = False

        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        conv = 1.
        if temp_unit.lower() in ['k', 'kelvin']:
            conv = 1e-3

        if mask is not None:
            nu_plot, dTb_plot = \
                self.history[xaxis][mask], self.history['dTb'][mask] * conv
        else:
            nu_plot, dTb_plot = \
                self.history[xaxis], self.history['dTb'] * conv

        if take_abs:
            dTb_plot = np.abs(dTb_plot)

        ##
        # Plot the stupid thing
        ##
        if scatter is False:
            ax.plot(nu_plot, dTb_plot, **kwargs)
        else:
            ax.scatter(self.history[xaxis][-1::-mask],
                       self.history['dTb'][-1::-mask] * conv, **kwargs)

        if zmax is None:
            zmax = self.pf["initial_redshift"]

        zmin = self.pf["final_redshift"] if self.pf["final_redshift"] >= 10 \
            else 5

        # x-ticks
        if xaxis == 'z' and hasattr(self, 'pf'):
            xticks = list(np.arange(zmin, zmax, zmin))
            xticks_minor = list(np.arange(zmin, zmax, 1))
        else:
            xticks = np.arange(0, 250, 50)
            xticks_minor = np.arange(10, 200, 10)

        # Some elements deemed objects when run through pipelines...
        dTb = np.array(self.history['dTb'], dtype=float)

        if ymin is None and yscale == 'linear':
            ymin = max(min(min(dTb[np.isfinite(dTb)]), ax.get_ylim()[0]), -500)

            # Set lower y-limit by increments of 50 mK
            for val in [
                    -50, -100, -150, -200, -250, -300, -350, -400, -450, -500,
                    -550, -600
            ]:
                if val <= ymin:
                    ymin = int(val)
                    break

        if ymax is None:
            ymax = max(max(dTb[np.isfinite(dTb)]), ax.get_ylim()[1])

        if yscale == 'linear':
            if (not gotax) or force_draw:
                yticks = np.arange(int(ymin / 50) * 50, 100, 50) * conv
                ax.set_yticks(yticks)
            else:
                # Minor y-ticks - 10 mK increments
                yticks = np.linspace(ymin, 50,
                                     int((50 - ymin) / 10. + 1)) * conv
                yticks = list(yticks)

                # Remove major ticks from minor tick list
                if ymin >= -200:
                    for y in np.linspace(ymin, 50,
                                         int((50 - ymin) / 50. + 1)) * conv:
                        if y in yticks:
                            yticks.remove(y)

                ax.set_ylim(ymin * conv, ymax * conv)
                ax.set_yticks(yticks, minor=True)

        if xaxis == 'z' and hasattr(self, 'pf'):
            ax.set_xlim(5, self.pf["initial_redshift"])
        else:
            ax.set_xlim(0, 210)

        if (not gotax) or force_draw:
            ax.set_xticks(xticks, minor=False)
            ax.set_xticks(xticks_minor, minor=True)

            xt = []
            for x in ax.get_xticklabels():
                xt.append(x.get_text())

            ax.set_xticklabels(xt, rotation=45. if rotate_xticks else 0)

            yt = []
            for y in ax.get_yticklabels():
                if not y.get_text().strip():
                    break
                yt.append(y.get_text())

            if yt == []:
                yt = yticks

            ax.set_yticklabels(yt, rotation=45. if rotate_yticks else 0)

        if ax.get_xlabel() == '':
            if xaxis == 'z':
                ax.set_xlabel(labels['z'], fontsize='x-large')
            else:
                ax.set_xlabel(labels['nu'])

        if ax.get_ylabel() == '':
            if temp_unit.lower() == 'mk':
                ax.set_ylabel(labels['dTb'], fontsize='x-large')
            else:
                ax.set_ylabel(r'$\delta T_b \ (\mathrm{K})$',
                              fontsize='x-large')

        # Twin axes along the top
        if freq_ax:
            twinax = self.add_frequency_axis(ax)
        elif time_ax:
            twinax = add_time_axis(ax, self.cosm)
        elif z_ax:
            twinax = add_redshift_axis(ax, zlim=zmax)
        else:
            twinax = None

        self.twinax = twinax

        if gotax and (ax.get_xlabel().strip()) and (not force_draw):
            pl.draw()
            return ax, twinax

        try:
            ax.ticklabel_format(style='plain', axis='both')
        except AttributeError:
            ax.xaxis.set_major_formatter(ScalarFormatter())
            ax.yaxis.set_major_formatter(ScalarFormatter())
            #twinax.xaxis.set_major_formatter(ScalarFormatter())
            ax.ticklabel_format(style='plain', axis='both')

        pl.draw()

        return ax, twinax
Code example #40
0
 def __call__(self, x, pos):
     if pos == 0:
         return ''
     return ScalarFormatter.__call__(self, x, pos)
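
# A sketch of how a __call__ override like the one above is typically used: a
# ScalarFormatter subclass that blanks the tick label at position 0 (the class
# and variable names here are assumptions, not taken from the source):
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

class SkipFirstScalarFormatter(ScalarFormatter):
    def __call__(self, x, pos):
        if pos == 0:
            return ''
        return ScalarFormatter.__call__(self, x, pos)

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
ax.yaxis.set_major_formatter(SkipFirstScalarFormatter())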
Code example #41
0
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import os
from pylab import *
from matplotlib import ticker
from matplotlib.ticker import ScalarFormatter
sformatter = ScalarFormatter(useOffset=True, useMathText=True)
sformatter.set_scientific(True)
sformatter.set_powerlimits((-2, 3))

#plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))

font = {'family': 'serif', 'weight': 'normal', 'size': 14}
plt.rc('font', **font)
plt.rc('text', usetex=False)
plt.figure(figsize=(6, 5))
fig = plt.figure(1)
ax = fig.add_axes([0.14, 0.125, 0.82, 0.85])

mydata = np.loadtxt('moments_bw_Omega50.dat', skiprows=1, unpack=True)
Omega = mydata[0]
rhoB = mydata[3]
Pbar = mydata[5]
sigma2P = mydata[6]
SsigmaP = mydata[7]
Ksigma2P = mydata[8]
Qbar = mydata[9]
sigma2Q = mydata[10]
SsigmaQ = mydata[11]
Ksigma2Q = mydata[12]
Code example #42
0
File: display.py Project: alex-ht/librosa
def __decorate_axis(axis, ax_type):
    '''Configure axis tickers, locators, and labels'''

    if ax_type == 'tonnetz':
        axis.set_major_formatter(TonnetzFormatter())
        axis.set_major_locator(FixedLocator(0.5 + np.arange(6)))
        axis.set_label_text('Tonnetz')

    elif ax_type == 'chroma':
        axis.set_major_formatter(ChromaFormatter())
        axis.set_major_locator(FixedLocator(0.5 +
                                            np.add.outer(12 * np.arange(10),
                                                         [0, 2, 4, 5, 7, 9, 11]).ravel()))
        axis.set_label_text('Pitch class')

    elif ax_type in ['tempo', 'fourier_tempo']:
        axis.set_major_formatter(ScalarFormatter())
        axis.set_major_locator(LogLocator(base=2.0))
        axis.set_label_text('BPM')

    elif ax_type == 'time':
        axis.set_major_formatter(TimeFormatter(unit=None, lag=False))
        axis.set_major_locator(MaxNLocator(prune=None,
                                           steps=[1, 1.5, 5, 6, 10]))
        axis.set_label_text('Time')

    elif ax_type == 's':
        axis.set_major_formatter(TimeFormatter(unit='s', lag=False))
        axis.set_major_locator(MaxNLocator(prune=None,
                                           steps=[1, 1.5, 5, 6, 10]))
        axis.set_label_text('Time (s)')

    elif ax_type == 'ms':
        axis.set_major_formatter(TimeFormatter(unit='ms', lag=False))
        axis.set_major_locator(MaxNLocator(prune=None,
                                           steps=[1, 1.5, 5, 6, 10]))
        axis.set_label_text('Time (ms)')

    elif ax_type == 'lag':
        axis.set_major_formatter(TimeFormatter(unit=None, lag=True))
        axis.set_major_locator(MaxNLocator(prune=None,
                                           steps=[1, 1.5, 5, 6, 10]))
        axis.set_label_text('Lag')

    elif ax_type == 'lag_s':
        axis.set_major_formatter(TimeFormatter(unit='s', lag=True))
        axis.set_major_locator(MaxNLocator(prune=None,
                                           steps=[1, 1.5, 5, 6, 10]))
        axis.set_label_text('Lag (s)')

    elif ax_type == 'lag_ms':
        axis.set_major_formatter(TimeFormatter(unit='ms', lag=True))
        axis.set_major_locator(MaxNLocator(prune=None,
                                           steps=[1, 1.5, 5, 6, 10]))
        axis.set_label_text('Lag (ms)')

    elif ax_type == 'cqt_note':
        axis.set_major_formatter(NoteFormatter())
        axis.set_major_locator(LogLocator(base=2.0))
        axis.set_minor_formatter(NoteFormatter(major=False))
        axis.set_minor_locator(LogLocator(base=2.0,
                                          subs=2.0**(np.arange(1, 12)/12.0)))
        axis.set_label_text('Note')

    elif ax_type in ['cqt_hz']:
        axis.set_major_formatter(LogHzFormatter())
        axis.set_major_locator(LogLocator(base=2.0))
        axis.set_minor_formatter(LogHzFormatter(major=False))
        axis.set_minor_locator(LogLocator(base=2.0,
                                          subs=2.0**(np.arange(1, 12)/12.0)))
        axis.set_label_text('Hz')

    elif ax_type in ['mel', 'log']:
        axis.set_major_formatter(ScalarFormatter())
        axis.set_major_locator(SymmetricalLogLocator(axis.get_transform()))
        axis.set_label_text('Hz')

    elif ax_type in ['linear', 'hz']:
        axis.set_major_formatter(ScalarFormatter())
        axis.set_label_text('Hz')

    elif ax_type in ['frames']:
        axis.set_label_text('Frames')

    elif ax_type in ['off', 'none', None]:
        axis.set_label_text('')
        axis.set_ticks([])
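
# __decorate_axis is an internal helper; in user code these ax_type values are
# normally exercised through librosa.display.specshow, e.g. (sketch):
# librosa.display.specshow(S_db, x_axis='time', y_axis='log')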
Code example #43
0
    def render_GET(self, request):
        q = urlparse.urlparse(request.uri)
        args = urlparse.parse_qs(q.query)

        if q.path == self.base + '/status':
            return serve_json(request,
                              fast_connected=self.fast.factory.isConnected(),
                              fast=self.fast.fast_status)
        elif q.path == self.base + '/command':
            self.fast.factory.message(args['string'][0])
            return serve_json(request)
        elif q.path == self.base + '/image.jpg':
            if self.fast.image:
                request.responseHeaders.setRawHeaders("Content-Type",
                                                      ['image/jpeg'])
                request.responseHeaders.setRawHeaders(
                    "Content-Length", [str(len(self.fast.image))])
                return self.fast.image
            else:
                request.setResponseCode(400)
                return "No images"
        elif q.path == self.base + '/total_image.jpg':
            if self.fast.total_image:
                request.responseHeaders.setRawHeaders("Content-Type",
                                                      ['image/jpeg'])
                request.responseHeaders.setRawHeaders(
                    "Content-Length", [str(len(self.fast.total_image))])
                return self.fast.total_image
            else:
                request.setResponseCode(400)
                return "No images"
        elif q.path == self.base + '/total_flux.jpg':
            width = 800
            fig = Figure(facecolor='white',
                         figsize=(width / 72, width * 0.5 / 72),
                         tight_layout=True)
            ax = fig.add_subplot(111)

            ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))

            x = np.array(self.fast.time)
            x = x - x[0]

            ax.plot(x, self.fast.mean, '-')
            ax.set_xlabel('Time, seconds')
            ax.set_ylabel('Flux, counts')

            canvas = FigureCanvas(fig)
            s = StringIO()
            canvas.print_jpeg(s, bbox_inches='tight')

            request.responseHeaders.setRawHeaders("Content-Type",
                                                  ['image/jpeg'])
            request.responseHeaders.setRawHeaders("Content-Length",
                                                  [str(s.len)])
            request.responseHeaders.setRawHeaders(
                "Cache-Control",
                ['no-store, no-cache, must-revalidate, max-age=0'])
            return s.getvalue()

        elif q.path == self.base + '/current_flux.jpg':
            width = 800
            fig = Figure(facecolor='white',
                         figsize=(width / 72, width * 0.5 / 72),
                         tight_layout=True)
            ax = fig.add_subplot(111)

            ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))

            x = np.array(self.fast.time)
            if len(x):
                x = x - x[0]
            y = np.array(self.fast.mean)

            ax.plot(x[-1000:], y[-1000:], '-')
            ax.set_xlabel('Time, seconds')
            ax.set_ylabel('Flux, counts')

            canvas = FigureCanvas(fig)
            s = StringIO()
            canvas.print_jpeg(s, bbox_inches='tight')

            request.responseHeaders.setRawHeaders("Content-Type",
                                                  ['image/jpeg'])
            request.responseHeaders.setRawHeaders("Content-Length",
                                                  [str(s.len)])
            request.responseHeaders.setRawHeaders(
                "Cache-Control",
                ['no-store, no-cache, must-revalidate, max-age=0'])
            return s.getvalue()

        else:
            return q.path
Code example #44
0
File: datacursor.py Project: siriuspal/mpldatacursor
    def __init__(self,
                 artists,
                 tolerance=5,
                 formatter=None,
                 point_labels=None,
                 display='one-per-axes',
                 draggable=False,
                 hover=False,
                 props_override=None,
                 keybindings=True,
                 date_format='%x %X',
                 display_button=1,
                 hide_button=3,
                 keep_inside=True,
                 magnetic=False,
                 **kwargs):
        """Create the data cursor and connect it to the relevant figure.

        Parameters
        -----------
        artists : a matplotlib artist or sequence of artists.
            The artists to make selectable and display information for.
        tolerance : number, optional
            The radius (in points) that the mouse click must be within to
            select the artist.
        formatter : function, optional
            A function that accepts arbitrary kwargs and returns a string that
            will be displayed with annotate. The `x`, `y`, `event`, `ind`, and
            `label` kwargs will always be present. See the
            ``mpldatacursor.datacursor`` function docstring for more
            information.
        point_labels : sequence or dict, optional
            Labels for "subitems" of an artist, passed to the formatter
            function as the `point_label` kwarg.  May be either a single
            sequence (used for all artists) or a dict of artist:sequence pairs.
        display : {'one-per-axes', 'single', 'multiple'}, optional
            Controls whether more than one annotation box will be shown.
        draggable : boolean, optional
            Controls whether or not the annotation box will be interactively
            draggable to a new location after being displayed. Default: False.
        hover : boolean, optional
            If True, the datacursor will "pop up" when the mouse hovers over an
            artist.  Defaults to False.  Enabling hover also sets
            `display="single"` and `draggable=False`.
        props_override : function, optional
            If specified, this function customizes the parameters passed into
            the formatter function and the x, y location that the datacursor
            "pop up" "points" to.  This is often useful to make the annotation
            "point" to a specific side or corner of an artist, regardless of
            the position clicked. The function is passed the same kwargs as the
            `formatter` function and is expected to return a dict with at least
            the keys "x" and "y" (and probably several others).
            Expected call signature: `props_dict = props_override(**kwargs)`
        keybindings : boolean or dict, optional
            By default, the keys "d" and "t" will be bound to hiding/showing
            all annotation boxes and toggling interactivity for datacursors,
            respectively.  "<shift> + <right>" and "<shift> + <left>" will be
            bound to moving the datacursor to the next and previous item in the
            sequence for artists that support it. If keybindings is False, the
            ability to hide/toggle datacursors interactively will be disabled.
            Alternatively, a dict mapping "hide", "toggle", "next", and
            "previous" to matplotlib key specifications may specified to
            customize the keyboard shortcuts.  Note that hitting the "hide" key
            once will hide datacursors, and hitting it again will show all of
            the hidden datacursors.
        date_format : string, optional
            The strftime-style formatting string for dates. Used only if the x
            or y axes have been set to display dates. Defaults to "%x %X".
        display_button: int, optional
            The mouse button that will triggers displaying an annotation box.
            Defaults to 1, for left-clicking. (Common options are
            1:left-click, 2:middle-click, 3:right-click)
        hide_button: int or None, optional
            The mouse button that triggers hiding the selected annotation box.
            Defaults to 3, for right-clicking. (Common options are
            1:left-click, 2:middle-click, 3:right-click, None:hiding disabled)
        keep_inside : boolean, optional
            Whether or not to adjust the x,y offset to keep the text box inside
            the figure. This option has no effect on draggable datacursors.
            Defaults to True. Note: Currently disabled on OSX and
            NbAgg/notebook backends.
        magnetic : boolean, optional
            If True, the cursor attaches only to actual data points: when a
            click does not land exactly on a data point, the nearest data
            point is selected. Defaults to False, in which case the cursor may
            also be placed at interpolated positions along a line. Only
            artists with x, y attributes are affected; other plot types ignore
            this option.
        **kwargs : additional keyword arguments, optional
            Additional keyword arguments are passed on to annotate.
        """
        def filter_artists(artists):
            """Replace ContourSets, etc with their constituent artists."""
            output = []
            for item in artists:
                if isinstance(item, ContourSet):
                    output += item.collections
                elif isinstance(item, Container):
                    children = item.get_children()
                    for child in children:
                        child._mpldatacursor_label = item.get_label()
                        child._mpldatacursor_parent = item
                    output += children
                else:
                    output.append(item)
            return output

        if not np.iterable(artists):
            artists = [artists]

        #-- Deal with contour sets... -------------------------------------
        # These are particularly difficult, as the original z-value array
        # is never associated with the ContourSet, and they're not "normal"
        # artists (they're not actually added to the axes). Not only that, but
        # the PatchCollections created by filled contours don't even fire a
        # pick event for points inside them, only on their edges. At any rate,
        # this is a somewhat hackish way of handling contours, but it works.
        self.artists = filter_artists(artists)
        self.contour_levels = {}
        for cs in [x for x in artists if isinstance(x, ContourSet)]:
            for z, artist in zip(cs.levels, cs.collections):
                self.contour_levels[artist] = z

        valid_display_options = ['single', 'one-per-axes', 'multiple']
        if display in valid_display_options:
            self.display = display
        else:
            raise ValueError('"display" must be one of the following: '\
                             ', '.join(valid_display_options))
        self.hover = hover
        if self.hover:
            self.display = 'single'
            self.draggable = False

        self.magnetic = magnetic
        self.keep_inside = keep_inside
        self.tolerance = tolerance
        self.point_labels = point_labels
        self.draggable = draggable
        self.date_format = date_format
        self.props_override = props_override
        self.display_button = display_button
        self.hide_button = hide_button
        self.axes = tuple(set(art.axes for art in self.artists))
        self.figures = tuple(set(ax.figure for ax in self.axes))
        self._mplformatter = ScalarFormatter(useOffset=False, useMathText=True)
        self._hidden = False
        self._last_event = None
        self._last_annotation = None

        if self.draggable:
            # If we're dealing with draggable cursors, don't try to override
            # the x,y position.  Otherwise, dragging the cursor outside the
            # figure will have unexpected consequences.
            self.keep_inside = False

        if formatter is None:
            self.formatter = self._formatter
        else:
            self.formatter = formatter

        self._annotation_kwargs = kwargs
        self.annotations = {}
        if self.display != 'multiple':
            for ax in self.axes:
                self.annotations[ax] = self.annotate(ax, **kwargs)
                # Hide the annotation box until clicked...
                self.annotations[ax].set_visible(False)

        if keybindings:
            if keybindings is True:
                self.keybindings = self.default_keybindings
            else:
                self.keybindings = self.default_keybindings.copy()
                self.keybindings.update(keybindings)
            for fig in self.figures:
                fig.canvas.mpl_connect('key_press_event', self._on_keypress)

        self.enable()

        # We need to make sure the DataCursor isn't garbage collected until the
        # figure is.  Matplotlib's weak references won't keep this DataCursor
        # instance alive in all cases.
        for fig in self.figures:
            try:
                fig._mpldatacursors.append(self)
            except AttributeError:
                fig._mpldatacursors = [self]
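For reference, a minimal usage sketch of the cursor defined above, assuming the surrounding mpldatacursor package (its `datacursor` convenience function is the usual entry point and forwards these same keyword arguments):

import numpy as np
import matplotlib.pyplot as plt
from mpldatacursor import datacursor  # assumes mpldatacursor is installed

x = np.linspace(0, 10, 200)
lines = plt.plot(x, np.sin(x), label='sin(x)')
# Left-click a point to pop up an annotation; 'd' hides/shows boxes, 't' toggles interactivity.
datacursor(lines, hover=False, formatter='x: {x:.2f}\ny: {y:.2f}'.format)
plt.show()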
Code Example #45
File: util.py Project: dbouman1/qtplot
 def __init__(self, format='%.0f', division=1e0):
     ScalarFormatter.__init__(self, useOffset=None, useMathText=None)
     self.format = format
     self.division = division
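The snippet above shows only the constructor of a ScalarFormatter subclass; a self-contained sketch of what such a divide-and-format formatter might look like (the class name and the `__call__` override are assumptions for illustration, not the qtplot implementation):

from matplotlib.ticker import ScalarFormatter

class DividedFormatter(ScalarFormatter):
    """Hypothetical formatter that divides tick values before printf-style formatting."""

    def __init__(self, format='%.0f', division=1e0):
        ScalarFormatter.__init__(self, useOffset=None, useMathText=None)
        self.format = format
        self.division = division

    def __call__(self, x, pos=None):
        # Scale the tick value by `division`, then apply the format string.
        return self.format % (x / self.division)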
Code Example #46
File: xor_functions.py Project: PSSF23/SPDT
def xnor_plot_error(mean_error):
    """Plot Generalization Errors"""
    algorithms = [
        "Hoeffding Tree ",
        "Mondrian Forest",
        "Stream Decision Tree",
        "Stream Decision Forest",
        "Synergistic Forest",
    ]
    fontsize = 30
    labelsize = 28
    ls = ["-", "--"]
    colors = sns.color_palette("bright")
    fig = plt.figure(figsize=(21, 14))
    gs = fig.add_gridspec(14, 21)
    ax1 = fig.add_subplot(gs[7:, :6])
    # Hoeffding Tree XOR
    ax1.plot(
        (100 * np.arange(0.25, 22.75, step=0.25)).astype(int),
        mean_error[0],
        label=algorithms[0],
        c=colors[4],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Mondrian Forest XOR
    ax1.plot(
        (100 * np.arange(0.25, 22.75, step=0.25)).astype(int),
        mean_error[2],
        label=algorithms[1],
        c=colors[5],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Stream Decision Tree XOR
    ax1.plot(
        (100 * np.arange(0.25, 22.75, step=0.25)).astype(int),
        mean_error[4],
        label=algorithms[2],
        c=colors[2],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Stream Decision Forest XOR
    ax1.plot(
        (100 * np.arange(0.25, 22.75, step=0.25)).astype(int),
        mean_error[6],
        label=algorithms[3],
        c=colors[3],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Synergistic Forest XOR
    ax1.plot(
        (100 * np.arange(0.25, 22.75, step=0.25)).astype(int),
        mean_error[8],
        label=algorithms[4],
        c=colors[9],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    ax1.set_ylabel("Generalization Error (XOR)", fontsize=fontsize)
    ax1.set_xlabel("Total Sample Size", fontsize=fontsize)
    ax1.tick_params(labelsize=labelsize)
    ax1.set_yscale("log")
    ax1.yaxis.set_major_formatter(ScalarFormatter())
    ax1.set_yticks([0.1, 0.3, 0.5, 0.9])
    ax1.set_xticks([0, 750, 1500, 2250])
    ax1.axvline(x=750, c="gray", linewidth=1.5, linestyle="dashed")
    ax1.axvline(x=1500, c="gray", linewidth=1.5, linestyle="dashed")

    right_side = ax1.spines["right"]
    right_side.set_visible(False)
    top_side = ax1.spines["top"]
    top_side.set_visible(False)

    ax1.text(200, np.mean(ax1.get_ylim()) + 0.5, "XOR", fontsize=26)
    ax1.text(850, np.mean(ax1.get_ylim()) + 0.5, "XNOR", fontsize=26)
    ax1.text(1700, np.mean(ax1.get_ylim()) + 0.5, "XOR", fontsize=26)

    ######## XNOR
    ax1 = fig.add_subplot(gs[7:, 8:14])
    xnor_range = (100 * np.arange(0.25, 22.75, step=0.25)).astype(int)[30:]
    # Hoeffding Tree XNOR
    ax1.plot(
        xnor_range,
        mean_error[1, 30:],
        label=algorithms[0],
        c=colors[4],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Mondrian Forest XNOR
    ax1.plot(
        xnor_range,
        mean_error[3, 30:],
        label=algorithms[1],
        c=colors[5],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Stream Decision Tree XNOR
    ax1.plot(
        xnor_range,
        mean_error[5, 30:],
        label=algorithms[2],
        c=colors[2],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Stream Decision Forest XNOR
    ax1.plot(
        xnor_range,
        mean_error[7, 30:],
        label=algorithms[3],
        c=colors[3],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )
    # Synergistic Forest XNOR
    ax1.plot(
        xnor_range,
        mean_error[9, 30:],
        label=algorithms[4],
        c=colors[9],
        ls=ls[np.sum(1 > 1).astype(int)],
        lw=3,
    )

    ax1.set_ylabel("Generalization Error (%s)" % "XNOR", fontsize=fontsize)
    ax1.legend(bbox_to_anchor=(1.05, 1.0),
               loc="upper left",
               fontsize=20,
               frameon=False)
    ax1.set_xlabel("Total Sample Size", fontsize=fontsize)
    ax1.tick_params(labelsize=labelsize)
    ax1.set_yscale("log")
    ax1.yaxis.set_major_formatter(ScalarFormatter())
    ax1.set_yticks([0.1, 0.3, 0.5, 0.9])
    ax1.set_xticks([0, 750, 1500, 2250])
    ax1.axvline(x=750, c="gray", linewidth=1.5, linestyle="dashed")
    ax1.axvline(x=1500, c="gray", linewidth=1.5, linestyle="dashed")
    right_side = ax1.spines["right"]
    right_side.set_visible(False)
    top_side = ax1.spines["top"]
    top_side.set_visible(False)

    ax1.text(200, np.mean(ax1.get_ylim()) + 0.5, "XOR", fontsize=26)
    ax1.text(850, np.mean(ax1.get_ylim()) + 0.5, "XNOR", fontsize=26)
    ax1.text(1700, np.mean(ax1.get_ylim()) + 0.5, "XOR", fontsize=26)
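A stripped-down sketch of the axis-formatting idiom used above, namely plain decimal tick labels on a log-scaled y axis (the toy error curve below is illustrative only):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

n = np.arange(25, 2275, 25)              # stand-in for the sample-size axis above
err = 0.5 * np.exp(-n / 1500.0) + 0.1    # stand-in for a generalization-error curve

fig, ax = plt.subplots()
ax.plot(n, err, lw=3)
ax.set_yscale("log")
ax.yaxis.set_major_formatter(ScalarFormatter())  # decimal labels instead of 10^k
ax.set_yticks([0.1, 0.3, 0.5, 0.9])
ax.set_xticks([0, 750, 1500, 2250])
plt.show()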
Code Example #47
def main():
    start_year = 1980
    end_year = 2010

    start_date = datetime(start_year, 1, 1)
    end_date = datetime(end_year, 12, 31)

    ids_with_lakes_upstream = [
        "104001", "093806", "093801", "081002", "081007", "080718"
    ]

    selected_station_ids = ["092715", "074903", "080104", "081007", "061905",
                            "093806", "090613", "081002", "093801", "080718", "104001"]

    selected_station_ids = ids_with_lakes_upstream

    # Get the list of stations to do the comparison with
    stations = cehq_station.read_station_data(
        start_date=start_date,
        end_date=end_date,
        selected_ids=selected_station_ids
    )


    # add hydat stations
    # province = "QC"
    # min_drainage_area_km2 = 10000.0
    # stations_hd = cehq_station.load_from_hydat_db(start_date=start_date, end_date=end_date,
    # province=province, min_drainage_area_km2=min_drainage_area_km2)
    # if not len(stations_hd):
    #     print "No hydat stations satisying the conditions: period {0}-{1}, province {2}".format(
    #         str(start_date), str(end_date), province
    #     )
    # stations.extend(stations_hd)

    # brewer2mpl.get_map args: set name  set type  number of colors
    bmap = brewer2mpl.get_map("Set1", "qualitative", 9)

    path1 = "/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-r.hdf5"
    label1 = "CRCM5-L1"

    path2 = "/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-rl.hdf5"
    label2 = "CRCM5-L2"

    color2, color1 = bmap.mpl_colors[:2]

    fldirs = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_FLOW_DIRECTIONS_NAME)
    lons2d, lats2d, basemap = analysis.get_basemap_from_hdf(path1)

    lake_fractions = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_LAKE_FRACTION_NAME)
    # cell_areas = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_CELL_AREA_NAME)
    acc_area = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_ACCUMULATION_AREA_NAME)

    cell_manager = CellManager(fldirs, lons2d=lons2d, lats2d=lats2d, accumulation_area_km2=acc_area)

    station_to_mp = cell_manager.get_model_points_for_stations(station_list=stations,
                                                               lake_fraction=lake_fractions,
                                                               drainaige_area_reldiff_limit=0.3)

    fig, axes = plt.subplots(1, 2, gridspec_kw=dict(top=0.80, wspace=0.4))

    q90_obs_list = []
    q90_mod1_list = []
    q90_mod2_list = []

    q10_obs_list = []
    q10_mod1_list = []
    q10_mod2_list = []

    for the_station, the_mp in station_to_mp.items():
        assert isinstance(the_station, Station)
        compl_years = the_station.get_list_of_complete_years()
        if len(compl_years) < 3:
            continue

        t, stfl1 = analysis.get_daily_climatology_for_a_point(path=path1, years_of_interest=compl_years,
                                                              i_index=the_mp.ix, j_index=the_mp.jy, var_name="STFA")

        _, stfl2 = analysis.get_daily_climatology_for_a_point(path=path2, years_of_interest=compl_years,
                                                              i_index=the_mp.ix, j_index=the_mp.jy, var_name="STFA")

        _, stfl_obs = the_station.get_daily_climatology_for_complete_years(stamp_dates=t, years=compl_years)

        # Q90
        q90_obs = np.percentile(stfl_obs, 90)
        q90_mod1 = np.percentile(stfl1, 90)
        q90_mod2 = np.percentile(stfl2, 90)

        # Q10
        q10_obs = np.percentile(stfl_obs, 10)
        q10_mod1 = np.percentile(stfl1, 10)
        q10_mod2 = np.percentile(stfl2, 10)

        # save quantiles to lists for correlation calculation
        q90_obs_list.append(q90_obs)
        q90_mod1_list.append(q90_mod1)
        q90_mod2_list.append(q90_mod2)

        q10_mod1_list.append(q10_mod1)
        q10_mod2_list.append(q10_mod2)
        q10_obs_list.append(q10_obs)


        # axes[0].annotate(the_station.id, (q90_obs, np.percentile(stfl1, 90)))
        # axes[1].annotate(the_station.id, (q10_obs, np.percentile(stfl1, 10)))




    # Plot scatter plot of Q90
    the_ax = axes[0]

    # the_ax.annotate(the_station.id, (q90_obs, np.percentile(stfl1, 90)))
    the_ax.scatter(q90_obs_list, q90_mod1_list, label=label1, c=color1)
    the_ax.scatter(q90_obs_list, q90_mod2_list, label=label2, c=color2)



    # plot scatter plot of Q10
    the_ax = axes[1]
    # the_ax.annotate(the_station.id, (q10_obs, np.percentile(stfl1, 10)))
    h1 = the_ax.scatter(q10_obs_list, q10_mod1_list, label=label1, c=color1)
    h2 = the_ax.scatter(q10_obs_list, q10_mod2_list, label=label2, c=color2)



    # Add correlation coefficients to the axes
    fp = FontProperties(size=14, weight="bold")
    axes[0].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q90_mod1_list, q90_obs_list)[0, 1] ** 2),
                     (0.1, 0.85), color=color1, xycoords="axes fraction", font_properties=fp)
    axes[0].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q90_mod2_list, q90_obs_list)[0, 1] ** 2),
                     (0.1, 0.70), color=color2, xycoords="axes fraction", font_properties=fp)

    axes[1].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q10_mod1_list, q10_obs_list)[0, 1] ** 2),
                     (0.1, 0.85), color=color1, xycoords="axes fraction", font_properties=fp)
    axes[1].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q10_mod2_list, q10_obs_list)[0, 1] ** 2),
                     (0.1, 0.70), color=color2, xycoords="axes fraction", font_properties=fp)


    sf = ScalarFormatter(useMathText=True)
    sf.set_powerlimits((-2, 3))
    for ind, the_ax in enumerate(axes):
        plot_one_to_one_line(the_ax)
        if ind == 0:
            the_ax.set_xlabel(r"Observed $\left({\rm m^3/s} \right)$")
            the_ax.set_ylabel(r"Modelled $\left({\rm m^3/s} \right)$")

        the_ax.annotate(r"$Q_{90}$" if ind == 0 else r"$Q_{10}$",
                        (0.95, 0.95), xycoords="axes fraction",
                        bbox=dict(facecolor="white"),
                        va="top", ha="right")

        the_ax.xaxis.set_major_formatter(sf)
        the_ax.yaxis.set_major_formatter(sf)

        locator = MaxNLocator(nbins=5)
        the_ax.xaxis.set_major_locator(locator)
        the_ax.yaxis.set_major_locator(locator)
        x1, x2 = the_ax.get_xlim()
        # Since streamflow percentiles can only be positive
        the_ax.set_xlim(0, x2)
        the_ax.set_ylim(0, x2)

    fig.legend([h1, h2], [label1, label2], loc="upper center", ncol=2)
    figpath = os.path.join(images_folder, "percentiles_comparison.png")
    # plt.tight_layout()
    fig.savefig(figpath, dpi=cpp.FIG_SAVE_DPI, bbox_inches="tight")
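`plot_one_to_one_line` is used above but not defined in this excerpt; a hypothetical stand-in with the behaviour its name suggests could be:

def plot_one_to_one_line(ax, **line_kwargs):
    """Hypothetical helper: draw the y = x reference line across the current limits."""
    lo = min(ax.get_xlim()[0], ax.get_ylim()[0])
    hi = max(ax.get_xlim()[1], ax.get_ylim()[1])
    line_kwargs.setdefault("color", "gray")
    line_kwargs.setdefault("linestyle", "--")
    ax.plot([lo, hi], [lo, hi], **line_kwargs)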
Code Example #48
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=None,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of ints, strings, or tuples of ints or strings
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
        If feature_names is specified and seq[i] is an int, seq[i]
        must be < len(feature_names).
        If seq[i] is a string, feature_names must be specified, and
        seq[i] must be in feature_names.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    n_jobs : int or None, optional (default=None)
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For two-way partial dependence plots.
    **fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter

    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    check_is_fitted(gbrt, 'estimators_')

    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0

    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features_ != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features_')

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features_)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()

    def convert_feature(fx):
        if isinstance(fx, str):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral, str)):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')

        tmp_features.append(fxs)

    features = tmp_features

    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('All entries of features must be less than '
                         'len(feature_names) = {0}, got {1}.'
                         .format(len(feature_names), i))

    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)

    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(axes) == 1:
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
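The tick handling near the end of the plotting loop above is a reusable matplotlib idiom; in isolation (sketch only, random data):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, ScalarFormatter

fig, ax = plt.subplots()
ax.plot(np.linspace(0, 5e4, 200), np.random.rand(200))
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))  # cap tick count, drop the lowest
fmt = ScalarFormatter()
fmt.set_powerlimits((-3, 4))  # fall back to scientific notation outside 1e-3..1e4
ax.xaxis.set_major_formatter(fmt)
plt.show()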
Code Example #49
def plot_partial_dependence(estimator, X, features, feature_names=None,
                            target=None, response_method='auto', n_cols=3,
                            grid_resolution=100, percentiles=(0.05, 0.95),
                            method='auto', n_jobs=None, verbose=0, fig=None,
                            line_kw=None, contour_kw=None):
    """Partial dependence plots.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    estimator : BaseEstimator
        A fitted estimator object implementing `predict`, `predict_proba`,
        or `decision_function`. Multioutput-multiclass classifiers are not
        supported.
    X : array-like, shape (n_samples, n_features)
        The data to use to build the grid of values on which the dependence
        will be evaluated. This is usually the training data.
    features : list of {int, str, pair of int, pair of str}
        The target features for which to create the PDPs.
        If features[i] is an int or a string, a one-way PDP is created; if
        features[i] is a tuple, a two-way PDP is created. Each tuple must be
        of size 2.
        if any entry is a string, then it must be in ``feature_names``.
    feature_names : seq of str, shape (n_features,), optional
        Name of each feature; feature_names[i] holds the name of the feature
        with index i. By default, the name of the feature corresponds to
        their numerical index.
    target : int, optional (default=None)
        - In a multiclass setting, specifies the class for which the PDPs
          should be computed. Note that for binary classification, the
          positive class (index 1) is always used.
        - In a multioutput setting, specifies the task for which the PDPs
          should be computed
        Ignored in binary classification or classical regression settings.
    response_method : 'auto', 'predict_proba' or 'decision_function', \
            optional (default='auto') :
        Specifies whether to use :term:`predict_proba` or
        :term:`decision_function` as the target response. For regressors
        this parameter is ignored and the response is always the output of
        :term:`predict`. By default, :term:`predict_proba` is tried first
        and we revert to :term:`decision_function` if it doesn't exist. If
        ``method`` is 'recursion', the response is always the output of
        :term:`decision_function`.
    n_cols : int, optional (default=3)
        The maximum number of columns in the grid plot.
    grid_resolution : int, optional (default=100)
        The number of equally spaced points on the axes of the plots, for each
        target feature.
    percentiles : tuple of float, optional (default=(0.05, 0.95))
        The lower and upper percentile used to create the extreme values
        for the PDP axes. Must be in [0, 1].
    method : str, optional (default='auto')
        The method to use to calculate the partial dependence predictions:

        - 'recursion' is only supported for objects inheriting from
          `BaseGradientBoosting`, but is more efficient in terms of speed.
          With this method, ``X`` is optional and is only used to build the
          grid and the partial dependences are computed using the training
          data. This method does not account for the ``init`` predicor of
          the boosting process, which may lead to incorrect values (see
          warning below. With this method, the target response of a
          classifier is always the decision function, not the predicted
          probabilities.

        - 'brute' is supported for any estimator, but is more
          computationally intensive.

        - If 'auto', then 'recursion' will be used for
          ``BaseGradientBoosting`` estimators with ``init=None``, and
          'brute' for all other.

        Unlike the 'brute' method, 'recursion' does not account for the
        ``init`` predictor of the boosting process. In practice this still
        produces the same plots, up to a constant offset in the target
        response.
    n_jobs : int, optional (default=None)
        The number of CPUs to use to compute the partial dependences.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int, optional (default=0)
        Verbose output during PD computations.
    fig : Matplotlib figure object, optional (default=None)
        A figure object onto which the plots will be drawn, after the figure
        has been cleared. By default, a new one is created.
    line_kw : dict, optional
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict, optional
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For two-way partial dependence plots.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP

    See also
    --------
    sklearn.inspection.partial_dependence: Return raw partial
      dependence values

    Warnings
    --------
    The 'recursion' method only works for gradient boosting estimators, and
    unlike the 'brute' method, it does not account for the ``init``
    predictor of the boosting process. In practice this will produce the
    same values as 'brute' up to a constant offset in the target response,
    provided that ``init`` is a constant estimator (which is the default).
    However, as soon as ``init`` is not a constant estimator, the partial
    dependence values are incorrect for 'recursion'.
    """
    check_matplotlib_support('plot_partial_dependence')  # noqa
    import matplotlib.pyplot as plt  # noqa
    from matplotlib import transforms  # noqa
    from matplotlib.ticker import MaxNLocator  # noqa
    from matplotlib.ticker import ScalarFormatter  # noqa

    # set target_idx for multi-class estimators
    if hasattr(estimator, 'classes_') and np.size(estimator.classes_) > 2:
        if target is None:
            raise ValueError('target must be specified for multi-class')
        target_idx = np.searchsorted(estimator.classes_, target)
        if (not (0 <= target_idx < len(estimator.classes_)) or
                estimator.classes_[target_idx] != target):
            raise ValueError('target not in est.classes_, got {}'.format(
                target))
    else:
        # regression and binary classification
        target_idx = 0

    X = check_array(X)
    n_features = X.shape[1]

    # convert feature_names to list
    if feature_names is None:
        # if feature_names is None, use feature indices as name
        feature_names = [str(i) for i in range(n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()
    if len(set(feature_names)) != len(feature_names):
        raise ValueError('feature_names should not contain duplicates.')

    def convert_feature(fx):
        if isinstance(fx, str):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return int(fx)

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral, str)):
            fxs = (fxs,)
        try:
            fxs = [convert_feature(fx) for fx in fxs]
        except TypeError:
            raise ValueError('Each entry in features must be either an int, '
                             'a string, or an iterable of size at most 2.')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('Each entry in features must be either an int, '
                             'a string, or an iterable of size at most 2.')

        tmp_features.append(fxs)

    features = tmp_features

    names = []
    try:
        for fxs in features:
            names_ = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                names_.append(feature_names[i])
            names.append(names_)
    except IndexError:
        raise ValueError('All entries of features must be less than '
                         'len(feature_names) = {0}, got {1}.'
                         .format(len(feature_names), i))

    # compute averaged predictions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(estimator, X, fxs,
                                    response_method=response_method,
                                    method=method,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)

    # For multioutput regression, we can only check the validity of target
    # now that we have the predictions.
    # Also note: as multiclass-multioutput classifiers are not supported,
    # multiclass and multioutput scenario are mutually exclusive. So there is
    # no risk of overwriting target_idx here.
    avg_preds, _ = pd_result[0]  # checking the first result is enough
    if is_regressor(estimator) and avg_preds.shape[0] > 1:
        if target is None:
            raise ValueError(
                'target must be specified for multi-output regressors')
        if not 0 <= target <= avg_preds.shape[0]:
            raise ValueError(
                'target must be in [0, n_tasks], got {}.'.format(target))
        target_idx = target
    else:
        target_idx = 0

    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for avg_preds, values in pd_result:
        min_pd = avg_preds[target_idx].min()
        max_pd = avg_preds[target_idx].max()
        n_fx = len(values)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if fig is None:
        fig = plt.figure()
    else:
        fig.clear()

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (avg_preds, values) in zip(
            count(), features, names, pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(values) == 1:
            ax.plot(values[0], avg_preds[target_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(values) == 2
            XX, YY = np.meshgrid(values[0], values[1])
            Z = avg_preds[target_idx].T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(values) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(values) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
Code Example #50
File: plot_current.py Project: DDMGNI/viRMHD2D
    def __init__(self, diagnostics, nTime=0, ntMax=0, nPlot=1, write=False):
        '''
        Constructor
        '''

        self.write = write
        self.prefix = 'viRMHD2D_'

        self.nrows = 2
        self.ncols = 4

        if ntMax == 0:
            ntMax = diagnostics.nt

        if nTime > 0 and nTime < ntMax:
            self.nTime = nTime
        else:
            self.nTime = ntMax

        self.iTime = 0
        self.nPlot = nPlot

        self.diagnostics = diagnostics

        self.E_velocity = np.zeros(ntMax + 1)
        self.E_magnetic = np.zeros(ntMax + 1)

        self.energy = np.zeros(ntMax + 1)
        self.helicity = np.zeros(ntMax + 1)

        self.x = np.zeros(diagnostics.nx + 1)
        self.y = np.zeros(diagnostics.ny + 1)

        self.x[0:-1] = self.diagnostics.xGrid
        self.x[-1] = self.x[-2] + self.diagnostics.hx

        self.y[0:-1] = self.diagnostics.yGrid
        self.y[-1] = self.y[-2] + self.diagnostics.hy

        self.Bx = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.By = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.Vx = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.Vy = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.P = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))

        self.A = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.J = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.PB = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))

        self.B = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))
        self.V = np.zeros((diagnostics.nx + 1, diagnostics.ny + 1))

        # set up figure/window size
        self.figure = plt.figure(num=None, figsize=(16, 9))

        # set up plot margins
        plt.subplots_adjust(hspace=0.25, wspace=0.2)
        plt.subplots_adjust(left=0.05, right=0.95, top=0.93, bottom=0.05)

        # set up plot title
        self.title = self.figure.text(0.5,
                                      0.97,
                                      # placeholder added: the original 't = 0.0' format
                                      # string had no conversion specifier for the time value
                                      't = %1.2f' %
                                      (diagnostics.tGrid[self.iTime]),
                                      horizontalalignment='center')

        # set up tick formatter
        majorFormatter = ScalarFormatter(useOffset=True)
        ## -> limit to 1.1f precision
        majorFormatter.set_powerlimits((-1, +1))
        majorFormatter.set_scientific(True)

        # add data for zero timepoint
        self.add_timepoint()

        # set up plots
        self.axes = {}
        self.conts = {}
        self.cbars = {}
        self.lines = {}
        self.vecs = {}

        self.update_boundaries()

        # create subplots
        gs = gridspec.GridSpec(4, 3)
        self.gs = gs

        self.axes["Bx"] = plt.subplot(gs[0, 0])
        self.axes["By"] = plt.subplot(gs[0, 1])
        self.axes["Vx"] = plt.subplot(gs[1, 0])
        self.axes["Vy"] = plt.subplot(gs[1, 1])
        self.axes["J"] = plt.subplot(gs[2:4, 0:2])
        self.axes["Emag"] = plt.subplot(gs[0, 2])
        self.axes["Evel"] = plt.subplot(gs[1, 2])
        self.axes["E"] = plt.subplot(gs[2, 2])
        self.axes["H"] = plt.subplot(gs[3, 2])

        self.axes["Bx"].set_title('$B_{x} (x,y)$')
        self.axes["By"].set_title('$B_{y} (x,y)$')
        self.axes["Vx"].set_title('$V_{x} (x,y)$')
        self.axes["Vy"].set_title('$V_{y} (x,y)$')
        self.axes["J"].set_title('$J (x,y)$')

        self.conts["Bx"] = self.axes["Bx"].contourf(self.x,
                                                    self.y,
                                                    self.Bx.T,
                                                    self.BxTicks,
                                                    cmap=cm.jet,
                                                    extend='both')
        self.cbars["Bx"] = self.figure.colorbar(self.conts["Bx"],
                                                ax=self.axes["Bx"],
                                                orientation='vertical',
                                                ticks=self.BxTicks)

        self.conts["By"] = self.axes["By"].contourf(self.x,
                                                    self.y,
                                                    self.By.T,
                                                    self.ByTicks,
                                                    cmap=cm.jet,
                                                    extend='both')
        self.cbars["By"] = self.figure.colorbar(self.conts["By"],
                                                ax=self.axes["By"],
                                                orientation='vertical',
                                                ticks=self.ByTicks)

        self.conts["Vx"] = self.axes["Vx"].contourf(self.x,
                                                    self.y,
                                                    self.Vx.T,
                                                    self.VxTicks,
                                                    cmap=cm.jet,
                                                    extend='both')
        self.cbars["Vx"] = self.figure.colorbar(self.conts["Vx"],
                                                ax=self.axes["Vx"],
                                                orientation='vertical',
                                                ticks=self.VxTicks)

        self.conts["Vy"] = self.axes["Vy"].contourf(self.x,
                                                    self.y,
                                                    self.Vy.T,
                                                    self.VyTicks,
                                                    cmap=cm.jet,
                                                    extend='both')
        self.cbars["Vy"] = self.figure.colorbar(self.conts["Vy"],
                                                ax=self.axes["Vy"],
                                                orientation='vertical',
                                                ticks=self.VyTicks)

        tStart, tEnd, xStart, xEnd = self.get_timerange()

        self.lines["Emag"], = self.axes["Emag"].plot(
            self.diagnostics.tGrid[tStart:tEnd], self.E_magnetic[tStart:tEnd])
        self.lines["Evel"], = self.axes["Evel"].plot(
            self.diagnostics.tGrid[tStart:tEnd], self.E_velocity[tStart:tEnd])
        self.lines["E"], = self.axes["E"].plot(
            self.diagnostics.tGrid[tStart:tEnd], self.energy[tStart:tEnd])
        self.lines["H"], = self.axes["H"].plot(
            self.diagnostics.tGrid[tStart:tEnd], self.helicity[tStart:tEnd])

        self.axes["Emag"].set_title('$E_{B} (t)$')
        self.axes["Evel"].set_title('$E_{V} (t)$')

        #         if self.diagnostics.plot_energy:
        #             self.axes["E"].set_title('$E (t)$')
        #         else:
        self.axes["E"].set_title('$(E-E_0) / E_0 (t)$')

        #         if self.diagnostics.plot_helicity:
        #             self.axes["H"].set_title('$H (t)$')
        #         else:
        self.axes["H"].set_title('$(H-H_0) / H_0 (t)$')

        self.axes["Emag"].set_xlim((xStart, xEnd))
        self.axes["Evel"].set_xlim((xStart, xEnd))
        self.axes["E"].set_xlim((xStart, xEnd))
        self.axes["H"].set_xlim((xStart, xEnd))

        self.axes["Emag"].yaxis.set_major_formatter(majorFormatter)
        self.axes["Evel"].yaxis.set_major_formatter(majorFormatter)
        self.axes["E"].yaxis.set_major_formatter(majorFormatter)
        self.axes["H"].yaxis.set_major_formatter(majorFormatter)

        # switch off some ticks
        plt.setp(self.axes["Bx"].get_xticklabels(), visible=False)
        plt.setp(self.axes["By"].get_xticklabels(), visible=False)
        #        plt.setp(self.axes["Vx"   ].get_xticklabels(), visible=False)
        plt.setp(self.axes["Emag"].get_xticklabels(), visible=False)
        plt.setp(self.axes["Evel"].get_xticklabels(), visible=False)
        plt.setp(self.axes["E"].get_xticklabels(), visible=False)

        self.update()
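A minimal, self-contained sketch of the contour/colorbar idiom used repeatedly above (fixed levels passed both as contour levels and as colorbar ticks, with 'both' out-of-range extensions); the field here is synthetic:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

x = np.linspace(0, 2 * np.pi, 65)
y = np.linspace(0, 2 * np.pi, 65)
Bx = np.sin(x)[:, None] * np.cos(y)[None, :]   # toy field standing in for the diagnostics data
ticks = np.linspace(-1.0, 1.0, 11)

fig, ax = plt.subplots()
cont = ax.contourf(x, y, Bx.T, ticks, cmap=cm.jet, extend='both')
fig.colorbar(cont, ax=ax, orientation='vertical', ticks=ticks)
plt.show()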
Code Example #51
 def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
     self._order_of_mag = order_of_mag
     ScalarFormatter.__init__(self,
                              useOffset=useOffset,
                              useMathText=useMathText)
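This constructor matches the well-known "fixed order of magnitude" ScalarFormatter recipe; a hedged sketch of the usual completion (the private `_set_orderOfMagnitude` hook is version-dependent, so treat this as illustrative only):

from matplotlib.ticker import ScalarFormatter

class FixedOrderFormatter(ScalarFormatter):
    """Sketch: always format ticks with a fixed exponent (order of magnitude)."""

    def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
        self._order_of_mag = order_of_mag
        ScalarFormatter.__init__(self,
                                 useOffset=useOffset,
                                 useMathText=useMathText)

    def _set_orderOfMagnitude(self, *args):
        # Ignore the data range and use the requested exponent instead.
        self.orderOfMagnitude = self._order_of_mag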
Code Example #52
ax.set_xticklabels([])  # remove x axis

#plot C1
plt.subplot(gs[4])
plt.plot(zoomedTime,
         zoomedC2_y,
         linewidth=2,
         color="tab:green",
         label=C2_label)
plt.legend(loc="upper right")
plt.grid(True)
plt.xlim(startTime, stopTime)
plt.ylabel('Napětí [V]')  # Czech label: "Napětí" = voltage
plt.xlabel('Čas [s]')  # Czech label: "Čas" = time
fig.subplots_adjust(wspace=0, hspace=0.1)
x_formatter = ScalarFormatter(useOffset=False)
plt.yticks(np.arange(0, 6, 2.5))
ax = plt.gca()
ax.xaxis.set_major_formatter(x_formatter)

#plot C2
plt.subplot(gs[0])
plt.plot(zoomedTime, zoomedC3_y, linewidth=2, color="tab:blue", label=C3_label)
plt.legend(loc="upper right")
plt.grid(True)
plt.yticks(np.arange(0, 13, 2.0))
plt.ylabel('Napětí [V]')
plt.xlim(startTime, stopTime)
ax = plt.gca()
ax.set_xticklabels([])  # remove x axis
Code Example #53
 def __init__(self, useOffset=True, useMathText=False, precision=None):
    ScalarFormatter.__init__(self, useOffset=useOffset, useMathText=useMathText)
    self.precision = precision
Code Example #54
 def __init__(self, format='%f', division=1e0):
     ScalarFormatter.__init__(self, useOffset=False, useMathText=True)
     self.format = format
     self.division = division
Code Example #55
def compare_plots_one_param_line_hist(list_of_pos_by_name,param,cl,color_by_name,cl_lines_flag=True,legend='right',analyticPDF=None):


    """
    Plots a gaussian kernel density estimate for a set
    of Posteriors onto the same axis.

    @param list_of_pos: a list of Posterior class instances.

    @param plot1DParams: a dict; {paramName:Nbins}

    """

    from scipy import seterr as sp_seterr

    #Create common figure
    myfig=plt.figure(figsize=(6,4.5),dpi=150)
  #myfig.add_axes([0.1,0.1,0.65,0.85])
  #myfig.add_axes([0.15,0.15,0.6,0.76])
    axes=plt.Axes(myfig,[0.15,0.15,0.6,0.76])
    myfig.add_axes(axes)
    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.6g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.6g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)

    list_of_pos=list_of_pos_by_name.values()
    list_of_pos_names=list_of_pos_by_name.keys()

    allmins=map(lambda a: np.min(a[param].samples), list_of_pos)
    allmaxes=map(lambda a: np.max(a[param].samples), list_of_pos)
    min_pos=np.min(allmins)
    max_pos=np.max(allmaxes)

    injvals=[]

    patch_list=[]
    max_y=0

    posbins=np.linspace(min_pos,max_pos,50)

    for name,posterior in list_of_pos_by_name.items():
        colour=color_by_name[name]
        #myfig.gca(autoscale_on=True)
        if posterior[param].injval:
            injvals.append(posterior[param].injval)

        try:
            n,bins=np.histogram(posterior[param].samples,bins=posbins,normed=True,new=True)
        except:
            n,bins=np.histogram(posterior[param].samples,bins=posbins,normed=True)
        if min(bins)==max(bins):
            print 'Skipping '+param
            continue
        locmaxy=max(n)
        if locmaxy>max_y: max_y=locmaxy
#(n, bins, patches)=plt.hist(posterior[param].samples,bins=bins,facecolor='white',label=name,normed=True,hold=True,color=color_by_name[name])#range=(min_pos,max_pos)
        (n, bins, patches)=plt.hist(posterior[param].samples,bins=bins,histtype='step',label=name,normed=True,hold=True,color=color_by_name[name])
        patch_list.append(patches[0])

    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
      Nticks=3
    elif Nchars>5:
      Nticks=4
    elif Nchars>4:
      Nticks=6
    else:
      Nticks=6
    locatorX=mpl.ticker.MaxNLocator(nbins=Nticks)
    locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)

    plt.xlim(min_pos,max_pos)
    top_cl_intervals_list={}
    pos_names=list_of_pos_by_name.keys()


    for name,posterior in list_of_pos_by_name.items():
        #toppoints,injectionconfidence,reses,injection_area,cl_intervals=bppu.greedy_bin_one_param(posterior,{param:greedyBinSizes[param]},[cl])
        cl_intervals=posterior[param].prob_interval([cl])
        colour=color_by_name[name]
        if cl_intervals[0] is not None and cl_lines_flag:
            try:
                plt.plot([cl_intervals[0][0],cl_intervals[0][0]],[0,max_y],color=colour,linestyle='--')
                plt.plot([cl_intervals[0][1],cl_intervals[0][1]],[0,max_y],color=colour,linestyle='--')
            except:
                print "MAX_Y",max_y,[cl_intervals[0][0],cl_intervals[0][0]],[cl_intervals[0][1],cl_intervals[0][1]]
        top_cl_intervals_list[name]=(cl_intervals[0][0],cl_intervals[0][1])

    if cl_lines_flag:
        pos_names.append(str(int(cl*100))+'%')
        patch_list.append(mpl.lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),linestyle='--',color='black'))

    plt.grid()
    plt.xlim(min_pos,max_pos)
    if legend is not None:
      oned_legend=plt.figlegend(patch_list,pos_names,'right')
      for text in oned_legend.get_texts():
        text.set_fontsize('small')
    plt.xlabel(bppu.plot_label(param))
    plt.ylabel('Probability density')
    plt.draw()
    #plt.tight_layout()
    if injvals:
        print "Injection parameter is %f"%(float(injvals[0]))
        injpar=injvals[0]
        #if min(pos_samps)<injpar and max(pos_samps)>injpar:
        plt.plot([injpar,injpar],[0,max_y],'r-.',scalex=False,scaley=False,linewidth=4,label='Injection')

    #
    if analyticPDF is not None:
        plt.plot(posbins, map(analyticPDF, posbins), 'r')
    return myfig,top_cl_intervals_list#,rkde
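The function above targets Python 2 and an older matplotlib (print statements, `normed=`/`hold=` histogram keywords, list-returning `map`). For reference, a sketch of the modern equivalents of the affected calls, not a drop-in port of the function:

import numpy as np
import matplotlib.pyplot as plt

samples = np.random.randn(1000)
posbins = np.linspace(samples.min(), samples.max(), 50)

n, bins = np.histogram(samples, bins=posbins, density=True)   # 'normed=' became 'density='
n, bins, patches = plt.hist(samples, bins=bins, histtype='step',
                            density=True, label='example')    # the 'hold=' keyword was removed
plt.plot(posbins, [abs(p) for p in posbins], 'r')              # map() is lazy in Python 3; build a list
plt.show()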
Code Example #56
def plot_everything(*args):

    n_rows = 2
    n_cols = n_bins // n_rows  # integer division keeps the grid shape integral

    print("n_rows: {}, n_cols: {}".format(n_rows, n_cols))

    for idx in range(n_bins):

        row = idx // n_cols
        col = idx % n_cols

        print("plotting bin {}, row {} col {}".format(idx, row, col))

        plt.figure(1)

        #{ Figure 1

        ax3 = plt.subplot2grid((n_rows, n_bins // n_rows), (row, col))

        m = createMap('cyl')

        x_m_meshgrid, y_m_meshgrid = m(y_meshgrid, x_meshgrid)

        # m.pcolor(x_m_meshgrid, y_m_meshgrid, bins_rbf_log[idx], cmap=my_cm)
        m.pcolor(x_m_meshgrid,
                 y_m_meshgrid,
                 bins_rbf_log[idx],
                 cmap=my_cm,
                 edgecolor=(1.0, 1.0, 1.0, 0.3),
                 linewidth=0.005,
                 vmin=pcolor_min,
                 vmax=pcolor_max)

        formatter = ScalarFormatter()
        formatter.set_scientific(False)
        cb = m.colorbar(
            location="bottom",
            label="Z",
            format=ticker.FuncFormatter(fake_log_fmt))  # draw colorbar

        low_limit = idx * bin_size
        high_limit = (idx + 1.0) * bin_size

        if idx == (n_bins - 1):
            high_limit = 150

        plt.title('X-ray/gamma {}-{} keV'.format(low_limit, high_limit),
                  fontsize=13)

        x_m, y_m = m(lons_orig, lats_orig)  # project points

        cb.set_label('log10(' + x_label + ') ' + x_units)

        for image in images:
            latitude, longitude, tle_date = getLatLong(image.time)
            x, y = m(longitude, latitude)
            m.scatter(x, y, 0.2, marker='o', color='grey', zorder=10)

        plt.subplots_adjust(left=0.025,
                            bottom=0.05,
                            right=0.975,
                            top=0.95,
                            wspace=0.2,
                            hspace=0.3)

        #} end of Figure 1

    plt.show()
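`createMap` is referenced above but not defined in this excerpt; a hypothetical stand-in built on Basemap (the name and map extent are assumptions) could be:

from mpl_toolkits.basemap import Basemap

def createMap(projection='cyl'):
    """Hypothetical helper: a global map in the requested projection."""
    m = Basemap(projection=projection, llcrnrlat=-90, urcrnrlat=90,
                llcrnrlon=-180, urcrnrlon=180, resolution='c')
    m.drawcoastlines(linewidth=0.3)
    return m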
Code Example #57
plot_m.set_ylabel("mass [Earths]")
plot_m.grid(True)

plot_I = fig.add_subplot(2, 2, 4, sharex=plot_a)
if isLog:
  plot = plot_I.semilogx
else:
  plot = plot_I.plot

for planet in range(nb_planete):
  plot(t[planet][id_min:id_max+1], I[planet][id_min:id_max+1], color=colors[planet], label='PLANETE'+str(planet))
plot_I.set_xlabel("time [years]")
plot_I.set_ylabel("Inclination [degrees]")
plot_I.grid(True)

myyfmt = ScalarFormatter(useOffset=False)
myyfmt.set_scientific(True)
myyfmt.set_powerlimits((-2, 3)) 
myxfmt = ScalarFormatter(useOffset=True)
myxfmt._set_offset(1e5)
myxfmt.set_scientific(True)
myxfmt.set_powerlimits((-3, 3)) 

plot_a.xaxis.set_major_formatter(myxfmt)
plot_I.yaxis.set_major_formatter(myyfmt)


nom_fichier_plot = "evolution_planete"
fig.savefig('%s.%s' % (nom_fichier_plot, OUTPUT_EXTENSION), format=OUTPUT_EXTENSION)

pl.show()
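Note that `ScalarFormatter._set_offset` used above is a private method that has since been removed from matplotlib; in current versions a fixed offset can be requested through the public API instead (sketch only, behaviour may differ slightly from the original):

from matplotlib.ticker import ScalarFormatter

myxfmt = ScalarFormatter(useOffset=True)
myxfmt.set_useOffset(1e5)        # set_useOffset accepts a bool or a fixed numeric offset
myxfmt.set_scientific(True)
myxfmt.set_powerlimits((-3, 3))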
Code Example #58
File: corner.py Project: griffin-h/corner.py
def corner(xs,
           bins=20,
           range=None,
           weights=None,
           color="k",
           smooth=None,
           smooth1d=None,
           labels=None,
           label_kwargs=None,
           show_titles=False,
           title_fmt=".2f",
           title_kwargs=None,
           truths=None,
           truth_color="#4682b4",
           scale_hist=False,
           quantiles=None,
           verbose=False,
           fig=None,
           max_n_ticks=5,
           top_ticks=False,
           use_math_text=False,
           reverse=False,
           hist_kwargs=None,
           priors=None,
           prior_kwargs=None,
           **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like[nsamples, ndim]
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis holds the dimensions of
        the space.

    bins : int or array_like[ndim,]
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.

    weights : array_like[nsamples,]
        The weight of each sample. If `None` (default), samples are given
        equal weight.

    color : str
        A ``matplotlib`` style color for all histograms.

    smooth, smooth1d : float
       The standard deviation for Gaussian kernel passed to
       `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
       respectively. If `None` (default), no smoothing is applied.

    labels : iterable (ndim,)
        A list of names for the dimensions. If ``xs`` is a
        ``pandas.DataFrame``, labels default to its column names.

    label_kwargs : dict
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.

    show_titles : bool
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.

    title_fmt : string
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)

    title_kwargs : dict
        Any extra keyword arguments to send to the `set_title` command.

    range : iterable (ndim,)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.

    truths : iterable (ndim,)
        A list of reference values to indicate on the plots.  Individual
        values can be omitted by using ``None``.

    truth_color : str
        A ``matplotlib`` style color for the ``truths`` markers.

    scale_hist : bool
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?

    quantiles : iterable
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.

    verbose : bool
        If true, print the values of the computed quantiles.

    plot_contours : bool
        Draw contours for dense regions of the plot.

    use_math_text : bool
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.
        
    reverse : bool
        If true, plot the corner plot starting in the upper-right corner
        instead of the usual bottom-left corner.

    max_n_ticks : int
        Maximum number of ticks to try to use.

    top_ticks : bool
        If true, label the top ticks of each axis.

    fig : matplotlib.Figure
        Overplot onto the provided figure object.

    hist_kwargs : dict
        Any extra keyword arguments to send to the 1-D histogram plots.

    priors : iterable (ndim,)
        A list of functions used to plot another probability distribution on
        top of the 1-D histograms. Individual priors can be omitted by using
        ``None``. Priors are only calculated over the range of the posteriors.
        E.g., to plot flat priors, ``def flat(p): return np.ones_like(p)`` and
        pass ``priors=[flat, flat, flat]``.
    
    prior_kwargs : dict
        Any extra keyword arguments to send to the prior histogram plots, e.g.,
        ``prior_kwargs={"color": "blue"}``. The default style is a thin, gray,
        dotted line.

    **hist2d_kwargs
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.

    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if range is None:
        if "extents" in hist2d_kwargs:
            logging.warning("Deprecated keyword argument 'extents'. "
                            "Use 'range' instead.")
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(
                    ("It looks like the parameter(s) in "
                     "column(s) {0} have no dynamic range. "
                     "Please provide a `range` argument.").format(", ".join(
                         map("{0}".format,
                             np.arange(len(m))[m]))))

    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                q = [0.5 - 0.5 * range[i], 0.5 + 0.5 * range[i]]
                range[i] = quantile(xs[i], q, weights=weights)

    if len(range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")

    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0  # size of one side of one panel
    if reverse:
        lbdim = 0.2 * factor  # size of left/bottom margin
        trdim = 0.5 * factor  # size of top/right margin
    else:
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    # Create a new figure if one wasn't provided.
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)

    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")

    # Set up the defaults for plotting priors
    if priors is None:
        priors = [None] * xs.shape[0]

    if prior_kwargs is None:
        prior_kwargs = dict()
    prior_kwargs["linestyle"] = prior_kwargs.get("linestyle", ":")
    prior_kwargs["linewidth"] = prior_kwargs.get("linewidth", 1)
    prior_kwargs["color"] = prior_kwargs.get("color", "gray")

    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()

        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            if reverse:
                ax = axes[K - i - 1, K - i - 1]
            else:
                ax = axes[i, i]
        # Plot the histograms.
        if smooth1d is None:
            n, bin_edges, _ = ax.hist(x,
                                      bins=bins[i],
                                      weights=weights,
                                      range=np.sort(range[i]),
                                      **hist_kwargs)
        else:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            n, bin_edges = np.histogram(x,
                                        bins=bins[i],
                                        weights=weights,
                                        range=np.sort(range[i]))
            n = gaussian_filter(n, smooth1d)
            x0 = np.array(list(zip(bin_edges[:-1], bin_edges[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
            ax.plot(x0, y0, **hist_kwargs)

        if priors[i] is not None:
            prior = priors[i](bin_edges)
            prior *= np.sum(n) / np.sum(prior)
            ax.plot(bin_edges, prior, zorder=1, **prior_kwargs)

        if truths is not None and truths[i] is not None:
            ax.axvline(truths[i], color=truth_color)

        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=color)

            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])

        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                            weights=weights)
                q_m, q_p = q_50 - q_16, q_84 - q_50

                # Format the quantile display.
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))

                # Add in the column name if it's given.
                if labels is not None:
                    title = "{0} = {1}".format(labels[i], title)

            elif labels is not None:
                title = "{0}".format(labels[i])

            if title is not None:
                if reverse:
                    ax.set_xlabel(title, **title_kwargs)
                else:
                    ax.set_title(title, **title_kwargs)

        # Set up the axes.
        ax.set_xlim(range[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
        else:
            ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())

        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            if reverse:
                ax.xaxis.tick_top()
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                if reverse:
                    ax.set_title(labels[i], y=1.25, **label_kwargs)
                else:
                    ax.set_xlabel(labels[i], **label_kwargs)

            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(
                ScalarFormatter(useMathText=use_math_text))

        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                if reverse:
                    ax = axes[K - i - 1, K - j - 1]
                else:
                    ax = axes[i, j]
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue

            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()

            hist2d(y,
                   x,
                   ax=ax,
                   range=[range[j], range[i]],
                   weights=weights,
                   color=color,
                   smooth=smooth,
                   bins=[bins[j], bins[i]],
                   **hist2d_kwargs)

            if truths is not None:
                if truths[i] is not None and truths[j] is not None:
                    ax.plot(truths[j], truths[i], "s", color=truth_color)
                if truths[j] is not None:
                    ax.axvline(truths[j], color=truth_color)
                if truths[i] is not None:
                    ax.axhline(truths[i], color=truth_color)

            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                if reverse:
                    ax.xaxis.tick_top()
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    if reverse:
                        ax.xaxis.set_label_coords(0.5, 1.4)
                    else:
                        ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            if j > 0:
                ax.set_yticklabels([])
            else:
                if reverse:
                    ax.yaxis.tick_right()
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    if reverse:
                        ax.set_ylabel(labels[i], rotation=-90, **label_kwargs)
                        ax.yaxis.set_label_coords(1.3, 0.5)
                    else:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3, 0.5)

                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

    return fig
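A short usage sketch for the corner() function defined above, assuming it has been imported together with its module-level dependencies; the Gaussian samples, covariance, labels, and flat prior are placeholders chosen only to exercise the keyword arguments documented in the docstring.

import numpy as np

rng = np.random.RandomState(42)
nsamples = 5000
samples = rng.multivariate_normal(
    mean=[0.0, 1.0, -2.0],
    cov=[[1.0, 0.5, 0.0], [0.5, 2.0, 0.0], [0.0, 0.0, 0.5]],
    size=nsamples)

def flat(p):
    # Flat prior, evaluated over the posterior range (see the docstring above).
    return np.ones_like(p)

fig = corner(samples,
             labels=[r"$\alpha$", r"$\beta$", r"$\gamma$"],
             quantiles=[0.16, 0.5, 0.84],
             show_titles=True,
             priors=[flat, flat, None],
             use_math_text=True)
fig.savefig("corner_example.png")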
Code example #59
File: Bar.py  Project: beiko-lab/STAMP
	def plot(self, profile, statsResults):
		if len(statsResults.activeData) <= 0:
			self.emptyAxis()			
			return
		
		features = statsResults.getColumn('Features')
		if len(features) > 200:
			QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
			reply = QtGui.QMessageBox.question(self, 'Continue?', 'Profile contains ' + str(len(features)) + ' features. ' +
																		'It may take several seconds to generate this plot. We recommend filtering your profile first. ' + 
																		'Do you wish to continue?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
			QtGui.QApplication.instance().restoreOverrideCursor()
			if reply == QtGui.QMessageBox.No:
				self.emptyAxis()	
				return

		# *** Colour of plot elements
		axesColour = str(self.preferences['Axes colour'].name())
		profile1Colour = str(self.preferences['Sample 1 colour'].name())
		profile2Colour = str(self.preferences['Sample 2 colour'].name())
		
		# *** Set sample names
		bLogScale = False
		xLabel = self.fieldToPlot
		
		# *** Create lists for each quantity of interest
		if self.fieldToPlot == 'Number of sequences':
			if self.bSortFeatures:
				statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
																												[statsResults.dataHeadings['Seq1']], True)
			field1 = statsResults.getColumn('Seq1')
			field2 = statsResults.getColumn('Seq2')
			

		elif self.fieldToPlot == 'Number of parental sequences':
			if self.bSortFeatures:
				statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
																												[statsResults.dataHeadings['ParentalSeq1']], True)
			field1 = statsResults.getColumn('ParentalSeq1')
			field2 = statsResults.getColumn('ParentalSeq2')
			
		elif self.fieldToPlot == 'Proportion of sequences (%)':
			if self.bSortFeatures:
				statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
																												[statsResults.dataHeadings['RelFreq1']], True)
			field1 = statsResults.getColumn('RelFreq1')
			field2 = statsResults.getColumn('RelFreq2')
			
		elif self.fieldToPlot == 'p-values':
			if self.bSortFeatures:
				statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
																												[statsResults.dataHeadings['pValues']], False)
			field1 = statsResults.getColumn('pValues')
			field2 = None
			
		elif self.fieldToPlot == 'p-values (corrected)':
			if self.bSortFeatures:
				statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
																												[statsResults.dataHeadings['pValuesCorrected']], False)
			field1 = statsResults.getColumn('pValuesCorrected')
			field2 = None
						
		elif self.fieldToPlot == 'Effect size':
			if self.bSortFeatures:
				statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
																												[statsResults.dataHeadings['EffectSize']], True, True,
																												statsResults.confIntervMethod.bRatio)
			field1 = statsResults.getColumn('EffectSize')
			field2 = None
			
			bLogScale = statsResults.confIntervMethod.bRatio
			xLabel = statsResults.confIntervMethod.plotLabel
			
		features = statsResults.getColumn('Features')	# get sorted feature labels

		# *** Truncate feature labels
		highlightedFeatures = list(self.preferences['Highlighted sample features'])
		if self.preferences['Truncate feature names']:
			length = self.preferences['Length of truncated feature names']
			
			for i in xrange(0, len(features)):
				if len(features[i]) > length+3:
					features[i] = features[i][0:length] + '...'

			for i in xrange(0, len(highlightedFeatures)):
				if len(highlightedFeatures[i]) > length+3:
					highlightedFeatures[i] = highlightedFeatures[i][0:length] + '...'
						
		# *** Check that there is at least one significant feature
		if len(features) <= 0:
			self.emptyAxis('No significant features')			
			return

		# *** Set figure size
		padding = 0.2							 #inches
		heightBottomLabels = 0.4		# inches
		
		imageHeight = len(features)*self.figHeightPerRow + padding + heightBottomLabels
		self.fig.set_size_inches(self.figWidth, imageHeight)	
							
		yPlotOffsetFigSpace = heightBottomLabels / imageHeight 
		heightPlotFigSpace = 1.0 - yPlotOffsetFigSpace - padding / imageHeight
			 
		yLabelBounds = self.yLabelExtents(features, 8)
		xPlotOffsetFigSpace = yLabelBounds.width + 0.1 / self.figWidth
		widthPlotFigSpace = 1.0 - xPlotOffsetFigSpace - padding / self.figWidth
		
		axesBar = self.fig.add_axes([xPlotOffsetFigSpace,yPlotOffsetFigSpace,widthPlotFigSpace,heightPlotFigSpace])
		
		# *** Plot data
		barHeight = 0.35 
		
		if bLogScale:
			field1 = np.log10(field1)
			xLabel = 'log(' + xLabel + ')'
			if field2 != None:
				field2 = np.log10(field2)
		
		if field2 == None:
			rects1 = axesBar.barh(np.arange(len(features)), field1, height=barHeight)	
			axesBar.set_yticks(np.arange(len(features)) + 0.5*barHeight)		
			axesBar.set_ylim([0, len(features)-1.0 + barHeight + 0.1])			
		elif field2 != None:
			rects2 = axesBar.barh(np.arange(len(features)), field2, height=barHeight, color=profile2Colour)	
			rects1 = axesBar.barh(np.arange(len(features))+barHeight, field1, height=barHeight, color=profile1Colour)
			axesBar.set_yticks(np.arange(len(features)) + barHeight)		
			axesBar.set_ylim([0, len(features)-1.0 + 2*barHeight + 0.1])			 
				
		axesBar.set_yticklabels(features)	
		axesBar.set_xlabel(xLabel)
		
		scalarFormatter = ScalarFormatter(useMathText=False)
		scalarFormatter.set_scientific(True)
		scalarFormatter.set_powerlimits((-3,4))
		axesBar.xaxis.set_major_formatter(scalarFormatter)

		# *** Prettify plot
		if self.legendPos != -1 and field2 != None:
			legend = axesBar.legend([rects1[0], rects2[0]], (profile.sampleNames[0], profile.sampleNames[1]), loc=self.legendPos)
			legend.get_frame().set_linewidth(0)
				
		for label in axesBar.get_yticklabels():
			if label.get_text() in highlightedFeatures:
					label.set_color('red')
			
		for a in axesBar.yaxis.majorTicks:
			a.tick1On=False
			a.tick2On=False
				
		for a in axesBar.xaxis.majorTicks:
			a.tick1On=True
			a.tick2On=False
			
		for line in axesBar.yaxis.get_ticklines(): 
			line.set_color(axesColour)
				
		for line in axesBar.xaxis.get_ticklines(): 
			line.set_color(axesColour)
			
		for loc, spine in axesBar.spines.iteritems():
			if loc in ['right','top']:
					spine.set_color('none') 
			else:
				spine.set_color(axesColour)
		
		self.updateGeometry()			 
		self.draw()
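Stripped of the Qt widget plumbing, the axis formatting used in this plot() method reduces to a few matplotlib calls. Here is a minimal sketch with placeholder feature names and proportions; none of these values come from STAMP.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

features = ['feature A', 'feature B', 'feature C']          # placeholder labels
values = np.array([1.2e-4, 3.4e-4, 2.1e-4])                 # placeholder proportions

fig, ax = plt.subplots()
ax.barh(np.arange(len(features)), values, height=0.35)
ax.set_yticks(np.arange(len(features)))
ax.set_yticklabels(features)
ax.set_xlabel('Proportion of sequences (%)')

fmt = ScalarFormatter(useMathText=False)
fmt.set_scientific(True)
fmt.set_powerlimits((-3, 4))        # scientific notation outside 1e-3 .. 1e4
ax.xaxis.set_major_formatter(fmt)
plt.show()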
Code example #60
File: sid_power_law_it.py  Project: loniitkina/sid
# dummy x data for plotting (the second assignment overrides the first, so
# only the ls_list_fyi range is actually used below)
x = np.arange(min(meanls_list_fyi), max(meanls_list_fyi), 1)
x = np.arange(min(ls_list_fyi), max(ls_list_fyi), 1)

ax.loglog(x,
          a * x**k,
          linewidth=2,
          # scale a by 1e6 so the printed coefficient matches the 10^{-6} in the label
          label=r'$D=%.2f*10^{-6} L^{%.2f}$ (FYI)' % (a * 1e6, k),
          c='darkorange')
ax.plot(cix,
        ciy_low,
        '--',
        c='r',
        linewidth=1,
        label=r'$99\%\,confidence\,band$')
ax.plot(cix, ciy_upp, '--', c='r', linewidth=1)

ax.grid(True)
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.legend(loc='lower left',
          prop={'size': 16},
          fancybox=True,
          framealpha=0.5,
          numpoints=1)
fig1.tight_layout()

fig1.savefig(outpath + 'power_law_24h_it_7km_seed_f_masked_n9' + title)
#fig1.savefig(outpath+'power_law_24h_it_7km_'+title)
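On the log-log axes above, ScalarFormatter() replaces the default log formatter so the major x ticks read as plain numbers (10, 100, ...) instead of powers of ten. A self-contained sketch of that trick, with an arbitrary power law standing in for the fitted sea-ice deformation results:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

L = np.arange(7.0, 150.0, 1.0)          # length scales in km (illustrative)
D = 2.0e-6 * L**-0.35                   # assumed power law, not the fitted one

fig, ax = plt.subplots()
ax.loglog(L, D, linewidth=2, c='darkorange', label=r'$D = a\,L^{k}$')

# Plain numeric tick labels on the log-scaled x axis instead of 10^x notation.
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.grid(True)
ax.legend(loc='lower left', fancybox=True, framealpha=0.5)
fig.tight_layout()
plt.show()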