) sources = {} region_color = regions_df['region_color'] region_color.name = 'region_color' for year in years: fertility = fertility_df[year] fertility.name = 'fertility' life = life_expectancy_df[year] life.name = 'life' population = population_df_size[year] population.name = 'population' new_df = pd.concat([fertility, life, population, region_color], axis=1) sources['_' + str(year)] = ColumnDataSource(new_df) dictionary_of_sources = dict( zip([x for x in years], ['_%s' % x for x in years])) js_source_array = str(dictionary_of_sources).replace("'", "") xdr = Range1d(1, 9) ydr = Range1d(20, 100) plot = Plot( x_range=xdr, y_range=ydr, title="", plot_width=800, plot_height=400, outline_line_color=None, toolbar_location=None,
y=lats, name=county_names, data=county_cars, col=county_cars) area_dict = dict(x=longs, y=lats, name=county_names, data=county_areas, col=(-1) * county_areas) density_dict = dict(x=longs, y=lats, name=county_names, data=county_pop_density, col=np.log(county_pop_density)) source = ColumnDataSource(pop_dict) p = figure(plot_width=500, plot_height=1000, title='Swedish county populations in 2017', tools=TOOLS, x_axis_location=None, y_axis_location=None) p.grid.grid_line_color = None patch = p.patches('x', 'y', source=source, fill_color={ 'field': 'col', 'transform': color_mapper
import numpy as np from bokeh.io import curdoc, show from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Quadratic N = 9 x = np.linspace(-2, 2, N) y = x**2 source = ColumnDataSource( dict( x=x, y=y, xp02=x + 0.4, xp01=x + 0.1, yp01=y + 0.2, )) plot = Plot(title=None, plot_width=300, plot_height=300, min_border=0, toolbar_location=None) glyph = Quadratic(x0="x", y0="y", x1="xp02", y1="y", cx="xp01", cy="yp01", line_color="#4daf4a",
for i in metric: rep_date = str(i[5]) metrics[rep_date] = {} metrics[rep_date]['server'] = str(i[0]) metrics[rep_date]['curr_con'] = str(i[1]) metrics[rep_date]['curr_ssl_con'] = str(i[2]) metrics[rep_date]['sess_rate'] = str(i[3]) metrics[rep_date]['max_sess_rate'] = str(i[4]) df = pd.DataFrame.from_dict(metrics, orient="index") df = df.fillna(0) df.index = pd.to_datetime(df.index) df.index.name = 'Date' df.sort_index(inplace=True) source = ColumnDataSource(df) output_file("templates/metrics_out.html") x_min = df.index.min() - pd.Timedelta(hours=1) x_max = df.index.max() + pd.Timedelta(minutes=1) p[serv] = figure(tools="pan,box_zoom,reset,xwheel_zoom", title=metric[0][0], x_axis_type="datetime", y_axis_label='Connections', x_range=(x_max.timestamp() * 1000 - 60 * 100000, x_max.timestamp() * 1000)) hover = HoverTool(tooltips=[("Connections", "@curr_con"), ("SSL connections", "@curr_ssl_con"),
from functools import partial import time from concurrent.futures import ThreadPoolExecutor from tornado import gen from bokeh.document import without_document_lock from bokeh.models import ColumnDataSource from bokeh.plotting import curdoc, figure source = ColumnDataSource(data=dict(x=[0], y=[0], color=["blue"])) i = 0 doc = curdoc() executor = ThreadPoolExecutor(max_workers=2) def blocking_task(i): time.sleep(1) return i # the unlocked callback uses this locked callback to safely update @gen.coroutine def locked_update(i): source.stream(dict(x=[source.data['x'][-1] + 1], y=[i], color=["blue"])) # this unclocked callback will not prevent other session callbacks from # executing while it is in flight @gen.coroutine @without_document_lock def unlocked_task(): global i
def create_analysis_chart(metadata, actuals, predictions):
    """Build the interactive pm10 analysis view: a quantile-fan prediction
    plot plus a location selector and a prediction-offset slider, all wired
    together with a shared JavaScript callback.

    Parameters
    ----------
    metadata : pandas.DataFrame
        Indexed by location id; must have 'country', 'city' and 'location'
        columns (used to build the selector labels).
    actuals : (project type)
        Observed values; pushed to the browser via add_data_to_indexdb.
    predictions : pandas.DataFrame
        Quantile predictions with a 2-level index whose second level is the
        prediction time delta, and quantile columns named as in
        ``quantile_names`` (module-level) — TODO confirm exact schema.

    Returns
    -------
    bokeh layout (column of selector, plot and slider).
    """
    # Make the raw data available to the client-side JS callback.
    add_data_to_indexdb(actuals, predictions)

    ######################
    # Create the plot data sources. They start empty; the JS callback fills
    # them when a location/offset is selected.
    actuals_source = ColumnDataSource(dict(id=[], start=[], target=[], ma=[]))  # empty
    filtered_predictions = predictions.reset_index(level=1)
    predictions_source = ColumnDataSource(
        dict(id=[], start=[], **{q: [] for q in quantile_names}))

    # Create the plot.
    # NOTE(review): max(predictions['0.5'].max()) only works if
    # predictions['0.5'] selects multiple columns (so .max() returns an
    # iterable); with a single Series this raises TypeError — verify.
    predictions_plot = figure(title='',
                              plot_width=800,
                              plot_height=400,
                              x_axis_label='date/time',
                              y_axis_label='pm10 ',
                              x_axis_type='datetime',
                              y_range=[0, max(predictions['0.5'].max())],
                              tools='')

    # Plot stacked vertical areas for the quantile bands.
    predictions_plot.varea_stack(
        stackers=quantile_names,
        x='start',
        color=inferno(len(quantiles)),
        legend_label=quantile_names,
        source=predictions_source,
        alpha=1,
    )
    # Plot actual observed values (red) ...
    predictions_plot.line(x="start", y="target", color='red',
                          source=actuals_source)
    # ... and their moving average (black).
    predictions_plot.line(x="start", y="ma", color='black',
                          source=actuals_source)
    # Reverse legend entries so they read top-down like the stacked bands.
    predictions_plot.legend.items.reverse()

    #############################
    # Create location selector; option labels are "id-country-city-location".
    options = metadata.reset_index()[['id', 'country', 'city', 'location'
                                      ]].astype('str').agg('-'.join,
                                                           axis=1).tolist()
    location_select = Select(title='Select Location:',
                             value=options[0],
                             options=options)

    ################################
    # Create prediction start slider over the available time deltas.
    # NOTE(review): start_min is computed but never used.
    start_min = filtered_predictions.reset_index()['start'].min()
    start_slider = Slider(
        start=0,
        end=predictions.index.get_level_values(1).unique().max(),
        value=0,
        step=1,
        title=f'prediction time delta')

    #############################
    # Create javascript callback.
    # The javascript callback function connects all the plots and
    # gui components together so changes will update the plots.
    callback_args = dict(actuals=actuals_source,
                         predictions=predictions_source,
                         location_select=location_select,
                         start_slider=start_slider)
    with open('javascript/plot_update_callback.js', 'r') as f:
        callback_code = f.read()
    plot_update_callback = CustomJS(code=callback_code, args=callback_args)
    location_select.js_on_change('value', plot_update_callback)
    start_slider.js_on_change('value', plot_update_callback)

    return column(location_select, predictions_plot, start_slider)
# Render station locations from stations.json on a Google Maps plot.
output_file('bokeh_map.html', mode='relative-dev')

# Process the data: stations.json is keyed by station; after transposing,
# numeric columns 0..3 are relabelled. Assumes column 0 is latitude and
# column 1 longitude — TODO confirm against the JSON producer.
stations = pd.read_json('stations.json').T
stations = stations.rename(columns={0: 'latitude', 1: 'longitude',
                                    2: 'name', 3: 'data'})

x_range = DataRange1d()
y_range = DataRange1d()

map_options = GMapOptions(
    lat=37.76487,
    lng=-122.41948,
    zoom=6,
    map_type="roadmap"
)

plot = GMapPlot(
    x_range=x_range,
    y_range=y_range,
    map_options=map_options,
    title=None,
)

# FIX: on a map plot x is longitude and y is latitude; the original had
# x="latitude", y="longitude", which mirrors every point across the diagonal.
circle = Circle(x="longitude", y="latitude", size=10,
                fill_color="red", line_color=None)
plot.add_glyph(ColumnDataSource(stations), circle)

xaxis = LinearAxis()
yaxis = LinearAxis()
# FIX: the x-axis belongs below the plot and the y-axis on the left
# (the original placements were swapped).
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
plot.add_tools(PanTool(), WheelZoomTool(), CrosshairTool())
save(plot)
def aggregate_plot(tb):
    """
    Create a bokeh plot showing aggregate tax liabilities for each year
    the TaxBrain instance was run, with Base/Reform toggles.

    Parameters
    ----------
    tb: An instance of the TaxBrain object

    Returns
    -------
    dict with "media_type", "title" and "data" (embedded bokeh js/html)
    """
    # Pull aggregate data by year and transpose it for plotting
    varlist = ["iitax", "payrolltax", "combined"]
    base_data = tb.multi_var_table(varlist, "base").transpose()
    base_data["calc"] = "Base"
    reform_data = tb.multi_var_table(varlist, "reform").transpose()
    reform_data["calc"] = "Reform"
    base_cds = ColumnDataSource(base_data)
    reform_cds = ColumnDataSource(reform_data)
    num_ticks = len(base_data)
    # The CDS objects hold copies; free the frames.
    del base_data, reform_data

    fig = figure(title="Aggregate Tax Liability by Year",
                 width=700, height=500, tools="save")
    ii_base = fig.line(x="index", y="iitax", line_width=4,
                       line_color="#12719e", legend="Income Tax - Base",
                       source=base_cds)
    ii_reform = fig.line(x="index", y="iitax", line_width=4,
                         line_color="#73bfe2", legend="Income Tax - Reform",
                         source=reform_cds)
    # FIX: proll_base/proll_reform previously had their sources and legends
    # swapped (proll_base plotted reform_cds and vice versa), so the Base and
    # Reform toggles below hid the wrong payroll-tax lines. Dark green for
    # Base and light green for Reform, matching the other series' palettes.
    proll_base = fig.line(x="index", y="payrolltax", line_width=4,
                          line_color="#408941", legend="Payroll Tax - Base",
                          source=base_cds)
    proll_reform = fig.line(x="index", y="payrolltax", line_width=4,
                            line_color="#98cf90", legend="Payroll Tax - Reform",
                            source=reform_cds)
    comb_base = fig.line(x="index", y="combined", line_width=4,
                         line_color="#a4201d", legend="Combined - Base",
                         source=base_cds)
    comb_reform = fig.line(x="index", y="combined", line_width=4,
                           line_color="#e9807d", legend="Combined - Reform",
                           source=reform_cds)

    # format figure
    fig.legend.location = "top_left"
    fig.yaxis.formatter = NumeralTickFormatter(format="$0.00a")
    fig.yaxis.axis_label = "Aggregate Tax Liability"
    fig.xaxis.minor_tick_line_color = None
    fig.xaxis[0].ticker.desired_num_ticks = num_ticks

    # Add hover tool — one per series pair so each shows its own label.
    tool_str = """
        <p><b>@calc - {}</b></p>
        <p>${}</p>
    """
    ii_hover = HoverTool(tooltips=tool_str.format("Individual Income Tax",
                                                  "@iitax{0,0}"),
                         renderers=[ii_base, ii_reform])
    proll_hover = HoverTool(tooltips=tool_str.format("Payroll Tax",
                                                     "@payrolltax{0,0}"),
                            renderers=[proll_base, proll_reform])
    combined_hover = HoverTool(tooltips=tool_str.format("Combined Tax",
                                                        "@combined{0,0}"),
                               renderers=[comb_base, comb_reform])
    fig.add_tools(ii_hover, proll_hover, combined_hover)

    # toggle which lines are shown (CoffeeScript; compiled client-side)
    plot_js = """
    object1.visible = toggle.active
    object2.visible = toggle.active
    object3.visible = toggle.active
    """
    base_callback = CustomJS.from_coffeescript(code=plot_js, args={})
    base_toggle = Toggle(label="Base", button_type="primary",
                         callback=base_callback, active=True)
    base_callback.args = {
        "toggle": base_toggle,
        "object1": ii_base,
        "object2": proll_base,
        "object3": comb_base
    }
    reform_callback = CustomJS.from_coffeescript(code=plot_js, args={})
    reform_toggle = Toggle(label="Reform", button_type="primary",
                           callback=reform_callback, active=True)
    reform_callback.args = {
        "toggle": reform_toggle,
        "object1": ii_reform,
        "object2": proll_reform,
        "object3": comb_reform
    }
    fig_layout = layout([fig], [base_toggle, reform_toggle])

    # Components needed to embed the figure
    js, div = components(fig_layout)
    outputs = {
        "media_type": "bokeh",
        "title": "",
        "data": {
            "javascript": js,
            "html": div
        }
    }
    return outputs
def nb_view_patches(Yr, A, C, b, f, d1, d2, YrA=None, image_neurons=None,
                    thr=0.99, denoised_color=None, cmap='jet'):
    """
    Interactive plotting utility for ipython notebook: shows each component's
    spatial contour over an image and its (noisy and denoised) temporal trace,
    with a slider to switch between components.

    Parameters:
    -----------
    Yr: np.ndarray
        movie
    A,C,b,f: np.ndarrays
        outputs of matrix factorization algorithm
    d1,d2: floats
        dimensions of movie (x and y)
    YrA: np.ndarray
        ROI filtered residual as it is given from update_temporal_components
        If not given, then it is computed (K x T)
    image_neurons: np.ndarray
        image to be overlaid to neurons (for instance the average)
    thr: double
        threshold regulating the extent of the displayed patches
    denoised_color: string or None
        color name (e.g. 'red') or hex color code (e.g. '#F0027F')
    cmap: string
        name of colormap (e.g. 'viridis') used to plot image_neurons

    Returns
    -------
    Y_r : np.ndarray
        the (computed or passed-through) filtered traces C + residual
    """
    colormap = cm.get_cmap(cmap)
    # Hex palette for the background image.
    grayp = [mpl.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))]
    nr, T = C.shape
    # Per-component sum of squares (energy); sparse A has no np.power.
    nA2 = np.ravel(np.power(A, 2).sum(0)) if type(A) == np.ndarray else np.ravel(A.power(2).sum(0))
    b = np.squeeze(b)
    f = np.squeeze(f)
    if YrA is None:
        # Project the background-subtracted movie onto the (energy-normalized)
        # spatial components to recover residual traces, then add C.
        Y_r = np.array(spdiags(old_div(1, nA2), 0, nr, nr) *
                       (A.T * np.matrix(Yr) -
                        (A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) -
                        A.T.dot(A) * np.matrix(C)) + C)
    else:
        Y_r = C + YrA

    x = np.arange(T)
    # Traces scaled by 1/100 for display; z is T x nr.
    z = old_div(np.squeeze(np.array(Y_r[:, :].T)), 100)
    if image_neurons is None:
        image_neurons = A.mean(1).reshape((d1, d2), order='F')

    # Contour coordinates of each component at threshold thr.
    coors = get_contours(A, (d1, d2), thr)
    cc1 = [cor['coordinates'][:, 0] for cor in coors]
    cc2 = [cor['coordinates'][:, 1] for cor in coors]
    c1 = cc1[0]
    c2 = cc2[0]

    # split sources up, such that Bokeh does not warn
    # "ColumnDataSource's columns must be of the same length"
    # source/source2 hold the currently displayed component; source_/source2_
    # hold all components, from which the JS slider callback copies.
    source = ColumnDataSource(data=dict(x=x, y=z[:, 0], y2=C[0] / 100))
    source_ = ColumnDataSource(data=dict(z=z.T, z2=C / 100))
    source2 = ColumnDataSource(data=dict(c1=c1, c2=c2))
    source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2))

    # Client-side slider callback (old bokeh .get()/.trigger() API): copies
    # trace f and contour f from the *_ sources into the displayed sources.
    callback = CustomJS(args=dict(source=source, source_=source_, source2=source2, source2_=source2_), code=""" var data = source.get('data') var data_ = source_.get('data') var f = cb_obj.get('value')-1 x = data['x'] y = data['y'] y2 = data['y2'] for (i = 0; i < x.length; i++) { y[i] = data_['z'][i+f*x.length] y2[i] = data_['z2'][i+f*x.length] } var data2_ = source2_.get('data'); var data2 = source2.get('data'); c1 = data2['c1']; c2 = data2['c2']; cc1 = data2_['cc1']; cc2 = data2_['cc2']; for (i = 0; i < c1.length; i++) { c1[i] = cc1[f][i] c2[i] = cc2[f][i] } source2.trigger('change') source.trigger('change') """)

    # Trace plot: raw (and optionally denoised) trace of the selected neuron.
    plot = bpl.figure(plot_width=600, plot_height=300)
    plot.line('x', 'y', source=source, line_width=1, line_alpha=0.6)
    if denoised_color is not None:
        plot.line('x', 'y2', source=source, line_width=1,
                  line_alpha=0.6, color=denoised_color)

    slider = bokeh.models.Slider(start=1, end=Y_r.shape[0], value=1, step=1,
                                 title="Neuron Number", callback=callback)

    # Image plot with the selected component's contour; y-axis inverted so the
    # image displays in conventional orientation.
    xr = Range1d(start=0, end=image_neurons.shape[1])
    yr = Range1d(start=image_neurons.shape[0], end=0)
    plot1 = bpl.figure(x_range=xr, y_range=yr, plot_width=300, plot_height=300)
    plot1.image(image=[image_neurons[::-1, :]], x=0,
                y=image_neurons.shape[0], dw=d2, dh=d1, palette=grayp)
    plot1.patch('c1', 'c2', alpha=0.6, color='purple',
                line_width=2, source=source2)

    bpl.show(bokeh.layouts.layout([[slider], [bokeh.layouts.row(plot1, plot)]]))

    return Y_r
def about():
    """Handle the /about form POST: fetch the chosen ticker's Quandl WIKI
    CSV, restrict it to January 2017, plot the requested price columns and
    render them into the about.html template.

    Returns None for non-POST requests (original behavior preserved).
    """
    if request.method != 'POST':
        return None

    # First form value is the ticker symbol; the rest are column codes.
    app.choices = request.form.getlist('Stock_Ticker')
    filepath = 'https://www.quandl.com/api/v3/datasets/WIKI/' + app.choices[0] + '.csv'
    data = read_csv(filepath, index_col='Date', parse_dates=True)

    # Reverse to chronological order and reindex onto a daily calendar.
    new_data = data.iloc[::-1]
    new_index = pd.date_range(start=new_data.index[0],
                              end=new_data.index[-1],
                              freq='D')
    new_data = new_data.reindex(new_index)

    # Keep only January 2017.
    subset_index = pd.date_range(start='2017-01-01', end='2017-01-31', freq='D')
    subset = new_data.loc[subset_index]
    clean = subset.dropna()
    data_source = ColumnDataSource(clean)

    # Map the submitted option codes to price column names.
    code_to_column = {'1': 'High', '2': 'Adj. High', '3': 'Low', '4': 'Adj. Low'}
    cols = [code_to_column[code] for code in app.choices[1:] if code in code_to_column]

    p = figure(x_axis_type='datetime',
               title="Quandl WIKI Stock Prices - Jan 2017")
    p.xaxis.formatter = DatetimeTickFormatter(days='%d')
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.xaxis.axis_label = 'date'

    # One line per requested column, each with its fixed color.
    column_colors = {'High': 'red', 'Adj. High': 'green',
                     'Low': 'blue', 'Adj. Low': 'yellow'}
    for name in cols:
        p.line(x=clean.index,
               y=data_source.data[name],
               line_color=column_colors[name],
               legend_label=name)

    p.legend.location = "top_left"
    p.legend.click_policy = "hide"

    # Embed the figure with inline resources.
    script, div = components(p, INLINE)
    js_resources = INLINE.render_js()
    css_resources = INLINE.render_css()
    title = f'Graph of {app.choices[0]} data'
    return render_template('about.html',
                           plot_script=script,
                           plot_div=div,
                           js_resources=js_resources,
                           css_resources=css_resources,
                           webpage_title=title)
# Definition of the sequence numbers plot p_seq = figure(width=1500,height=700,tools=TOOLS, x_axis_type="datetime", title="TCP sequence values in Honeypot {}".format(title)) hoverseq = p_seq.select(dict(type=HoverTool)) hoverseq.tooltips = [ ("index", "$index"), ("timestamp", "@x{%F %H:%M:%S}"), ("number", "@y{0,0}"), ("dest port", "@dport"), ("src port", "@sport") ] hoverseq.formatters = { 'x': 'datetime'} seq_sourceplot = ColumnDataSource(data=dict( x = x, y = y, dport = z_d, sport = z_s, colorseq = colorseq )) p_seq.xaxis.axis_label = "Time" p_seq.yaxis.axis_label = "Sequence Numbers" p_seq.yaxis[0].formatter = BasicTickFormatter(use_scientific=False) p_seq.scatter(x='x', y='y', color='colorseq', legend="seq values", alpha=0.5, source=seq_sourceplot) p_ack = figure(width=1500,height=700,tools=TOOLS, x_axis_type="datetime", title="TCP acknowledgement values in Honeypot {}".format(title)) hoverack = p_ack.select(dict(type=HoverTool)) hoverack.tooltips = [ ("index", "$index"), ("timestamp", "@x{%F %H:%M:%S}"), ("number", "@y{0,0}"), ("dest port", "@dport"), ("src port", "@sport")
def getTimePlotAll(valuedict, colors, title, xtitle, ytitle):
    """Build a datetime scatter+line plot with one series per key of
    *valuedict*, plus a shaded band between the opened/closed series.

    Parameters
    ----------
    valuedict : dict
        Maps series names to per-year value sequences. Must contain a
        "Time" entry with the years, and definitions.YEAR_OPENED /
        definitions.YEAR_CLOSED entries for the band.
        (Assumes BokehTime.TIME == "Time" — TODO confirm.)
    colors : dict
        Series name -> color for its circles and line.
    title, xtitle, ytitle : str
        Plot title and axis labels.

    Returns
    -------
    bokeh figure
    """
    # Convert year labels to datetimes (Jan 1 of each year).
    x_time = [datetime.datetime(int(year), 1, 1, 0, 0)
              for year in valuedict["Time"]]

    hover = HoverTool(
        tooltips=[
            ("Year", "@x{%Y}"),
            ("Value", "@y{int}"),
        ],
        formatters={
            'x': 'datetime',  # use 'datetime' formatter for 'date' field
        },
        # display a tooltip whenever the cursor is vertically in line with a glyph
        mode='vline',
        point_policy="snap_to_data")

    p = figure(plot_height=bokutils.PLOT_HEIGHT,
               plot_width=bokutils.PLOT_WIDTH,
               x_axis_type='datetime',
               title=title,
               tools=[hover, "crosshair", "box_zoom", "reset", "pan"])
    p.xaxis.formatter = DatetimeTickFormatter(years=['%Y'])
    p.xaxis.axis_label = xtitle
    p.yaxis.axis_label = ytitle
    p.xaxis.major_label_orientation = 'vertical'

    # Band polygon: closed counts forward, opened counts backward.
    band_x = np.append(x_time, x_time[::-1])
    band_y = np.append(valuedict[definitions.YEAR_CLOSED],
                       valuedict[definitions.YEAR_OPENED][::-1])

    # a line works fine with time objects
    kcount = 0
    litems = []
    # FIX: dict.iterkeys() was removed in Python 3; dict.keys() behaves
    # identically under sorted() on both Python 2 and 3.
    for key in sorted(valuedict.keys()):
        if (key != BokehTime.TIME):
            # Per-point description column so the hover can identify the series.
            desc = [key] * len(valuedict[BokehTime.TIME])
            source = ColumnDataSource(
                data=dict(x=x_time, y=valuedict[key], desc=desc))
            circ = p.circle('x', 'y', size=5, source=source, color=colors[key])
            litems.append(
                LegendItem(label=bokutils.makeLegendKey(key),
                           renderers=[circ]))
            p.line(x_time, valuedict[key], color=colors[key])
            kcount = kcount + 1
            # Cap the number of plotted series.
            if (kcount > bokutils.MAX_CATEGORY_LINES):
                break

    p.patch(band_x, band_y, color='#7570B3', fill_alpha=0.2)
    p.add_layout(
        Legend(
            items=litems,
            glyph_height=bokutils.LEGEND_GLYPH_HEIGHT,
            glyph_width=bokutils.LEGEND_GLYPH_WIDTH,
        ), 'right')
    return p
'http://bostonopendata-boston.opendata.arcgis.com/datasets/465e00f9632145a1ad645a27d27069b4_2.csv', 'Fire Depts': 'http://bostonopendata-boston.opendata.arcgis.com/datasets/092857c15cbb49e8b214ca5e228317a1_2.csv', 'Police Stations': 'http://bostonopendata-boston.opendata.arcgis.com/datasets/e5a0066d38ac4e2abbc7918197a4f6af_6.csv', 'WickedWiFi': 'http://bostonopendata-boston.opendata.arcgis.com/datasets/4b803745fedd4e88861967d16a1e07fb_0.csv', 'Trees': 'http://bostonopendata-boston.opendata.arcgis.com/datasets/ce863d38db284efe83555caf8a832e2a_1.csv', 'Snow Emergency Parking': 'http://bostonopendata-boston.opendata.arcgis.com/datasets/53ebc23fcc654111b642f70e61c63852_0.csv', 'Hubway Stations': 'http://bostonopendata-boston.opendata.arcgis.com/datasets/ee7474e2a0aa45cbbdfe0b747a5eb032_0.csv', } src = ColumnDataSource() def make_plot(): # Load data # selected_source = sorted(data_urls)[select.active] # data = pd.read_csv(data_urls[selected_source]) data = pd.read_csv(data_urls[select.value]) # Convert EPSG code p1 = Proj(init='epsg:4326') # this is the EPSG code of the original p2 = Proj(init='epsg:3857') # this is the EPSG code of the tiles transformed_coords = [ transform(p1, p2, x1, y1) for x1, y1 in zip(data['X'], data['Y']) ] data['X'], data['Y'] = (zip(*transformed_coords))
# Persist the aggregated option tables (Calls*/Puts* DataFrames are built
# earlier in this script) and start the "simple" open-interest plot.
CallsY2.to_csv('CallsY2.csv')

# Aggregate put rows by strike; keep Strike both as index and as a column.
Puts2 = Puts.groupby(['Strike']).sum()
Puts2['Strike'] = Puts2.index
Puts2.to_csv('Puts2.csv')
PutsY2 = PutsY.groupby(['Strike']).sum()
PutsY2['Strike'] = PutsY2.index
PutsY2.to_csv('PutsY2.csv')

''' Plots simple '''
# Output HTML name embeds the day count from sys.argv[3].
output_file(f"{folder}/Current_Call-OIsimple_{sys.argv[3]}_days.html")

# x = strike price, y = open interest for the current calls table.
strikes = Calls['Strike']
op_int = Calls['Open Int']
source = ColumnDataSource(data=dict(x=strikes, y=op_int))

# Hover shows the strike/open-interest values under the cursor.
MyHover10 = HoverTool(tooltips=[
    ('Strike', '@x'),
    ('OI', '@y'),
], point_policy="follow_mouse")

p = figure(
    title="Current Call Open Interest Simple SPX",
    tools=[
        "pan,wheel_zoom,xwheel_zoom,ywheel_zoom,box_zoom,zoom_in, xzoom_in, yzoom_in,zoom_out, xzoom_out, yzoom_out,reset,save,lasso_select",
        MyHover10
    ],
    x_axis_label='Strike Price',
    y_axis_label='OI',
    width=1200,
    height=600)
import numpy as np from bokeh.document import Document from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid from bokeh.models.glyphs import Ellipse from bokeh.plotting import show N = 9 x = np.linspace(-2, 2, N) y = x**2 w = x / 15.0 + 0.3 h = y / 20.0 + 0.3 source = ColumnDataSource(dict(x=x, y=y, w=w, h=h)) xdr = DataRange1d() ydr = DataRange1d() plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300, h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None) glyph = Ellipse(x="x", y="y", width="w",
def nb_view_patches3d(Y_r, A, C, dims, image_type='mean', Yr=None,
                      max_projection=False, axis=0, thr=0.9,
                      denoised_color=None, cmap='jet'):
    """
    Interactive plotting utility for ipython notebook (3-D movies): shows
    component contours over a background image (max projection or a
    layer-by-layer view) and the selected component's temporal trace.

    Parameters:
    -----------
    Y_r: np.ndarray
        residual of each trace
    A,C: np.ndarrays
        outputs of matrix factorization algorithm
    dims: tuple of ints
        dimensions of movie (x, y and z)
    image_type: 'mean', 'max' or 'corr'
        image to be overlaid to neurons (average of shapes, maximum of shapes
        or nearest neighbor correlation of raw data)
    Yr: np.ndarray
        movie, only required if image_type=='corr' to calculate correlation image
    max_projection: boolean
        plot max projection along specified axis if True, plot layers if False
    axis: int (0, 1 or 2)
        axis along which max projection is performed or layers are shown
    thr: scalar between 0 and 1
        Energy threshold for computing contours
    denoised_color: string or None
        color name (e.g. 'red') or hex color code (e.g. '#F0027F')
    cmap: string
        name of colormap (e.g. 'viridis') used to plot image_neurons

    Raise:
    ------
    ValueError("image_type must be 'mean', 'max' or 'corr'")
    """
    bokeh.io.curdoc().clear()  # prune old orphaned models, otherwise filesize blows up
    d = A.shape[0]
    # Rotate the axis order so the requested axis comes first.
    order = list(range(4))
    order.insert(0, order.pop(axis))
    # Show the filtered trace (component + residual).
    Y_r = Y_r + C
    # Permutation that reorders flattened voxel indices to the new axis order.
    index_permut = np.reshape(np.arange(d), dims, order='F').transpose(
        order[:-1]).reshape(d, order='F')
    A = csc_matrix(A)[index_permut, :]
    dims = tuple(np.array(dims)[order[:3]])
    d1, d2, d3 = dims
    colormap = cm.get_cmap(cmap)
    grayp = [mpl.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))]
    nr, T = C.shape

    x = np.arange(T)
    # Displayed-trace source (selected neuron) and all-neurons source the JS
    # callback copies from; traces scaled by 1/100 for display.
    source = ColumnDataSource(data=dict(x=x, y=Y_r[0] / 100, y2=C[0] / 100))
    source_ = ColumnDataSource(data=dict(z=Y_r / 100, z2=C / 100))
    # Component count and a NaN constant, both needed client-side.
    sourceN = ColumnDataSource(data=dict(N=[nr], nan=np.array([np.nan])))

    if max_projection:
        # Background: max projections of the chosen image along each axis,
        # assembled into one montage.
        if image_type == 'corr':
            tmp = [(local_correlations(
                Yr.reshape(dims + (-1,), order='F'))[:, ::-1]).max(i)
                for i in range(3)]
        elif image_type == 'mean':
            tmp = [(np.array(A.mean(axis=1)).reshape(dims, order='F')[:, ::-1]).max(i)
                   for i in range(3)]
        elif image_type == 'max':
            tmp = [(A.max(axis=1).toarray().reshape(dims, order='F')[:, ::-1]).max(i)
                   for i in range(3)]
        else:
            raise ValueError("image_type must be 'mean', 'max' or 'corr'")

        # Montage canvas (NaN = empty) holding the three projections.
        image_neurons = np.nan * np.ones((int(1.05 * (d1 + d2)),
                                          int(1.05 * (d1 + d3))))
        image_neurons[:d2, -d3:] = tmp[0][::-1]
        image_neurons[:d2, :d1] = tmp[2].T[::-1]
        image_neurons[-d1:, -d3:] = tmp[1]
        offset1 = image_neurons.shape[1] - d3
        offset2 = image_neurons.shape[0] - d1

        # Max-project each component's footprint along each axis, then get
        # its contours in each projection.
        proj_ = [coo_matrix([A[:, nnrr].toarray().reshape(dims, order='F').max(
            i).reshape(-1, order='F') for nnrr in range(A.shape[1])])
            for i in range(3)]
        proj_ = [pproj_.T for pproj_ in proj_]
        coors = [get_contours(proj_[i], tmp[i].shape, thr=thr)
                 for i in range(3)]
        pl.close()

        # Pad all contours to a common length K (NaN-filled) so they can be
        # stored in one rectangular array for the JS callback.
        K = np.max([[len(cor['coordinates']) for cor in cc] for cc in coors])
        cc1 = np.nan * np.zeros(np.shape(coors) + (K,))
        cc2 = np.nan * np.zeros(np.shape(coors) + (K,))
        # Offsets place each projection's contours on the montage.
        for i, cor in enumerate(coors[0]):
            cc1[0, i, :len(cor['coordinates'])] = cor['coordinates'][:, 0] + offset1
            cc2[0, i, :len(cor['coordinates'])] = cor['coordinates'][:, 1]
        for i, cor in enumerate(coors[2]):
            cc1[1, i, :len(cor['coordinates'])] = cor['coordinates'][:, 1]
            cc2[1, i, :len(cor['coordinates'])] = cor['coordinates'][:, 0]
        for i, cor in enumerate(coors[1]):
            cc1[2, i, :len(cor['coordinates'])] = cor['coordinates'][:, 0] + offset1
            cc2[2, i, :len(cor['coordinates'])] = cor['coordinates'][:, 1] + offset2

        # Initial (first neuron) contours for the three projections.
        c1x = cc1[0][0]
        c2x = cc2[0][0]
        c1y = cc1[1][0]
        c2y = cc2[1][0]
        c1z = cc1[2][0]
        c2z = cc2[2][0]
        source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2))
        source2 = ColumnDataSource(data=dict(c1x=c1x, c1y=c1y, c1z=c1z,
                                             c2x=c2x, c2y=c2y, c2z=c2z))
        # Slider callback (old bokeh .get()/.trigger() API): copy neuron f's
        # trace and its three projected contours into the displayed sources.
        callback = CustomJS(args=dict(source=source, source_=source_, sourceN=sourceN, source2=source2, source2_=source2_), code=""" var data = source.get('data'); var data_ = source_.get('data'); var f = cb_obj.get('value')-1 x = data['x'] y = data['y'] y2 = data['y2'] for (i = 0; i < x.length; i++) { y[i] = data_['z'][i+f*x.length] y2[i] = data_['z2'][i+f*x.length] } var data2_ = source2_.get('data'); var data2 = source2.get('data'); c1x = data2['c1x']; c2x = data2['c2x']; c1y = data2['c1y']; c2y = data2['c2y']; c1z = data2['c1z']; c2z = data2['c2z']; cc1 = data2_['cc1']; cc2 = data2_['cc2']; var N = sourceN.get('data')['N'][0]; for (i = 0; i < c1x.length; i++) { c1x[i] = cc1[f*c1x.length + i] c2x[i] = cc2[f*c1x.length + i] } for (i = 0; i < c1x.length; i++) { c1y[i] = cc1[N*c1y.length + f*c1y.length + i] c2y[i] = cc2[N*c1y.length + f*c1y.length + i] } for (i = 0; i < c1x.length; i++) { c1z[i] = cc1[2*N*c1z.length + f*c1z.length + i] c2z[i] = cc2[2*N*c1z.length + f*c1z.length + i] } source2.trigger('change'); source.trigger('change'); """)
    else:
        # Layer view: background image per z-layer, selected via a second slider.
        if image_type == 'corr':
            image_neurons = local_correlations(
                Yr.reshape(dims + (-1,), order='F'))[:-1, ::-1]
        elif image_type == 'mean':
            image_neurons = np.array(A.mean(axis=1)).reshape(dims, order='F')[:, ::-1]
        elif image_type == 'max':
            image_neurons = A.max(axis=1).toarray().reshape(dims, order='F')[:, ::-1]
        else:
            raise ValueError('image_type must be mean, max or corr')

        # NOTE: 'cmap' is rebound here from the string to a color mapper.
        cmap = bokeh.models.mappers.LinearColorMapper(
            [mpl.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))])
        cmap.high = image_neurons.max()
        coors = get_contours(A, dims, thr=thr)
        pl.close()

        # Per-neuron, per-layer contour segments, concatenated into flat
        # arrays with idx/length bookkeeping for the JS callbacks.
        cc1 = [[(l[:, 0]) for l in n['coordinates']] for n in coors]
        cc2 = [[(l[:, 1]) for l in n['coordinates']] for n in coors]
        length = np.ravel([list(map(len, cc)) for cc in cc1])
        idx = np.cumsum(np.concatenate([[0], length[:-1]]))
        cc1 = np.concatenate(list(map(np.concatenate, cc1)))
        cc2 = np.concatenate(list(map(np.concatenate, cc2)))
        linit = int(round(coors[0]['CoM'][0]))  # pick initial layer in which first neuron lies
        K = length.max()
        # NaN-padded contour of the initially displayed neuron/layer.
        c1 = np.nan * np.zeros(K)
        c2 = np.nan * np.zeros(K)
        c1[:length[linit]] = cc1[idx[linit]:idx[linit] + length[linit]]
        c2[:length[linit]] = cc2[idx[linit]:idx[linit] + length[linit]]
        source2 = ColumnDataSource(data=dict(c1=c1, c2=c2))
        source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2))
        source2_idx = ColumnDataSource(data=dict(idx=idx, length=length))
        source3 = ColumnDataSource(
            data=dict(image=[image_neurons[linit]], im=[image_neurons],
                      x=[0], y=[d2], dw=[d3], dh=[d2]))
        # Neuron-slider callback: update the trace and the contour for the
        # current (neuron, layer) pair.
        callback = CustomJS(args=dict(source=source, source_=source_, sourceN=sourceN, source2=source2, source2_=source2_, source2_idx=source2_idx), code=""" var data = source.data; var data_ = source_.data; var f = slider_neuron.value-1; var l = slider_layer.value-1; x = data['x'] y = data['y'] y2 = data['y2'] for (i = 0; i < x.length; i++) { y[i] = data_['z'][i+f*x.length] y2[i] = data_['z2'][i+f*x.length] } var data2 = source2.data; var data2_ = source2_.data; var data2_idx = source2_idx.data; var idx = data2_idx['idx']; c1 = data2['c1']; c2 = data2['c2']; var nz = idx.length / sourceN.data['N'][0]; var nan = sourceN.data['nan'][0]; for (i = 0; i < c1.length; i++) { c1[i] = nan; c2[i] = nan; } for (i = 0; i < data2_idx['length'][l+f*nz]; i++) { c1[i] = data2_['cc1'][idx[l+f*nz] + i]; c2[i] = data2_['cc2'][idx[l+f*nz] + i]; } source2.trigger('change'); source.trigger('change'); """)
        # Layer-slider callback: swap in the selected layer's image and
        # redraw the contour for the current (neuron, layer) pair.
        callback_layer = CustomJS(args=dict(source=source3, sourceN=sourceN, source2=source2, source2_=source2_, source2_idx=source2_idx), code=""" var f = slider_neuron.value-1; var l = slider_layer.value-1; var dh = source.data['dh'][0]; var dw = source.data['dw'][0]; var image = source.data['image'][0]; var images = source.data['im'][0]; for (var i = 0; i < x.length; i++) { for (var j = 0; j < dw; j++){ image[i*dh+j] = images[l*dh*dw + i*dh + j]; } } var data2 = source2.data; var data2_ = source2_.data; var data2_idx = source2_idx.data; var idx = data2_idx['idx'] c1 = data2['c1']; c2 = data2['c2']; var nz = idx.length / sourceN.data['N'][0]; var nan = sourceN.data['nan'][0]; for (i = 0; i < c1.length; i++) { c1[i] = nan; c2[i] = nan; } for (i = 0; i < data2_idx['length'][l+f*nz]; i++) { c1[i] = data2_['cc1'][idx[l+f*nz] + i]; c2[i] = data2_['cc2'][idx[l+f*nz] + i]; } source.trigger('change'); source2.trigger('change'); """)

    # Trace plot for the selected neuron (raw and optionally denoised).
    plot = bpl.figure(plot_width=600, plot_height=300)
    plot.line('x', 'y', source=source, line_width=1, line_alpha=0.6)
    if denoised_color is not None:
        plot.line('x', 'y2', source=source, line_width=1,
                  line_alpha=0.6, color=denoised_color)
    slider = bokeh.models.Slider(start=1, end=Y_r.shape[0], value=1, step=1,
                                 title="Neuron Number", callback=callback)

    # Image plot; y-axis inverted for conventional image orientation.
    xr = Range1d(start=0, end=image_neurons.shape[1] if max_projection else d3)
    yr = Range1d(start=image_neurons.shape[0] if max_projection else d2, end=0)
    plot1 = bpl.figure(x_range=xr, y_range=yr, plot_width=300, plot_height=300)

    if max_projection:
        plot1.image(image=[image_neurons[::-1, :]], x=0,
                    y=image_neurons.shape[0],
                    dw=image_neurons.shape[1],
                    dh=image_neurons.shape[0], palette=grayp)
        # One patch per projection of the selected component.
        plot1.patch('c1x', 'c2x', alpha=0.6, color='purple',
                    line_width=2, source=source2)
        plot1.patch('c1y', 'c2y', alpha=0.6, color='purple',
                    line_width=2, source=source2)
        plot1.patch('c1z', 'c2z', alpha=0.6, color='purple',
                    line_width=2, source=source2)
        layout = bokeh.layouts.layout([[slider],
                                       [bokeh.layouts.row(plot1, plot)]],
                                      sizing_mode="scale_width")
    else:
        slider_layer = bokeh.models.Slider(start=1, end=d1, value=linit + 1,
                                           step=1, title="Layer",
                                           callback=callback_layer)
        # Both callbacks need both sliders to index (neuron, layer).
        callback.args['slider_neuron'] = slider
        callback.args['slider_layer'] = slider_layer
        callback_layer.args['slider_neuron'] = slider
        callback_layer.args['slider_layer'] = slider_layer
        plot1.image(image='image', x='x', y='y', dw='dw', dh='dh',
                    color_mapper=cmap, source=source3)
        plot1.patch('c1', 'c2', alpha=0.6, color='purple',
                    line_width=2, source=source2)
        layout = bokeh.layouts.layout([[slider], [slider_layer],
                                       [bokeh.layouts.row(plot1, plot)]],
                                      sizing_mode="scale_width")
    bpl.show(layout)

    return Y_r
def get_contour_data(X, Y, Z, levels=(0.5, 0.7, 0.9)):
    """Extract matplotlib contour paths of *Z* into a Bokeh ColumnDataSource.

    Contours are drawn at ``levels`` (fractions of the nan-aware maximum of
    *Z*).  The returned source has columns ``xs``/``ys`` (vertex lists per
    path), ``line_color``, ``xt``/``yt`` (a label anchor at the middle
    vertex) and ``text`` (the level as a percentage).  On any failure an
    empty source with the same columns is returned so callers can always
    plot it.

    Note: the default is now a tuple instead of a list (behaviorally
    identical; avoids a mutable default argument).
    """
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from bokeh.models import ColumnDataSource
    try:
        # Use the nan-aware maximum both for the level values and for the
        # percentage labels; the original divided by Z.max(), which yields
        # 'nan%' labels whenever Z contains NaNs.
        zmax = np.nanmax(Z)
        cs = plt.contour(X, Y, Z,
                         levels=(np.array(levels) * zmax).tolist(),
                         cmap=cm.Greys_r)
        xs = []
        ys = []
        xt = []
        yt = []
        col = []
        text = []
        level_values = cs.get_array()
        for isolevelid, isolevel in enumerate(cs.collections):
            theiso = '{:.0f}%'.format(level_values[isolevelid] / zmax * 100)
            # Fixed light-gray line color for every level.
            thecol = '#%02x%02x%02x' % (220, 220, 220)
            for path in isolevel.get_paths():
                v = path.vertices
                x = v[:, 0]
                y = v[:, 1]
                xs.append(x.tolist())
                ys.append(y.tolist())
                # FIX: len(x) / 2 is a float in Python 3 and cannot be used
                # as an index; use integer division.
                xt.append(x[len(x) // 2])
                yt.append(y[len(y) // 2])
                text.append(theiso)
                col.append(thecol)
        source = ColumnDataSource(
            data={
                'xs': xs,
                'ys': ys,
                'line_color': col,
                'xt': xt,
                'yt': yt,
                'text': text
            })
    except Exception:
        # Best-effort fallback (matches the original intent) — but no longer
        # a bare except, so KeyboardInterrupt/SystemExit propagate.
        source = ColumnDataSource(data={
            'xs': [],
            'ys': [],
            'line_color': [],
            'xt': [],
            'yt': [],
            'text': []
        })
    return source
def make_calendar(year, month, firstweekday="Mon"):
    """Render one month as a bokeh Plot, highlighting US-OPM holidays.

    ``firstweekday`` is a day abbreviation (e.g. "Mon") naming the first
    column of the calendar grid.  Returns the assembled ``Plot``.
    """
    # Translate the day abbreviation into a 0-based weekday index.
    firstweekday = list(day_abbrs).index(firstweekday)
    cal = Calendar(firstweekday=firstweekday)

    # itermonthdays yields 0 for padding cells outside the month.
    month_days = [str(day) if day else None
                  for day in cal.itermonthdays(year, month)]
    month_weeks = len(month_days) // 7

    workday = "linen"
    weekend = "lightsteelblue"

    def weekday(date):
        # Column index of *date* relative to the chosen first weekday.
        return (date.weekday() - firstweekday) % 7

    def pick_weekdays(days):
        # Rotate a 7-element sequence so it starts on firstweekday.
        return [days[i % 7] for i in range(firstweekday, firstweekday + 7)]

    day_names = pick_weekdays(day_abbrs)
    week_days = pick_weekdays([workday] * 5 + [weekend] * 2)

    source = ColumnDataSource(data=dict(
        days=list(day_names) * month_weeks,
        weeks=[str(week) for week in range(month_weeks) for _ in range(7)],
        month_days=month_days,
        day_backgrounds=week_days * month_weeks,
    ))

    # Keep only US-OPM holidays falling in this month, stripping the tag.
    holidays = [(date, summary.replace("(US-OPM)", "").strip())
                for (date, summary) in us_holidays
                if date.year == year and date.month == month
                and "(US-OPM)" in summary]

    holidays_source = ColumnDataSource(data=dict(
        holidays_days=[day_names[weekday(date)] for date, _ in holidays],
        holidays_weeks=[str((weekday(date.replace(day=1)) + date.day) // 7)
                        for date, _ in holidays],
        month_holidays=[summary for _, summary in holidays],
    ))

    xdr = FactorRange(factors=list(day_names))
    ydr = FactorRange(
        factors=[str(week) for week in reversed(range(month_weeks))])
    x_scale, y_scale = CategoricalScale(), CategoricalScale()

    plot = Plot(x_range=xdr, y_range=ydr,
                x_scale=x_scale, y_scale=y_scale,
                plot_width=300, plot_height=300,
                outline_line_color=None)
    plot.title.text = month_names[month]
    plot.title.text_font_size = "12pt"
    plot.title.text_color = "darkolivegreen"
    plot.title.offset = 25
    plot.min_border_left = 0
    plot.min_border_bottom = 5

    day_rect = Rect(x="days", y="weeks", width=0.9, height=0.9,
                    fill_color="day_backgrounds", line_color="silver")
    plot.add_glyph(source, day_rect)

    holiday_rect = Rect(x="holidays_days", y="holidays_weeks",
                        width=0.9, height=0.9,
                        fill_color="pink", line_color="indianred")
    # Keep the renderer so the hover tool applies to holiday cells only.
    rect_renderer = plot.add_glyph(holidays_source, holiday_rect)

    day_label = Text(x="days", y="weeks", text="month_days",
                     text_align="center", text_baseline="middle")
    plot.add_glyph(source, day_label)

    xaxis = CategoricalAxis()
    xaxis.major_label_text_font_size = "8pt"
    xaxis.major_label_standoff = 0
    xaxis.major_tick_line_color = None
    xaxis.axis_line_color = None
    plot.add_layout(xaxis, 'above')

    hover_tool = HoverTool(plot=plot, renderers=[rect_renderer],
                           tooltips=[("Holiday", "@month_holidays")])
    plot.tools.append(hover_tool)

    return plot
def best_fit_model(self, modelgrid, report=None):
    """Perform simple fitting of the spectrum to all models in the given
    modelgrid and store the best fit

    Parameters
    ----------
    modelgrid: sedkit.modelgrid.ModelGrid
        The model grid to fit
    report: str
        The name of the parameter to plot versus the Goodness-of-fit
        statistic
    """
    # Prepare data
    spectrum = Spectrum(*self.spectrum)
    rows = [row for n, row in modelgrid.index.iterrows()]

    # Iterate over entire model grid.
    # FIX: run the pool as a context manager so worker processes are
    # always reclaimed even if fit_model raises — the original
    # close()/join() pair was skipped on error, leaking processes.
    func = partial(fit_model, fitspec=spectrum)
    with Pool(8) as pool:
        fit_rows = pool.map(func, rows)

    # Turn the results into a DataFrame and sort by goodness of fit
    models = DataFrame(fit_rows)
    models = models.sort_values('gstat')

    # Get the best fit (copied so later edits don't alias the table row)
    bf = copy.copy(models.iloc[0])

    if self.verbose:
        print(bf[modelgrid.parameters])

    if report is not None:
        # Configure plot
        tools = "pan, wheel_zoom, box_zoom, reset"
        rep = figure(tools=tools, x_axis_label=report,
                     y_axis_label='Goodness-of-fit',
                     plot_width=600, plot_height=400)

        # Single out best fit
        best = ColumnDataSource(data=models.iloc[:1])
        others = ColumnDataSource(data=models.iloc[1:])

        # Add hover tool
        hover = HoverTool(tooltips=[('label', '@label'),
                                    ('gstat', '@gstat')])
        rep.add_tools(hover)

        # Plot the fits
        rep.circle(report, 'gstat', source=best, color='red',
                   legend=bf['label'])
        rep.circle(report, 'gstat', source=others)

        # Show the plot
        show(rep)

    # Store only new best fits (deduplicate on file path)
    if bf['filepath'] not in [i['filepath'] for i in self.best_fit]:
        self.best_fit.append(bf)
def __init__(self, server, doc=None, **kwargs):
    """Build the profiling dashboard: a profile plot plus an
    activity-over-time strip, wired with selection callbacks that support
    both the pre-1.0 and post-1.0 bokeh selection APIs.
    """
    if doc is not None:
        # Hold the document weakly so this component does not keep it alive.
        self.doc = weakref.ref(doc)
    self.server = server
    self.log = self.server.io_loop.profile
    # start/stop bound the selected time window; None means "no window".
    self.start = None
    self.stop = None
    self.ts = {"count": [], "time": []}
    self.state = profile.get_profile(self.log)
    data = profile.plot_data(self.state, profile_interval)
    # "states" is kept aside; the rest feeds the profile figure directly.
    self.states = data.pop("states")
    self.profile_plot, self.source = profile.plot_figure(data, **kwargs)

    changing = [False]  # avoid repeated changes from within callback

    @without_property_validation
    def cb(attr, old, new):
        # Drill into the profile node the user clicked on.
        if changing[0]:
            return
        with log_errors():
            if isinstance(new, list):  # bokeh >= 1.0
                selected = new
            else:
                selected = new["1d"]["indices"]
            try:
                ind = selected[0]
            except IndexError:
                # Empty selection — nothing to drill into.
                return
            data = profile.plot_data(self.states[ind], profile_interval)
            # Replace the cached states in place with the sub-profile's.
            del self.states[:]
            self.states.extend(data.pop("states"))
            changing[0] = True  # don't recursively trigger callback
            update(self.source, data)
            if isinstance(new, list):  # bokeh >= 1.0
                self.source.selected.indices = old
            else:
                self.source.selected = old
            changing[0] = False

    # Selection API moved in bokeh 1.0; register on whichever exists.
    if BOKEH_VERSION >= "1.0.0":
        self.source.selected.on_change("indices", cb)
    else:
        self.source.on_change("selected", cb)

    # Secondary figure: activity count over time, with x-box selection
    # used to choose the profiling time window.
    self.ts_source = ColumnDataSource({"time": [], "count": []})
    self.ts_plot = figure(
        title="Activity over time",
        height=150,
        x_axis_type="datetime",
        active_drag="xbox_select",
        y_range=[0, 1 / profile_interval],
        tools="xpan,xwheel_zoom,xbox_select,reset",
        sizing_mode="stretch_width",
    )
    self.ts_plot.line("time", "count", source=self.ts_source)
    # Invisible circles exist only to make box-selection possible.
    self.ts_plot.circle(
        "time", "count", source=self.ts_source, color=None, selection_color="orange"
    )
    self.ts_plot.yaxis.visible = False
    self.ts_plot.grid.visible = False

    def ts_change(attr, old, new):
        # Convert the selected index span into a [start, stop] time window
        # (source times are in ms; start/stop are kept in seconds).
        with log_errors():
            try:
                selected = self.ts_source.selected.indices
            except AttributeError:
                # Older bokeh stores the selection as a nested dict.
                selected = self.ts_source.selected["1d"]["indices"]
            if selected:
                start = self.ts_source.data["time"][min(selected)] / 1000
                stop = self.ts_source.data["time"][max(selected)] / 1000
                self.start, self.stop = min(start, stop), max(start, stop)
            else:
                self.start = self.stop = None
            self.trigger_update()

    if BOKEH_VERSION >= "1.0.0":
        self.ts_source.selected.on_change("indices", ts_change)
    else:
        self.ts_source.on_change("selected", ts_change)

    self.reset_button = Button(label="Reset", button_type="success")
    self.reset_button.on_click(lambda: self.update(self.state))

    self.update_button = Button(label="Update", button_type="success")
    self.update_button.on_click(self.trigger_update)

    self.root = column(
        row(self.reset_button, self.update_button, sizing_mode="scale_width"),
        self.profile_plot,
        self.ts_plot,
        **kwargs
    )
targets_y = curr_exp_data['trials_info'].iloc[0][target_y_cols].tolist() #initial cursor location cursor_x = curr_exp_data['cursor_pos'].loc[start_time]['x'] cursor_y = curr_exp_data['cursor_pos'].loc[start_time]['y'] #burst prob and firing rate timestamps and values ts_frate = curr_rate_df.loc[start_time - 2000:start_time + 2000].index.tolist() values_frate = curr_rate_df.loc[start_time - 2000:start_time + 2000].tolist() ts_burst = curr_burst_df.loc[start_time - 2000:start_time + 2000].index.tolist() values_burst = curr_burst_df.loc[start_time - 2000:start_time + 2000].tolist() # Set up data sources source_targets = ColumnDataSource(data=dict(x=targets_x, y=targets_y)) source_pos = ColumnDataSource(data=dict(x=[cursor_x, ], y=[cursor_y, ])) source_pop_rate = ColumnDataSource(data=dict(x=ts_frate, y=values_frate)) source_burst_prob = ColumnDataSource(data=dict(x=ts_burst, y=values_burst)) source_spike_raster = ColumnDataSource(data=dict(x=spike_raster_x, y=spike_raster_y)) # Set up plots plot_pos = figure(plot_height=500, plot_width=500, x_range=(-10, 10), y_range=(-10, 10)) plot_spike_raster = figure(plot_height=200, plot_width=400, x_axis_label='time (ms)', y_axis_label='Neuron #') plot_pop_rate = figure(plot_height=150, plot_width=400, x_axis_label='time (ms)', y_axis_label='Pop. Rate (Hz)') plot_burst_prob = figure(plot_height=150, plot_width=400, x_axis_label='time (ms)', y_axis_label='Burst Probability %') plot_pos.asterisk('x', 'y', size=10, color='red', source=source_pos) plot_pos.circle('x', 'y', size=24, alpha=0.4, source=source_targets)
def __init__(self, worker, height=150, **kwargs):
    """Build the system-monitor dashboard for one worker: CPU, memory and
    bandwidth time series, plus file-descriptor count off Windows.
    """
    self.worker = worker
    quantities = worker.monitor.quantities
    self.last = 0
    self.source = ColumnDataSource({quantity: [] for quantity in quantities})
    update(self.source, self.get_data())

    x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
    tools = "reset,xpan,xwheel_zoom"

    def timeseries_figure(title):
        # Every panel shares the datetime x-range and the same tool set.
        return figure(
            title=title,
            x_axis_type="datetime",
            height=height,
            tools=tools,
            x_range=x_range,
            **kwargs
        )

    self.cpu = timeseries_figure("CPU")
    self.cpu.line(source=self.source, x="time", y="cpu")
    self.cpu.yaxis.axis_label = "Percentage"

    self.mem = timeseries_figure("Memory")
    self.mem.line(source=self.source, x="time", y="memory")
    self.mem.yaxis.axis_label = "Bytes"

    self.bandwidth = timeseries_figure("Bandwidth")
    self.bandwidth.line(source=self.source, x="time", y="read_bytes", color="red")
    self.bandwidth.line(source=self.source, x="time", y="write_bytes", color="blue")
    self.bandwidth.yaxis.axis_label = "Bytes / second"

    # Human-readable byte units on the memory and bandwidth axes.
    self.bandwidth.yaxis[0].formatter = NumeralTickFormatter(format="0.0b")
    self.mem.yaxis[0].formatter = NumeralTickFormatter(format="0.0b")

    plots = [self.cpu, self.mem, self.bandwidth]

    if not WINDOWS:
        # File descriptors are not tracked on Windows.
        self.num_fds = timeseries_figure("Number of File Descriptors")
        self.num_fds.line(source=self.source, x="time", y="num_fds")
        plots.append(self.num_fds)

    # Forward only the sizing mode (if any) to the column layout.
    kw = {"sizing_mode": kwargs["sizing_mode"]} if "sizing_mode" in kwargs else {}

    if not WINDOWS:
        self.num_fds.y_range.start = 0
    self.mem.y_range.start = 0
    self.cpu.y_range.start = 0
    self.bandwidth.y_range.start = 0

    self.root = column(*plots, **kw)
    self.worker.monitor.update()
Instructions 100 XP Define a callback function called update_plot with the parameters attr, old, new. If the new selection is 'female_literacy', update the 'y' value of the ColumnDataSource to female_literacy. Else, 'y' should be population. 'x' remains fertility in both cases. Create a dropdown select widget using Select(). Specify the parameters title, options, and value. The options are 'female_literacy' and 'population', while the value is 'female_literacy'. Attach the callback to the 'value' property of select. This can be done using on_change() and passing in 'value' and update_plot. ''' SOLUTION # Perform necessary imports from bokeh.models import ColumnDataSource, Select # Create ColumnDataSource: source source = ColumnDataSource(data={ 'x' : fertility, 'y' : female_literacy }) # Create a new plot: plot plot = figure() # Add circles to the plot plot.circle('x', 'y', source=source) # Define a callback function: update_plot def update_plot(attr, old, new): # If the new Selection is 'female_literacy', update 'y' to female_literacy if new == 'female_literacy': source.data = { 'x' : fertility, 'y' : female_literacy
from bokeh.palettes import Dark2_5
from bokeh.palettes import Inferno256

output_file("route_name_count.html")

# Further reduce the number of words for this vis: lower the frequency
# cut-off until at least `num_words` words pass it.
num_words = 50
freq = 100
# FIX: guard against an infinite loop — if the data set holds fewer than
# `num_words` rows in total, `freq` would otherwise decrease forever.
while freq > 0 and len(names_data[names_data['Count'] > freq]) < num_words:
    freq -= 1
names_data = names_data[names_data['Count'] > freq]

words = names_data['Word'].to_list()
counts = names_data['Count'].to_list()
source = ColumnDataSource(data=dict(words=words, counts=counts))

# Custom tooltip shown on hover (the word and its occurrence count).
# FIX: the style rules were written as bare div attributes
# (`<div float: left; width: 230px;>`), which browsers silently ignore;
# they belong inside a style="" attribute.
TOOLTIPS = """
    <div>
        <div style="float: left; width: 230px;">
            <div>
                <span style="float: left; margin: 10px 15px 0px 15px; font-size: 15px; width: 100px;">"@words" occurs</span>
            </div>
            <div>
                <span style="float: left; margin: 10px 15px 0px 15px; font-size: 15px; width: 100px;">@counts times</span>
            </div>
        </div>
    </div>
"""
#Add text labels x,y = polar_to_cartesian(r=text_radius, alpha=end_angle) text = Text(x=x, y=y, text=[feature.qualifiers['locus_tag'][0]], angle=end_angle, text_font_size='10px', text_align=text_align) plot.add_glyph(text) source = ColumnDataSource(dict(type=type_list, start_location=start_locations, end_location=end_locations, strand=strand_values, locus_tag=locus_tags, start_angle=end_angles, end_angle=start_angles, wedge_inner_radius=wedge_inner_radii, wedge_outer_radius=wedge_outer_radii)) annular_wedge = AnnularWedge(x=0, y=0, inner_radius='wedge_inner_radius', # For the inner_radius, use the values in the column # called 'wedge_inner_radius' outer_radius='wedge_outer_radius', # Likewise for the outer radius... line_color='white', fill_color='green', start_angle='start_angle', # ...and start and end angles end_angle='end_angle', direction='anticlock')
# Inputs for the plot county = list(df['county']) predicted = model.predict(df) # Generate plot county_xs = [county["lons"] for county in counties.values()] county_ys = [county["lats"] for county in counties.values()] color_mapper = CategoricalColorMapper(palette=["red", "green"], factors=[True, False]) source = ColumnDataSource(data=dict( x=county_xs, y=county_ys, name=county, risk=predicted, )) TOOLS = "pan,wheel_zoom,reset,hover,save" p = figure(title="Current risk of fire in California", tools=TOOLS, x_axis_location=None, y_axis_location=None) p.grid.grid_line_color = None p.patches('x', 'y', source=source, fill_color={
css_classes=["annotation-button"]) group_info_box = Div(text='', height=30, css_classes=["group-div"]) search_input_box = TextInput(title="Search:", value="", width=300) expand_button = Button(label="Expand", button_type="success", width=150, css_classes=["expand-button"]) clear_seed_button = Button(label="Clear", button_type="success", css_classes=['clear_button'], width=50) export_button = Button(label="Export", button_type="success", css_classes=['export_button'], width=100) expand_table_source = ColumnDataSource(data=empty_table) expand_table = DataTable(source=expand_table_source, columns=expand_columns, width=500, css_classes=['expand_table']) phrases_list = MultiSelect(title="", value=[], options=[], width=300, size=27, css_classes=['phrases_list']) checkbox_group = CheckboxGroup(labels=["Text annotation", checkbox_label], active=[], width=400, css_classes=['checkbox_group']) annotate_checkbox = CheckboxGroup(labels=["Text annotation"],
def figures_chisq_detailed(init_group, df_chisq):
    """Build the detailed chi-squared dashboard for one country/province.

    Parameters
    ----------
    init_group : str
        Initial CountryProv value used to filter the history plots.
    df_chisq : pandas.DataFrame
        Per-day chi-squared statistics; mutated in place (adds "dt_str").

    Returns
    -------
    tuple
        (source_hist, c_a1a, g): the shared ColumnDataSource, the
        confirmed-cases circle renderer (needed by callbacks), and the
        assembled gridplot.
    """
    # FIX: imports hoisted out of the plotting loop — they previously
    # re-executed on every iteration.
    from itertools import zip_longest
    from bokeh.models import LabelSet

    df_chisq["dt_str"] = df_chisq.Date.dt.strftime("%Y-%m-%d")
    df_latest = df_chisq.groupby("CountryProv").apply(lambda g: g.tail(1)).reset_index(drop=True)
    df_latest["color"] = "#73b2ff"
    source_hist = ColumnDataSource(df_chisq)
    source_latest = ColumnDataSource(df_latest)

    # since cannot use View with LabelSet, creating a different source per continent
    srcLatest_continent = df_latest.groupby("Continent").apply(lambda g: ColumnDataSource(g))
    srcLatest_continent = srcLatest_continent.reset_index().rename(columns={0: "src"})

    gf = GroupFilter(column_name='CountryProv', group=init_group)
    view1 = CDSView(source=source_hist, filters=[gf])

    plot_size_and_tools = {
        'plot_height': 300,
        'plot_width': 600,
        'tools': ['box_select', 'reset', 'help', 'box_zoom'],
        'x_axis_type': 'datetime',
        'tooltips': [
            ("Date", "@dt_str"),
        ],
    }

    # FIXME couldnt do p_a1.line below, so using hack of varea
    p_a1 = figure(title="Confirmed and thresholds. Below threshold: good, above: bad, within: ok", **plot_size_and_tools)
    p_a1.varea(x='Date', y1='case_ma07_lower', y2='case_ma07_upper', source=source_hist, color='pink', view=view1, fill_alpha=.7, legend_label="mean +/- std band")
    p_a1.varea(x='Date', y1='case_ma07', y2='case_ma07_eps', source=source_hist, color='red', view=view1, legend_label="7-day moving avg")
    p_a1.varea(x='Date', y1='threshold_min_eps', y2='threshold_max_eps', source=source_hist, color='green', view=view1, fill_alpha=.7, legend_label="chi-squared thresholds band")
    # NOTE: Band does not support view=, hence the varea bands above.
    c_a1a = p_a1.circle(x='Date', y='daily_conf', source=source_hist, color='black', view=view1)
    # https://stackoverflow.com/a/51540955/4126114
    # https://docs.bokeh.org/en/latest/docs/user_guide/styling.html#inside-the-plot-area
    p_a1.legend.label_text_font_size = '6pt'
    p_a1.legend.location = "top_left"

    p_a2 = figure(title="Total tests (daily vs 7-day moving avg)", **plot_size_and_tools)
    p_a2.varea(x='Date', y1='tests_ma07_lower', y2='tests_ma07_upper', source=source_hist, color='pink', view=view1)
    p_a2.varea(x='Date', y1='tests_ma07', y2="tests_ma07_eps", source=source_hist, color='red', view=view1)
    p_a2.circle(x='Date', y='daily_tests', source=source_hist, color='black', view=view1)
    p_a2.x_range = p_a1.x_range  # lock in the x axis so that zoom works simultaneously on all

    p_b1 = figure(title="Detrended cases. Negative: good, positive: bad", **plot_size_and_tools)
    p_b1.varea(x='Date', y1='thresMinMinusMid', y2='thresMaxMinusMid', source=source_hist, color='green', view=view1, legend_label="thresholds band", fill_alpha=0.7)
    p_b1.varea(x='Date', y1='caseMa07Lower_minusMid', y2='caseMa07Upper_minusMid', source=source_hist, color='pink', view=view1, legend_label="cases ma7 - threshold mid +/- std", fill_alpha=0.7)
    p_b1.varea(x='Date', y1='case_detrended', y2='caseDet_eps', source=source_hist, color='red', view=view1, legend_label="cases detrended")
    p_b1.circle(x='Date', y='case_detrended', source=source_hist, color='red', view=view1)
    p_b1.x_range = p_a1.x_range
    p_b1.legend.label_text_font_size = '6pt'
    p_b1.legend.location = "top_left"

    p_b2 = figure(title="Detrended cases percentage of raw cases", **plot_size_and_tools)
    p_b2.circle(x='Date', y='caseDet_pct', source=source_hist, color='red', view=view1)
    p_b2.x_range = p_a1.x_range

    p_c1 = figure(title="Ratio case/total (daily)", **plot_size_and_tools)
    c_c1a = p_c1.circle(x='Date', y='ratio_daily', source=source_hist, color='blue', view=view1)

    p_c2 = figure(title="Ratio case/total (7-day ma)", **plot_size_and_tools)
    p_c2.circle(x='Date', y='ratio_ma07', source=source_hist, color='blue', view=view1)

    # general-use lines
    slope_y0 = Slope(gradient=0, y_intercept=0, line_color='orange', line_width=50)
    # FIX: np.Inf was removed in NumPy 2.0; np.inf is the canonical spelling.
    slope_x0 = Slope(gradient=np.inf, y_intercept=0, line_color='orange', line_width=50)

    # scatter plot
    view_us = CDSView(source=source_latest, filters=[GroupFilter(column_name='Continent', group="US")])
    view_other = CDSView(source=source_latest, filters=[GroupFilter(column_name='Continent', group="Other")])
    TOOLTIPS = [
        ("Country/Region", "@CountryProv"),
    ]
    p_cont = []
    # Idiom: unpack iterrows() directly instead of re-binding the tuple.
    for _, srcCont_i in srcLatest_continent.iterrows():
        p_d1 = figure(plot_width=600, plot_height=400, tooltips=TOOLTIPS, title=srcCont_i.Continent)
        p_d1.scatter('case_detrended', 'case_det_diff07', source=srcCont_i.src, size=12, color='color')  # , view=view_us
        p_d1.xaxis.axis_label = 'Cases detrended: values'
        p_d1.yaxis.axis_label = 'Cases detrended: diff07'
        labels = LabelSet(x='case_detrended', y='case_det_diff07', text='cp_code', level='glyph',
                          x_offset=5, y_offset=5, source=srcCont_i.src, render_mode='canvas')
        p_d1.add_layout(labels)
        p_d1.add_layout(slope_y0)
        p_d1.add_layout(slope_x0)
        p_cont.append(p_d1)

    # group plots into 3 per row
    # https://stackoverflow.com/a/1625013/4126114
    p_cont = list(zip_longest(*(iter(p_cont),) * 3))
    # FIX: identity test for None (was `e != None`).
    p_cont = [[e for e in t if e is not None] for t in p_cont]

    g = gridplot([[p_a1, p_a2], [p_b1, p_b2], [p_c1, p_c2]] + p_cont)
    return source_hist, c_a1a, g
def bokeh_heatmap_grid(title_prefix, amino_acid_df_dict, scale=False,
                       png_dir=None, svg_dir=None):
    """Draw one amino-acid pause heatmap per entry of amino_acid_df_dict.

    Parameters
    ----------
    title_prefix : str
        Prefix used in figure titles and in the layout name.
    amino_acid_df_dict : dict or None
        Maps a key to a pandas.DataFrame whose rows are amino acids and
        whose columns are distances; values are read counts.
    scale : bool
        If True, each row of every frame is normalised in place by
        (row sum + 1).
    png_dir, svg_dir : str or None
        When given, parallel export figures are built and exported there.

    Returns
    -------
    A bokeh row layout with one heatmap per dict entry, or None when the
    input dict is None.
    """
    if amino_acid_df_dict is None:
        return None

    logging.getLogger(config.FIVEPSEQ_PLOT_LOGGER).info(
        "Plotting amino acid pauses: %s" % title_prefix)

    mainLayout = row(row(), name=title_prefix + ' amino acid pauses')

    for key in amino_acid_df_dict.keys():
        logging.getLogger(config.FIVEPSEQ_PLOT_LOGGER).info(key)
        amino_acid_df = amino_acid_df_dict.get(key)
        if amino_acid_df is not None:
            if scale:
                # FIX: vectorised row normalisation (value / (row_sum + 1))
                # replaces the per-row Python loop.  Assigning through
                # .iloc keeps the in-place mutation of the caller's frame;
                # skipna=False matches the original builtin sum() which
                # propagated NaN.
                amino_acid_df.iloc[:, :] = amino_acid_df.div(
                    amino_acid_df.sum(axis=1, skipna=False) + 1, axis=0)

            colormap = cm.get_cmap("viridis")
            bokehpalette = [
                mpl.colors.rgb2hex(m)
                for m in colormap(np.arange(colormap.N))
            ]
            mapper = LinearColorMapper(palette=bokehpalette, low=0,
                                       high=amino_acid_df.max().max())

            amino_acid_df.index.name = "aa"
            amino_acid_df.columns.name = "dist"
            df = amino_acid_df.stack().rename("value").reset_index()
            source = ColumnDataSource(df)
            key_title = get_key_title(title_prefix, key)

            def new_figure():
                # Each figure needs its own range objects, so build them
                # fresh on every call (de-duplicates the three identical
                # figure() constructions of the original).
                return figure(
                    title=key_title,
                    x_range=FactorRange(factors=list(amino_acid_df.columns)),
                    y_range=FactorRange(factors=list(amino_acid_df.index)),
                    x_axis_label="distance from amino acid",
                    y_axis_label="5'seq read counts")

            def add_rect(fig):
                # Shared glyph spec for the on-screen and export figures.
                return fig.rect(x='dist', y='aa', width=1, height=1,
                                source=source,
                                fill_color=transform('value', mapper),
                                line_color=None)

            p = new_figure()

            # figures for export
            p_png = new_figure() if png_dir is not None else None
            p_svg = new_figure() if svg_dir is not None else None

            rect = add_rect(p)
            hover = HoverTool(tooltips=[('distance', '@dist'),
                                        ('count', '@value')],
                              renderers=[rect])
            p.add_tools(hover)

            if p_png is not None:
                add_rect(p_png)
            if p_svg is not None:
                add_rect(p_svg)

            mainLayout.children[0].children.append(p)
            export_images(p_png, key_title, png_dir=png_dir)
            export_images(p_svg, key_title, svg_dir=svg_dir)

    return mainLayout
def view_results(date_folder, time_folder, res_folder):
    """Assemble the results-viewer bokeh document: header widgets, an
    overview data table tab and a cost/demand/prices graph tab, then wire
    the callbacks and load the initial data set.

    Parameters
    ----------
    date_folder, time_folder, res_folder : str
        Folder names identifying the experiment run to display.
    """
    # ------------------------------ 1. draw widgets ------------------------------ #
    # 1.1 header
    header_date, header_time, header_algorithm, header_note \
        = make_header(date_folder, time_folder, res_folder)
    header_row = row(header_date, header_time, header_algorithm, header_note,
                     sizing_mode='scale_width')

    # 1.2 summary tab

    # 1.3 overview Tab
    data_radio_button_group, data_table, source_datatable = make_overview_tab()
    layout_data = layout([[data_radio_button_group], [data_table]],
                         sizing_mode='scale_width')
    tab_data = Panel(child=layout_data, title='Overview')

    # 1.4 cost, max demand, prices and demands tab
    source_combined = ColumnDataSource()
    source_heatmap_demand = ColumnDataSource()
    source_heatmap_price = ColumnDataSource()
    graph_dict = make_graph_tab(source_combined, source_heatmap_demand,
                                source_heatmap_price)
    plot_line_cost = graph_dict["line"][k0_cost]
    plot_line_demand_max = graph_dict["line"][k0_demand_max]
    plot_heatmap_demand = graph_dict["heatmap"]["plot"][k0_demand]
    chart_heatmap_demand = graph_dict["heatmap"]["chart"][k0_demand]
    color_bar_demand = graph_dict["heatmap"]["colour"][k0_demand]
    mapper_demand = graph_dict["heatmap"]["mapper"][k0_demand]
    plot_heatmap_price = graph_dict["heatmap"]["plot"][k0_prices]
    chart_heatmap_price = graph_dict["heatmap"]["chart"][k0_prices]
    color_bar_prices = graph_dict["heatmap"]["colour"][k0_prices]
    mapper_price = graph_dict["heatmap"]["mapper"][k0_prices]

    # todo - stretch is not working yet. want to make it responsive
    row1 = row(plot_line_cost, plot_heatmap_demand)
    row2 = row(plot_line_demand_max, plot_heatmap_price)
    layout_graph = layout(column([row1, row2]), sizing_mode='stretch_both')
    tab_graph = Panel(child=layout_graph,
                      title='Cost, Max demand, Demands and Prices')

    # 1.5 demands and prices line charts
    # source_line_demands = ColumnDataSource()
    # source_line_prices = ColumnDataSource()
    # plot_demands, plot_prices = make_demand_price_tab(source_line_demands, source_line_prices)
    # layout_demands_prices = layout(row(plot_demands, plot_prices, sizing_mode='scale_both'))
    # tab_demands_prices = Panel(child=layout_demands_prices, title='Demands and Prices')

    # 1.6 overall layout all
    tabs = Tabs(tabs=[tab_data, tab_graph], sizing_mode='scale_both')
    layout_overall = layout([header_row, tabs], sizing_mode='scale_width')

    # ------------------------------ 2. event functions for widgets ------------------------------ #
    # Caches filled by update_data_source and read by the update_* helpers.
    prices_dict = dict()
    demands_prices_fw_dict = dict()
    others_combined_dict = dict()
    summary_dict = dict()

    def update_heatmap(chosen_algorithm, k0_label, source, plot, mapper,
                       chart, colour_bar):
        # Redraw one heatmap (demand or prices) for the chosen algorithm.
        data = demands_prices_fw_dict[k0_label][chosen_algorithm]
        x_periods = [str(x) for x in (list(data.columns))]
        y_iterations = [str(x) for x in (list(data.index))]
        plot.y_range.factors = y_iterations
        plot.x_range.factors = x_periods
        # Reverse rows so the first iteration is drawn at the top.
        data = data.iloc[::-1].stack().reset_index()
        data.columns = ['Iteration', 'Period', k0_label]
        source.data = data
        mapper.low = data[k0_label].min()
        mapper.high = data[k0_label].max()
        chart.update()
        colour_bar.update()

    def update_line_chart(chosen_algorithm):
        source_combined.data = others_combined_dict[chosen_algorithm]
        # plot_line_cost.y_range = Range1d(0, int(source_combined.data[k0_cost].max() * 1.1))
        plot_line_cost.update()
        # plot_line_demand_max.y_range = Range1d(0, int(source_combined.data[k0_demand_max].max() + 1))
        plot_line_demand_max.update()

    def update_data_table(active_radio_button, chosen_algorithm):
        # Radio buttons: 0 = summary, 1 = statistics, 2 = demand, 3 = prices.
        if active_radio_button == 0:  # summary
            source_datatable.data = pd.DataFrame.from_dict(summary_dict,
                                                           orient='index')
        elif active_radio_button == 1:  # statistics
            source_datatable.data = others_combined_dict[chosen_algorithm]
        elif active_radio_button == 2:  # demand
            source_datatable.data = demands_prices_fw_dict[k0_demand][
                chosen_algorithm]
        elif active_radio_button == 3:  # prices
            source_datatable.data = demands_prices_fw_dict[k0_prices][
                chosen_algorithm]
        columns_keys = source_datatable.data.keys()
        if active_radio_button == 0:
            table_columns = [
                TableColumn(field=str(i),
                            title=str(i).replace("_", " ").capitalize())
                for i in columns_keys
            ]
        else:
            # Numeric tabs get a right-aligned number formatter.
            table_columns = [
                TableColumn(field=str(i),
                            title=str(i).replace("_", " ").capitalize(),
                            formatter=NumberFormatter(format="0,0.00",
                                                      text_align="right"))
                for i in columns_keys
            ]
        data_table.columns = table_columns
        data_table.update()

    def update_header_algorithm(keys):
        # Default to the first algorithm whose key mentions "optimal".
        chosen_algorithm = [k for k in keys if "optimal" in k][0]
        header_algorithm.value = chosen_algorithm
        header_algorithm.options = keys
        header_algorithm.update()

    def update_data_source(new_time):
        # Load note, summary and area output pickles for the chosen run
        # and rebuild the cached dataframes.
        date_t = header_date.value
        time_t = new_time
        date_time_folder = results_folder + "{}/{}/".format(date_t, time_t)

        # FIX: context-managed, read-only open (was open(..., 'r+') with a
        # manual close that leaked the handle on error).
        with open(date_time_folder + 'note.txt') as f:
            str_summary = f.read()
        header_note.text = str_summary

        # FIX: dropped the redundant f2.close() calls inside the with
        # blocks — the context manager already closes the file.
        with open(date_time_folder + "summary.pkl", 'rb') as f2:
            summary = pickle.load(f2)
        to_pd(summary, summary_dict)

        # df.columns = [str(x) for x in range(len(area_res[k0][k1][0]))]
        with open(date_time_folder + "area_output.pkl", 'rb') as f2:
            area_res = pickle.load(f2)

        # Split result labels into scheduling vs pricing ("fw") groups.
        k1_scheduling_ks = []
        k1_pricing_fw_ks = []
        all_labels = area_res[k0_demand].keys()
        for label in all_labels:
            if "fw" in label:
                k1_pricing_fw_ks.append(label)
            else:
                k1_scheduling_ks.append(label)

        dict_to_pd_dt(area_res, prices_dict, [k0_demand], k1_scheduling_ks)
        dict_to_pd_dt(area_res, demands_prices_fw_dict,
                      [k0_demand, k0_prices], k1_pricing_fw_ks)
        k0_keys = [k0_demand_max, k0_par, k0_obj, k0_cost, k0_penalty, k0_step]
        if k0_time in area_res:
            k0_keys.append(k0_time)
        if k0_demand_total in area_res:
            k0_keys.append(k0_demand_total)
        combine_dict_to_pd_dt(area_res, others_combined_dict, k0_keys,
                              k1_scheduling_ks, k1_pricing_fw_ks)
        return k1_scheduling_ks, k1_pricing_fw_ks

    def callback_update_data_table(attr, old, active_radio_button):
        update_data_table(active_radio_button, header_algorithm.value)

    def callback_update_data_source(attr, old, new_time):
        # read new data source
        k1_scheduling_keys, k1_pricing_fw_keys = update_data_source(new_time)
        # update the algorithm selection
        update_header_algorithm(k1_pricing_fw_keys)
        chosen_algorithm = header_algorithm.value
        callback_switch_algorithm(None, None, chosen_algorithm)

    def callback_update_header_time_options(attr, old, new_date):
        select_opt = [
            dirs for root, dirs, _ in
            walk(results_folder + "{}".format(new_date)) if dirs != []
        ][0]
        header_time.options = select_opt
        # todo - choose the latest dataset
        header_time.value = select_opt[-1]

    def callback_switch_algorithm(attr, old, chosen_algorithm):
        # update the datatable content
        active_button = data_radio_button_group.active
        update_data_table(active_button, chosen_algorithm)
        # update the line graphs
        update_line_chart(chosen_algorithm)
        # update the heat maps
        update_heatmap(chosen_algorithm=chosen_algorithm,
                       k0_label=k0_demand,
                       source=source_heatmap_demand,
                       plot=plot_heatmap_demand,
                       mapper=mapper_demand,
                       chart=chart_heatmap_demand,
                       colour_bar=color_bar_demand)
        update_heatmap(chosen_algorithm=chosen_algorithm,
                       k0_label=k0_prices,
                       source=source_heatmap_price,
                       plot=plot_heatmap_price,
                       mapper=mapper_price,
                       chart=chart_heatmap_price,
                       colour_bar=color_bar_prices)

    # ------------------------------ 3. assign event functions to widgets ------------------------------ #
    header_date.on_change("value", callback_update_header_time_options)
    header_time.on_change("value", callback_update_data_source)
    header_algorithm.on_change("value", callback_switch_algorithm)
    data_radio_button_group.on_change("active", callback_update_data_table)
    curdoc().add_root(layout_overall)

    # ------------------------------ 4. initialise ------------------------------ #
    start_date = exp_date if exp_date is not None else str(date.today())
    start_time = exp_time if exp_time is not None else header_time.value
    callback_update_header_time_options(None, None, start_date)
    callback_update_data_source(None, None, start_time)
    callback_switch_algorithm(None, None, header_algorithm.value)