def plot_k(df):
    i_src = ColumnDataSource(df[df.open < df.close])
    d_src = ColumnDataSource(df[df.open >= df.close])
    w = 16 * 60 * 60 * 1000  # bar width in ms (16 hours)
    TOOLS = "pan,xwheel_zoom,ywheel_zoom,box_zoom,reset,save"
    p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1500,
               plot_height=640, title="MSFT Candlestick")  # hei 880
    p.toolbar.active_scroll = "auto"
    p.xaxis.major_label_orientation = pi / 4
    p.grid.grid_line_alpha = 0.3
    p.background_fill_color = "black"
    # High-low wicks
    p.segment('date', 'high', 'date', 'low', source=i_src, color="red")
    p.segment('date', 'high', 'date', 'low', source=d_src, color="green")
    # Candle bodies
    p.vbar('date', w, 'open', 'close', source=i_src, name="kline",
           fill_color="red", line_color="red")
    p.vbar('date', w, 'open', 'close', source=d_src, name="kline",
           fill_color="green", line_color="green")
    p.add_tools(HoverTool(tooltips=[("date", "@ToolTipDates"),
                                    ("close", "@close{0,0.00}"),
                                    ("high", "@high{0,0.00}"),
                                    ("low", "@low{0,0.00}")],
                          names=["kline"]))
    p.add_tools(CrosshairTool(line_color='grey'))
    inc_process(df, p)
    output_file("candlestick.html", title="candlestick.py example", mode='inline')
    # gridplot()  # stray call removed: gridplot needs a list of rows of plots
    show(p)  # open a browser
def test_gridplot_merge_tools_flat():
    p1, p2, p3, p4 = figure(), figure(), figure(), figure()
    lyt.gridplot([[p1, p2], [p3, p4]], merge_tools=True)
    for p in (p1, p2, p3, p4):
        assert p.toolbar_location is None
def test_gridplot_merge_tools_with_None():
    p1, p2, p3, p4 = figure(), figure(), figure(), figure()
    gridplot([[p1, None, p2], [p3, p4, None]], merge_tools=True)
    for p in (p1, p2, p3, p4):
        assert p.toolbar_location is None
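# The tests above verify that gridplot(merge_tools=True) removes each child
# figure's own toolbar and keeps a single grid-level toolbar. A minimal sketch of
# the same behaviour as a standalone script (hypothetical, not part of the suite):
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show

p1, p2 = figure(), figure()
p1.circle([1, 2, 3], [4, 5, 6])
p2.line([1, 2, 3], [6, 5, 4])

grid = gridplot([[p1, p2]], merge_tools=True, toolbar_location="above")
# After gridplot() runs, p1.toolbar_location and p2.toolbar_location are None;
# the combined toolbar belongs to the returned layout.
show(grid)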
def __init__(self): N = 11 self.N = N xs = np.linspace(-np.pi, np.pi, N) ys = xs Xs, Ys = np.meshgrid(xs, ys) self.Xs, self.Ys = Xs.flatten(), Ys.flatten() a, b = 1, 0 c, d = 0, 1 mat = matrix(a, b, c, d) transXs, transYs = mat @ np.array([self.Xs, self.Ys]) TOOLS = "pan,save,reset" self.dic_xs = {"Xs{}".format(step): self.Xs[N*step:N*(step+1)] for step in range(N)} self.dic_ys = {"Ys{}".format(step): self.Ys[N*step:N*(step+1)] for step in range(N)} dic_trasn_xs = {"transXs{}".format( step): transXs[N*step:N*(step+1)] for step in range(N)} dic_trans_ys = {"transYs{}".format( step): transYs[N*step:N*(step+1)] for step in range(N)} data = {**self.dic_xs, **self.dic_ys, **dic_trasn_xs, **dic_trans_ys} colors = Category20[11] self.source = ColumnDataSource(data=data) self.fig = figure(tools=TOOLS, title="target", x_range=(-np.pi*np.sqrt(2)-1, np.pi*np.sqrt(2)+1), y_range=(-np.pi*np.sqrt(2)-1, np.pi*np.sqrt(2)+1)) for s in range(N): eval("""self.fig.scatter('Xs{0}', 'Ys{0}', source=self.source, color=colors[{0}])""".format(s)) self.transfig = figure(tools=TOOLS, title="transformed", x_range=self.fig.x_range, y_range=self.fig.y_range) for s in range(N): eval("""self.transfig.scatter('transXs{0}', 'transYs{0}', source=self.source, color=colors[{0}])""".format(s)) self.a_slider = Slider(title="a", value=a, start=-10, end=10, step=0.1) self.b_slider = Slider(title="b", value=b, start=-10, end=10, step=0.1) self.c_slider = Slider(title="c", value=c, start=-10, end=10, step=0.1) self.d_slider = Slider(title="d", value=d, start=-10, end=10, step=0.1) for widget in [self.a_slider, self.b_slider, self.c_slider, self.d_slider]: widget.on_change('value', self.update_data) box = widgetbox([self.a_slider, self.b_slider, self.c_slider, self.d_slider]) self.plot = column(gridplot([[self.a_slider, self.b_slider], [self.c_slider, self.d_slider]]), gridplot([[self.fig, self.transfig]]))
def test_gridplot_merge_tools_nested():
    p1, p2, p3, p4, p5, p6, p7 = (figure(), figure(), figure(), figure(),
                                  figure(), figure(), figure())
    r1 = row(p1, p2)
    r2 = row(p3, p4)
    c = column(row(p5), row(p6))
    gridplot([[r1, r2], [c, p7]], merge_tools=True)
    for p in (p1, p2, p3, p4, p5, p6, p7):
        assert p.toolbar_location is None
def __init__(self): xs = np.linspace(-np.pi, np.pi, 11) ys = xs Xs, Ys = np.meshgrid(xs, ys) self.Xs, self.Ys = Xs.flatten(), Ys.flatten() a, b = 1, 0 c, d = 0, 1 mat = matrix(a, b, c, d) transXs, transYs = mat @ np.array([self.Xs, self.Ys]) TOOLS = "pan,lasso_select,save,reset" self.source = ColumnDataSource(data=dict(Xs=self.Xs, Ys=self.Ys, transXs=transXs, transYs=transYs)) self.evectors = ColumnDataSource(data=dict(ev0x=[0, 0], ev0y=[0, 1], ev1x=[0, 1], ev1y=[0, 0], transev0x=[0, 0], transev0y=[0, 1], transev1x=[0, 1], transev1y=[0, 0])) self.fig = figure(tools=TOOLS, title="target", x_range=(-np.pi*np.sqrt(2)-1, np.pi*np.sqrt(2)+1), y_range=(-np.pi*np.sqrt(2)-1, np.pi*np.sqrt(2)+1)) self.fig.scatter('Xs', 'Ys', source=self.source) self.fig.line('ev0x', 'ev0y', source=self.evectors, line_width=5, line_alpha=0.5, color='red') self.fig.line('ev1x', 'ev1y', source=self.evectors, line_width=5, line_alpha=0.5, color='blue') self.transfig = figure(tools=TOOLS, title="transformed", x_range=self.fig.x_range, y_range=self.fig.y_range) self.transfig.circle('transXs', 'transYs', source=self.source) self.transfig.line('transev0x', 'transev0y', source=self.evectors, line_width=5, line_alpha=0.5, color='red') self.transfig.line('transev1x', 'transev1y', source=self.evectors, line_width=5, line_alpha=0.5, color='blue') self.a_slider = Slider(title="a", value=a, start=-10, end=10, step=0.1) self.b_slider = Slider(title="b", value=b, start=-10, end=10, step=0.1) self.c_slider = Slider(title="c", value=c, start=-10, end=10, step=0.1) self.d_slider = Slider(title="d", value=d, start=-10, end=10, step=0.1) self.eigen0 = Div(text="eigen0") self.eigen1 = Div(text="eigen1") for widget in [self.a_slider, self.b_slider, self.c_slider, self.d_slider]: widget.on_change('value', self.update_data) box = widgetbox([self.a_slider, self.b_slider, self.c_slider, self.d_slider]) self.plot = column(gridplot([[self.a_slider, self.b_slider, self.eigen0], [self.c_slider, self.d_slider, self.eigen1]]), gridplot([[self.fig, self.transfig]]))
def build_columns(r_chart, dist_chart):
    # r = rchart
    # Xb = XbChart
    # mr = MovingRange(serie)
    # show()
    # return (Column(Row(r_chart, x_chart), Row(ind_chart, dist_chart)))
    return gridplot([[r_chart, dist_chart]])
def plot(self, job, typename):
    u = utils.utils(job)
    colors = d3["Category20"][20]
    hc = {}
    for i, hostname in enumerate(u.hostnames):
        hc[hostname] = colors[i % 20]
    plots = []
    schema, _stats = u.get_type(typename)
    # Plot this type of data
    for index, event in enumerate(schema):
        try:
            plot = Plot(plot_width=400, plot_height=150,
                        x_range=DataRange1d(), y_range=DataRange1d())
            for hostname, stats in _stats.items():
                rate = stats[:, index]
                if typename == "mem":
                    source = ColumnDataSource({"x": u.hours, "y": rate})
                    plot.add_glyph(source, Step(x="x", y="y", mode="after",
                                                line_color=hc[hostname]))
                else:
                    rate = numpy.diff(rate) / numpy.diff(job.times)
                    source = ColumnDataSource({"x": u.hours,
                                               "y": numpy.append(rate, rate[-1])})
                    plot.add_glyph(source, Step(x="x", y="y", mode="after",
                                                line_color=hc[hostname]))
            plots += [self.add_axes(plot, event)]
        except Exception:
            print(event + ' plot failed for jobid ' + str(job.id))
            print(sys.exc_info())
    # gridplot expects the figures as a single list when ncols is used
    return gridplot(plots, ncols=len(plots) // 4 + 1,
                    toolbar_options={"logo": None})
def initialize_plot(self, ranges=None, plots=[]): ranges = self.compute_ranges(self.layout, self.keys[-1], None) passed_plots = list(plots) plots = [[None for c in range(self.cols)] for r in range(self.rows)] for i, coord in enumerate(self.layout.keys(full_grid=True)): r = i % self.rows c = i // self.rows subplot = self.subplots.get(wrap_tuple(coord), None) if subplot is not None: plot = subplot.initialize_plot(ranges=ranges, plots=passed_plots) plots[r][c] = plot passed_plots.append(plot) else: passed_plots.append(None) plot = gridplot(plots[::-1]) plot = self._make_axes(plot) title = self._get_title(self.keys[-1]) if title: plot = Column(title, plot) self.handles['title'] = title self._update_callbacks(plot) self.handles['plot'] = plot self.handles['plots'] = plots if self.shared_datasource: self.sync_sources() self.drawn = True return self.handles['plot']
def main():
    # xs = np.linspace(-np.pi, np.pi, 100, endpoint=True)  # overridden by the next line
    xs = np.linspace(0, 4 * np.pi, 100)
    ys_exp = np.exp(xs)
    ys_sin = np.sin(xs)
    ys_cos = np.cos(xs)
    ys_tan = np.tan(xs)

    output_file("grid_example.html")

    fig1 = figure(width=250, plot_height=250, title=None)
    fig1.circle(xs, ys_exp, size=10, color="navy", alpha=0.5)
    # Sharing x_range links panning/zooming between fig1 and fig2.
    fig2 = figure(width=250, plot_height=250, x_range=fig1.x_range, title=None)
    fig2.triangle(xs, ys_sin, size=10, color="firebrick", alpha=0.5)
    # fig3 shares both ranges with fig2.
    fig3 = figure(width=250, height=250, x_range=fig2.x_range, y_range=fig2.y_range, title=None)
    fig3.square(xs, ys_cos, color="olive")
    fig4 = figure(width=250, height=250, title=None)
    fig4.line(xs, ys_tan, color="green")

    show(gridplot([[fig1, fig2], [fig3, fig4]]))
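# The grid above fixes the shape with nested lists. Passing a flat list plus
# ncols gives the same layout and is easier to build in a loop; a small,
# self-contained sketch of that variant (not taken from the original code):
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show

xs = np.linspace(0, 4 * np.pi, 100)
figs = []
for func in (np.exp, np.sin, np.cos, np.tan):
    f = figure(width=250, height=250, title=func.__name__)
    f.line(xs, func(xs))
    figs.append(f)

# ncols chops the flat list into rows of two, i.e. a 2x2 grid here.
show(gridplot(figs, ncols=2))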
def plot_feat_counts(bt, datadir, n_proc=1): def count_reads_in_features_this(feat): return count_reads_in_features(bt, features_fn=feat) def total_feature_length(bed_obj): df = bed_obj.to_dataframe() return sum(df['end'] - df['start']) t_utr = pybedtools.BedTool(datadir + '/3_utr.gff') f_utr = pybedtools.BedTool(datadir + '/5_utr.gff') cds = pybedtools.BedTool(datadir + '/cds.gff') exon = pybedtools.BedTool(datadir + '/exon.gff') intergenic = pybedtools.BedTool(datadir + '/intergenic.bed') intron = pybedtools.BedTool(datadir + '/intron.gff') features = (t_utr, f_utr, cds, exon, intergenic, intron) feat_names = '3_utr 5_utr cds exon intergenic intron'.split() with multiprocessing.Pool(processes=n_proc) as pool: results = pool.map(count_reads_in_features_this, features) with multiprocessing.Pool(processes=n_proc) as pool: features_length = pool.map(total_feature_length, features) counts_per_feature = pd.Series(results, feat_names) features_length = pd.Series(features_length, feat_names) return gridplot([[ plot_vbar(counts_per_feature, title='Peaks per feature'), plot_vbar(features_length, title='Feature length')]])
def __init__(self):
    xs = np.linspace(-np.pi, np.pi, 11)
    ys = xs
    Xs, Ys = np.meshgrid(xs, ys)
    self.Xs, self.Ys = Xs.flatten(), Ys.flatten()
    initdegree = 0
    mat = rot_mat(initdegree)
    transXs, transYs = mat @ np.array([self.Xs, self.Ys])
    TOOLS = "pan,lasso_select,save,reset"
    self.source = ColumnDataSource(data=dict(Xs=self.Xs, Ys=self.Ys,
                                             transXs=transXs, transYs=transYs))
    self.fig = figure(tools=TOOLS, title="target",
                      x_range=(-np.pi * np.sqrt(2) - 1, np.pi * np.sqrt(2) + 1),
                      y_range=(-np.pi * np.sqrt(2) - 1, np.pi * np.sqrt(2) + 1))
    self.fig.circle('Xs', 'Ys', source=self.source)
    self.transfig = figure(tools=TOOLS, title="transformed",
                           x_range=self.fig.x_range, y_range=self.fig.y_range)
    self.transfig.circle('transXs', 'transYs', source=self.source, size=6)
    self.rot_param = Slider(title="degree", value=0, start=0, end=360, step=1)
    self.rot_param.on_change('value', self.update_data)
    self.plot = column(self.rot_param, gridplot([[self.fig, self.transfig]]))
def data_retrieval():
    conn = lite.connect('/Users/shanekenny/PycharmProjects/WiFinder/app/website/WiFinderDBv02.db')
    with conn:
        df = pd.read_sql_query("SELECT W.Log_Count, W.Time, W.Hour, W.Datetime, R.RoomID, R.Capacity, C.ClassID, C.Module, C.Reg_Students, O.Occupancy, O.OccID FROM WIFI_LOGS W JOIN CLASS C ON W.ClassID = C.ClassID JOIN ROOM R ON C.Room = R.RoomID JOIN OCCUPANCY O ON C.ClassID = O.ClassID WHERE R.RoomID = 'B002' AND W.Datetime = '2015-11-12' GROUP BY W.LogID;", conn)
        df['Time'] = df['Time'].apply(pd.to_datetime)

        p = figure(width=800, height=250, x_axis_type="datetime")
        p.extra_y_ranges = {"foo": Range1d(start=0, end=1)}
        p.line(df['Time'], df['Log_Count'], color='red', legend='Log Count')
        p.line(df['Time'], df['Reg_Students'], color='green', legend='Registered Students')
        p.line(df['Time'], df['Capacity'], color='blue', legend='Capacity')
        p.line(df['Time'], df['Occupancy'] * 100, color='orange', legend='Occupancy')
        p.add_layout(LinearAxis(y_range_name="foo"), 'left')

        p2 = figure(width=800, height=250, x_axis_type="datetime", x_range=p.x_range)
        p2.line(df['Time'], df['Log_Count'], color='red', legend='Log Count')

        r = gridplot([[p, p2]], toolbar_location=None)

        js_resources = INLINE.render_js()
        css_resources = INLINE.render_css()
        script, div = components(r)
        return flask.render_template(
            'explore.html',
            script=script,
            div=div,
            js_resources=js_resources,
            css_resources=css_resources,
        )
def __init__(self, target): self.target = target[::-1] self.source1 = ColumnDataSource(data=dict(image=[self.target])) self.alpha = Slider(title="alpha", value=30, start=10, end=50, step=1) self.sigma = Slider(title="sigma", value=3, start=1, end=20, step=1) self.fig1 = self.define_figure('image') self.regist_image(self.fig1,self.source1) blurred = ndi.gaussian_filter(self.target, sigma=self.sigma.value) self.source2 = ColumnDataSource(data=dict(image=[blurred])) self.fig2 = self.define_figure('blurred') self.regist_image(self.fig2,self.source2) filtered = ndi.gaussian_filter(blurred, sigma=1) sharped = blurred+self.alpha.value*(blurred-filtered) sharped = sharped.astype(np.uint8) self.source3 = ColumnDataSource(data=dict(image=[sharped])) self.fig3 = self.define_figure('sharped') self.regist_image(self.fig3,self.source3) widget_list = [self.alpha, self.sigma] for widget in widget_list: widget.on_change('value', self.update_data) inputs = widgetbox(*[widget_list]) self.plot = row(inputs, gridplot( [[self.fig1, self.fig2, self.fig3]]), width=600)
def main():
    xs = np.linspace(-np.pi, np.pi, 10)
    ys = xs
    Xs, Ys = np.meshgrid(xs, ys)
    Xs, Ys = Xs.flatten(), Ys.flatten()
    theta = np.deg2rad(30)
    mat = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    # The rotation matrix above is immediately replaced by a singular matrix;
    # remove the next line to see the 30-degree rotation instead.
    mat = np.array([[1, 1], [1, 1]])
    transXs, transYs = mat @ np.array([Xs, Ys])

    output_file("lasso_selector.html")
    TOOLS = "pan,lasso_select,save,reset"
    source = ColumnDataSource(data=dict(Xs=Xs, Ys=Ys, transXs=transXs, transYs=transYs))
    f = figure(tools=TOOLS, title="target",
               x_range=(-np.pi * np.sqrt(2) - 1, np.pi * np.sqrt(2) + 1),
               y_range=(-np.pi * np.sqrt(2) - 1, np.pi * np.sqrt(2) + 1))
    f.circle('Xs', 'Ys', source=source)
    transf = figure(x_range=f.x_range, y_range=f.y_range, tools=TOOLS, title="trans")
    transf.circle('transXs', 'transYs', source=source, size=6)
    grid = gridplot([[f, transf]])
    show(column(Div(text="<h2>Transform before and after</h2>"), grid))
def return_plot(self):
    grid = gridplot([[self.plots['plotf_1'], self.plots['plotf_2'], self.plots['plotf_3']],
                     [self.plots['plotm_1'], self.plots['plotm_2'], self.plots['plotm_3']]])
    return grid
def plot_cross_variograms(self, iter_plot=200, trace=None, experimental=None): """ Plot the analytical cross-variogram of a given MCMC inference Args: iter_plot (int): Number of traces to plot trace (pymc3.trace): trace with the sill, range and weights of each property experimental (bool): if True plot the experimental variogram as well Returns: None """ if not trace: trace = self.trace assert trace, 'set the trace to the object' n_exp = self.n_exp n_gauss = self.n_gauss lags = self.lags # DEP- n_equations = trace['weights'].shape[1] n_iter = trace['weights'].shape[0] lags_tiled = np.tile(lags, (iter_plot, 1)) b_var = [] for i in range(0, self.n_properties): # DEP- n_equations, (n_exp+n_gaus)): # Init tensor b = np.zeros((len(lags), n_iter, 0)) for i_exp in range(0, n_exp): b = np.dstack((b, trace['weights'][:, i_exp + i * (n_exp + n_gauss)] * exp_vario(lags, trace['sill'][:, i_exp], trace['range'][:, i_exp]))) for i_gauss in range(n_exp, n_gauss + n_exp): b = np.dstack((b, trace['weights'][:, i_gauss + i * (n_exp + n_gauss)] * gaus_vario(lags, trace['sill'][:, i_gauss], trace['range'][:, i_gauss]))) # Sum the contributins of each function b_all = b.sum(axis=2) # Append each variable b_var.append(b_all[:, -iter_plot:].T) # Bokeh code to plot this p_all = [] for e, el in enumerate(self.properties): p = bp.figure()#x_axis_type="log") p.multi_line(list(lags_tiled), list(b_var[e]), color='olive', alpha=0.08) if experimental: p.scatter(self.lags, y=self.exp_var[el], color='navy', size=2) p.title.text = el p.xaxis.axis_label = "lags" p.yaxis.axis_label = "Semivariance" p_all = np.append(p_all, p) grid = bl.gridplot(list(p_all), ncols=5, plot_width=250, plot_height=150) show(grid)
def test_gridplot_None():
    def p():
        p = figure()
        p.circle([1, 2, 3], [4, 5, 6])
        return p

    p0, p1, p2, p3 = p(), p(), p(), p()
    g = gridplot([[p0, p1], [None, None], [p2, p3]], toolbar_location=None)

    assert isinstance(g, GridBox) and len(g.children) == 4
    assert g.children == [(p0, 0, 0), (p1, 0, 1), (p2, 2, 0), (p3, 2, 1)]
def plot_chr_counts(assembly, dataframe):
    chr_size = pybedtools.chromsizes(assembly)
    chromsizes = {k: chr_size[k][1] - chr_size[k][0] for k in chr_size}
    keys = dataframe['chrom'].value_counts().sort_values(ascending=True).index.tolist()
    return gridplot([[
        plot_vbar(pd.Series(dataframe['chrom']), count=True, keys=keys,
                  title='Counts per chromosome'),
        plot_vbar(pd.Series(chromsizes), keys=keys,
                  title=assembly + ' Chromosome size')]])
def plot_cross_covariance(self, nuggets=False, iter_plot=200): """ Plot the cross covariance for the given properties Args: nuggets (numpy.array): subtracted nuggets iter_plot (int): number of traces to plot Returns: None """ n_exp = self.n_exp n_gauss = self.n_gauss trace = self.trace lags = self.lags n_equations = trace['weights'].shape[1] n_iter = trace['weights'].shape[0] lags_tiled = np.tile(lags, (iter_plot, 1)) b_var = [] for i in range(0, self.n_properties): # n_equations, (n_exp+n_gaus)): # Init tensor b = np.zeros((len(lags), n_iter, 0)) for i_exp in range(0, n_exp): # print(i_exp, "exp") b = np.dstack((b, trace['weights'][:, i_exp + i * (n_exp + n_gauss)] * exp_vario(lags, trace['sill'][:, i_exp], trace['range'][:, i_exp]))) for i_gaus in range(n_exp, n_gauss + n_exp): # print(i_gaus) b = np.dstack((b, trace['weights'][:, i_gaus + i * (n_exp + n_gauss)] * gaus_vario(lags, trace['sill'][:, i_gaus], trace['range'][:, i_gaus]))) # Sum the contributins of each function if nuggets: b_all = 1 - (b.sum(axis=2) + self.nuggets[i]) else: b_all = 1 - (b.sum(axis=2)) # Append each variable b_var.append(b_all[:, -iter_plot:].T) p_all = [] for e, el in enumerate(self.properties): p = bp.figure(x_axis_type="log") p.multi_line(list(lags_tiled), list(b_var[e]), color='olive', alpha=0.08) p.title.text = el p.xaxis.axis_label = "lags" p.yaxis.axis_label = "Semivariance" p_all = np.append(p_all, p) grid = bl.gridplot(list(p_all), ncols=5, plot_width=250, plot_height=150) show(grid)
def do_clusters_plot(experiment, show=True, image=False, html_file='clustersplot.html'): '''Create a series of line plots from k-means-clustered normalised expression data' Args: experiment (Experiment instance): An instance of the Experiment class. show (Boolean): Should the plot be shown. image (Boolean): Should the plot be saved as an image. html_file (string): The name of the html output file. ''' ## Creates a new data frame with nornalised expression data and cluster ## numbers from k-means clustering. cluster_df = get_clusters(experiment, how='kmeans') samples = experiment.get_sampleids() clusters = cluster_df['cluster'].unique() ## Set up the plot and create a list to add the plots to. bp.output_file(html_file) plots = [] ## For each cluster from the k-means, create a line plot. for c in clusters: cluster = cluster_df[cluster_df['cluster'] == c] vals = cluster[samples] xvals = vals.columns.values.tolist() yvals = vals.values.tolist() clusterplot = create_standard_plot(tools='save', x_range=xvals) clusterplot.xaxis.major_label_orientation = np.pi/3 clusterplot.yaxis.axis_label = 'normalised expression' for y in yvals: clusterplot.line(xvals, y, legend=False) plots.append([clusterplot]) ## Make a grid from the plots. grid = bl.gridplot(children=plots) ## Shows the plot. if show == True: bp.show(grid) else: bp.save(obj=grid) height = 600 * len(clusters) if image == True: render_plot_to_png(html_file, height=height, width=600, crop='side') return html_file
def modify_document(self, doc):
    curdoc().clear()
    # doc = curdoc()
    try:
        curdoc().remove_periodic_callback(self._pcb)
    except Exception:
        pass
    doc.clear()
    self.build_plot()
    layout = gridplot(self.plots, ncols=2)
    doc.add_root(layout)
    self._pcb = doc.add_periodic_callback(self.update_data, 10000)
    return doc
def feature_scatterplot(fset_path, features_to_plot):
    """Create scatter plot of feature set.

    Parameters
    ----------
    fset_path : str
        Path to feature set to be plotted.
    features_to_plot : list of str
        List of feature names to be plotted.

    Returns
    -------
    (str, str)
        Returns (docs_json, render_items) json for the desired plot.
    """
    fset, data = featurize.load_featureset(fset_path)
    fset = fset[features_to_plot]
    colors = cycle(palette[5])
    plots = np.array([[figure(width=300, height=200)
                       for j in range(len(features_to_plot))]
                      for i in range(len(features_to_plot))])

    for (j, i), p in np.ndenumerate(plots):
        if (j == i == 0):
            p.title.text = "Scatterplot matrix"
        p.circle(fset.values[:, i], fset.values[:, j], color=next(colors))
        p.xaxis.minor_tick_line_color = None
        p.yaxis.minor_tick_line_color = None
        p.ygrid[0].ticker.desired_num_ticks = 2
        p.xgrid[0].ticker.desired_num_ticks = 4
        p.outline_line_color = None
        p.axis.visible = False

    # gridplot takes "ncols" and "merge_tools" (not "ncol"/"mergetools") and has
    # no "title" option; the title is set on the first subplot above.
    plot = gridplot(plots.tolist(), ncols=len(features_to_plot),
                    merge_tools=True, responsive=True)

    # Convert plot to json objects necessary for rendering with bokeh on the
    # frontend
    render_items = [{'docid': plot._id, 'elementid': make_id()}]
    doc = Document()
    doc.add_root(plot)
    docs_json_inner = doc.to_json()
    docs_json = {render_items[0]['docid']: docs_json_inner}
    docs_json = serialize_json(docs_json)
    render_items = serialize_json(render_items)
    return docs_json, render_items
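# The docid/render_items handling above targets an older Bokeh embedding API. A
# possible modern alternative (assuming Bokeh >= 1.1, not necessarily what this
# project uses) serializes a gridplot for the frontend with bokeh.embed.json_item:
import json
from bokeh.embed import json_item
from bokeh.layouts import gridplot
from bokeh.plotting import figure

p1, p2 = figure(width=300, height=200), figure(width=300, height=200)
p1.circle([1, 2, 3], [4, 5, 6])
p2.circle([1, 2, 3], [6, 5, 4])

grid = gridplot([[p1, p2]], merge_tools=True)
# json.dumps(...) can be returned from a web endpoint; the browser renders it
# with Bokeh.embed.embed_item(item, "target-div-id").
item = json.dumps(json_item(grid, "target-div-id"))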
def plot_biotype_counts(bt, ensembl_gtf):
    bt_gtf = pybedtools.BedTool(ensembl_gtf) \
        .filter(filter_by_feature, 'gene') \
        .each(add_chr) \
        .saveas()
    biotype_result = bt_gtf.intersect(bt, wa=True, wb=True)
    reg_type = {x['gene_id']: x.attrs['gene_biotype'] for x in biotype_result}
    gene_type = {x['gene_id']: x.attrs['gene_biotype'] for x in bt_gtf}
    reg_type = pd.Series(list(reg_type.values()))
    return gridplot([[
        plot_vbar(reg_type.value_counts(), keys=list(reg_type.unique()),
                  title='Counts per biotype'),
        plot_vbar(pd.Series(list(gene_type.values())), count=True,
                  keys=list(reg_type.unique()), title='Total')]])
def main():
    xs = np.linspace(-np.pi, np.pi, 11)
    ys = xs
    Xs, Ys = np.meshgrid(xs, ys)
    Xs, Ys = Xs.flatten(), Ys.flatten()
    mat = rot_mat(init_degree)
    transXs, transYs = mat @ np.array([Xs, Ys])

    output_file("lasso_selector.html")
    TOOLS = "pan,lasso_select,save,reset"
    source = ColumnDataSource(data=dict(Xs=Xs, Ys=Ys, transXs=transXs, transYs=transYs))
    f = figure(tools=TOOLS, title="target",
               x_range=(-np.pi * np.sqrt(2) - 1, np.pi * np.sqrt(2) + 1),
               y_range=(-np.pi * np.sqrt(2) - 1, np.pi * np.sqrt(2) + 1))
    f.circle('Xs', 'Ys', source=source)
    transf = figure(x_range=f.x_range, y_range=f.y_range, tools=TOOLS, title="trans")
    transf.circle('transXs', 'transYs', source=source, size=6)

    degree = init_degree

    def update():
        nonlocal degree
        degree += 1
        new_mat = rot_mat(degree)
        transXs, transYs = new_mat @ np.array([Xs, Ys])
        source.data = dict(Xs=Xs, Ys=Ys, transXs=transXs, transYs=transYs)
        transf.title.text = "theta={}".format(degree % 360)

    grid = gridplot([[f, transf]])
    plot = column(Div(text="<h2>Transform before and after</h2>"), grid)
    document = curdoc()
    document.add_root(plot)
    document.add_periodic_callback(update, 60)
def make_plot(): '''Make the plot figures.''' ts = [] ts.append(figure( width=width, height=height, title="Audio", y_axis_label=None, x_range=(0,30), tools=tools[0], webgl=True, tags=['audio_fig'], logo=None ) ) ts[0].line('x', 'au', source=source, tags=['update_ts']) ts[0].x_range.on_change('end', update_ts) ts[0].circle('x', 'au', source=source, size=0.1, tags=['update_ts']) cursel = BoxAnnotation(left=0, right=0, fill_alpha=0.1, fill_color='blue', tags=['cursel']) ts[0].add_layout(cursel) ts.append(figure( width=width, height=height, title="P1", y_axis_label='', x_range=ts[0].x_range, tools=tools[1], webgl=True, tags=['p1_fig'], logo=None ) ) ts[1].line('x', 'p1', source=source, tags=['update_ts']) ts[1].circle('x', 'p1', source=source, size=0.1, tags=['update_ts']) cursel = BoxAnnotation(left=0, right=0, fill_alpha=0.1, fill_color='blue', tags=['cursel']) ts[1].add_layout(cursel) ts.append(figure( width=width, height=height, title="P2", x_axis_label='seconds', y_axis_label='', x_range=ts[0].x_range, tools=tools[2], webgl=True, tags=['p2_fig'], logo=None ) ) ts[2].line('x', 'p2', source=source, tags=['update_ts']) ts[2].circle('x', 'p2', source=source, size=0.1, tags=['update_ts']) cursel = BoxAnnotation(left=0, right=0, fill_alpha=0.1, fill_color='blue', tags=['cursel']) ts[2].add_layout(cursel) gp = gridplot([[ts[0]], [ts[1]], [ts[2]]]) return (gp, ts[0])
def fn_main(): df = pd.DataFrame(AAPL)[:2000] df["date"] = pd.to_datetime(df["date"]) df['ToolTipDates'] = df.date.map(lambda x: x.strftime("%y-%m-%d")) # Saves work with the tooltip later # print(df.head(3)) TOOL_k = "pan,xwheel_zoom,ywheel_zoom,box_zoom,reset" TOOL_v = "pan,ywheel_zoom" TOOL_m = "pan,ywheel_zoom" p_k = figure(x_axis_type="datetime", tools=TOOL_k, plot_width=1504, plot_height=600) # title = "MSFT Candlestick") # hei 880 p_v = figure(x_axis_type="datetime", tools=TOOL_v, plot_width=1504, plot_height=160) # title="volume" p_m = figure(x_axis_type="datetime", tools=TOOL_m, plot_width=1504, plot_height=200) # hei 880 p_k.add_tools(CrosshairTool(line_color='#999999')) p_m.add_tools(CrosshairTool(line_color='#999999')) p_v.add_tools(CrosshairTool(line_color='#999999')) p_k.x_range = p_v.x_range = p_m.x_range # 3 link must at one line p_k.xaxis.visible = p_v.yaxis.visible = p_v.xaxis.visible = False df['ma20'] = [0.0 for i in range(len(df))] fn_ma(df, 'ma20', 20) df['long'] = [0.0 for i in range(len(df))] df['short'] = [0.0 for i in range(len(df))] df['diff'] = fn_ema(df, 'close', 'short', 12) - fn_ema(df, 'close', 'long', 26) df['dea'] = fn_ema(df, 'diff', 'dea', 9) df['macd'] = 2*(df['diff']-df['dea']) all_source = ColumnDataSource(df) tbl_bi = pd.DataFrame(columns=('date', 'price')) fn_plot_kline(df, p_k, all_source) fn_plot_fenbi(df, p_k, tbl_bi) fn_plot_segmt(p_k, tbl_bi) fn_plot_volume(df, p_v) fn_plot_macd(df, p_m, all_source) output_file("chan.html", title="chan", mode='inline') grid = gridplot([[p_k], [p_v], [p_m]], merge_tools=False, responsive=True) grid.sizing_mode = 'stretch_both' show(grid) pass
def plot_grid(figs, width=700, height=700):
    '''
    Sets up a grid of bokeh plots. `figs` is a list of rows of figures or widgets.
    For example, a 2x2 grid of plots is [[f1, f2], [f3, f4]].
    Width and height are the dimensions of the whole grid.
    Doesn't quite work well with widgets or colorbars. Best to plot these separately.
    '''
    from bokeh.layouts import gridplot
    from bokeh.plotting.figure import Figure

    # Scale all plots, keeping aspect ratio constant.
    numrows = len(figs)
    for row in figs:
        numfigs = 0
        for fig in row:
            # Count the number of figures (excluding widgets).
            numfigs += 1 if type(fig) is Figure else 0
        for fig in row:
            try:
                max_width = fig.plot_width
                max_height = fig.plot_height
                if fig.plot_width > int(width / numfigs):
                    max_width = int(width / numfigs)
                if fig.plot_height > int(height / numrows):
                    max_height = int(height / numrows)
                scale_factor_width = max_width / fig.plot_width
                scale_factor_height = max_height / fig.plot_height
                scale_factor = min(scale_factor_width, scale_factor_height)
                fig.plot_width = int(scale_factor * fig.plot_width)
                fig.plot_height = int(scale_factor * fig.plot_height)
            except:
                pass  # do nothing with widgets and cross fingers

    ## Center plots - NOT WORKING
    # for row in figs:
    #     numfigs = len(row)
    #     total_width = 0
    #     for fig in row:
    #         total_width += fig.plot_width
    #     row[0].min_border_left = int((width - total_width) / 2)    # pad left
    #     row[-1].min_border_right = int((width - total_width) / 2)  # pad right

    return gridplot(figs, merge_tools=True)
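# A hedged usage sketch for plot_grid() above: two figures side by side, scaled to
# fit a 700x700 overall grid. The figure contents here are made up for illustration.
from bokeh.io import show
from bokeh.plotting import figure

f1 = figure(plot_width=500, plot_height=400, title="left")
f1.line([0, 1, 2], [0, 1, 4])
f2 = figure(plot_width=500, plot_height=400, title="right")
f2.circle([0, 1, 2], [4, 1, 0])

# plot_grid shrinks each figure so the row fits in width=700 while keeping its
# aspect ratio, then hands the rows to bokeh.layouts.gridplot with merge_tools=True.
show(plot_grid([[f1, f2]], width=700, height=700))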
def test_gridplot_None():
    def p():
        p = figure()
        p.circle([1, 2, 3], [4, 5, 6])
        return p

    g = gridplot([[p(), p()], [None, None], [p(), p()]])

    assert isinstance(g, Column) and len(g.children) == 2
    c = g.children[1]
    assert isinstance(c, Column) and len(c.children) == 3
    r = c.children[1]
    assert isinstance(r, Row) and len(r.children) == 2
    s0 = r.children[0]
    assert isinstance(s0, Spacer) and s0.width == 0 and s0.height == 0
    s1 = r.children[1]
    assert isinstance(s1, Spacer) and s1.width == 0 and s1.height == 0
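# The two test_gridplot_None variants in this collection expect different layout
# trees: the older one receives a Column of Rows with zero-sized Spacers in the
# None positions, the newer one a GridBox whose children are (plot, row, col)
# tuples. A small sketch (assuming a recent Bokeh) for inspecting which form a
# given installation returns:
from bokeh.layouts import gridplot
from bokeh.plotting import figure

p1, p2 = figure(), figure()
p1.circle([1, 2], [3, 4])
p2.line([1, 2], [4, 3])

g = gridplot([[p1, None], [None, p2]], toolbar_location=None)
# Printing the children shows either Row/Spacer nesting or (model, row, col)
# placements, depending on the Bokeh version.
print(type(g).__name__, g.children)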
def _draw_plots(self, scaffolder): '''Setup all plots.''' self.contig_read_src = ColumnDataSource(dict( reads=[scaffolder.nrReads], contigs=[scaffolder.nrContigs], n50=[scaffolder.N50])) # Calculate data for contig circle plot circle = self._calculate_circle(scaffolder) self.contig_dist_src = ColumnDataSource(dict( start=circle[0], stop=circle[1], colors=circle[2], contigs=circle[3])) self.read_src = ColumnDataSource(dict( nrReads=[], nrPassReads=[], nrFailReads=[], readTime=[])) self.read_hist_src = ColumnDataSource(dict( readLength=[], left=[], right=[])) # Draw plots contigNrPlot = self._draw_contigNrPlot(scaffolder) n50Plot = self._draw_n50Plot() contigCirclePlot = self._draw_contigCirclePlot() readPlot = self._draw_readCountPlot() #readHist = self._draw_readLenHistPlot() # Position plots layout = gridplot([[n50Plot, contigNrPlot], [contigCirclePlot, readPlot]]) try: session = push_session(curdoc()) session.show(layout) except IOError: sys.exit("No bokeh server is running on this host")
"y": df["sepal_width"], "x1": df["petal_length"], "y1": df["petal_width"], "target": df["target"] }) p1 = figure(title="鳶尾花資料集-花萼") p1.circle(x="x", y="y", source=data, size=15, color={ "field": "target", "transform": c_map }, legend="target") p2 = figure(title="鳶尾花資料集-花瓣") p2.circle(x="x1", y="y1", source=data, size=15, color={ "field": "target", "transform": c_map }, legend="target") menu = [("setosa", "1"), ("virginica", "2"), ("versicolor", "3")] mnu = Dropdown(label="鳶尾花種類", menu=menu) layout = gridplot([mnu, None], [p1, p2]) show(layout)
def initialize_plot(self, plots=None, ranges=None): ranges = self.compute_ranges(self.layout, self.keys[-1], None) passed_plots = [] if plots is None else plots plots = [[] for _ in range(self.rows)] tab_titles = {} insert_rows, insert_cols = [], [] adjoined = False for r, c in self.coords: subplot = self.subplots.get((r, c), None) if subplot is not None: shared_plots = passed_plots if self.shared_axes else None subplots = subplot.initialize_plot(ranges=ranges, plots=shared_plots) # Computes plotting offsets depending on # number of adjoined plots offset = sum(r >= ir for ir in insert_rows) if len(subplots) > 2: adjoined = True # Add pad column in this position insert_cols.append(c) if r not in insert_rows: # Insert and pad marginal row if none exists plots.insert(r+offset, [None for _ in range(len(plots[r]))]) # Pad previous rows for ir in range(r): plots[ir].insert(c+1, None) # Add to row offset insert_rows.append(r) offset += 1 # Add top marginal plots[r+offset-1] += [subplots.pop(-1), None] elif len(subplots) > 1: adjoined = True # Add pad column in this position insert_cols.append(c) # Pad previous rows for ir in range(r): plots[r].insert(c+1, None) # Pad top marginal if one exists if r in insert_rows: plots[r+offset-1] += 2*[None] else: # Pad top marginal if one exists if r in insert_rows: plots[r+offset-1] += [None] * (1+(c in insert_cols)) plots[r+offset] += subplots if len(subplots) == 1 and c in insert_cols: plots[r+offset].append(None) passed_plots.append(subplots[0]) if self.tabs: title = subplot.subplots['main']._format_title(self.keys[-1], dimensions=False) if not title: title = ' '.join(self.paths[r,c]) tab_titles[r, c] = title else: plots[r+offset] += [empty_plot(0, 0)] # Replace None types with empty plots # to avoid bokeh bug plots = layout_padding(plots, self.renderer) # Wrap in appropriate layout model kwargs = dict(sizing_mode=self.sizing_mode) if self.tabs: panels = [Panel(child=child, title=str(tab_titles.get((r, c)))) for r, row in enumerate(plots) for c, child in enumerate(row) if child is not None] layout_plot = Tabs(tabs=panels) else: plots = filter_toolboxes(plots) plots, width = pad_plots(plots) layout_plot = gridplot(children=plots, width=width, **kwargs) title = self._get_title(self.keys[-1]) if title: self.handles['title'] = title layout_plot = Column(title, layout_plot, **kwargs) self._update_callbacks(layout_plot) self.handles['plot'] = layout_plot self.handles['plots'] = plots if self.shared_datasource: self.sync_sources() self.drawn = True return self.handles['plot']
def _plot_future(time, data, legend=None, title=None, y_axis_label=None, hor_lines=None, hor_lines_leg=None, vert_lines=None, vert_lines_leg=None, apply_opensignals_style=True, show_plot=True, warn_print=False, **kwargs): """ Plotting function intended for an easy representation of OpenSignals acquired data. ---------- Parameters ---------- time : list or list of lists List that contains the time axis samples or a list of lists, when it is intended to present data in a gridplot format. When the input is a list of lists the following structure must be respected: Gridplot with N rows and M columns [[cell_row_0_column_0, cell_row_0_column_1, ..., cell_row_0_column_M], [cell_row_1_column_0, cell_row_1_column_1, ..., cell_row_1_column_M], ... [cell_row_N_column_0, cell_row_N_column_1, ..., cell_row_N_column_M]] data : list or list of lists Should have the same shape of time until the cell_row_n_column_m level. At this stage cell_row_n_column_m can contain a set of lists. Each one of these lists contains give rise to a different plot at the figure located in row n and column m of the grid structure. legend : list Input where the legend of each plot is specified. Should have the same shape of data. title : list Input where the title of each figure is specified. Should have the same shape of time. y_axis_label : list Input where the y label of each figure is specified. Should have the same shape of time. hor_lines : list of lists The parent list must have the same shape of time and each of its elements (child lists) must be formed by numbers defining the y axis position of the horizontal lines. hor_lines_leg : list of lists Legends of the horizontal lines (same shape of hor_lines). vert_lines : list of lists The parent list must have the same shape of time and each of its elements (child lists) must be formed by numbers defining the x axis position of the vertical lines. vert_lines_leg : list of lists Legends of the vertical lines (same shape of vert_lines). apply_opensignals_style : boolean If True then the OpenSignals style will be applied. show_plot : boolean If True the generated figures will be shown. warn_print : bool If True some warnings about invalid kwargs keys will be prompted. **kwargs : dict Keyword values for applying in bokeh figures, lines and gridplots. Returns ------- out : bokeh figure or gridplot Object that is produced during the execution of the present function. 
""" # -------------------------------- Application of styling options ----------------------------- if apply_opensignals_style is True: style_figure = { **opensignals_kwargs("figure"), **_filter_keywords(FigureOptions, kwargs, is_class=True, warn_print=warn_print) } style_line = { **opensignals_kwargs("line"), **_filter_keywords(Line, kwargs, warn_print=warn_print) } style_gridplot = { **opensignals_kwargs("gridplot"), **_filter_keywords(gridplot, kwargs, warn_print=warn_print) } else: style_figure = _filter_keywords(FigureOptions, kwargs, is_class=True, warn_print=warn_print) style_line = _filter_keywords(Line, kwargs, warn_print=warn_print) style_gridplot = _filter_keywords(gridplot, kwargs, warn_print=warn_print) # ---------- Based on the input check if the output should be in the gridplot format ---------- if len(list(numpy.shape(data))) == 3 and len(list(numpy.shape(time))) == 3: grid_plot = True elif len(list(numpy.shape(data))) == 1 and len(list( numpy.shape(time))) == 1: grid_plot = False else: raise RuntimeError( "'time' and 'data' fields must have the same shape, which would be a " "list with 1 dimension or a list of lists with 3 levels, such as [[[" "time_0_0, time_0,1, time_0_2], [time_1_0, time_1_1, time_1_2]]]. In the" " previous example the output will be a gridplot with 2 rows and " "three columns.") # ------------ Verification if the input arguments (title and legend) are valid --------------- # [legend] legend = _check_validity_of_inputs(data, legend, "legend", grid_plot, dimension=3) # [title] title = _check_validity_of_inputs(data, title, "title", grid_plot, dimension=2) # [y_axis_label] y_axis_label = _check_validity_of_inputs(data, y_axis_label, "y_axis_label", grid_plot, dimension=2) # Horizontal Lines. # [hor_lines] hor_lines = _check_validity_of_inputs(data, hor_lines, "hor_lines", grid_plot, dimension=2) hor_lines_leg = _check_validity_of_inputs(data, hor_lines_leg, "hor_lines_leg", grid_plot, dimension=2) # Vertical Lines. # [vert_lines] vert_lines = _check_validity_of_inputs(data, vert_lines, "vert_lines", grid_plot, dimension=2) vert_lines_leg = _check_validity_of_inputs(data, vert_lines_leg, "vert_lines_leg", grid_plot, dimension=2) # --------------------------------------- Plotting Stage -------------------------------------- fig_list = [] if grid_plot is True: # Each element inside "data", "time", "title", "legend" ... matrix cell must be a list. if all( _is_instance(list, el, condition="all", deep=True) for el in [ time, data, title, legend, y_axis_label, hor_lines, vert_lines, hor_lines_leg, vert_lines_leg ]): for row in range(0, len(data)): # Generation of a figure per plot. fig_list.append([]) for column in range(0, len(data[row])): for plt in range(0, len(data[row][column])): # Verification if all elements inside list are numbers. if _is_instance(Number, data[row][column][plt], condition="all", deep=True) \ and not _is_instance(bool, data[row][column][plt], condition="any") \ and _is_instance(Number, time[row][column][0], condition="all") \ and not _is_instance(bool, time[row][column][0], condition="any"): fig_list.append([]) # Generation of multiple figures. fig_list[-1][-1].append( figure( title=title[row][column][0], y_axis_label=y_axis_label[row][column][0], **style_figure)) fig_list[-1][-1][-1].line( time[row][column][0], data[row][column][plt], legend=legend[row][column][plt], **style_line) else: raise RuntimeError( "At least one of the list elements, specified in " "data or time, is not numeric.") # Representation of horizontal lines. 
if hor_lines is not None: for hor_line_nbr, hor_line in enumerate( hor_lines[row][column]): if hor_lines_leg is not None: fig_list[-1][-1][-1].line( [ time[row][column][0], time[row][column][-1] ], [hor_line, hor_line], legend=hor_lines_leg[row][hor_line_nbr], **opensignals_kwargs("line")) else: fig_list[-1][-1][-1].line([ time[row][column][0], time[row][column][-1] ], [hor_line, hor_line ], **opensignals_kwargs("line")) # Representation of vertical lines. if vert_lines is not None: for vert_line_nbr, vert_line in enumerate( vert_lines[row][column]): if vert_lines_leg is not None: fig_list[-1][-1][-1].line( [vert_line, vert_line], [ numpy.min(data[row][column][0]), numpy.max(data[row][column][0]) ], legend=vert_lines_leg[row][vert_line_nbr], **opensignals_kwargs("line")) else: fig_list[-1][-1][-1].line( [vert_line, vert_line], [ numpy.min(data[row][column][0]), numpy.max(data[row][column][0]) ], **opensignals_kwargs("line")) # Update of line style. if apply_opensignals_style is True: style_line = { **opensignals_kwargs("line"), **_filter_keywords(Line, kwargs, warn_print=warn_print) } else: style_line = _filter_keywords(Line, kwargs, warn_print=warn_print) else: raise RuntimeError( "At least one of the list elements, specified in data, " "is not a sublist.") else: # If this happen, then we receive as input a single list for time and data # (Single plot perspective). if _is_instance(Number, data, condition="all") \ and not _is_instance(bool, data, condition="any") \ and _is_instance(Number, time, condition="all")\ and not _is_instance(bool, time, condition="any"): fig_list.append( figure(title=title, y_axis_label=y_axis_label[0], **style_figure)) fig_list[-1].line(time, data, legend=legend, **style_line) else: raise RuntimeError( "At least one of the list elements, specified in data or time, is " "not numeric.") # Application of the OpenSignals Sytle. if apply_opensignals_style is True: opensignals_style([item for sublist in fig_list for item in sublist]) # Show of plots. if grid_plot is True: # Generation of the gridplot. grid = gridplot(fig_list, **style_gridplot) if show_plot is True: show(grid) else: raise RuntimeError( "The specified number of lines and columns for the grid plot is not " "compatible.") else: if show_plot is True: show(fig_list[-1]) return fig_list
for s, e in pairwise(seg)]) vertex_source.data = dict(x=xs, y=ys) generate_button = Button(label='Generate') generate_button.on_click(generate) controls = [x_slider, y_slider] def update(): container.nodes = maze.Node2D.create_grid(y_slider.value, x_slider.value) xs, ys = zip(*[(n.x, n.y) for n in container.nodes]) node_source.data = dict(x=xs, y=ys) generate() for control in controls: control.on_change('value', lambda attr, old, new: update()) sizing_mode = 'fixed' controls.append(generate_button) inputs = widgetbox(*controls, sizing_mode=sizing_mode) update() l = layout([[inputs, gridplot([[main]])]], sizing_mode=sizing_mode) curdoc().add_root(l) curdoc().title = 'Maze'
color=INCREASING_COLOR if inc else DECREASING_COLOR new_data = dict( x1= [df1_.index[t1]], Date1=[df1_.Date.values[t1]], open1=[df1_.open.iloc[t1]], high1=[df1_.high.iloc[t1]], low1=[df1_.low.iloc[t1]], close1=[df1_.close.iloc[t1]], color=[color], ) inc_source.stream(new_data) dt1=pd.Index(df1_.Date).tz_localize(None).values fig.xaxis.major_label_overrides = { i: date.strftime(xaxis_dt_format) for i, date in enumerate(pd.to_datetime(dt1)) } a=1 df1=df1_ print(' ### DONE ####') update() curdoc().add_root(column(gridplot([[fig]], toolbar_location="left", plot_width=1000))) # curdoc().add_root(column(gridplot([[fig],[fig2]], toolbar_location="left", plot_width=1000))) curdoc().add_periodic_callback(update, 1000) curdoc().title = "OHLC"
    p.xgrid.grid_line_color = None
    p.legend.orientation = "horizontal"
    # Other parameter settings
    return p

# Taste score
s2 = plot_bar(source, "C", "red", "Taste score")
# Value-for-money score
s3 = plot_bar(source, "B", "green", "Value-for-money score")

# Stitch the charts together
from bokeh.layouts import gridplot
p = gridplot([[s1], [s2], [s3]])
show(p)

#####################################################################################################
####################################  2. Restaurant site-selection analysis  #######################
#####################################################################################################
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import os
os.chdir(
    r"F:\pythondata\Python微专业\【非常重要】课程资料\python_minor_project\项目7_15\项目07城市餐饮店铺选址分析\导出四项指标"
)
from bokeh.models import ColumnDataSource
from bokeh.models import HoverTool
</table> </div> """ txt_keys = [ 'CUTLO', 'CUTMED', 'CUTHI', 'NGOODFIB_NORMAL_RANGE', 'NGOODFIB_WARN_RANGE' ] for i in range(5): html_str = html_str.replace('%s%s' % ("param", str(i)), str(tests['countbins'][txt_keys[i]])) div = Div(text=html_str, width=400, height=200) # --------- # plow.legend.location = "top_left" # layout = gridplot( [phi,pmed,plow,None], ncols=2, plot_width=600, plot_height=600) layout_plot = gridplot([plow, pmed, phi, div], ncols=2, responsive=False, plot_width=600, plot_height=600) info_col = Div(text=write_description('countbins'), width=1200) layout = column(widgetbox(info_col), layout_plot) # End of Bokeh Block curdoc().add_root(layout) curdoc().title = "COUNTBINS"
def graphics(): """ :param max_drawdown: Drawdown from statistics function :param po_trades: Positive trades :param ne_trades: Negative trades :return: Graphics of the strategy, capital line, drawdown, positive and negative trades """ # Candlestick inc = data.close > data.open dec = data.open > data.close w = 30 * 30 * 30 * 30 source = ColumnDataSource(df_trades) p_hover_entry = HoverTool( names=["buy_condition"], tooltips=[ ("buy date", "@buy_date{%Y-%m-%d %H hour}"), ("buy price", "@buy_price"), ("type", "Buy") ], formatters={ 'buy_date': 'datetime', }, # display a tooltip whenever the cursor is vertically in line with a glyph mode='mouse' ) p_hover_exit = HoverTool( names=["sell_condition"], tooltips=[ ("sell date", "@sell_date{%Y-%m-%d %H hour}"), ("sell price", "@sell_price"), ("type", "@trade") ], formatters={ 'sell_date': 'datetime', }, # display a tooltip whenever the cursor is vertically in line with a glyph mode='mouse' ) crosshair = CrosshairTool(dimensions='both') #data['stoploss'] = data.close - (data.atr * ATR_COEF) # ATR # Figures p = figure(x_axis_type="datetime", plot_height=500, plot_width=1500, title="10 Year T-Note") p_aux = figure(x_axis_type="datetime", plot_height=350, plot_width=1500, title="Standard Deviations", x_range=p.x_range) p.segment(data.date, data.high, data.date, data.low, color="black") p.vbar(data.date[inc], w, data.open[inc], data.close[inc], fill_color="green", line_color="black") p.vbar(data.date[dec], w, data.open[dec], data.close[dec], fill_color="red", line_color="black") # Tools p.add_tools(p_hover_entry, p_hover_exit, crosshair) p_aux.add_tools(crosshair) # Graphics p.line(data.date, data.ma, line_color="red", legend='MA') p_aux.line(data.date, data.z_distance_mean, line_color="blue", legend='Z Mean Distance') # Axis of graphics p.xaxis.axis_label = 'TIME' p.yaxis.axis_label = 'PRICE (USD)' # Buy and Sell condition p.circle('buy_date', 'buy_price', fill_color="green", line_color="black", legend='BUY CONDITION', size=12, fill_alpha=0.8, source=source, name='buy_condition') p.circle('sell_date', 'sell_price', fill_color="red", line_color="black", legend='SELL CONDITION', size=12, fill_alpha=0.8, source=source, name='sell_condition') g = gridplot([[p], [p_aux]], sizing_mode='scale_width') show(g) output_file(f'{folder}graphic_validation/{strategy_name}_long_graphic_validation.html', mode='inline') save(g) logger.info('Graphics generated correctly')
sample_source = ColumnDataSource(data=data) current_label = sample_labels[label_key] plot_all = create_sample_scatter( x_data="X", y_data="Y", source=sample_source, label=current_label, title="ALL: %s" % feature_name, ) grid_list = list() for t in range(N): data = { 'X': tsne_list[t].T[0], 'Y': tsne_list[t].T[1], 'label': labels } sample_source = ColumnDataSource(data=data) plot = create_sample_scatter( x_data="X", y_data="Y", source=sample_source, label=current_label, title="%s" % t, ) grid_list.append(plot) grid = gridplot(grid_list, ncols=cols, plot_width=350, plot_height=350) tab_list.append(Panel(child=column(plot_all, grid), title=label_key)) tabs = Tabs(tabs=tab_list) show(tabs)
# ##plot the KL vs cput #fig5 = bkp.figure(width=1000,height=500,x_axis_label='CPU Time (s)', y_axis_label='KL(q || p)', y_axis_type='log') #preprocess_plot(fig5, '42pt', True) #for cput, kl, nm, clrid in [(cput_ubvi, rkl_ubvi, 'UBVI', 0), (cput_bbvi, rkl_bbvi, 'BVI1', 1), (cput_bbvieps, rkl_bbvieps, 'BVI70', 2)]: # cput_25 = np.percentile(np.cumsum(cput, axis=1), 25, axis=0) # cput_50 = np.percentile(np.cumsum(cput, axis=1), 50, axis=0) # cput_75 = np.percentile(np.cumsum(cput, axis=1), 75, axis=0) # rkl_25 = np.percentile(kl, 25, axis=0) # rkl_50 = np.percentile(kl, 50, axis=0) # rkl_75 = np.percentile(kl, 75, axis=0) # fig5.circle(cput_50, rkl_50, color=pal[clrid], size=10)#, legend=nm) # fig5.segment(x0=cput_50, y0=rkl_25, x1=cput_50, y1=rkl_75, color=pal[clrid], line_width=4)#, legend=nm) # fig5.segment(x0=cput_25, y0=rkl_50, x1=cput_75, y1=rkl_50, color=pal[clrid], line_width=4)#, legend=nm) postprocess_plot(fig, '42pt') #, orientation='horizontal', glyph_height=80) postprocess_plot(figL, '42pt') postprocess_plot(fig2, '42pt') postprocess_plot(fig3, '42pt') #postprocess_plot(fig4, '42pt') #postprocess_plot(fig5, '42pt') #bkp.show(bkl.gridplot([[fig, fig2, fig3, fig4, fig5]])) bkp.show(bkl.gridplot([[fig, figL, fig2, fig3]]))
    plot = Plot(x_range=xdr, y_range=ydr, min_border=50)
    plot.add_layout(LinearAxis(), 'below')
    plot.add_layout(LinearAxis(), 'left')
    plot.add_glyph(source, Line(x=xname, y=yname, line_color=line_color))
    plot.add_tools(PanTool(), WheelZoomTool())
    return plot

plot1 = make_plot(source, "x", "y1", "blue")
plot2 = make_plot(source, "x", "y2", "red", xdr=plot1.x_range)
plot3 = make_plot(source, "x", "y3", "green")
plot4 = make_plot(source, "x", "y4", "black")

grid = gridplot([[plot1, plot2], [plot3, plot4]], plot_width=300, plot_height=300)

doc = Document()
doc.add_root(grid)

if __name__ == "__main__":
    filename = "grid.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Grid Plot Example"))
    print("Wrote %s" % filename)
    view(filename)
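# The Document/file_html/view sequence above writes the HTML file by hand. A
# shorter, hedged alternative for the same grid uses bokeh.io directly (equivalent
# output, minus the explicit Document management):
from bokeh.io import output_file, save

output_file("grid.html", title="Grid Plot Example")
save(grid)  # writes grid.html without opening a browser; use show(grid) to open it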
from bokeh.io import output_file, show
from bokeh.layouts import column, gridplot
from bokeh.plotting import figure

output_file("layout.html")

x = list(range(11))
y0 = x
y1 = [10 - i for i in x]
y2 = [abs(i - 5) for i in x]

# Create plots
s1 = figure(plot_width=250, plot_height=250, title=None)
s1.circle(x, y0, size=10, color='navy', line_alpha=0.5)
s2 = figure(plot_width=250, plot_height=250, title=None)
s2.circle(x, y1, size=10, color='firebrick', line_alpha=0.5)
s3 = figure(plot_width=250, plot_height=250, title=None)
s3.circle(x, y2, size=10, color='olive', line_alpha=0.5)

# Assign plots to a column position
# show(column(s1, s2, s3))

# Grid layout; a figure can appear only once in a layout, so the empty cell is None.
grid = gridplot([[s1, s2], [None, s3]])
show(grid)
def plot(*args, legend=None, title=None, x_axis_label="Time (s)", y_axis_label=None, grid_plot=False, grid_lines=None, grid_columns=None, hor_lines=None, hor_lines_leg=None, vert_lines=None, vert_lines_leg=None, apply_opensignals_style=True, show_plot=True, warn_print=False, get_fig_list=False, file_name=None, **kwargs): """ ----- Brief ----- Plotting function intended for an easy representation of OpenSignals acquired data. ----------- Description ----------- This function allows to plot data acquired with resource to OpenSignals, available at https://bitalino.com/en/software, in an easy way using the Bokeh library (https://bokeh.pydata.org/en/latest/). The plot will automatically present the OpenSignals style if the corresponding parameter is set to True (apply_opensignals_style=True). This allows to easily use multiple plot elements without the need to know any visualization libraries. ---------- Parameters ---------- *args: list Variable number of arguments with the purpose of giving the user the possibility of defining as an input only the "data" axis or both "time" and "data" axes. legend : list Input where the legend of each plot is specified. Should have the same shape of time. title : list Input where the title of each figure is specified. Should have the same shape of time. x_axis_label : list Input where the x label of each figure is specified. All figures will have the same x label in the current implementation. y_axis_label : list Input where the y label of each figure is specified. Should have a length equal to the number of figures. grid_plot : boolean If True then the plots will be organized in a grid_plot structure. grid_lines : int Number of lines of grid plot. grid_columns : int Number of columns of grid plot. hor_lines : list of lists The parent list must have the same shape of time and each of its elements (child lists) must be formed by numbers defining the y axis position of the horizontal lines. hor_lines_leg : list of lists Legends of the horizontal lines (same shape of hor_lines). vert_lines : list of lists The parent list must have the same shape of time and each of its elements (child lists) must be formed by numbers defining the x axis position of the vertical lines. vert_lines_leg : list of lists Legends of the vertical lines (same shape of vert_lines). apply_opensignals_style : boolean If True then the OpenSignals style will be applied. show_plot : boolean If True the generated figures will be shown. warn_print : bool If True some warnings about invalid kwargs keys will be prompted. get_fig_list : bool If True then it will be returned a list containing the figure objects generated during the function execution. file_name : str Path containing the destination folder where the Bokeh figure will be stored. **kwargs : dict Keyword values for applying in bokeh figures, lines and gridplots. Returns ------- out : bokeh figure or gridplot Object that is produced during the execution of the present function. """ # Generation of the HTML file where the plot will be stored. #file_name = _generate_bokeh_file(file_name) # Data conversion for ensuring that the function only works with lists. if len(args) == 1: time = [list(numpy.linspace(1, len(args[0][0]) + 1, len(args[0][0]))) ] * len(args[0]) data = args[0] elif len(args) == 2: time = list(args[0]) data = list(args[1]) else: raise RuntimeError( "biosignalsnotebooks plot function only accepts 1 or 2 arguments in *args" " input. 
If only 1 input is given it should be a list with data samples," "otherwise if 2 inputs are given then the first one defines the time" "axis and the second one data values.") # This function offers two input mechanisms (easy and complex). The easiest one consists in # the representation of a single plot in a single figure, so, the user only needs to specify as # inputs "time" and "data" lists. On the other hand, for the complex mechanism, the user can # represent plots in different figures, using for that lists of lists as "time" and "data" # inputs. # In the following lines is ensured that independently of the input given, the function will # achieve is purpose correctly. if _is_instance(Number, data, condition="all") and not _is_instance(bool, data, condition="any") \ and _is_instance(Number, time, condition="all") \ and not _is_instance(bool, time, condition="any"): time = [time] data = [data] if y_axis_label is not None: y_axis_label = [y_axis_label] if hor_lines is not None: hor_lines = [hor_lines] if hor_lines_leg is not None: hor_lines_leg = [hor_lines_leg] if vert_lines is not None: vert_lines = [vert_lines] if vert_lines_leg is not None: vert_lines_leg = [vert_lines_leg] if title is not None: title = [title] if legend is not None: legend = [legend] elif _is_instance(numpy.ndarray, data, condition="any") \ or _is_instance(numpy.ndarray, time, condition="any"): time = list(map(list, time)) data = list(map(list, data)) # Ensures the application or not of opensignals graphical style. if apply_opensignals_style is True: style_figure = { **opensignals_kwargs("figure"), **_filter_keywords(FigureOptions, kwargs, is_class=True, warn_print=warn_print) } style_line = { **opensignals_kwargs("line"), **_filter_keywords(Line, kwargs, warn_print=warn_print) } style_gridplot = { **opensignals_kwargs("gridplot"), **_filter_keywords(gridplot, kwargs, warn_print=warn_print) } else: style_figure = _filter_keywords(FigureOptions, kwargs, is_class=True, warn_print=warn_print) style_line = _filter_keywords(Line, kwargs, warn_print=warn_print) style_gridplot = _filter_keywords(gridplot, kwargs, warn_print=warn_print) # ------------------------ Verification if the input arguments are valid ---------------------- if legend is not None: if isinstance(legend, list): if len(legend) != len(time) or len(legend) != len(data): raise RuntimeError( "The shape of legend does not match with time input.") else: raise RuntimeError( "The specified data type of legend field is not valid. Input must " "be a list.") else: legend = [None] * len(time) if title is not None: if isinstance(title, list): if len(title) != len(time) or len(title) != len(data): raise RuntimeError( "The shape of title does not match with time input.") elif isinstance(title, str): if grid_plot is True: raise RuntimeError( "Each figure of the gridplot must have a title, i.e., the shape" " of time, data and title inputs needs to match.") else: title = [title] * len(time) elif grid_plot is False and len(title) != 1: raise RuntimeError( "The number of titles is not compatible with the number of figures " "(only one title is needed).") else: raise RuntimeError( "The specified data type of title field is not valid. Input must be " "a list.") else: title = [None] * len(time) if y_axis_label is not None: if isinstance(y_axis_label, list): if len(y_axis_label) != len(time) or len(y_axis_label) != len( data): raise RuntimeError( "The shape of y_axis_label does not match with time input." 
) elif isinstance(y_axis_label, str): y_axis_label = [y_axis_label] * len(time) elif grid_plot is False and len(y_axis_label) != 1: raise RuntimeError( "The number of y axis labels is not compatible with the number of " "figures.") else: raise RuntimeError( "The specified data type of y_axis_label field is not valid. Input " "must be a list or a string when grid_plot field is False.") else: y_axis_label = [None] * len(time) # Coherence between grid_plot, grid_lines and grid_columns inputs. if grid_lines is not None or grid_columns is not None: if grid_plot is not True: raise RuntimeError( "When grid_lines and grid_columns inputs are used the field grid_" "plot must be True.") else: if not isinstance(grid_lines, int) or not isinstance( grid_columns, int): raise RuntimeError( "At least one of the grid_lines or grid_columns values is not " "an integer.") # Horizontal Lines. if hor_lines is not None: if isinstance(hor_lines, list): if len(hor_lines) != len(time) or len(hor_lines) != len(data): raise RuntimeError( "The shape of hor_lines does not match with time input.") else: raise RuntimeError( "The specified data type of hor_lines field is not valid. Input " "must be a list of lists.") # Each sublist entry must be numeric. for cell in hor_lines: if not _is_instance(Number, cell, condition="all") \ or _is_instance(bool, cell, condition="any"): raise RuntimeError( "At least one of the list elements, specified in hor_lines, " "is not numeric.") elif vert_lines_leg is not None: if len(hor_lines) != len(hor_lines_leg): raise RuntimeError( "The shape of hor_lines and hor_lines_leg is not the same." ) # Vertical Lines. if vert_lines is not None: if isinstance(vert_lines, list): if len(vert_lines) != len(time) or len(vert_lines) != len(data): raise RuntimeError( "The shape of vert_lines does not match with time input.") else: raise RuntimeError( "The specified data type of vert_lines field is not valid. " "Input must be a list of lists.") # Each sublist entry must be numeric. for cell in vert_lines: if not _is_instance(Number, cell, condition="all") \ or _is_instance(bool, cell, condition="any"): raise RuntimeError( "At least one of the list elements, specified in vert_lines, " "is not numeric.") elif vert_lines_leg is not None: if len(vert_lines) != len(vert_lines_leg): raise RuntimeError( "The shape of vert_lines and vert_lines_leg is not " "the same.") # --------------------------------------- Plotting Stage -------------------------------------- fig_list = [] # If all data entries are lists, then it is considered that we are in a multiplot situation. if _is_instance(list, data, condition="all") and _is_instance( list, time, condition="all"): for list_entry in range(0, len(time)): # Generation of a figure per plot. # Verification if all elements inside list are numbers. if _is_instance(Number, data[list_entry], condition="all") \ and not _is_instance(bool, data[list_entry], condition="any") \ and _is_instance(Number, time[list_entry], condition="all") \ and not _is_instance(bool, time[list_entry], condition="any"): if len(time[list_entry]) == len( data[list_entry]): # Shape verification if grid_plot is True: # Generation of multiple figures. 
fig_list.append( figure(title=title[list_entry], y_axis_label=y_axis_label[list_entry], x_axis_label=x_axis_label, **style_figure)) elif grid_plot is False and list_entry == 0: fig_list.append( figure(title=title[list_entry], y_axis_label=y_axis_label[list_entry], x_axis_label=x_axis_label, sizing_mode='scale_both', **style_figure)) fig_list[-1].line(time[list_entry], data[list_entry], legend=legend[list_entry], **style_line) # Representation of horizontal lines. if hor_lines is not None: for hor_line_nbr, hor_line in enumerate( hor_lines[list_entry]): if hor_lines_leg is not None: fig_list[-1].line( [ time[list_entry][0], time[list_entry][-1] ], [hor_line, hor_line], legend=hor_lines_leg[list_entry] [hor_line_nbr], **opensignals_kwargs("line")) else: fig_list[-1].line([ time[list_entry][0], time[list_entry][-1] ], [hor_line, hor_line], **opensignals_kwargs("line")) # Representation of vertical lines. if vert_lines is not None: for vert_line_nbr, vert_line in enumerate( vert_lines[list_entry]): if vert_lines_leg is not None: fig_list[-1].line( [vert_line, vert_line], [ numpy.min(data[list_entry]), numpy.max(data[list_entry]) ], legend=vert_lines_leg[list_entry] [vert_line_nbr], **opensignals_kwargs("line")) else: fig_list[-1].line([vert_line, vert_line], [ numpy.min(data[list_entry]), numpy.max(data[list_entry]) ], **opensignals_kwargs("line")) # Update of line style. if apply_opensignals_style is True: style_line = { **opensignals_kwargs("line"), **_filter_keywords(Line, kwargs, warn_print=warn_print) } else: style_line = _filter_keywords(Line, kwargs, warn_print=warn_print) else: raise RuntimeError( "The shape of time and data inputs does not match.") else: raise RuntimeError( "At least one of the list elements, specified in data or time, " "is not numeric.") # If this happen, then we receive as input a single list for time and data # (Single plot perspective). elif _is_instance(Number, data, condition="all") \ and not _is_instance(bool, data, condition="any") \ and _is_instance(Number, time, condition="all") \ and not _is_instance(bool, time, condition="any"): grid_plot = False # Verification if all elements inside list are numbers. if _is_instance(Number, data, condition="all") \ and not _is_instance(bool, data, condition="any") \ and _is_instance(Number, time, condition="all") \ and not _is_instance(bool, time, condition="any"): if len(time) == len(data): # Shape verification fig_list.append( figure(title=title[0], y_axis_label=y_axis_label[0], x_axis_label=x_axis_label, **style_figure)) fig_list[-1].line(time, data, legend=legend[0], **style_line) else: raise RuntimeError( "The shape of time and data inputs does not match.") else: raise RuntimeError( "At least one of the list elements, specified in data or time, is " "not numeric.") else: raise RuntimeError( "The input 'data' or/and 'time' does not have a valid format. It should " "be a list of numbers or a list of lists.") # Application of the OpenSignals Style. if apply_opensignals_style is True: opensignals_style(fig_list) # Show of plots. if grid_plot is True: nbr_of_spaces = grid_lines * grid_columns nbr_of_figures = len(fig_list) if nbr_of_spaces >= nbr_of_figures > (grid_lines - 1) * grid_columns: # Organization of data accordingly to the number of rows and columns specified as input # arguments. 
grid_layout = [] fig_nbr = 0 for row in range(0, grid_lines): grid_layout.append([]) for column in range(0, grid_columns): if fig_nbr <= nbr_of_figures - 1: grid_layout[-1].append(fig_list[fig_nbr]) else: grid_layout[-1].append(None) # Update of auxiliary variable. fig_nbr += 1 # Generation of the gridplot. grid = gridplot(grid_layout, **style_gridplot) if show_plot is True: show(grid) #else: # save(grid) #return HTML('<iframe width=100% height=350 src="generated_plots/' + file_name + '"></iframe>') else: raise RuntimeError( "The specified number of lines and columns for the grid plot is not " "compatible.") else: if show_plot is True: show(fig_list[-1]) #else: # save(fig_list[-1]) #return HTML('<iframe width=100% height="' + str(fig_list[-1].plot_height) + '" src="generated_plots/' + file_name + '"></iframe>') if get_fig_list is True: return fig_list
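# The grid-assembly loop above pads the last row with None so that gridplot
# always receives a rectangular list of lists. A minimal standalone sketch of
# the same padding idea; the figure list and grid shape here are illustrative,
# not taken from the function above.
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show

figs = [figure(plot_width=250, plot_height=250) for _ in range(5)]
for f in figs:
    f.circle([1, 2, 3], [3, 1, 2])

grid_lines, grid_columns = 2, 3
layout = [figs[r * grid_columns:(r + 1) * grid_columns] for r in range(grid_lines)]
layout = [row + [None] * (grid_columns - len(row)) for row in layout]  # pad with blanks

show(gridplot(layout))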
arr_hist18, edges18 = np.histogram(df18['percentage_male'][:800],
                                   bins=int(100 / 4),
                                   range=[0, 100])

# Put the information in a dataframe
uni18 = pd.DataFrame({
    'arr_man': arr_hist18,
    'left': edges18[:-1],
    'right': edges18[1:]
})

# Create the blank plot
p18 = figure(plot_height=600, plot_width=600,
             title='Histogram of percentage male',
             x_axis_label='percentage male',
             y_axis_label='Number of universities')
p18.y_range = DataRange1d(start=0, end=250)

# Add a quad glyph
p18.quad(bottom=0, top=uni18['arr_man'], left=uni18['left'],
         right=uni18['right'], fill_color='green', line_color='black')

# Show the plots in one grid; gridplot expects a single nested list of rows,
# and a row of all-None placeholders adds nothing, so it is dropped here.
show(gridplot([[p16, p17, p18]]))
# Specify the name of the output_file and show the result
output_file('layout_custom.html')
show(layout)

# Import gridplot from bokeh.layouts
from bokeh.layouts import gridplot

# Create a list containing plots p1 and p2: row1
row1 = [p1, p2]

# Create a list containing plots p3 and p4: row2
row2 = [p3, p4]

# Create a gridplot using row1 and row2: layout
layout = gridplot([row1, row2])

# Specify the name of the output_file and show the result
output_file('grid.html')
show(layout)

# Import Panel from bokeh.models.widgets
from bokeh.models.widgets import Panel

# Create tab1 from plot p1: tab1
tab1 = Panel(child=p1, title='Latin America')

# Create tab2 from plot p2: tab2
tab2 = Panel(child=p2, title='Africa')
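# The snippet stops right after the two Panel objects are created; a plausible
# continuation (assuming p1, p2 and the output_file/show imports from the lines
# above) collects the panels into a Tabs widget.
from bokeh.models.widgets import Tabs

layout = Tabs(tabs=[tab1, tab2])
output_file('tabs.html')
show(layout)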
import numpy as np

from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, output_file

x = np.linspace(0, 4*np.pi, 100)
y = np.sin(x)

TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"

p1 = figure(title="Legend Example", tools=TOOLS)
p1.circle(x, y, legend="sin(x)")
p1.circle(x, 2*y, legend="2*sin(x)", color="orange")
p1.circle(x, 3*y, legend="3*sin(x)", color="green")
p1.legend.title = 'Example Title'

p2 = figure(title="Another Legend Example", tools=TOOLS)
p2.circle(x, y, legend="sin(x)")
p2.line(x, y, legend="sin(x)")
p2.line(x, 2*y, legend="2*sin(x)", line_dash=(4, 4),
        line_color="orange", line_width=2)
p2.square(x, 3*y, legend="3*sin(x)", fill_color=None, line_color="green")
p2.line(x, 3*y, legend="3*sin(x)", line_color="green")

output_file("legend.html", title="legend.py example")

show(gridplot([p1, p2], ncols=2, plot_width=400, plot_height=400))  # open a browser
# Preparing the second panel picture
Hs_figfit_3 = figure(title="3rd Hs & Hn Extrapolation",
                     x_axis_label='1/T (samples-1)',
                     y_axis_label='bits / samples')
Rs = third_limit(Hs_fit2_df, figure=Hs_figfit_3)
Rn = third_limit(Hn_fit2_df, figure=Hs_figfit_3)  # Last figure is shared between Hs & Hn.

# Final Grid Layout (None leaves the bottom-right cell empty)
grid_layout = gridplot([[Hs_figfit_1, Hs_figfit_2, Hs_figfit_3],
                        [Hn_figfit_1, Hn_figfit_2, None]],
                       plot_width=400, plot_height=400,
                       toolbar_location="right")

split_path = os.path.split(file_name)
name = os.path.splitext(split_path[1])[0]

# Figures as SVG for editing in vector graphics software.
Hs_figfit_1.output_backend = "svg"
Hs_figfit_2.output_backend = "svg"
Hs_figfit_3.output_backend = "svg"
Hn_figfit_1.output_backend = "svg"
Hn_figfit_2.output_backend = "svg"

show(column(Div(text='<h2>' + name + '</h2>'), grid_layout))
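# Setting output_backend = "svg" only switches the rendering backend; to
# actually write SVG files one would typically also call bokeh.io.export_svgs,
# which requires a Selenium webdriver to be available. A sketch with a
# hypothetical output file name:
from bokeh.io import export_svgs

# Each figure whose output_backend is "svg" can be exported individually.
export_svgs(Hs_figfit_1, filename=name + "_Hs_fit1.svg")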
def hail_metadata(t_path): """Create a metadata plot for a Hail Table or MatrixTable. Parameters ---------- t_path : str Path to the Hail Table or MatrixTable files. Returns ------- :class:`bokeh.plotting.figure.Figure` or :class:`bokeh.models.widgets.panels.Tabs` or :class:`bokeh.models.layouts.Column` """ def get_rows_data(rows_files): file_sizes = [] partition_bounds = [] parts_file = [ x['path'] for x in rows_files if x['path'].endswith('parts') ] if parts_file: parts = hadoop_ls(parts_file[0]) for i, x in enumerate(parts): index = x['path'].split(f'{parts_file[0]}/part-')[1].split( '-')[0] if i < len(parts) - 1: test_index = parts[i + 1]['path'].split( f'{parts_file[0]}/part-')[1].split('-')[0] if test_index == index: continue file_sizes.append(x['size_bytes']) metadata_file = [ x['path'] for x in rows_files if x['path'].endswith('metadata.json.gz') ] if metadata_file: with hadoop_open(metadata_file[0], 'rb') as f: rows_meta = json.loads(f.read()) try: partition_bounds = [(x['start']['locus']['contig'], x['start']['locus']['position'], x['end']['locus']['contig'], x['end']['locus']['position']) for x in rows_meta['jRangeBounds']] except KeyError: pass return partition_bounds, file_sizes def scale_file_sizes(file_sizes): min_file_size = min(file_sizes) * 1.1 total_file_size = sum(file_sizes) all_scales = [('T', 1e12), ('G', 1e9), ('M', 1e6), ('K', 1e3), ('', 1e0)] for overall_scale, overall_factor in all_scales: if total_file_size > overall_factor: total_file_size /= overall_factor break for scale, factor in all_scales: if min_file_size > factor: file_sizes = [x / factor for x in file_sizes] break total_file_size = f'{total_file_size:.1f} {overall_scale}B' return total_file_size, file_sizes, scale files = hadoop_ls(t_path) rows_file = [x['path'] for x in files if x['path'].endswith('rows')] entries_file = [x['path'] for x in files if x['path'].endswith('entries')] success_file = [ x['modification_time'] for x in files if x['path'].endswith('SUCCESS') ] metadata_file = [ x['path'] for x in files if x['path'].endswith('metadata.json.gz') ] if not metadata_file: raise FileNotFoundError('No metadata.json.gz file found.') with hadoop_open(metadata_file[0], 'rb') as f: overall_meta = json.loads(f.read()) rows_per_partition = overall_meta['components']['partition_counts'][ 'counts'] if not rows_file: raise FileNotFoundError('No rows directory found.') rows_files = hadoop_ls(rows_file[0]) data_type = 'Table' if entries_file: data_type = 'MatrixTable' rows_file = [ x['path'] for x in rows_files if x['path'].endswith('rows') ] rows_files = hadoop_ls(rows_file[0]) row_partition_bounds, row_file_sizes = get_rows_data(rows_files) total_file_size, row_file_sizes, row_scale = scale_file_sizes( row_file_sizes) panel_size = 480 subpanel_size = 120 if not row_partition_bounds: warnings.warn('Table is not partitioned. 
Only plotting file sizes') row_file_sizes_hist, row_file_sizes_edges = np.histogram( row_file_sizes, bins=50) p_file_size = figure(plot_width=panel_size, plot_height=panel_size) p_file_size.quad(right=row_file_sizes_hist, left=0, bottom=row_file_sizes_edges[:-1], top=row_file_sizes_edges[1:], fill_color="#036564", line_color="#033649") p_file_size.yaxis.axis_label = f'File size ({row_scale}B)' return p_file_size all_data = { 'partition_widths': [-1 if x[0] != x[2] else x[3] - x[1] for x in row_partition_bounds], 'partition_bounds': [f'{x[0]}:{x[1]}-{x[2]}:{x[3]}' for x in row_partition_bounds], 'spans_chromosome': [ 'Spans chromosomes' if x[0] != x[2] else 'Within chromosome' for x in row_partition_bounds ], 'row_file_sizes': row_file_sizes, 'row_file_sizes_human': [f'{x:.1f} {row_scale}B' for x in row_file_sizes], 'rows_per_partition': rows_per_partition, 'index': list(range(len(rows_per_partition))) } if entries_file: entries_rows_files = hadoop_ls(entries_file[0]) entries_rows_file = [ x['path'] for x in entries_rows_files if x['path'].endswith('rows') ] if entries_rows_file: entries_files = hadoop_ls(entries_rows_file[0]) entry_partition_bounds, entry_file_sizes = get_rows_data( entries_files) total_entry_file_size, entry_file_sizes, entry_scale = scale_file_sizes( entry_file_sizes) all_data['entry_file_sizes'] = entry_file_sizes all_data['entry_file_sizes_human'] = [ f'{x:.1f} {entry_scale}B' for x in row_file_sizes ] title = f'{data_type}: {t_path}' msg = f"Rows: {sum(all_data['rows_per_partition']):,}<br/>Partitions: {len(all_data['rows_per_partition']):,}<br/>Size: {total_file_size}<br/>" if success_file[0]: msg += success_file[0] tools = "hover,save,pan,box_zoom,reset,wheel_zoom" source = ColumnDataSource(pd.DataFrame(all_data)) p = figure(tools=tools, plot_width=panel_size, plot_height=panel_size) p.title.text = title p.xaxis.axis_label = 'Number of rows' p.yaxis.axis_label = f'File size ({row_scale}B)' color_map = factor_cmap('spans_chromosome', palette=Spectral8, factors=list(set(all_data['spans_chromosome']))) p.scatter('rows_per_partition', 'row_file_sizes', color=color_map, legend='spans_chromosome', source=source) p.legend.location = 'bottom_right' p.select_one(HoverTool).tooltips = [ (x, f'@{x}') for x in ('rows_per_partition', 'row_file_sizes_human', 'partition_bounds', 'index') ] p_stats = Div(text=msg) p_rows_per_partition = figure(x_range=p.x_range, plot_width=panel_size, plot_height=subpanel_size) p_file_size = figure(y_range=p.y_range, plot_width=subpanel_size, plot_height=panel_size) rows_per_partition_hist, rows_per_partition_edges = np.histogram( all_data['rows_per_partition'], bins=50) p_rows_per_partition.quad(top=rows_per_partition_hist, bottom=0, left=rows_per_partition_edges[:-1], right=rows_per_partition_edges[1:], fill_color="#036564", line_color="#033649") row_file_sizes_hist, row_file_sizes_edges = np.histogram( all_data['row_file_sizes'], bins=50) p_file_size.quad(right=row_file_sizes_hist, left=0, bottom=row_file_sizes_edges[:-1], top=row_file_sizes_edges[1:], fill_color="#036564", line_color="#033649") rows_grid = gridplot([[p_rows_per_partition, p_stats], [p, p_file_size]]) if 'entry_file_sizes' in all_data: title = f'Statistics for {data_type}: {t_path}' msg = f"Rows: {sum(all_data['rows_per_partition']):,}<br/>Partitions: {len(all_data['rows_per_partition']):,}<br/>Size: {total_entry_file_size}<br/>" if success_file[0]: msg += success_file[0] source = ColumnDataSource(pd.DataFrame(all_data)) p = figure(tools=tools, plot_width=panel_size, 
plot_height=panel_size) p.title.text = title p.xaxis.axis_label = 'Number of rows' p.yaxis.axis_label = f'File size ({entry_scale}B)' color_map = factor_cmap('spans_chromosome', palette=Spectral8, factors=list(set( all_data['spans_chromosome']))) p.scatter('rows_per_partition', 'entry_file_sizes', color=color_map, legend='spans_chromosome', source=source) p.legend.location = 'bottom_right' p.select_one(HoverTool).tooltips = [ (x, f'@{x}') for x in ('rows_per_partition', 'entry_file_sizes_human', 'partition_bounds', 'index') ] p_stats = Div(text=msg) p_rows_per_partition = figure(x_range=p.x_range, plot_width=panel_size, plot_height=subpanel_size) p_rows_per_partition.quad(top=rows_per_partition_hist, bottom=0, left=rows_per_partition_edges[:-1], right=rows_per_partition_edges[1:], fill_color="#036564", line_color="#033649") p_file_size = figure(y_range=p.y_range, plot_width=subpanel_size, plot_height=panel_size) row_file_sizes_hist, row_file_sizes_edges = np.histogram( all_data['entry_file_sizes'], bins=50) p_file_size.quad(right=row_file_sizes_hist, left=0, bottom=row_file_sizes_edges[:-1], top=row_file_sizes_edges[1:], fill_color="#036564", line_color="#033649") entries_grid = gridplot([[p_rows_per_partition, p_stats], [p, p_file_size]]) return Tabs(tabs=[ Panel(child=entries_grid, title='Entries'), Panel(child=rows_grid, title='Rows') ]) else: return rows_grid
price_mid = BoxAnnotation(left=40, right=80, fill_alpha=0.1, fill_color='navy')
result.add_layout(price_mid)  # add the highlighted band here

kw = figure(plot_width=800, plot_height=300, title='口味得分',  # "taste score"
            x_range=data_type,
            tools=[hover, 'box_select, reset, xwheel_zoom,pan,crosshair'])
kw.vbar(x='type', top='kw_norm', source=source, width=0.8, alpha=0.7, color='red')

price = figure(plot_width=800, plot_height=300, title='人均消费得分',  # "per-capita spend score"
               x_range=data_type,
               tools=[hover, 'box_select, reset, xwheel_zoom,pan,crosshair'])
price.vbar(x='type', top='price_norm', source=source, width=0.8, alpha=0.7, color='green')

p = gridplot([[result], [kw], [price]])  # place the three plots on one canvas
show(p)  # show(p) is required, otherwise nothing is displayed
close=[close], average=[average], color=[color], ) close = source.data['close'] + [close] ma12 = _moving_avg(close[-12:], 12)[0] ma26 = _moving_avg(close[-26:], 26)[0] ema12 = _ema(close[-12:], 12)[0] ema26 = _ema(close[-26:], 26)[0] if mavg.value == MA12: new_data['ma'] = [ma12] elif mavg.value == MA26: new_data['ma'] = [ma26] elif mavg.value == EMA12: new_data['ma'] = [ema12] elif mavg.value == EMA26: new_data['ma'] = [ema26] macd = ema12 - ema26 new_data['macd'] = [macd] macd_series = source.data['macd'] + [macd] macd9 = _ema(macd_series[-26:], 9)[0] new_data['macd9'] = [macd9] new_data['macdh'] = [macd - macd9] source.stream(new_data, 300) curdoc().add_root(column(row(mean, stddev, mavg), gridplot([[p], [p2]], toolbar_location="left", plot_width=1000))) curdoc().add_periodic_callback(update, 50) curdoc().title = "OHLC"
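# The key call in the streaming snippet above is source.stream(new_data, 300):
# the second argument is the rollover limit, so the ColumnDataSource keeps only
# the most recent 300 rows. A minimal self-contained sketch of the same
# mechanism (column names are illustrative):
from bokeh.models import ColumnDataSource

source = ColumnDataSource(data=dict(time=[], close=[]))
# Append one new row; rollover=300 discards anything beyond the last 300 rows.
source.stream(dict(time=[1], close=[101.5]), rollover=300)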
def link_plots(plots, **kwargs):
    """Make linked plots for the given parameters."""
    for plt in plots:
        plt.add_tools(LassoSelectTool())
    return gridplot([[plt] for plt in plots])
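# link_plots only attaches a LassoSelectTool to each figure and stacks them in
# a one-column gridplot; in Bokeh the selections are actually linked when the
# figures render glyphs from the same ColumnDataSource. A sketch of that
# precondition (data and names are illustrative):
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource, LassoSelectTool
from bokeh.plotting import figure, show

shared = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6], z=[9, 8, 7]))

left = figure()
left.circle('x', 'y', source=shared)
right = figure()
right.circle('x', 'z', source=shared)  # same source => linked selection

for p in (left, right):
    p.add_tools(LassoSelectTool())

show(gridplot([[left], [right]]))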
def grafica(shortticker): from flask import Flask, request, render_template, jsonify import pandas.io.sql as sql import sqlite3 import platform import datetime import numpy as np import pandas as pd import json #import pygal import matplotlib.pyplot as plt from scipy.stats import norm from bokeh.charts import Histogram import plotly #from pandas.io.data import DataReader from pandas_datareader import wb, DataReader from sklearn.linear_model import LogisticRegression from sklearn.lda import LDA from sklearn.qda import QDA from bokeh.layouts import gridplot from bokeh.plotting import figure, show, output_file def datetime(x): return np.array(x, dtype=np.datetime64) symbol = shortticker #"GOOG" df = DataReader(symbol, "google", '01/01/2016', '08/03/2017') df['date'] = df.index p1 = figure(x_axis_type="datetime", title="Stock Closing Prices") p1.grid.grid_line_alpha = 0.3 p1.xaxis.axis_label = 'Date' p1.yaxis.axis_label = 'Price' p1.line(datetime(df['date']), df['Close'], color='#A6CEE3', legend=symbol) #p1.line(datetime(GOOG['date']), GOOG['adj_close'], color='#B2DF8A', legend='GOOG') #p1.line(datetime(IBM['date']), IBM['adj_close'], color='#33A02C', legend='IBM') #p1.line(datetime(MSFT['date']), MSFT['adj_close'], color='#FB9A99', legend='MSFT') #p1.legend.location = "top_left" df_array = np.array(df['Close']) df_dates = np.array(df['date'], dtype=np.datetime64) window_size = 30 window = np.ones(window_size) / float(window_size) aapl_avg = np.convolve(df_array, window, 'same') p2 = figure(x_axis_type="datetime", title="One-Month Average") p2.grid.grid_line_alpha = 0 p2.xaxis.axis_label = 'Date' p2.yaxis.axis_label = 'Price' p2.ygrid.band_fill_color = "olive" p2.ygrid.band_fill_alpha = 0.1 p2.circle(df_dates, df_array, size=4, legend='close', color='darkgrey', alpha=0.2) p2.line(df_dates, aapl_avg, legend='avg', color='navy') p2.legend.location = "top_left" output_file("./templates/stocks.html", title="My Own Bokeh Example") show(gridplot([[p1, p2]], plot_width=400, plot_height=400)) # open a browser return render_template('stocks.html')
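# Writing stocks.html into the templates directory and re-serving it works, but
# a lighter Flask pattern is usually bokeh.embed.components, which returns a
# <script>/<div> pair to drop into a Jinja template. A sketch under that
# assumption; the template name and its placeholders are hypothetical.
from bokeh.embed import components
from bokeh.layouts import gridplot
from flask import render_template


def grafica_embedded(p1, p2):
    # Build the grid once and hand the script/div fragments to the template;
    # 'stocks_embedded.html' is assumed to load BokehJS and render
    # {{ bokeh_script|safe }} and {{ bokeh_div|safe }}.
    grid = gridplot([[p1, p2]], plot_width=400, plot_height=400)
    script, div = components(grid)
    return render_template('stocks_embedded.html',
                           bokeh_script=script, bokeh_div=div)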
# NOTE: fragment; the first part references a loop index i and a pre-built list figs.
w1 = X1[0, :] * 2.7244

###############
# data output #
###############
df = pd.DataFrame({
    "t": np.linspace(0, Tend, Nrk + 1),
    "θ": S[0:2 * Nrk + 1:2, 0],
    "dθ": S[0:2 * Nrk + 1:2, 1],
    "ddθ": S[0:2 * Nrk + 1:2, 2],
    "w1": w1,
})
df.to_csv(v["datadir"] + f"output/{i}_output.csv")
print("Output saved at \n\t" + v["datadir"] + f"output/{i}_output.csv")

################
# plot setting #
################
figs[i].line(df["t"], df["w1"], line_color=palette[0])
figs[i].line(df["t"], w1_exp, line_color=palette[1])

##############
# total plot #
##############
fig = gridplot([
    [figs[0], figs[1], figs[2], figs[3]],
    [figs[4], figs[5], figs[6], figs[7]],
    [figs[8], figs[9]],
])
show(fig)
def plot_prediction(df, predicted): p = figure(width=600, height=350, x_axis_type="datetime", tools=['box_zoom', 'reset']) p1 = figure(width=600, height=350, x_axis_type="datetime", tools=['box_zoom', 'reset']) #p = figure(width=450, height=300, x_axis_type="datetime") DF1 = df[-30:] date = pd.DatetimeIndex(DF1.time) + pd.DateOffset(6) date = date.to_series() cl = df[-23:]['close'] f_dates = predicted[predicted.columns[0]] f_dates = pd.to_datetime(f_dates) AR = predicted[predicted.columns[1]] ls = predicted[predicted.columns[2]] src = ColumnDataSource(data=dict( time=date, close=cl, future_dates=f_dates, ARIMA=AR, LSTM=ls)) src.add(df['time'].apply(lambda d: d.strftime('%m/%d/%Y')), 'date_') src.add(f_dates.apply(lambda d: d.strftime('%m/%d/%Y')), 'date2') # h1 = p.line(x='time', y='close', color='darkgreen', line_width=1, legend='past price', source=src) hover1 = HoverTool( renderers=[h1], tooltips=[ ('date', '@date_'), ("price:", "@close{$ 0,0.00}"), ], ) p.add_tools(hover1) h2 = p.line(x='future_dates', y='ARIMA', color='purple', line_width=2, legend='ARIMA price', source=src, name='arima') hover2 = HoverTool( renderers=[h2], tooltips=[ ('date', '@date2'), ("ARIMA:", "@ARIMA{$ 0,0.00}"), ], ) p.add_tools(hover2) #closeDF[] p.title.text = " Predicted price by ARIMA model" p.legend.location = "top_left" p.legend.click_policy = "hide" p.grid.grid_line_alpha = 0.1 p.xaxis.axis_label = 'Date' p.yaxis.axis_label = 'Price' p.ygrid.band_fill_color = 'navy' p.ygrid.band_fill_alpha = 0.1 p.legend.click_policy = "hide" h3 = p1.line(x='time', y='close', color='darkgreen', line_width=1, legend='past price', source=src) hover3 = HoverTool( renderers=[h3], tooltips=[ ('date', '@date_'), ("price:", "@close{$ 0,0.00}"), ], ) p1.add_tools(hover3) h4 = p1.line(x='future_dates', y='LSTM', color='purple', line_width=2, legend='LSTM price', source=src, name='lstm') hover4 = HoverTool( renderers=[h4], tooltips=[ ('date', '@date2'), ("LSTM:", "@LSTM{$ 0,0.00}"), ], ) p1.add_tools(hover4) #closeDF[] p1.title.text = " Predicted price by LSTM model" p1.legend.location = "top_left" p1.legend.click_policy = "hide" p1.grid.grid_line_alpha = 0.1 p1.xaxis.axis_label = 'Date' p1.yaxis.axis_label = 'Price' p1.ygrid.band_fill_color = 'coral' p1.ygrid.band_fill_alpha = 0.1 p1.legend.click_policy = "hide" grid = gridplot([[p, p1]]) return grid
def figure_scatter_values(df_chisq): df_chisq["casema07_diff07"] = df_chisq.case_ma07.diff(periods=1) df_chisq["testsma07_diff07"] = df_chisq.tests_ma07.diff(periods=1) df_chisq["casedet_diff07"] = df_chisq.case_detrended.diff(periods=1) df_chisq["casedetpct_diff07"] = df_chisq.caseDet_pct.diff(periods=1) df_chisq[ "angle"] = df_chisq.testsma07_diff07 / df_chisq.casema07_diff07 * 3.14 df_chisq["casema07_start"] = df_chisq.case_ma07 - df_chisq.casema07_diff07 df_chisq[ "testsma07_start"] = df_chisq.tests_ma07 - df_chisq.testsma07_diff07 df_chisq[ "casedet_start"] = df_chisq.case_detrended - df_chisq.casedet_diff07 df_chisq[ "casedetpct_start"] = df_chisq.caseDet_pct - df_chisq.casedetpct_diff07 df_chisq["dt_str"] = df_chisq.Date.dt.strftime("%Y-%m-%d") # FIXME # df_chisq.set_index(["CountryProv","Date"]).tail()[['case_ma07', 'tests_ma07', 'casema07_diff07', 'testsma07_diff07', 'casema07_start', 'testsma07_start']] print("gathering moving 14-day windows") #df_sub = df_chisq[df_chisq.Date >= "2020-04-28"] df_sub = df_chisq df_latest = [] dtmax_n = df_sub.Date.unique().max() dtmin_n = df_sub.Date.unique().min() import datetime as dt #dt_range = df_sub.Date.unique() dt_range = np.arange(dtmax_n, dtmin_n, dt.timedelta(days=-14)) #dtmax_s = str(dtmax_n)[:10] # http://stackoverflow.com/questions/28327101/ddg#28327650 for dt_i in dt_range: dt_delta = (dt_i - dtmin_n).astype('timedelta64[D]').astype(int) if dt_delta < 14: continue print(dt_i, dt_delta) df_i = df_sub[df_sub.Date <= dt_i] df_i = df_i.groupby("CountryProv").apply( lambda g: g.tail(14)).reset_index(drop=True) df_i["color"] = "#73b2ff" df_i["dtLast"] = dt_i df_latest.append(df_i) if len(df_latest) == 0: raise Exception("No data in moving window") df_latest = pd.concat(df_latest, axis=0) df_latest["display_cpcode"] = df_latest.apply( lambda g: "" if g.dtLast != g.Date else g.cp_code, axis=1) print("done") #source_hist = ColumnDataSource(df_chisq) #source_latest = ColumnDataSource(df_latest) # since cannot use View iwth LabelSet, creating a different source per continent # Couldn't figure out how to filter the datasource in add_layout or Arrow, # so just grouping on both continent and dtLast srcLatest_continent = df_latest.groupby( ["Continent", "dtLast"]).apply(lambda g: ColumnDataSource(g)) srcLatest_continent = srcLatest_continent.reset_index().rename( columns={0: "src"}) plot_size_and_tools = { 'plot_height': 300, 'plot_width': 600, 'tools': ['box_select', 'reset', 'help', 'box_zoom'], 'x_axis_type': 'datetime' } # general-use lines slope_y0 = Slope(gradient=0, y_intercept=0, line_color='orange', line_width=50) slope_x0 = Slope(gradient=np.Inf, y_intercept=0, line_color='orange', line_width=50) # scatter plot TOOLTIPS = [ ("Country/Region", "@CountryProv"), ("Date", "@dt_str"), ] # first set for case vs tests, then second set for case diff vs test diff params = ( #('values', 'tests_ma07', 'case_ma07', 'testsma07_start', 'casema07_start', 'ma07(Tests)', 'ma07(Cases)'), #('diffs', 'casema07_diff07', 'testsma07_diff07', 'diff07(ma07(Cases))', 'diff07(ma07(Tests))'), ('values', 'case_detrended', 'case_ma07', 'casedet_start', 'casema07_start', 'detrended(cases)', 'ma07(Cases)'), #('values', 'caseDet_pct', 'case_ma07', 'casedetpct_start', 'casema07_start', 'detrended(ma07(cases))/cases*100', 'ma07(Cases)'), ) p_all = {'values': [], 'diffs': []} from bokeh.models import Arrow, NormalHead, OpenHead, VeeHead for k, fdxv, fdyv, fdxs, fdys, labx, laby in params: p_cont = [] for srcCont_i in srcLatest_continent.iterrows(): srcCont_i = srcCont_i[1] 
print("Adding plot for %s, %s" % (srcCont_i.Continent, srcCont_i.dtLast)) #init_group=dtmax_s #gf = GroupFilter(column_name='dtLast', group=init_group) #view1 = CDSView(source=srcCont_i.src, filters=[gf]) p_d1 = figure(plot_width=600, plot_height=400, tooltips=TOOLTIPS, title="%s %s" % (srcCont_i.Continent, srcCont_i.dtLast)) #p_d1.triangle(fdxv, fdyv, source=srcCont_i.src, size=12, color='blue', angle="angle") #p_d1.scatter(fdxs, fdys, source=srcCont_i.src, size=3, color='red') #, view=view1) p_d1.scatter(fdxv, fdyv, source=srcCont_i.src, size=3, color='red') p_d1.add_layout( Arrow(end=VeeHead(size=6), x_start=fdxs, y_start=fdys, x_end=fdxv, y_end=fdyv, line_color='blue', source=srcCont_i.src #view=view1 # srcCont_i.src ) #, #view=view1 # not supported ) p_d1.xaxis.axis_label = labx p_d1.yaxis.axis_label = laby from bokeh.models import LabelSet labels = LabelSet(x=fdxv, y=fdyv, text='display_cpcode', level='glyph', x_offset=5, y_offset=5, source=srcCont_i.src, render_mode='canvas') p_d1.add_layout(labels) p_d1.add_layout(slope_y0) p_d1.add_layout(slope_x0) p_cont.append(p_d1) p_all[k] = p_cont # group plots into 3 per row # https://stackoverflow.com/a/1625013/4126114 from itertools import zip_longest for k in ['values', 'diffs']: p_cont = p_all[k] p_cont = list(zip_longest(*(iter(p_cont), ) * 3)) p_cont = [[e for e in t if e != None] for t in p_cont] p_all[k] = p_cont g = gridplot(p_all['values'] + p_all['diffs']) layout = column(g) return layout
           # (fragment: these keyword arguments continue a glyph call that starts above this excerpt)
           fill_alpha=0.8,
           size=4,
           #marker=factor_mark('CATEGORIA_DE_ATIVIDADE', marca, categorias),
           #color=factor_cmap('POPULACAO', cores, categorias),
           color=linear_cmap('POPULACAO', 'Viridis256', 0, 200000,
                             nan_color="#00FFFF"),
           selection_color='deepskyblue',
           nonselection_color='lightgray',
           source=grafico2_DS)

p3 = figure(title="Visão de população e riqueza",  # "Population and wealth view"
            plot_width=512, plot_height=512, tools=TOOLS)
p3.xaxis.axis_label = 'População'
p3.yaxis.axis_label = 'PIB per capita'
p3.circle(x='POPULACAO', y='PRODUTO_INTERNO_BRUTO_PER_CAPITA',
          source=grafico2_DS,
          color=linear_cmap('PRODUTO_INTERNO_BRUTO_PER_CAPITA', 'Magma256', 0, 100000),
          radius='PRODUTO_INTERNO_BRUTO_PER_CAPITA',
          selection_color='deepskyblue',
          nonselection_color='lightgray')

grid = gridplot([[p1, p2, p3]])
show(grid)
p5 = figure(title='Wind SHOCKS', tools=tools)
p5.scatter('SPEED', 'Np', color='black', source=source)
p5.select_one(HoverTool).tooltips = tool_tips
p5.xaxis.axis_label = '|V| [km/s]'
p5.yaxis.axis_label = 'Np [cm^-3]'

p6 = figure(title='Wind SHOCKS', tools=tools)
p6.scatter('SPEED', 'Vth', color='black', source=source)
p6.select_one(HoverTool).tooltips = tool_tips
p6.xaxis.axis_label = '|V| [km/s]'
p6.yaxis.axis_label = 'Vth [km/s]'

p7 = figure(title='Wind SHOCKS', tools=tools)
p7.scatter('Np', 'Vth', color='black', source=source)
p7.select_one(HoverTool).tooltips = tool_tips
p7.xaxis.axis_label = 'Np [cm^-3]'
p7.yaxis.axis_label = 'Vth [km/s]'

# list of rows for plotting
plots = [[p1, p2], [p3, p4], [p5, p6], [p7]]

### EMBEDDING BOKEH
#script, div = components(plots)

# gridplot takes the rows as a single nested list, so pass `plots` rather than
# each row as a separate positional argument.
save(gridplot(plots),
     filename='../plots/bokeh_training_plot_sigma_wind.html')
def plot(self, metric="r2q2", grid_line=True): """Create a full/cv plot using based on metric selected. Parameters ---------- metric : string, (default "r2q2") metric has to be either "r2q2", "auc", "acc", "f1score", "prec", "sens", or "spec". """ # Choose metric to plot metric_title = np.array([ "ACCURACY", "AUC", "F1-SCORE", "PRECISION", "R²", "SENSITIVITY", "SPECIFICITY" ]) metric_list = np.array( ["acc", "auc", "f1score", "prec", "r2q2", "sens", "spec"]) metric_idx = np.where(metric_list == metric)[0][0] # get full, cv, and diff full = self.table.iloc[2 * metric_idx + 1] cv = self.table.iloc[2 * metric_idx] diff = abs(full - cv) full_text = self.table.iloc[2 * metric_idx + 1].name cv_text = self.table.iloc[2 * metric_idx].name diff_text = "DIFFERENCE " + "(" + full_text + " - " + cv_text + ")" # round full, cv, and diff for hovertool full_hover = [] cv_hover = [] diff_hover = [] for j in range(len(full)): full_hover.append("%.2f" % round(full[j], 2)) cv_hover.append("%.2f" % round(cv[j], 2)) diff_hover.append("%.2f" % round(diff[j], 2)) # get key, values (as string) from param_dict (key -> title, values -> x axis values) for k, v in self.param_dict.items(): key = k values = v values_string = [str(i) for i in values] # store data in ColumnDataSource for Bokeh data = dict(full=full, cv=cv, diff=diff, full_hover=full_hover, cv_hover=cv_hover, diff_hover=diff_hover, values_string=values_string) source = ColumnDataSource(data=data) fig1_yrange = (min(diff) - max(0.1 * (min(diff)), 0.1), max(diff) + max(0.1 * (max(diff)), 0.1)) fig1_xrange = (min(cv) - max(0.1 * (min(cv)), 0.1), max(cv) + max(0.1 * (max(cv)), 0.1)) fig1_title = diff_text + " vs " + cv_text # Figure 1 (DIFFERENCE (R2 - Q2) vs. Q2) fig1 = figure( x_axis_label=cv_text, y_axis_label=diff_text, title=fig1_title, tools= "tap,pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select", y_range=fig1_yrange, x_range=fig1_xrange, plot_width=485, plot_height=405) # Figure 1: Add a line fig1_line = fig1.line(cv, diff, line_width=2, line_color="black", line_alpha=0.25) # Figure 1: Add circles (interactive click) fig1_circ = fig1.circle("cv", "diff", size=17, alpha=0.7, color="green", source=source) fig1_circ.selection_glyph = Circle(fill_color="green", line_width=2, line_color="black", fill_alpha=0.6) fig1_circ.nonselection_glyph.fill_color = "green" fig1_circ.nonselection_glyph.fill_alpha = 0.4 fig1_circ.nonselection_glyph.line_color = "white" fig1_text = fig1.text(x="cv", y="diff", text="values_string", source=source, text_font_size="10pt", text_color="white", x_offset=-3.5, y_offset=7) fig1_text.nonselection_glyph.text_color = "white" fig1_text.nonselection_glyph.text_alpha = 0.6 # Figure 1: Add hovertool fig1.add_tools( HoverTool(renderers=[fig1_circ], tooltips=[(full_text, "@full_hover"), (cv_text, "@cv_hover"), ("Diff", "@diff_hover")])) # Figure 1: Extra formating fig1.axis.major_label_text_font_size = "8pt" if metric is "r2q2" or metric is "auc": fig1.title.text_font_size = "12pt" fig1.xaxis.axis_label_text_font_size = "10pt" fig1.yaxis.axis_label_text_font_size = "10pt" else: fig1.title.text_font_size = "10pt" fig1.xaxis.axis_label_text_font_size = "9pt" fig1.yaxis.axis_label_text_font_size = "9pt" # Figure 2: full/cv fig2_title = full_text + " & " + cv_text + " vs no. 
of components" fig2 = figure( x_axis_label="components", y_axis_label="Value", title=fig2_title, plot_width=485, plot_height=405, x_range=pd.unique(values_string), y_range=(0, 1.1), tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select") # Figure 2: add confidence intervals if bootnum > 1 if self.bootnum > 1: lower_ci_full = [] upper_ci_full = [] lower_ci_cv = [] upper_ci_cv = [] # Get all upper, lower 95% CI (full/cv) for each specific n_component and append for m in range(len(self.full_boot_metrics)): full_boot = [] cv_boot = [] for k in range(len(self.full_boot_metrics[0])): full_boot.append( self.full_boot_metrics[m][k][metric_title[metric_idx]]) cv_boot.append( self.cv_boot_metrics[m][k][metric_title[metric_idx]]) # Calculated percentile 95% CI and append full_bias = np.mean(full_boot) - full[m] cv_bias = np.mean(cv_boot) - cv[m] lower_ci_full.append(np.percentile(full_boot, 2.5) - full_bias) upper_ci_full.append( np.percentile(full_boot, 97.5) - full_bias) lower_ci_cv.append(np.percentile(cv_boot, 2.5) - cv_bias) upper_ci_cv.append(np.percentile(cv_boot, 97.5) - cv_bias) # Plot as a patch x_patch = np.hstack((values_string, values_string[::-1])) y_patch_r2 = np.hstack((lower_ci_full, upper_ci_full[::-1])) fig2.patch(x_patch, y_patch_r2, alpha=0.10, color="red") y_patch_q2 = np.hstack((lower_ci_cv, upper_ci_cv[::-1])) fig2.patch(x_patch, y_patch_q2, alpha=0.10, color="blue") # Figure 2: add full fig2_line_full = fig2.line(values_string, full, line_color="red", line_width=2) fig2_circ_full = fig2.circle("values_string", "full", line_color="red", fill_color="white", fill_alpha=1, size=8, source=source, legend=full_text) fig2_circ_full.selection_glyph = Circle(line_color="red", fill_color="white", line_width=2) fig2_circ_full.nonselection_glyph.line_color = "red" fig2_circ_full.nonselection_glyph.fill_color = "white" fig2_circ_full.nonselection_glyph.line_alpha = 0.4 # Figure 2: add cv fig2_line_cv = fig2.line(values_string, cv, line_color="blue", line_width=2) fig2_circ_cv = fig2.circle("values_string", "cv", line_color="blue", fill_color="white", fill_alpha=1, size=8, source=source, legend=cv_text) fig2_circ_cv.selection_glyph = Circle(line_color="blue", fill_color="white", line_width=2) fig2_circ_cv.nonselection_glyph.line_color = "blue" fig2_circ_cv.nonselection_glyph.fill_color = "white" fig2_circ_cv.nonselection_glyph.line_alpha = 0.4 # Add hovertool and taptool fig2.add_tools( HoverTool(renderers=[fig2_circ_full], tooltips=[(full_text, "@full_hover")], mode="vline")) fig2.add_tools( HoverTool(renderers=[fig2_circ_cv], tooltips=[(cv_text, "@cv_hover")], mode="vline")) fig2.add_tools(TapTool(renderers=[fig2_circ_full, fig2_circ_cv])) # Figure 2: Extra formating fig2.axis.major_label_text_font_size = "8pt" if metric is "r2q2" or metric is "auc": fig2.title.text_font_size = "12pt" fig2.xaxis.axis_label_text_font_size = "10pt" fig2.yaxis.axis_label_text_font_size = "10pt" else: fig2.title.text_font_size = "10pt" fig2.xaxis.axis_label_text_font_size = "9pt" fig2.yaxis.axis_label_text_font_size = "9pt" # Figure 2: legend if metric is "r2q2": fig2.legend.location = "top_left" else: fig2.legend.location = "bottom_right" # Remove grid lines if grid_line == False: fig1.xgrid.visible = False fig1.ygrid.visible = False fig2.xgrid.visible = False fig2.ygrid.visible = False # Create a grid and output figures grid = np.full((1, 2), None) grid[0, 0] = fig1 grid[0, 1] = fig2 fig = gridplot(grid.tolist(), merge_tools=True) output_notebook() show(fig)
def index(): df = pd.read_csv('static/data/df.csv') df_group = pd.read_csv('static/data/df_group.csv') df_group_agg = pd.read_csv('static/data/df_group_agg.csv') df_profit = pd.read_csv('static/data/df_profit.csv') df_profit_pred_comp = pd.read_csv('static/data/df_profit_pred_comp.csv') df_profit_formatted = pd.read_csv('static/data/df_profit_formatted.csv') df_profit_pred_comp_formatted = pd.read_csv( 'static/data/df_profit_pred_comp_formatted.csv') df_price_chg = pd.read_csv('static/data/df_price_chg.csv') df_price_chg_formatted = pd.read_csv( 'static/data/df_price_chg_formatted.csv') profit_columns = map(lambda x: {"title": x}, df_profit_formatted.columns) profit_dataset = df_profit_formatted.values.tolist() profit_pred_columns = map(lambda x: {"title": x}, df_profit_pred_comp_formatted.columns) profit_pred_dataset = df_profit_pred_comp_formatted.values.tolist() df_scatter = pd.concat([df_profit, df_profit_pred_comp, df_price_chg], axis=1) df_scatter = df_scatter.loc[:, ~df_scatter.columns.duplicated()] scatter_variables = map(lambda x: {"text": x}, [ c for c in df_scatter.columns if isinstance(df_scatter[c][0], Number) ]) id_name = u'股票简称' # draw plots # create mappers palette.reverse() color_mapper = LinearColorMapper(palette=palette, low=-50, high=50) def func(): return x def v_func(): return [(x * 100).toFixed(2) + '%' for x in xs] label_formatter = CustomJSTransform.from_py_func(func=func, v_func=v_func) price_chg_5d = price_chg_plot(df, 'PCT_CHG_PER-5-DAY', 'SEC_NAME', '上证流通全收益', color_mapper, label_formatter, title_text='近5天涨幅%') price_chg_5d_script, price_chg_5d_div = components(price_chg_5d) price_chg_10d = price_chg_plot(df, 'PCT_CHG_PER-10-DAY', 'SEC_NAME', '上证流通全收益', color_mapper, label_formatter, title_text='近10天涨幅%') price_chg_10d_script, price_chg_10d_div = components(price_chg_10d) price_chg_1m = price_chg_plot(df, 'PCT_CHG_PER-1-MONTH', 'SEC_NAME', '上证流通全收益', color_mapper, label_formatter, title_text='近1个月涨幅%') price_chg_1m_script, price_chg_1m_div = components(price_chg_1m) price_chg_3m = price_chg_plot(df, 'PCT_CHG_PER-3-MONTH', 'SEC_NAME', '上证流通全收益', color_mapper, label_formatter, title_text='近3个月涨幅%') price_chg_3m_script, price_chg_3m_div = components(price_chg_3m) price_chg_6m = price_chg_plot(df, 'PCT_CHG_PER-6-MONTH', 'SEC_NAME', '上证流通全收益', color_mapper, label_formatter, title_text='近6个月涨幅%') price_chg_6m_script, price_chg_6m_div = components(price_chg_6m) price_chg_12m = price_chg_plot(df, 'PCT_CHG_PER-12-MONTH', 'SEC_NAME', '上证流通全收益', color_mapper, label_formatter, title_text='近12个月涨幅%') price_chg_12m_script, price_chg_12m_div = components(price_chg_12m) chart_grid = gridplot([[price_chg_5d, price_chg_10d, price_chg_1m], [price_chg_3m, price_chg_6m, price_chg_12m]]) chart_grid_script, chart_grid_div = components(chart_grid) return render_template("index.html", price_chg_5d_div=price_chg_5d_div, price_chg_5d_script=price_chg_5d_script, price_chg_10d_div=price_chg_10d_div, price_chg_10d_script=price_chg_10d_script, price_chg_1m_div=price_chg_1m_div, price_chg_1m_script=price_chg_1m_script, price_chg_3m_div=price_chg_3m_div, price_chg_3m_script=price_chg_3m_script, price_chg_6m_div=price_chg_6m_div, price_chg_6m_script=price_chg_6m_script, price_chg_12m_div=price_chg_12m_div, price_chg_12m_script=price_chg_12m_script, chart_grid_script=chart_grid_script, chart_grid_div=chart_grid_div, profit_columns=profit_columns, profit_dataset=profit_dataset, profit_pred_columns=profit_pred_columns, profit_pred_dataset=profit_pred_dataset, scatter_variables=scatter_variables, 
id_name=id_name, scatter_data=df_scatter.to_json(orient='records'))
size=12, fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5) plot.add_glyph(circles_source, circle) return plot #where will this comment show up I = make_plot('I', 'xi', 'yi') II = make_plot('II', 'xii', 'yii') III = make_plot('III', 'xiii', 'yiii') IV = make_plot('IV', 'xiv', 'yiv') grid = gridplot([[I, II], [III, IV]], toolbar_location=None) div = Div(text=""" <h1>Anscombe's Quartet</h1> <p>Anscombe's quartet is a collection of four small datasets that have nearly identical simple descriptive statistics (mean, variance, correlation, and linear regression lines), yet appear very different when graphed. </p> """) doc = Document() doc.add_root(column(div, grid, sizing_mode="scale_width")) if __name__ == "__main__": doc.validate() filename = "anscombe.html"