def _plot_bokeh_MA(self, deseq_data_mod):
    """Render an interactive MA plot (log10 base mean vs. log2 fold change).

    Splits *deseq_data_mod* (a DESeq2 result table — assumes columns
    'padj', 'baseMean', 'log2FoldChange', 'Name', 'gbkey', 'product';
    TODO confirm against caller) into significant and non-significant rows
    at ``self._cutoff``, draws each group via ``self._plot``, then chains
    into ``self._plot_bokeh_MH`` which appends the Manhattan plots and
    writes the combined HTML file.
    """
    plots = []
    # Partition rows by adjusted p-value at the configured cutoff.
    deseq_sig = deseq_data_mod[deseq_data_mod.padj < self._cutoff]
    deseq_no_sig = deseq_data_mod[deseq_data_mod.padj >= self._cutoff]
    # Hover fields reference columns of the ColumnDataSources built below.
    pl = figure(tools=[HoverTool(tooltips=[
        ("Protein_ID", "@Name"),
        ("Sequence type", "@gbkey"),
        ("Product", "@product"),
        ("log2 fold change", "@log2FoldChange"),
        ("base mean", "@baseMean")]),
        PanTool(), BoxSelectTool(), BoxZoomTool(),
        WheelZoomTool(), ResizeTool(), ResetTool()])
    pl.background_fill_color = "White"
    pl.grid.grid_line_color = "black"
    pl.xaxis.axis_label = 'log10 base mean'
    pl.yaxis.axis_label = 'log2 fold change'
    pl.title.text = 'MA Plot'
    # Significant points; colour/size/alpha come from instance settings.
    self._plot(pl, np.log10(deseq_sig["baseMean"]),
               deseq_sig["log2FoldChange"], self._col_sig,
               self._glyph_size, self._alpha,
               'padj significant (cutoff: ' + str(self._cutoff) + ')',
               ColumnDataSource(deseq_sig))
    # Non-significant points.
    self._plot(pl, np.log10(deseq_no_sig["baseMean"]),
               deseq_no_sig["log2FoldChange"], self._col_non_sig,
               self._glyph_size, self._alpha,
               'padj non-significant',
               ColumnDataSource(deseq_no_sig))
    plots.append(pl)
    # Hand off: the Manhattan step extends `plots` and saves the HTML output.
    self._plot_bokeh_MH(deseq_data_mod, plots)
def __init__(self,**kwargs): super(WeatherPlot,self).__init__(**kwargs) # def __init__(self, # data, # column_names=cs.column_names_weewx, # column_time=cs.time_column_name_weewx, # plot_height=300, # plot_width=800, # border_left=150, # **kwargs): # if "tool_events" not in kwargs: # kwargs["tool_events"] = ToolEvents() # super(WeatherPlot,self).__init__(x_range=DataRange1d(), # y_range=DataRange1d(), # plot_width=800, # plot_height=500, # min_border_left=150, # **kwargs) time_int = 3600 t_max = int(time.time()) t_min = t_max - 3600 * 24 data = sql_con.get_data_ws(time_int=time_int, t_max=t_max, t_min=t_min) self._data = data column_names=cs.column_names_weewx column_time=cs.time_column_name_weewx data_seconds = data data_seconds.iloc[:, 0] = data_seconds.iloc[:, 0] * 1000 add_glyphs_to_plot(column_names, column_time, data_seconds, self) self.add_layout(DatetimeAxis(), 'below') self.add_tools(PanTool(), WheelZoomTool(), ResizeTool(), CrosshairTool())
def showInteractiveDf(self, quotes):
    """Show an interactive candlestick chart for *quotes* in a notebook.

    Expects a DataFrame indexed by datetime with columns
    'open'/'close'/'high'/'low'/'volume'; the hover also references a
    '@money' field — presumably a 'money' column, TODO confirm.
    NOTE(review): mutates *quotes* in place (NaN-blanking, 'date' column).
    """
    import time
    from math import pi
    import pandas as pd
    from bokeh.io import output_notebook
    from bokeh.plotting import figure, show
    from bokeh.models import ColumnDataSource, Rect, HoverTool, Range1d, LinearAxis, WheelZoomTool, PanTool, ResetTool, ResizeTool, PreviewSaveTool
    import numpy as np
    output_notebook()
    # Blank out whole rows where no volume traded, then drop them.
    quotes[quotes['volume'] == 0] = np.nan
    quotes = quotes.dropna()
    openp = quotes['open']
    closep = quotes['close']
    highp = quotes['high']
    lowp = quotes['low']
    volume = quotes['volume']
    # NOTE(review): shadows the `time` module imported above.
    time = quotes.index
    date = [x.strftime("%Y-%m-%d") for x in quotes.index]
    quotes['date'] = date
    w = 12 * 60 * 60 * 1000  # half day in ms
    mids = (openp + closep) / 2     # candle body centres
    spans = abs(closep - openp)     # candle body heights
    inc = closep > openp            # up candles
    dec = openp > closep            # down candles
    ht = HoverTool(tooltips=[
        ("date", "@date"),
        ("open", "@open"),
        ("close", "@close"),
        ("high", "@high"),
        ("low", "@low"),
        ("volume", "@volume"),
        ("money", "@money"),])
    TOOLS = [ht, WheelZoomTool(dimensions=['width']), ResizeTool(), ResetTool(), PanTool(dimensions=['width']), PreviewSaveTool()]
    max_x = max(highp)
    min_x = min(lowp)
    x_range = max_x - min_x
    # Extra headroom above and space below the price range.
    y_range = (min_x - x_range / 2.0, max_x + x_range * 0.1)
    p = figure(x_axis_type="datetime", tools=TOOLS, plot_height=600, plot_width=950, toolbar_location="above", y_range=y_range)
    p.xaxis.major_label_orientation = pi / 4
    p.grid.grid_line_alpha = 0.3
    p.background_fill = "black"
    quotesdate = dict(date1=quotes['date'], open1=openp, close1=closep, high1=highp, low1=lowp)
    # NOTE(review): return value unused — presumably a leftover.
    ColumnDataSource(quotesdate)
    x_rect_inc_src = ColumnDataSource(quotes[inc])
    x_rect_dec_src = ColumnDataSource(quotes[dec])
    # Colour convention here: red = up candle, green = down candle.
    p.rect(time[inc], mids[inc], w, spans[inc], fill_color="red", line_color="red", source=x_rect_inc_src)
    p.rect(time[dec], mids[dec], w, spans[dec], fill_color="green", line_color="green", source=x_rect_dec_src)
    # High-low wicks.
    p.segment(time[inc], highp[inc], time[inc], lowp[inc], color="red")
    p.segment(time[dec], highp[dec], time[dec], lowp[dec], color="green")
    show(p)
def large_plot(n):
    """Build a Column of *n* minimal line plots.

    Returns ``(col, objects)`` where *objects* is the set of every bokeh
    model created — presumably compared against a reference-collection
    pass by the caller (TODO confirm). Python 2 code (``xrange``).
    """
    from bokeh.models import (Plot, LinearAxis, Grid, GlyphRenderer,
                              ColumnDataSource, DataRange1d, PanTool,
                              ZoomInTool, ZoomOutTool, WheelZoomTool,
                              BoxZoomTool, BoxSelectTool, ResizeTool,
                              SaveTool, ResetTool)
    from bokeh.models.layouts import Column
    from bokeh.models.glyphs import Line

    col = Column()
    objects = set([col])

    for i in xrange(n):
        # One two-point line per plot: (0,0) -> (i+1, i+1).
        source = ColumnDataSource(data=dict(x=[0, i + 1], y=[0, i + 1]))
        xdr = DataRange1d()
        ydr = DataRange1d()
        plot = Plot(x_range=xdr, y_range=ydr)
        xaxis = LinearAxis(plot=plot)
        yaxis = LinearAxis(plot=plot)
        xgrid = Grid(plot=plot, dimension=0)
        ygrid = Grid(plot=plot, dimension=1)
        # Sub-models created implicitly by the axes, tracked too.
        tickers = [xaxis.ticker, xaxis.formatter,
                   yaxis.ticker, yaxis.formatter]
        glyph = Line(x='x', y='y')
        renderer = GlyphRenderer(data_source=source, glyph=glyph)
        plot.renderers.append(renderer)
        pan = PanTool()
        zoom_in = ZoomInTool()
        zoom_out = ZoomOutTool()
        wheel_zoom = WheelZoomTool()
        box_zoom = BoxZoomTool()
        box_select = BoxSelectTool()
        resize = ResizeTool()
        save = SaveTool()
        reset = ResetTool()
        tools = [pan, zoom_in, zoom_out, wheel_zoom, box_zoom,
                 box_select, resize, save, reset]
        plot.add_tools(*tools)
        col.children.append(plot)
        # Record every model reachable from this plot, including the
        # implicit scales, toolbar, title and the box tools' overlays.
        objects |= set([source, xdr, ydr, plot, xaxis, yaxis, xgrid, ygrid,
                        renderer, glyph, plot.x_scale, plot.y_scale,
                        plot.toolbar, plot.tool_events, plot.title,
                        box_zoom.overlay, box_select.overlay] + tickers + tools)

    return col, objects
def alignment_stats(self, input_path_file, output_path_file_csv, output_path_file_html):
    """Export alignment statistics to CSV and a stacked-bar HTML chart.

    Reads a JSON stats file (index-oriented; rows assumed to carry a
    'stats_per_reference' dict with an 'alignment_length_and_freqs'
    length->frequency mapping — TODO confirm producer format), writes a
    TSV copy, and renders an aligned-read-length frequency bar chart.
    """
    df = pd.read_json(input_path_file, orient='index')
    # Tab-separated copy of the raw table next to the HTML report.
    df.to_csv(output_path_file_csv + '.csv', index=True, sep='\t')
    samples = []
    alignment_length = []
    alignment_freq = []
    for index, row in df.iterrows():
        for key, value in row['stats_per_reference'].items():
            # One "sample(reference)" label per length/freq pair below;
            # self.nr_items_align presumably counts those pairs.
            samples.extend([index + '(' + key + ')'] * int(self.nr_items_align(value)))
            for k, v in value.items():
                if k == 'alignment_length_and_freqs':
                    for keys, values in v.items():
                        alignment_length.extend([float(keys)])
                        alignment_freq.extend([values])
    data1 = {}
    data1['samples'] = samples
    data1['aligned read length'] = alignment_length
    data1['frequency'] = alignment_freq
    bar1 = Bar(data1, values='frequency', label='aligned read length',
               stack='samples', agg='sum',
               title="Alignment read length and frequency",
               legend='top_left', width=1200, bar_width=1.0,
               palette=['Blue', 'Aqua', 'SeaGreen', 'SpringGreen',
                        'Brown', 'Peru', 'Purple', 'Violet'],
               tools=[HoverTool(tooltips=[("Read length", "@x"),
                                          ("Frequency", "@y")]),
                      PanTool(), BoxSelectTool(), BoxZoomTool(),
                      WheelZoomTool(), ResizeTool(), ResetTool()])
    output_file(output_path_file_html + '.html')
    show(bar1)
def large_plot(n):
    """Build a VBox of *n* minimal line plots (pre-0.12 bokeh API).

    Returns ``(vbox, objects)`` where *objects* is the set of every bokeh
    model created — presumably checked against reference collection by the
    caller (TODO confirm). Python 2 code (``xrange``); tools take the old
    ``plot=`` kwarg and selection uses BoxSelectionOverlay.
    """
    from bokeh.models import (Plot, LinearAxis, Grid, GlyphRenderer,
                              ColumnDataSource, DataRange1d, PanTool,
                              WheelZoomTool, BoxZoomTool, BoxSelectTool,
                              BoxSelectionOverlay, ResizeTool,
                              PreviewSaveTool, ResetTool)
    from bokeh.models.widgets.layouts import VBox
    from bokeh.models.glyphs import Line

    vbox = VBox()
    objects = set([vbox])

    for i in xrange(n):
        # One two-point line per plot: (0,0) -> (i+1, i+1).
        source = ColumnDataSource(data=dict(x=[0, i + 1], y=[0, i + 1]))
        xdr = DataRange1d()
        ydr = DataRange1d()
        plot = Plot(x_range=xdr, y_range=ydr)
        xaxis = LinearAxis(plot=plot)
        yaxis = LinearAxis(plot=plot)
        xgrid = Grid(plot=plot, dimension=0)
        ygrid = Grid(plot=plot, dimension=1)
        # Sub-models created implicitly by the axes, tracked too.
        tickers = [xaxis.ticker, xaxis.formatter,
                   yaxis.ticker, yaxis.formatter]
        glyph = Line(x='x', y='y')
        renderer = GlyphRenderer(data_source=source, glyph=glyph)
        plot.renderers.append(renderer)
        pan = PanTool(plot=plot)
        wheel_zoom = WheelZoomTool(plot=plot)
        box_zoom = BoxZoomTool(plot=plot)
        box_select = BoxSelectTool(plot=plot)
        # Old API: the selection overlay is a renderer attached manually.
        box_selection = BoxSelectionOverlay(tool=box_select)
        plot.renderers.append(box_selection)
        resize = ResizeTool(plot=plot)
        previewsave = PreviewSaveTool(plot=plot)
        reset = ResetTool(plot=plot)
        tools = [pan, wheel_zoom, box_zoom, box_select,
                 resize, previewsave, reset]
        plot.tools.extend(tools)
        vbox.children.append(plot)
        objects |= set([source, xdr, ydr, plot, xaxis, yaxis, xgrid, ygrid,
                        renderer, glyph, plot.tool_events,
                        box_selection] + tickers + tools)

    return vbox, objects
def large_plot():
    """Assemble a single Plot exercising many model types.

    The plot carries tagged ranges, an extra named y-range ("liny"),
    three named glyphs (circle/line/rect), several axes and grids, and a
    standard toolset; returned for use by the caller.
    """
    data = ColumnDataSource(data=dict(x=[0, 1], y=[0, 1]))

    # Primary ranges, each decorated with tags (note: one tag is an int).
    x_range = Range1d(start=0, end=1)
    x_range.tags.append("foo")
    x_range.tags.append("bar")
    y_range = Range1d(start=10, end=20)
    y_range.tags.append("foo")
    y_range.tags.append(11)

    plot = Plot(x_range=x_range, y_range=y_range)
    plot.extra_y_ranges = {"liny": Range1d(start=0, end=100)}

    # Three named glyphs sharing the same data source.
    named_glyphs = (
        ("mycircle", Circle(x="x", y="y", fill_color="red", size=5,
                            line_color="black")),
        ("myline", Line(x="x", y="y")),
        ("myrect", Rect(x="x", y="y", width=1, height=1,
                        fill_color="green")),
    )
    for label, shape in named_glyphs:
        plot.add_glyph(data, shape, name=label)

    plot.add_layout(DatetimeAxis(), 'below')
    for layout_item in (LogAxis(),
                        LinearAxis(y_range_name="liny"),
                        Grid(dimension=0),
                        Grid(dimension=1)):
        plot.add_layout(layout_item, 'left')

    plot.add_tools(
        BoxZoomTool(),
        PanTool(),
        SaveTool(),
        ResetTool(),
        ResizeTool(),
        WheelZoomTool(),
    )

    return plot
def __init__(self, root, scaled=True, nodelabels=True, tiplabels=True,
             showplot=True, hover=False):
    """ BokehTree class for plotting trees
        Args:
          root (Node): A node object.
          scaled (bool): Whether or not the tree is scaled.
            Optional, defaults to True
          nodelabels (bool): Whether or not to show node labels.
            Optional, defaults to True.
          tiplabels (bool): Whether or not to show tip labels.
            Optional, defaults to True.
          showplot(bool): Whether or not to display when drawtree
            is called. Optional, defaults to True.
          hover (bool): Whether or not to use the hover tool.
            Optional, defaults to false.
    """
    self.root = root
    self.n2c = None      # node -> coordinate mapping, filled in later
    self.xoff = 0        # x offset for drawing
    self.yoff = 0        # y offset for drawing
    self.coords = None   # Plot coordinates for each node
    # Default interactive toolset (hover is handled separately via `hover`).
    self.tools = [WheelZoomTool(), BoxZoomTool(), ResizeTool(),
                  ResetTool(), PanTool(), PreviewSaveTool()]
    self.source = None   # Data source for plotting node points.
    self.nodelabels = nodelabels
    self.tiplabels = tiplabels
    self.showplot = showplot
    self.scaled = scaled
    self.hover = hover
def _init_figure(self):
    """Create a fresh figure with the standard toolset and hover wiring.

    Wheel zoom is made the active scroll tool; the hover controller is
    created on first use and re-pointed at the new figure afterwards.
    Returns the configured figure.
    """
    zoom = WheelZoomTool()
    toolset = [zoom, PanTool(), ResizeTool(), SaveTool(), ResetTool()]
    fig = figure(tools=toolset, width=self._width, height=self._height)
    fig.toolbar.active_scroll = zoom

    # Hover shows the point name plus every nominal/dimensional label.
    tip_fields = ['name']
    tip_fields += self._input_data_controller.get_nominal_labels()
    tip_fields += self._input_data_controller.get_dimensional_labels()

    if self._hover_controller is not None:
        self._hover_controller.set_new_figure(fig)
    else:
        self._hover_controller = HoverController(fig, properties=tip_fields)
    return fig
def create_graph(sentiments, history, ticker):
    """Plot market close price against Twitter sentiment on twin y-axes.

    Args (TODO confirm against caller):
      sentiments: DataFrame with 'bearish'/'bullish'/'undetermined'
        columns, indexed by date.
      history: DataFrame with 'Date' and 'Close' columns.
      ticker: symbol string used as the price-line legend label.
    Returns the configured bokeh figure.
    """
    tools = [HoverTool(tooltips=[("Date", "$index"),
                                 ("Cumulative sentiment", "$y")]),
             BoxSelectTool(), BoxZoomTool(), CrosshairTool(),
             ResizeTool(), ResetTool()]
    _plot = figure(title="Twitter Sentiment vs Market Close values",
                   plot_width=1600, plot_height=800,
                   x_axis_type="datetime", tools=tools,
                   toolbar_location="above")
    # Primary (left) axis: close price, padded by 5 on each side.
    _plot.y_range = Range1d(history.Close.min() - 5, history.Close.max() + 5)
    _plot.xaxis.axis_label = "Date"
    _plot.yaxis.axis_label = "Close Price"
    _plot.xaxis.formatter = DatetimeTickFormatter(
        hours=["%d %B %Y"],
        days=["%d %B %Y"],
        months=["%d %B %Y"],
        years=["%d %B %Y"],
    )
    # Secondary range for the three sentiment series.
    _plot.extra_y_ranges = {"sentiment": Range1d(
        start=sentiments.undetermined.min() - 2,
        end=sentiments.bearish.max() + 5)}
    # Price series on the primary axis.
    _plot.line(x=history.Date, y=history.Close, legend=ticker,
               line_color='blue', line_width=2)
    _plot.circle(x=history.Date, y=history.Close, fill_color="white",
                 line_color="blue", size=6)
    # Adding the second axis to the plot.
    _plot.add_layout(LinearAxis(y_range_name="sentiment"), 'right')
    # Sentiment series, all on the secondary range.
    _plot.line(x=sentiments.index, y=sentiments.bearish, legend='bearish',
               line_color='brown', line_width=2, y_range_name="sentiment")
    _plot.circle(x=sentiments.index, y=sentiments.bearish,
                 fill_color="white", line_color="brown", size=6,
                 y_range_name="sentiment")
    _plot.line(x=sentiments.index, y=sentiments.undetermined,
               legend='undetermined', line_color='black', line_width=2,
               y_range_name="sentiment")
    _plot.circle(x=sentiments.index, y=sentiments.undetermined,
                 fill_color="white", line_color="black", size=6,
                 y_range_name="sentiment")
    _plot.line(x=sentiments.index, y=sentiments.bullish, legend='bullish',
               line_color='red', line_width=2, y_range_name="sentiment")
    _plot.circle(x=sentiments.index, y=sentiments.bullish,
                 fill_color="white", line_color="red", size=6,
                 y_range_name="sentiment")
    return _plot
def get_ws_plot(data,
                plot_height=300,
                plot_width=800,
                column_names=cs.column_names_weewx,
                column_time=cs.time_column_name_weewx,
                border_left=200,
                **kwargs):
    """Build a weather-station time-series Plot from *data*.

    Args:
      data: DataFrame whose first column is an epoch-seconds timestamp
        (converted to milliseconds in place for bokeh's DatetimeAxis —
        NOTE(review): this mutates the caller's frame).
      plot_height / plot_width: figure dimensions in pixels.
      column_names / column_time: weewx column configuration from `cs`.
      border_left: minimum left border in pixels.
      **kwargs: forwarded verbatim to ``Plot``.

    Bug fix: the body already forwarded ``**kwargs`` to ``Plot(...)`` but
    the signature never accepted it, so every call raised NameError.
    Adding the ``**kwargs`` parameter is backward-compatible.

    Returns the configured Plot.
    """
    data_seconds = data
    # weewx stores epoch seconds; DatetimeAxis expects milliseconds.
    data_seconds.iloc[:, 0] = data_seconds.iloc[:, 0] * 1000
    plot = Plot(x_range=DataRange1d(),
                y_range=DataRange1d(),
                plot_width=plot_width,
                plot_height=plot_height,
                min_border_left=border_left,
                **kwargs)
    add_glyphs_to_plot(column_names, column_time, data_seconds, plot)
    plot.add_layout(DatetimeAxis(), 'below')
    plot.add_tools(PanTool(), WheelZoomTool(), ResizeTool(), CrosshairTool())
    return plot
def _plot_bokeh_MH(self, deseq_data_mod, plots):
    """Append one Manhattan plot per replicon and write the combined HTML.

    Groups *deseq_data_mod* by 'Sequence name', draws significant /
    non-significant points (split at ``self._cutoff``) per replicon, then
    stacks all accumulated *plots* (including the MA plot passed in) into
    a single column and writes the HTML report to ``self._output_path``.
    """
    for replicon, data_group in deseq_data_mod.groupby(["Sequence name"]):
        # Split this replicon's genes by adjusted p-value.
        data_group_sig = data_group[data_group.padj < self._cutoff]
        data_group_no_sig = data_group[data_group.padj >= self._cutoff]
        pl = figure(tools=[HoverTool(tooltips=[
            ("Protein_ID", "@Name"),
            ("Sequence type", "@gbkey"),
            ("Product", "@product"),
            ("log2 fold change", "@log2FoldChange"),
            ("base mean", "@baseMean")]),
            PanTool(), BoxSelectTool(), BoxZoomTool(),
            WheelZoomTool(), ResizeTool(), ResetTool()])
        pl.background_fill_color = "White"
        pl.grid.grid_line_color = "black"
        pl.xaxis.axis_label = 'sequence start position'
        pl.yaxis.axis_label = 'log2 fold change'
        pl.title.text = 'Manhattan Plot (' + replicon + ')'
        # Glyph size scales per-group via self._calc_glyph_size.
        pl.circle(data_group_sig["Start"],
                  data_group_sig["log2FoldChange"],
                  alpha=float(0.5),
                  size=self._calc_glyph_size(data_group_sig),
                  legend=('padj significant (cutoff: ' + str(
                      self._cutoff) + ')'),
                  color='Red',
                  source=ColumnDataSource(data_group_sig))
        pl.circle(data_group_no_sig["Start"],
                  data_group_no_sig["log2FoldChange"],
                  size=self._calc_glyph_size(data_group_no_sig),
                  alpha=float(0.5),
                  legend=('padj non-significant'),
                  color='Black',
                  source=ColumnDataSource(data_group_no_sig))
        plots.append(pl)
    # One HTML document containing the MA plot plus all Manhattan plots.
    plot = column(*plots)
    html = file_html(plot, CDN, 'MA & Manhattan Plot {}'.format(
        self._condition))
    with open('{}/MA & Manhattan Plot {}.html'.format(self._output_path,
                                                      self._condition),
              'w') as output_bokeh:
        output_bokeh.write(html)
def generateAreaSelection(dataframe, userWidth, userHeight, position_min,
                          position_max):
    """Build an interactive regional association plot with gene tracks.

    Args (TODO confirm column set against caller):
      dataframe: association results with columns 'pvalue_assoc',
        'info_assoc', 'position', 'rs_id_assoc', 'gene', 'func',
        'start_gen', 'end_gen'. NOTE(review): the initial 'log10' column
        assignment mutates the caller's frame in place.
      userWidth / userHeight: plot dimensions in pixels.
      position_min / position_max: genomic window; the x range is padded
        by 150 kb on each side.

    Returns:
      (script, div) strings from ``bokeh.embed.components``.

    Refactor: the ten hand-unrolled segment/text pairs and the ten
    copy-pasted 'ligneN'/'FligneN' column assignments are replaced by
    data-driven loops (same call order, same coordinates), matching the
    loop the original author had sketched in comments.
    """
    snps = dataframe
    snps["log10"] = -numpy.log10(snps.pvalue_assoc)  # -log10 transform
    snps = snps.sort_values(by="pvalue_assoc")       # sort by p-value
    max_pvalue = int(snps.log10[0:1])                # strongest signal
    # Imputed SNPs get their own series; the string 'NaN' keeps bokeh
    # from drawing the complementary points.
    snps['imp'] = numpy.where(snps['info_assoc'] == 1, snps['log10'], 'NaN')
    # Imputed vs. genotyped flag for the table view.
    snps['Imputed'] = numpy.where(snps['info_assoc'] == 1, True, False)
    # Genome-wide significant SNPs (p < 5e-8).
    snps['interest'] = numpy.where(snps['log10'] >= (-numpy.log10(0.00000005)),
                                   snps['log10'], 'NaN')
    source = ColumnDataSource(snps)  # SOURCE DATA FOR BOKEH PLOT
    TOOLS = [
        HoverTool(tooltips=[("SNP", "@rs_id_assoc"),
                            ("Gene", "@gene"),
                            ("P-value", "@pvalue_assoc"),
                            ("Region", "@func")]),
        CrosshairTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(),
        ResizeTool(), ResetTool(), PanTool(), PreviewSaveTool(), TapTool()
    ]
    stringLegend = "pvalue < " + str(0.001)
    plot = figure(webgl=True,
                  tools=TOOLS,
                  x_axis_label='Position',
                  y_axis_label='-log10(p)',
                  plot_width=userWidth,
                  plot_height=userHeight,
                  x_range=(position_min - 150000, position_max + 150000),
                  y_range=(-3.2, max_pvalue + 3))
    plot.circle('position', 'log10', source=source, size=7,
                legend='Genotyped')
    plot.square('position', 'imp', source=source, size=7, color="olive",
                legend='Imputed')
    plot.circle('position', 'interest', source=source, size=7, color="red",
                legend=stringLegend)
    snps = snps.sort_values(by="position")  # SORT POSITIONS
    # One row per gene for the gene-track annotations.
    snps.drop_duplicates(subset=('gene'), inplace=True, keep="last")
    # Spread genes over ten staggered tracks: every 10th gene per track.
    for i in range(10):
        snps['ligne%d' % (i + 1)] = snps.start_gen[i:len(snps):10]
        snps['Fligne%d' % (i + 1)] = snps.end_gen[i:len(snps):10]
    # (track column, segment y, label y) in the original drawing order.
    tracks = [
        ('ligne1', -0.30, -0.25), ('ligne2', -0.55, -0.50),
        ('ligne3', -0.85, -0.80), ('ligne4', -1.15, -1.10),
        ('ligne10', -1.45, -1.40), ('ligne9', -1.75, -1.70),
        ('ligne8', -2.05, -2.00), ('ligne7', -2.35, -2.30),
        ('ligne6', -2.65, -2.60), ('ligne5', -2.95, -2.90),
    ]
    for key, seg_y, text_y in tracks:
        start = snps[key]
        end = snps['F' + key]
        # Brown bar spanning the gene, with its name centred just above.
        plot.segment(start, [seg_y] * (len(snps)),
                     end, [seg_y] * (len(snps)),
                     line_width=6, line_color="#8b4513")
        plot.text(start + ((end - start) / 2), [text_y] * (len(snps)),
                  text=snps.gene, text_color='black', text_align='center',
                  text_font_size='1em', text_font_style='bold')
    plot.grid.grid_line_color = None  # TAKE GRID LINES OFF THE GRAPH
    graph, div1 = components(plot, CDN)
    return graph, div1
]) import itertools messages = collections.defaultdict(list) for contact, text in parsers.Whatsapp("../Whatsapp"): messages[contact].append(text) for contact in messages: messages[contact] = list(itertools.chain.from_iterable(messages[contact])) messages[contact].sort(key=lambda x: x['timestamp']) TOOLS = [ hover, BoxZoomTool(), ResetTool(), PanTool(), ResizeTool(), WheelZoomTool() ] # output to static HTML file output_file("lengths.html", title="Chat lengths") p = figure(width=800, height=500, x_axis_type="datetime", tools=TOOLS) colors = brewer["Spectral"][len(messages)] for i, k in enumerate(messages): days = conversation.Conversation(messages[k]).days() dates = sorted(days.keys()) lengths = np.array([len(days[key]) for key in dates]) x_dates = np.array(sorted(days.keys()), dtype=np.datetime64)
def generateManhattan(manhattanData, treshold, phenotype, userWidth,
                      userHeight):
    """Build a genome-wide Manhattan plot; return (script, div) strings.

    Args (TODO confirm column set against caller):
      manhattanData: per-SNP table with columns 'chromosome', 'pos',
        'log10', 'rs_id_assoc', 'gene', 'pvalue_assoc' and the
        pre-computed 'odd'/'even' display series.
      treshold: p-value significance threshold (drawn as a red line).
      phenotype: plot title.
      userWidth / userHeight: plot dimensions in pixels.

    Fixes vs. the previous revision:
      * restored ``full_data = pandas.concat(df_concat_slice)`` — it had
        been commented out, so ``ColumnDataSource(full_data)`` raised
        NameError on every call;
      * removed a leftover Python-2 debug ``print`` of ``chr_min_pos``;
      * integer division (``//``) for the "Chromosomes" label index so
        the list index stays an int.
    """
    max_v = int(manhattanData.log10.max())
    # CUTS GWAS DATA TO COMPUTE GWAS'S POSITIONS
    df_slice_dataframe = manhattanData.groupby('chromosome')
    counter = 0
    dictionary_of_slices = {}   # per-chromosome slices, concatenated below
    chr_max_pos = []            # max position of each chromosome
    chr_min_pos = []            # min position of each chromosome
    manhattan_max_pos = [0]     # cumulative x offset per chromosome
    df_concat_slice = []
    for name, group in df_slice_dataframe:
        key = str(name)
        dictionary_of_slices[key] = group
        pos = dictionary_of_slices[key]['pos']
        chr_min_pos.append(int(pos[0:1]))  # position min chromosome
        # Cumulative end position: this chromosome's last position plus
        # everything that came before it.
        manhattan_max_pos.append(int(pos[len(pos) - 1:len(pos)])
                                 + manhattan_max_pos[counter])
        chr_max_pos.append(int(pos[len(pos) - 1:len(pos)]))
        # Shift this chromosome's positions by the cumulative offset so
        # chromosomes are laid out side by side on a shared x axis.
        dictionary_of_slices[key]['position'] = (
            group['pos'] + manhattan_max_pos[counter])
        counter += 1
        df_concat_slice.append(dictionary_of_slices[key])
    counter = 0
    # Recreate the full table with the genome-wide 'position' column added.
    full_data = pandas.concat(df_concat_slice)
    # Midpoint of each chromosome, used to centre the chromosome labels.
    milieu = []
    for n in range(1, len(manhattan_max_pos)):
        milieu.append(manhattan_max_pos[n - 1]
                      + ((manhattan_max_pos[n]
                          - manhattan_max_pos[n - 1]) // 2))
    source = ColumnDataSource(full_data)  # SOURCE DATA FOR BOKEH PLOT
    TOOLS = [
        HoverTool(tooltips=[
            ("SNP", "@rs_id_assoc"),
            ("Gene", "@gene"),
            ("P-value", "@pvalue_assoc"),
        ]),
        CrosshairTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(),
        ResizeTool(), ResetTool(), PanTool(), PreviewSaveTool(), TapTool()
    ]
    plot = figure(
        webgl=True,
        title=phenotype,
        tools=TOOLS,
        x_axis_label='Chromosomes',
        y_axis_label='-log10(p)',
        plot_width=userWidth,
        plot_height=userHeight,
        y_range=(2.0, max_v + 3),
    )
    # Alternating colours for odd / even chromosomes.
    plot.circle('position', 'odd', source=source, size=3)
    plot.circle('position', 'even', source=source, size=3, color="black")
    # Significance threshold line, drawn as two opposite-direction rays.
    plot.ray(x=[0], y=[-numpy.log10(treshold)], length=0, angle=0,
             color='red')
    plot.ray(x=[0], y=[-numpy.log10(treshold)], length=0, angle=numpy.pi,
             color='red')
    plot.axis.major_label_text_font_size = '0pt'
    # Chromosome numbers centred under each chromosome block.
    plot.text(milieu, [2.75] * (len(milieu)),
              text=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10",
                    "11", "12", "13", "14", "15", "16", "17", "18", "19",
                    "20", "21", "22"],
              text_color='black', text_align='center',
              text_font_size='8pt', text_font_style='bold')
    plot.text(milieu[(len(milieu) // 2) - 4], [2.25],
              text=["Chromosomes"], text_color='black',
              text_align='center', text_font_size='10pt',
              text_font_style='bold')
    plot.xaxis.major_tick_line_color = None
    plot.xaxis.minor_tick_line_color = None
    plot.xaxis.visible = None
    plot.grid.grid_line_color = None  # TAKE GRID LINES OFF THE GRAPH
    graph, div1 = components(plot, CDN)
    return graph, div1
def generate_graph(self):
    """
    Generate the graph; return a 2-tuple of strings, script to place in
    the head of the HTML document and div content for the graph itself.

    :return: 2-tuple (script, div)
    :rtype: tuple
    """
    logger.debug('Generating graph for %s', self._graph_id)
    # tools to use
    tools = [
        PanTool(),
        BoxZoomTool(),
        WheelZoomTool(),
        SaveTool(),
        ResetTool(),
        ResizeTool()
    ]
    # generate the stacked area graph
    try:
        g = Area(
            self._data,
            x='Date',
            y=self._y_series_names,
            title=self._title,
            stack=True,
            xlabel='Date',
            ylabel='Downloads',
            tools=tools,
            # note the width and height will be set by JavaScript
            plot_height=400,
            plot_width=800,
            toolbar_location='above',
            legend=False
        )
    except Exception as ex:
        # Log enough context to reproduce, then re-raise for the caller.
        logger.error("Error generating %s graph", self._graph_id)
        logger.error("Data: %s", self._data)
        logger.error("y=%s", self._y_series_names)
        raise ex
    lines = []
    legend_parts = []
    # add a line at the top of each Patch (stacked area) for hovertool
    for renderer in g.select(GlyphRenderer):
        if not isinstance(renderer.glyph, Patches):
            continue
        series_name = renderer.data_source.data['series'][0]
        logger.debug('Adding line for Patches %s (series: %s)', renderer,
                     series_name)
        line = self._line_for_patches(self._data, g, renderer, series_name)
        if line is not None:
            lines.append(line)
            legend_parts.append((series_name, [line]))
    # add the Hovertool, specifying only our line glyphs
    g.add_tools(
        HoverTool(
            tooltips=[
                (self._y_name, '@SeriesName'),
                ('Date', '@FmtDate'),
                ('Downloads', '@Downloads'),
            ],
            renderers=lines,
            line_policy='nearest'
        )
    )
    # legend outside chart area
    legend = Legend(legends=legend_parts, location=(0, 0))
    g.add_layout(legend, 'right')
    return components(g)
toolbar_location="left") county_patches = Patches(xs="county_xs", ys="county_ys", fill_color="county_colors", fill_alpha=0.7, line_color="white", line_width=0.5) plot.add_glyph(county_source, county_patches) state_patches = Patches(xs="state_xs", ys="state_ys", fill_alpha=0.0, line_color="#884444", line_width=2) plot.add_glyph(state_source, state_patches) plot.add_tools(ResizeTool()) doc = Document() doc.add_root(plot) if __name__ == "__main__": filename = "choropleth.html" with open(filename, "w") as f: f.write( file_html(doc, INLINE, "Choropleth of all US counties, Unemployment 2009")) print("Wrote %s" % filename) view(filename)
def plot_corr(self, X, names=None, title='Feature Correlations',
              width=None, height=None):
    '''Correlation matrix plot.

    X: (n, d) array; correlations are computed between the d columns.
    names: optional categorical axis labels — presumably length d,
      TODO confirm.
    Returns the configured bokeh figure (hover shows cell coordinates
    and the correlation value).
    '''
    n, d = X.shape
    xcorr = np.corrcoef(X.T)  # d x d correlation matrix between features
    # 1-based grid coordinates for every cell of the matrix.
    XX, YY = np.meshgrid(np.arange(1, d + 1), np.arange(1, d + 1))
    colors = []
    alphas = []
    for corr in xcorr.ravel():
        if corr > 0:
            # Positive: first binary colour, opacity encodes strength.
            colors.append(self.binary_colors[0])
            alphas.append(corr)
        elif corr < 0:
            # Negative: second binary colour, opacity encodes strength.
            colors.append(self.binary_colors[1])
            alphas.append(-corr)
        else:
            # Exactly zero: neutral grey at the instance default alpha.
            colors.append('lightgrey')
            alphas.append(self.alpha)
    dsource = ColumnDataSource(data=dict(xname=XX.ravel(),
                                         yname=YY.ravel(),
                                         colors=colors,
                                         alphas=alphas,
                                         corrs=xcorr.ravel()))
    hover_tooltips = dict({
        'xname': '@xname',
        'yname': '@yname',
        'corr': '@corrs'
    })
    f = self._get_figure_instance(title=title,
                                  x_range=names,
                                  y_range=names,
                                  xlabel='',
                                  ylabel='',
                                  width=width,
                                  height=height)
    # Replace the default toolset; hover is added separately below.
    f.tools = [PanTool(), ResetTool(), ResizeTool()]
    f.add_tools(HoverTool(tooltips=hover_tooltips))
    # Strip chrome so the heat-map cells dominate.
    f.grid.grid_line_color = None
    f.axis.axis_line_color = None
    f.axis.major_tick_line_color = None
    f.axis.major_label_text_font_size = "6pt"
    f.axis.major_label_standoff = 0
    f.xaxis.major_label_orientation = -np.pi / 2
    # Slightly-inset rectangles; hovering outlines the cell in black.
    f.rect('xname', 'yname', 0.9, 0.9, source=dsource,
           color='colors', alpha='alphas', line_color=None,
           hover_line_color='black', hover_color='colors')
    return f
def do_plot(self, inputDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata):
    """
    Scatter every TF target's log2 fold change (one point per regulating
    TF) against the sorted target index: exports a static matplotlib
    figure plus an interactive Bokeh HTML plot with target/TF/condition
    hover tooltips.
    """
    if not os.path.isdir(inputDir):
        raise Exception, "inputDir does not currently exist as a directory"

    if not os.path.exists(plotOutDir):
        os.mkdir(plotOutDir)

    ap = AnalysisPaths(inputDir, variant_plot=True)
    variants = sorted(ap._path_data['variant'].tolist())  # Sorry for accessing private data

    # Only the first variant's sim_data is loaded; the fold-change tables
    # read below come from that single knowledge base.
    variant = variants[0]
    sim_data = cPickle.load(open(ap.get_variant_kb(variant), "rb"))

    # Per target gene, collect the log2 fold change contributed by each TF
    # regulating it, and remember which TF contributed each value.
    targetToFC = {}
    targetToFCTF = {}

    for tf in sim_data.tfToActiveInactiveConds:
        for target in sim_data.tfToFC[tf]:
            if target not in targetToFC:
                targetToFC[target] = []
                targetToFCTF[target] = []
            targetToFC[target].append(np.log2(sim_data.tfToFC[tf][target]))
            targetToFCTF[target].append(tf)

    for target in targetToFC:
        targetToFC[target] = np.array(targetToFC[target])

    targets = sorted(targetToFC)

    # Flattened per-point arrays: x = target index, y = fold change, plus
    # per-point sort key and annotation lists.
    x = []
    y = []
    maxVals = []
    tfs = []
    targetIds = []

    for idx, target in enumerate(targets):
        for FC, tf in zip(targetToFC[target], targetToFCTF[target]):
            x.append(idx)
            y.append(FC)

            # Sort key: whichever extreme (max or min) of the target's fold
            # changes has the larger magnitude, keeping its sign.
            if targetToFC[target].max() >= -1. * targetToFC[target].min():
                maxVals.append(targetToFC[target].max())
            else:
                maxVals.append(targetToFC[target].min())

            tfs.append(tf)
            targetIds.append(target)

    conditions = [sim_data.conditions[tf + "__active"]["nutrients"] for tf in tfs]

    x = np.array(x)
    y = np.array(y)
    maxVals = np.array(maxVals)

    # Re-order every per-point annotation by the signed extreme fold change.
    sortedIdxs = np.argsort(maxVals)
    conditions = [conditions[i] for i in sortedIdxs]
    tfs = [tfs[i] for i in sortedIdxs]
    targetIds = [targetIds[i] for i in sortedIdxs]

    # Static matplotlib figure.
    # NOTE(review): y is reordered by sortedIdxs but x is not — confirm
    # this pairing is intentional.
    fig = plt.figure(figsize=(11, 8.5))
    ax = plt.subplot(1, 1, 1)
    ax.plot(x, y[sortedIdxs], ".")
    xlabel = "Gene targets (sorted)"
    ylabel = "log2 (Target expression fold change)"
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    exportFigure(plt, plotOutDir, plotOutFileName, metadata)
    plt.close("all")

    # Interactive Bokeh version with hover metadata for each point.
    source = ColumnDataSource(data=dict(x=x,
                                        y=y[sortedIdxs],
                                        targetId=targetIds,
                                        tfId=tfs,
                                        condition=conditions))
    hover = HoverTool(tooltips=[("target", "@targetId"),
                                ("TF", "@tfId"),
                                ("condition", "@condition")])
    tools = [hover,
             BoxZoomTool(),
             LassoSelectTool(),
             PanTool(),
             WheelZoomTool(),
             ResizeTool(),
             UndoTool(),
             RedoTool(),
             "reset"]

    plot = figure(x_axis_label=xlabel,
                  y_axis_label=ylabel,
                  width=800,
                  height=500,
                  tools=tools)
    plot.scatter("x", "y", source=source)

    if not os.path.exists(os.path.join(plotOutDir, "html_plots")):
        os.makedirs(os.path.join(plotOutDir, "html_plots"))
    bokeh.io.output_file(os.path.join(plotOutDir, "html_plots",
                                      plotOutFileName + "__probBound" + ".html"),
                         title=plotOutFileName,
                         autosave=False)
    bokeh.io.save(plot)
    bokeh.io.curstate().reset()
def candlestick_volume_plot(input_df, plot_title=None):
    '''
    Copyright Shawn Gu, 2016
    The code below is modified based on the snippet from
    http://bokeh.pydata.org/en/0.11.1/docs/gallery/candlestick.html.

    Draw a candlestick price chart with a secondary volume axis from a
    date-indexed DataFrame with 'open'/'close'/'high'/'low'/'vol' columns,
    write it to an HTML file, and open it in the browser.
    '''
    input_df = input_df.dropna()
    input_df['date'] = pd.to_datetime(input_df.index)

    # Candle body geometry: vertical center and height of each rectangle.
    px_mids = (input_df['open'] + input_df['close']) / 2.0
    px_spans = abs(input_df['close'] - input_df['open'])

    # Volume bars rise from zero, so the rect center is half the volume.
    vol_mids = input_df['vol'] / 2.0
    vol_spans = input_df['vol']
    max_vol = max(input_df['vol'])

    inc = input_df['close'] >= input_df['open']   # up (or flat) sessions
    dec = input_df['open'] > input_df['close']    # down sessions
    # NOTE(review): red-for-up / green-for-down — presumably the Chinese
    # market color convention; confirm before changing.
    inc_color = 'red'
    dec_color = 'green'
    width = 12 * 60 * 60 * 1000  # in ms

    ht = HoverTool(tooltips=[
        ("date", "@date"),
        ("open", "@open"),
        ("close", "@close"),
        ("high", "@high"),
        ("low", "@low"),
    ])
    TOOLS = [ht,
             WheelZoomTool(dimensions=['width']),
             ResizeTool(),
             ResetTool(),
             PanTool(dimensions=['width']),
             PreviewSaveTool()]

    # Pad the price axis slightly beyond the observed high/low range.
    max_px = max(input_df['high'])
    min_px = min(input_df['low'])
    px_range = max_px - min_px
    primary_y_range = (min_px - px_range / 10.0, max_px + px_range * 0.1)

    # NOTE(review): p is made module-global — presumably so helpers such as
    # addlineplot below can draw onto the same figure; confirm.
    global p
    p = figure(x_axis_type="datetime",
               tools=TOOLS,
               plot_height=600,
               plot_width=1000,
               toolbar_location="above",
               y_range=primary_y_range)
    if plot_title:
        p.title = plot_title
    p.xaxis.major_label_orientation = pi / 4
    p.grid.grid_line_alpha = 0.3
    p.background_fill = "black"

    # Secondary y-axis for volume; its range is 5x max volume so the bars
    # stay in the lower part of the chart.
    p.extra_y_ranges = {"vol": Range1d(start=0, end=max_vol * 5)}
    p.add_layout(LinearAxis(y_range_name="vol"), 'right')

    px_rect_inc_src = create_data_source(input_df[inc])
    px_rect_dec_src = create_data_source(input_df[dec])

    # High-low wicks.
    p.segment(input_df.date[inc], input_df.high[inc],
              input_df.date[inc], input_df.low[inc], color=inc_color)
    p.segment(input_df.date[dec], input_df.high[dec],
              input_df.date[dec], input_df.low[dec], color=dec_color)
    # Open-close candle bodies (hover data comes from the rect sources).
    p.rect(input_df.date[inc], px_mids[inc], width, px_spans[inc],
           fill_color=inc_color, line_color=inc_color, source=px_rect_inc_src)
    p.rect(input_df.date[dec], px_mids[dec], width, px_spans[dec],
           fill_color=dec_color, line_color=dec_color, source=px_rect_dec_src)
    # Volume bars on the secondary "vol" axis.
    p.rect(input_df.date[inc], vol_mids[inc], width, vol_spans[inc],
           fill_color=inc_color, color=inc_color, y_range_name="vol")
    p.rect(input_df.date[dec], vol_mids[dec], width, vol_spans[dec],
           fill_color=dec_color, color=dec_color, y_range_name="vol")

    """自定义划线"""  # (translation: custom user-drawn lines)
    addlineplot(input_df)

    if plot_title:
        output_file(plot_title + "candlestick.html", title=plot_title)
    else:
        output_file("untitle_candlestick.html", title="untitled")
    show(p)
def do_plot(self, simOutDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata):
    """
    Compare target vs. actual enzyme-kinetics fluxes for one simulation:
    categorizes reactions by constraint type (kcat-only vs. Km-and-kcat)
    and by deviation magnitude, then exports a log-log matplotlib scatter.
    A Bokeh section follows but is unreachable (see NOTE at the `return`).
    """
    if not os.path.isdir(simOutDir):
        raise Exception, "simOutDir does not currently exist as a directory"

    if not os.path.exists(plotOutDir):
        os.mkdir(plotOutDir)

    sim_data = cPickle.load(open(simDataFile))
    constraintIsKcatOnly = sim_data.process.metabolism.constraintIsKcatOnly
    constrainedReactions = np.array(sim_data.process.metabolism.constrainedReactionList)

    # read constraint data
    enzymeKineticsReader = TableReader(os.path.join(simOutDir, "EnzymeKinetics"))
    targetFluxes = enzymeKineticsReader.readColumn("targetFluxes")
    actualFluxes = enzymeKineticsReader.readColumn("actualFluxes")
    reactionConstraint = enzymeKineticsReader.readColumn("reactionConstraint")
    enzymeKineticsReader.close()

    initialTime = TableReader(os.path.join(simOutDir, "Main")).readAttribute("initialTime")
    time = TableReader(os.path.join(simOutDir, "Main")).readColumn("time") - initialTime

    # Time-average each reaction's fluxes after the burn-in period.
    targetAve = np.mean(targetFluxes[BURN_IN_STEPS:, :], axis=0)
    actualAve = np.mean(actualFluxes[BURN_IN_STEPS:, :], axis=0)

    # Relative error per timestep/reaction; 1e-15 avoids division by zero.
    relError = np.abs((actualFluxes[BURN_IN_STEPS:, :] - targetFluxes[BURN_IN_STEPS:, :])
                      / (targetFluxes[BURN_IN_STEPS:, :] + 1e-15))
    aveError = np.mean(relError, axis=0)

    # Partition reactions by whether the active constraint was kcat-only at
    # every post-burn-in timestep, never kcat-only, or a mix of both.
    kcatOnlyReactions = np.all(constraintIsKcatOnly[reactionConstraint[BURN_IN_STEPS:, :]], axis=0)
    kmAndKcatReactions = ~np.any(constraintIsKcatOnly[reactionConstraint[BURN_IN_STEPS:, :]], axis=0)
    mixedReactions = ~(kcatOnlyReactions ^ kmAndKcatReactions)

    # Category codes: 0 within 2x of target, 1 over 2x off (either
    # direction), 2 over 10x off, -1 actual flux is exactly zero.
    kmAndKcatThresholds = [2, 10]
    kmAndKcatCategorization = np.zeros(np.sum(kmAndKcatReactions))
    for i, threshold in enumerate(kmAndKcatThresholds):
        kmAndKcatCategorization[targetAve[kmAndKcatReactions] / actualAve[kmAndKcatReactions] > threshold] = i + 1
        kmAndKcatCategorization[actualAve[kmAndKcatReactions] / targetAve[kmAndKcatReactions] > threshold] = i + 1
    kmAndKcatCategorization[actualAve[kmAndKcatReactions] == 0] = -1

    # Same categorization for kcat-only reactions, but only in the
    # actual-over-target direction.
    kcatOnlyThresholds = [2, 10]
    kcatOnlyCategorization = np.zeros(np.sum(kcatOnlyReactions))
    for i, threshold in enumerate(kcatOnlyThresholds):
        kcatOnlyCategorization[actualAve[kcatOnlyReactions] / targetAve[kcatOnlyReactions] > threshold] = i + 1
    kcatOnlyCategorization[actualAve[kcatOnlyReactions] == 0] = -1

    # Small offset so zero-valued fluxes remain visible on log axes.
    targetAve += 1e-13
    actualAve += 1e-13

    plt.figure(figsize=(8, 8))

    targetPearson = targetAve[kmAndKcatReactions]
    actualPearson = actualAve[kmAndKcatReactions]
    # plt.title(pearsonr(np.log10(targetPearson[actualPearson > 0]), np.log10(actualPearson[actualPearson > 0])))

    # Scatter kcat-only reactions colored by deviation category.
    plt.loglog(targetAve[kcatOnlyReactions][kcatOnlyCategorization == 0],
               actualAve[kcatOnlyReactions][kcatOnlyCategorization == 0], "og")
    plt.loglog(targetAve[kcatOnlyReactions][kcatOnlyCategorization == 1],
               actualAve[kcatOnlyReactions][kcatOnlyCategorization == 1], "o")
    plt.loglog(targetAve[kcatOnlyReactions][kcatOnlyCategorization == 2],
               actualAve[kcatOnlyReactions][kcatOnlyCategorization == 2], "or")

    # plt.loglog(targetAve[kmAndKcatReactions], actualAve[kmAndKcatReactions], "o")
    # plt.loglog(targetAve[kcatOnlyReactions], actualAve[kcatOnlyReactions], "ro")

    # Reference lines: y == x (green) and y == 10x (red).
    plt.loglog([1e-12, 1], [1e-12, 1], '--g')
    plt.loglog([1e-12, 1], [1e-11, 10], '--r')

    plt.xlabel("Target Flux (dmol/L/s)")
    plt.ylabel("Actual Flux (dmol/L/s)")

    exportFigure(plt, plotOutDir, plotOutFileName)
    plt.close("all")

    # NOTE(review): this early return makes everything below unreachable —
    # the interactive Bokeh output is effectively disabled. Confirm whether
    # that is intentional or leftover debugging.
    return

    source = ColumnDataSource(data=dict(x=targetAve,
                                        y=actualAve,
                                        reactionName=constrainedReactions))
    hover = HoverTool(tooltips=[
        ("Reaction", "@reactionName"),
    ])
    TOOLS = [hover,
             BoxZoomTool(),
             LassoSelectTool(),
             PanTool(),
             WheelZoomTool(),
             ResizeTool(),
             UndoTool(),
             RedoTool(),
             "reset",
             ]

    p1 = figure(
        x_axis_label="Target",
        x_axis_type="log",
        x_range=[min(targetAve[targetAve > 0]), max(targetAve)],
        y_axis_label="Actual",
        y_axis_type="log",
        y_range=[min(actualAve[actualAve > 0]), max(actualAve)],
        width=800,
        height=800,
        tools=TOOLS,
    )

    p1.scatter(targetAve, actualAve, source=source, size=8)
    p1.line([1e-15, 10], [1e-15, 10], line_color="red", line_dash="dashed")

    ## bar plot of error
    # sortedReactions = [constrainedReactions[x] for x in np.argsort(aveError)[::-1]]
    # aveError[np.log10(aveError) == -np.inf] = 0
    # source = ColumnDataSource(
    # 	data = dict(
    # 		x = sorted(relError, reverse = True),
    # 		reactionName = sortedReactions
    # 	)
    # )
    # p2 = Bar(data, values = "x")
    # hover2 = p2.select(dict(type=HoverTool))
    # hover2.tooltips = [("Reaction", "@reactionName")]

    ## flux for each reaction
    hover2 = HoverTool(tooltips=[
        ("Reaction", "@reactionName"),
    ])
    TOOLS2 = [hover2,
              BoxZoomTool(),
              LassoSelectTool(),
              PanTool(),
              WheelZoomTool(),
              ResizeTool(),
              UndoTool(),
              RedoTool(),
              "reset",
              ]

    p2 = figure(
        x_axis_label="Time(s)",
        y_axis_label="Flux",
        y_axis_type="log",
        y_range=[1e-8, 1],
        width=800,
        height=800,
        tools=TOOLS2,
    )

    # One line per constrained reaction, each with its own hover source.
    colors = COLORS_LARGE
    nTimesteps = len(time[BURN_IN_STEPS:])
    x = time[BURN_IN_STEPS:]
    y = actualFluxes[BURN_IN_STEPS:, 0]
    reactionName = np.repeat(constrainedReactions[0], nTimesteps)

    source = ColumnDataSource(data=dict(x=x, y=y, reactionName=reactionName))
    p2.line(x, y, line_color=colors[0], source=source)

    # Plot remaining metabolites onto initialized figure
    for m in np.arange(1, actualFluxes.shape[1]):
        y = actualFluxes[BURN_IN_STEPS:, m]
        reactionName = np.repeat(constrainedReactions[m], nTimesteps)
        source = ColumnDataSource(data=dict(x=x, y=y, reactionName=reactionName))
        p2.line(x, y, line_color=colors[m % len(colors)], source=source)

    if not os.path.exists(os.path.join(plotOutDir, "html_plots")):
        os.makedirs(os.path.join(plotOutDir, "html_plots"))

    p = bokeh.io.vplot(p1, p2)
    bokeh.io.output_file(os.path.join(plotOutDir, "html_plots", plotOutFileName + ".html"),
                         title=plotOutFileName,
                         autosave=False)
    bokeh.io.save(p)
    bokeh.io.curstate().reset()
def create_plots(model1, model3, model4, model5, live_data, city, display_name):
    """
    Output: Bokeh plot
    Creates individual timeseries plot

    Overlays the price forecasts of four models (Random Forest, Ridge,
    ARIMA, XGB) with live observed averages for one (city, car type),
    and returns the figure plus the live-data renderer's data source so
    the caller can stream updates into it.
    """
    # Restrict each model frame to one city / car type via its dummy columns.
    if city != 'chicago':
        model1 = model1.query("city_{} == 1 and display_name_{} == 1".format(
            city, display_name))
        # model2 = model2.query("city_{} == 1 and display_name_{} == 1".format(city, display_name))
        model3 = model3.query("city_{} == 1 and display_name_{} == 1".format(
            city, display_name))
        model4 = model4.query("city_{} == 1 and display_name_{} == 1".format(
            city, display_name))
        model5 = model5.query("city_{} == 1 and display_name_{} == 1".format(
            city, display_name))
    else:
        # Chicago is the dummy-coded baseline: all other city dummies zero.
        model1 = model1.query(
            "city_denver == 0 and city_seattle == 0 and city_sf == 0 and city_ny == 0 and display_name_{} == 1"
            .format(display_name))
        # model2 = model2.query("city_denver == 0 and city_seattle == 0 and city_sf == 0 and city_ny == 0 and display_name_{} == 1".format(display_name))
        model3 = model3.query(
            "city_denver == 0 and city_seattle == 0 and city_sf == 0 and city_ny == 0 and display_name_{} == 1"
            .format(display_name))
        model4 = model4.query(
            "city_denver == 0 and city_seattle == 0 and city_sf == 0 and city_ny == 0 and display_name_{} == 1"
            .format(display_name))
        model5 = model5.query(
            "city_denver == 0 and city_seattle == 0 and city_sf == 0 and city_ny == 0 and display_name_{} == 1"
            .format(display_name))

    cartype = display_name.lower()
    live_data = live_data.query("display_name == @cartype and city == @city")

    # One hover data source per plotted series.
    source1 = ColumnDataSource(data=dict(d=model1['date'].astype(str),
                                         h=model1['hour'],
                                         f=model1['y_forecast'],
                                         n=model1['name']))
    # source2 = ColumnDataSource(
    #     data=dict(
    #         d=model2['date'].astype(str),
    #         h=model2['hour'],
    #         f=model2['y_forecast'],
    #         n=model2['name']
    #     )
    # )
    source3 = ColumnDataSource(data=dict(d=model3['date'].astype(str),
                                         h=model3['hour'],
                                         f=model3['y_forecast'],
                                         n=model3['name']))
    source4 = ColumnDataSource(data=dict(d=model4['date'].astype(str),
                                         h=model4['hour'],
                                         f=model4['y_forecast'],
                                         n=model4['name']))
    source5 = ColumnDataSource(data=dict(d=model5['date'].astype(str),
                                         h=model5['hour'],
                                         f=model5['y_forecast'],
                                         n=model5['name']))
    source6 = ColumnDataSource(data=dict(d=live_data['date'].astype(str),
                                         h=live_data['hour'],
                                         f=live_data['avg_price_est'],
                                         n=live_data['name']))

    hover = HoverTool(tooltips=[("Model", "@n"),
                                ("Date", "@d"),
                                ("Hour", "@h"),
                                ("Average Price", "@f")])

    change_city = {
        'denver': 'Denver',
        'ny': 'New York',
        'chicago': 'Chicago',
        'seattle': 'Seattle',
        'sf': 'San Francisco'
    }

    # sys.argv[1] / sys.argv[2] carry the forecast date range for the title.
    p = figure(title="Forecast of {} {} Prices - {} to {}".format(
        change_city[city], display_name, sys.argv[1], sys.argv[2]),
               plot_width=1000,
               plot_height=500,
               x_axis_type="datetime",
               tools=[
                   hover,
                   PanTool(),
                   BoxZoomTool(),
                   ResizeTool(),
                   WheelZoomTool(),
                   PreviewSaveTool(),
                   ResetTool()
               ],
               toolbar_location="left",
               title_text_font_size="20pt")

    p.line(model1['record_time'], model1['y_forecast'], line_color='blue',
           line_width=2, legend="Random Forest Regressor", alpha=0.5,
           source=source1)
    # p.line(model2['record_time'], model2['y_forecast'], line_color='green', line_width=2, legend="RF Model 2 - Without Surge Multiplier", alpha=0.5, source=source2)  # line_dash=[4,4]
    p.line(model3['record_time'], model3['y_forecast'], line_color='magenta',
           line_width=2, legend="Ridge Regression", alpha=0.5,
           source=source3)  # line_dash=[4,4]
    p.line(model4['record_time'], model4['y_forecast'], line_color='gray',
           line_width=2, legend="ARIMA Model", alpha=0.5,
           source=source4)  # line_dash=[4,4]
    p.line(model5['record_time'], model5['y_forecast'], line_color='green',
           line_width=2, legend="XGB Regressor", alpha=0.5,
           source=source5)  # line_dash=[4,4]

    # p.xaxis.axis_label = 'Time'
    # p.xaxis.axis_label_text_font_size = "10pt"
    p.yaxis.axis_label = 'Average Price Estimate'
    p.yaxis.axis_label_text_font_size = "20pt"
    p.yaxis.axis_label_standoff = 15
    p.xgrid[0].ticker.desired_num_ticks = 20

    # Label the x axis with hours only.
    xformatter = DatetimeTickFormatter(formats=dict(hours=["%H"]))
    p.xaxis.formatter = xformatter
    p.legend.label_text_font_size = "10pt"

    # add a text renderer to out plot (no data yet)
    r = p.circle(x=live_data['record_time'],
                 y=live_data['avg_price_est'],
                 legend="True Average Prices",
                 source=source6,
                 color='red')
    ds = r.data_source
    return p, ds
print("Price updated last price {} last time {}".format(last_price, last_time)) print(data.data) return hover = HoverTool(tooltips=[ ("Time", "@time"), ("BTC Real-Time Price", "@price") ]) price_plot = figure(plot_width=800, plot_height=400, #x_axis_type='datetime', tools=[hover, ResizeTool(), SaveTool()], title="Real-Time Price Plot") price_plot.line(source=data, x='time', y='price') price_plot.xaxis.axis_label = "Time" price_plot.yaxis.axis_label = "BTC Real-Time Price" price_plot.title.text = "BTC Real Time Price" ticker_textbox = TextInput(placeholder="Ticker") update = Button(label="Update") update.on_click(update_ticker) inputs = widgetbox([ticker_textbox, update], width=200) curdoc().add_root(row(inputs, price_plot, width=1600)) curdoc().title = "Real-Time Price Plot BTC"
from bokeh.models import WheelZoomTool, ResizeTool, PanTool, BoxZoomTool
from bokeh.models import WMTSTileSource

# Example script: render an OpenStreetMap tile layer in a bare bokeh Plot
# and write it out as a standalone HTML file.
output_file("tile_source_example.html", title="Tile Source Example")

# set to roughly full extent of web mercator projection
x_range = Range1d(start=-20000000, end=20000000)
y_range = Range1d(start=-20000000, end=20000000)

# create tile source from templated url
tile_options = {}
tile_options['url'] = 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png'
tile_source = WMTSTileSource(**tile_options)

# instantiate plot and add tile source
p = Plot(x_range=x_range, y_range=y_range, plot_height=800, plot_width=800)
p.add_tools(ResizeTool(), WheelZoomTool(), PanTool(), BoxZoomTool())

tile_renderer_options = {}
p.add_tile(tile_source, **tile_renderer_options)

doc = Document()
doc.add(p)

if __name__ == "__main__":
    # Serialize the document to HTML with inlined bokeh resources and open
    # it in the default browser.
    filename = "tile_source.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Tile Source Example"))
    print("Wrote %s" % filename)
    view(filename)
# Up/down masks over the quote series: inc = close at or above open,
# dec = open above close. closep/openp/highp/lowp, quotes, mids, and spans
# are defined earlier in the script (outside this chunk).
inc = closep >= openp
dec = openp > closep

# Attach the precomputed candle-body geometry to the quotes frame so the
# hover tool's data source can reference it.
quotes['Mids'] = mids
quotes['Spans'] = spans

ht = HoverTool(tooltips=[
    ("date", "@Time"),
    ("open", "@Open"),
    ("close", "@Close"),
    ("high", "@High"),
    ("low", "@Low"),
    ("volume", "@Volume"),
    ("money", "@Money"),
])

TOOLS = [ht, WheelZoomTool(), ResizeTool(), ResetTool(), PanTool(), SaveTool()]

# Pad the y range: half the price span below the low, a tenth above the high.
max_x = max(highp)
min_x = min(lowp)
x_range = max_x - min_x
y_range = (min_x - x_range / 2.0, max_x + x_range * 0.1)

p = figure(x_axis_type="datetime",
           tools=TOOLS,
           plot_height=600,
           plot_width=950,
           toolbar_location="above",
           y_range=y_range)
p.xaxis.major_label_orientation = pi / 4
p.grid.grid_line_alpha = 0.3
p.background_fill_color = "black"
def rt_flight_radar(fs, center_freq, gain, N_samples, pos_ref, functions):
    """
    Start a real-time ADS-B flight radar: configure an RTL-SDR receiver,
    build a Google-Maps bokeh plot of aircraft positions centered on
    pos_ref = (lat, lng), and launch the SDR-reader and signal-processing
    threads.

    Returns the threading.Event that, once set, tells both worker threads
    to stop.
    """
    #clear_output()
    #time.sleep(1)

    # create an input output FIFO queues
    Qin = Queue.Queue()

    # create a pyaudio object
    sdr = RtlSdr()
    sdr.sample_rate = fs  # sampling rate
    sdr.center_freq = center_freq  # 1090MhZ center frequency
    sdr.gain = gain

    # initialize map
    x_range = Range1d()
    y_range = Range1d()
    map_options = GMapOptions(lat=pos_ref[0], lng=pos_ref[1], zoom=10)
    plot = GMapPlot(
        title='Flight Radar',
        x_range=x_range,
        y_range=y_range,
        plot_width=800,
        plot_height=400,
        map_options=map_options
    )

    # create lat, longitude source — starts empty; the signal_process
    # thread streams decoded positions into it.
    source = ColumnDataSource(
        data=dict(
            lat=[],
            lon=[],
            heading=[],
            flightnum=[]
        )
    )

    # create plane figure
    # Two crossed ovals form the plane glyph; the text glyph shows the
    # flight number, all rotated by the aircraft heading.
    oval1 = Oval(x="lon", y="lat", width=3000, height=700, angle="heading",
                 fill_color="blue", line_color="blue")
    oval2 = Oval(x="lon", y="lat", width=1000, height=7000, angle="heading",
                 fill_color="blue", line_color="blue")
    text = Text(x="lon", y="lat", text_font_size="10pt", text="flightnum",
                angle="heading", text_color="red")

    plot.add_glyph(source, oval1)
    plot.add_glyph(source, oval2)
    plot.add_glyph(source, text)

    # add tools to plot
    pan = PanTool()
    wheel_zoom = WheelZoomTool()
    resize = ResizeTool()
    plot.add_tools(pan, wheel_zoom, resize)

    xaxis = LinearAxis(axis_label="lat", major_tick_in=0)
    plot.add_layout(xaxis, 'below')
    yaxis = LinearAxis(axis_label="lon", major_tick_in=0)
    plot.add_layout(yaxis, 'left')

    handle = bk.show(plot)

    # initialize write file (unbuffered append — Python 2 three-arg open)
    log = open('rtadsb_log', 'a', 0)

    # initialize stop_flag
    stop_flag = threading.Event()

    # initialize threads
    t_sdr_read = threading.Thread(target=sdr_read,
                                  args=(Qin, sdr, N_samples, stop_flag))
    t_signal_process = threading.Thread(target=signal_process,
                                        args=(Qin, source, functions, plot, log, stop_flag))

    # start threads
    t_sdr_read.start()
    t_signal_process.start()

    return stop_flag
def do_plot(self, simOutDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata):
    """
    Compare target vs. actual enzyme-kinetics fluxes (in mmol/g/hr) for one
    simulation: writes a log-log matplotlib scatter, a TSV of categorized
    reactions with an EcoCyc highlight URL for zero-flux reactions, and an
    interactive two-panel bokeh HTML plot (scatter + per-reaction traces).
    """
    if not os.path.isdir(simOutDir):
        raise Exception, "simOutDir does not currently exist as a directory"

    if not os.path.exists(plotOutDir):
        os.mkdir(plotOutDir)

    sim_data = cPickle.load(open(simDataFile))
    constraintIsKcatOnly = sim_data.process.metabolism.constraintIsKcatOnly

    mainListener = TableReader(os.path.join(simOutDir, "Main"))
    initialTime = mainListener.readAttribute("initialTime")
    time = mainListener.readColumn("time") - initialTime
    mainListener.close()

    massListener = TableReader(os.path.join(simOutDir, "Mass"))
    cellMass = massListener.readColumn("cellMass")
    dryMass = massListener.readColumn("dryMass")
    massListener.close()

    # Per-timestep factor converting raw flux columns into
    # counts/mass/time units (dry-mass fraction times cell density).
    coefficient = dryMass / cellMass * sim_data.constants.cellDensity.asNumber(MASS_UNITS / VOLUME_UNITS)

    # read constraint data
    enzymeKineticsReader = TableReader(os.path.join(simOutDir, "EnzymeKinetics"))
    targetFluxes = (COUNTS_UNITS / MASS_UNITS / TIME_UNITS) * (enzymeKineticsReader.readColumn("targetFluxes").T / coefficient).T
    actualFluxes = (COUNTS_UNITS / MASS_UNITS / TIME_UNITS) * (enzymeKineticsReader.readColumn("actualFluxes").T / coefficient).T
    reactionConstraint = enzymeKineticsReader.readColumn("reactionConstraint")
    constrainedReactions = np.array(enzymeKineticsReader.readAttribute("constrainedReactions"))
    enzymeKineticsReader.close()

    # Strip units: everything below works in mmol/g/hr.
    targetFluxes = targetFluxes.asNumber(units.mmol / units.g / units.h)
    actualFluxes = actualFluxes.asNumber(units.mmol / units.g / units.h)

    # Time-average each reaction's fluxes after the burn-in period.
    targetAve = np.mean(targetFluxes[BURN_IN_STEPS:, :], axis=0)
    actualAve = np.mean(actualFluxes[BURN_IN_STEPS:, :], axis=0)

    # Partition reactions by whether the active constraint was kcat-only at
    # every post-burn-in timestep, never kcat-only, or a mix of both.
    kcatOnlyReactions = np.all(constraintIsKcatOnly[reactionConstraint[BURN_IN_STEPS:, :]], axis=0)
    kmAndKcatReactions = ~np.any(constraintIsKcatOnly[reactionConstraint[BURN_IN_STEPS:, :]], axis=0)
    mixedReactions = ~(kcatOnlyReactions ^ kmAndKcatReactions)

    # Category codes: 0 within 2x of target, 1 over 2x, 2 over 10x,
    # -1 actual equals target exactly, -2 actual flux is zero.
    thresholds = [2, 10]
    categorization = np.zeros(reactionConstraint.shape[1])
    categorization[actualAve == 0] = -2
    categorization[actualAve == targetAve] = -1
    for i, threshold in enumerate(thresholds):
        # categorization[targetAve / actualAve > threshold] = i + 1
        categorization[actualAve / targetAve > threshold] = i + 1

    # url for ecocyc to highlight fluxes that are 0 on metabolic network diagram
    siteStr = "https://ecocyc.org/overviewsWeb/celOv.shtml?zoomlevel=1&orgid=ECOLI"
    excluded = ['RXN0-2201', 'RXN-16000', 'RXN-12583', 'RXN-11496',
                'DIMESULFREDUCT-RXN',
                '3.6.1.41-R[4/63051]5-NUCLEOTID-RXN']  # reactions not recognized by ecocyc
    rxns = []
    for i, reaction in enumerate(constrainedReactions):
        if actualAve[i] == 0:
            # Reduce the full reaction name to its base RXN identifier.
            rxn = re.findall(".+RXN", reaction)
            if len(rxn) == 0:
                rxn = re.findall("RXN[^-]*-[0-9]+", reaction)
            if rxn[0] not in excluded:
                siteStr += "&rnids=%s" % rxn[0]
            rxns.append(rxn[0])
    # print siteStr

    # TSV report: the EcoCyc link, then one section per constraint class.
    csvFile = open(os.path.join(plotOutDir, plotOutFileName + ".tsv"), "wb")
    output = csv.writer(csvFile, delimiter="\t")
    output.writerow(["ecocyc link:", siteStr])
    output.writerow(["Km and kcat", "Target", "Actual", "Category"])
    for reaction, target, flux, category in zip(constrainedReactions[kmAndKcatReactions],
                                                targetAve[kmAndKcatReactions],
                                                actualAve[kmAndKcatReactions],
                                                categorization[kmAndKcatReactions]):
        output.writerow([reaction, target, flux, category])

    output.writerow(["kcat only"])
    for reaction, target, flux, category in zip(constrainedReactions[kcatOnlyReactions],
                                                targetAve[kcatOnlyReactions],
                                                actualAve[kcatOnlyReactions],
                                                categorization[kcatOnlyReactions]):
        output.writerow([reaction, target, flux, category])

    if np.sum(mixedReactions):
        output.writerow(["mixed constraints"])
        for reaction, target, flux, category in zip(constrainedReactions[mixedReactions],
                                                    targetAve[mixedReactions],
                                                    actualAve[mixedReactions],
                                                    categorization[mixedReactions]):
            output.writerow([reaction, target, flux, category])

    csvFile.close()

    # Small offset so zero-valued fluxes remain visible on log axes.
    targetAve += 1e-6
    actualAve += 1e-6

    axes_limits = [1e-7, 1e4]
    plt.figure(figsize=(8, 8))
    ax = plt.axes()
    plt.loglog(axes_limits, axes_limits, 'k')
    plt.loglog(targetAve, actualAve, "ob", markeredgewidth=0.25, alpha=0.25)
    plt.xlabel("Target Flux (mmol/g/hr)")
    plt.ylabel("Actual Flux (mmol/g/hr)")
    plt.minorticks_off()

    whitePadSparklineAxis(ax)
    ax.set_ylim(axes_limits)
    ax.set_xlim(axes_limits)
    ax.set_yticks(axes_limits)
    ax.set_xticks(axes_limits)

    exportFigure(plt, plotOutDir, plotOutFileName)
    plt.close("all")

    # Interactive bokeh panel 1: target vs. actual scatter with hover.
    source = ColumnDataSource(
        data=dict(
            x=targetAve,
            y=actualAve,
            reactionName=constrainedReactions)
    )

    hover = HoverTool(
        tooltips=[
            ("Reaction", "@reactionName"),
        ]
    )

    TOOLS = [hover,
             BoxZoomTool(),
             LassoSelectTool(),
             PanTool(),
             WheelZoomTool(),
             ResizeTool(),
             UndoTool(),
             RedoTool(),
             "reset",
             ]

    p1 = figure(x_axis_label="Target",
                x_axis_type="log",
                x_range=[min(targetAve[targetAve > 0]), max(targetAve)],
                y_axis_label="Actual",
                y_axis_type="log",
                y_range=[min(actualAve[actualAve > 0]), max(actualAve)],
                width=800,
                height=800,
                tools=TOOLS,
                )

    p1.scatter(targetAve, actualAve, source=source, size=8)
    p1.line([1e-15, 10], [1e-15, 10], line_color="red", line_dash="dashed")

    ## bar plot of error
    # sortedReactions = [constrainedReactions[x] for x in np.argsort(aveError)[::-1]]
    # aveError[np.log10(aveError) == -np.inf] = 0
    # source = ColumnDataSource(
    # 	data = dict(
    # 		x = sorted(relError, reverse = True),
    # 		reactionName = sortedReactions
    # 	)
    # )
    # p2 = Bar(data, values = "x")
    # hover2 = p2.select(dict(type=HoverTool))
    # hover2.tooltips = [("Reaction", "@reactionName")]

    ## flux for each reaction
    hover2 = HoverTool(
        tooltips=[
            ("Reaction", "@reactionName"),
        ]
    )

    TOOLS2 = [hover2,
              BoxZoomTool(),
              LassoSelectTool(),
              PanTool(),
              WheelZoomTool(),
              ResizeTool(),
              UndoTool(),
              RedoTool(),
              "reset",
              ]

    p2 = figure(x_axis_label="Time(s)",
                y_axis_label="Flux",
                y_axis_type="log",
                y_range=[1e-8, 1],
                width=800,
                height=800,
                tools=TOOLS2,
                )

    # Panel 2: one time-series line per constrained reaction, each with
    # its own hover source so tooltips name the reaction.
    colors = COLORS_LARGE
    nTimesteps = len(time[BURN_IN_STEPS:])
    x = time[BURN_IN_STEPS:]
    y = actualFluxes[BURN_IN_STEPS:, 0]
    reactionName = np.repeat(constrainedReactions[0], nTimesteps)

    source = ColumnDataSource(
        data=dict(
            x=x,
            y=y,
            reactionName=reactionName)
    )

    p2.line(x, y, line_color=colors[0], source=source)

    # Plot remaining metabolites onto initialized figure
    for m in np.arange(1, actualFluxes.shape[1]):
        y = actualFluxes[BURN_IN_STEPS:, m]
        reactionName = np.repeat(constrainedReactions[m], nTimesteps)

        source = ColumnDataSource(
            data=dict(
                x=x,
                y=y,
                reactionName=reactionName)
        )

        p2.line(x, y, line_color=colors[m % len(colors)], source=source)

    if not os.path.exists(os.path.join(plotOutDir, "html_plots")):
        os.makedirs(os.path.join(plotOutDir, "html_plots"))

    p = bokeh.io.vplot(p1, p2)
    bokeh.io.output_file(os.path.join(plotOutDir, "html_plots", plotOutFileName + ".html"),
                         title=plotOutFileName,
                         autosave=False)
    bokeh.io.save(p)
    bokeh.io.curstate().reset()
def do_plot(self, inputDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata):
    """
    Compare mean FBA reaction fluxes across variants: a matplotlib grid of
    pairwise variant-vs-variant log-log scatters, plus an interactive bokeh
    HTML scatter of variant 0 vs. variant 1 with reaction-ID hover.
    """
    if not os.path.isdir(inputDir):
        raise Exception, "inputDir does not currently exist as a directory"

    ap = AnalysisPaths(inputDir, variant_plot=True)
    variants = sorted(ap._path_data['variant'].tolist())  # Sorry for accessing private data

    # Nothing to compare with fewer than two variants.
    if len(variants) <= 1:
        return

    all_cells = sorted(ap.get_cells(variant=variants, seed=[0], generation=[0]))

    if not os.path.exists(plotOutDir):
        os.mkdir(plotOutDir)

    #make structures to hold mean flux values
    mean_fluxes = []
    BURN_IN_STEPS = 20
    n_variants = 0
    IDs = []

    #Puts you into the specific simulation's data. Pull fluxes from here
    #TODO LEARN HOW TO PULL FLUXES FROM LISTENER FILE (see kineticsflux comparison)
    for variant, simDir in zip(variants, all_cells):
        sim_data = cPickle.load(open(ap.get_variant_kb(variant), "rb"))
        simOutDir = os.path.join(simDir, "simOut")

        #crafting area
        enzymeKineticsReader = TableReader(os.path.join(simOutDir, "FBAResults"))  # "EnzymeKinetics"))
        actualFluxes = enzymeKineticsReader.readColumn("reactionFluxes")  #"actualFluxes")
        IDs = enzymeKineticsReader.readAttribute("reactionIDs")
        enzymeKineticsReader.close()

        # Mean flux per reaction, averaged after the burn-in period.
        actualAve = np.mean(actualFluxes[BURN_IN_STEPS:, :], axis=0)
        mean_fluxes.append(actualAve)
        n_variants = n_variants + 1

    ###Plot the fluxes
    plt.figure(figsize=(8.5, 11))

    #Generalizred plotting
    # One scatter panel per ordered variant pair (j > k).
    for j in range(0, n_variants):
        for k in range(0, n_variants):
            if j <= k:
                continue

            # NOTE(review): subplot position j + k is not unique per (j, k)
            # pair — e.g. (3, 0) and (2, 1) collide once there are 4+
            # variants, so later panels overwrite earlier ones. Confirm the
            # intended index (e.g. (j - 1) * (n_variants - 1) + k + 1).
            plt.subplot(n_variants - 1, n_variants - 1, j + k)
            plt.plot(np.log10(mean_fluxes[j][:]), np.log10(mean_fluxes[k][:]), 'o')
            plt.plot([-12, 0], [-12, 0], color='k', linestyle='-', linewidth=2)
            plt.xlabel('Variant ' + str(j) + ' Flux')
            plt.ylabel('Variant ' + str(k) + ' Flux')
            plt.ylim((-11, 0))
            plt.xlim((-11, 0))

    exportFigure(plt, plotOutDir, plotOutFileName, metadata)
    plt.close("all")

    #nifty fun tool
    # Bokeh
    if len(mean_fluxes) < 2:
        return

    # Plot first metabolite to initialize plot settings
    x = np.log10(mean_fluxes[0][:])
    y = np.log10(mean_fluxes[1][:])

    source = ColumnDataSource(data=dict(x=x, y=y, rxn=IDs))

    hover = HoverTool(tooltips=[
        ("ID", "@rxn"),
    ])

    TOOLS = [hover,
             BoxZoomTool(),
             LassoSelectTool(),
             PanTool(),
             WheelZoomTool(),
             ResizeTool(),
             UndoTool(),
             RedoTool(),
             "reset"
             ]

    p = figure(
        x_axis_label="Variant 0 Flux",
        y_axis_label="Variant 1 Flux",
        width=800,
        height=800,
        tools=TOOLS,
    )

    p.circle('x', 'y', size=5, source=source)  #np.log10(mean_fluxes[0][:]),np.log10(mean_fluxes[1][:]), size=10)
    p.line([-12, 0], [-12, 0], color="firebrick", line_width=2)

    if not os.path.exists(os.path.join(plotOutDir, "html_plots")):
        os.makedirs(os.path.join(plotOutDir, "html_plots"))

    bokeh.io.output_file(os.path.join(plotOutDir, "html_plots", plotOutFileName + ".html"),
                         title=plotOutFileName,
                         autosave=False)
    bokeh.io.save(p)
    bokeh.io.curstate().reset()
new_price = get_last_price(symbol=TICKER) data.stream( dict(time=new_price["time"], display_time=new_price["display_time"], price=new_price["price"]), 10000) return hover = HoverTool(tooltips=[("Time", "@display_time"), ("IEX Real-Time Price", "@price")]) price_plot = figure(plot_width=800, plot_height=400, x_axis_type='datetime', tools=[hover, ResizeTool(), SaveTool()], title="Real-Time Price Plot") price_plot.line(source=data, x='time', y='price') price_plot.xaxis.axis_label = "Time" price_plot.yaxis.axis_label = "IEX Real-Time Price" price_plot.title.text = "IEX Real Time Price: " + TICKER ticker_textbox = TextInput(placeholder="Ticker") update = Button(label="Update") update.on_click(update_ticker) inputs = widgetbox([ticker_textbox, update], width=200) curdoc().add_root(row(inputs, price_plot, width=1600))
def do_plot(self, inputDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata): if metadata["variant"] != "tfActivity": print "This plot only runs for the 'tfActivity' variant." return if not os.path.isdir(inputDir): raise Exception, "inputDir does not currently exist as a directory" ap = AnalysisPaths(inputDir, variant_plot=True) variants = sorted(ap._path_data['variant'].tolist() ) # Sorry for accessing private data if 0 in variants: variants.remove(0) if len(variants) == 0: return all_cells = sorted( ap.get_cells(variant=variants, seed=[0], generation=[0])) if not os.path.exists(plotOutDir): os.mkdir(plotOutDir) expectedProbBound = [] simulatedProbBound = [] expectedSynthProb = [] simulatedSynthProb = [] targetId = [] targetCondition = [] targetToTfType = {} for variant, simDir in zip(variants, all_cells): sim_data = cPickle.load(open(ap.get_variant_kb(variant), "rb")) shape = sim_data.process.transcription_regulation.recruitmentData[ "shape"] hI = sim_data.process.transcription_regulation.recruitmentData[ "hI"] hJ = sim_data.process.transcription_regulation.recruitmentData[ "hJ"] hV = sim_data.process.transcription_regulation.recruitmentData[ "hV"] H = np.zeros(shape, np.float64) H[hI, hJ] = hV colNames = sim_data.process.transcription_regulation.recruitmentColNames tfList = ["basal (no TF)"] + sorted( sim_data.tfToActiveInactiveConds) simOutDir = os.path.join(simDir, "simOut") tf = tfList[(variant + 1) // 2] tfStatus = None if variant % 2 == 1: tfStatus = "active" else: tfStatus = "inactive" bulkMoleculesReader = TableReader( os.path.join(simOutDir, "BulkMolecules")) bulkMoleculeIds = bulkMoleculesReader.readAttribute("objectNames") rnaSynthProbReader = TableReader( os.path.join(simOutDir, "RnaSynthProb")) rnaIds = rnaSynthProbReader.readAttribute("rnaIds") tfTargetBoundIds = [] tfTargetBoundIndices = [] tfTargetSynthProbIds = [] tfTargetSynthProbIndices = [] for tfTarget in sorted(sim_data.tfToFC[tf]): tfTargetBoundIds.append(tfTarget + 
"__" + tf) tfTargetBoundIndices.append( bulkMoleculeIds.index(tfTargetBoundIds[-1])) tfTargetSynthProbIds.append(tfTarget + "[c]") tfTargetSynthProbIndices.append( rnaIds.index(tfTargetSynthProbIds[-1])) tfTargetBoundCountsAll = bulkMoleculesReader.readColumn( "counts")[:, tfTargetBoundIndices] tfTargetSynthProbAll = rnaSynthProbReader.readColumn( "rnaSynthProb")[:, tfTargetSynthProbIndices] for targetIdx, tfTarget in enumerate(sorted(sim_data.tfToFC[tf])): tfTargetBoundCounts = tfTargetBoundCountsAll[:, targetIdx].reshape( -1) expectedProbBound.append(sim_data.pPromoterBound[tf + "__" + tfStatus][tf]) simulatedProbBound.append(tfTargetBoundCounts[5:].mean()) tfTargetSynthProbId = [tfTarget + "[c]"] tfTargetSynthProbIndex = np.array( [rnaIds.index(x) for x in tfTargetSynthProbId]) tfTargetSynthProb = tfTargetSynthProbAll[:, targetIdx].reshape(-1) rnaIdx = np.where( sim_data.process.transcription.rnaData["id"] == tfTarget + "[c]")[0][0] regulatingTfIdxs = np.where(H[rnaIdx, :]) for i in regulatingTfIdxs[0]: if colNames[i].split("__")[1] != "alpha": if tfTarget not in targetToTfType: targetToTfType[tfTarget] = [] targetToTfType[tfTarget].append( sim_data.process.transcription_regulation. 
tfToTfType[colNames[i].split("__")[1]]) expectedSynthProb.append( sim_data.process.transcription.rnaSynthProb[ tf + "__" + tfStatus][rnaIdx]) simulatedSynthProb.append(tfTargetSynthProb[5:].mean()) targetId.append(tfTarget) targetCondition.append(tf + "__" + tfStatus) bulkMoleculesReader.close() rnaSynthProbReader.close() expectedProbBound = np.array(expectedProbBound) simulatedProbBound = np.array(simulatedProbBound) expectedSynthProb = np.array(expectedSynthProb) simulatedSynthProb = np.array(simulatedSynthProb) regressionResult = scipy.stats.linregress( np.log10(expectedProbBound[expectedProbBound > NUMERICAL_ZERO]), np.log10(simulatedProbBound[expectedProbBound > NUMERICAL_ZERO])) regressionResultLargeValues = scipy.stats.linregress( np.log10(expectedProbBound[expectedProbBound > 1e-2]), np.log10(simulatedProbBound[expectedProbBound > 1e-2])) ax = plt.subplot(2, 1, 1) ax.scatter(np.log10(expectedProbBound), np.log10(simulatedProbBound)) plt.xlabel("log10(Expected probability bound)", fontsize=6) plt.ylabel("log10(Simulated probability bound)", fontsize=6) plt.title( "Slope: %0.3f Intercept: %0.3e (Without Small Values: Slope: %0.3f Intercept: %0.3e)" % (regressionResult.slope, regressionResult.intercept, regressionResultLargeValues.slope, regressionResultLargeValues.intercept), fontsize=6) ax.tick_params(which='both', direction='out', labelsize=6) regressionResult = scipy.stats.linregress( np.log10(expectedSynthProb[expectedSynthProb > NUMERICAL_ZERO]), np.log10(simulatedSynthProb[expectedSynthProb > NUMERICAL_ZERO])) ax = plt.subplot(2, 1, 2) ax.scatter(np.log10(expectedSynthProb), np.log10(simulatedSynthProb)) plt.xlabel("log10(Expected synthesis probability)", fontsize=6) plt.ylabel("log10(Simulated synthesis probability)", fontsize=6) plt.title("Slope: %0.3f Intercept: %0.3e" % (regressionResult.slope, regressionResult.intercept), fontsize=6) ax.tick_params(which='both', direction='out', labelsize=6) plt.tight_layout() exportFigure(plt, plotOutDir, 
plotOutFileName, metadata) plt.close("all") # Probability bound - hover for ID source1 = ColumnDataSource(data=dict(x=np.log10(expectedProbBound), y=np.log10(simulatedProbBound), ID=targetId, condition=targetCondition)) hover1 = HoverTool(tooltips=[("ID", "@ID"), ("condition", "@condition")]) tools1 = [ hover1, BoxZoomTool(), LassoSelectTool(), PanTool(), WheelZoomTool(), ResizeTool(), UndoTool(), RedoTool(), "reset" ] s1 = figure(x_axis_label="log10(Expected probability bound)", y_axis_label="log10(Simulated probability bound)", width=800, height=500, tools=tools1) s1.scatter("x", "y", source=source1) if not os.path.exists(os.path.join(plotOutDir, "html_plots")): os.makedirs(os.path.join(plotOutDir, "html_plots")) bokeh.io.output_file(os.path.join( plotOutDir, "html_plots", plotOutFileName + "__probBound" + ".html"), title=plotOutFileName, autosave=False) bokeh.io.save(s1) # Synthesis probability - hover for ID source2 = ColumnDataSource(data=dict(x=np.log10(expectedSynthProb), y=np.log10(simulatedSynthProb), ID=targetId, condition=targetCondition)) hover2 = HoverTool(tooltips=[("ID", "@ID"), ("condition", "@condition")]) tools2 = [ hover2, BoxZoomTool(), LassoSelectTool(), PanTool(), WheelZoomTool(), ResizeTool(), UndoTool(), RedoTool(), "reset" ] s2 = figure(x_axis_label="log10(Expected synthesis probability)", y_axis_label="log10(Simulated synthesis probability)", width=800, height=500, tools=tools2) s2.scatter("x", "y", source=source2) bokeh.io.output_file(os.path.join( plotOutDir, "html_plots", plotOutFileName + "__synthProb" + ".html"), title=plotOutFileName, autosave=False) bokeh.io.save(s2) # Synthesis probability - filter targets by TF type bokeh.io.output_file(os.path.join( plotOutDir, "html_plots", plotOutFileName + "__synthProb__interactive" + ".html"), title=plotOutFileName, autosave=False) tfTypes = [] for i in targetId: if i in targetToTfType: uniqueSet = np.unique(targetToTfType[i]) if uniqueSet.shape[0] == 1: tfTypes.append(uniqueSet[0]) elif 
uniqueSet.shape[0] == 3: tfTypes.append("all") else: tfTypes.append(uniqueSet[0] + "_" + uniqueSet[1]) else: tfTypes.append("none") tfTypes = np.array(tfTypes) x0 = np.copy(expectedSynthProb) x0[np.where(tfTypes != "0CS")] = np.nan x1 = np.copy(expectedSynthProb) x1[np.where(tfTypes != "1CS")] = np.nan x2 = np.copy(expectedSynthProb) x2[np.where(tfTypes != "2CS")] = np.nan x01 = np.copy(expectedSynthProb) x01[np.where(tfTypes != "0CS_1CS")] = np.nan x02 = np.copy(expectedSynthProb) x02[np.where(tfTypes != "0CS_2CS")] = np.nan x12 = np.copy(expectedSynthProb) x12[np.where(tfTypes != "1CS_2CS")] = np.nan y0 = np.copy(simulatedSynthProb) y0[np.where(tfTypes != "0CS")] = np.nan y1 = np.copy(simulatedSynthProb) y1[np.where(tfTypes != "1CS")] = np.nan y2 = np.copy(simulatedSynthProb) y2[np.where(tfTypes != "2CS")] = np.nan y01 = np.copy(simulatedSynthProb) y01[np.where(tfTypes != "0CS_1CS")] = np.nan y02 = np.copy(simulatedSynthProb) y02[np.where(tfTypes != "0CS_2CS")] = np.nan y12 = np.copy(simulatedSynthProb) x12[np.where(tfTypes != "1CS_2CS")] = np.nan source_all = ColumnDataSource(data=dict(x=np.log10(expectedSynthProb), y=np.log10(simulatedSynthProb), ID=targetId, condition=targetCondition)) source_tf = ColumnDataSource( data=dict(x0=np.log10(x0), y0=np.log10(y0), x1=np.log10(x1), y1=np.log10(y1), x2=np.log10(x2), y2=np.log10(y2), x01=np.log10(x01), y01=np.log10(y01), x02=np.log10(x02), y02=np.log10(y02), x12=np.log10(x12), y12=np.log10(y12), x123=np.log10(expectedSynthProb), y123=np.log10(simulatedSynthProb), ID=targetId, condition=targetCondition)) hover3 = HoverTool(tooltips=[("ID", "@ID"), ("condition", "@condition")]) tools3 = [ hover3, BoxZoomTool(), LassoSelectTool(), PanTool(), WheelZoomTool(), ResizeTool(), UndoTool(), RedoTool(), "reset" ] axis_max = np.ceil(np.log10(expectedSynthProb).max()) for i in np.sort(expectedSynthProb): if i > 0: break axis_min = np.floor(np.log10(i)) s3 = figure( x_axis_label="log10(Expected synthesis probability)", 
y_axis_label="log10(Simulated synthesis probability)", plot_width=800, plot_height=500, x_range=(axis_min, axis_max), y_range=(axis_min, axis_max), tools=tools3, ) s3.scatter("x", "y", source=source_all) callback = CustomJS(args=dict(source_all=source_all, source_tf=source_tf), code=""" var data_all = source_all.get('data'); var data_tf = source_tf.get('data'); data_all['x'] = data_tf['x' + cb_obj.get("name")]; data_all['y'] = data_tf['y' + cb_obj.get("name")]; source_all.trigger('change'); """) toggle0 = Button(label="0CS", callback=callback, name="0") toggle1 = Button(label="1CS", callback=callback, name="1") toggle2 = Button(label="2CS", callback=callback, name="2") toggle3 = Button(label="0CS and 1CS", callback=callback, name="01") toggle4 = Button(label="0CS and 2CS", callback=callback, name="02") toggle5 = Button(label="1CS and 2CS", callback=callback, name="12") toggle6 = Button(label="All", callback=callback, name="123") layout = vplot(toggle0, toggle1, toggle2, toggle3, toggle4, toggle5, toggle6, s3) bokeh.io.save(layout) bokeh.io.curstate().reset()