def test_return_type(self):
    class fake_template:
        def __init__(self, tester, user_template_variables=None):
            self.tester = tester
            self.template_variables = {
                "title",
                "bokeh_js",
                "bokeh_css",
                "plot_script",
                "plot_div"
            }
            if user_template_variables is not None:
                self.template_variables.update(user_template_variables)

        def render(self, template_variables):
            self.tester.assertTrue(
                self.template_variables.issubset(
                    set(template_variables.keys())
                )
            )
            return "template result"

    r = embed.file_html(_embed_test_plot, CDN, "title")
    self.assertTrue(isinstance(r, str))
    r = embed.file_html(_embed_test_plot, CDN, "title", fake_template(self))
    self.assertTrue(isinstance(r, str))
    r = embed.file_html(_embed_test_plot, CDN, "title",
                        fake_template(self, {"test_var"}),
                        {"test_var": "test"})
    self.assertTrue(isinstance(r, str))
def simple_chart(request):
    from bokeh.plotting import figure
    from bokeh.resources import CDN
    from bokeh.embed import file_html

    plot = figure()
    plot.circle([1, 2], [3, 4])

    html = file_html(plot, CDN, "my plot")
    return HttpResponse(html)
def test_return_type(self):
    class fake_template:
        def render(self, title, plot_resources, plot_script, plot_div):
            return "template result"

    r = embed.file_html(_embed_test_plot, CDN, "title")
    self.assertTrue(isinstance(r, str))
    r = embed.file_html(_embed_test_plot, CDN, "title", fake_template())
    self.assertTrue(isinstance(r, str))
def mites_plot(self):
    source = ColumnDataSource(self.data)
    p = figure(plot_width=1000, plot_height=400, x_axis_type="datetime",
               title="Click legend entries to show/hide", toolbar_location="above")
    # round max up to nearest 10k
    y_max = math.ceil((self.data['out_free_mites'].append(self.data['out_worker_brood_mites']).append(
        self.data['out_mites_dying']).max() + .01) / 10000) * 10000
    # conc_y_max = self.data['out_chemical_conc_pollen'].append(self.data['out_chemical_conc_nectar']).max()
    # conc_y_max = conc_y_max + conc_y_max * .5
    # if conc_y_max == 0: conc_y_max = 10
    p.yaxis.axis_label = '# of mites'
    p.y_range = Range1d(start=0, end=y_max, bounds='auto')
    # p.extra_y_ranges = {"conc": Range1d(0, conc_y_max, bounds="auto")}
    # p.add_layout(LinearAxis(y_range_name="conc", axis_label='A.I. concentration (\u03bcg/g)'), 'right')
    r_freemites = p.line(x='out_date', y='out_free_mites', source=source, color='navy', alpha=0.5,
                         line_width=6, legend='Free mites', line_join='round')
    r_workerbroodm = p.line(x='out_date', y='out_worker_brood_mites', source=source, color='darkred', alpha=0.5,
                            line_width=6, legend='Worker brood mites', line_join='round')
    r_dronebroodm = p.line(x='out_date', y='out_drone_brood_mites', source=source, color='olive', alpha=0.5,
                           line_width=6, legend='Drone brood mites', line_dash='solid', line_join='round')
    r_mitesdying = p.line(x='out_date', y='out_mites_dying', source=source, color='slategray', alpha=0.5,
                          line_width=6, legend='Mite mortality', line_dash='solid', line_join='round')
    p.legend.location = 'top_left'
    p.legend.click_policy = "hide"
    self.mites = p
    p = self.add_exposure_label(p)
    html = file_html(p, CDN, "mites_plot")
    return html
def pol_nec_plot(self):
    source = ColumnDataSource(self.data)
    p = figure(plot_width=1000, plot_height=400, x_axis_type="datetime",
               title="Click legend entries to show/hide", toolbar_location="above")
    y_max = max(self.model_obj.MaxColPollen, self.model_obj.MaxColNectar) + 1000
    conc_y_max = self.data['out_chemical_conc_pollen'].append(self.data['out_chemical_conc_nectar']).max()
    conc_y_max = conc_y_max + conc_y_max * .5
    if conc_y_max == 0:
        conc_y_max = 10
    p.yaxis.axis_label = 'grams'
    p.y_range = Range1d(start=0, end=y_max, bounds='auto')
    p.extra_y_ranges = {"conc": Range1d(0, conc_y_max, bounds="auto")}
    p.add_layout(LinearAxis(y_range_name="conc", axis_label='A.I. concentration (\u03bcg/g)'), 'right')
    r_pollen = p.line(x='out_date', y='out_colony_pollen', source=source, color='navy', alpha=0.5,
                      line_width=6, legend='Colony pollen', line_join='round')
    r_nectar = p.line(x='out_date', y='out_colony_nectar', source=source, color='darkred', alpha=0.5,
                      line_width=6, legend='Colony nectar', line_join='round')
    r_pollenc = p.line(x='out_date', y='out_chemical_conc_pollen', source=source, color='navy', alpha=0.5,
                       line_width=4, legend='A.I. in pollen', y_range_name='conc',
                       line_dash='dashed', line_join='round')
    r_nectarc = p.line(x='out_date', y='out_chemical_conc_nectar', source=source, color='darkred', alpha=0.5,
                       line_width=4, legend='A.I. in nectar', y_range_name='conc',
                       line_dash='dashed', line_join='round')
    p.legend.location = 'top_left'
    p.legend.click_policy = "hide"
    self.pol_nec = p
    p = self.add_exposure_label(p)
    html = file_html(p, CDN, "nec_pol_plot")
    return html
def bee_pop_plot(self):
    source = ColumnDataSource(self.data)
    p = figure(plot_width=1000, plot_height=400, x_axis_type="datetime",
               title="Click legend entries to show/hide", toolbar_location="above")
    r_csize = p.line(x='out_date', y='out_colony_size', source=source, color='navy', alpha=0.5,
                     line_width=6, legend='Colony size', line_join='round')
    r_foragers = p.line(x='out_date', y='out_foragers', source=source, color='mediumseagreen', alpha=0.5,
                        line_width=4, legend='Foragers', line_join='round')
    r_wadult = p.line(x='out_date', y='out_adult_workers', source=source, color='darkred', alpha=0.5,
                      line_width=4, legend='Worker adults', line_join='round')
    r_wbrood = p.line(x='out_date', y='out_capped_worker_brood', source=source, color='tomato', alpha=0.5,
                      line_width=4, legend='Worker brood', line_join='round')
    r_wlarvae = p.line(x='out_date', y='out_worker_larvae', source=source, color='orange', alpha=0.5,
                       line_width=4, legend='Worker larvae', line_join='round')
    r_weggs = p.line(x='out_date', y='out_worker_eggs', source=source, color='yellow', alpha=0.5,
                     line_width=4, legend='Worker eggs', line_join='round')
    r_dadult = p.line(x='out_date', y='out_adult_drones', source=source, color='slategray', alpha=0.5,
                      line_width=4, legend='Drone adults', line_join='round')
    p.yaxis.axis_label = '# of individuals'
    y_max = math.ceil(self.data['out_colony_size'].max() / 5000) * 5000
    p.y_range = Range1d(start=0, end=y_max, bounds='auto')
    p.legend.location = 'top_left'
    p.legend.click_policy = "hide"
    self.bee_pop = p
    p = self.add_exposure_label(p)
    html = file_html(p, CDN, "population_plot")
    return html
def mortality_plot(self):
    source = ColumnDataSource(self.data)
    p = figure(plot_width=1000, plot_height=400, x_axis_type="datetime",
               title="Click legend entries to show/hide", toolbar_location="above")
    # round max up to nearest 2k
    y_max = math.ceil((self.data['out_dead_worker_adults'].append(self.data['out_dead_worker_larvae']).append(
        self.data['out_dead_foragers']).append(self.data['out_dead_drone_adults']).append(self.data['out_dead_drone_larvae'])
        .max() + .01) / 2000) * 2000
    p.yaxis.axis_label = '# of individuals'
    p.y_range = Range1d(start=0, end=y_max, bounds='auto')
    r_wadultmort = p.line(x='out_date', y='out_dead_worker_adults', source=source, color='navy', alpha=0.5,
                          line_width=6, legend='Worker adult mortality', line_join='round')
    r_wlarvaemort = p.line(x='out_date', y='out_dead_worker_larvae', source=source, color='olive', alpha=0.5,
                           line_width=6, legend='Worker larvae mortality', line_dash='solid', line_join='round')
    r_foragermort = p.line(x='out_date', y='out_dead_foragers', source=source, color='darkred', alpha=0.5,
                           line_width=6, legend='Forager mortality', line_join='round')
    r_dadultmort = p.line(x='out_date', y='out_dead_drone_adults', source=source, color='slategray', alpha=0.5,
                          line_width=6, legend='Drone adult mortality', line_dash='solid', line_join='round')
    r_dlarvaemort = p.line(x='out_date', y='out_dead_drone_larvae', source=source, color='lightgray', alpha=0.5,
                           line_width=6, legend='Drone larvae mortality', line_dash='solid', line_join='round')
    p.legend.location = 'top_left'
    p.legend.click_policy = "hide"
    self.mites = p
    p = self.add_exposure_label(p)
    html = file_html(p, CDN, "mortality_plot")
    return html
def drawSymbol(self, html=None):
    html = file_html(html, CDN, "my plot")
    online = 'http://cdn.pydata.org/bokeh/release'
    offline = 'file://' + os.getcwd().split('/BokehGraphEditor')[0] + '/BokehJS'
    self.html = html.replace(online, offline)
    self.Web.setHtml(self.html)
    self.Web.reload()
def live_map(gdx_file, variable, verbose=False):
    # Read the indicated variable(s) from the GDX file
    data = gdx_file.extract(variable)

    # Truncate unused years
    if 't' in data.coords:
        t_max = int(gdx_file.extract('t_max'))
        years = list(filter(lambda t: int(t) <= t_max, gdx_file.set('t')))
        columns = years
        data = data.sel(t=years)
    else:
        years = None
        columns = [variable]

    # Determine the coordinate containing region data
    region = 'r' if 'r' in data.coords else 'rs'
    if region == 'rs':
        data = data.sel(rs=gdx_file.set('r'))

    if 't' in data.coords:
        data = data.to_dataframe().T.stack(region)
        data.index = data.index.droplevel(0)
    else:
        data = data.to_dataframe()

    # Load map coordinates, merge, and colorize
    map_data = pd.read_hdf(join(DATA_DIR, 'province_map_data.hdf'), 'df')
    if verbose:
        print(data)
    all_data = map_data.merge(data, left_on='alpha', right_index=True)
    colored_data, data_range = color_data(all_data, columns)

    if years is not None:
        colored_data['active_year'] = t_max
        colored_data['active_value'] = colored_data[str(t_max)]
        colored_data['active_color'] = colored_data['%s_color' % t_max]

    # Plot title: description of the variable to be plotted
    TITLE = gdx_file[variable].attrs['_gdx_description']

    # Build the map
    map_box = build_map(colored_data, columns, years)

    # Output the map
    # Open our custom HTML template
    with open(join(DATA_DIR, 'map_template.jinja'), 'r') as f:
        template = Template(f.read())
    resources = Resources(mode='inline')

    # Update these to change the text
    template_variables = {
        'title': TITLE,
        'narrative': 'Data range: {}–{}'.format(data_range[0], data_range[1]),
        'tooltip_css': open(join(DATA_DIR, 'tooltip.css')).read(),
        'bokeh_min_js': resources.js_raw[0],
    }

    # Use inline resources, render the html and open
    html = file_html(map_box, resources, TITLE, template=template,
                     template_variables=template_variables)
    display_html(html, raw=True)
def make_html_plots(G, outdir):
    px = plot_distance_distrib(G)
    py = plot_sem_distrib(G)
    pz = plot_node_degrees(G)
    p = gridplot([[px, py], [pz, None]])
    html = file_html(p, CDN, "Distance distribution")
    out = open(outdir + '/plots.html', 'w')
    out.write(html)
    out.close()
def test_no_border_or_background_fill(output_file_url, selenium, screenshot):

    # Have body background-color that should appear through the no-fill plot
    template = Template("""
    <!doctype html>
    <html lang="en">
    <head>
        {{ bokeh_js }}
        {{ bokeh_css }}
        <style>
            body { background-color: lightblue; }
        </style>
    </head>
    <body>
        {{ plot_script }}
        {{ plot_div }}
    </body>
    </html>
    """)

    plot = Plot(plot_height=HEIGHT, plot_width=WIDTH,
                x_range=Range1d(0, 10), y_range=Range1d(0, 10),
                toolbar_location=None)

    # This is the no-fill that we're testing
    plot.background_fill_color = None
    plot.border_fill_color = None

    plot.add_glyph(Circle(x=3, y=3, size=50, fill_color='#ffffff'))
    plot.add_glyph(Circle(x=6, y=6, size=50, fill_color='#ffffff'))

    plot.add_layout(LinearAxis(major_label_text_color='#ffffff',
                               major_label_text_font_size="30pt"), 'left')
    plot.add_layout(LinearAxis(major_label_text_color='#ffffff',
                               major_label_text_font_size="30pt"), 'below')

    html = file_html(plot, INLINE, template=template)

    # filename has to match test function + '.html'
    filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "test_no_border_or_background_fill.html")

    with io.open(filepath, "w", encoding="utf-8") as f:
        f.write(decode_utf8(html))

    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    screenshot.assert_is_valid()
def drawMap(title, datas):
    """datas: {color: [(lat, lon), ...]}"""
    lat = []
    lon = []
    c = []
    for color in datas:
        c_lat, c_lon = zip(*datas[color])
        # print(c_lat)
        lat.extend(map(float, c_lat))
        lon.extend(map(float, c_lon))
        c.extend([color] * len(datas[color]))

    center_lat = np.median(lat)
    center_lon = np.median(lon)

    x_range = Range1d()
    y_range = Range1d()

    source = ColumnDataSource(
        data=dict(
            lat=lat,
            lon=lon,
            fill=c
        )
    )

    map_options = GMapOptions(lat=center_lat, lng=center_lon, map_type="roadmap", zoom=11)

    plot = GMapPlot(
        x_range=x_range, y_range=y_range,
        map_options=map_options,
        title=title
    )

    # Glyphs (dots on graph)
    circle = Circle(x="lon", y="lat", size=6, line_width=0,
                    fill_color="fill", fill_alpha=0.5, line_alpha=0.0)
    plot.add_glyph(source, circle)

    # Navigation
    pan = PanTool()
    wheel_zoom = WheelZoomTool()
    box_select = BoxSelectTool()
    plot.add_tools(pan, wheel_zoom, box_select)
    overlay = BoxSelectionOverlay(tool=box_select)
    plot.add_layout(overlay)

    return display.HTML(file_html(plot, INLINE, "Google Maps Example"))
def analyzeAndPlot(tickerSymbol, typeOfData):
    # tickerSymbol = 'PKI'
    # typeOfData = 'Open'
    rjson = requests.get('https://www.quandl.com/api/v3/datasets/YAHOO/' + tickerSymbol +
                         '.json?auth_token=L9jUoYTsWeow-DN_EraF')
    data = rjson.json()
    data = data['dataset']
    mydict = {data['column_names'][column]: np.array(data['data'])[:, column]
              for column in range(len(data['column_names']))}

    def newdate(mystring):
        yyyy, mm, dd = mystring.split('-')
        dt = datetime(int(yyyy), int(mm), int(dd))
        return dt

    df = DataFrame(mydict)
    df.Date = df.Date.apply(newdate)
    df = df.set_index('Date').sort_index(ascending=True)

    output_file("stocks.html", title=tickerSymbol + " Data")

    # p1 = figure(x_axis_type="datetime")
    # # p1.title = datetime.now().strftime('%m/%d/%Y')
    # p1.line(df.index, df[typeOfData], color='red', legend=tickerSymbol)
    # p1.title = tickerSymbol + " Stock Prices : " + datetime.now().strftime('%m/%d/%Y %H:%M:%S')
    # p1.grid.grid_line_alpha = 0.3
    # p1.xaxis.axis_label = 'Date'
    # p1.yaxis.axis_label = 'Price'

    # df2 = df.astype(float)[['Open', 'Close', 'Adjusted Close']]
    df2 = df.astype(float)[typeOfData]
    p1 = TimeSeries(df2.reset_index(), index='Date', legend=True,
                    title=tickerSymbol + ' Stock Prices', xlabel='Date', ylabel='Prices')

    outputOption = 2  # output options
    if outputOption == 1:
        # option 1: using bokeh.plotting.save to directly save to html file
        save(p1)  # need to import from bokeh.plotting!
    elif outputOption == 2:
        # option 2: using bokeh.embed.file_html to save to standalone html file
        html = file_html(p1, CDN, "my plot")
        f = open('./templates/_' + tickerSymbol + '.html', 'w')
        returnBtn = '<h1>Generated graph for ' + tickerSymbol + '<br><a href="/index">Back</a></h1>'
        html = html.replace('<body>', '<body>' + returnBtn)
        f.write('%s' % html)
        f.close()
    elif outputOption == 3:
        # option 3: using bokeh.embed.components to save to html components
        script, div = components(p1)
        f = open(tickerSymbol + '.html', 'w')
        f.write('%s' % script)
        f.close()
def make_dts_plot():
    plots = []
    etime = datetime.combine(date.today(), datetime.min.time())
    stime = etime - timedelta(14)
    graphstime = datetime.strptime('01-01-15 00:00:00', '%m-%d-%y %H:%M:%S')
    days = 1

    ### Fetch Data ###
    accept_df = get_data.get_accept_time()
    ingest_df = get_data.get_ingest_time()
    sispi_df = pd.DataFrame(query.query_exptime(query.connect_to_db('db-sispi')[1], etime, datetime.now()),
                            columns=['sispi_time', 'filename'])
    weekly_df = pd.DataFrame(query.query_dts_delay(query.connect_to_db('db-destest')[1], stime, etime),
                             columns=['total_time', 'ncsa_time', 'noao_time', 'xtime'])
    alltime_df = pd.DataFrame(query.query_dts_delay(query.connect_to_db('db-destest')[1], graphstime, etime),
                              columns=['total_time', 'ncsa_time', 'noao_time', 'xtime'])

    ### Standardize file names for merge ###
    trimed_fn = []
    for i, line in sispi_df.iterrows():
        try:
            trimed_fn.append(os.path.basename(line['filename'].split(':')[1]))
        except:
            trimed_fn.append(os.path.basename(line['filename']))
    sispi_df['filename'] = trimed_fn

    ### Merge data ###
    log_df = pd.merge(accept_df, ingest_df, how='inner', on=['filename'])
    live_df = pd.merge(log_df, sispi_df, how='inner', on=['filename'])
    live_df = get_data.convert_timezones(live_df)

    ### Smooth plot ###
    sm_df = get_data.smooth_dts(weekly_df)
    av_df = get_data.average_dts(alltime_df, graphstime, days)

    ### Plot Data ###
    plots.append(plotter.plot_realtime_dts(sm_df, live_df))
    plots.append(plotter.plot_monthly_dts(av_df, days))
    plots.append(plotter.plot_average_dts(alltime_df, days))

    ### Writing plots to HTML ###
    html = file_html(vplot(*plots), INLINE, 'dts')
    filename = 'dts_plot.html'
    filepath = os.path.join(app.config["STATIC_PATH"], filename)
    with open(filepath, 'w') as h:
        h.write('<h5> Last updated on: %s </h5>' % "{0}".format(datetime.now()))
        h.write('<center>\n')
        h.write(html)
        h.write('</center>\n')
def my_link(s1):
    ex = db.get_data(fname=s1)[0]
    # patterns = []
    # for foot in range(2):
    #     for st in ex.steps_annotation[foot]:
    #         if st[1] - st[0] < 30:
    #             continue
    #         patterns += [Pattern(dict(coord='RY', l_pat=st[1]-st[0],
    #                                   foot='right' if foot else 'left'),
    #                              ex.data_sensor[6*foot+4, st[0]:st[1]])]
    #         patterns += [Pattern(dict(coord='AZ', l_pat=st[1]-st[0],
    #                                   foot='right' if foot else 'left'),
    #                              ex.data_sensor[6*foot+2, st[0]:st[1]])]
    #         patterns += [Pattern(dict(coord='AV', l_pat=st[1]-st[0],
    #                                   foot='right' if foot else 'left'),
    #                              ex.data_earth[6*foot+2, st[0]:st[1]])]
    # stepDet = StepDetection(patterns=patterns, lmbd=.8, mu=.1)
    # steps, steps_label = stepDet.compute_steps(ex)
    # print('steps: ', steps)

    seg = ex.seg_annotation
    print('segm: ', seg)
    print(ex.DAZ[0][seg[1]:seg[2]].T)

    T = len(ex.DAZ[0][seg[0]:seg[1]])
    t = np.arange(T) / 100
    plot = figure(width=350, plot_height=250, title="Aller")
    plot.line(t, ex.DAZ[0][seg[0]:seg[1]])

    T = len(ex.DAZ[0][seg[1]:seg[2]])
    t = np.arange(T) / 100
    plot1 = figure(width=350, plot_height=250, title="u-Turn")
    plot1.line(t, ex.DAZ[0][seg[1]:seg[2]])

    T = len(ex.DAZ[0][seg[2]:seg[3]])
    t = np.arange(T) / 100
    plot2 = figure(width=350, plot_height=250, title="Return")
    plot2.line(t, ex.DAZ[0][seg[2]:seg[3]])

    p = hplot(plot, plot1, plot2)
    tab1 = Panel(child=p, title="Segmentation")
    tabs = Tabs(tabs=[tab1])
    text_input = TextInput(value=ex.fname, title="Enregistrement: ")
    layout = vform(text_input, tabs)
    html = file_html(layout, CDN, "home2")
    return html
def get(self):
    # self.write('List of discharges\n')
    #
    # conn = sqlite3.connect('ishtar')
    # curs = conn.cursor()
    # curs.execute('select * from shots')
    # for row, form in enumerate(curs):
    #     for column, item in enumerate(form):
    #         # print str(item)
    #         self.write(str(item) + '\n')
    env = Environment()
    time, data, sampling = readHdf5.getData('00857_Data', 'Generator/Fpower', env)
    # print time[100], data[100]
    plot = figure()
    plot.line(np.array(time), np.array(data))
    html = file_html(plot, CDN, "my plot")
    self.write(html)
def build_graph(ticker):
    url = ('https://www.quandl.com/api/v3/datasets/WIKI/' + ticker.upper() +
           '.json?column_index=4&start_date=2015-08-01&end_date=2015-08-31&order=asc')
    data = requests.get(url).text
    data = simplejson.loads(data)

    # pick out useful list from json
    stock_data = data['dataset']['data']
    stock_value = [member[1] for member in stock_data]
    stock_date = [datetime.strptime(str(member[0]), '%Y-%m-%d') for member in stock_data]

    # use lists generated as x and y axes
    plot = figure(x_axis_type="datetime")
    plot.line(stock_date, stock_value, legend=ticker.upper() + ': Close')
    plot.title = ticker.upper() + ": Aug-2015 Data from Quandl WIKI"
    plot.xaxis.axis_label = 'Date'
    plot.yaxis.axis_label = 'Price'
    plot.circle(stock_date, stock_value)

    html = file_html(plot, CDN, ticker.upper() + " Closing Price plot")
    return html
def index():
    if request.method == 'GET':
        return render_template('index.html', ans1='Closing price',
                               ans2='Adjusted closing price', ans3='Volume')
    else:
        # request was a POST
        data = pd.read_csv("wiki.csv", parse_dates=['date'])
        # d = data[data['name'] == 'A']
        # dates = d['date'].tolist()
        sname = pd.read_csv("stock_name.csv")['x'].tolist()
        s = request.form['symbol']
        if s not in sname:
            return render_template('end.html')
        else:
            d = data[data['name'] == s]
            dates = d['date'].tolist()
            # app.vars['symbol'] = request.form['symbol']
            # f = open('%s.txt' % (app.vars['symbol']), 'w')
            # f.write('%s\n' % (request.form.getlist('answer') == ["Closing price", "Adjusted closing price", "Volume"]))
            # f.write('%s\n' % (request.form['symbol'] == 'A'))
            # f.write('%s\n' % (request.form['symbol']))
            # f.write('%s\n' % (app.vars['symbol']))
            # f.close()
            # f = open('%s.txt' % (app_lulu.vars['symbol']), 'a')
            # f.write('%s\n\n' % (request.form.getlist['answer']))  # this was the 'name' on layout.html!
            color = ["blue", "yellow", "red"]
            # output_file("a.html")
            p = figure(x_axis_type="datetime", x_axis_label='date',
                       title="Data from Quandl WIKI set")
            nd = {}
            nd['Closing'] = "Closing price"
            nd['Adjusted'] = "Adjusted closing price"
            nd['Volume'] = "Volume"
            j = 0
            ts = d['Closing price'].tolist()
            for i in request.form.getlist('answer'):
                ts = d[nd[i]].tolist()
                p.line(dates, ts, legend=nd[i], color=color[j])
                j = j + 1
            html = file_html(p, CDN, "plot")
            fh = open("templates/plot.html", 'w')
            fh.write(html)
            fh.close()
            # f.write("%s\n" % item)
            return render_template('plot.html')
def StockData():
    # Request input data from html webform
    stock = request.form['Ticker Symbol']
    Ptype = int(request.form['Price'])

    # Grab Data from Quandl API & Create Pandas Data Frame
    data = requests.get(url='https://www.quandl.com/api/v3/datasets/WIKI/' + stock +
                        '/data.json?start_date=2012-11-01&api_key=ekga5KU471MGZ5SnFsTM')
    pf = pd.read_json(data.text)
    seriesdata = pd.Series(pf.dataset_data)
    # note that the closing value is at entry 4 of the data node

    # Initialize Arrays & Parameters
    L = len(seriesdata['data'])  # total number of data points to plot
    Price = np.zeros(L)
    Pdate = []
    for n in range(L):
        Price[n] = seriesdata['data'][L - n - 1][Ptype]
        Pdate.append(seriesdata['data'][L - n - 1][0])

    # Create plot of stock price
    output_file("stocks.html", title="Stock Price Example", autosave=True)
    Pdate = np.linspace(0, L - 1, L)
    p2 = figure(x_axis_type="datetime")
    # p2.circle(Pdate, Price, size=4, color='darkgrey', alpha=0.2, legend='close')
    p2.line(Pdate, Price, color='navy')
    p2.title = "Stock Price History"
    p2.grid.grid_line_alpha = 0
    p2.xaxis.axis_label = 'Day'
    p2.yaxis.axis_label = 'Price'
    p2.ygrid.band_fill_color = "olive"
    p2.ygrid.band_fill_alpha = 0.1

    html = file_html(p2, CDN, "stocks")
    with open("static/stocks.html", "w") as f:
        f.write(html)
    # show(p2)  # open a browser
    return redirect('static/stocks.html')
def index():
    if request.method == 'GET':
        return render_template('questions.html')
    else:
        try:
            # reading data provided by a user
            app.vars['ticker'] = request.form['ticker']
            app.vars['features'] = request.form.getlist("features")

            # reading requested data from quandl website
            app.vars['data'] = ds.monthly_data(app.vars['ticker'], var_list=app.vars['features'])

            # plotting data using bokeh
            plot = figure(title="data from Quandl WIKI", x_axis_label='date', x_axis_type="datetime")
            colors = ["red", "blue", "green"]
            for i, var in enumerate(app.vars['features']):
                plot.line(app.vars['data']["time"], app.vars['data'][var], color=colors[i], legend=var)
            app.vars["html"] = file_html(plot, CDN, "my_plot")

            return redirect('/ploting')
        except:
            return redirect('/error')
def post():
    form = Cadastro(request.POST)  # ----- POST METHOD
    a = form.a.data
    b = form.b.data
    c = form.c.data
    derivada = a
    x_range = range(-100, 100)
    y = [(a * x ** 2) + (x * b) + c for x in x_range]
    p = figure(title="Segundo grau", x_axis_label='x', y_axis_label='y')
    p.line(x_range, y, legend="Linha", line_width=2)
    graph_line = file_html(p, CDN)
    return dict(bokeh_line=graph_line, texto="2º Grau", derivada=derivada)
def post():
    form = Cadastro(request.POST)  # ----- POST METHOD
    ex = form.x.data
    # derivada = a
    if ex < 0:
        return redirect('/error_0')
    x_range = range(-100, 100)
    y = [x ** ex for x in x_range]
    p = figure(title="Exponencial", x_axis_label='x', y_axis_label='y')
    p.line(x_range, y, legend="Linha", line_width=2)
    graph_line = file_html(p, CDN)
    return dict(bokeh_line=graph_line, texto="Exponencial")
def create_transposed_plot(self, name, dataset):
    name = name.replace('/ ', '_').replace('/', ' ')  # correct encoding error
    dataset = dataset.select_dtypes(include=['float64', 'datetime64'])
    dataset = dataset.sort_values('date')
    dataset = dataset.transpose()
    # years, months = mdates.YearLocator(), mdates.MonthLocator()
    colors = brewer['Paired'][len(dataset.index)]  # generate color palette
    lines = {}
    for i, color in zip(dataset.index, colors):  # associate colors with index
        if i != 'date':  # ignore date row
            lines[i] = dict(x=list(dataset.loc['date']), y=list(dataset.loc[i]),
                            bokehType='line', legend=i, color=color)
    logging.debug('Transposed plot created')
    plot = BokehPlot(name, lines, figProp=dict(x_axis_type='datetime', title=name))
    html = file_html(plot.document(), CDN)
    return html, plot.plotName
def make_coadd_html():
    try:
        all_df, processed_df, band_df = get_data.create_coadd_map('db-desoper', "Y3A1_COADD")
        p = plotter.plot_coadd(all_df, processed_df, band_df, "Y3A1_COADD")
    except:
        # Bail out here: p is undefined if the plot could not be rendered
        print('Coadd plot not rendered!')
        return

    # Creating output path
    path = os.path.join(app.config["STATIC_PATH"], "reports/coadd/")
    if not os.path.isdir(path):
        os.makedirs(path)

    # Writing plots to HTML
    html = file_html(vplot(p), INLINE, 'coadd')
    filename = 'coadd_map_save.html'
    includepath = 'reports/coadd/coadd_map_save.html'
    filepath = os.path.join(path, filename)
    with open(filepath, 'w') as h:
        h.write('<h5> Last updated on: %s </h5>' % "{0}".format(datetime.now()))
        h.write('<center>\n')
        h.write(html)
        h.write('</center>\n')
def plotData(self, stack, dataSet):
    plotPack = []
    if not stack:
        for i, data in enumerate(dataSet):
            num = self.lineIDCounter + 1
            plotRange = self.getRange([data])
            plotWrap = self.initPlotArea(rng=plotRange, num=num)
            self.addPlotHolder(plotWrap, plotRange)
            plotPack.append(plotWrap)
            legendText = 'plot ' + str(num)
            lineWrap = self.addPlotLine(plotWrap, legendText=legendText, data=data)
            self.addLineHolder(lineWrap)
        html = self.insertPlot(plotPack)
    else:
        num = self.lineIDCounter + 1
        plotRange = self.getRange(dataSet)
        plotWrap = self.initPlotArea(rng=plotRange, num=num)
        self.addPlotHolder(plotWrap, plotRange)
        for i, data in enumerate(dataSet):
            legendText = 'plot ' + str(num)
            lineWrap = self.addPlotLine(plotWrap, legendText=legendText, data=data)
            self.addLineHolder(lineWrap)
        plotPack.append(plotWrap)
        html = self.insertPlot(plotPack)

    html = file_html(html, CDN, "my plot")
    online = 'http://cdn.pydata.org/bokeh/release'
    offline = 'file://' + os.getcwd() + '/BokehJS'
    self.html = html.replace(online, offline)
    self.Web.setHtml(self.html)
    self.Web.reload()
    return self.html
def PDS_to_html(PDS_inputfile, xCol, yCols, HTML_outputfile, title=None, width=600, height=600):
    dfD, dfM = PDS_to_df(PDS_inputfile)
    plot = df_plot(dfD, xCol, yCols, width=width, height=height)

    with open('my_template.jinja', 'r') as f:
        template = Template(f.read())

    js_resources = JSResources(mode='inline')
    css_resources = CSSResources(mode='inline')

    if not title:
        title = os.path.basename(PDS_inputfile)

    html = file_html(plot, None, title, template=template,
                     js_resources=js_resources, css_resources=css_resources,
                     template_variables={
                         "metadata": dfM.sort_values(by='Attribute').to_html(index=False),
                         "H3_title": title,
                         "footer": "Created the " + datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
                     })

    with open(HTML_outputfile, 'w') as f:
        f.write(html)
def resetGraph(self):
    self.Web = QWebView()
    self.Web.setContextMenuPolicy(Qt.CustomContextMenu)
    plotPack = []
    for ID in self.plotIDDict:
        plotWrap, rng = self.plotIDDict[ID]
        plotPack.append(plotWrap)
    html = self.insertPlot(plotPack)
    html = file_html(html, CDN, "my plot")
    online = 'http://cdn.pydata.org/bokeh/release'
    offline = 'file://' + os.getcwd() + '/BokehJS'
    self.html = html.replace(online, offline)
    self.Web.setHtml(self.html)
    f = open('./save.html', 'wb')
    if f:
        f.write(self.html)
    page = self.Web.page()
    frame = page.mainFrame()
    time.sleep(1)
    frame.loadFinished.connect(self.Web.reload)  # this should proceed by multi thread
    self.setCentralWidget(self.Web)
def post():
    form = Cadastro(request.POST)  # ----- POST METHOD
    base = form.base.data
    print(type(base))
    print(base)
    # derivada = a
    if base <= 0:
        return redirect('/error_1')
    x_range = range(1, 100)
    y = [math.log(x, base) for x in x_range]
    p = figure(title="Logarítmica", x_axis_label='x', y_axis_label='y')
    p.line(x_range, y, legend="Linha", line_width=2)
    graph_line = file_html(p, CDN)
    return dict(bokeh_line=graph_line, texto="Logarítmica")
def list(s1):
    s = int(s1)
    print("Exercise n: ", s)
    # output_file("home.html")
    exercise = list_exercises[s]
    T = exercise.X.shape[1]
    t = np.arange(T) / 100

    plot = figure(width=350, plot_height=250, title="Droit Acceleration X")
    plot.line(t, exercise.get_signal("DAX")[0])
    plot2 = figure(width=350, plot_height=250, title="Droit Acceleration Y")
    plot2.line(t, exercise.get_signal("DAY")[0])
    plot3 = figure(width=350, plot_height=250, title="Droit Acceleration Z")
    plot3.line(t, exercise.get_signal("DAZ")[0])
    plot4 = figure(width=350, plot_height=250, title="Droit Rotation X")
    plot4.line(t, exercise.get_signal("DRX")[0])
    plot5 = figure(width=350, plot_height=250, title="Droit Rotation Y")
    plot5.line(t, exercise.get_signal("DRY")[0])
    plot6 = figure(width=350, plot_height=250, title="Droit Rotation Z")
    plot6.line(t, exercise.get_signal("DRZ")[0])

    # p = hplot(plot, plot2)
    p = gridplot([[plot, plot2, plot3], [plot4, plot5, plot6]])
    html = file_html(p, CDN, "home")
    return html
def plot():
    stock_symbol = request.form['ticker']
    today = datetime.date.today()
    end_date = today.strftime('%Y-%m-%d')
    last_month = today - dateutil.relativedelta.relativedelta(months=1)
    start_date = last_month.strftime('%Y-%m-%d')
    query = ('https://www.quandl.com/api/v3/datasets/WIKI/%s/data.csv?'
             'column_index=4&exclude_column_names=true&'
             'start_date=%s&end_date=%s&order=asc&transform=rdiff&'
             'api_key=-65ceTJjtB5J-CK5H1jH' % (
                 stock_symbol, start_date, end_date
             ))
    r = requests.get(query)

    df = pd.DataFrame(columns=['date', 'closing price'])
    closing_prices = r.text.split('\n')
    for i in range(len(closing_prices) - 1):
        df.loc[i] = closing_prices[i].split(',')
    df['date'] = pd.to_datetime(df['date'])
    df['closing price'] = df['closing price'].values.astype(float)

    # create a new plot with a title and axis labels
    fig = figure(
        title="Closing price for %s" % stock_symbol,
        x_axis_type="datetime",
        x_axis_label='Date',
        y_axis_label='Closing Price (USD)'
    )

    # add a line renderer with legend and line thickness
    fig.line(df['date'], df['closing price'], line_width=2)

    return file_html(fig, INLINE, 'stock plot')
    plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
    plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
    plot.add_tools(HoverTool())
    tab = Panel(child=plot, title=title, closable=True)
    return tab

def make_tabs(objs):
    return Tabs(tabs=[make_tab(title, obj) for title, obj in objs], width=600)

layout = Column(children=[
    Paragraph(text="Only Image and ImageRGBA glyphs are not demonstrated."),
    make_tabs(glyphs),
    make_tabs(markers),
])

doc = Document()
doc.add_root(layout)

if __name__ == "__main__":
    doc.validate()
    filename = "glyphs.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Glyphs"))
    print("Wrote %s" % filename)
    view(filename)
def florida():
    df = pd.read_csv(
        'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
    )
    df = df.loc[(df['Country_Region'] == 'US') & (df['Province_State'] == 'Florida')]
    drop_cols = [
        'UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Lat', 'Long_', 'Combined_Key'
    ]
    df.drop(drop_cols, axis=1, inplace=True)
    df = df.melt(id_vars=['Province_State', 'Country_Region', 'Admin2'],
                 value_name='Cases',
                 var_name='Date')
    df['Date'] = pd.to_datetime(df['Date']).dt.date

    yesterday_df = df.loc[df['Date'] == datetime.datetime.today().date() -
                          datetime.timedelta(1)]

    tile_provider = get_provider(CARTODBPOSITRON)

    shapefile = 'cb_2018_us_county_500k/cb_2018_us_county_500k.shp'
    usa = gpd.read_file(shapefile)
    usa = usa[usa['STATEFP'] == '12']
    usa = usa[['NAME', 'GEOID', 'geometry']]
    usa.columns = ['county', 'county_code', 'geometry']

    cases_by_county_df = yesterday_df.groupby(
        'Admin2')['Cases'].sum().reset_index().sort_values('Cases', ascending=False)
    merged = usa.merge(cases_by_county_df, left_on='county', right_on='Admin2', how='inner')
    geosource = GeoJSONDataSource(geojson=merged.to_json())

    # Define color palettes
    # Define a sequential multi-hue color palette.
    palette = brewer['YlGnBu'][8]
    # Reverse color order so that dark blue is the highest case count.
    palette = palette[::-1]
    # Instantiate LogColorMapper that maps numbers in a range into a sequence of colors.
    color_mapper = LogColorMapper(palette=palette, low=merged.Cases.min(), high=merged.Cases.max())

    # Create figure object.
    p = figure(plot_height=800,
               plot_width=700,
               toolbar_location='below',
               tools='pan, wheel_zoom, box_zoom, reset')
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None

    # Add patch renderer to figure.
    counties = p.patches('xs', 'ys', source=geosource,
                         fill_color={'field': 'Cases', 'transform': color_mapper},
                         line_color='gray',
                         line_width=0.75,
                         fill_alpha=1,
                         hover_color='#eda1d1')

    p.background_fill_color = '#1b1c1b'
    p.xaxis.major_tick_line_color = None  # turn off x-axis major ticks
    p.xaxis.minor_tick_line_color = None  # turn off x-axis minor ticks
    p.xaxis.ticker = []
    p.yaxis.major_tick_line_color = None  # turn off y-axis major ticks
    p.yaxis.minor_tick_line_color = None  # turn off y-axis minor ticks
    p.yaxis.ticker = []
    p.outline_line_color = '#1b1c1b'
    p.border_fill_color = '#1b1c1b'

    code_hover = '''
    if((Bokeh.grabbing == 'undefined') || !Bokeh.grabbing) {
        var elm = document.getElementsByClassName('bk-canvas-events')[0]
        elm.style.cursor = 'context-menu'
    }
    '''

    # Create hover tool
    p.add_tools(
        HoverTool(renderers=[counties],
                  tooltips=[('County', '@county'), ('Cases', '@Cases')],
                  callback=CustomJS(code=code_hover)))

    # show(p)
    p1_html = file_html(p, CDN, "USA")

    days = 21
    date_begin = datetime.datetime.today().date() - datetime.timedelta(1 + days)
    date_end = datetime.datetime.today().date() - datetime.timedelta(1)
    df = df.loc[(df['Date'] >= date_begin) & (df['Date'] <= date_end)]
    cases_by_day_df = df.groupby(['Province_State', 'Date'])['Cases'].sum().reset_index()
    cases_by_day_df['new_cases'] = cases_by_day_df['Cases'].diff().dropna()
    cases_by_day_df.dropna(inplace=True)
    cases_by_day_df['Date'] = cases_by_day_df['Date'].astype(str)

    p2 = figure(plot_width=700, plot_height=400, x_range=cases_by_day_df['Date'])
    p2.vbar(x=cases_by_day_df['Date'],
            width=1,
            bottom=0,
            top=cases_by_day_df['new_cases'],
            color="#ffbe3d",
            line_color="black",
            hover_fill_color="#ffbe78")

    code_hover2 = '''
    if((Bokeh.grabbing == 'undefined') || !Bokeh.grabbing) {
        var elm = document.getElementsByClassName('bk-canvas-events')[0]
        elm.style.cursor = 'context-menu'
    }
    '''

    p2.add_tools(
        HoverTool(tooltips=[('Date', '@x'), ('New Cases', '@top')],
                  callback=CustomJS(code=code_hover2)))

    p2.xaxis.ticker = SingleIntervalTicker(interval=7)
    p2.background_fill_color = '#1b1c1b'
    # p.xaxis.major_tick_line_color = None  # turn off x-axis major ticks
    # p.xaxis.minor_tick_line_color = None  # turn off x-axis minor ticks
    # p.xaxis.ticker = []
    # p.yaxis.major_tick_line_color = None  # turn off y-axis major ticks
    # p.yaxis.minor_tick_line_color = None  # turn off y-axis minor ticks
    # p.yaxis.ticker = []
    p2.outline_line_color = '#1b1c1b'
    p2.border_fill_color = '#1b1c1b'
    p2.xgrid.grid_line_color = None
    p2.ygrid.grid_line_color = '#595959'
    # p.xaxis.major_label_orientation = math.pi/2
    p2.yaxis.major_label_text_color = "#ffde21"
    p2.xaxis.major_label_text_color = "#ffde21"

    label_dict = {}
    label_dict[0] = '3 weeks ago'
    label_dict[7] = '2 weeks ago'
    label_dict[14] = '1 week ago'
    label_dict[21] = 'Yesterday'

    p2.xaxis.formatter = FuncTickFormatter(code="""
        var labels = %s;
        return labels[tick];
    """ % label_dict)
    # p.xaxis.ticker = [0, 7, 14, 21]
    # p.xaxis.major_label_overrides = {0: '3 weeks ago', 7: '2 weeks ago', 14: '1 week ago', 21: 'Yesterday'}

    p2.title.text = "New Cases by Day: Last 21 Days"
    p2.title.align = "center"
    p2.title.text_color = "#ffde21"
    p2.title.text_font_size = "18px"
    p2.title.background_fill_color = "#1b1c1b"

    p2_html = file_html(p2, CDN, "home_layout")

    return render_template('florida.html', p1=p1_html, p2=p2_html)
def main(): print('''Please select the CSV dataset you\'d like to use. The dataset should contain these columns: - metric to apply threshold to - indicator of event to detect (e.g. malicious activity) - Please label this as 1 or 0 (true or false); This will not work otherwise! ''') # Import the dataset imported_data = None while isinstance(imported_data, pd.DataFrame) == False: file_path = input('Enter the path of your dataset: ') imported_data = file_to_df(file_path) time.sleep(1) print(f'''\nGreat! Here is a preview of your data: Imported fields:''') # List headers by column index. cols = list(imported_data.columns) for index in range(len(cols)): print(f'{index}: {cols[index]}') print(f'Number of records: {len(imported_data.index)}\n') # Preview the DataFrame time.sleep(1) print(imported_data.head(), '\n') # Prompt for the metric and source of truth. time.sleep(1) metric_col, indicator_col = columns_picker(cols) # User self-validation. col_check = input('Can you confirm if this is correct? (y/n): ').lower() # If it's wrong, let them try again while col_check != 'y': metric_col, indicator_col = columns_picker(cols) col_check = input( 'Can you confirm if this is correct? (y/n): ').lower() else: print( '''\nGreat! Thanks for your patience. Generating summary stats now..\n''' ) # Generate summary stats. time.sleep(1) malicious, normal = classification_split(imported_data, metric_col, indicator_col) mal_mean = malicious.mean() mal_stddev = malicious.std() mal_count = malicious.size mal_median = malicious.median() norm_mean = normal.mean() norm_stddev = normal.std() norm_count = normal.size norm_median = normal.median() print(f'''Normal vs Malicious Summary (metric = {metric_col}): Normal: ----------------------------- Observations: {round(norm_count, 2)} Average: {round(norm_mean, 2)} Median: {round(norm_median, 2)} Standard Deviation: {round(norm_stddev, 2)} Malicious: ----------------------------- Observations: {round(mal_count, 2)} Average: {round(mal_mean, 2)} Median: {round(mal_median, 2)} Standard Deviation: {round(mal_stddev, 2)} ''') # Insights and advisories # Provide the accuracy metrics of a generic threshold at avg + 3 std deviations generic_threshold = confusion_matrix( malicious, normal, threshold_calc(norm_mean, norm_stddev, 3)) time.sleep(1) print( f'''A threshold at (average + 3x standard deviations) {metric_col} would result in: - True Positives (correctly identified malicious events: {generic_threshold['TP']:,} - False Positives (wrongly identified normal events: {generic_threshold['FP']:,} - True Negatives (correctly identified normal events: {generic_threshold['TN']:,} - False Negatives (wrongly identified malicious events: {generic_threshold['FN']:,} Accuracy Metrics: - Precision (what % of events above threshold are actually malicious): {round(generic_threshold['precision'] * 100, 1)}% - Recall (what % of malicious events did we catch): {round(generic_threshold['recall'] * 100, 1)}% - F1 Score (blends precision and recall): {round(generic_threshold['f1_score'] * 100, 1)}%''' ) # Distribution skew check. if norm_mean >= (norm_median * 1.1): time.sleep(1) print( f'''\nYou may want to be cautious as your normal traffic\'s {metric_col} has a long tail towards high values. 
The median is {round(norm_median, 2)} compared to {round(norm_mean, 2)} for the average.''') if mal_mean < threshold_calc(norm_mean, norm_stddev, 2): time.sleep(1) print( f'''\nWarning: you may find it difficult to avoid false positives as the average {metric_col} for malicious traffic is under the 95th percentile of the normal traffic.''' ) # For fun/anticipation. Actually a nerd joke because of the method we'll be using. if '-q' not in sys.argv[1:]: time.sleep(1) play_a_game.billy() decision = input('yes/no: ').lower() while decision != 'yes': time.sleep(1) print('...That\'s no fun...') decision = input('Let\'s try that again: ').lower() # Let's get to the simulations! time.sleep(1) print('''\nInstead of manually experimenting with threshold multipliers, let\'s simulate a range of options and see what produces the best result. This is similar to what is known as \"Monte Carlo simulation\".\n''') # Initialize session name & create app folder if there isn't one. time.sleep(1) session_name = input('Please provide a name for this project/session: ') session_folder = make_folder(session_name) # Generate list of multipliers to iterate over. time.sleep(1) mult_start = float( input( 'Please provide the minimum multiplier you want to start at. We recommend 2: ' )) # Set the max to how many std deviations away the sample max is. mult_end = (imported_data[metric_col].max() - norm_mean) / norm_stddev mult_interval = float( input('Please provide the desired gap between multiplier options: ')) # range() only allows integers, let's manually populate a list multipliers = [] mult_counter = mult_start while mult_counter < mult_end: multipliers.append(round(mult_counter, 2)) mult_counter += mult_interval print('Generating simulations..\n') # Run simulations using our multipliers. simulations = monte_carlo(malicious, normal, norm_mean, norm_stddev, multipliers) print('Done!') time.sleep(1) # Save simulations as CSV for later use. simulation_filepath = os.path.join( session_folder, f'{session_name}_simulation_results.csv') simulations.to_csv(simulation_filepath, index=False) print(f'Saved results to: {simulation_filepath}') # Find the first threshold with the highest F1 score. # This provides a balanced approach between precision and recall. f1_max = simulations[simulations.f1_score == simulations.f1_score.max()].head(1) f1_max_mult = f1_max.squeeze()['multiplier'] time.sleep(1) print( f'''\nBased on the F1 score metric, setting a threshold at {round(f1_max_mult,1)} standard deviations above the average magnitude might provide optimal results.\n''') time.sleep(1) print(f'''{f1_max} We recommend that you skim the CSV and the following visualization outputs to sanity check results and make your own judgement. ''') # Now for the fun part..generating the visualizations via Bokeh. # Header & internal CSS. 
title_text = ''' <style> @font-face { font-family: RobotoBlack; src: url(fonts/Roboto-Black.ttf); font-weight: bold; } @font-face { font-family: RobotoBold; src: url(fonts/Roboto-Bold.ttf); font-weight: bold; } @font-face { font-family: RobotoRegular; src: url(fonts/Roboto-Regular.ttf); } body { background-color: #f2ebe6; } title_header { font-size: 80px; font-style: bold; font-family: RobotoBlack, Helvetica; font-weight: bold; margin-bottom: -200px; } h1, h2, h3 { font-family: RobotoBlack, Helvetica; color: #313596; } p { font-size: 12px; font-family: RobotoRegular } b { color: #58c491; } th, td { text-align:left; padding: 5px; } tr:nth-child(even) { background-color: white; opacity: .7; } .vertical { border-left: 1px solid black; height: 190px; } </style> <title_header style="text-align:left; color: white;"> Cream. </title_header> <p style="font-family: RobotoBold, Helvetica; font-size:18px; margin-top: 0px; margin-left: 5px;"> Because time is money, and <b style="font-size=18px;">"Cash Rules Everything Around Me"</b>. </p> </div> ''' title_div = Div(text=title_text, width=800, height=160, margin=(40, 0, 0, 70)) # Summary stats from earlier. summary_text = f''' <h1>Results Overview</h1> <i>metric = magnitude</i> <table style="width:100%"> <tr> <th>Metric</th> <th>Normal Events</th> <th>Malicious Events</th> </tr> <tr> <td>Observations</td> <td>{norm_count:,}</td> <td>{mal_count:,}</td> </tr> <tr> <td>Average</td> <td>{round(norm_mean, 2):,}</td> <td>{round(mal_mean, 2):,}</td> </tr> <tr> <td>Median</td> <td>{round(norm_median, 2):,}</td> <td>{round(mal_median, 2):,}</td> </tr> <tr> <td>Standard Deviation</td> <td>{round(norm_stddev, 2):,}</td> <td>{round(mal_stddev, 2):,}</td> </tr> </table> ''' summary_div = Div(text=summary_text, width=470, height=320, margin=(3, 0, -70, 73)) # Results of the hypothetical threshold. hypothetical = f''' <h1>"Rule of thumb" Hypothetical Threshold</h1> <p>A threshold at <i>(average + 3x standard deviations)</i> {metric_col} would result in:</p> <ul> <li>True Positives (correctly identified malicious events: <b>{generic_threshold['TP']:,}</b></li> <li>False Positives (wrongly identified normal events: <b>{generic_threshold['FP']:,}</b></li> <li>True Negatives (correctly identified normal events: <b>{generic_threshold['TN']:,}</b></li> <li>False Negatives (wrongly identified malicious events: <b>{generic_threshold['FN']:,}</b></li> </ul> <h2>Accuracy Metrics</h2> <ul> <li>Precision (what % of events above threshold are actually malicious): <b>{round(generic_threshold['precision'] * 100, 1)}%</b></li> <li>Recall (what % of malicious events did we catch): <b>{round(generic_threshold['recall'] * 100, 1)}%</b></li> <li>F1 Score (blends precision and recall): <b>{round(generic_threshold['f1_score'] * 100, 1)}%</b></li> </ul> ''' hypo_div = Div(text=hypothetical, width=600, height=320, margin=(5, 0, -70, 95)) line = ''' <div class="vertical"></div> ''' vertical_line = Div(text=line, width=20, height=320, margin=(80, 0, -70, -10)) # Let's get the exploratory charts generated. 
malicious_hist, malicious_edge = np.histogram(malicious, bins=100) mal_hist_df = pd.DataFrame({ 'metric': malicious_hist, 'left': malicious_edge[:-1], 'right': malicious_edge[1:] }) normal_hist, normal_edge = np.histogram(normal, bins=100) norm_hist_df = pd.DataFrame({ 'metric': normal_hist, 'left': normal_edge[:-1], 'right': normal_edge[1:] }) exploratory = figure( plot_width=plot_width, plot_height=plot_height, sizing_mode='fixed', title=f'{metric_col.capitalize()} Distribution (σ = std dev)', x_axis_label=f'{metric_col.capitalize()}', y_axis_label='Observations') exploratory.title.text_font_size = title_font_size exploratory.border_fill_color = cell_bg_color exploratory.border_fill_alpha = cell_bg_alpha exploratory.background_fill_color = cell_bg_color exploratory.background_fill_alpha = plot_bg_alpha exploratory.min_border_left = left_border exploratory.min_border_right = right_border exploratory.min_border_top = top_border exploratory.min_border_bottom = bottom_border exploratory.quad(bottom=0, top=mal_hist_df.metric, left=mal_hist_df.left, right=mal_hist_df.right, legend_label='malicious', fill_color=malicious_color, alpha=.85, line_alpha=.35, line_width=.5) exploratory.quad(bottom=0, top=norm_hist_df.metric, left=norm_hist_df.left, right=norm_hist_df.right, legend_label='normal', fill_color=normal_color, alpha=.35, line_alpha=.35, line_width=.5) exploratory.add_layout( Arrow(end=NormalHead(fill_color=malicious_color, size=10, line_alpha=0), line_color=malicious_color, x_start=mal_mean, y_start=mal_count, x_end=mal_mean, y_end=0)) arrow_label = Label(x=mal_mean, y=mal_count, y_offset=5, text='Malicious Events', text_font_style='bold', text_color=malicious_color, text_font_size='10pt') exploratory.add_layout(arrow_label) exploratory.xaxis.formatter = NumeralTickFormatter(format='0,0') exploratory.yaxis.formatter = NumeralTickFormatter(format='0,0') # 3 sigma reference line sigma_ref(exploratory, norm_mean, norm_stddev) exploratory.legend.location = "top_right" exploratory.legend.background_fill_alpha = .3 # Zoomed in version overlap_view = figure( plot_width=plot_width, plot_height=plot_height, sizing_mode='fixed', title=f'Overlap Highlight', x_axis_label=f'{metric_col.capitalize()}', y_axis_label='Observations', y_range=(0, mal_count * .33), x_range=(norm_mean + (norm_stddev * 2.5), mal_mean + (mal_stddev * 3)), ) overlap_view.title.text_font_size = title_font_size overlap_view.border_fill_color = cell_bg_color overlap_view.border_fill_alpha = cell_bg_alpha overlap_view.background_fill_color = cell_bg_color overlap_view.background_fill_alpha = plot_bg_alpha overlap_view.min_border_left = left_border overlap_view.min_border_right = right_border overlap_view.min_border_top = top_border overlap_view.min_border_bottom = bottom_border overlap_view.quad(bottom=0, top=mal_hist_df.metric, left=mal_hist_df.left, right=mal_hist_df.right, legend_label='malicious', fill_color=malicious_color, alpha=.85, line_alpha=.35, line_width=.5) overlap_view.quad(bottom=0, top=norm_hist_df.metric, left=norm_hist_df.left, right=norm_hist_df.right, legend_label='normal', fill_color=normal_color, alpha=.35, line_alpha=.35, line_width=.5) overlap_view.xaxis.formatter = NumeralTickFormatter(format='0,0') overlap_view.yaxis.formatter = NumeralTickFormatter(format='0,0') sigma_ref(overlap_view, norm_mean, norm_stddev) overlap_view.legend.location = "top_right" overlap_view.legend.background_fill_alpha = .3 # Probability Density - bigger bins for sparser malicous observations malicious_hist_dense, 
malicious_edge_dense = np.histogram(malicious, density=True, bins=50) mal_hist_dense_df = pd.DataFrame({ 'metric': malicious_hist_dense, 'left': malicious_edge_dense[:-1], 'right': malicious_edge_dense[1:] }) normal_hist_dense, normal_edge_dense = np.histogram(normal, density=True, bins=100) norm_hist_dense_df = pd.DataFrame({ 'metric': normal_hist_dense, 'left': normal_edge_dense[:-1], 'right': normal_edge_dense[1:] }) density = figure(plot_width=plot_width, plot_height=plot_height, sizing_mode='fixed', title='Probability Density', x_axis_label=f'{metric_col.capitalize()}', y_axis_label='% of Group Total') density.title.text_font_size = title_font_size density.border_fill_color = cell_bg_color density.border_fill_alpha = cell_bg_alpha density.background_fill_color = cell_bg_color density.background_fill_alpha = plot_bg_alpha density.min_border_left = left_border density.min_border_right = right_border density.min_border_top = top_border density.min_border_bottom = bottom_border density.quad(bottom=0, top=mal_hist_dense_df.metric, left=mal_hist_dense_df.left, right=mal_hist_dense_df.right, legend_label='malicious', fill_color=malicious_color, alpha=.85, line_alpha=.35, line_width=.5) density.quad(bottom=0, top=norm_hist_dense_df.metric, left=norm_hist_dense_df.left, right=norm_hist_dense_df.right, legend_label='normal', fill_color=normal_color, alpha=.35, line_alpha=.35, line_width=.5) density.xaxis.formatter = NumeralTickFormatter(format='0,0') density.yaxis.formatter = NumeralTickFormatter(format='0.000%') sigma_ref(density, norm_mean, norm_stddev) density.legend.location = "top_right" density.legend.background_fill_alpha = .3 # Simulation Series to be used false_positives = simulations.FP false_negatives = simulations.FN multiplier = simulations.multiplier precision = simulations.precision recall = simulations.recall f1_score = simulations.f1_score f1_max = simulations[simulations.f1_score == simulations.f1_score.max( )].head(1).squeeze()['multiplier'] # False Positives vs False Negatives errors = figure(plot_width=plot_width, plot_height=plot_height, sizing_mode='fixed', x_range=(multiplier.min(), multiplier.max()), y_range=(0, false_positives.max()), title='False Positives vs False Negatives', x_axis_label='Multiplier', y_axis_label='Count') errors.title.text_font_size = title_font_size errors.border_fill_color = cell_bg_color errors.border_fill_alpha = cell_bg_alpha errors.background_fill_color = cell_bg_color errors.background_fill_alpha = plot_bg_alpha errors.min_border_left = left_border errors.min_border_right = right_border errors.min_border_top = top_border errors.min_border_bottom = right_border errors.line(multiplier, false_positives, legend_label='false positives', line_width=2, color=fp_color) errors.line(multiplier, false_negatives, legend_label='false negatives', line_width=2, color=fn_color) errors.yaxis.formatter = NumeralTickFormatter(format='0,0') errors.extra_y_ranges = {"y2": Range1d(start=0, end=1.1)} errors.add_layout( LinearAxis(y_range_name="y2", axis_label="Score", formatter=NumeralTickFormatter(format='0.00%')), 'right') errors.line(multiplier, f1_score, line_width=2, color=f1_color, legend_label='F1 Score', y_range_name="y2") # F1 Score Maximization point f1_thresh = Span(location=f1_max, dimension='height', line_color=f1_color, line_dash='dashed', line_width=2) f1_label = Label(x=f1_max + .05, y=180, y_units='screen', text=f'F1 Max: {round(f1_max,2)}', text_font_size='10pt', text_font_style='bold', text_align='left', text_color=f1_color) 
errors.add_layout(f1_thresh) errors.add_layout(f1_label) errors.legend.location = "top_right" errors.legend.background_fill_alpha = .3 # False Negative Weighting. # Intro. weighting_intro = f''' <h3>Error types differ in impact.</h3> <p>In the case of security incidents, a false negative, though possibly rarer than false positives, is likely more costly. For example, downtime suffered from a DDoS attack (lost sales/customers) incurs more loss than time wasted chasing a false positive (labor hours). </p> <p>Try playing around with the slider to the right to see how your thresholding strategy might need to change depending on the relative weight of false negatives to false positives. What does it look like at 1:1, 50:1, etc.?</p> ''' weighting_div = Div(text=weighting_intro, width=420, height=180, margin=(0, 75, 0, 0)) # Now for the weighted errors viz default_weighting = 10 initial_fp_cost = 100 simulations['weighted_FN'] = simulations.FN * default_weighting weighted_fn = simulations.weighted_FN simulations[ 'total_weighted_error'] = simulations.FP + simulations.weighted_FN total_weighted_error = simulations.total_weighted_error simulations['fp_cost'] = initial_fp_cost fp_cost = simulations.fp_cost simulations[ 'total_estimated_cost'] = simulations.total_weighted_error * simulations.fp_cost total_estimated_cost = simulations.total_estimated_cost twe_min = simulations[simulations.total_weighted_error == simulations.total_weighted_error.min()].head( 1).squeeze()['multiplier'] twe_min_count = simulations[simulations.multiplier == twe_min].head( 1).squeeze()['total_weighted_error'] generic_twe = simulations[simulations.multiplier.apply( lambda x: round(x, 2)) == 3.00].squeeze()['total_weighted_error'] comparison = f''' <p>Based on your inputs, the optimal threshold is around <b>{twe_min}</b>. 
This would result in an estimated <b>{int(twe_min_count):,}</b> total weighted errors and <b>${int(twe_min_count * initial_fp_cost):,}</b> in losses.</p> <p>The generic threshold of 3.0 standard deviations would result in <b>{int(generic_twe):,}</b> total weighted errors and <b>${int(generic_twe * initial_fp_cost):,}</b> in losses.</p> <p>Using the optimal threshold would save <b>${int((generic_twe - twe_min_count) * initial_fp_cost):,}</b>, reducing costs by <b>{(generic_twe - twe_min_count) / generic_twe * 100:.1f}%</b> (assuming near-future events are distributed similarly to those from the past).</p> ''' comparison_div = Div(text=comparison, width=420, height=230, margin=(0, 75, 0, 0)) loss_min = ColumnDataSource(data=dict(multiplier=multiplier, fp=false_positives, fn=false_negatives, weighted_fn=weighted_fn, twe=total_weighted_error, fpc=fp_cost, tec=total_estimated_cost, precision=precision, recall=recall, f1=f1_score)) evaluation = Figure(plot_width=900, plot_height=520, sizing_mode='fixed', x_range=(multiplier.min(), multiplier.max()), title='Evaluation Metrics vs Total Estimated Cost', x_axis_label='Multiplier', y_axis_label='Cost') evaluation.title.text_font_size = title_font_size evaluation.border_fill_color = cell_bg_color evaluation.border_fill_alpha = cell_bg_alpha evaluation.background_fill_color = cell_bg_color evaluation.background_fill_alpha = plot_bg_alpha evaluation.min_border_left = left_border evaluation.min_border_right = right_border evaluation.min_border_top = top_border evaluation.min_border_bottom = bottom_border evaluation.line('multiplier', 'tec', source=loss_min, line_width=3, line_alpha=0.6, color=total_weighted_color, legend_label='Total Estimated Cost') evaluation.yaxis.formatter = NumeralTickFormatter(format='$0,0') # Evaluation metrics on second right axis. 
evaluation.extra_y_ranges = {"y2": Range1d(start=0, end=1.1)} evaluation.add_layout( LinearAxis(y_range_name="y2", axis_label="Score", formatter=NumeralTickFormatter(format='0.00%')), 'right') evaluation.line('multiplier', 'precision', source=loss_min, line_width=3, line_alpha=0.6, color=precision_color, legend_label='Precision', y_range_name="y2") evaluation.line('multiplier', 'recall', source=loss_min, line_width=3, line_alpha=0.6, color=recall_color, legend_label='Recall', y_range_name="y2") evaluation.line('multiplier', 'f1', source=loss_min, line_width=3, line_alpha=0.6, color=f1_color, legend_label='F1 score', y_range_name="y2") evaluation.legend.location = "bottom_right" evaluation.legend.background_fill_alpha = .3 twe_thresh = Span(location=twe_min, dimension='height', line_color=total_weighted_color, line_dash='dashed', line_width=2) twe_label = Label(x=twe_min - .05, y=240, y_units='screen', text=f'Cost Min: {round(twe_min,2)}', text_font_size='10pt', text_font_style='bold', text_align='right', text_color=total_weighted_color) evaluation.add_layout(twe_thresh) evaluation.add_layout(twe_label) # Add in same f1 thresh as previous viz evaluation.add_layout(f1_thresh) evaluation.add_layout(f1_label) handler = CustomJS(args=dict(source=loss_min, thresh=twe_thresh, label=twe_label, comparison=comparison_div), code=""" var data = source.data var ratio = cb_obj.value var multiplier = data['multiplier'] var fp = data['fp'] var fn = data['fn'] var weighted_fn = data['weighted_fn'] var twe = data['twe'] var fpc = data['fpc'] var tec = data['tec'] var generic_twe = 0 function round(value, decimals) { return Number(Math.round(value+'e'+decimals)+'e-'+decimals); } function comma_sep(x) { return x.toString().replace(/\B(?<!\.\d*)(?=(\d{3})+(?!\d))/g, ","); } for (var i = 0; i < multiplier.length; i++) { weighted_fn[i] = Math.round(fn[i] * ratio) twe[i] = weighted_fn[i] + fp[i] tec[i] = twe[i] * fpc[i] if (round(multiplier[i],2) == 3.00) { generic_twe = twe[i] } } var min_loss = Math.min.apply(null,twe) var new_thresh = 0 for (var i = 0; i < multiplier.length; i++) { if (twe[i] == min_loss) { new_thresh = multiplier[i] thresh.location = new_thresh thresh.change.emit() label.x = new_thresh label.text = `Cost Min: ${new_thresh}` label.change.emit() comparison.text = ` <p>Based on your inputs, the optimal threshold is around <b>${new_thresh}</b>. 
This would result in an estimated <b>${comma_sep(round(min_loss,0))}</b> total weighted errors and <b>$${comma_sep(round(min_loss * fpc[i],0))}</b> in losses.</p> <p>The generic threshold of 3.0 standard deviations would result in <b>${comma_sep(round(generic_twe,0))}</b> total weighted errors and <b>$${comma_sep(round(generic_twe * fpc[i],0))}</b> in losses.</p> <p>Using the optimal threshold would save <b>$${comma_sep(round((generic_twe - min_loss) * fpc[i],0))}</b>, reducing costs by <b>${comma_sep(round((generic_twe - min_loss) / generic_twe * 100,0))}%</b> (assuming near-future events are distributed similarly to those from the past).</p> ` comparison.change.emit() } } source.change.emit(); """) slider = Slider(start=1.0, end=500, value=default_weighting, step=.25, title="FN:FP Ratio", bar_color='#FFD100', height=50, margin=(5, 0, 5, 0)) slider.js_on_change('value', handler) cost_handler = CustomJS(args=dict(source=loss_min, comparison=comparison_div), code=""" var data = source.data var new_cost = cb_obj.value var multiplier = data['multiplier'] var fp = data['fp'] var fn = data['fn'] var weighted_fn = data['weighted_fn'] var twe = data['twe'] var fpc = data['fpc'] var tec = data['tec'] var generic_twe = 0 function round(value, decimals) { return Number(Math.round(value+'e'+decimals)+'e-'+decimals); } function comma_sep(x) { return x.toString().replace(/\B(?<!\.\d*)(?=(\d{3})+(?!\d))/g, ","); } for (var i = 0; i < multiplier.length; i++) { fpc[i] = new_cost tec[i] = twe[i] * fpc[i] if (round(multiplier[i],2) == 3.00) { generic_twe = twe[i] } } var min_loss = Math.min.apply(null,twe) var new_thresh = 0 for (var i = 0; i < multiplier.length; i++) { if (twe[i] == min_loss) { new_thresh = multiplier[i] comparison.text = ` <p>Based on your inputs, the optimal threshold is around <b>${new_thresh}</b>. 
This would result in an estimated <b>${comma_sep(round(min_loss,0))}</b> total weighted errors and <b>$${comma_sep(round(min_loss * new_cost,0))}</b> in losses.</p> <p>The generic threshold of 3.0 standard deviations would result in <b>${comma_sep(round(generic_twe,0))}</b> total weighted errors and <b>$${comma_sep(round(generic_twe * new_cost,0))}</b> in losses.</p> <p>Using the optimal threshold would save <b>$${comma_sep(round((generic_twe - min_loss) * new_cost,0))}</b>, reducing costs by <b>${comma_sep(round((generic_twe - min_loss)/generic_twe * 100,0))}%</b> (assuming near-future events are distributed similarly to those from the past).</p> ` comparison.change.emit() } } source.change.emit(); """) cost_input = TextInput(value=f"{initial_fp_cost}", title="How much a false positive costs:", height=75, margin=(20, 75, 20, 0)) cost_input.js_on_change('value', cost_handler) # Include DataTable of simulation results dt_columns = [ TableColumn(field="multiplier", title="Multiplier"), TableColumn(field="fp", title="False Positives", formatter=NumberFormatter(format='0,0')), TableColumn(field="fn", title="False Negatives", formatter=NumberFormatter(format='0,0')), TableColumn(field="weighted_fn", title="Weighted False Negatives", formatter=NumberFormatter(format='0,0.00')), TableColumn(field="twe", title="Total Weighted Errors", formatter=NumberFormatter(format='0,0.00')), TableColumn(field="fpc", title="Estimated FP Cost", formatter=NumberFormatter(format='$0,0.00')), TableColumn(field="tec", title="Estimated Total Cost", formatter=NumberFormatter(format='$0,0.00')), TableColumn(field="precision", title="Precision", formatter=NumberFormatter(format='0.00%')), TableColumn(field="recall", title="Recall", formatter=NumberFormatter(format='0.00%')), TableColumn(field="f1", title="F1 Score", formatter=NumberFormatter(format='0.00%')), ] data_table = DataTable(source=loss_min, columns=dt_columns, width=1400, height=700, sizing_mode='fixed', fit_columns=True, reorderable=True, sortable=True, margin=(30, 0, 20, 0)) # weighting_layout = column([weighting_div, evaluation, slider, data_table]) weighting_layout = column( row(column(weighting_div, cost_input, comparison_div), column(slider, evaluation), Div(text='', height=200, width=60)), data_table) # Initialize visualizations in browser time.sleep(1.5) layout = grid([ [title_div], [row(summary_div, vertical_line, hypo_div)], [ row(Div(text='', height=200, width=60), exploratory, Div(text='', height=200, width=10), overlap_view, Div(text='', height=200, width=40)) ], [Div(text='', height=10, width=200)], [ row(Div(text='', height=200, width=60), density, Div(text='', height=200, width=10), errors, Div(text='', height=200, width=40)) ], [Div(text='', height=10, width=200)], [ row(Div(text='', height=200, width=60), weighting_layout, Div(text='', height=200, width=40)) ], ]) # Generate html resources for dashboard fonts = os.path.join(os.getcwd(), 'fonts') if os.path.isdir(os.path.join(session_folder, 'fonts')): shutil.rmtree(os.path.join(session_folder, 'fonts')) shutil.copytree(fonts, os.path.join(session_folder, 'fonts')) else: shutil.copytree(fonts, os.path.join(session_folder, 'fonts')) html = file_html(layout, INLINE, "Cream") with open(os.path.join(session_folder, f'{session_name}.html'), "w") as file: file.write(html) webbrowser.open("file://" + os.path.join(session_folder, f'{session_name}.html'))
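# A minimal, self-contained sketch of the Slider + CustomJS wiring the dashboard above relies on,
# assuming Bokeh 2.x (plot_width/plot_height sizing). It is illustrative only: the figure, the
# "Scale" slider title, and the rescaling callback are stand-ins, not the dashboard's actual code.
from bokeh.embed import file_html
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, Slider
from bokeh.plotting import figure
from bokeh.resources import INLINE

source = ColumnDataSource(data=dict(x=[0, 1, 2, 3], y=[0, 1, 2, 3]))
plot = figure(plot_width=400, plot_height=300)
plot.line('x', 'y', source=source)

# Rescale y in the browser whenever the slider moves; no Bokeh server is needed.
handler = CustomJS(args=dict(source=source), code="""
    const data = source.data
    const k = cb_obj.value
    for (let i = 0; i < data['y'].length; i++) {
        data['y'][i] = data['x'][i] * k
    }
    source.change.emit()
""")
slider = Slider(start=1, end=10, value=1, step=1, title="Scale")
slider.js_on_change('value', handler)

html = file_html(column(slider, plot), INLINE, "Slider demo")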
from numpy import arange, pi, sin

from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import Circle, ColumnDataSource, LinearAxis, PanTool, Plot, WheelZoomTool
from bokeh.resources import INLINE
from bokeh.util.browser import view

x = arange(-2*pi, 2*pi, 0.1)
y = sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))

plot = Plot(min_border=80)
circle = Circle(x="x", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())

doc = Document()
doc.add_root(plot)

if __name__ == "__main__":
    doc.validate()
    filename = "basic_plot.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Basic Glyph Plot"))
    print("Wrote %s" % filename)
    view(filename)
def query(): """Query script entry point.""" hl.init(default_reference='GRCh38') mt = hl.read_matrix_table(HGDP1KG_TOBWGS) scores = hl.read_table(SCORES) # Filter outliers and related samples mt = mt.semi_join_cols(scores) mt = mt.annotate_cols(scores=scores[mt.s].scores) mt = mt.annotate_cols( study=hl.if_else(mt.s.contains('TOB'), 'TOB-WGS', 'HGDP-1kG')) # PCA plot must all come from the same object columns = mt.cols() pca_scores = columns.scores labels = columns.study sample_names = columns.s cohort_sample_codes = list(set(labels.collect())) tooltips = [('labels', '@label'), ('samples', '@samples')] # get percent variance explained eigenvalues = hl.import_table(EIGENVALUES) eigenvalues = eigenvalues.to_pandas() eigenvalues.columns = ['eigenvalue'] eigenvalues = pd.to_numeric(eigenvalues.eigenvalue) variance = eigenvalues.divide(float(eigenvalues.sum())) * 100 variance = variance.round(2) # Get number of PCs number_of_pcs = len(eigenvalues) print('Making PCA plots labelled by study') for i in range(0, (number_of_pcs - 1)): pc1 = i pc2 = i + 1 print(f'PC{pc1 + 1} vs PC{pc2 + 1}') plot = figure( title='TOB-WGS + HGDP/1kG Dataset', x_axis_label=f'PC{pc1 + 1} ({variance[pc1]}%)', y_axis_label=f'PC{pc2 + 1} ({variance[pc2]}%)', tooltips=tooltips, ) source = ColumnDataSource( dict( x=pca_scores[pc1].collect(), y=pca_scores[pc2].collect(), label=labels.collect(), samples=sample_names.collect(), )) plot.circle( 'x', 'y', alpha=0.5, source=source, size=4, color=factor_cmap('label', ['#1b9e77', '#d95f02'], cohort_sample_codes), legend_group='label', ) plot.add_layout(plot.legend[0], 'left') plot_filename = output_path(f'study_pc{pc2}.png', 'web') with hl.hadoop_open(plot_filename, 'wb') as f: get_screenshot_as_png(plot).save(f, format='PNG') html = file_html(plot, CDN, 'my plot') plot_filename_html = output_path(f'study_pc{pc2}.html', 'web') with hl.hadoop_open(plot_filename_html, 'w') as f: f.write(html) print('Making PCA plots labelled by the subpopulation') labels = columns.hgdp_1kg_metadata.labeled_subpop.collect() labels = ['TOB-WGS' if x is None else x for x in labels] subpopulation = list(set(labels)) # change ordering of subpopulations # so TOB-WGS is at the end and glyphs appear on top subpopulation.append(subpopulation.pop(subpopulation.index('TOB-WGS'))) tooltips = [('labels', '@label'), ('samples', '@samples')] for i in range(0, (number_of_pcs - 1)): pc1 = i pc2 = i + 1 print(f'PC{pc1 + 1} vs PC{pc2 + 1}') plot = figure( title='Subpopulation', x_axis_label=f'PC{pc1 + 1} ({variance[pc1]}%)', y_axis_label=f'PC{pc2 + 1} ({variance[pc2]}%)', tooltips=tooltips, ) source = ColumnDataSource( dict( x=pca_scores[pc1].collect(), y=pca_scores[pc2].collect(), label=labels, samples=sample_names.collect(), )) plot.circle( 'x', 'y', alpha=0.5, source=source, size=4, color=factor_cmap('label', turbo(len(subpopulation)), subpopulation), legend_group='label', ) plot.add_layout(plot.legend[0], 'left') plot_filename = output_path(f'subpopulation_pc{pc2}.png', 'web') with hl.hadoop_open(plot_filename, 'wb') as f: get_screenshot_as_png(plot).save(f, format='PNG') html = file_html(plot, CDN, 'my plot') plot_filename_html = output_path(f'subpopulation_pc{pc2}.html', 'web') with hl.hadoop_open(plot_filename_html, 'w') as f: f.write(html)
import_marker = process_PAsearch.get_markerpos(stub_loc['stub_dir'][0]) df_MRK = pd.DataFrame.from_dict(import_marker, orient='index') df_MRK.columns = ['StgX', 'StgY'] # add the column names # ------------ Create the plots -------------- # Open our custom template with open('PB_template.jinja', 'r') as f: template = Template(f.read()) # Use inline resources, render the html and open bokehlayout = create_bokehplot.makelayout(df_EDAX, df_MRK, img_list) title = 'Particle Search Results' js_resources = JSResources(mode='cdn') css_resources = CSSResources(mode='cdn') html = file_html(bokehlayout, resources=(js_resources, css_resources), title=title, template=template, template_variables=sample_info) script, div = components(bokehlayout) """html = template.render(js_resources=js_resources, css_resources=css_resources, div=div)""" output_file = directory + '.html' with open(directory + '/' + output_file, mode='w', encoding='utf-8') as f: f.write(html)
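# A hedged sketch of what a custom Jinja template for file_html can look like, assuming a Bokeh
# release that exposes the standard template variables (title, bokeh_js, bokeh_css, plot_script,
# plot_div) to user templates. The template text and the "page_heading" variable are illustrative
# placeholders, not the contents of the PB_template.jinja used above.
from jinja2 import Template

from bokeh.embed import file_html
from bokeh.plotting import figure
from bokeh.resources import CDN

template = Template("""
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <title>{{ title }}</title>
    {{ bokeh_css }}
    {{ bokeh_js }}
  </head>
  <body>
    <h1>{{ page_heading }}</h1>
    {{ plot_div }}
    {{ plot_script }}
  </body>
</html>
""")

plot = figure(plot_width=400, plot_height=300)
plot.line([1, 2, 3], [4, 6, 5])

# Extra names passed via template_variables are merged into the template context.
html = file_html(plot, CDN, "Custom template demo",
                 template=template,
                 template_variables={"page_heading": "Embedded with a custom template"})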
doc = Document() doc.add_root(layout) return doc @staticmethod def validate_show_document(html_document, html_filename, html_dir, viewHtml=False): html_document.validate() proper_dir = OSMuxImpl.get_proper_path(html_dir) proper_filename = proper_dir + html_filename with open(proper_filename, "w", encoding='utf-8') as f: f.write(file_html(html_document, INLINE, "Data Tables")) LOGGER.info( "extend_bokeh_datatables.ExtendBokeh.validate_show_document(): wrote %s in dir %s ", html_filename, proper_dir) if viewHtml is not False: view(proper_filename) if __name__ == "__main__": doc = ExtendDataTable.make_example_datatable() doc.validate() dir_name = "workspace/data/bokeh/html/" filename = OSMuxImpl.get_proper_path(dir_name) + "data_tables.html" with open(filename, "w", encoding='utf-8') as f: f.write(file_html(doc, INLINE, "Data Tables")) print("Wrote %s" % filename) view(filename)
def test_file_html_title_is_escaped():
    r = embed.file_html(_embed_test_plot, CDN, "&<")
    assert "<title>&amp;&lt;</title>" in r
def test_file_html_provides_warning_if_no_js(mock_warn):
    css_resources = CSSResources()
    embed.file_html(_embed_test_plot, (None, css_resources), "title")
    mock_warn.assert_called_once_with(
        'No Bokeh JS Resources provided to template. If required you will need to provide them manually.'
    )
plot.add_glyph(source, text) xaxis = CategoricalAxis() xaxis.major_label_text_font_size = "11px" xaxis.major_label_standoff = 0 xaxis.major_tick_line_color = None xaxis.axis_line_color = None plot.add_layout(xaxis, 'above') hover_tool = HoverTool(renderers=[rect_renderer], tooltips=[("Holiday", "@month_holidays")]) plot.tools.append(hover_tool) return plot months = [[make_calendar(2014, 3 * i + j + 1) for j in range(3)] for i in range(4)] grid = gridplot(toolbar_location=None, children=months) doc = Document() doc.add_root(grid) if __name__ == "__main__": doc.validate() filename = "calendars.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Calendar 2014")) print("Wrote %s" % filename) view(filename)
def index(): df = pd.read_csv( 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv' ) drop_cols = [ 'UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Lat', 'Long_', 'Combined_Key' ] df.drop(drop_cols, axis=1, inplace=True) df = df.melt(id_vars=['Province_State', 'Country_Region', 'Admin2'], value_name='Cases', var_name='Date') df['Date'] = pd.to_datetime(df['Date']).dt.date # apply filters for recent datd as of yesterday df_yesterday = df.loc[df['Date'] == datetime.datetime.today().date() - datetime.timedelta(1)] tile_provider = get_provider(CARTODBPOSITRON) shapefile = 'cb_2018_us_state_20m/cb_2018_us_state_20m.shp' usa = gpd.read_file(shapefile)[['NAME', 'STUSPS', 'geometry']] usa.columns = ['state', 'state_code', 'geometry'] cases_by_state_df = df_yesterday.groupby( 'Province_State')['Cases'].sum().reset_index().sort_values( 'Cases', ascending=False) merged = usa.merge(cases_by_state_df, left_on='state', right_on='Province_State', how='inner') merged = merged.loc[-merged['state']. isin(['Alaska', 'Hawaii', 'Puerto Rico'])] geosource = GeoJSONDataSource(geojson=merged.to_json()) # Define color palettes #Define a sequential multi-hue color palette. palette = brewer['YlGnBu'][8] #Reverse color order so that dark blue is highest obesity. palette = palette[::-1] # Instantiate LinearColorMapper that linearly maps numbers in a range, into a sequence of colors. color_mapper = LogColorMapper(palette=palette, low=merged.Cases.min(), high=merged.Cases.max()) # Create figure object. p = figure( plot_height=600, plot_width=1000, toolbar_location='below', tools='pan, wheel_zoom, box_zoom, reset', ) p.xgrid.grid_line_color = None p.ygrid.grid_line_color = None # Add patch renderer to figure. states = p.patches('xs', 'ys', source=geosource, fill_color={ 'field': 'Cases', 'transform': color_mapper }, line_color='#4a4a4a', line_width=0.75, fill_alpha=1) p.background_fill_color = '#1b1c1b' p.xaxis.major_tick_line_color = None # turn off x-axis major ticks p.xaxis.minor_tick_line_color = None # turn off x-axis minor ticks p.xaxis.ticker = [] p.yaxis.major_tick_line_color = None # turn off y-axis major ticks p.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks p.yaxis.ticker = [] p.outline_line_color = '#1b1c1b' p.border_fill_color = '#1b1c1b' # Create hover tool p.add_tools( HoverTool(renderers=[states], tooltips=[('State', '@state'), ('Cases', '@Cases')])) p1_html = file_html(p, CDN, "USA") return render_template('index.html', p1=p1_html)
map_options=map_options, title="Austin" ) source = ColumnDataSource( data=dict( lat=[30.2861, 30.2855, 30.2869], lon=[-97.7394, -97.7390, -97.7405], fill=['orange', 'blue', 'green'] ) ) circle = Circle(x="lon", y="lat", size=15, fill_color="fill", line_color="black") plot.add_glyph(source, circle) pan = PanTool() wheel_zoom = WheelZoomTool() box_select = BoxSelectTool() plot.add_tools(pan, wheel_zoom, box_select) doc = Document() doc.add_root(plot) if __name__ == "__main__": filename = "maps.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Google Maps Example")) print("Wrote %s" % filename) view(filename)
angle = half_tooth(planet_teeth) for i, j in [(+1, 0), (0, +1), (-1, 0), (0, -1)]: glyph = Gear(x=radius * i, y=radius * j, module=module, teeth=planet_teeth, angle=angle, fill_color=fill_color[1], line_color=line_color) plot.add_glyph(glyph) return plot doc = Document() doc.add_root(sample_gear()) classical = classical_gear(5, 52, 24) epicyclic = epicyclic_gear(5, 24, 12) doc.add_root(classical) doc.add_root(epicyclic) if __name__ == "__main__": filename = "gears.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Gears")) print("Wrote %s" % filename) view(filename)
source = ColumnDataSource(data=dict(x=x, y=y, y2=y2))

plot = Plot(x_range=Range1d(start=-6.5, end=6.5), y_range=Range1d(start=-1.1, end=1.1), min_border=80)
plot.extra_y_ranges = {"foo": Range1d(start=0, end=100)}

circle = Circle(x="x", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)

plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')

# second glyph plotted against the extra "foo" range, with its own axis
circle2 = Circle(x="x", y="y2", fill_color="blue", size=5, line_color="black")
plot.add_glyph(source, circle2, y_range_name="foo")
plot.add_layout(LinearAxis(y_range_name="foo"), 'left')

plot.add_tools(PanTool(), WheelZoomTool())

doc = Document()
doc.add_root(plot)  # Document.add() was removed; add_root() is the supported call

if __name__ == "__main__":
    filename = "twin_axis.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Twin Axis Plot"))
    print("Wrote %s" % filename)
    view(filename)
plot.add_glyph(text_source, glyph) def to_base64(png): return "data:image/png;base64," + base64.b64encode(png).decode("utf-8") urls = [ to_base64(icons.get(browser, b"")) for browser in browsers ] x, y = polar_to_cartesian(1.7, start_angles, end_angles) icons_source = ColumnDataSource(dict(urls=urls, x=x, y=y)) glyph = ImageURL(url="urls", x="x", y="y", anchor="center") plot.add_glyph(icons_source, glyph) text = [ "%.02f%%" % value for value in selected.Share ] x, y = polar_to_cartesian(0.7, start_angles, end_angles) text_source = ColumnDataSource(dict(text=text, x=x, y=y)) glyph = Text(x="x", y="y", text="text", text_align="center", text_baseline="middle") plot.add_glyph(text_source, glyph) doc = Document() doc.add_root(plot) if __name__ == "__main__": doc.validate() filename = "donut.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Donut Chart")) print("Wrote %s" % filename) view(filename)
plot.add_layout(LinearAxis(), 'above') plot.add_layout(LinearAxis(), 'below') plot.add_layout(LinearAxis(), 'left') plot.add_layout(LinearAxis(), 'right') pan = PanTool() wheel_zoom = WheelZoomTool() preview_save = PreviewSaveTool() plot.add_tools(pan, wheel_zoom, preview_save) from bokeh.core.enums import LegendLocation for location in LegendLocation: legend = Legend(legends=[(location, [line])], location=location) plot.add_layout(legend) legend = Legend(legends=[("x=100px, y=150px", [line])], location=(100, 150)) plot.add_layout(legend) doc = Document() doc.add_root(plot) if __name__ == "__main__": filename = "legends.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Legends Example")) print("Wrote %s" % filename) view(filename)
h.continuerun(5) plot = figure() plot.line(t, v, line_width=2) plot2 = figure() x = h.Vector(range(10)) plot2.line(x, x*x, line_width=3, line_color='red') html = """ <style>body {{background-color: white}}</style> <h1><span style="color:blue">Hello</span> everybody</h1> Press the button to toggle the graphs (actually: a new one is drawn each time).<br/> <button data-onclick="go">Press me</button><br/><br/><br/> <div id="myplot">{}</div> """.format(file_html(plot, CDN, "")) count_press = 0 def go(): global count_press count_press += 1 if count_press % 2: w.update_html("#myplot", file_html(plot2, CDN, "")) else: w.update_html("#myplot", file_html(plot, CDN, "")) w = window(html, {'go': go}, title='Bokeh Demo')
def create_plot(self, name,mode="spec", scale = 1.0): """ creates displayed plots dumps also peaks in html and csv """ TOOLS="pan, box_zoom, hover, undo, redo, reset, save" diccol = {'r':'red', 'b':'blue', 'g':'green', 'o':'orange', 'k':'black', 'm':'magenta', 'f':'grey'} dbk = {'tools': TOOLS, 'sizing_mode':'scale_width'} dfig = {} fact = 1 if self.kind in ['T2T2', 'T1T2']: self.savesol = self.sol['NNLS_tikhonovK'].reshape(self.N[1], self.N[2])[::-1,::-1] elif self.kind == 'DT2': self.savesol = self.sol['NNLS_tikhonovK'].reshape(self.N[1], self.N[2])[:,::-1] #if debug>0: print("sol.max() before ", sol.max()) # maximum height in solution self.savesol *= (1/self.savesol.max()) # diminish height self.listpp_x, self.listpp_y, self.listpp_z = self.peakpicking(self.savesol) #creates the peak list html file in pandas dataframe format if mode in ("spec","pp"): self.fig, ax = mplt.subplots() xs, ys, col = self.get_contour_data(self.make_plot(ax, self.savesol, scale=scale,remove_artefacts=True, debug=0)) self.xlab='T2 (ms)' dbk['x_axis_label'] = self.xlab if self.kind == "T2T2": self.ylab='T2 (ms)' elif self.kind == "T1T2": self.ylab='T1 (ms)' elif self.kind == "DT2": self.ylab=u'Diff (µm²/s)' dbk['y_axis_label']=self.ylab dbk['title'] = self.kind+ ' spectrum' dbk['x_axis_type'] = 'log' dbk['y_axis_type'] = 'log' min_xs = [] max_xs = [] min_ys = [] max_ys = [] for i in range(len(xs)): min_xs.append(min(xs[i])) max_xs.append(max(xs[i])) for i in range(len(ys)): min_ys.append(min(ys[i])) max_ys.append(max(ys[i])) dbk['x_range'] = Range1d(1E0, 2*max(max_xs)) dbk['y_range'] = Range1d(1E0, 2*max(max_ys)) p = figure(**dbk) dfig['xs']=xs dfig['ys']=ys dfig['color']=col p.multi_line(**dfig) p.line([1, 2*max(max_xs),2*max(max_xs),1,1], [1,1,2*max(max_ys),2*max(max_ys),1], line_width=1.5, color='black') if mode == "pp": p.circle_x(self.listpp_x, self.listpp_y, size=20,color="#DD1C77", fill_alpha=0.2) self.html_plot=file_html(p,CDN) elif mode in ("fidlin","fidlog"): result_calculation, sig_after_randomproj = self.signals_for_comparison() print("Plotting comparison") dbk['x_axis_label']='#FID' dbk['y_axis_label']='Intensity' dbk['x_axis_type'] = "linear" if mode == "fidlin": dbk['title'] = 'Linear FID' dbk['y_axis_type'] = "linear" p = figure(**dbk) p.line(np.arange(result_calculation.size//fact), result_calculation[::fact], legend='Calculated',line_width=1,line_color='blue') p.line(np.arange(self.vv.size//fact), self.vv[::fact], legend='Original',line_color='green') p.line(np.arange(sig_after_randomproj.size//fact), sig_after_randomproj[::fact], line_dash=[4, 4],legend='Signal with random projection',line_color='red') elif mode == "fidlog": dbk['title'] = 'Logarithmic FID' dbk['y_axis_type'] = "log" p = figure(**dbk) p.line(np.arange(self.vv.size//fact), self.vv[::fact], legend='Original',line_color='green') p.line(np.arange(result_calculation.size//fact), np.maximum(1.0,result_calculation[::fact]), legend='Calculated',line_width=1,line_color='blue') self.html_plot=file_html(p,CDN) elif mode == "residual": result_calculation, sig_after_randomproj = self.signals_for_comparison() dfig["y"] = (self.vv[::fact])-(result_calculation[::fact]) dbk['title'] = "Residual" dbk['x_axis_label'] = 'a.u.' dbk['y_axis_label'] = 'a.u.' p = figure(**dbk) dfig["x"] = np.arange(self.vv.size//fact) dfig["size"] = 3 p.scatter(**dfig) self.html_plot=file_html(p,CDN) else: print("Internal ERROR in generic_2D_ILT.create_plot") return
def get_plot(year, quarter): with open('filtered_X_test_predicted_plot_data.pickle', 'rb') as handle: filtered_X_test_predicted_plot_data = pickle.load(handle) with open('dowjones_date_quarter_price_dict.pickle', 'rb') as handle: dowjones_date_quarter_price_dict = pickle.load(handle) month_to_quarter = { 1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2, 7 : 3, 8 : 3, 9 : 3, 10: 4, 11: 4, 12: 4 } int_to_quarter = { 1:'Jan 1 - Mar 31', 2:'Apr 1 - Jun 30', 3:'Jul 1 - Sep 30', 4:'Oct 1 - Dec 31', } #Get dow jones data xdatastr = dowjones_date_quarter_price_dict[(str(year), int_to_quarter[quarter])].keys() xdata = [datetime.strptime(i, '%Y-%m-%d') for i in xdatastr] ypricedata = list(dowjones_date_quarter_price_dict[(str(year), int_to_quarter[quarter])].values()) ydata = [(i-ypricedata[-1])*100/ypricedata[-1] for i in ypricedata] #Get ML model data _columns = filtered_X_test_predicted_plot_data.columns filtered_dict = {c:[] for c in _columns} for index in range(len(filtered_X_test_predicted_plot_data)): row = filtered_X_test_predicted_plot_data.iloc[index] _date = row['Report Date'] if(_date.year == year and month_to_quarter[_date.month] == quarter): for col, value in zip(_columns, row): filtered_dict[col].append(value) df = pd.DataFrame(filtered_dict) df = df.sort_values(by='Report Date') running_cost = 0 running_profit = 0 investment_yield = {} for index in range(len(df)): row = df.iloc[index] cost = row['Cost'] profit = row['Profit'] _date = row['Report Date'] running_cost += cost running_profit += profit temp_yield = running_profit * 100 / running_cost investment_yield[_date] = temp_yield investment_yield = {datetime(d.year, d.month, d.day): v for d, v in investment_yield.items()} #expand ML data to whole quarter #print('xdata') #print(xdata) #print(type(xdata[0])) #print(type(list(investment_yield.keys())[0])) investment_yield1 = {} for _date in xdata: temp = datetime(_date.year, _date.month, _date.day) if investment_yield.get(temp) is None: investment_yield1[_date] = 0 else: investment_yield1[_date] = investment_yield[_date] #backward fill current_yield = 0 for _date in [i for i in reversed(xdata)]: if investment_yield1[_date] != 0: current_yield = investment_yield1[_date] else: investment_yield1[_date] = current_yield #print(investment_yield1) MLxdata = list(investment_yield1.keys()) MLydata = list(investment_yield1.values()) MLxdata = [datetime(d.year, d.month, d.day) for d in MLxdata] try: names = ['ML Predictor Investment Return:', '{}%'.format(np.round(MLydata[0], decimals=2)), 'Naive Investment Return:', '{}%'.format(np.round(ydata[0], decimals=2))] except: print('No data for this selection') names = ['ML Predictor Investment Return:', '{}%'.format(0, decimals=2), 'Naive Investment Return:', '{}%'.format(np.round(ydata[0], decimals=2))] p = figure( tools=['pan','box_zoom','reset','save'], title="Machine Learning and Naive Model Portfolio Growth", x_axis_label='Date', y_axis_label='Portfolio Growth (% Change)', x_axis_type='datetime') p.width = 600 p.height = 400 p.line(xdata, ydata, color='black', legend_label='Dow Jones Index Fund (Naive)') p.line(MLxdata, MLydata, color='red', legend_label='ML Predictor Model') p.legend.location = 'top_left' html = file_html(p, CDN, _always_new=True) with open('templates/plot.html','w') as f: f.write(html) return names
plot.add_layout(LinearAxis(), 'below') plot.add_layout(LinearAxis(), 'left') customjs = CustomJS.from_coffeescript(args=dict(source=source), code=""" Util = require "util/util" data = source.data for i in Util.get_indices(source) color = data['color'][i] window.alert("Selected color: #{color}") """) tap = TapTool(renderers=[circle_renderer], callback=customjs) plot.add_tools(PanTool(), WheelZoomTool(), tap) doc = Document() doc.add_root(plot) if __name__ == "__main__": doc.validate() filename = "customjs.html" with open(filename, "w") as f: f.write( file_html( doc, INLINE, "Demonstration of custom callback written in CoffeeScript")) print("Wrote %s" % filename) view(filename)
select, multi_select, multi_choice, slider, range_slider, date_slider, date_range_slider, spinner, color_picker, date_picker, Row(children=[switch_0, switch_1]), paragraph, div, pre_text, ]), tabs, ]), table, ]) doc = Document() doc.add_root(widgets) if __name__ == "__main__": doc.validate() filename = "widgets.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Widgets")) print("Wrote %s" % filename) view(filename)
from bokeh.models import WheelZoomTool, PanTool, BoxZoomTool from bokeh.models import WMTSTileSource output_file("tile_source_example.html", title="Tile Source Example") # set to roughly full extent of web mercator projection x_range = Range1d(start=-20000000, end=20000000) y_range = Range1d(start=-20000000, end=20000000) # create tile source from templated url tile_options = {} tile_options['url'] = 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png' tile_source = WMTSTileSource(**tile_options) # instantiate plot and add tile source p = Plot(x_range=x_range, y_range=y_range, plot_height=800, plot_width=800) p.add_tools(WheelZoomTool(), PanTool(), BoxZoomTool(match_aspect=True)) tile_renderer_options = {} p.add_tile(tile_source, **tile_renderer_options) doc = Document() doc.add_root(p) if __name__ == "__main__": filename = "tile_source.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Tile Source Example")) print("Wrote %s" % filename) view(filename)
import os
import sys

from bokeh.embed import file_html
from bokeh.models import Range1d
from bokeh.plotting import figure
from bokeh.resources import CDN

filename = sys.argv[1]

x_coords = []
y_coords = []

# read the coords file and extract the x and y coordinates
with open(os.path.join('uploads', 'coords', filename + '.coords'), 'r') as f:
    for line in f:
        x, y = line.strip().split(',')
        x_coords.append(float(x))  # coordinates arrive as text; convert for the numeric ranges
        y_coords.append(float(y))

# build a graph representing the image layout
graph = figure(plot_width=500, plot_height=500)
graph.x_range = Range1d(0, 50)
graph.y_range = Range1d(0, 40)
graph.circle(x_coords, y_coords, size=10, color='navy', alpha=0.5)

# build the html template displaying the layout
html = file_html(graph, CDN, filename + ' Layout')
with open(os.path.join('templates', filename + '.html'), 'w') as f:
    f.write(html)
# # output_file("Afghanistan.html") # # f = figure(plot_width=300, plot_height=300) # # f.multi_line(xs = [Afghan_fuel, Afghan_rice, Afghan_wheat], ys = [y_1, y_2, y_3], color=["red", "green", "blue"]) # # show(f) # f.multi_line(xs = [df["year"][(df["country"] == "Afghanistan") & (df["food"] == "Fuel (diesel)")], # df["year"][(df["country"] == "Afghanistan") & (df["food"] == "Rice (low quality)")] # df["year"][(df["country"] == "Afghanistan") & (df["food"] == "Wheat")]]) for country in countries: x_list = [] y_list = [] products = df["food"][df["country"] == country].unique() for product in products: x = df["year"][(df["country"] == country) & (df["food"] == product)] print(x) y = df["average_price"][(df["country"] == country) & (df["food"] == product)] x_list.append(x) y_list.append(y) fOut = open("chart1.html", "a") f = figure(plot_width=500, plot_height=500, title=country) f.multi_line(xs=x_list, ys=y_list) html = file_html(f, CDN, "chart1") fOut.write(html) fOut.close()
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html

plot = figure()
plot.circle([1, 2], [3, 4])

html = file_html(plot, CDN, "my plot")

with open('bokeh.txt', 'w') as f:
    f.write(html)
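# For comparison, a hedged sketch of the same output written through bokeh.io.output_file/save,
# which renders the document to disk (CDN resources by default) without calling file_html directly.
# The "bokeh.html" filename is illustrative.
from bokeh.io import output_file, save
from bokeh.plotting import figure

plot = figure()
plot.circle([1, 2], [3, 4])

output_file("bokeh.html", title="my plot")  # target file and page <title>
save(plot)                                  # writes bokeh.html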
y = sin(x) # Create an array of times, starting at the current time, and extending # for len(x) number of hours. times = np.arange(len(x)) * 3600000 + time.time() source = ColumnDataSource(data=dict(x=x, y=y, times=times)) xdr = DataRange1d() ydr = DataRange1d() plot = Plot(x_range=xdr, y_range=ydr, min_border=80) circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black") plot.add_glyph(source, circle) plot.add_layout(DatetimeAxis(), 'below') plot.add_layout(DatetimeAxis(), 'left') plot.add_tools(PanTool(), WheelZoomTool()) doc = Document() doc.add_root(plot) if __name__ == "__main__": filename = "dateaxis.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "Date Axis Example")) print("Wrote %s" % filename) view(filename)
                  h=20, angle=0.0, anchor="top_left")
plot.add_glyph(source, image2)

image3 = ImageURL(url=dict(value=url), x=200, y=-100, angle=0.0, anchor="bottom_right")
plot.add_glyph(source, image3)

xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')

yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')

plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

doc = Document()
doc.add_root(plot)  # Document.add() is no longer available; add_root() is the current API

if __name__ == "__main__":
    filename = "image_url.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Image URL Example"))
    print("Wrote %s" % filename)
    view(filename)
def bkExport(self, settings): #layer = iface.legendInterface().layers()[0] layer = settings["layer"] field = settings["field"] gdfList = [] total = float(layer.featureCount()) counter = 0 for feature in layer.getFeatures(): counter = counter + 1 self.dlg.progressBar.setValue(counter / total * 100) featJsonString = feature.geometry().geometry().asJSON(17) featJson = json.loads(featJsonString) df = {} df["geometry"] = shape(featJson) if field: df["data"] = feature[field] else: df["data"] = 0 df["class"] = -1 for hField in settings["hoverFields"]: df[hField[0]] = feature[hField[0]] gdf = gpd.GeoDataFrame([df]) gdfList.append(gdf) gdf2 = gpd.GeoDataFrame(pd.concat(gdfList, ignore_index=True)) lons, lats = self.gpd_bokeh(gdf2) data = list(gdf2["data"]) height = settings["height"] width = settings["width"] renderer = layer.rendererV2() if renderer.type() == 'singleSymbol': print "singleSymbol" color = renderer.symbol().color().name() color_mapper = CategoricalColorMapper(factors=[-1], palette=[color]) elif renderer.type() == 'categorizedSymbol': print "categorizedSymbol" categories = renderer.categories() for i in xrange(len(categories)): if categories[i].value(): try: gdf2["class"][( gdf2["data"] == categories[i].value())] = i except: gdf2["class"][(gdf2["data"] == float( categories[i].value()))] = i colorPalette = [ symbol.color().name() for symbol in renderer.symbols() ] color_mapper = CategoricalColorMapper(factors=sorted( list(gdf2["class"].unique())), palette=colorPalette) elif renderer.type() == 'graduatedSymbol': print "graduatedSymbol" ranges = renderer.ranges() gdf2["class"] = map(renderer.legendKeyForValue, gdf2["data"]) colorPalette = [ symbol.color().name() for symbol in renderer.symbols() ] color_mapper = CategoricalColorMapper(factors=sorted( list(gdf2["class"].unique())), palette=colorPalette) else: print "otherSymbols" if settings["toolbar_location"] == "none": TOOLS = "" else: TOOLS = "pan,wheel_zoom,box_zoom,reset,hover,save" colorClass = list(gdf2["class"]) source = ColumnDataSource( data=dict(x=lons, y=lats, data=data, category=colorClass)) for hField in settings["hoverFields"]: source.add(gdf2[hField[0]], name=hField[0]) if settings["GoogleEnabled"]: print("Enable Google") map_options = GMapOptions( lat=np.nanmean([val for sublist in lats for val in sublist]), lng=np.nanmean([val for sublist in lons for val in sublist]), map_type=settings["googleMapType"], zoom=settings["zoomLevel"]) plot = GMapPlot(plot_width=width, plot_height=height, x_range=DataRange1d(), y_range=DataRange1d(), map_options=map_options, api_key=settings["GMAPIKey"]) source_patches = source patches = Patches(xs='x', ys='y', fill_alpha=settings["alpha"] / 100.0, line_color=settings["outlineColor"], line_width=settings["outlineWidth"], fill_color={ 'field': 'category', 'transform': color_mapper }) patches_glyph = plot.add_glyph(source_patches, patches) plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool(), HoverTool()) else: #plot_width=width, plot_height=height, plot = figure(title=settings["title"], tools=TOOLS, plot_width=width, plot_height=height, x_axis_location=None, y_axis_location=None) plot.grid.grid_line_color = None plot.patches('x', 'y', source=source, fill_alpha=settings["alpha"] / 100.0, line_color=settings["outlineColor"], line_width=settings["outlineWidth"], fill_color={ 'field': 'category', 'transform': color_mapper }) plot.border_fill_color = settings["border_fill_color"] plot.background_fill_color = settings["background_fill_color"] plot.background_fill_alpha = 
settings["background_fill_alpha"] plot.outline_line_alpha = settings["outline_line_alpha"] plot.outline_line_color = settings["outline_line_color"] plot.outline_line_width = settings["outline_line_width"] if settings["toolbar_location"] == "none": plot.toolbar_location = None else: plot.toolbar_location = settings["toolbar_location"] plot.min_border_left = settings["min_border_left"] plot.min_border_right = settings["min_border_right"] plot.min_border_top = settings["min_border_top"] plot.min_border_bottom = settings["min_border_bottom"] plot.sizing_mode = settings["sizing_mode"] if settings["hoverFields"]: hover = plot.select_one(HoverTool) hover.point_policy = "follow_mouse" hover.tooltips = [ #(field, "@data") #("(Long, Lat)", "($x, $y)"), ] for hField in settings["hoverFields"]: temp = "@" + hField[0] hover.tooltips.append((hField[1], temp)) if settings["BokehJS"] == "CDN": print("CDN") html = file_html(plot, CDN, "my plot") elif settings["BokehJS"] == "INLINE": print("INLINE") html = file_html(plot, INLINE, "my plot") with open(self.settings["outputFile"] + "/map.html", "w") as my_html: my_html.write(html) settings["layer"] = settings["layer"].name() print settings with open(self.settings["outputFile"] + "/settings.json", 'w') as fp: json.dump(settings, fp)
def test_file_html_handles_css_only_resources():
    css_resources = CSSResources(mode="relative", components=["bokeh"])
    template = Template("<head>{{ bokeh_css }}</head><body></body>")
    output = embed.file_html(_embed_test_plot, (None, css_resources), "title", template=template)
    html = encode_utf8("<head>%s</head><body></body>" % css_resources.render_css())
    assert output == html
value=1, step=1, title="Year", callback=callback, name='testy') callback.args["slider"] = slider callback.args["renderer_source"] = renderer_source callback.args["text_source"] = text_source # Stick the plot and the slider together layout = vplot(plot, slider) # Open our custom template with open('gapminder_template.jinja', 'r') as f: template = Template(f.read()) # Use inline resources, render the html and open resources = Resources(mode='inline') template_variables = {'bokeh_min_js': resources.js_raw[0]} title = "Bokeh - Gapminder Bubble Plot" html = file_html(layout, resources, title, template=template, template_variables=template_variables) output_file = 'gapminder.html' with open(output_file, 'w') as f: f.write(html) view(output_file)