def total_stats_tab(convoStats):
    """Build the 'Total Stats Table' tab.

    Shows, for every selected conversation and each of its participants,
    how many messages that participant sent and how many conversations
    they initiated (absolute count plus percentage).

    Parameters
    ----------
    convoStats : list
        Objects exposing ``.title``, ``.totalMessages``, ``.countsBySender``
        and ``.initiationsBySender``.

    Returns
    -------
    Panel
        The assembled Bokeh panel for the tab.
    """
    conversationTitles = sorted([x.title for x in convoStats])

    def make_dataset(convoTitles):
        """Return a ColumnDataSource with one row per (conversation, participant)."""
        rows = []
        # Iterate a sorted copy so the caller's list is never mutated
        # (the original sorted convoStats in place on every call).
        for stats in sorted(convoStats, key=lambda x: x.title):
            if stats.title not in convoTitles:
                continue
            totalMessages = stats.totalMessages
            totalInitiations = sum(stats.initiationsBySender.values())
            for participant in sorted(stats.countsBySender.keys()):
                tSent = stats.countsBySender[participant]
                tInitiated = stats.initiationsBySender[participant]
                # Guard the divisions: an empty conversation (no messages or
                # no recorded initiations) must not raise ZeroDivisionError.
                sentPct = tSent / totalMessages * 100 if totalMessages else 0.0
                initPct = (tInitiated / totalInitiations * 100
                           if totalInitiations else 0.0)
                rows.append({
                    'title': f'{stats.title} ({participant})',
                    'messagesSent':
                        f'{tSent} out of {totalMessages} ({sentPct:.2f}%)',
                    'conversationsInitiated':
                        f'{tInitiated} out of {totalInitiations} ({initPct:.2f}%)',
                })
        # DataFrame.append() was removed in pandas 2.0; build the frame in
        # a single construction instead of appending row-by-row.
        df = pd.DataFrame(rows, columns=['title', 'messagesSent',
                                         'conversationsInitiated'])
        return ColumnDataSource(df)

    def make_table(src):
        """Create the DataTable that displays the per-participant stats."""
        table_columns = [
            TableColumn(field='title', title='Chat and participant'),
            TableColumn(field='messagesSent', title='Messages sent'),
            TableColumn(field='conversationsInitiated',
                        title='Conversations initiated'),
        ]
        return DataTable(source=src, columns=table_columns,
                         sizing_mode='stretch_both')

    def on_conversation_selection_changed(attr, oldValue, newValue):
        """Checkbox callback: rebuild the table data for the new selection."""
        convoTitles = [conversationTitles[i] for i in convoSelection.active]
        newSrc = make_dataset(convoTitles)
        src.data.update(newSrc.data)

    # Checkbox list to select which conversations are shown (first preselected).
    convoSelection = CheckboxGroup(labels=conversationTitles, active=[0])
    convoSelection.on_change('active', on_conversation_selection_changed)

    src = make_dataset(conversationTitles[:1])
    stats_table = make_table(src)

    controls = column(convoSelection)
    layout = row(controls, stats_table)
    tab = Panel(child=layout, title='Total Stats Table')
    return tab
class WordFreqsWidget:
    """Widget bundle for word-frequency options: filter choice plus
    an optional per-day normalization toggle."""

    def __init__(self):
        # Radio buttons listing every WordFilter; the second entry starts selected.
        self._word_filter_select = RadioGroup(
            labels=[item.label for item in WordFilter],
            active=1,
        )
        # Single checkbox: normalize word frequencies per day (off by default).
        self._should_normalize = CheckboxGroup(
            labels=["Normalize word frequencies per day"],
            active=[],
        )
        self.widget = column(
            self._word_filter_select,
            self._should_normalize,
            sizing_mode="stretch_width",
        )

    def on_change(self, *callbacks: Callable[[str, object, object], None]) -> None:
        """Register the given callbacks on both child widgets' 'active' property."""
        for child in (self._word_filter_select, self._should_normalize):
            child.on_change("active", *callbacks)

    def set_enabled(self, enabled: bool) -> None:
        """Enable or disable both child widgets together."""
        disabled = not enabled
        self._word_filter_select.disabled = disabled
        self._should_normalize.disabled = disabled

    @property
    def word_filter(self) -> WordFilter:
        """The WordFilter member matching the selected radio button."""
        chosen = self._word_filter_select.active
        for position, candidate in enumerate(WordFilter):
            if position == chosen:
                return candidate
        raise ValueError()

    @property
    def should_normalize(self) -> bool:
        """True when the normalization checkbox is ticked."""
        return len(self._should_normalize.active) > 0
def _setup_state_display_checkbox(self, p):
    """Create the Result/CloseList visibility checkbox and wire its callback."""
    # "Result" (index 0) starts checked; "CloseList" (index 1) starts unchecked.
    selector = CheckboxGroup(labels=["Result", "CloseList"], active=[0])
    selector.on_change("active", self._update_state_display_checkbox_cb)
    # Glyph tuples indexed by checkbox position; consumed by the callback
    # to toggle the matching glyphs on and off.
    self.checkbox_group_show = [
        (self.result_state_glyph,),
        (self.close_state_glyph,),
    ]
    self.state_display_checkbox = selector
def _piechart_widgets(col, sort, mergepast, drop_n, callback):
    """Build the pie-chart control widgets: a sort toggle plus merge/drop sliders.

    All three widgets trigger `callback` when their value changes; slider
    bounds are derived from the number of unique values in `col`.
    """
    # Checkbox starts ticked only when sorting was requested.
    sorted_button = CheckboxGroup(labels=["Sorted"],
                                  active=[0] if sort else [])
    sorted_button.on_change('active', callback)

    unique_count = col.nunique()

    # How many trailing categories to merge into one slice.
    merge_slider = Slider(start=1, end=unique_count,
                          value=mergepast or unique_count,
                          step=1, title="Merge Slider")
    merge_slider.on_change('value', callback)

    # How many categories to drop entirely.
    drop_slider = Slider(start=0, end=unique_count,
                         value=drop_n or 0,
                         step=1, title="Drop Slider")
    drop_slider.on_change('value', callback)

    return sorted_button, merge_slider, drop_slider
p.legend.location = "top_left" return p def update(attr, old, new): companies_to_plot = [company_selection.labels[i] for i in company_selection.active] new_src = make_dataset(companies_to_plot) src.data.update(new_src.data) def company(state): available_companies = df_companies.loc[df_companies['state'] == state][:] sort_companies = list(set(available_companies['company'][:20])) sort_companies.sort() return sort_companies company_selection = CheckboxGroup(labels=company('California'), active = [0, 1]) company_selection.on_change('active', update) initial_companies = [company_selection.labels[i] for i in company_selection.active] company_colors = Category20_16 company_colors.sort() src = make_dataset(initial_companies) plot_state = make_plot(src) controls = WidgetBox(company_selection, width = 200) l1 = row(controls, plot_state) if plot_title == '1': r = row([plot, tplot], sizing_mode='stretch_height') doc.add_root(column(r, l1)) else: l = column(plot, row(splot, tplot), row(numco_plot, sales_plot)) doc.add_root(column(slider, l))
def bargraph_tab(nyc_311_calls):
    """Build the 'Calls by Category' tab: a horizontal stacked bar chart of
    NYC 311 call counts, filterable by borough, category and date range.

    Parameters
    ----------
    nyc_311_calls : pandas.DataFrame
        Must contain 'created_mdy', 'borough' and 'count' columns plus the
        selectable category columns.

    Returns
    -------
    Panel
        The assembled Bokeh panel for the tab.
    """

    def make_dataset(boroughs, category, start_date, end_date):
        """Top-15 category values by total calls, pivoted by borough."""
        date_filter = nyc_311_calls[
            (nyc_311_calls['created_mdy'] >= start_date)
            & (nyc_311_calls['created_mdy'] <= end_date)]
        borough_filter = date_filter[date_filter['borough'].isin(boroughs)]
        df = pd.DataFrame(
            borough_filter.groupby([category, 'borough'])['count'].sum()
        ).reset_index()
        df_pivot = df.pivot_table(values='count', index=category,
                                  columns='borough')
        df_pivot['sum'] = df_pivot.sum(axis=1)
        # Keep only the 15 largest categories; NaN means "no calls" -> 0.
        df_sorted = df_pivot.sort_values('sum', ascending=False).fillna(0)[:15]
        return ColumnDataSource(df_sorted)

    def style(p):
        """Apply the shared visual theme (fonts, colors, formatting) to p."""
        p.title.align = 'center'
        p.title.text_font_size = '19pt'
        p.axis.axis_label_text_font_size = '12pt'
        p.axis.major_label_text_font_size = '10pt'
        p.title.text_font = 'avenir'
        p.axis.axis_label_text_font = 'avenir'
        p.axis.major_label_text_font = 'avenir'
        p.legend.label_text_font = 'avenir'
        p.title.text_color = 'dimgray'
        p.axis.major_label_text_color = 'dimgray'
        p.axis.axis_label_text_color = 'dimgray'
        p.xaxis.axis_label = 'Calls'
        p.title.text_font_style = 'normal'
        p.axis.axis_label_text_font_style = 'normal'
        p.axis.major_label_text_font_style = 'normal'
        p.legend.label_text_font_style = 'normal'
        p.toolbar_location = None
        p.xaxis.formatter = NumeralTickFormatter(format="0,0")
        p.legend.location = "bottom_right"
        return p

    def make_plot(src, title):
        """Horizontal stacked bar chart: one bar per category value, stacked
        by borough, with a formatted hover tool."""
        active_category_values = list(reversed(src.data[active_category]))
        boroughs = [
            x for x in list(src.data.keys()) if x in available_boroughs
        ]
        # NOTE(review): brewer palettes exist only for 3-9 entries; fewer
        # than 3 selected boroughs would raise KeyError here — confirm.
        colors = brewer['YlGnBu'][len(boroughs)]
        p = figure(y_range=active_category_values, title=title,
                   plot_height=700, plot_width=1100)
        p.hbar_stack(boroughs, y=active_category, height=0.9, source=src,
                     color=colors, legend=[x.lower() for x in boroughs],
                     fill_alpha=0.8)
        category_value = f'@{active_category}'
        # Format number values in hover tool annotations as '10,000'.
        hover = HoverTool(
            tooltips=[(display_category, category_value),
                      ('Brooklyn', '@Brooklyn{0,0}'),
                      ('Bronx', '@Bronx{0,0}'),
                      ('Staten Island', '@Staten_Island{0,0}'),
                      ('Manhattan', '@Manhattan{0,0}'),
                      ('Queens', '@Queens{0,0}'),
                      ('Unspecified', '@Unspecified{0,0}')])
        p.add_tools(hover)
        p = style(p)
        return p

    def update(attr, old, new):
        """Widget callback: recompute the dataset and push it into `src`.

        Bug fix vs. original: the old callback also constructed and styled a
        brand-new figure on every event and then discarded it (it was never
        attached to the document) — pure wasted work that could additionally
        raise KeyError via brewer['BuPu'] for <3 boroughs. That dead code is
        removed; only the data source is updated.
        NOTE(review): the visible plot's y_range/title are not refreshed
        here — confirm whether that is the intended (original) behavior.
        """
        category_to_plot = labels_lookup[category_select.value]
        boroughs_to_plot = [
            borough_selection.labels[i] for i in borough_selection.active
        ]
        # DateRangeSlider may report epoch milliseconds (int/float) or dates.
        if isinstance(date_range_slider.value[0], (int, float)):
            start_date = pd.Timestamp(float(date_range_slider.value[0]) * 1e6)
            end_date = pd.Timestamp(float(date_range_slider.value[1]) * 1e6)
        else:
            start_date = pd.Timestamp(date_range_slider.value[0])
            end_date = pd.Timestamp(date_range_slider.value[1])
        new_src = make_dataset(boroughs_to_plot, category_to_plot,
                               start_date, end_date)
        src.data.update(new_src.data)
        print(
            f'new category: {category_to_plot}, new boroughs: {boroughs_to_plot}, start: {start_date}, end: {end_date}'
        )

    # Boroughs available for selection (sorted, deduplicated).
    available_boroughs = sorted(set(nyc_311_calls['borough']))

    # Checkbox for boroughs — all six start selected.
    borough_selection = CheckboxGroup(labels=available_boroughs,
                                      active=[0, 1, 2, 3, 4, 5])
    borough_selection.on_change('active', update)

    # Slider for the date range.
    date_range_slider = DateRangeSlider(title="Date Range: ",
                                        start=date(2020, 1, 1),
                                        end=date.today(),
                                        value=(date(2020, 1, 1), date.today()),
                                        step=10, bar_color='#8c96c6',
                                        tooltips=True)
    date_range_slider.on_change('value', update)

    # Dropdown mapping display names to actual dataframe column names.
    display_labels = [
        'Agency', 'City', 'Descriptor', 'Location Type', 'Status', 'Zip Code'
    ]
    actual_labels = [
        'agency_name', 'cleaned_city', 'cleaned_descriptor',
        'cleaned_location_type', 'status', 'incident_zip'
    ]
    labels_lookup = {
        display: actual
        for display, actual in zip(display_labels, actual_labels)
    }
    category_select = Select(title="Category:", value='Agency',
                             options=display_labels)
    category_select.on_change('value', update)

    # Divider text for the borough checkbox.
    div = Div(text="""Borough:""", width=200, height=15)

    # Initial dataset parameters.
    display_category = category_select.value
    active_category = labels_lookup[display_category]
    initial_boroughs = [
        borough_selection.labels[i] for i in borough_selection.active
    ]
    start_date = pd.to_datetime(date_range_slider.value[0])
    end_date = pd.to_datetime(date_range_slider.value[1])

    # Initial plot.
    src = make_dataset(initial_boroughs, active_category, start_date, end_date)
    p = make_plot(src, f'Calls by {display_category}')

    controls = WidgetBox(date_range_slider, category_select, div,
                         borough_selection)
    layout = row(controls, p)
    tab = Panel(child=layout, title='Calls by Category')
    # Removed dead code: an unused `Tabs(tabs=[tab])` instance was created
    # here and never referenced.
    return tab
'game': df.loc['2019-05-09 01'].game, 'color': df.loc['2019-05-09 01'].game_colors, 'legend': df.loc['2019-05-09 01'].game }) # add circles to the map plot.circle(x='x', y='y', size='size', color='color', legend='legend', source=source, alpha=0.5) plot.legend.orientation = "horizontal" plot.legend.location = "bottom_center" # add three control widgets slider = Slider(title='Hour', start=1, end=50, step=1, value=1) menu = Select(title='Shading', options=['Game Name', 'Sentiment Score'], value='Game Name') checkbox = CheckboxGroup(labels=games, active=[0, 1, 2, 3, 4, 5, 6]) # Attach the callback to the widgets slider.on_change('value', update_plot) menu.on_change('value', update_plot) checkbox.on_change('active', update_plot) # Make a row layout of widgetbox(slider) and plot and add it to the current document layout = row(plot, column(menu, checkbox, widgetbox(slider))) curdoc().add_root(layout)
class Correlation:
    """Correlation-matrix view: pairwise Pearson r between selected variables.

    Group 1 results are plotted in one triangle of a categorical grid and
    Group 2 in the other (see update_correlation_matrix); circle alpha/size
    encode |r| and color encodes the sign of r. Data is pulled from the
    app's shared Bokeh sources and pushed back into the correlation_* sources.
    """

    def __init__(self, sources, categories, custom_title):
        # Shared ColumnDataSources and category metadata from the main app.
        self.sources = sources
        self.correlation_names = categories.correlation_names
        self.range_categories = categories.range
        # Linked later via add_regression_link().
        self.regression = None
        # Per-group correlation data, and UIDs flagged as non-numerical.
        self.data = {n: [] for n in GROUP_LABELS}
        self.bad_uid = {n: [] for n in GROUP_LABELS}

        # Categorical matrix figure; axis factors are filled in later by
        # update_correlation_matrix().
        self.fig = figure(plot_width=900,
                          plot_height=700,
                          x_axis_location="above",
                          tools="pan, box_zoom, wheel_zoom, reset, save",
                          x_range=[''],
                          y_range=[''])
        self.fig.xaxis.axis_label_text_font_size = options.PLOT_AXIS_LABEL_FONT_SIZE
        self.fig.yaxis.axis_label_text_font_size = options.PLOT_AXIS_LABEL_FONT_SIZE
        self.fig.xaxis.major_label_text_font_size = options.PLOT_AXIS_MAJOR_LABEL_FONT_SIZE
        self.fig.yaxis.major_label_text_font_size = options.PLOT_AXIS_MAJOR_LABEL_FONT_SIZE
        # Extra margins so the long rotated category labels fit.
        self.fig.min_border_left = 175
        self.fig.min_border_top = 130
        self.fig.xaxis.major_label_orientation = pi / 4
        self.fig.toolbar.active_scroll = "auto"
        self.fig.title.align = 'center'
        self.fig.title.text_font_style = "italic"
        # Hide axis/grid chrome — only the circles and labels matter.
        self.fig.xaxis.axis_line_color = None
        self.fig.xaxis.major_tick_line_color = None
        self.fig.xaxis.minor_tick_line_color = None
        self.fig.xgrid.grid_line_color = None
        self.fig.ygrid.grid_line_color = None
        self.fig.yaxis.axis_line_color = None
        self.fig.yaxis.major_tick_line_color = None
        self.fig.yaxis.minor_tick_line_color = None
        self.fig.outline_line_color = None

        # One renderer per (group, sign-of-r) combination.
        corr_1_pos = self.fig.circle(x='x', y='y', color='color',
                                     alpha='alpha', size='size',
                                     source=sources.correlation_1_pos)
        corr_1_neg = self.fig.circle(x='x', y='y', color='color',
                                     alpha='alpha', size='size',
                                     source=sources.correlation_1_neg)
        corr_2_pos = self.fig.circle(x='x', y='y', color='color',
                                     alpha='alpha', size='size',
                                     source=sources.correlation_2_pos)
        corr_2_neg = self.fig.circle(x='x', y='y', color='color',
                                     alpha='alpha', size='size',
                                     source=sources.correlation_2_neg)

        self.fig.add_tools(
            HoverTool(
                show_arrow=True,
                line_policy='next',
                tooltips=[('Group', '@group'),
                          ('x', '@x_name'),
                          ('y', '@y_name'),
                          ('r', '@r'),
                          ('p', '@p'),
                          ('Norm p-value x', '@x_normality{0.4f}'),
                          ('Norm p-value y', '@y_normality{0.4f}')],
            ))

        # Diagonal separating the two group triangles.
        self.fig.line(x='x', y='y', source=sources.corr_matrix_line,
                      line_width=3, line_dash='dotted', color='black',
                      alpha=0.8)

        # Set the legend
        legend_corr = Legend(items=[("+r Group 1", [corr_1_pos]),
                                    ("-r Group 1", [corr_1_neg]),
                                    ("+r Group 2", [corr_2_pos]),
                                    ("-r Group 2", [corr_2_neg])],
                             location=(0, -575))

        # Add the layout outside the plot, clicking legend item hides the line
        self.fig.add_layout(legend_corr, 'right')
        self.fig.legend.click_policy = "hide"

        # Sample-size readouts, updated by update_correlation_matrix().
        self.fig_text_1 = Div(text="Group 1:", width=110)
        self.fig_text_2 = Div(text="Group 2:", width=110)

        # Variable-selection checkboxes; the second group toggles the derived
        # variable families (DVH endpoints / EUD / NTCP-TCP).
        self.fig_include = CheckboxGroup(
            labels=self.correlation_names,
            active=options.CORRELATION_MATRIX_DEFAULTS_1)
        self.fig_include_2 = CheckboxGroup(
            labels=['DVH Endpoints', 'EUD', 'NTCP / TCP'],
            active=options.CORRELATION_MATRIX_DEFAULTS_2)
        self.fig_include.on_change('active', self.fig_include_ticker)
        self.fig_include_2.on_change('active', self.fig_include_ticker)

        # CSV download button backed by a JS snippet loaded from disk.
        self.download_corr_fig = Button(
            label="Download Correlation Figure Data",
            button_type="default",
            width=150)
        self.download_corr_fig.callback = CustomJS(
            args=dict(source=self.sources.correlation_csv),
            code=open(join(dirname(dirname(__file__)),
                           "download_new.js")).read())

        self.layout = column(
            Div(text="<b>DVH Analytics v%s</b>" % options.VERSION),
            row(custom_title['1']['correlation'], Spacer(width=50),
                custom_title['2']['correlation']),
            self.download_corr_fig,
            row(Div(text="<b>Sample Sizes</b>", width=100), self.fig_text_1,
                self.fig_text_2),
            row(self.fig, self.fig_include, self.fig_include_2))

    def fig_include_ticker(self, attr, old, new):
        """Checkbox callback: redraw only when at least two boxes are ticked
        across both selector groups (a matrix needs >=2 variables)."""
        if len(self.fig_include.active) + len(self.fig_include_2.active) > 1:
            self.update_correlation_matrix()

    def update_correlation_matrix(self):
        """Recompute Pearson r / p for every selected variable pair and push
        the results into the four correlation_* sources."""
        # Start from the explicitly-checked variable names...
        categories = [
            key for index, key in enumerate(self.correlation_names)
            if index in self.fig_include.active
        ]
        # ...then add the derived families enabled in the second checkbox
        # group (0 = DVH endpoints, 1 = EUD, 2 = NTCP/TCP).
        if 0 in self.fig_include_2.active:
            if self.data['1']:
                categories.extend([
                    x for x in list(self.data['1'])
                    if x.startswith("DVH Endpoint")
                ])
            elif self.data['2']:
                categories.extend([
                    x for x in list(self.data['2'])
                    if x.startswith("DVH Endpoint")
                ])
        if 1 in self.fig_include_2.active:
            if "EUD" in list(self.data['1']) or "EUD" in list(self.data['2']):
                categories.append("EUD")
        if 2 in self.fig_include_2.active:
            if "NTCP/TCP" in list(self.data['1']) or "NTCP/TCP" in list(
                    self.data['2']):
                categories.append("NTCP/TCP")

        categories.sort()
        categories_count = len(categories)

        # Abbreviate long names so axis labels stay readable.
        categories_for_label = [
            category.replace("Control Point", "CP") for category in categories
        ]
        categories_for_label = [
            category.replace("control point", "CP")
            for category in categories_for_label
        ]
        categories_for_label = [
            category.replace("Distance", "Dist")
            for category in categories_for_label
        ]
        for i, category in enumerate(categories_for_label):
            if category.startswith('DVH'):
                categories_for_label[i] = category.split("DVH Endpoint: ")[1]

        self.fig.x_range.factors = categories_for_label
        self.fig.y_range.factors = categories_for_label[::-1]

        # 0.5 offset due to Bokeh 0.12.9 bug
        self.sources.corr_matrix_line.data = {
            'x': [0.5, len(categories) - 0.5],
            'y': [len(categories) - 0.5, 0.5]
        }

        s_keys = [
            'x', 'y', 'x_name', 'y_name', 'color', 'alpha', 'r', 'p', 'group',
            'size', 'x_normality', 'y_normality'
        ]
        s = {
            k: {sk: []
                for sk in s_keys}
            for k in ['1_pos', '1_neg', '2_pos', '2_neg']
        }
        max_size = 45  # circle diameter at |r| == 1
        for x in range(categories_count):
            for y in range(categories_count):
                if x != y:
                    data_to_enter = False
                    # Group 1 fills one triangle (x > y), Group 2 the other.
                    if x > y and self.data['1'][categories[0]]['uid']:
                        n = '1'
                        data_to_enter = True
                    elif x < y and self.data['2'][categories[0]]['uid']:
                        n = '2'
                        data_to_enter = True
                    if data_to_enter:
                        x_data = self.data[n][categories[x]]['data']
                        y_data = self.data[n][categories[y]]['data']
                        # Mismatched lengths fall back to r = p = 0.
                        if x_data and len(x_data) == len(y_data):
                            r, p_value = pearsonr(x_data, y_data)
                        else:
                            r, p_value = 0, 0
                        if r >= 0:
                            k = '%s_pos' % n
                            s[k]['color'].append(
                                getattr(options, 'GROUP_%s_COLOR' % n))
                            s[k]['group'].append('Group %s' % n)
                        else:
                            k = '%s_neg' % n
                            s[k]['color'].append(
                                getattr(options,
                                        'GROUP_%s_COLOR_NEG_CORR' % n))
                            s[k]['group'].append('Group %s' % n)
                        if np.isnan(r):
                            r = 0
                        s[k]['r'].append(r)
                        s[k]['p'].append(p_value)
                        s[k]['alpha'].append(abs(r))
                        s[k]['size'].append(max_size * abs(r))
                        # 0.5 offset due to bokeh 0.12.9 bug
                        s[k]['x'].append(x + 0.5)
                        s[k]['y'].append(categories_count - y - 0.5)
                        s[k]['x_name'].append(categories_for_label[x])
                        s[k]['y_name'].append(categories_for_label[y])
                        x_norm, x_p = normaltest(x_data)
                        y_norm, y_p = normaltest(y_data)
                        s[k]['x_normality'].append(x_p)
                        s[k]['y_normality'].append(y_p)

        for k in ['1_pos', '1_neg', '2_pos', '2_neg']:
            getattr(self.sources, "correlation_%s" % k).data = s[k]

        # Update the sample-size readouts from any one variable's uid list.
        group_count = {n: 0 for n in GROUP_LABELS}
        for n in GROUP_LABELS:
            if self.data[n]:
                group_count[n] = len(self.data[n][list(
                    self.data[n])[0]]['uid'])
        self.fig_text_1.text = "Group 1: %d" % group_count[GROUP_LABELS[0]]
        self.fig_text_2.text = "Group 2: %d" % group_count[GROUP_LABELS[1]]

    def validate_data(self):
        """Drop patients whose data contains the literal string 'None' from
        the correlation dataset (flagged once per UID in self.bad_uid)."""
        for n in GROUP_LABELS:
            if self.data[n]:
                for range_var in list(self.data[n]):
                    for i, j in enumerate(self.data[n][range_var]['data']):
                        if j == 'None':
                            current_uid = self.data[n][range_var]['uid'][i]
                            if current_uid not in self.bad_uid[n]:
                                self.bad_uid[n].append(current_uid)
                                print(
                                    "%s[%s] (mrn: %s) is non-numerical, will remove this patient from correlation data"
                                    % (range_var, i,
                                       self.data[n][range_var]['mrn'][i]))

                # Rebuild the group's data without the flagged UIDs.
                new_correlation = {}
                for range_var in list(self.data[n]):
                    # NOTE(review): units are always read from group '1'
                    # regardless of n — confirm this is intentional.
                    new_correlation[range_var] = {
                        'mrn': [],
                        'uid': [],
                        'data': [],
                        'units': self.data['1'][range_var]['units']
                    }
                    for i in range(len(self.data[n][range_var]['data'])):
                        current_uid = self.data[n][range_var]['uid'][i]
                        if current_uid not in self.bad_uid[n]:
                            for j in {'mrn', 'uid', 'data'}:
                                new_correlation[range_var][j].append(
                                    self.data[n][range_var][j][i])
                self.data[n] = new_correlation

    def update_data(self, correlation_variables):
        """Rebuild self.data for both groups from the DVHs, Plans and Beams
        source tables for the given variables, then refresh the CSV."""
        self.data = {'1': {}, '2': {}}
        temp_keys = ['uid', 'mrn', 'data', 'units']
        # remove review and stats from source
        include = get_include_map(self.sources)
        # Get data from DVHs table
        for key in correlation_variables:
            src = self.range_categories[key]['source']
            curr_var = self.range_categories[key]['var_name']
            table = self.range_categories[key]['table']
            units = self.range_categories[key]['units']
            if table in {'DVHs'}:
                temp = {n: {k: [] for k in temp_keys} for n in GROUP_LABELS}
                # NOTE(review): this sets the top-level temp['units'] only;
                # temp[n]['units'] stays an empty list, so the copied
                # data[...]['units'] below ends up [] — confirm downstream.
                temp['units'] = units
                for i in range(len(src.data['uid'])):
                    if include[i]:
                        for n in GROUP_LABELS:
                            if src.data['group'][i] in {
                                    'Group %s' % n, 'Group 1 & 2'
                            }:
                                temp[n]['uid'].append(src.data['uid'][i])
                                temp[n]['mrn'].append(src.data['mrn'][i])
                                temp[n]['data'].append(src.data[curr_var][i])
                for n in GROUP_LABELS:
                    self.data[n][key] = {k: temp[n][k] for k in temp_keys}

        # 'ROI Max Dose' is used as the reference uid ordering per group.
        uid_list = {
            n: self.data[n]['ROI Max Dose']['uid']
            for n in GROUP_LABELS
        }

        # Get Data from Plans table
        for key in correlation_variables:
            src = self.range_categories[key]['source']
            curr_var = self.range_categories[key]['var_name']
            table = self.range_categories[key]['table']
            units = self.range_categories[key]['units']
            if table in {'Plans'} or key.startswith('Beam Complexity') or key.startswith('Beam Area') or \
                    key.startswith('CP MU') or key.startswith('Beam Perimeter'):
                temp = {n: {k: [] for k in temp_keys} for n in GROUP_LABELS}
                temp['units'] = units
                for n in GROUP_LABELS:
                    for i in range(len(uid_list[n])):
                        uid = uid_list[n][i]
                        uid_index = src.data['uid'].index(uid)
                        temp[n]['uid'].append(uid)
                        temp[n]['mrn'].append(src.data['mrn'][uid_index])
                        temp[n]['data'].append(src.data[curr_var][uid_index])
                for n in GROUP_LABELS:
                    self.data[n][key] = {k: temp[n][k] for k in temp_keys}

        # Get data from Beams table
        for key in correlation_variables:
            src = self.range_categories[key]['source']
            curr_var = self.range_categories[key]['var_name']
            table = self.range_categories[key]['table']
            units = self.range_categories[key]['units']
            stats = ['min', 'mean', 'median', 'max']
            if table in {'Beams'} and not (
                    key.startswith('Beam Complexity')
                    or key.startswith('Beam Area') or key.startswith('CP MU')
                    or key.startswith('Beam Perimeter')):
                beam_keys = stats + ['uid', 'mrn']
                temp = {n: {bk: [] for bk in beam_keys} for n in GROUP_LABELS}
                for n in GROUP_LABELS:
                    for i in range(len(uid_list[n])):
                        uid = uid_list[n][i]
                        # A plan may contribute several beams; aggregate them.
                        uid_indices = [
                            j for j, x in enumerate(src.data['uid'])
                            if x == uid
                        ]
                        plan_values = [
                            src.data[curr_var][j] for j in uid_indices
                        ]
                        temp[n]['uid'].append(uid)
                        temp[n]['mrn'].append(src.data['mrn'][uid_indices[0]])
                        for s in stats:
                            # np.min / np.mean / np.median / np.max by name.
                            temp[n][s].append(getattr(np, s)(plan_values))
                for s in stats:
                    for n in GROUP_LABELS:
                        corr_key = "%s (%s)" % (key, s.capitalize())
                        self.data[n][corr_key] = {
                            'uid': temp[n]['uid'],
                            'mrn': temp[n]['mrn'],
                            'data': temp[n][s],
                            'units': units
                        }
        self.update_csv()

    def update_or_add_endpoints_to_correlation(self):
        """Refresh the 'DVH Endpoint: ...' entries in self.data from the
        endpoint_calcs source, then drop stale ones and refresh the CSV."""
        include = get_include_map(self.sources)

        # clear out any old DVH endpoint data
        # NOTE(review): endpoint keys are formatted "DVH Endpoint: ...", so
        # startswith('ep') never matches them — confirm what this intends
        # to remove (clear_old_endpoints() handles the real stale keys).
        for n in GROUP_LABELS:
            if self.data[n]:
                for key in list(self.data[n]):
                    if key.startswith('ep'):
                        self.data[n].pop(key, None)

        src = self.sources.endpoint_calcs
        for j in range(len(self.sources.endpoint_defs.data['label'])):
            key = self.sources.endpoint_defs.data['label'][j]
            units = self.sources.endpoint_defs.data['units_out'][j]
            ep = "DVH Endpoint: %s" % key
            temp_keys = ['uid', 'mrn', 'data', 'units']
            temp = {n: {k: [] for k in temp_keys} for n in GROUP_LABELS}
            # NOTE(review): same temp['units'] vs temp[n]['units'] pattern
            # as in update_data() — copied units end up [].
            temp['units'] = units
            for i in range(len(src.data['uid'])):
                if include[i]:
                    for n in GROUP_LABELS:
                        if src.data['group'][i] in {
                                'Group %s' % n, 'Group 1 & 2'
                        }:
                            temp[n]['uid'].append(src.data['uid'][i])
                            temp[n]['mrn'].append(src.data['mrn'][i])
                            temp[n]['data'].append(src.data[key][i])
            for n in GROUP_LABELS:
                self.data[n][ep] = {k: temp[n][k] for k in temp_keys}
            # Register new endpoints with the regression module (excluded by
            # default from multi-variable regression).
            if ep not in list(self.regression.multi_var_reg_vars):
                self.regression.multi_var_reg_vars[ep] = False

        # declare space to tag variables to be used for multi variable regression
        for n in GROUP_LABELS:
            for key, value in listitems(self.data[n]):
                self.data[n][key]['include'] = [False] * len(value['uid'])

        self.clear_old_endpoints()
        self.update_csv()

    def update_eud_in_correlation(self):
        """Pull EUD and NTCP/TCP values from the rad_bio source into
        self.data, then re-validate and redraw the matrix."""
        # Get data from EUD data
        # Map "uid_roi" composite keys to row indices in the dvhs source.
        uid_roi_list = [
            "%s_%s" % (uid, self.sources.dvhs.data['roi_name'][i])
            for i, uid in enumerate(self.sources.dvhs.data['uid'])
        ]
        temp_keys = ['eud', 'ntcp_tcp', 'uid', 'mrn']
        temp = {n: {tk: [] for tk in temp_keys} for n in GROUP_LABELS}
        for i, uid in enumerate(self.sources.rad_bio.data['uid']):
            uid_roi = "%s_%s" % (uid,
                                 self.sources.rad_bio.data['roi_name'][i])
            source_index = uid_roi_list.index(uid_roi)
            group = self.sources.dvhs.data['group'][source_index]
            for n in GROUP_LABELS:
                if group in {'Group %s' % n, 'Group 1 & 2'}:
                    temp[n]['eud'].append(self.sources.rad_bio.data['eud'][i])
                    temp[n]['ntcp_tcp'].append(
                        self.sources.rad_bio.data['ntcp_tcp'][i])
                    temp[n]['uid'].append(uid)
                    temp[n]['mrn'].append(
                        self.sources.dvhs.data['mrn'][source_index])

        for n in GROUP_LABELS:
            self.data[n]['EUD'] = {
                'uid': temp[n]['uid'],
                'mrn': temp[n]['mrn'],
                'data': temp[n]['eud'],
                'units': 'Gy'
            }
            self.data[n]['NTCP/TCP'] = {
                'uid': temp[n]['uid'],
                'mrn': temp[n]['mrn'],
                'data': temp[n]['ntcp_tcp'],
                'units': ''
            }

        # declare space to tag variables to be used for multi variable regression
        for n in GROUP_LABELS:
            for key, value in listitems(self.data[n]):
                self.data[n][key]['include'] = [False] * len(value['uid'])

        self.validate_data()
        self.update_correlation_matrix()
        self.update_csv()

    def clear_old_endpoints(self):
        """Remove correlation entries for endpoints no longer defined in the
        DVHs tab, then refresh the regression axis selectors."""
        # As defined in DVHs tab web view
        current_eps = self.sources.endpoint_defs.data['label']

        # endpoints currently in correlation dataset
        eps_in_correlation = [
            key for key in list(self.data[GROUP_LABELS[0]])
            if key.startswith('DVH Endpoint: ')
        ]

        # remove endpoint keys in correlation data that are no longer in DVHs tab web view
        for key in eps_in_correlation:
            if key.startswith('DVH Endpoint'):
                if key.replace('DVH Endpoint: ', '') not in current_eps:
                    self.regression.multi_var_reg_vars.pop(key)
                    for n in GROUP_LABELS:
                        self.data[n].pop(str(key))
        self.regression.update_axis_selector_options()

    def clear_bad_uids(self):
        """Forget all UIDs previously flagged as non-numerical."""
        self.bad_uid = {n: [] for n in GROUP_LABELS}

    def add_regression_link(self, regression):
        """Late-bind the regression module (avoids a circular constructor)."""
        self.regression = regression

    def update_csv(self):
        """Serialize the four correlation sources into the CSV-download source."""
        src_data = [
            self.sources.correlation_1_pos.data,
            self.sources.correlation_1_neg.data,
            self.sources.correlation_2_pos.data,
            self.sources.correlation_2_neg.data
        ]
        src_names = [
            'Group 1 Positive R', 'Group 1 Negative R', 'Group 2 Positive R',
            'Group 2 Negative R'
        ]
        columns = [
            'group', 'x_name', 'y_name', 'x_normality', 'y_normality', 'r',
            'p'
        ]
        csv_text = get_csv(src_data, src_names, columns)
        self.sources.correlation_csv.data = {'text': [csv_text]}
def candlestick_plot():
    """Build the 'StockPrice' tab: a candlestick chart with moving-average
    overlays, a volume subplot and a selectable technical-indicator subplot.

    Returns
    -------
    Panel
        Bokeh panel containing all widgets and the three linked figures.
    """

    def obv_indicator(data):
        """On-Balance Volume from the close/volume series."""
        return talib.OBV(data.close.values, data.volume.values)

    def rsi_indicator(data):
        """14-period Relative Strength Index of the close."""
        return talib.RSI(data.close.values, timeperiod=14)

    def cci_indicator(data):
        """14-period Commodity Channel Index from high/low/close."""
        return talib.CCI(data.high.values, data.low.values,
                         data.close.values, timeperiod=14)

    def technical_indicator(data, indicator):
        """Attach the chosen indicator series to `data` as column 'tech'."""
        if indicator == 'CCI':
            data['tech'] = cci_indicator(data)
        elif indicator == 'RSI':
            data['tech'] = rsi_indicator(data)
        else:
            data['tech'] = obv_indicator(data)
        return data

    def load_data(obid, start, end, freq='1d'):
        """Fetch OHLCV data and add pct-change / index / formatted-date columns."""
        print('running....')
        # NOTE(review): `freqency` looks like a typo for `frequency` —
        # confirm against the get_price() API before changing.
        data = get_price(obid, start, end, freqency=freq).reset_index()
        data['pct_change'] = data['close'].pct_change()
        data['pct_change'] = data['pct_change'].apply(
            lambda x: str(round(x * 100, 2)) + '%')
        data['index'] = list(np.arange(len(data)))
        data['date'] = data['date'].apply(lambda x: x.strftime("%Y%m%d"))
        return data

    def moving_average(data, selection):
        """Add one rolling-mean column per label (window parsed from the
        label suffix, e.g. 'MA_5' -> window 5)."""
        selection_mapping = {k: int(k.split('_')[-1]) for k in selection}
        for k, v in selection_mapping.items():
            data[k] = data['close'].rolling(window=v).mean()
        return data

    def update_lines(attr, old, new):
        """Toggle moving-average line visibility to match the checkboxes."""
        ma_lines = (line_0, line_1, line_2, line_3, line_4, line_5)
        for idx, ma_line in enumerate(ma_lines):
            ma_line.visible = idx in average_selection.active

    def update_plot(attr, old, new):
        """Recompute the technical indicator when the Select changes."""
        indicator = indicator_selection.value
        new_data = technical_indicator(data, indicator)
        new_source = ColumnDataSource(new_data)
        source.data.update(new_source.data)

    def update_data():
        """Button callback: reload everything for the entered stock code and
        date range, and refresh all data sources and axis labels."""
        obid = order_book_id.value
        start = start_date.value
        end = end_date.value
        # Reload prices, then recompute moving averages and the indicator.
        new_data = load_data(obid, start, end)
        new_data_1 = moving_average(new_data, average_labels)
        new_data_2 = technical_indicator(new_data, indicator_selection.value)
        new_source = ColumnDataSource(new_data_2)
        new_source_1 = ColumnDataSource(new_data_1)
        source.data.update(new_source.data)
        source_1.data.update(new_source_1.data)
        inc = new_data.close >= new_data.open
        dec = new_data.close < new_data.open
        inc_source.data = inc_source.from_df(new_data_2.loc[inc])
        dec_source.data = dec_source.from_df(new_data_2.loc[dec])
        p.title.text = instruments(obid).symbol
        p.x_range.end = len(new_data) + 1
        p2.xaxis.major_label_overrides = {
            i: date for i, date in enumerate(new_data['date'])
        }

    today = datetime.now().date()

    average_labels = ["MA_5", "MA_10", "MA_20", "MA_30", "MA_60", "MA_120"]
    # Bug fix: the original passed active=[0, 1, 2, 3, 4, 5, 6] — seven
    # indices for only six labels; index 6 referenced a nonexistent checkbox.
    average_selection = CheckboxGroup(labels=average_labels,
                                      active=[0, 1, 2, 3, 4, 5])

    indicator_selection = Select(title='TechnicalIndicator', value='RSI',
                                 options=['OBV', 'RSI', 'CCI'])

    order_book_id = TextInput(title='StockCode', value='002916.XSHE')
    symbol = instruments(order_book_id.value).symbol

    start_date = DatePicker(title="StartDate", value='2018-01-01',
                            min_date='2015-01-01', max_date=today)
    end_date = DatePicker(title="EndDate", value=today,
                          min_date=start_date.value, max_date=today)

    data = load_data(order_book_id.value, start_date.value, end_date.value)

    # Moving averages of every configured window length.
    data_1 = moving_average(data, average_labels)
    # Initial technical-indicator series.
    data_2 = technical_indicator(data, indicator_selection.value)

    source = ColumnDataSource(data_2)
    source_1 = ColumnDataSource(data_1)

    # Up vs. down days drive the candle colors (red up / green down,
    # Chinese market convention).
    inc = data.close >= data.open
    dec = data.open > data.close
    inc_source = ColumnDataSource(data_2.loc[inc])
    dec_source = ColumnDataSource(data_2.loc[dec])

    TOOLS = 'save, pan, box_zoom, reset, wheel_zoom'
    hover = HoverTool(tooltips=[('date', '@date'),
                                ('open', '@open'),
                                ('high', '@high'),
                                ('low', '@low'),
                                ('close', '@close'),
                                ('pct_change', "@pct_change")])

    length = len(data)
    p = figure(plot_width=1000, plot_height=500, title='{}'.format(symbol),
               tools=TOOLS, x_range=(0, length + 1))
    p.xaxis.visible = False  # hide x-axis; p2 carries the date labels
    p.min_border_bottom = 0

    # Moving-average overlays, one palette color per window length.
    line_0 = p.line(x='index', y='MA_5', source=source_1, color=Spectral6[5])
    line_1 = p.line(x='index', y='MA_10', source=source_1, color=Spectral6[4])
    line_2 = p.line(x='index', y='MA_20', source=source_1, color=Spectral6[3])
    line_3 = p.line(x='index', y='MA_30', source=source_1, color=Spectral6[2])
    line_4 = p.line(x='index', y='MA_60', source=source_1, color=Spectral6[1])
    line_5 = p.line(x='index', y='MA_120', source=source_1, color=Spectral6[0])

    # Candlesticks: high-low wicks plus open-close bodies.
    p.segment(x0='index', y0='high', x1='index', y1='low', color='red',
              source=inc_source)
    p.segment(x0='index', y0='high', x1='index', y1='low', color='green',
              source=dec_source)
    p.vbar('index', 0.5, 'open', 'close', fill_color='red', line_color='red',
           source=inc_source, hover_fill_alpha=0.5)
    p.vbar('index', 0.5, 'open', 'close', fill_color='green',
           line_color='green', source=dec_source, hover_fill_alpha=0.5)
    p.add_tools(hover)

    # Volume subplot sharing the main x-range.
    p1 = figure(plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
                toolbar_location=None)
    p1.vbar('index', 0.5, 0, 'volume', color='red', source=inc_source)
    p1.vbar('index', 0.5, 0, 'volume', color='green', source=dec_source)
    p1.xaxis.visible = False

    # Technical-indicator subplot with real dates on the x-axis.
    p2 = figure(plot_width=p.plot_width, plot_height=p1.plot_height,
                x_range=p.x_range, toolbar_location=None)
    p2.line(x='index', y='tech', source=source)
    p2.xaxis.major_label_overrides = {
        i: date for i, date in enumerate(data['date'])
    }
    p2.xaxis.major_label_orientation = pi / 4
    p2.min_border_bottom = 0

    button = Button(label="ClickToChange", button_type="success")
    button.on_click(update_data)

    average_selection.inline = True
    average_selection.width = 500
    average_selection.on_change('active', update_lines)
    indicator_selection.on_change('value', update_plot)

    widgets = column(row(order_book_id, start_date, end_date, button),
                     row(indicator_selection, average_selection))
    layouts = column(widgets, p, p1, p2)
    tab = Panel(child=layouts, title='StockPrice')
    return tab
# event to deselect all selected locations #map_fig.on_event(events.DoubleTap, update_on_double_tap) # %% define map-controls and handlers map_options = list(TILE_SOURCES.keys()) map_active = [ idx for idx, v in enumerate(TILE_SOURCES.values()) if v["visible"] ] map_layers = CheckboxGroup(labels=map_options, active=map_active) map_fig_idx = { i.name: idx for idx, i in enumerate(map_fig.renderers) if i.name in map_layers.labels } map_layers.on_change("active", update_map_layers) background = RadioGroup(labels=["topografie", "luchtfoto"], active=0) background.on_change("active", update_background) map_controls = column(Div(text="<b>Kaartopties</b><br><br>Kaartlagen"), map_layers, Div(text="Achtergrond"), background) # %% define main filter selection and handlers filters = list() for name, subfilter in zip(data.filters.names, data.filters.filters): select_filter = MultiSelect( title=f"{name}:", value=subfilter.value, options=subfilter.options,
data_source.data = empty_df elif old and i not in old: range = Range1d(start=min([ x.index.min() for j, x in enumerate(data_frames) if j in checkbox_group.active ]), end=max([ x.index.max() for j, x in enumerate(data_frames) if j in checkbox_group.active ])) resample_data_source(i, sub_data_sources, range) checkbox_group = CheckboxGroup(labels=["AAPL", "GOOG"], active=[0, 1]) checkbox_group.on_change("active", change_active_stocks) x_range = Range1d() y_range = Range1d() def do_reset(active_stocks): x_range.start = min([ x.index.min() for i, x in enumerate(data_frames) if i in active_stocks ]) x_range.end = max([ x.index.max() for i, x in enumerate(data_frames) if i in active_stocks ]) y_range.start = min([ x['close'].min() for i, x in enumerate(data_frames) if i in active_stocks
class Scatter:
    """Interactive Bokeh scatter plot of track audio features.

    Builds a scatter of one audio feature against another for the tracks in
    the backing DataFrame, with widgets to filter by song/artist name, date
    played and time-of-day played, plus an optional OLS regression line.

    The backing DataFrame is expected to provide at least the columns read
    below: 'song_name', 'artist_name', 'track_id', 'date_played',
    'time_played', and every column named in AXIS_MAP values.
    """

    # X/Y axis choices: widget label -> DataFrame column name
    AXIS_MAP = {
        "Tempo": "tempo",
        "Duration (sec)": "duration_s",
        "Danceability": "danceability",
        "Energy": "energy",
        "Loudness": "loudness",
        "Speechiness": "speechiness",
        "Acousticness": "acousticness",
        "Instrumentalness": "instrumentalness",
        "Liveness": "liveness",
        "Valence": "valence"
    }
    # Tooltips for circle glyphs
    CIRC_TOOLTIPS = [("Track", "@track_name"), ("Artist", "@artist_name"),
                     ("Times Played", "@count")]

    def __init__(self, df: pd.DataFrame):
        """Build widgets, glyphs and layout, then do an initial data fill.

        Arguments:
        - df : DataFrame backing the plot (one row per track play)
        """
        # Initialize data sources for scatter plot and regression line
        self.backing_df = df
        self.circ_source = ColumnDataSource({
            'x': [],
            'y': [],
            'track_name': [],
            'artist_name': [],
            'count': [],
            'circle_size': []
        })
        self.line_source = ColumnDataSource({'x': [], 'y_pred': []})
        # Initialize widgets
        self.x_axis = Select(title="X Axis",
                             options=list(self.AXIS_MAP.keys()),
                             value="Tempo")
        self.y_axis = Select(title="Y Axis",
                             options=list(self.AXIS_MAP.keys()),
                             value="Duration (sec)")
        # Time-of-day bounds use the epoch date (1970-01-01); only the
        # time component is meaningful to the time slider.
        time_start = datetime.datetime(1970, 1, 1, hour=0, minute=0, second=0)
        time_end = datetime.datetime(1970, 1, 1, hour=23, minute=59, second=59)
        # Date slider spans from the earliest to the latest play date,
        # widened to whole days.
        start_date = min(self.backing_df['date_played'])
        start_dt = datetime.datetime(year=start_date.year,
                                     month=start_date.month,
                                     day=start_date.day,
                                     hour=0,
                                     minute=0,
                                     second=0)
        end_date = max(self.backing_df['date_played'])
        end_dt = datetime.datetime(year=end_date.year,
                                   month=end_date.month,
                                   day=end_date.day,
                                   hour=23,
                                   minute=59,
                                   second=59)
        date_step_size = 1000 * 60 * 60 * 24  # Step size of 1 day in ms
        self.date_slider = DateRangeSlider(title="Date Range",
                                           start=start_dt,
                                           end=end_dt,
                                           value=(start_dt, end_dt),
                                           format="%d %b %Y",
                                           step=date_step_size)
        time_step_size = 1000 * 60 * 30  # 30 minues in ms
        self.time_slider = DateRangeSlider(title="Time Range",
                                           value=(time_start, time_end),
                                           start=time_start,
                                           end=time_end,
                                           format="%X",
                                           step=time_step_size)
        self.track_name = TextInput(title="Song name includes")
        self.artist_name = TextInput(title="Artist name includes")
        self.reg_line_check = CheckboxGroup(labels=["Add Regression Line"],
                                            active=[])
        # Create the hover tools (one per named glyph so tooltips don't mix)
        self.points_hover = HoverTool(tooltips=self.CIRC_TOOLTIPS,
                                      names=["circles"])
        self.line_hover = HoverTool(tooltips=[], names=["reg_line"])
        # Create the scatter plot and regression line
        self.plot = figure(title="Scatter",
                           plot_height=450,
                           plot_width=800,
                           tools=[self.points_hover, self.line_hover])
        self.plot.circle(x="x",
                         y="y",
                         source=self.circ_source,
                         size="circle_size",
                         fill_alpha=0.6,
                         name="circles")
        self.reg_line = self.plot.line(x='x',
                                       y='y_pred',
                                       source=self.line_source,
                                       color='#FFAF87',
                                       name="reg_line")
        self.layout = row(
            column(self.x_axis, self.y_axis, self.date_slider,
                   self.time_slider, self.track_name, self.artist_name,
                   self.reg_line_check), self.plot)
        # Fill data and create events for on change
        self.update()
        self.on_change()

    def on_change(self):
        """
        Creates on change events for all widgets in the scatter plot.
        """
        widgets = [
            self.x_axis, self.y_axis, self.date_slider, self.time_slider,
            self.track_name, self.artist_name
        ]
        for control in widgets:
            control.on_change("value", lambda attr, old, new: self.update())
        # CheckboxGroup fires on 'active', not 'value'
        self.reg_line_check.on_change("active",
                                      lambda attr, old, new: self.update())

    def update(self):
        """
        Updates the data source and regression line based on current values
        of all widgets.
        """
        new_df = self.get_selected()
        # Get number of individual plays and then remove duplicate tracks for plotting
        num_plays = len(new_df)
        new_df.drop_duplicates(subset='track_id', inplace=True)
        # Choose the x and y axis
        x_name = self.AXIS_MAP[self.x_axis.value]
        y_name = self.AXIS_MAP[self.y_axis.value]
        self.plot.xaxis.axis_label = self.x_axis.value
        self.plot.yaxis.axis_label = self.y_axis.value
        # Calculate correlation coefficient between x and y axis
        # (0 when the selection is empty, to avoid a NaN title)
        corr = np.corrcoef(new_df[x_name],
                           new_df[y_name])[0, 1] if not new_df.empty else 0
        self.plot.title.text = f"{num_plays} track plays selected, correlation: {round(corr, 2)}"
        # Provide the new selected data to the Data Source
        data_dict = {
            'x': new_df[x_name],
            'y': new_df[y_name],
            'track_name': new_df['song_name'],
            'artist_name': new_df['artist_name'],
            'count': new_df['counts'],
            'circle_size': new_df['circle_size']
        }
        self.circ_source.data = data_dict
        # Update the regression line if more than one track is selected
        if len(new_df) <= 1:
            self.reg_line.visible = False
        else:
            # OLS fit of y on x (constant term added for the intercept)
            x = sm.add_constant(new_df[x_name])
            reg_model = sm.OLS(new_df[y_name], x)
            results = reg_model.fit()
            # params.iloc[1] is the slope, params.iloc[0] the intercept
            y_pred = list(
                map(
                    lambda x: results.params.iloc[1] * x + results.params.iloc[
                        0], new_df[x_name]))
            reg_data_dict = {'x': new_df[x_name], 'y_pred': y_pred}
            self.line_source.data = reg_data_dict
            # Update hover tool for regression line
            self.line_hover.tooltips = [(
                "Y=",
                f"{round(results.params.iloc[1], 2)}x + {round(results.params.iloc[0], 2)}"
            ), ("R\u00b2", str(round(results.rsquared, 2)))]
            # Line is only shown when the checkbox is ticked
            self.reg_line.visible = (len(self.reg_line_check.active) > 0)

    def get_selected(self):
        """
        Filter data based on widget values.

        Returns filtered DataFrame
        """
        df = self.backing_df
        # isspace() treats an empty TextInput ("") as a real filter of "",
        # which matches every row, so blank input selects everything
        if not self.track_name.value.isspace():
            df = df[df['song_name'].str.lower().str.contains(
                self.track_name.value.strip().lower())]
        if not self.artist_name.value.isspace():
            df = df[df['artist_name'].str.lower().str.contains(
                self.artist_name.value.strip().lower())]
        # Filter by date played (slider values arrive as epoch milliseconds)
        date_begin = pd.to_datetime(self.date_slider.value[0], unit='ms')
        date_end = pd.to_datetime(self.date_slider.value[1], unit='ms')
        df = df[(date_begin <= df['date_played'])
                & (df['date_played'] <= date_end)]
        # Filter by time played
        time_begin = pd.to_datetime(self.time_slider.value[0],
                                    unit='ms').time()
        time_end = pd.to_datetime(self.time_slider.value[1], unit='ms').time()
        df = df[(time_begin <= df['time_played'])
                & (df['time_played'] <= time_end)]
        # Join the counts and circle size columns to the df
        df = self.get_selected_counts(df)
        return df

    def get_selected_counts(self, df):
        """
        If no tracks are selected, simply join empty columns for counts and
        circle_size. Otherwise, compute the counts and circle sizes, and
        join those columns to the df.

        Arguemnts:
        -df : filtered DataFrame

        Returns filtered DataFrame with additional columns for counts and
        circle_size.
        """
        if df.empty:
            df['counts'] = pd.Series([])
            df['circle_size'] = pd.Series([])
            return df
        # Play count per (song, artist) pair
        df_counts = df.groupby(['song_name', 'artist_name'
                                ]).size().reset_index(name='counts')
        df_counts = df_counts.apply(self.apply_circle_sizes, axis=1)
        return pd.merge(df,
                        df_counts,
                        on=['song_name', 'artist_name'],
                        how='left')

    def apply_circle_sizes(self, row):
        """
        Determines the size of each circle based on the number of times
        that track has been played.
        """
        if row['counts'] == 1:
            row['circle_size'] = 5
        elif 1 < row['counts'] <= 5:
            row['circle_size'] = 7
        elif row['counts'] > 5:
            row['circle_size'] = 10
        return row
def main_gui_fnc(doc):
    """Build the main Bokeh GUI for browsing GPT simulation output.

    Creates a control panel (plot-type selector plus per-type tab of
    widgets), an initial plot, and wires every widget to redraw the plot.
    Relies on module-level helpers/state defined elsewhere in this file:
    `make_text_input`, `add_label`, `get_trend_vars`, `get_dist_plot_type`,
    `gpt_plot`, `gpt_plot_dist1d`, `gpt_plot_dist2d`, `gpt_data`,
    `text_input_params`, `screen_z_str_list`, `widget_width`,
    `plot_area_width`.

    Arguments:
    - doc : the Bokeh Document to attach the root layout to
    """
    # Make widgets
    plottype_list = ['Trends', '1D Distribution', '2D Distribution']
    plottype_dropdown = Select(title='Plot Type',
                               value=plottype_list[0],
                               options=plottype_list)
    trend_x_list = ['z', 't']
    trend_y_list = [
        'Beam Size', 'Bunch Length', 'Emittance (x,y)', 'Emittance (4D)',
        'Slice emit. (x,y)', 'Slice emit. (4D)', 'Charge', 'Energy',
        'Trajectory'
    ]
    trend_x_dropdown = Select(title='X axis',
                              options=trend_x_list,
                              value=trend_x_list[0])
    trend_y_dropdown = Select(title='Y axis',
                              options=trend_y_list,
                              value=trend_y_list[0])
    # Phase-space coordinates available for distribution plots
    dist_list = ['t', 'x', 'y', 'px', 'py', 'pz']
    trend_slice_var_dropdown = Select(title='Slice variable',
                                      options=dist_list,
                                      value=dist_list[0])
    trend_slice_nslices_text = make_text_input(text_input_params,
                                               'Number of slices',
                                               min=5,
                                               max=500,
                                               value=50,
                                               step=1)
    screen_z_dropdown = Select(title='Screen z (m)',
                               options=screen_z_str_list,
                               value=screen_z_str_list[0])
    dist_x_1d_dropdown = Select(title='X axis',
                                options=dist_list,
                                value=dist_list[0])
    dist_type_1d_list = [
        'Charge Density', 'Emittance X', 'Emittance Y', 'Emittance 4D',
        'Sigma X', 'Sigma Y'
    ]
    dist_type_1d_dropdown = Select(title='Y axis',
                                   options=dist_type_1d_list,
                                   value=dist_type_1d_list[0])
    nbin_1d_text = make_text_input(text_input_params,
                                   'Histogram bins',
                                   min=5,
                                   max=500,
                                   value=50,
                                   step=1)
    # Second screen-z selector shown on the 2D tab; two-way js_link keeps
    # it in sync with the 1D tab's selector client-side.
    screen_z_dropdown_copy = Select(title='Screen z (m)',
                                    options=screen_z_str_list,
                                    value=screen_z_str_list[0])
    screen_z_dropdown_copy.js_link('value', screen_z_dropdown, 'value')
    screen_z_dropdown.js_link('value', screen_z_dropdown_copy, 'value')
    dist_x_dropdown = Select(title='X axis',
                             options=dist_list,
                             value=dist_list[1])
    dist_y_dropdown = Select(title='Y axis',
                             options=dist_list,
                             value=dist_list[2])
    dist_2d_type_list = ['Scatter', 'Histogram']
    dist2d_type_dropdown = Select(title='Plot method',
                                  options=dist_2d_type_list,
                                  value=dist_2d_type_list[1])
    scatter_color_list = [
        'density', 't', 'x', 'y', 'r', 'px', 'py', 'pz', 'pr'
    ]
    scatter_color_dropdown = Select(title='Scatter color variable',
                                    options=scatter_color_list,
                                    value=scatter_color_list[0])
    axis_equal_checkbox = CheckboxGroup(labels=['Enabled'], active=[])
    nbin_x_text = make_text_input(text_input_params,
                                  'Histogram bins, X',
                                  min=5,
                                  max=500,
                                  value=50,
                                  step=1)
    nbin_y_text = make_text_input(text_input_params,
                                  'Histogram bins, Y',
                                  min=5,
                                  max=500,
                                  value=50,
                                  step=1)
    # Postprocess widgets
    cyl_copies_checkbox = CheckboxGroup(labels=['Enabled'], active=[])
    cyl_copies_text = make_text_input(text_input_params,
                                      'Number of copies',
                                      min=5,
                                      max=500,
                                      value=50,
                                      step=1)
    remove_correlation_checkbox = CheckboxGroup(labels=['Enabled'],
                                                active=[])
    remove_correlation_n_text = make_text_input(text_input_params,
                                                'Max polynomial power',
                                                min=0,
                                                max=10,
                                                value=1,
                                                step=1)
    remove_correlation_var1_dropdown = Select(title='Independent var (x)',
                                              options=dist_list,
                                              value=dist_list[1])
    remove_correlation_var2_dropdown = Select(title='Dependent var (y)',
                                              options=dist_list,
                                              value=dist_list[3])
    take_slice_checkbox = CheckboxGroup(labels=['Enabled'], active=[])
    take_slice_var_dropdown = Select(title='Slice variable',
                                     options=dist_list,
                                     value=dist_list[0])
    take_slice_nslices_text = make_text_input(text_input_params,
                                              'Number of slices',
                                              min=5,
                                              max=500,
                                              value=50,
                                              step=1)
    # Slice index is bounded by the current number-of-slices value
    take_slice_index_text = make_text_input(
        text_input_params,
        'Slice index',
        min=0,
        max=int(take_slice_nslices_text.value) - 1,
        value=0,
        step=1)
    # Assemble the per-plot-type widget tabs
    trends_tab = column(add_label(trend_x_dropdown, widget_width),
                        add_label(trend_y_dropdown, widget_width),
                        add_label(trend_slice_var_dropdown, widget_width),
                        add_label(trend_slice_nslices_text, widget_width))
    dist_1d_tab = column(add_label(screen_z_dropdown, widget_width),
                         add_label(dist_x_1d_dropdown, widget_width),
                         add_label(dist_type_1d_dropdown, widget_width),
                         add_label(nbin_1d_text, widget_width))
    dist_2d_tab = column(
        add_label(screen_z_dropdown_copy, widget_width),
        add_label(dist2d_type_dropdown, widget_width),
        add_label(scatter_color_dropdown, widget_width),
        add_label(dist_x_dropdown, widget_width),
        add_label(dist_y_dropdown, widget_width),
        add_label(axis_equal_checkbox, widget_width, label='Equal scale axes'),
        add_label(nbin_x_text, widget_width),
        add_label(nbin_y_text, widget_width))
    postprocess_tab = column(
        add_label(cyl_copies_checkbox, widget_width,
                  label='Cylindrical copies'),
        add_label(cyl_copies_text, widget_width),
        add_label(remove_correlation_checkbox,
                  widget_width,
                  label='Remove Correlation'),
        add_label(remove_correlation_n_text, widget_width),
        add_label(remove_correlation_var1_dropdown, widget_width),
        add_label(remove_correlation_var2_dropdown, widget_width),
        add_label(take_slice_checkbox, widget_width,
                  label='Take slice of data'),
        add_label(take_slice_var_dropdown, widget_width),
        add_label(take_slice_index_text, widget_width),
        add_label(take_slice_nslices_text, widget_width))
    tab1 = Panel(child=trends_tab, title='Trends')
    tab2 = Panel(child=dist_1d_tab, title='1D Dist.')
    tab3 = Panel(child=dist_2d_tab, title='2D Dist.')
    tab4 = Panel(child=postprocess_tab, title='Postprocess')
    tabs = Tabs(tabs=[tab1, tab2, tab3, tab4])
    main_panel = column(row(add_label(plottype_dropdown, widget_width)), tabs)

    # Main plotting function
    def create_plot():
        """Read every widget, enable/disable controls for the current plot
        type, and return a freshly built Bokeh figure."""
        #Get current GUI settings
        plottype = plottype_dropdown.value.lower()
        trend_x = 'mean_' + trend_x_dropdown.value.lower()
        trend_y = get_trend_vars(trend_y_dropdown.value.lower())
        trend_slice_var = trend_slice_var_dropdown.value
        trend_slice_nslices = int(
            trend_slice_nslices_text.value
        )  #constrain_text_input(trend_slice_nslices_text, text_input_params)
        screen_z = float(screen_z_dropdown.value)
        dist_x_1d = dist_x_1d_dropdown.value
        dist_y_1d = dist_type_1d_dropdown.value.lower()
        nbins_1d = int(
            nbin_1d_text.value
        )  # constrain_text_input(nbin_1d_text, text_input_params)
        dist_x = dist_x_dropdown.value
        dist_y = dist_y_dropdown.value
        ptype = dist2d_type_dropdown.value.lower()
        scatter_color_var = scatter_color_dropdown.value.lower()
        # Which plot mode is active
        is_trend = (plottype == 'trends')
        is_slice_trend = any(['slice' in yy for yy in trend_y])
        is_dist1d = (plottype == '1d distribution')
        is_dist2d = (plottype == '2d distribution')
        nbins = [int(nbin_x_text.value), int(nbin_y_text.value)]
        axis_equal = (0 in axis_equal_checkbox.active)
        # Postprocess options only apply to distribution plots
        cyl_copies_on = (0 in cyl_copies_checkbox.active) and (plottype !=
                                                               'trends')
        cyl_copies = int(cyl_copies_text.value)
        remove_correlation = (0 in remove_correlation_checkbox.active
                              ) and (plottype != 'trends')
        remove_correlation_n = int(remove_correlation_n_text.value)
        remove_correlation_var1 = remove_correlation_var1_dropdown.value
        remove_correlation_var2 = remove_correlation_var2_dropdown.value
        take_slice = (0 in take_slice_checkbox.active) and (plottype !=
                                                            'trends')
        take_slice_var = take_slice_var_dropdown.value
        take_slice_nslices = int(take_slice_nslices_text.value)
        # Keep the slice-index upper bound in step with the slice count
        text_input_params[
            take_slice_index_text.id]['max'] = take_slice_nslices - 1
        take_slice_index = int(take_slice_index_text.value)
        # Disable widgets that don't apply to the current plot type
        trend_x_dropdown.disabled = not is_trend
        trend_y_dropdown.disabled = not is_trend
        trend_slice_var_dropdown.disabled = not is_slice_trend
        trend_slice_nslices_text.disabled = not is_slice_trend
        screen_z_dropdown.disabled = not is_dist1d
        dist_x_1d_dropdown.disabled = not is_dist1d
        dist_type_1d_dropdown.disabled = not is_dist1d
        nbin_1d_text.disabled = not is_dist1d
        screen_z_dropdown_copy.disabled = not is_dist2d
        dist2d_type_dropdown.disabled = not is_dist2d
        scatter_color_dropdown.disabled = not (is_dist2d
                                               and ptype == 'scatter')
        dist_x_dropdown.disabled = not is_dist2d
        dist_y_dropdown.disabled = not is_dist2d
        axis_equal_checkbox.disabled = not is_dist2d
        nbin_x_text.disabled = not is_dist2d
        nbin_y_text.disabled = not is_dist2d
        if (is_trend):
            params = {}
            if (is_slice_trend):
                params['slice_key'] = trend_slice_var
                params['n_slices'] = trend_slice_nslices
            p = gpt_plot(gpt_data,
                         trend_x,
                         trend_y,
                         show_plot=False,
                         format_input_data=False,
                         **params)
        if (is_dist1d):
            ptype_1d = get_dist_plot_type(dist_y_1d)
            params = {}
            if (cyl_copies_on):
                params['cylindrical_copies'] = cyl_copies
            if (remove_correlation):
                params['remove_correlation'] = (remove_correlation_var1,
                                                remove_correlation_var2,
                                                remove_correlation_n)
            if (take_slice):
                params['take_slice'] = (take_slice_var, take_slice_index,
                                        take_slice_nslices)
            p = gpt_plot_dist1d(gpt_data,
                                dist_x_1d,
                                screen_z=screen_z,
                                plot_type=ptype_1d,
                                nbins=nbins_1d,
                                show_plot=False,
                                format_input_data=False,
                                **params)
        if (is_dist2d):
            params = {}
            params['color_var'] = scatter_color_var
            if (axis_equal):
                params['axis'] = 'equal'
            if (cyl_copies_on):
                params['cylindrical_copies'] = cyl_copies
            if (remove_correlation):
                params['remove_correlation'] = (remove_correlation_var1,
                                                remove_correlation_var2,
                                                remove_correlation_n)
            if (take_slice):
                params['take_slice'] = (take_slice_var, take_slice_index,
                                        take_slice_nslices)
            p = gpt_plot_dist2d(gpt_data,
                                dist_x,
                                dist_y,
                                screen_z=screen_z,
                                plot_type=ptype,
                                nbins=nbins,
                                show_plot=False,
                                format_input_data=False,
                                **params)
        p.width = plot_area_width
        return p

    gui = row(main_panel, create_plot())
    doc.add_root(gui)

    #callback functions
    def just_redraw(attr, old, new):
        # Any widget change simply rebuilds the plot in place
        gui.children[1] = create_plot()

    def change_tab(attr, old, new):
        # Switching plot type also selects the matching widget tab
        # (unless the user is on the Postprocess tab, index 3)
        new_index = plottype_list.index(new)
        if (tabs.active < 3):
            tabs.active = new_index
        gui.children[1] = create_plot()

    # Assign callbacks
    plottype_dropdown.on_change('value', change_tab)
    trend_x_dropdown.on_change('value', just_redraw)
    trend_y_dropdown.on_change('value', just_redraw)
    trend_slice_var_dropdown.on_change('value', just_redraw)
    trend_slice_nslices_text.on_change('value', just_redraw)
    screen_z_dropdown.on_change('value', just_redraw)
    dist_x_1d_dropdown.on_change('value', just_redraw)
    dist_type_1d_dropdown.on_change('value', just_redraw)
    nbin_1d_text.on_change('value', just_redraw)
    dist_x_dropdown.on_change('value', just_redraw)
    dist_y_dropdown.on_change('value', just_redraw)
    dist2d_type_dropdown.on_change('value', just_redraw)
    scatter_color_dropdown.on_change('value', just_redraw)
    nbin_x_text.on_change('value', just_redraw)
    nbin_y_text.on_change('value', just_redraw)
    axis_equal_checkbox.on_change('active', just_redraw)
    cyl_copies_checkbox.on_change('active', just_redraw)
    cyl_copies_text.on_change('value', just_redraw)
    remove_correlation_checkbox.on_change('active', just_redraw)
    remove_correlation_n_text.on_change('value', just_redraw)
    remove_correlation_var1_dropdown.on_change('value', just_redraw)
    remove_correlation_var2_dropdown.on_change('value', just_redraw)
    take_slice_checkbox.on_change('active', just_redraw)
    take_slice_var_dropdown.on_change('value', just_redraw)
    take_slice_index_text.on_change('value', just_redraw)
    take_slice_nslices_text.on_change('value', just_redraw)
# Widget setup for the Norwegian education-statistics app.
# Relies on names defined elsewhere in the file: `df` (the source
# DataFrame), `callback` (shared change handler), `TOOLTIPS`, and
# `andelprosent_label`. Widget labels/values are Norwegian UI strings
# and must not be translated.

# Selectors for gender, completion grade and study programme; options
# are the distinct values found in the corresponding df columns.
kjonnselect = Select(title="Kjønn",
                     value="Begge kjønn",
                     options=sorted(set(df['kjonn'])))
fullfselect = Select(title="Fullføringsgrad",
                     value="I alt",
                     options=sorted(set(df['fullforingsgrad'])))
studretnselect = Select(title="Studieretning",
                        value="Alle studieretninger/utdanningsprogram",
                        options=sorted(
                            set(df['studieretning_utdanningsprogram'])))
# Checkbox toggling percentage display
prosent = CheckboxGroup(labels=["Vis prosent"], active=[])
# All widgets share the same callback; CheckboxGroup fires on 'active'
kjonnselect.on_change('value', callback)
fullfselect.on_change('value', callback)
studretnselect.on_change('value', callback)
prosent.on_change('active', callback)
p = figure(plot_height=400,
           plot_width=950,
           tooltips=TOOLTIPS,
           sizing_mode='scale_width')
p.xaxis.axis_label = "Skoleår"
# Y label depends on whether percentage mode is on
p.yaxis.axis_label = andelprosent_label(prosent)
# Apply list on grouped dataframe returns list of lists - must be run on each variable.
# Initial selection filtered by the current widget values, grouped by
# parents' education level.
df2 = df.loc[(df['kjonn'] == kjonnselect.value)
             & (df['fullforingsgrad'] == fullfselect.value)
             & (df['studieretning_utdanningsprogram'] ==
                studretnselect.value)].groupby('foreldrenes_utdanningsniva')
pctrow = row(prosent, width=950)
def bkapp(doc):
    """Bokeh application showing French municipal tax rates on a map.

    Builds a choropleth of commune tax rates (taxe d'habitation / taxe
    foncière) around a selected commune, plus a histogram of the displayed
    sample and a text info panel. Relies on module-level names defined
    elsewhere in the file: `data_yr`, `dist`, and initial values for the
    globals `impot` and `defaultPalette`.

    Arguments:
    - doc : the Bokeh Document to attach the layout to
    """

    def createDataSet():
        '''Load the input data into a geopandas GeoDataFrame.

        Input data:
        - a geojson shapefile with the outline of each commune
        - Excel files with the tax-rate data of interest

        Tasks performed:
        - load the files
        - reproject from WGS84 to Web Mercator
        - drop unused columns
        - derive extra columns from the existing data
        - merge everything into one GeoDataFrame (NaN/inf filtered later)

        Output:
        - a GeoDataFrame
        '''
        # File with the commune outlines (geojson format)
        city_shapefile = "DATA/communes-20190101.json"
        # Data files (tax rates)
        taxe_hab = "DATA/taux_taxe_habitation.xlsx"
        taxe_fon = "DATA/taux_taxe_fonciere.xlsx"

        ########## Commune geometry handling ############
        # Import the commune geometry
        df_shape = gpd.read_file(city_shapefile)
        # Drop the unused "wikipedia" and "surface" columns
        df_shape.drop(columns=["wikipedia", "surf_ha"], inplace=True)
        # Reproject to Web Mercator
        df_shape["geometry"] = df_shape["geometry"].to_crs("EPSG:3857")
        df_shape.crs = "EPSG:3857"

        ########## Commune statistics handling ############
        # Taxe d'habitation (housing tax)
        # Import the per-commune tax rates into the dataframe;
        # keep department/commune codes as strings to preserve leading zeros
        dfTH = pd.read_excel(taxe_hab,
                             sheet_name="COM",
                             header=2,
                             usecols="A:B,E:G",
                             converters={
                                 'Code commune': str,
                                 'Code DEP': str
                             })
        # Normalize the column labels
        dfTH.columns = dfTH.columns.str.replace(' ', '_')
        dfTH.columns = dfTH.columns.str.replace('Taux_communal_TH*',
                                                'TauxTH').str.replace(
                                                    'Taux_communal_voté_TH*',
                                                    'TauxTH')
        # Build the INSEE code by concatenating department and commune codes.
        # The INSEE code is the join key between the geometry and data frames.
        dfTH["insee"] = dfTH["Code_DEP"] + dfTH["Code_commune"]
        # Drop the commune-code column, no longer needed
        dfTH.drop(columns=["Code_commune"], inplace=True)
        # Coerce non-numeric rate values to NaN so they can be filtered out
        dfTH["TauxTH_2018"] = pd.to_numeric(dfTH["TauxTH_2018"],
                                            errors='coerce')
        dfTH["TauxTH_2017"] = pd.to_numeric(dfTH["TauxTH_2017"],
                                            errors='coerce')

        # Taxe foncière (property tax) — same processing as above
        dfTF = pd.read_excel(taxe_fon,
                             sheet_name="COM",
                             header=2,
                             usecols="A:B,D:F",
                             converters={
                                 'Code commune': str,
                                 'Code DEP': str
                             })
        dfTF.columns = dfTF.columns.str.replace(' ', '_')
        dfTF.columns = dfTF.columns.str.replace('Taux_communal_TFB*',
                                                'TauxTF').str.replace(
                                                    'Taux_communal_voté_TFB*',
                                                    'TauxTF')
        dfTF["insee"] = dfTF["Code_DEP"] + dfTF["Code_commune"]
        dfTF.drop(columns=["Code_commune"], inplace=True)
        dfTF.drop(columns=["Code_DEP"], inplace=True)
        # Coerce non-numeric rate values to NaN so they can be filtered out
        dfTF["TauxTF_2018"] = pd.to_numeric(dfTF["TauxTF_2018"],
                                            errors='coerce')
        dfTF["TauxTF_2017"] = pd.to_numeric(dfTF["TauxTF_2017"],
                                            errors='coerce')

        # Merge the geometry and the tax rates
        dataCities = pd.merge(df_shape,
                              dfTH,
                              left_on="insee",
                              right_on="insee",
                              how='left')
        dataCities = pd.merge(dataCities,
                              dfTF,
                              left_on="insee",
                              right_on="insee",
                              how='left')
        return dataCities

    ### Processing functions ###
    def select_data(df, ogCity, dist):
        """Select the data to display.

        Selection is based on distance around the selected commune: keep
        every commune whose outline intersects the selected commune's
        outline buffered by dist.

        Inputs:
        - df : dataframe with all the data
        - ogCity : row of the selected commune
        - dist : distance from the origin (unit depends on the CRS;
          EPSG:3857 is in metres)

        Output:
        - dataFrame containing only the retained rows
        """
        # Return the communes intersected by the circle centred on ogCity
        # with radius dist*1000 (the radius is given in km)
        return df[df.intersects(other=ogCity.geometry.buffer(dist * 1000))]

    def create_choropleth(displaySet, displayParam, palette, ogCity):
        """Refresh the colouring of the displayed communes.

        Inputs:
        - displaySet : dataFrame of the displayed data
        - displayParam : the parameter to display
        - palette : list of colours (same as the histogram's)
        - ogCity : row of the selected commune

        Output:
        - Figure containing the map.
        """
        # Use the geographic bounds to initialize the map extent
        displayBounds = displaySet.total_bounds
        # Convert the commune outlines to json for Bokeh
        geosource = GeoJSONDataSource(geojson=displaySet.to_json())
        # Create the map figure
        choroPlot = figure(title='Taux ' + select_imp.value + " " +
                           str(slider_yr.value),
                           x_range=(displayBounds[0], displayBounds[2]),
                           y_range=(displayBounds[1], displayBounds[3]),
                           x_axis_type="mercator",
                           y_axis_type="mercator",
                           plot_height=500,
                           plot_width=850,
                           sizing_mode="scale_width",
                           toolbar_location='below',
                           tools="pan, wheel_zoom, box_zoom, reset",
                           x_axis_location=None,
                           y_axis_location=None)
        choroPlot.xgrid.grid_line_color = None
        choroPlot.ygrid.grid_line_color = None
        # Tap event to select the reference commune
        choroPlot.on_event(Tap, update_loc)
        # Wheel zoom enabled by default
        choroPlot.toolbar.active_scroll = choroPlot.select_one(WheelZoomTool)
        # Add the base map tiles
        tile_provider = get_provider(Vendors.CARTODBPOSITRON)
        choroPlot.add_tile(tile_provider)
        # Min/max of the displayed values, for the colour scale
        mini = displaySet[displayParam].min()
        maxi = displaySet[displayParam].max()
        # Linear colour scale over the displayed parameter
        # NOTE(review): uses the global `defaultPalette` rather than the
        # `palette` argument — looks unintentional; confirm before changing.
        color_mapper = LinearColorMapper(palette=defaultPalette,
                                         low=mini,
                                         high=maxi,
                                         nan_color='#808080')
        # Draw the commune outlines on the map
        citiesPatch = choroPlot.patches('xs',
                                        'ys',
                                        source=geosource,
                                        line_color='gray',
                                        line_width=0.25,
                                        fill_alpha=0.5,
                                        fill_color={
                                            'field': displayParam,
                                            'transform': color_mapper
                                        })
        # Create the legend (colour bar)
        color_bar = ColorBar(color_mapper=color_mapper,
                             label_standoff=8,
                             location=(0, 0),
                             orientation='vertical')
        choroPlot.add_layout(color_bar, 'right')
        # Add an arrow on the reference commune
        start = ogCity.geometry.centroid
        pin_point = Arrow(end=VeeHead(size=15),
                          line_color="red",
                          x_start=start.x,
                          y_start=start.y,
                          x_end=start.x,
                          y_end=start.y - 0.001)
        choroPlot.add_layout(pin_point)
        # Add a hover tooltip over the map
        choroHover = HoverTool(renderers=[citiesPatch],
                               tooltips=[('Commune', '@nom'),
                                         (displayParam, '@' + displayParam)])
        toolTip = choroPlot.add_tools(choroHover)
        return choroPlot

    # Histogram creation function
    def createHisto(displaySet, displayParam, palette, ogCity):
        """Histogram visualising the rate distribution of the displayed
        communes.

        Inputs:
        - displaySet : dataFrame of the displayed data
        - displayParam : the parameter to display
        - palette : list of colours (same as the choropleth's)
        - ogCity : row of the selected commune

        Output:
        - figure containing the histogram.
        """
        # One bin per colour passed to the function
        nBins = len(palette)
        # Compute the histogram
        hist, edges = np.histogram(displaySet[displayParam].dropna(),
                                   bins=nBins)
        # Row count of displaySet (vectorised for the data source)
        total = displaySet[displayParam].size * np.ones(nBins, np.int8)
        # Normalize the histogram (display as % of the element count)
        hist_pct = 100 * hist / total[0]
        # Vertical axis extent
        hmax = max(hist_pct) * 1.1
        hmin = -0.1 * hmax
        # Sample mean and median
        mean = displaySet[displayParam].mean()
        med = displaySet[displayParam].quantile(0.5)
        # Create the histogram figure
        histoPlot = figure(title='Répartition du taux de ' +
                           select_imp.value + " " + str(slider_yr.value),
                           y_range=(hmin, hmax),
                           plot_height=300,
                           plot_width=400,
                           sizing_mode="scale_width",
                           y_axis_location='right',
                           toolbar_location=None)
        histoPlot.xgrid.grid_line_color = None
        histoPlot.xaxis.axis_label = displayParam
        histoPlot.xaxis.axis_line_color = None
        histoPlot.ygrid.grid_line_color = "white"
        histoPlot.yaxis.axis_label = '% de l\'échantillon'
        histoPlot.yaxis.axis_line_color = None
        # Data source
        data = dict(right=edges[1:],
                    left=edges[:-1],
                    top=hist_pct,
                    nb=hist,
                    total=total,
                    color=palette)
        histoSource = ColumnDataSource(data=data)
        # Draw the histogram
        histoDraw = histoPlot.quad(bottom=0,
                                   left="left",
                                   right="right",
                                   top="top",
                                   fill_color="color",
                                   line_color=None,
                                   source=histoSource)
        # Add a hover tooltip over the histogram
        histoHover = HoverTool(renderers=[histoDraw],
                               mode="vline",
                               tooltips=[
                                   ('Taille', '@nb'),
                                   ('Fourchette', '@left - ' + '@right'),
                               ])
        histoTooltip = histoPlot.add_tools(histoHover)
        # Vertical marker for the selected commune (only when its rate is
        # not NaN)
        if ~np.isnan(ogCity[displayParam]):
            ogCityDraw = histoPlot.quad(bottom=hmin,
                                        top=hmax,
                                        left=ogCity[displayParam] - 0.05,
                                        right=ogCity[displayParam] + 0.05,
                                        fill_color="pink",
                                        line_color=None,
                                        legend_label=ogCity["nom"] + ' (' +
                                        ogCity["Code_DEP"] + ')')
            # Tooltip when hovering the selected commune's marker
            displayName = ogCity["nom"] + " (" + ogCity["Code_DEP"] + ")"
            ogCityHover = HoverTool(renderers=[ogCityDraw],
                                    mode="vline",
                                    tooltips=[
                                        ('Commune sélectionnée', displayName),
                                        (displayParam,
                                         str(ogCity[displayParam])),
                                    ])
            histoTooltip = histoPlot.add_tools(ogCityHover)
        # Vertical marker for the sample mean
        ogCityDraw = histoPlot.quad(bottom=hmin,
                                    top=hmax,
                                    left=mean - 0.05,
                                    right=mean + 0.05,
                                    fill_color="blue",
                                    line_color=None,
                                    legend_label="Moyenne ")
        # Vertical marker for the sample median
        ogCityDraw = histoPlot.quad(bottom=hmin,
                                    top=hmax,
                                    left=med - 0.05,
                                    right=med + 0.05,
                                    fill_color="purple",
                                    line_color=None,
                                    legend_label="Mediane ")
        # Make the legend interactive
        histoPlot.legend.click_policy = "hide"
        # Orient the legend
        histoPlot.legend.orientation = "vertical"
        # Reduce the font size
        histoPlot.legend.label_text_font_size = "8px"
        # Move the legend outside the plot area
        histoLegend = histoPlot.legend[0]
        histoPlot.legend[0] = None
        histoPlot.add_layout(histoLegend, 'right')
        return histoPlot

    def create_info(displaySet, displayParam, ogCity):
        """Display a text panel with information about the displayed data
        set and the selected commune.

        Inputs:
        - displaySet : dataFrame of the displayed data
        - displayParam : the parameter(s) to display
        - ogCity : row of the selected commune

        Output:
        - figure containing the text to display.
        """
        stats = displaySet[displayParam].dropna().describe(
            percentiles=[0.5]).round(decimals=2)
        stats = stats[stats.index !=
                      'count']  # Drop 'count', already displayed
        # Rename the columns
        stats.columns = ["Taux " + str(elt) for elt in data_yr]
        # Build the text
        infoText = [
            f"<b>Communes affichées</b> : {len(displaySet)}",
            f"<b>Commune sélectionnée</b> : {ogCity['nom']} ({ogCity['Code_DEP']})",
            "</br><b>Statistiques</b> : " + select_imp.value
        ]
        return [Div(text="</br>".join(infoText)), PreText(text=str(stats))]

    def update_layout(displaySet, displayParam, ogCity, palette):
        """Refresh all the figures of the layout.

        Inputs:
        - displaySet : dataFrame of the displayed data
        - displayParam : the parameter to display
        - ogCity : row of the selected commune
        - palette : list of colours

        Output:
        - nothing
        """
        # Refresh the choropleth
        appLayout.children[0].children[0] = create_choropleth(
            displaySet, displayParam, palette, ogCity)
        # Refresh the histogram
        appLayout.children[1].children[0] = createHisto(
            displaySet, displayParam, palette, ogCity)
        # Refresh the info panel (uses infoParam, the per-year column list)
        appLayout.children[1].children[1:] = create_info(
            displaySet, infoParam, ogCity)

    def create_displayParam(impot='TauxTH_', year=2018):
        """Return the dataframe column to display, built from the desired
        tax and year.

        Inputs:
        - impot : the tax to display (str)
        - year : the year to display (int)

        Output:
        - displayParam : the display parameter (str)
        """
        return impot + str(year)

    ### Callback functions ###
    # Callback (refreshes the plots)
    def update_yr(attr, old, new):
        """Callback called when the year slider changes.
        Changes the year of the displayed rate.
        """
        # Build the display parameter from the selected year:
        displayParam = create_displayParam(impot, slider_yr.value)
        # Refresh the layout
        update_layout(displaySet, displayParam, ogCity, defaultPalette)

    def update_dst(attr, old, new):
        """Callback called when the display distance changes.
        Changes the displayed data set and recolours the new set.
        """
        # Globals needed because shared between several callbacks
        global displaySet
        # Build the display parameter from the selected year:
        displayParam = create_displayParam(impot, slider_yr.value)
        # Refresh the displayed set
        displaySet = select_data(dataCities, ogCity, slider_dst.value)
        # Refresh the layout
        update_layout(displaySet, displayParam, ogCity, defaultPalette)

    def update_loc(event):
        """Callback fired on a map click.
        Changes the selected commune and refreshes the map around the new
        reference commune.
        """
        # Globals needed because shared between several callbacks
        global ogCity
        global displaySet
        ### Identify the commune under the clicked point ###
        # Create a shapely Point at the click coordinates:
        clicPoint = gpd.points_from_xy([event.x], [event.y])
        # Create a geoseries holding the point.
        # Note: iloc[0] used to drop the index,
        # cf issue https://github.com/geopandas/geopandas/issues/317
        pointSerie = gpd.GeoSeries(crs='epsg:3857', data=clicPoint).iloc[0]
        # Find the commune containing the point:
        clicCity = dataCities[dataCities.contains(other=pointSerie)]
        # Only update if the click actually returned a geometry
        if not clicCity.empty:
            ogCity = clicCity.iloc[0]
            ### Refresh the map with the clicked commune as reference ###
            # Compute the new data set to display
            displaySet = select_data(dataCities, ogCity, slider_dst.value)
            # Build the display parameter from the selected year:
            displayParam = create_displayParam(impot, slider_yr.value)
            # Refresh the layout
            update_layout(displaySet, displayParam, ogCity, defaultPalette)

    def update_colormap(attr, old, new):
        """Switch the colour palette when the colour-blind toggle changes.
        """
        global defaultPalette
        # Build the display parameter from the selected year:
        displayParam = create_displayParam(impot, slider_yr.value)
        if len(new) > 0:
            print('Mode Daltonien')
            defaultPalette = Colorblind[7]
        else:
            print('Mode Normal')
            defaultPalette = brewer['RdYlGn'][7]
        # Refresh the layout
        update_layout(displaySet, displayParam, ogCity, defaultPalette)

    def update_impot(attr, old, new):
        """Switch the displayed tax (housing vs property)."""
        global impot
        dict_imp = {"Taxe d'habitation": "TauxTH_", "Taxe foncière": "TauxTF_"}
        impot = dict_imp[new]
        # Build the display parameter from the selected year:
        displayParam = create_displayParam(impot, slider_yr.value)
        # Refresh the layout
        update_layout(displaySet, displayParam, ogCity, defaultPalette)

    #%%
    # Load the data set; regenerate (and cache) it if the file is missing
    try:
        dataCities = gpd.read_file("DATA/dataCities.json")
    except:
        print("fichier dataCities.json non trouvé, génération en cours")
        dataCities = createDataSet()
        # Save the data set
        dataCities.to_file("DATA/dataCities.json", driver='GeoJSON')

    # %%
    ### Main Code ####
    ### Build the map and legend ####
    global displaySet
    global impot
    global defaultPalette
    global ogCity
    # Default parameters
    ogCity = dataCities[dataCities["nom"] == 'Paris'].iloc[
        0]  # Paris selected by default
    # Default displayed parameter = most recent housing-tax rate
    defaultParam = impot + str(data_yr[-1])
    infoParam = [impot + str(elt) for elt in data_yr]
    # Build the data set to display
    displaySet = select_data(dataCities, ogCity, dist)

    ### Build the front-end ###
    # Slider to pick the year
    slider_yr = Slider(title='Année',
                       start=data_yr[0],
                       end=data_yr[-1],
                       step=1,
                       value=data_yr[-1],
                       default_size=250)
    slider_yr.on_change('value', update_yr)
    # Slider to pick the display distance
    slider_dst = Slider(title='Distance d\'affichage (km)',
                        start=0,
                        end=100,
                        step=5,
                        value=dist,
                        default_size=250)
    slider_dst.on_change('value', update_dst)
    # Selector to pick the tax to display
    select_imp = Select(title="Impôt:",
                        value="Taxe d'habitation",
                        options=["Taxe d'habitation", "Taxe foncière"])
    select_imp.on_change('value', update_impot)
    # Colour-blind mode toggle
    checkbox_dalto = CheckboxGroup(labels=["Mode Daltonien"])
    checkbox_dalto.on_change('active', update_colormap)
    # Create the choropleth
    choroPlot = create_choropleth(displaySet, defaultParam, defaultPalette,
                                  ogCity)
    # Create the histogram
    histoPlot = createHisto(displaySet, defaultParam, defaultPalette, ogCity)
    # Create the info figures
    infoTitle, infoDisplaySet = create_info(displaySet, infoParam, ogCity)
    # Column/row organisation
    Col1 = column(slider_yr, slider_dst)
    Col2 = column(select_imp, checkbox_dalto)
    row_wgt = row(Col1, Col2)
    Col3 = column(choroPlot, row_wgt)
    Col4 = column(histoPlot, infoTitle, infoDisplaySet)
    appLayout = row(Col3, Col4)
    doc.add_root(appLayout)
    doc.title = "VizImpôts"
    doc.theme = Theme(filename="theme.yaml")
checkbox = CheckboxGroup(labels=factors, active=[]) source = get_dataset() plot = make_plot(source) p = Div( text= """<h><b>The Shanghai city is now being evaluated by __ districts for tessellation</b></h>""", width=600, height=10) img = Div( text= """<img src="myapp/static/images/icon.png" alt="Engie Lab China" height="50" width="85">""", width=85, height=50) slider.on_change("value", update_plot) checkbox.on_change("active", update_plot) div = Div( text= """<h><b>WHERE TO DO BUSINESS FOR DISTRICT HEATING AND COOLING? </b></h></br></br>DEMO by <b><a href="http://www.engie.cn/en/media-center/press-releases/engie-a-new-lab-set-up-in-china/" target="_blank">Engie Lab China</a></b> using urban data and artificial intelligence to automatically tessellate districts of a city and estimate boundaries and potentials for the use of district energy system.<br></br><br></br>""", width=1125, height=50) div_help = Div( text= """<b><h style="color: red;">INSTRUCTIONS</b></h></br></br>1.Drag the slider to choose the size of each district for tessellation, and select the urban factors from checkbox that should be considered to estimate the potential of district energy for each district.<br></br> 2.Scores representing the estimated potentials of implementing district energy system are given to districts, encoded with different colors. Hover the districts to check the exact scores.<br></br> <img src="https://bokeh.pydata.org/en/latest/_images/PointDraw.png" alt="Point Draw Tool"> <br></br> """, width=600,
# publication_counts_df = data_sources.publication_counts.create_publication_counts_dataset( query) publication_count_graph = components.publication_counts.create_chart( statistic_indices=included_statistics_checkbox.active, publication_count_df=publication_counts_df) def update_chart(): page.children[1] = components.publication_counts.create_chart( statistic_indices=included_statistics_checkbox.active, publication_count_df=publication_counts_df) included_statistics_checkbox.on_change('active', lambda attr, old, new: update_chart()) # # Content Type Section # def create_search_match_table(data_source): columns = [ TableColumn(field='name', title='Name'), TableColumn(field='publication_count', title='Publication Match Count'), ] return DataTable(source=data_source, columns=columns, height=275,
def __init__(self): ### Methods self.args = Settings() self.index = None self.data_getter = None self.filter = None self._data = None self._model_type = None self._model_dir = self.args.models_path + 'unique_object/' self.controls = {} self.scene_plotter = ScenePlotter(self._set_head_selection) ### initialization of the interface ## Model type selector def update_select_net(): if self._model_dir is not None: file_list = [fn for fn in os.listdir(self._model_dir) if os.path.isfile(os.path.join(self._model_dir, fn))] file_list.sort(key=lambda fn: os.stat(os.path.join(self._model_dir, fn)).st_mtime, reverse=True) file_list = [os.path.splitext(fn)[0] for fn in file_list] self.controls['net'].options = file_list # print(self._model_dir) # print('file_list') # print(file_list) if len(file_list) > 0: self.controls['net'].value = file_list[0] else: self.controls['net'].value = None def update_model_type(): if self.controls['multi_mono_object'].active == 0: self._model_type = 'mono' self._model_dir = self.args.models_path + 'unique_object/' elif self.controls['multi_mono_object'].active == 1: self._model_type = 'multi_obj' self._model_dir = self.args.models_path + 'multi_objects/' elif self.controls['multi_mono_object'].active == 2: self._model_type = 'multi_pred' self._model_dir = self.args.models_path + 'multi_pred/' model_types = ['CV', 'CA', 'Bicycle', 'CV_LSTM', 'CA_LSTM', 'Bicycle_LSTM', 'nn_attention'] existing_types = [type for type in model_types if os.path.isdir(self._model_dir + type)] self.controls['model_sub_type'].options = existing_types print('existing types') print(existing_types) if len(existing_types) > 0 and not self.controls['model_sub_type'].value in existing_types: self.controls['model_sub_type'].value = existing_types[0] return set_model_sub_type() update_select_net() def set_model_sub_type(): if self.controls['model_sub_type'].value is not None: self._model_dir = self._model_dir + self.controls['model_sub_type'].value + '/' self.args.model_type = 
self.controls['model_sub_type'].value else: self._model_dir = None def update_multi_mono_object(attr, old, new): update_model_type() print(self._model_dir) self._set_data_getter() print('___') multi_mono_object = RadioButtonGroup(labels=["Mono-object", "Multi-objects", "Multi-pred"], active=1) self.controls['multi_mono_object'] = multi_mono_object multi_mono_object.on_change('active', update_multi_mono_object) ## Model sub type selector model_types = ['CV', 'CA', 'Bicycle', 'CV_LSTM', 'CA_LSTM', 'Bicycle_LSTM', 'nn_attention'] model_sub_type = Select(title='Select model type:', value=model_types[3], options=model_types) self.controls['model_sub_type'] = model_sub_type model_sub_type.on_change('value', lambda att, old, new: update_model_type()) ## Model selector select = Select(title="Select parameter file:", value=None, options=[]) self.controls['net'] = select select.on_change('value', lambda att, old, new: self._set_data_getter()) ## Select dataset to use dataset_list = ['Argoverse', 'Fusion', 'NGSIM'] select = Select(title='Dataset:', value=dataset_list[0], options=dataset_list) self.controls['dataset'] = select select.on_change('value', lambda att, old, new: self._set_data_getter(change_index=True)) ## Set what to draw checkbox_group = CheckboxGroup( labels=['Draw lanes', 'Draw history', 'Draw true future', 'Draw forecast', 'Draw forecast covariance'], active=[0, 1, 2, 3, 4]) self.controls['check_box'] = checkbox_group checkbox_group.on_change('active', lambda att, old, new: (self._update_cov(), self._update_lanes(), self._update_path())) ## Set the number of pred n_pred = Slider(start=1, end=6, step=1, value=1, title='Number of prediction hypothesis') self.controls['n_pred'] = n_pred n_pred.on_change('value', lambda att, old, new: (self._update_cov(), self._update_path())) ## Sequence ID input text_input = TextInput(title="Sequence ID to plot:", value="Random") self.controls['sequence_id'] = text_input ## Head selection input multi_select_head = 
MultiSelect(title='Attention head multiple selection:', value=[], options=[]) self.controls['Head_selection'] = multi_select_head multi_select_head.on_change('value', self.scene_plotter.set_active_heads) ## Refresh all sample button = Button(label="Refresh", button_type="success") self.controls['refresh'] = button button.on_click( lambda event: (self._set_index(), self._set_data())) # button.js_on_click(CustomJS(args=dict(p=self.image), code="""p.reset.emit()""")) update_multi_mono_object(None, None, None) ## Set the interface layout inputs = column(*(self.controls.values()), width=320, height=1000) inputs.sizing_mode = "fixed" lay = layout([[inputs, self.scene_plotter.get_image()]], sizing_mode="scale_both") curdoc().add_root(lay) self.scene_plotter._tap_on_veh('selected', [], [0])
# update text box ST_values = [SigmaR_new,SigmaP_new,Q_new,A_new,n_new] # update the title values_string = ('SigmaR = ' + "{0:.3}".format(ST_values[0]) + ', SigmaP = ' + "{0:.3}".format(ST_values[1]) + ', Q = ' + "{0:.2E}".format(ST_values[2]) + ', A = '+ "{0:.2E}".format(ST_values[3]) + ', n = ' + "{0:.3}".format(ST_values[4])) fig_590.title.text = values_string # Set checkboxes LABELS = ["SigmaR", "SigmaP", "Q", "A", "n"] checkbox_group = CheckboxGroup(labels=LABELS) checkbox_group.on_change('active', callback) # insert sliders for the max and min values SigmaR_range_slider = RangeSlider(start=0, end=100, value=(0,50), step=.1, title="SigmaR") SigmaP_range_slider = RangeSlider(start=0, end=100, value=(0,50), step=.1, title="SigmaP") Q_range_slider = RangeSlider(start=0.5e5, end=3e5, value=(0.5e5,3e5), step=1e3, title="Q") A_range_slider = RangeSlider(start=1.00e7, end=1.00e9, value=(1.00e7,1.00e9), step=1.00e7, title="A") n_range_slider = RangeSlider(start=0, end=10, value=(3,6), step=.1, title="n") # When slider values change call the callback function SigmaR_range_slider.on_change('value', callback) SigmaP_range_slider.on_change('value', callback) Q_range_slider.on_change('value', callback) A_range_slider.on_change('value', callback) n_range_slider.on_change('value', callback)
    p.yaxis.major_label_text_font_size = '12pt'
    return p


def get_dataset(carrier_list):
    """Return a ColumnDataSource restricted to the given carrier names."""
    subset = by_carrier[by_carrier['name'].isin(carrier_list)]
    new_src = ColumnDataSource(subset)
    return new_src


def update(attr, old, new):
    """Checkbox callback: map ticked indices to carriers and swap the data."""
    carrier_list = [available_carriers[i] for i in carrier_select.active]
    new_src = get_dataset(carrier_list)
    src.data.update(new_src.data)


# One checkbox per carrier; first carrier pre-selected.
carrier_select = CheckboxGroup(labels=available_carriers, active=[0])
carrier_select.on_change('active', update)

# Initial source reflects the default selection above.
src = get_dataset([available_carriers[i] for i in carrier_select.active])
p = make_plot(src)

layout = row(carrier_select, p)
tab = Panel(child=layout, title='Histogram')
tabs = Tabs(tabs=[tab])
curdoc().add_root(tabs)
class BokehBaseExplorer(Loggable, ABC): """ ???+ note "Base class for visually exploring data with `Bokeh`." Assumes: - in supplied dataframes - (always) xy coordinates in `x` and `y` columns; - (always) an index for the rows; - (always) classification label (or ABSTAIN) in a `label` column. Does not assume: - a specific form of data; - what the map serves to do. """ SUBSET_GLYPH_KWARGS = {} MANDATORY_COLUMNS = ["label", "x", "y"] TOOLTIP_KWARGS = {"label": True, "coords": True, "index": True} def __init__(self, df_dict, **kwargs): """ ???+ note "Constructor shared by all child classes." | Param | Type | Description | | :---------- | :----- | :--------------------------- | | `df_dict` | `dict` | `str` -> `DataFrame` mapping | | `**kwargs` | | forwarded to `bokeh.plotting.figure` | 1. settle the figure settings by using child class defaults & kwargs overrides 2. settle the glyph settings by using child class defaults 3. create widgets that child classes can override 4. create data sources the correspond to class-specific data subsets. 5. activate builtin search callbacks depending on the child class. 6. initialize a figure under the settings above """ self.figure_kwargs = { "tools": STANDARD_PLOT_TOOLS, "tooltips": self._build_tooltip(kwargs.pop("tooltips", "")), # bokeh recommends webgl for scalability "output_backend": "webgl", } self.figure_kwargs.update(kwargs) self.figure = figure(**self.figure_kwargs) self.glyph_kwargs = { _key: _dict["constant"].copy() for _key, _dict in self.__class__.SUBSET_GLYPH_KWARGS.items() } self._setup_dfs(df_dict) self._setup_sources() self._setup_widgets() self._activate_search_builtin() @classmethod def from_dataset(cls, dataset, subset_mapping, *args, **kwargs): """ ???+ note "Alternative constructor from a `SupervisableDataset`." 
| Param | Type | Description | | :--------------- | :----- | :--------------------------- | | `dataset` | `SupervisableDataset` | dataset with `DataFrame`s | | `subset_mapping` | `dict` | `dataset` -> `explorer` subset mapping | | `*args` | | forwarded to the constructor | | `**kwargs` | | forwarded to the constructor | """ # local import to avoid import cycles from hover.core.dataset import SupervisableDataset assert isinstance(dataset, SupervisableDataset) df_dict = {_v: dataset.dfs[_k] for _k, _v in subset_mapping.items()} return cls(df_dict, *args, **kwargs) def view(self): """ ???+ note "Define the high-level visual layout of the whole explorer." """ from bokeh.layouts import column return column(self._layout_widgets(), self.figure) def _build_tooltip(self, extra): """ ???+ note "Define a windowed tooltip which shows inspection details." | Param | Type | Description | | :--------------- | :----- | :--------------------------- | | `extra` | `str` | user-supplied extra HTML | Note that this is a method rather than a class attribute because child classes may involve instance attributes in the tooltip. """ standard = bokeh_hover_tooltip(**self.__class__.TOOLTIP_KWARGS) return f"{standard}\n{extra}" def _setup_widgets(self): """ ???+ note "High-level function creating widgets for interactive functionality." """ self._info("Setting up widgets") self._dynamic_widgets = OrderedDict() self._dynamic_callbacks = OrderedDict() self._setup_search_highlight() self._setup_selection_option() self._setup_subset_toggle() @abstractmethod def _layout_widgets(self): """ ???+ note "Define the low-level layout of widgets." """ pass @abstractmethod def _setup_search_highlight(self): """ ???+ note "Define how to search and highlight data points." Left to child classes that have a specific feature format. """ pass def _setup_selection_option(self): """ ???+ note "Create a group of checkbox(es) for advanced selection options." 
""" from bokeh.models import CheckboxGroup self.selection_option_box = CheckboxGroup( labels=["cumulative selection"], active=[] ) def _setup_subset_toggle(self): """ ???+ note "Create a group of buttons for toggling which data subsets to show." """ from bokeh.models import CheckboxButtonGroup data_keys = list(self.__class__.SUBSET_GLYPH_KWARGS.keys()) self.data_key_button_group = CheckboxButtonGroup( labels=data_keys, active=list(range(len(data_keys))) ) def update_data_key_display(active): visible_keys = {self.data_key_button_group.labels[idx] for idx in active} for _renderer in self.figure.renderers: # if the renderer has a name "on the list", update its visibility if _renderer.name in self.__class__.SUBSET_GLYPH_KWARGS.keys(): _renderer.visible = _renderer.name in visible_keys # store the callback (useful, for example, during automated tests) and link it self._callback_subset_display = lambda: update_data_key_display( self.data_key_button_group.active ) self.data_key_button_group.on_click(update_data_key_display) def value_patch(self, col_original, col_patch, **kwargs): """ ???+ note "Allow source values to be dynamically patched through a slider." | Param | Type | Description | | :--------------- | :----- | :--------------------------- | | `col_original` | `str` | column of values before the patch | | `col_patch` | `str` | column of list of values to use as patches | | `**kwargs` | | forwarded to the slider | [Reference](https://github.com/bokeh/bokeh/blob/2.3.0/examples/howto/patch_app.py) """ # add a patch slider to widgets, if none exist if "patch_slider" not in self._dynamic_widgets: slider = Slider(start=0, end=1, value=0, step=1, **kwargs) slider.disabled = True self._dynamic_widgets["patch_slider"] = slider else: slider = self._dynamic_widgets["patch_slider"] # create a slider-adjusting callback exposed to the outside def adjust_slider(): """ Infer slider length from the number of patch values. 
""" num_patches = None for _key, _df in self.dfs.items(): assert ( col_patch in _df.columns ), f"Subset {_key} expecting column {col_patch} among columns, got {_df.columns}" # find all array lengths; note that the data subset can be empty _num_patches_seen = _df[col_patch].apply(len).values assert ( len(set(_num_patches_seen)) <= 1 ), f"Expecting consistent number of patches, got {_num_patches_seen}" _num_patches = _num_patches_seen[0] if _df.shape[0] > 0 else None # if a previous subset has implied the number of patches, run a consistency check if num_patches is None: num_patches = _num_patches else: assert ( num_patches == _num_patches ), f"Conflicting number of patches: {num_patches} vs {_num_patches}" assert num_patches >= 2, f"Expecting at least 2 patches, got {num_patches}" slider.end = num_patches - 1 slider.disabled = False self._dynamic_callbacks["adjust_patch_slider"] = adjust_slider # create the callback for patching values def update_patch(attr, old, new): for _key, _df in self.dfs.items(): # calculate the patch corresponding to slider value _value = [_arr[new] for _arr in _df[col_patch].values] _slice = slice(_df.shape[0]) _patch = {col_original: [(_slice, _value)]} self.sources[_key].patch(_patch) slider.on_change("value", update_patch) self._good(f"Patching {col_original} using {col_patch}") def _setup_dfs(self, df_dict, copy=False): """ ???+ note "Check and store DataFrames **by reference by default**." Intended to be extended in child classes for pre/post processing. 
| Param | Type | Description | | :---------- | :----- | :--------------------------- | | `df_dict` | `dict` | `str` -> `DataFrame` mapping | | `copy` | `bool` | whether to copy `DataFrame`s | """ self._info("Setting up DataFrames") supplied_keys = set(df_dict.keys()) expected_keys = set(self.__class__.SUBSET_GLYPH_KWARGS.keys()) # perform high-level df key checks supplied_not_expected = supplied_keys.difference(expected_keys) expected_not_supplied = expected_keys.difference(supplied_keys) for _key in supplied_not_expected: self._warn( f"{self.__class__.__name__}.__init__(): got unexpected df key {_key}" ) for _key in expected_not_supplied: self._warn( f"{self.__class__.__name__}.__init__(): missing expected df key {_key}" ) # create df with column checks self.dfs = dict() for _key, _df in df_dict.items(): if _key in expected_keys: for _col in self.__class__.MANDATORY_COLUMNS: if _col not in _df.columns: # edge case: DataFrame has zero rows assert ( _df.shape[0] == 0 ), f"Missing column '{_col}' from non-empty {_key} DataFrame: found {list(_df.columns)}" _df[_col] = None self.dfs[_key] = _df.copy() if copy else _df def _setup_sources(self): """ ???+ note "Create, **(not update)**, `ColumnDataSource` objects." Intended to be extended in child classes for pre/post processing. """ self._info("Setting up sources") self.sources = {_key: ColumnDataSource(_df) for _key, _df in self.dfs.items()} self._postprocess_sources() # initialize attributes that couple with sources # extra columns for dynamic plotting self._extra_source_cols = defaultdict(dict) self._setup_selection_tools() def _setup_selection_tools(self): """ ???+ note "Create data structures and callbacks for dynamic selections." Useful for linking and filtering selections across explorers. 
""" from bokeh.events import SelectionGeometry # store the last manual selections self._last_selections = { _key: RootUnionFind(set()) for _key in self.sources.keys() } # store commutative, idempotent index filters self._selection_filters = { _key: RootUnionFind(set()) for _key in self.sources.keys() } def cumulative_selection_flag(): """ Determine whether cumulative selection is enabled. """ return bool(0 in self.selection_option_box.active) def store_selection(): """ Keep track of the last manual selection. Useful for applying cumulation / filters dynamically. """ # store selection indices for _key, _source in self.sources.items(): _selected = _source.selected.indices # use clear() and update() instead of assignment to keep clean references if not cumulative_selection_flag(): self._last_selections[_key].data.clear() self._last_selections[_key].data.update(_selected) else: self._last_selections[_key].data.update(_selected) _source.selected.indices = list(self._last_selections[_key].data) def trigger_selection_filters(subsets=None): """ Filter selection indices on specified subsets. 
""" if subsets is None: subsets = self.sources.keys() else: assert set(subsets).issubset( self.sources.keys() ), f"Expected subsets from {self.sources.keys()}" for _key in subsets: _selected = self._last_selections[_key].data for _func in self._selection_filters[_key].data: _selected = _func(_selected, _key) self.sources[_key].selected.indices = list(_selected) # keep reference to trigger_store_selection() for testing only self._store_selection = store_selection self.figure.on_event( SelectionGeometry, lambda event: self._store_selection() if event.final else None, ) # keep reference to trigger_selection_filter() for further access # for example, toggling filters should call the trigger self._trigger_selection_filters = trigger_selection_filters self.figure.on_event( SelectionGeometry, lambda event: self._trigger_selection_filters() if event.final else None, ) def _update_sources(self): """ ???+ note "Update the sources with the corresponding dfs." Note that the shapes and fields of sources are overriden. Thus supplementary fields (those that do not exist in the dfs), such as dynamic plotting kwargs, need to be re-assigned. """ for _key in self.dfs.keys(): self.sources[_key].data = self.dfs[_key] self._postprocess_sources() # self._activate_search_builtin(verbose=False) # reset attribute values that couple with sources for _key in self.sources.keys(): _num_points = len(self.sources[_key].data["label"]) # add extra columns for _col, _fill_value in self._extra_source_cols[_key].items(): self.sources[_key].add([_fill_value] * _num_points, _col) # clear last selection but keep the set object self._last_selections[_key].data.clear() # DON'T DO: self._last_selections = {_key: set() for _key in self.sources.keys()} def _postprocess_sources(self): """ ???+ note "Infer source attributes from the dfs, without altering the dfs." Useful for assigning dynamic glyph attributes, similarly to `activate_search()`. 
""" pass def _activate_search_builtin(self, verbose=True): """ ???+ note "Assign Highlighting callbacks to search results in a manner built into the class." Typically called once during initialization. Note that this is a template method which heavily depends on class attributes. | Param | Type | Description | | :---------- | :----- | :--------------------------- | | `verbose` | `bool` | whether to log verbosely | """ for _key, _dict in self.__class__.SUBSET_GLYPH_KWARGS.items(): if _key in self.sources.keys(): # determine responding attributes _responding = list(_dict["search"].keys()) # create a field that holds search results that could be used elsewhere _num_points = len(self.sources[_key].data["label"]) self._extra_source_cols[_key][SEARCH_SCORE_FIELD] = 0 self.sources[_key].add([0] * _num_points, SEARCH_SCORE_FIELD) # make attributes respond to search for _flag, _params in _dict["search"].items(): self.glyph_kwargs[_key] = self.activate_search( _key, self.glyph_kwargs[_key], altered_param=_params, ) if verbose: self._info( f"Activated {_responding} on subset {_key} to respond to the search widgets." ) @abstractmethod def activate_search(self, subset, kwargs, altered_param=("size", 10, 5, 7)): """ ???+ note "Left to child classes that have a specific feature format." | Param | Type | Description | | :-------------- | :------ | :--------------------------- | | `subset` | `str` | the subset to activate search on | | `kwargs` | `bool` | kwargs for the plot to add to | | `altered_param` | `tuple` | (attribute, positive, negative, default) | """ pass def _prelink_check(self, other): """ ???+ note "Sanity check before linking two explorers." 
| Param | Type | Description | | :------ | :------ | :----------------------------- | | `other` | `BokehBaseExplorer` | the other explorer | """ assert other is not self, "Self-loops are fordidden" assert isinstance(other, BokehBaseExplorer), "Must link to BokehBaseExplorer" def link_selection(self, key, other, other_key): """ ???+ note "Synchronize the selected indices between specified sources." | Param | Type | Description | | :------ | :------ | :----------------------------- | | `key` | `str` | the key of the subset to link | | `other` | `BokehBaseExplorer` | the other explorer | | `other_key` | `str` | the key of the other subset | """ self._prelink_check(other) # link selection in a bidirectional manner sl, sr = self.sources[key], other.sources[other_key] def left_to_right(attr, old, new): sr.selected.indices = sl.selected.indices[:] def right_to_left(attr, old, new): sl.selected.indices = sr.selected.indices[:] sl.selected.on_change("indices", left_to_right) sr.selected.on_change("indices", right_to_left) # link last manual selections (pointing to the same set) self._last_selections[key].union(other._last_selections[other_key]) # link selection filter functions (pointing to the same set) self._selection_filters[key].data.update( other._selection_filters[other_key].data ) self._selection_filters[key].union(other._selection_filters[other_key]) def link_selection_options(self, other): """ ???+ note "Synchronize the selection option values between explorers." 
| Param | Type | Description | | :------ | :------ | :----------------------------- | | `other` | `BokehBaseExplorer` | the other explorer | """ def left_to_right(attr, old, new): other.selection_option_box.active = self.selection_option_box.active[:] def right_to_left(attr, old, new): self.selection_option_box.active = other.selection_option_box.active[:] self.selection_option_box.on_change("active", left_to_right) other.selection_option_box.on_change("active", right_to_left) def link_xy_range(self, other): """ ???+ note "Synchronize plotting ranges on the xy-plane." | Param | Type | Description | | :------ | :------ | :----------------------------- | | `other` | `BokehBaseExplorer` | the other explorer | """ self._prelink_check(other) # link coordinate ranges in a bidirectional manner for _attr in ["start", "end"]: self.figure.x_range.js_link(_attr, other.figure.x_range, _attr) self.figure.y_range.js_link(_attr, other.figure.y_range, _attr) other.figure.x_range.js_link(_attr, self.figure.x_range, _attr) other.figure.y_range.js_link(_attr, self.figure.y_range, _attr) @abstractmethod def plot(self, *args, **kwargs): """ ???+ note "Plot something onto the figure." Implemented in child classes based on their functionalities. | Param | Type | Description | | :--------- | :---- | :-------------------- | | `*args` | | left to child classes | | `**kwargs` | | left to child classes | """ pass def auto_color_mapping(self): """ ???+ note "Find all labels and an appropriate color for each." """ from hover.utils.bokeh_helper import auto_label_color labels = set() for _key in self.dfs.keys(): labels = labels.union(set(self.dfs[_key]["label"].values)) return auto_label_color(labels)
figures[fig].figure.visible = False if add_remove in figures[fig]._indicators: if flag == False: figures[fig].hide(add_remove) if fig != 'fig1': figures[fig].figure.xaxis.visible = False if xaxis_visible != '': figures['fig1'].figure.xaxis.visible = False figures[xaxis_visible].figure.xaxis.visible = True else: figures['fig1'].figure.xaxis.visible = True select_indicator.on_change('active', select_indicator_callback) ### Adding to Template curdoc().add_root(select_indicator) #tmp function def instruments_selection(inst): if inst == 'Copper': tmp = dict(ColumnDataSource(df1).data) elif inst == 'Gold': tmp = dict(ColumnDataSource(df2).data) return tmp #Instrument Selection instruments_list = ['Copper', 'Gold']
""" Example demonstrating turning lines on and off - with bokeh server """ import numpy as np from bokeh.io import curdoc from bokeh.layouts import row from bokeh.palettes import Viridis3 from bokeh.plotting import figure from bokeh.models import CheckboxGroup p = figure() props = dict(line_width=4, line_alpha=0.7) x = np.linspace(0, 4 * np.pi, 100) l0 = p.line(x, np.sin(x), color=Viridis3[0], legend="Line 0", **props) l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props) l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props) checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"], active=[0, 1, 2], width=100) def update(attr, old, new): l0.visible = 0 in checkbox.active l1.visible = 1 in checkbox.active l2.visible = 2 in checkbox.active checkbox.on_change('active', update) layout = row(checkbox, p) curdoc().add_root(layout)
    p.xaxis.major_label_text_font_size = '12pt'
    p.yaxis.axis_label_text_font_size = '12pt'
    p.yaxis.major_label_text_font_size = '12pt'
    return p


def get_dataset(carrier_list):
    """Return a ColumnDataSource restricted to the given carrier names."""
    subset = by_carrier[by_carrier['name'].isin(carrier_list)]
    new_src = ColumnDataSource(subset)
    return new_src


def update(attr, old, new):
    """Checkbox callback: map ticked indices to carriers and swap the data."""
    carrier_list = [available_carriers[i] for i in carrier_select.active]
    new_src = get_dataset(carrier_list)
    src.data.update(new_src.data)


# One checkbox per carrier; first carrier pre-selected.
carrier_select = CheckboxGroup(labels=available_carriers, active=[0])
carrier_select.on_change('active', update)

# Initial source reflects the default selection above.
src = get_dataset([available_carriers[i] for i in carrier_select.active])
p = make_plot(src)

layout = row(carrier_select, p)
tab = Panel(child=layout, title='Histogram')
tabs = Tabs(tabs=[tab])
curdoc().add_root(tabs)
""" Example demonstrating turning lines on and off - with bokeh server """ import numpy as np from bokeh.io import curdoc from bokeh.layouts import row from bokeh.models import CheckboxGroup from bokeh.palettes import Viridis3 from bokeh.plotting import figure p = figure() props = dict(line_width=4, line_alpha=0.7) x = np.linspace(0, 4 * np.pi, 100) l0 = p.line(x, np.sin(x), color=Viridis3[0], legend_label="Line 0", **props) l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend_label="Line 1", **props) l2 = p.line(x, np.tan(x), color=Viridis3[2], legend_label="Line 2", **props) checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"], active=[0, 1, 2], width=100) def update(attr, old, new): l0.visible = 0 in checkbox.active l1.visible = 1 in checkbox.active l2.visible = 2 in checkbox.active checkbox.on_change('active', update) layout = row(checkbox, p) curdoc().add_root(layout)
def __init__(self, chart_title, x_axis_label, y_axis_label, x_data, y_data,
             x_axis_type='linear', y_axis_type='linear', tooltips=None,
             formatters=None, tail=False, tail_threshold=0):
    """Build a country-comparison line chart with a scrollable checkbox column.

    NOTE(review): fragment of a class method — the enclosing class header is
    not visible in this chunk.

    Args:
        chart_title / x_axis_label / y_axis_label: figure labelling.
        x_data, y_data: column names plotted on x and y for each country line.
        x_axis_type, y_axis_type: bokeh axis types (e.g. 'linear', 'log').
        tooltips, formatters: passed through to a HoverTool when a line is drawn.
        tail: when True, plot only the tail of each country's series.
        tail_threshold: cutoff passed to CovidData.GetTail when tail is True.
    """
    # Fixed geometry of the checkbox sidebar.
    cb_height = 595
    cb_width = 160

    # The country we're looking at
    default_countries = [
        'France', 'United Kingdom', 'China', 'US', 'Brazil', 'Australia',
        'India', 'Sweden', 'Germany', 'Russia', 'Philippines', 'Nigeria',
        'Saudi Arabia', 'South Africa', 'Mexico', 'Spain'
    ]

    country_data = CovidData()
    checkboxes = CheckboxGroup(labels=country_data.menu, sizing_mode='fixed',
                               height=cb_height, width=cb_width)
    plot = figure(title=chart_title, x_axis_label=x_axis_label,
                  y_axis_label=y_axis_label, x_axis_type=x_axis_type,
                  y_axis_type=y_axis_type, tools='pan, wheel_zoom, reset',
                  active_drag='pan', active_scroll='wheel_zoom',
                  sizing_mode='stretch_both')

    def AddDefaultCountries():
        # Tick the checkbox for every default country (triggers SelectCountry
        # only if bokeh emits a change event for in-place list mutation —
        # NOTE(review): confirm; appending to .active may bypass on_change).
        for country in default_countries:
            index = checkboxes.labels.index(country)
            checkboxes.active.append(index)

    def SelectCountry(attr, old, new):
        # Diff old vs. new 'active' lists to find the single toggled entry.
        now_selected = list(set(new) - set(old))
        was_selected = list(set(old) - set(new))
        if now_selected:
            country = checkboxes.labels[now_selected[0]]
            # Lazily create the glyph the first time a country is selected.
            # NOTE(review): '== None' should conventionally be 'is None'.
            if country_data.glyph_dict[country] == None:
                country_df = country_data.GetDataFrame(country)
                if tail == True:
                    country_df = country_data.GetTail(
                        country, x_data, y_data, tail_threshold)
                country_cds = ColumnDataSource(country_df)
                country_data.glyph_dict[country] = plot.line(
                    x=x_data, y=y_data, source=country_cds, name=country,
                    line_color=country_data.colour_dict[country],
                    line_width=1)
                # Drop any existing HoverTool so only one is attached.
                # NOTE(review): removing from plot.tools while iterating it —
                # verify this doesn't skip entries when several match.
                for tool in plot.tools:
                    if type(tool).__name__ == 'HoverTool':
                        plot.tools.remove(tool)
                # Create a hover tool
                hover_tool = HoverTool()
                # Set the tooltips
                hover_tool.tooltips = tooltips
                # Formatter for dates
                hover_tool.formatters = formatters
                # Add the tooltip
                plot.add_tools(hover_tool)
            country_data.glyph_dict[country].visible = True
        elif was_selected:
            # Deselection just hides the cached glyph; data is kept.
            country = checkboxes.labels[was_selected[0]]
            country_data.glyph_dict[country].visible = False

    checkboxes.on_change('active', SelectCountry)

    # Scrollable sidebar + stretch-to-fit plot.
    Column = column(checkboxes, sizing_mode='fixed', height=cb_height,
                    width=cb_width, css_classes=['scrollable'])
    Row = row(Column, plot)
    Row.sizing_mode = 'stretch_both'

    AddDefaultCountries()
    curdoc().add_root(Row)
# NOTE(review): fragment — this keyword argument closes a call (likely a date
# formatter) whose opening lies before this chunk.
    format='%Y-%m-%d')

# Create a colormap for category
cat_scatter = sorted(category_counts['category'].unique())
# NOTE(review): Category20 only defines palettes for 3..20 entries — this
# indexing fails outside that range; confirm category count is bounded.
colors_scatter = d3['Category20'][len(cat_scatter)]
cmp_scatter = dict(zip(cat_scatter, colors_scatter))
category_counts['color_by_cat'] = category_counts['category'].map(cmp_scatter)

src_scatter = ColumnDataSource(category_counts)
p4 = plot_scatter(src_scatter)

# Category filter checkboxes; defaults come from set_default_category().
category3 = CheckboxGroup(labels=list(cat_scatter),
                          active=set_default_category(category_counts))
update_scatter()
category3.on_change('active', lambda attr, old, new: update_scatter())

l3 = column(
    column(DIV_COUNT, ANNT_COUNT, p3),
    row(column(DIV_FILTER, DIV_CATEGORY, category3, width=200),
        column(DIV_CAT_COUNT, ANNT_CAT_COUNT, p4)))
tab3 = Panel(child=l3, title='Event Count')

# ---------------------------- Fourth Tab: Word Cloud ----------------------------


def plot_wordcloud():
    # NOTE(review): chunk ends mid-call — the remaining figure() arguments and
    # the rest of this function are not visible in this view.
    p = figure(x_range=(0, 1200), y_range=(0, 500), plot_width=1000,
# NOTE(review): fragment — this chunk opens inside an if/elif chain of the
# 'updatepie' callback; the def line and first condition lie before this view.
        pie_data = tempData.groupby('sex').size().reset_index(name='count')
        # Wedge angles proportional to each group's share of the total.
        angle = pie_data['count']/pie_data['count'].sum() * 2*pi
        columnData.data['angle'] = angle
        # NOTE(review): storing a whole DataFrame under one 'data' key looks
        # suspect — verify against the column names the wedge() call expects.
        columnData.data['data'] = pie_data
        print(new[0])
    elif new[0] == 1:
        # Subset: 'MS' school only.
        tempData = df[(df['school']== 'MS')]
        pie_data = tempData.groupby('sex').size().reset_index(name='count')
        angle = pie_data['count']/pie_data['count'].sum() * 2*pi
        columnData.data['angle'] = angle
        columnData.data['data'] = pie_data
        print(new[0])


checkbox_group.on_change('active', updatepie)

# Pie chart: wedges sized by the cumulative 'angle' column.
# NOTE(review): 'legend=' is deprecated in newer bokeh ('legend_field'/'legend_label').
pie_chart.wedge(x=0, y=1, radius=0.4,
                start_angle=cumsum('angle', include_zero=True),
                end_angle=cumsum('angle'),
                line_color="white", fill_color='color', legend='sex',
                source=columnData)
pie_chart.axis.axis_label=None
pie_chart.axis.visible=False
pie_chart.grid.grid_line_color = None

col2 = column(checkbox_group,pie_chart)

##########################################################################################################
# Create Stacked Bar Chart
dataG3 = df.groupby(['G3','sex']).size().reset_index(name='counts')
# Sorted list of unique final grades for the bar categories.
dfG3 = df.G3.unique().tolist()
dfG3.sort()
""" ############################################################################### # Read site geojson nlp_sites_gdf = gpd.read_file(platform_dir_path / 'data/.nlp/site_info.geojson') site_description_div = Div( text="<h2>Pick sites</h2>Select the sites you want to simulate " + "(multi-selection possible):", sizing_mode="stretch_width") # Site names site_labels = [row["name"] for _, row in nlp_sites_gdf.iterrows()] sites_checkbox_group = CheckboxGroup(labels=site_labels, active=[]) # Reset checks if anything changes sites_checkbox_group.on_change('active', _input_changed) def _check_sites(): return bool(sites_checkbox_group.active) sites_section = column(site_description_div, sites_checkbox_group) ############################################################################### """ Directory paths """ ############################################################################### input_dir_paths_div = Div( text=
checkbox_to_plot = [ checkbox_selection.labels[i] for i in checkbox_selection.active ] # Generate the new curve source.data = { 'year': data[(data.month == k) & (data.Country.isin(checkbox_to_plot))].year, 'month': data[(data.month == k) & (data.Country.isin(checkbox_to_plot))].month, 'temperature': data[(data.month == k) & (data.Country.isin(checkbox_to_plot))].AverageTemperature, 'country': data[(data.month == k) & (data.Country.isin(checkbox_to_plot))].Country } slider_month.on_change('value', update) checkbox_selection.on_change('active', update) # STEP 7: design our layout layout = column( p, row(checkbox_selection, slider_month)) # we create the layout in a column and row shape output_file = ('scatter.html') # we output the html file # STEP 8: run the server behind the visualisation! curdoc().add_root(layout)
# NOTE(review): fragment — these keyword items close a call (likely the data
# dict of a ColumnDataSource) whose opening lies before this chunk.
    y1=[],
    cx=[],
    cy=[],
)

# NOTE(review): this CustomJS is constructed but never assigned to a variable
# or attached to a model, so it has no effect as written — verify intent.
CustomJS(args=dict(p=p), code="""
p.reset.emit()
""")

# Visibility toggles for the four glyph groups (all on by default).
toggle_checkbox = CheckboxGroup(labels=[
    "Toggle Start Points", "Toggle End Points", "Toggle Hexes", "Toggle Path"
], active=[0, 1, 2, 3])
toggle_checkbox.on_change("active", toggle_checkbox_handler)

# Weekday filter (all days active by default).
weekday_checkbox = CheckboxGroup(labels=[
    'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',
    'Sunday'
], active=[0, 1, 2, 3, 4, 5, 6])

cid_filter = CheckboxGroup(labels=["Filter Customers"], active=[])
#cid_filter.on_change('active',cid_filter_handler)

#print(df.columns)
# NOTE(review): building CID through a set makes its order nondeterministic,
# so CID[0] (the default selection) can vary between runs — confirm.
CID = list(set(df['customer_id'].astype('str')))
print('CID', len(CID))
select_cid = Select(title="Select Customer IDs:", value=CID[0], options=CID)
# NOTE(review): fragment — this assignment sits inside a loop/branch whose
# header lies before this chunk.
serieslist[ll] = 'aia.lev1_euv_12s'


def Wavelngth_checkbox_change(attrname, old, new):
    """Handle the 'All'/'None' pseudo-entries of the wavelength checkbox group."""
    # Labels currently ticked.
    labelsactive = [
        Wavelngth_checkbox.labels[ll] for ll in Wavelngth_checkbox.active
    ]
    if 'All' in labelsactive:
        # NOTE(review): a range object is assigned to 'active' — bokeh list
        # properties expect a list; this likely needs list(range(...)) on
        # Python 3. Confirm before changing.
        Wavelngth_checkbox.active = range(len(Wavelngth_checkbox.labels) - 1)
        Wavelngth_checkbox.labels = wavelngth_list[0:-1] + ['None']
    elif 'None' in labelsactive:
        Wavelngth_checkbox.active = [0]
        Wavelngth_checkbox.labels = wavelngth_list


Wavelngth_checkbox.on_change('active', Wavelngth_checkbox_change)


def get_num(x):
    """Return the integer formed by all digit characters of *x*, in order."""
    return int(''.join(ele for ele in x if ele.isdigit()))


# NOTE(review): 'global' at module level is redundant (module scope is global).
global MkPlot_args_dict
MkPlot_args_dict = {}


def DownloadData():
    """Validate the requested time range, then (below) query JSOC for data."""
    [tst, ted] = gettime()
    if ted.mjd <= tst.mjd:
        # Reject inverted/empty time ranges before issuing any request.
        Div_JSOC_info.text = '''Error: start time must occur earlier than end time. please re-enter start time and end time!!!'''
    # NOTE(review): chunk ends here — the body of this elif and the rest of
    # DownloadData are not visible in this view.
    elif len(Wavelngth_checkbox.active) == 0: