def load_data_plot(renderer, frame_indices, gripper_status, action_status,
                   gripper_action_label, height, width):
    # load the gripper data
    gripper_data = hv.Table({
        'Gripper': gripper_status,
        'Frame': frame_indices
    }, ['Gripper', 'Frame'])
    gripper_curves = gripper_data.to.curve('Frame', 'Gripper')
    gripper_curves = gripper_curves.options(width=width, height=height // 4)
    gripper_plot = renderer.get_plot(gripper_curves)

    # load the action data
    action_data = hv.Table({
        'Action': action_status,
        'Frame': frame_indices
    }, ['Action', 'Frame'])
    action_curves = action_data.to.curve('Frame', 'Action')
    action_curves = action_curves.options(width=width, height=height // 4)
    action_plot = renderer.get_plot(action_curves)

    # load the gripper action label
    gripper_action_data = hv.Table({
        'Gripper Action': gripper_action_label,
        'Frame': frame_indices
    }, ['Gripper Action', 'Frame'])
    gripper_action_curves = gripper_action_data.to.curve('Frame', 'Gripper Action')
    gripper_action_curves = gripper_action_curves.options(width=width,
                                                          height=height // 4)
    gripper_action_plot = renderer.get_plot(gripper_action_curves)

    return gripper_plot, action_plot, gripper_action_plot

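# --- Usage sketch (not from the original source): a minimal, hedged example of
# --- calling load_data_plot with the Bokeh renderer and synthetic NumPy arrays.
# --- The signal names, sizes and values below are assumptions for illustration only.
import numpy as np
import holoviews as hv

hv.extension('bokeh')
renderer = hv.renderer('bokeh')

frame_indices = np.arange(100)
gripper_status = np.random.randint(0, 2, size=100)        # assumed binary open/closed signal
action_status = np.random.randint(0, 4, size=100)         # assumed integer action ids
gripper_action_label = np.random.randint(0, 2, size=100)  # assumed per-frame label

gripper_plot, action_plot, gripper_action_plot = load_data_plot(
    renderer, frame_indices, gripper_status, action_status,
    gripper_action_label, height=400, width=800)
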
def plot_overview():
    produccion = serie_campo.groupby(by='fecha').mean()
    estado_mecanico = serie_status

    elementos_status = dict(status=pd.unique(estado_mecanico.estado_actual),
                            numero=estado_mecanico.estado_actual.value_counts())
    elementos_trayectoria = dict(trayectoria=pd.unique(estado_mecanico.trayectoria),
                                 numero=estado_mecanico.trayectoria.value_counts())
    elementos_pozos = dict(indice=resumen.index[0:6], valores=resumen[0:6])
    elementos_volumen = dict(indice=resumen.index[13:], valores=resumen[13:])

    tabla_pozos = hv.Table(elementos_pozos, 'indice', 'valores')
    tabla_pozos.opts(height=500, fontscale=20)
    tabla_volumen = hv.Table(elementos_volumen, 'indice', 'valores')
    tabla_volumen.opts(height=500, fontscale=20)

    plot_prod_aceite = hv.Curve(produccion, 'fecha', 'aceite_Mbd', label='Aceite Mbd')
    plot_prod_gas = hv.Curve(produccion, 'fecha', 'gas_asociado_MMpcd',
                             label='Gas Asociado MMpcd')
    plot_produccion = plot_prod_aceite * plot_prod_gas
    plot_produccion.opts(width=600, fontscale=1.5)

    plot_trayectoria = hv.Bars(elementos_trayectoria, 'trayectoria', 'numero')
    plot_trayectoria.opts(stacked=True,
                          color='trayectoria',
                          cmap='Spectral',
                          invert_axes=True,
                          fontscale=1.5,
                          yaxis=None)
    # fill_color=factor_cmap('trayectoria', palette=Spectral6,
    #                        factors=elementos_trayectoria['trayectoria']))

    plot_status = hv.Bars(elementos_status, 'status', 'numero')
    plot_status.opts(stacked=True,
                     color='status',
                     fill_color=factor_cmap('status', palette=Spectral6,
                                            factors=elementos_status['status']),
                     xrotation=90,
                     invert_axes=True,
                     fontscale=1.5,
                     xticks=None,
                     yaxis=None)

    row1 = tabla_pozos + plot_status + plot_trayectoria
    row2 = tabla_volumen + plot_produccion

    fig1 = hv.render(row1)
    hv.output(row1, backend='bokeh', fig='html', size=200)
    fig2 = hv.render(row2)
    hv.output(row2, backend='bokeh', fig='html', size=200)
    return

def generate_country_list(self):
    if self.multi_select.value:
        food_list = self.multi_select.value
        country_list = []
        for x in food_list:
            # This loop filters the data to get the lowest price for each food
            # temp = df.loc[df['mp_year'] == 2020]
            temp = df[(df['mp_year'] >= self.date_select)
                      & (df['mp_year'] <= self.date_select1)]
            temp = temp.loc[temp['cm_name'] == x]
            temp = temp.sort_values('mp_price')
            temp = temp.drop_duplicates(subset=['adm0_name'], keep='first')
            temp = temp.nsmallest(5, ['mp_price'])
            country_list.append(temp)
        df_final = pd.concat(country_list)
        df_final = df_final.reset_index(drop=True)
        df_final = df_final.sort_values('mp_price')
        mask = df_final['adm0_name'].isin(
            df_final['adm0_name'].value_counts()[:5].index.tolist())
        df_final = df_final.loc[mask]
        df_final = df_final.drop_duplicates(subset=['adm0_name'], keep='first')
        mes = f"If you buy {food_list} the most, you should live in the following countries:"
        self.message = '######' + mes
        return hv.Table(data=df_final['adm0_name']).opts(width=1000)
    else:
        return hv.Table(data=pd.DataFrame(columns=['adm0_name'])).opts(width=1000)

def daily_table(species=None, day=None):
    if not species or not day:
        return hv.Table(
            pd.DataFrame(columns=['Species', 'Speed [km/day]'])).relabel(
                'No species selected')
    subset = df[df.species.isin(species)]
    subset = subset[subset.day == day]
    return hv.Table(
        pd.DataFrame({
            'Species': species,
            'Speed [km/day]': subset['speed']
        })).relabel('day: {}'.format(day))

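# --- Usage sketch (assumption-based): daily_table reads a module-level `df`
# --- with `species`, `day` and `speed` columns (names taken from the body above);
# --- the tiny frame and species names below are illustrative only.
import pandas as pd
import holoviews as hv

hv.extension('bokeh')

df = pd.DataFrame({
    'species': ['gull', 'gull', 'tern'],
    'day': [1, 2, 1],
    'speed': [42.0, 38.5, 51.2],
})

table = daily_table(species=['gull'], day=1)   # hv.Table relabelled 'day: 1'
empty = daily_table()                          # falls back to the empty table
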
def tap_table(self, left_x, bottom_y, right_x, top_y, index):
    e = self._posxy._dataset[self._data.data_dims]
    # print left_x, bottom_y, right_x, top_y, index
    if len(index) == 0 or left_x is None or bottom_y is None:
        # return hv.Points(([0],[0]))
        return hv.Table(e.head(0).reset_index())
    d = e.iloc[index]
    if len(index) < 1000:
        return hv.Table(d.reset_index(), label="%d sequences" % (len(d)))
    else:
        p = hv.Table(e.head(0).reset_index(),
                     label="Not showing %d sequences" % (len(d)))
        return p

def panel(self):
    self.event_table = hv.Table(self.events)
    selected = hv.streams.Selection1D(source=self.event_table)
    tags = pn.widgets.MultiChoice(value=["Weird"],
                                  name="Tags",
                                  options=[
                                      "Weird", "Very weird", "Super weird",
                                      "Uber weird",
                                      "Weird with a cherry on top"
                                  ],
                                  solid=False,
                                  width=700)
    event_browser = pn.Row(
        self.event_table,
        hv.DynamicMap(self.peak_plot, streams=[selected]))
    comments = pn.widgets.input.TextAreaInput(
        name='Comments',
        placeholder='Add some extra comments...',
        height=500)
    return pn.Column(
        pn.pane.Markdown(
            "## Make a nice interactive app to browse through the waveforms grouped by event and maybe some other options for grouping..."
        ),
        pn.layout.Divider(),
        event_browser,
        pn.layout.Divider(),
        pn.pane.Markdown(
            "## Allow for adding tags and comments and track them for each waveform."
        ),
        tags,
        comments,
    )

def plotQC(df, value, title, size=10, jitter=0.35, factor_reduce=0.5):
    df_i = df.dropna(subset=[value])
    df_i = df_i.reset_index(drop=True)

    key_dimensions = [(value, title)]
    value_dimensions = [('Gene', 'Gene'), ('Metadata_X', 'Position')]
    macro = hv.Table(df_i, key_dimensions, value_dimensions)

    options = dict(color_index='Position',
                   legend_position='left',
                   jitter=jitter,
                   width=1000,
                   height=600,
                   scaling_method='width',
                   scaling_factor=2,
                   size_index=2,
                   show_grid=True,
                   tools=['hover', 'box_select', 'lasso_select'],
                   line_color='k',
                   cmap='Category20',
                   size=size,
                   nonselection_color='lightskyblue')
    quality_scatter = macro.to.scatter('Gene', [title]).options(**options)
    sel = streams.Selection1D(source=quality_scatter)

    image_name = df_i.loc[0, "img_name_raw"]
    img = cv2.imread(image_name, 0)
    h, w = img.shape
    w = int(factor_reduce * w)
    h = int(factor_reduce * h)
    pad = int(2.2 * w)

    def selection_callback(index):
        if not index:
            return hv.Div("")
        divtext = f'<table width={pad} border=1 cellpadding=10 align=center valign=center>'
        for i, j in grouped(index, 2):
            value_s = '{:f}'.format(df_i[value][i])
            value_s2 = '{:f}'.format(df_i[value][j])
            divtext += '<tr>'
            divtext += f'<td align=center valign=center><br> {i} Value: {value_s}</br></td>' + "\n"
            divtext += f'<td align=center valign=center><br> {j} Value: {value_s2}</br></td>' + "\n"
            divtext += '</tr><tr>'
            divtext += f'<td align=center valign=center><img src={df_i.loc[i, "img_name_raw"]} width={w} height={h}></td>'
            divtext += f'<td align=center valign=center><img src={df_i.loc[j, "img_name_raw"]} width={w} height={h}></td>'
            divtext += '</tr>'
        if len(index) % 2 == 1:
            value_s = '{:f}'.format(df_i[value][index[-1]])
            divtext += '<tr>'
            divtext += f'<td align=center valign=center><br> {index[-1]} Value: {value_s}</br></td>' + "\n"
            divtext += f'<td align=center valign=center><br> </br></td>' + "\n"
            divtext += '</tr><tr>'
            divtext += f'<td align=center valign=center><img src={df_i.loc[index[-1], "img_name_raw"]} width={w} height={h}></td>'
            divtext += f'<td align=center valign=center></td>'
            divtext += '</tr>'
        divtext += '</table>'
        return hv.Div(str(divtext))

    div = hv.DynamicMap(selection_callback, streams=[sel])
    hv.streams.PlotReset(source=quality_scatter,
                         subscribers=[lambda reset: sel.event(index=[])])

    return hv.Layout(quality_scatter + div).cols(1), sel

def GenDepth(sonar_data, chan_id, regen_cache=False):
    normalized_channel = NormalizeChannel(sonar_data, chan_id, regen_cache=regen_cache)

    hv_ds = holoviews.Dataset(normalized_channel)
    img = hv_ds.to(holoviews.Image, kdims=["frame_index", "depth"])
    img = img.opts(cmap='viridis', logz=False)
    img = img.opts(tools=['hover', 'crosshair'])

    channel = sonar_data.sel(channel=chan_id)
    depth_data = holoviews.Table((channel.frame_index, channel.water_depth),
                                 'frame_index', 'depth')
    depth_curve = holoviews.Curve(depth_data)
    depth_curve = depth_curve.opts(line_width=0.5, color='red', alpha=0.5)  # , line_dash='dashed')
    depth_curve = depth_curve.opts(tools=['hover', 'crosshair'])
    depth_curve = depth_curve.opts(active_tools=['box_zoom'])

    # @todo We should consider using the native height instead of 1024 as we will see more detail.
    # y_size = len(normalized_channel.depth)
    x_size = 1024
    y_size = 768

    rasterized_img = holoviews.operation.datashader.rasterize(img,
                                                              width=x_size,
                                                              height=y_size,
                                                              precompute=True)
    rasterized_img = rasterized_img.opts(invert_yaxis=True)

    graph = holoviews.Overlay([rasterized_img, depth_curve])
    graph = graph.collate()
    return graph

def plot_wave_height(self, avg_df):
    """
    Create an HTML plot of the wave height data from the CSV file.
    :param avg_df: Dataframe of the CSV file
    :return:
    """
    # Keep only rows of the "XdcrDepth" data type
    selected_avg_df = avg_df[avg_df.data_type.str.contains("XdcrDepth")]

    # Remove all the columns except datetime and value
    selected_avg_df = selected_avg_df[['datetime', 'value']]

    # Set independent variables or index
    kdims = [('datetime', 'Date and Time')]

    # Set the dependent variables or measurements
    vdims = hv.Dimension(('value', 'Wave Height'), unit='m')

    # Plot the curve alongside a table of the data
    plot = hv.Curve(selected_avg_df, kdims, vdims) + hv.Table(selected_avg_df)

    # Save the plot to a file
    renderer = hv.renderer('bokeh')
    bk_plot = renderer.get_plot(plot).state
    bk_plot.sizing_mode = 'scale_both'
    output_file(self.html_file)
    save(bk_plot)
    # renderer.save(plot, self.html_file)

    # Refresh the web view
    self.refresh_web_view_sig.emit()

def plot_global_stat(self, global_stat):
    data = [[i, name, self.data[global_stat][j][i]]
            for j, name in enumerate([i.name for i in self.networks])
            for i in range(self.turn + 1)]
    table = holoviews.Table(data, ['turn', 'network'], [global_stat])
    plot = table.to(holoviews.Curve).overlay('network')
    return plot

def plot_arms_error(self, start_time=None, interval=None, lookahead=0, lookbehind=0):
    error_plots = {}
    real_pos_plots = {}
    joint_pos_plots = {}
    for arm, arm_table in self.arm_error_tables.items():
        if not (start_time is None or interval is None):
            # arm_table = arm_table[start_time: start_time+interval]
            pass
        if len(arm_table) == 0:
            # dummy table
            arm_table = hv.Table([[start_time, self.max_pos, 0, 0]],
                                 kdims=['time'],
                                 vdims=['real_pos', 'plt_error_down', 'plt_error_up'])
        error_plots[arm] = hv.ErrorBars(arm_table,
                                        kdims='time',
                                        vdims=['real_pos', 'plt_error_down', 'plt_error_up'],
                                        extents=(start_time, self.min_pos,
                                                 start_time + interval, self.max_pos))
        error_plots[arm].redim(time={'range': (start_time, start_time + interval)})
        real_pos_plots[arm] = arm_table.to.points(
            kdims=['time', 'real_pos'],
            vdims=['real_pos'],
            extents=(start_time, self.min_pos, start_time + interval, self.max_pos))
        # joint_pos_plots[arm] = real_pos_plots[arm] * error_plots[arm]

    errorbars = hv.NdOverlay(error_plots, kdims='arm')
    points = hv.NdOverlay(real_pos_plots, kdims='arm')
    return errorbars * points

def view(self):
    layout = pn.Tabs()
    plot = None
    if self.liste_des_sources:
        plot = self.liste_des_sources.plot.graphique_default()
        plot.opts(toolbar='above',
                  default_tools=['box_select', 'wheel_zoom', 'reset'],
                  active_tools=['tap', 'wheel_zoom'])
        layout.append(
            ('Graphique', pn.Row(plot, sizing_mode='stretch_width')))
        dataTable = self.liste_des_sources._dataframe
        if hvplot.util.is_geodataframe(dataTable):
            dataTable = pd.DataFrame(dataTable.drop(['geometry'], axis=1))
        table = hv.Table(dataTable, sizing_mode='stretch_width').opts(height=650,
                                                                      width=1500)
        layout.append(('Table', pn.Row(table)))
        if len(plot) == len(table):
            DataLink(plot, table)
        return layout
    else:
        layout = pn.Column(pn.pane.HTML('Aucun catalogue disponible'),
                           sizing_mode='stretch_width')
        return layout

def create_table(self):
    self.filename_df["Standard deviation"] = self.filename_df[
        "Standard deviation"].apply(np.format_float_scientific, args=[3])
    table = hv.Table(self.filename_df)
    table.opts(width=1300, height=800)
    self.PlotDict["All"] = self.PlotDict["All"] + table

def plot_heatmap(self):
    # Heatmap of session counts by weekday and hour
    self.day_hour = self.data.groupby(by=['Weekday', 'Hour']).agg({
        'Weekday': 'count'
    }).rename(columns={'Weekday': 'Sessions'})
    self.day_hour = self.day_hour.reset_index()
    # print(self.day_hour)

    key_dimensions_hm = [('Weekday', 'Weekday'), ('Hour', 'Hour')]
    value_dimensions_hm = [('Sessions')]
    macro_hm = hv.Table(self.day_hour, key_dimensions_hm, value_dimensions_hm)
    hm = macro_hm.to.heatmap(['Weekday', 'Hour'], 'Sessions', []).options(
        width=748,
        show_legend=True,
        height=525,
        color=hv.Cycle('Spectral'),
        tools=['hover'],
        title_format="Heatmap")
    hm_plot = renderer.get_plot(hm).state
    hm_plot.y_range = FactorRange(factors=[
        '01 AM', '02 AM', '03 AM', '04 AM', '05 AM', '06 AM', '07 AM',
        '08 AM', '09 AM', '10 AM', '11 AM', '12 PM', '01 PM', '02 PM',
        '03 PM', '04 PM', '05 PM', '06 PM', '07 PM', '08 PM', '09 PM',
        '10 PM', '11 PM'
    ])
    hm_plot.x_range = FactorRange(
        factors=['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
    return hm_plot

def __init__(self, data_file, cat_specifier='name', topn=10):
    # cross-ref category with nestorParams
    possible_cats = list(nestorParams.datatype_search(cat_specifier))

    # load up the data
    self.df = pd.read_hdf(data_file, key='df')
    self.tag_df = pd.read_hdf(data_file, key='tags')
    # print(self.tag_df)

    self.names = [
        name for name in possible_cats if name in self.df.columns.tolist()
    ]
    self.name_opt = {
        name: {
            'name': nestorParams.datatypes[name],
            'opts': self._get_cat_list(name, topn)
        }
        for name in self.names
    }

    # filtering tags by count
    # self.node_thres = range(1, 91, 10)
    self.node_thres = np.around(np.logspace(0, 1.5), decimals=1)

    # for network-based plot options
    self.weights = ['cosine', 'count']
    self.edge_thres = range(1, 91, 10)

    self.table = hv.Table(self.df)

def __init__(self, error_table):
    self.error_table = error_table
    self.arm_error_tables = {}
    maxlist = []
    minlist = []
    for arm in self.arms:
        arm_table = error_table.loc[:, idx[arm, ['real_pos', 'plt_error_up',
                                                 'plt_error_down']]]
        arm_table.columns = arm_table.columns.droplevel(0)
        arm_table = arm_table.reset_index(level=['time']).reindex(
            columns=['time', 'real_pos', 'plt_error_up', 'plt_error_down'])
        self.arm_error_tables[arm] = hv.Table(
            arm_table.dropna(),
            kdims='time',
            vdims=['real_pos', 'plt_error_down', 'plt_error_up'])
        maxlist.append(
            max(self.arm_error_tables[arm]['real_pos'] +
                self.arm_error_tables[arm]['plt_error_up']))
        minlist.append(
            min(self.arm_error_tables[arm]['real_pos'] +
                self.arm_error_tables[arm]['plt_error_down']))
    self.min_pos = min(minlist)
    self.max_pos = max(maxlist)

def __init__(self, odtpath, omfpaths):
    """
    ODT2hv(odtpath, omfpaths)

    Takes a list of OMF files and matches the filenames with outputs in a
    corresponding ODT file. Graphs of properties can be plotted using the
    method ODT2hv.get_curve.

    Inputs
    ======
    odtpath: Path to an OOMMF ODT file.
    omfpaths: List of OMF files.
    """
    self.omfpaths = omfpaths
    strarray = [re.findall(r"[\w']+", file)[-3:-1] for file in omfpaths]
    relevantfiles = np.array([[int(i[0]), int(i[1])] for i in strarray])
    index = pd.DataFrame(relevantfiles, columns=('stage', 'iteration'))
    odtframe = oommfodt.OOMMFodt(odtpath).df
    reduced = pd.merge(index, odtframe)
    reduced = reduced.reset_index()
    reduced.rename(columns={'index': 'File'}, inplace=True)
    self.frame = reduced
    self.headers = list(reduced.columns)[1:]
    self.hv = hv.Table(self.frame)

def get_global_stats_table(self) -> holoviews.Table:
    data = [[i, name, global_stat, self.data[global_stat][j][i]]
            for j, name in enumerate([i.name for i in self.networks])
            for i in range(self.turn + 1)
            for global_stat in FIELD_STATS + RESEARCH]
    table = holoviews.Table(data, ['turn', 'network', 'stat'], 'value')
    return table

def create_table(self):
    fit_v = N_fit = der_v = N_der = t = phi = '_'
    df = pd.DataFrame(columns=[
        "File", "fit_voltage", "Nox_fit", "der_voltage", "Nox_der",
        "tox [nm]", "phi_ms"
    ])
    for file in self.data["keys"]:
        if self.do_fit:
            fit_v = self.data[file]["fit"]["flatband"]
            N_fit = self.data[file]["fit"]["parameters"]["Nox"]
            t = self.data[file]["fit"]["parameters"]["t"]
            phi = self.data[file]["fit"]["parameters"]["phi_ms"]
        if self.do_derivative:
            der_v = self.data[file]["derivative"]["flatband"]
            N_der = self.data[file]["derivative"]["parameters"]["Nox"]
            t = self.data[file]["derivative"]["parameters"]["t"]
            phi = self.data[file]["derivative"]["parameters"]["phi_ms"]
        dic = {
            "File": file,
            "fit_voltage": fit_v,
            "Nox_fit": f(N_fit, 3),
            "der_voltage": der_v,
            "Nox_der": f(N_der, 3),
            "tox [nm]": t * 10**9,
            "phi_ms": phi
        }  # f() --> numpy.format_float_scientific
        df = df.append(dic, ignore_index=True)
    table = hv.Table(df)
    table.opts(width=1300, height=800)
    self.PlotDict["All"] = self.PlotDict["All"] + table

def setUp(self):
    keys = [('M', 10), ('M', 16), ('F', 12)]
    values = [(15, 0.8), (18, 0.6), (10, 0.8)]
    self.table = hv.Table(zip(keys, values),
                          kdims=['Gender', 'Age'],
                          vdims=['Weight', 'Height'])
    super(TestEllipsisTable, self).setUp()

def _update_opinion_warning(self):
    self._opinion_warning.alert_type = "primary"
    if self.current_user is None:
        self._opinion_warning.object = """
        Log in to give your opinion
        """
        self._opinion_warning.height = 80
    elif opinion.get(self.lsd, self.revision, self.current_user):
        self._opinion_warning.object = """
        **You already voted on the data quality of this day.**
        Choose a different option to change your decision.
        """
        self._opinion_warning.height = 110
    else:
        self._opinion_warning.object = "You didn't give your opinion yet."
        self._opinion_warning.height = 80
        self._opinion_notes.value = None

    # Also update day stats here
    if self.lsd is not None:
        num_opinions = opinion.get_opinions_for_day(self.lsd)
        num_opinions.update({"total": sum(num_opinions.values())})
        self._day_stats[0] = hv.Table(
            (list(num_opinions.keys()), list(num_opinions.values())),
            "Decision",
            "Number of opinions",
            label="Opinions on this day",
        ).opts(sortable=False, index_position=None)

        opinions_by_user = opinion.get_user_stats(zero=False)
        opinions_by_user = [(k, opinions_by_user[k]) for k in sorted(
            opinions_by_user, key=opinions_by_user.get, reverse=True)]
        self._day_stats[2] = hv.Table(
            opinions_by_user,
            "User",
            "Number of opinions",
            label="Highscore",
        )

        notes = opinion.get_notes_for_day(self.lsd)
        text = """
        <span style="color:black;font-family:Arial;font-style:bold;font-weight:bold;font-size:12pt">
        Notes
        </span><div style="text-align: left">
        """
        for user, entry in notes.items():
            text = f"{text}<b>{user}</b>: (<i>{entry[0]}</i>) {entry[1]}</br>"
        text = f"{text}</div>"
        self._day_stats[1] = pn.pane.HTML(text)

def get_acc_table(complete_df):
    daily_cases = complete_df.groupby('Fecha').sum()
    daily_cases = daily_cases.reindex(index=daily_cases.index[::-1])
    table = hv.Table(daily_cases,
                     kdims='Fecha',
                     vdims=list(daily_cases.columns)[0:],
                     label='Tabla')
    table.opts(width=width, height=400, show_title=False)
    return table

def selection(self):
    table = hv.Table(dset).opts(width=1550)
    if self.linked_selection and self.linked_selection.selection_expr:
        selected = table[self.linked_selection.selection_expr.apply(table)]
        self.events = selected.data
        return selected
    self.events = table.data
    return table

def genTable(infoset=p1.keys()[0], player=PLAYER, **kwargs):
    '''
    generate a Table object containing the reach of the infoset
    '''
    iS = p1[infoset] if player == 'P1' else p2[infoset]
    label = 'Reach at iterate %d for %s' % (ITERATE, iS.alg)
    idx = ITERATE if (0 <= ITERATE and ITERATE < len(iS.reach)) else (len(iS.reach) - 1)
    return hv.Table(([iS.reach[idx]], ), [label])

def makeCtrlReport(combineddf, posname, cellname, watername):
    """
    Make a HoloViews data table out of the control wells; metrics (pass/fail)
    could also be added if desired. The control name should be in the
    Sample_Name column: typically posname will start with PC, cellname will
    start with RPE, and watername will be NTC.
    """
    pcdf = pd.DataFrame(columns=combineddf.columns)
    celldf = pd.DataFrame(columns=combineddf.columns)
    waterdf = pd.DataFrame(columns=combineddf.columns)
    removelist = []
    for idx in combineddf.index:
        if (str(combineddf['Sample_Name'][idx]).startswith(posname)):
            pcdf.loc[len(pcdf)] = combineddf.iloc[idx]
            removelist.append(idx)
        if (str(combineddf['Sample_Name'][idx]).startswith(cellname)):
            celldf.loc[len(celldf)] = combineddf.iloc[idx]
            removelist.append(idx)
        if (str(combineddf['Sample_Name'][idx]).startswith(watername)):
            waterdf.loc[len(waterdf)] = combineddf.iloc[idx]
            removelist.append(idx)
    # remove those rows from the combineddf
    combineddf.drop(removelist, inplace=True)
    combineddf.reset_index(inplace=True)
    # now combine the control dataframes, adding a control type column
    pcdf['type'] = 'PC'
    celldf['type'] = 'Cells'
    waterdf['type'] = 'Water'
    ctrldf = pd.concat([pcdf, celldf, waterdf], ignore_index=True)
    # need to drop a bunch of columns for ease of reading
    ctrldf.drop(['Column', 'Row', 'Base1', 'Base2', 'Base3', 'n1', 'n2', 'n3'],
                axis=1, inplace=True)
    # now let's make a pass/fail table for these
    # PC should all be positive (ct < 40), cells only hRNA pos, and water all negative
    ctrlcatdf = pd.DataFrame(columns=('Ctrl_Type', 'Status', 'JStatus'))
    pcres = testCtrl(pcdf, 'pos', 40.0)
    ctrlcatdf.loc[0] = ['PC', pcres[0], pcres[1]]
    cellres = testCtrl(celldf, 'neg', 40.0)
    ctrlcatdf.loc[1] = ['Cells', cellres[0], cellres[1]]
    waterres = testCtrl(waterdf, 'bad', 40.0)
    ctrlcatdf.loc[2] = ['Water', waterres[0], waterres[1]]
    return hv.Table(ctrldf).opts(width=900, height=600), \
           hv.Table(ctrlcatdf).opts(width=300, height=300)

def get_networks_table(self) -> holoviews.Table:
    data = collections.defaultdict(list)
    for field in self.fields:
        data['x'].append(field.coordinates[0])
        data['y'].append(field.coordinates[1])
        data['network'].append(
            self.game.networks.index(field.network) if field.network else None)
    table = holoviews.Table(data, kdims=['x', 'y'], vdims=['network'])
    return table

def visualize_loc_history(self):
    df = pd.DataFrame(self.loc_hist, columns=["date", "x", "y"])
    df["date"] = df["date"].apply(lambda d: d.strftime("%c"))
    scatter = hv.Scatter(df, kdims=["x", "y"], vdims=["date", "date"]).opts(
        tools=["hover"], color="date", cmap="Blues", size=6,
        padding=0.05, legend_position="bottom")
    line = hv.Curve(scatter, label="path")
    table = hv.Table(self.as_df().T.reset_index().rename(
        columns={"index": "Field", 0: "Details"}))
    return table + (line * scatter).opts(width=500, height=500,
                                         xlim=(0, self.cc.length),
                                         ylim=(0, self.cc.breadth))

def plot(nwbfile):
    macro_df = pd.read_csv('http://assets.holoviews.org/macro.csv', '\t')
    key_dimensions = [('year', 'Year'), ('country', 'Country')]
    value_dimensions = [('unem', 'Unemployment'), ('capmob', 'Capital Mobility'),
                        ('gdp', 'GDP Growth'), ('trade', 'Trade')]
    macro = hv.Table(macro_df, key_dimensions, value_dimensions)
    gdp_unem_scatter = macro.to.scatter('Year', ['GDP Growth', 'Unemployment'])
    gdp_unem_scatter = gdp_unem_scatter.overlay('Country')
    return gdp_unem_scatter

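# --- Rendering sketch (assumptions: the Bokeh backend is active and the macro.csv
# --- URL above is reachable; `nwbfile` is unused by the body above, so None is
# --- passed purely as a placeholder).
import holoviews as hv

hv.extension('bokeh')

overlay = plot(nwbfile=None)
hv.save(overlay, 'gdp_unemployment.html')  # write a standalone Bokeh HTML file
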
def _process(self, dset, key=None):
    ds = filter_dset(dset,
                     filter_range=self.p.filter_range,
                     flags=self.p.flags,
                     bad_flags=self.p.bad_flags)
    if self.p.ydim is None:
        cols = [dim.name for dim in dset.vdims]
    else:
        cols = [self.p.ydim]
    df = ds.data[cols]
    return hv.Table(df.describe().loc[['count', 'mean', 'std']])

def plot_view(self):
    data = self.df[self.feature.value]
    data = data.loc[data.first_valid_index():]
    if self.log_scale.value:
        data = pd.Series(np.log(data.values), index=data.index)
        data.name = f"{self.feature.value} - log of closing price"
    else:
        data.name = f"{self.feature.value} - closing price"

    df_ma_15 = data.rolling(window=15).mean()
    df_ma_30 = data.rolling(window=30).mean()
    df_ma_15.name = "moving average - 15 days"
    df_ma_30.name = "moving average - 30 days"

    tag_plot = data.hvplot.line(title=self.feature.value,
                                xlabel="Timestamp",
                                height=self.plot_height,
                                width=self.plot_width)
    ma_15 = df_ma_15.hvplot.line(title=self.feature.value,
                                 xlabel="Timestamp",
                                 height=self.plot_height,
                                 width=self.plot_width)
    ma_30 = df_ma_30.hvplot.line(title=self.feature.value,
                                 xlabel="Timestamp",
                                 height=self.plot_height,
                                 width=self.plot_width)

    min_ = self.tag_bounds[self.feature.value][0]
    max_ = self.tag_bounds[self.feature.value][1]
    if self.log_scale.value:
        min_, max_ = data.min(), data.max()
    histogram = data.hvplot.hist(bins=100,
                                 bin_range=(min_, max_),
                                 muted_alpha=0,
                                 legend="top",
                                 height=400,
                                 width=200,
                                 title="Histogram")
    right_col = [histogram]

    frame = data.describe().reset_index()
    description_table = hv.Table(frame).opts(height=250, width=400)
    second_plot = hv.Layout(histogram + description_table).cols(2)
    plots = hv.Layout(((tag_plot * ma_15 * ma_30) << histogram) +
                      description_table).cols(1)
    return plots