def build_pies():
    """Generate pie-chart components, one per series/y-column pair.

    Pie charts cannot draw negative slices, so any (x, y) pairs with a
    negative y-value are collected and surfaced in an alert rendered above
    the chart.

    Reads the module-level ``data``, ``x``, ``y`` and ``inputs`` values.

    :yield: a chart component, wrapped in a row with an error alert when
        negative values were found
    """
    for series_key, series in data['data'].items():
        for y2 in y:
            # Collect values a pie chart cannot represent (idiomatic
            # comprehension instead of a manual append loop).
            negative_values = [
                '{} ({})'.format(x_val, y_val)
                for x_val, y_val in zip(series['x'], series[y2])
                if y_val < 0
            ]
            chart = wrapper(
                dcc.Graph(
                    id='pie-{}-graph'.format(series_key),
                    figure={
                        'data': [
                            go.Pie(**dict_merge(
                                dict(labels=series['x'], values=series[y2]),
                                name_builder(y2, series_key)))
                        ],
                        'layout': build_layout(
                            build_title(x, y2, group=series_key,
                                        agg=inputs.get('agg')))
                    }))
            if negative_values:  # truthiness instead of len(...) check
                error_title = (
                    'The following negative values could not be represented within the {}Pie chart'
                ).format('' if series_key == 'all' else '{} '.format(series_key))
                error_div = html.Div(
                    [
                        html.I(className='ico-error'),
                        html.Span(error_title),
                        html.Div(html.Pre(', '.join(negative_values)),
                                 className='traceback')
                    ],
                    className='dtale-alert alert alert-danger')
                yield html.Div([
                    html.Div(error_div, className='col-md-12'),
                    html.Div(chart, className='col-md-12')
                ], className='row')
            else:
                yield chart
def update_progress(n_intervals):
    """Poll Redis for image-save progress and the RQ job status.

    :param n_intervals: interval tick count (unused; only triggers the poll)
    :return: tuple of (progress, animated, progress_fraction, progress_label,
        download_url, class_name, message) driving the progress UI
    """
    progress = rds.get_value(f'{OPLSModel.redis_prefix}_image_save_progress')
    progress_fraction = rds.get_value(
        f'{OPLSModel.redis_prefix}_image_save_progress_fraction')
    label_text = rds.get_value(f'{OPLSModel.redis_prefix}_image_save_label')
    job_id = rds.get_value(f'{OPLSModel.redis_prefix}_job_id').decode('utf-8')
    job = Job.fetch(job_id, rds.get_redis())
    job_status = job.get_status()
    # Redis returns raw bytes; normalize to native Python types.
    if isinstance(label_text, bytes):
        label_text = label_text.decode('utf-8')
    if isinstance(progress, bytes):
        progress = int(float(progress))
    if isinstance(progress_fraction, bytes):
        progress_fraction = progress_fraction.decode('utf-8')
    if job_status == 'finished':
        message = dbc.Alert(f'Prepared plots file as {job.result}.',
                            color='success',
                            dismissable=True)
        class_name = 'btn btn-success'
        path = job.result
        progress_label = dbc.FormText(label_text, color='success')
        animated = False
    elif job_status == 'failed':
        # Reuse the status fetched above instead of querying the job again
        # (the original called job.get_status() a second time here).
        message = dbc.Alert(
            # plain string: the f-prefix had no placeholders (F541)
            ['Error occurred.', html.Pre(job.exc_info)],
            color='danger',
            dismissable=True)
        class_name = 'btn btn-secondary disabled'
        path = ''
        progress_label = dbc.FormText(label_text, color='danger')
        animated = False
    else:
        message = []
        class_name = 'btn btn-secondary disabled'
        path = ''
        progress_label = dbc.FormText(label_text)
        animated = True
    return progress, animated, progress_fraction, progress_label, url_for(
        'api.download_temporary_file', path=path), class_name, message
def layout():
    """Build the admin page: cache viewer plus the clear-action buttons."""
    cache_section = html.Div([
        html.B("ABM Model Cache"),
        html.Pre(f'{facade.rs.list(CompartmentalModel.ID)}',
                 id='cache_contents'),
        html.Button('Clear', id='clear_button'),
    ])
    redis_section = html.Div([
        html.Button('Clear redis', id='clear_redis_button'),
        html.Div(id='notification_div1'),
    ])
    cache_clear_section = html.Div([
        html.Button('Clear cache', id='clear_cache_button'),
        html.Div(id='notification_div2'),
    ])
    return html.Div(
        [html.H3('Admin page'), cache_section, redis_section,
         cache_clear_section],
        style={'margin': 10})
def parse_contents(contents, filename, date):
    """Render an uploaded image preview plus a truncated raw-content dump."""
    uploaded_at = datetime.datetime.fromtimestamp(date)
    raw_preview = html.Pre(
        contents[0:200] + '...',
        style={'whiteSpace': 'pre-wrap', 'wordBreak': 'break-all'})
    return html.Div([
        html.H5(filename),
        html.H6(uploaded_at),
        # HTML images accept base64 encoded strings in the same format
        # that is supplied by the upload
        html.Img(src=contents, style={'width': 'auto', 'height': '400px'}),
        html.Hr(),
        html.Div('Raw Content'),
        raw_preview,
    ])
def display_click_data(clickData, DropValue, ValT1X1, ValT1Y1, ValT1X2, ValT1Y2, ValT2X1, ValT2Y1, ValT2X2, ValT2Y2): Val_Tangente = { 'T1X1': ValT1X1, 'T1Y1': ValT1Y1, 'T1X2': ValT1X2, 'T1Y2': ValT1Y2, 'T2X1': ValT2X1, 'T2Y1': ValT2Y1, 'T2X2': ValT2X2, 'T2Y2': ValT2Y2 } #logging.debug(Val_Tangente) #logging.debug(clickData) Val_Tangente[DropValue] = 'Test' return html.Div([ html.Pre(json.dumps(clickData, indent=2), style=styles['pre']), html.Div([value]) ])
def generate_html(n, backgroundcolor='yellow', textcolor='black'):
    """Build the HTML children for infobox *n*, colorizing tagged phrases."""
    entry = data.get(n)
    if not entry:
        return [html.H3("No data")]
    text = entry.get('text', "Somehow no text key")
    header = entry.get('header', "Somehow no header key")
    rendered = []
    for line in text:
        for phrase in get_phrases(line):
            # First element carries the color tag; strip its delimiters.
            color = phrase[0][1:-1]
            if color:
                rendered.append(html.Span(phrase[1], style={'color': color}))
            else:
                rendered.append(phrase[1])
        rendered.append(html.Br())
    span_style = {'fontSize': '20px', 'color': textcolor}
    div_style = {
        'fontSize': '16px',
        'backgroundColor': backgroundcolor,
        'borderWidth': 'medium',
        'borderColor': 'black',
        'borderStyle': 'solid'
    }
    # complete kluge to present table (infobox=11) with monospaced font
    if n != 11:
        return [
            html.Div(
                [html.H3(header), html.Span(rendered, style=span_style)],
                style=div_style)
        ]
    return [
        html.Div(
            [html.H3(header), html.Pre(rendered, style={'fontSize': '20px'})],
            style=div_style)
    ]
def test_surface_selector(dash_duo):
    """Integration test: SurfaceSelector's storage propagates to a Pre element.

    Relies on module-level ``surface_context`` and ``return_value`` values.
    """
    app = dash.Dash(__name__)
    app.config.suppress_callback_exceptions = True
    realizations = pd.read_csv("tests/data/realizations.csv")
    s = SurfaceSelector(app, surface_context, realizations)
    # A hidden-ish Pre acts as the callback sink we can assert against.
    app.layout = html.Div(
        children=[s.layout, html.Pre(id="pre", children="ok")])

    @app.callback(Output("pre", "children"), [Input(s.storage_id, "children")])
    def _test(data):
        # Round-trip through json to normalize formatting before comparison.
        return json.dumps(json.loads(data))

    dash_duo.start_server(app)
    dash_duo.wait_for_contains_text("#pre", json.dumps(return_value), timeout=4)
def parse_contents(contents, filename, date):
    """Parse an uploaded CSV/Excel file and render a column-type summary.

    :param contents: upload payload ("<content-type>,<base64 data>")
    :param filename: uploaded file name, used to pick the parser
    :param date: upload timestamp (currently unused)
    :return: Div with the dtype summary table and a raw-content preview,
        or an error Div when parsing fails
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        else:
            raise Exception('unknown extension')
    except Exception:
        return html.Div(['There was an error processing this file.'])
    # Summarize the column dtypes for display.
    df2 = pd.DataFrame(df.dtypes).reset_index()
    df2.columns = ['Column', 'Type']
    return html.Div([
        html.H5(filename),
        html.Div(dash_table.DataTable(
            data=df2.to_dict('records'),
            # BUG FIX: columns must describe df2 (the frame actually shown),
            # not the original df — the mismatch left the table empty.
            columns=[{'name': i, 'id': i} for i in df2.columns]
        )),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def execute_code(n_clicks, code):
    """Submit code to the local execution API and render its outputs."""
    if n_clicks < 1 or code is None:
        raise PreventUpdate
    try:
        resp = requests.post("http://127.0.0.1:5000/code/kostas",
                             json={"code": code})
        payload = resp.json()
        code_output = payload["code_output"]
        console_output = str(payload["console_output"])
        # One titled block per produced value.
        output_divs = [
            html.Div([html.H4(name),
                      html.Pre("Output: " + str(result)),
                      html.Hr()])
            for name, result in code_output.items()
        ]
        return [output_divs, str(console_output)]
    except Exception as e:
        return no_update, str(e)
def update_gs_input(spin_mode, kppra, gs_type, json_structure):
    """Build an Abinit ground-state (or relax) input from a JSON structure
    and render it with a copy-to-clipboard button.

    :param spin_mode: spin mode forwarded to ``abilab.gs_input``
    :param kppra: k-point density per reciprocal atom
    :param gs_type: "relax" adds structural-relaxation variables
    :param json_structure: structure serialized as JSON; falsy -> error panel
    """
    if not json_structure:
        return structure_undefined_error()
    structure = abilab.mjson_loads(json_structure)
    pseudos = os.path.join(abidata.pseudo_dir, "14si.pspnc")
    # Build input for GS calculation
    # ecut must be specified because this pseudopotential does not provide hints for ecut.
    try:
        gs_inp = abilab.gs_input(structure,
                                 pseudos,
                                 kppa=kppra,
                                 ecut=8,
                                 spin_mode=spin_mode,
                                 smearing=None)
        gs_inp.pop_vars(("charge", "chksymbreak"))
        if gs_type == "relax":
            gs_inp.set_vars(optcell=2, ionmov=2, ecutsm=0.5, dilatmx=1.05)
        #multi = ebands_input(structure, pseudos,
        #                     kppa=kppra, nscf_nband=None, ndivsm=15,
        #                     ecut=8, pawecutdg=None, scf_nband=None, accuracy="normal", spin_mode=spin_mode,
        #                     smearing="fermi_dirac:0.1 eV", charge=None, dos_kppa=None):
        gs_inp.set_mnemonics(False)
    except Exception as exc:
        return html.Div([
            dbc.Jumbotron([
                html.H2("There was an error processing this file. %s" %
                        str(exc),
                        className="text-danger")
            ])
        ])
    s = DangerouslySetInnerHTML(gs_inp._repr_html_())
    # NOTE(review): the clipboard button and the Pre below share the id
    # "copy_gsinput" — duplicate component ids are invalid in Dash. Confirm
    # which element the clipboard helper targets and rename the other.
    return html.Div([
        copy_to_clipboard_button(id="copy_gsinput"),
        html.Hr(),
        html.Pre(s, id="copy_gsinput"),  # className="text-sm-left",
    ])
def parse_contents(contents, filename, date):
    """Render the uploaded image, two precomputed base64 images and a raw dump.

    Reads the module-level ``matimg`` and ``fcodes`` base64 strings.
    """
    # Parenthesized print behaves identically on Python 2 and 3 for a
    # single argument; the original bare print statements were py2-only.
    print(matimg[:100])
    print(fcodes[:100])
    print(contents[:100])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        # HTML images accept base64 encoded strings in the same format
        # that is supplied by the upload
        html.Img(src='data:image/png;base64,{}'.format(matimg)),
        html.Img(src='data:image/png;base64,{}'.format(fcodes)),
        html.Img(src=contents),
        html.Hr(),
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def mask_content(self):
    """Build the mask-selection panel: help text, region button, status line."""
    instructions = dcc.Markdown("""
        To mask a region, click on the **Select region button** then select your region on the graph.
        To mask one of the trace, click on it (selected trace are highlihted.
        Confirmation will be asked before saving the masks.
        """, className='markdown')
    # roi x limits
    controls = dbc.InputGroup([
        dbc.InputGroupAddon("Masks", id='masks', addon_type="prepend"),
        dbc.Button("Select region",
                   color="secondary",
                   outline=True,
                   size='sm',
                   id='select-mask')
    ], size='sm')
    return [
        instructions,
        controls,
        html.Pre(children="No masks selected", id='text-data'),
        dcc.ConfirmDialog(id='confirm-mask', ),
    ]
def modify_text():
    """ Text describing current operations with record of previous operations as tooltip """
    # Current operations, rendered prominently.
    operations_pre = html.Pre(
        id='show_operations',
        style={
            'fontWeight': 'bold',
            'fontSize': 14,
            'textAlign': 'center',
            'marginLeft': '3%'
        })
    # Tooltip explaining previous operations
    operations_tooltip = dbc.Tooltip(
        id='prev_operations',
        target='show_operations',
        placement='bottom',
        style={'fontSize': 12})
    return html.Div([operations_pre, operations_tooltip])
def node_info(self, state):
    """Assemble the detail components shown for a graph node/state.

    :param state: state object exposing unique_id, image_paths,
        merged_states, atd_records, unique_features, features and use_cases
    :return: list of Dash HTML components
    """
    # TODO for presentation purposes use image_atd_paths
    ui_elem = [html.P(f"Node: {state.unique_id}")]
    ui_elem += [
        html.Img(id=f"node_img_{state.unique_id}",
                 src=get_state_img(image_path),
                 style={'width': '600px'})
        for image_path in state.image_paths
    ]
    # Constant labels are plain strings: the f-prefix was unnecessary (F541).
    ui_elem.append(html.P("Merged states:"))
    ui_elem += [
        html.Pre(f"{merged_state}") for merged_state in state.merged_states
    ]
    ui_elem.append(html.P("ATDs:"))
    ui_elem += [html.P(atd_rec.graph_info()) for atd_rec in state.atd_records]
    ui_elem += [
        html.P(f"Features (pruned): {state.unique_features}"),
        html.P(f"Features: {state.features}"),
        html.P(f"Use cases: {[uc.to_string_short() for uc in state.use_cases]}"),
    ]
    return ui_elem
def __init__(self, contents: str, filename: str, time_stamp: int,
             df: pd.DataFrame):
    """Build the preview component for an uploaded tabular file."""
    uploaded_at = datetime.datetime.fromtimestamp(time_stamp)
    table = dash_table.DataTable(
        data=df.to_dict('rows'),
        columns=[{'name': col, 'id': col} for col in df.columns])
    self.component = html.Div([
        html.H5(filename),
        html.H6(uploaded_at),
        table,
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def launch_manual_collector(self) -> None:
    """Launch a Dash app for manually labeling candlestick data.

    Blocks on ``app.run_server`` until the server is stopped. The hidden
    components act as callback sinks for side-effect-only callbacks.
    """
    self.get_new_candlesticks()
    fig = self.get_fig()
    app = dash.Dash()
    app.layout = html.Div([
        dcc.Graph(id='graph', figure=fig),
        dcc.Interval(id='graph-update', interval=1000),
        html.Pre(id='click-data', style={'display': 'none'}),
        html.Button('Get new data', id='retrieve-data-button', n_clicks=0),
        html.Div(id='retrieve-data-output', style={'display': 'none'}),
    ])

    @app.callback(Output('click-data', 'children'),
                  [Input('graph', 'clickData')])
    def display_click_data(click_data):
        # Side-effect only: record the clicked candlestick; the hidden Pre
        # receives the (None) return value.
        if click_data is not None:
            self.click_candlestick(click_data['points'][0])

    @app.callback(Output(component_id='graph', component_property='figure'), [
        Input(component_id='graph-update', component_property='n_intervals')
    ])
    def update_graph(_):
        # Re-render the figure on every interval tick.
        return self.get_fig()

    @app.callback(
        Output(component_id='retrieve-data-output',
               component_property='children'),
        [
            Input(component_id='retrieve-data-button',
                  component_property='n_clicks')
        ])
    def retrieve_data(n_clicks):
        # Ignore the initial callback fired at page load (n_clicks == 0).
        if n_clicks <= 0:
            return
        log.info('Processing collected data and getting new candlesticks')
        self.process_collected_data()
        self.get_new_candlesticks()
        return

    app.run_server(debug=True, use_reloader=False)
def update_graph(contents, filename):
    """Build the scatter figure for the uploaded file.

    NOTE(review): ``table`` is constructed below but never returned or used —
    looks like leftover/WIP code; confirm whether it should be a second
    output of this callback before removing it.
    """
    fig = {
        'layout': go.Layout(plot_bgcolor=colors["graphBackground"],
                            paper_bgcolor=colors["graphBackground"])
    }
    if contents:
        # Upload component delivers lists; unwrap the single entry.
        contents = contents[0]
        filename = filename[0]
        df = parse_data(contents, filename)
        print(df)
        # trace1 = go.Scatter(
        #     x=df.iloc[:,0],
        #     y=df.iloc[:,1],
        #     mode='lines+markers',
        # )
        # fig['data'] = [traces1]
        df = df.set_index(df.columns[0])
        # cufflinks' iplot(asFigure=True) builds the traces from the frame.
        fig['data'] = df.iplot(asFigure=True,
                               kind='scatter',
                               mode='lines+markers',
                               size=1)
        table = html.Div([
            html.H5(filename),
            dash_table.DataTable(data=df.to_dict('rows'),
                                 columns=[{
                                     'name': i,
                                     'id': i
                                 } for i in df.columns]),
            html.Hr(),
            html.Div('Raw Content'),
            html.Pre(contents[0:200] + '...', style={
                'whiteSpace': 'pre-wrap',
                'wordBreak': 'break-all'
            })
        ])
    return fig
def parse_contents(contents, filename, date):
    """Parse an uploaded CSV/Excel/TSV file and render it as a DataTable.

    :param contents: upload payload ("<content-type>,<base64 data>")
    :param filename: uploaded file name, used to pick the parser
    :param date: upload timestamp shown in the header
    :return: Div with the rounded table and raw preview, or an error Div
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        elif 'tsv' in filename:
            # Assume that the user uploaded a TSV file
            # this doesn't seem to work: when TSV is uploaded, table does not display
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), sep='\t')
        else:
            # FIX: previously df stayed unbound for other extensions and a
            # NameError escaped below; fail inside the try so the user sees
            # the friendly error message instead.
            raise ValueError('unsupported file extension')
    except Exception as e:
        print(e)
        return html.Div(['There was an error processing this file.'])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        dash_table.DataTable(
            data=df.round(1).to_dict('records'),
            columns=[{'name': i, 'id': i} for i in df.columns]
        ),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def parse_contents(contents, filename, date):
    """Parse an uploaded CSV/Excel file and render it with row highlighting.

    :param contents: upload payload ("<content-type>,<base64 data>")
    :param filename: uploaded file name, used to pick the parser
    :param date: upload timestamp shown in the header
    :return: Div with the DataTable and raw preview, or an error Div
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        else:
            # FIX: previously df stayed unbound for other extensions and a
            # NameError escaped below; fail inside the try instead.
            raise ValueError('unsupported file extension')
    except Exception as e:
        print(e)
        return html.Div(['There was an error processing this file.'])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        dash_table.DataTable(data=df.to_dict('rows'),
                             columns=[{
                                 'name': i,
                                 'id': i
                             } for i in df.columns],
                             # highlight the fifth data row
                             style_data_conditional=[{
                                 "if": {
                                     "row_index": 4
                                 },
                                 "backgroundColor": "#3D9970",
                                 'color': 'white'
                             }],
                             n_fixed_rows=1),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def parse_contents(contents, filename, date):
    """Append an uploaded CSV/Excel dataset to the crime-data workbook.

    Reads the module-level ``df`` as the existing dataset and writes the
    combined frame back to the Excel file on disk.

    :return: confirmation Div, or an error Div when parsing/writing fails
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            newdf = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            newdf = pd.read_excel(io.BytesIO(decoded))
        else:
            # FIX: previously an unknown extension skipped both branches but
            # still reported "Dataframe added!" without writing anything.
            raise ValueError('unsupported file extension')
        # FIX: DataFrame.append was removed in pandas 2.0; concat is the
        # direct equivalent.
        updatedf = pd.concat([df, newdf])
        updatedf.to_excel('Data/crime against women 2001-2020.xlsx',
                          index=False)
    except Exception as e:
        print(e)
        return html.Div(['There was an error processing this file.'])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        html.H4("Dataframe added!"),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def build_error(error, tb):
    """
    Returns error/traceback information in standard component with styling

    :param error: execption message
    :type error: str
    :param tb: traceback
    :type tb: str
    :return: error component
    :rtype: :dash:`dash_html_components.Div <dash-html-components/div>`
    """
    # ChartBuildingError carries its own message/details; unwrap them.
    if isinstance(error, ChartBuildingError):
        if error.details:
            tb = error.details
        error = error.error
    children = [
        html.I(className='ico-error'),
        html.Span(str(error)),
        html.Div(html.Pre(str(tb)), className='traceback'),
    ]
    return html.Div(children, className='dtale-alert alert alert-danger')
def summary_window(id, text, component_theme):
    """Return a themed panel holding the (callback-populated) summary Pre.

    Note: ``text`` is accepted but not rendered in this body; the Pre with
    id 'group_summary' is filled by a callback.
    """
    heading = html.H5(children=["Summary"], style={"margin-left": "5px"})
    summary_pre = html.Pre(
        id='group_summary',
        style={
            "height": "150px",
            "border": f"5px solid {component_theme['bg_color']}",
            'background-color': "black",
            'color': component_theme['text_color'],
            'overflowX': 'auto',
            'overflowY': 'scroll',
        })
    return html.Div(
        id=id,
        children=[heading, summary_pre],
        style={
            'background-color': component_theme['bg_color'],
            'color': component_theme['text_color']
        })
def parse_contents(contents, filename, date):
    """Parse an uploaded CPRM CSV or microprobe Excel file via qmin.

    :return: Div with the parsed DataTable and raw preview, or an error Div
    """
    import qmin
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file is CPRM style (evandro)
            df = qmin.test_cprm_datasets_web(
                io.StringIO(decoded.decode('ISO-8859-1')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            # This excel is format of Microssonda!!!!
            df = qmin.load_data_ms_web(io.BytesIO(decoded))
        else:
            # FIX: previously df stayed unbound for other extensions and a
            # NameError escaped the handler below.
            raise ValueError('unsupported file extension')
    except Exception as e:
        print(e)
        return html.Div(['There was an error processing this file.'])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        dash_table.DataTable(data=df.to_dict('records'),
                             columns=[{
                                 'name': i,
                                 'id': i
                             } for i in df.columns]),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
def parse_contents(contents, filename, date):
    """Extract a color palette from an uploaded image and render both images.

    ``get_colors`` writes its palette image to ``outfile.png``, which is
    read back and embedded as a base64 data URI.
    """
    # upload time
    date_u = datetime.datetime.fromtimestamp(date)
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    image_path = BytesIO(decoded)
    # color extraction (side effect: writes outfile.png)
    get_colors(infile=image_path)
    image_filename = 'outfile.png'  # replace with your own image
    # FIX: use a context manager so the file handle is closed promptly
    # instead of being leaked until GC.
    with open(image_filename, 'rb') as palette_file:
        encoded_image = base64.b64encode(palette_file.read())
    return html.Div([
        html.Div([
            html.Div([
                html.H5(['File Name : ' + filename],
                        style={'margin-top': '5%'}),
                html.H5(['Upload date : ' + str(date_u.year) + "/" +
                         str(date_u.month) + "/" + str(date_u.day)],
                        style={'margin-bottom': '5%'}),
                html.Img(src=contents,
                         style={'width': '200px'},
                         className="animated bounceInDown")
            ], style={'float': 'left', 'width': '50%'}),
            html.Div([
                # extracted image
                html.H5(['Result']),
                html.Img(src='data:image/png;base64,{}'.format(
                    encoded_image.decode()),
                         style={'width': '200px'})
            ], style={'float': 'left', 'width': '50%'})
        ]),
        html.Div([
            html.Hr(),
            html.Div('Raw Content'),
            html.Pre(contents[0:30] + '...', style={
                'whiteSpace': 'pre-wrap',
                'wordBreak': 'break-all'
            }),
            html.Hr(),
        ])
    ])
def display_click_point(clickData, dataset, iterations, perplexity, pca_dim,
                        learning_rate):
    """Show the original (pre-reduction) vector for a clicked 3-D point."""
    if not clickData:
        return None
    try:
        path_parts = [
            "embeddings",
            str(dataset),
            "iterations_" + str(iterations),
            "perplexity_" + str(perplexity),
            "pca_" + str(pca_dim),
            "learning_rate_" + str(learning_rate),
            "data.csv",
        ]
        embedding_df = pd.read_csv(PATH.joinpath(*path_parts),
                                   encoding="ISO-8859-1")
    except FileNotFoundError as error:
        print(
            error,
            "\nThe dataset was not found. Please generate it using generate_embeddings.py",
        )
        return
    # Convert the clicked point into a float64 numpy array.
    click_point_np = np.array(
        [clickData["points"][0][axis] for axis in ["x", "y", "z"]]
    ).astype(np.float64)
    # Boolean mask of the clicked point; true at exactly one row when the
    # point exists in the embedding set.
    bool_mask_click = (
        embedding_df.loc[:, "x":"z"].eq(click_point_np).all(axis=1))
    if bool_mask_click.any():
        clicked_idx = embedding_df[bool_mask_click].index[0]
        # Look up the original vector (before dimension reduction) by index.
        origin_vector = origin_datas[dataset].iloc[clicked_idx]
        return html.Pre(children=pprint.pformat(origin_vector.to_dict()))
def parse_contents(contents, filename, date):
    """Run the captioning model on the uploaded image and render the result.

    Side effects: rebinds the module-level ``res`` and appends the trimmed
    caption to the module-level ``text_list``.
    """
    global res
    # ``main`` presumably returns caption markup; res[7:-5] strips a fixed
    # prefix/suffix — TODO(review): confirm against main()'s output format.
    res = main('data/resized2014/' + filename)
    text_list.append(res[7:-5])
    return html.Div([
        html.H5("File Name:" + filename),
        html.H6("Time Stamp:" + str(datetime.datetime.fromtimestamp(date))),
        # HTML images accept base64 encoded strings in the same format
        # that is supplied by the upload
        html.Img(src=contents,
                 style={
                     # NOTE(review): 'position' is set twice in this dict;
                     # the later 'relative' value wins.
                     'position': 'bottom-left',
                     "width": "25%",
                     "height": "30%",
                     "margin-left": "500px",
                     "margin-bottom": "0px",
                     "padding": "0px",
                     'text-align': "center",
                     'horizontal-align': 'middle',
                     'position': 'relative'
                 }),
        html.Hr(),
        html.Div('Raw Content:', style={
            'color': "Blue",
            'textAlign': 'center'
        }),
        # html.P( res+ '...', style={
        #     'whiteSpace': 'pre-wrap',
        #     'wordBreak': 'break-all', 'margin-left':"380px",'color':"Black"
        # }),
        dcc.Textarea(placeholder='Enter a value...',
                     value=res[7:-5],
                     style={'width': '100%'}),
        html.Hr(),
        html.Pre(id='hover-data', style=styles1['pre']),
    ])
def parse_contents(contents, filename, date):
    """Parse an uploaded file and return ``(view_component, dataframe)``.

    On parse failure only an error Div is returned (no tuple) — callers
    should handle both shapes.
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file.
            # NOTE: this branch reads from disk via prepared_csv(filename),
            # not from the decoded upload payload.
            df = prepared_csv(filename)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        else:
            # FIX: previously df stayed unbound for other extensions and a
            # NameError escaped when building the view below.
            raise ValueError('unsupported file extension')
    except Exception as e:
        print(e)
        return html.Div(['There was an error processing this file.'])
    view = html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        dash_table.DataTable(
            data=df.to_dict('records'),
            columns=[{'name': i, 'id': i} for i in df.columns]
        ),
        html.Hr(),  # horizontal line
        # For debugging, display the raw contents provided by the web browser
        html.Div('Raw Content'),
        html.Pre(contents[0:200] + '...', style={
            'whiteSpace': 'pre-wrap',
            'wordBreak': 'break-all'
        })
    ])
    return (view, df)  # tuple: the caller unpacks both values
def post_results(n_clicks, name, analysis_ids):
    """Persist PCA results under *name* for the given analysis ids."""
    if not n_clicks:
        raise PreventUpdate('Callback triggered with no action.')
    pca_data = PCAModel()
    # Normalize a scalar id into a one-element list (EAFP).
    try:
        iter(analysis_ids)
    except TypeError:
        analysis_ids = [analysis_ids]
    try:
        return pca_data.post_results(name, analysis_ids)
    except Exception as e:
        alert = dbc.Alert(
            [
                html.P([html.Strong('Error: '), f'{e}']),
                html.Strong('Traceback:'),
                html.P(
                    html.Pre(traceback.format_exc(), className='text-white')),
            ],
            color='danger',
            dismissable=True)
        return [alert]
def parse_contents(contents, filename, dates):
    """Parse an uploaded CSV/Excel dilatometry file and plot it.

    Returns None when no contents were supplied (initial callback fire).
    """
    if contents is not None:
        content_type, content_string = contents.split(',')
        print(filename)
        decoded = base64.b64decode(content_string)
        try:
            if 'csv' in filename:
                # Assume that the user uploaded a CSV file
                # NOTE(review): this branch does not rename columns, so the
                # df.Température access below assumes the CSV already has
                # those headers — confirm.
                df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            elif 'xls' in filename:
                # Assume that the user uploaded an excel file
                df = pd.read_excel(io.BytesIO(decoded),
                                   sheet_name='Données brutes',
                                   index_col=None,
                                   header=None)
                df.columns = ['Temps', 'Température', 'Dilatation']
            else:
                # FIX: previously df stayed unbound for other extensions and
                # a NameError escaped below.
                raise ValueError('unsupported file extension')
        except Exception as e:
            print(e)
            return html.Div(['There was an error processing this file.'])
        return html.Div([
            html.H5(filename),
            #html.H6(datetime.datetime.fromtimestamp(date)),
            # HTML images accept base64 encoded strings in the same format
            # that is supplied by the upload
            html.Hr(),
            dcc.Graph(id='trc_graph',
                      figure={'data': [{'x': df.Température,
                                        "y": df.Dilatation,
                                        'type': 'Scatter',
                                        'name': 'Test'}],
                              'layout': {'title': 'Test Titre'}}),
            html.Hr(),
            html.Div('Raw Content'),
            html.Pre(contents[0:200] + '...', style={
                'whiteSpace': 'pre-wrap',
                'wordBreak': 'break-all'
            })
        ])
def update_table(contents, filename):
    """Render the uploaded file as a DataTable plus a raw-content preview."""
    table = html.Div()
    if contents:
        # Upload component delivers lists; unwrap the single entry.
        upload_contents = contents[0]
        upload_name = filename[0]
        df = parse_data(upload_contents, upload_name)
        column_spec = [{'name': col, 'id': col} for col in df.columns]
        table = html.Div([
            html.H5(upload_name),
            dash_table.DataTable(data=df.to_dict('rows'), columns=column_spec),
            html.Hr(),
            html.Div('Raw Content'),
            html.Pre(upload_contents[0:200] + '...', style={
                'whiteSpace': 'pre-wrap',
                'wordBreak': 'break-all'
            })
        ])
    return table