def _draw_aggrid_df(self, df) -> AgGrid:
    """Render *df* as a paginated AgGrid table and return the grid response."""
    builder = GridOptionsBuilder.from_dataframe(df)
    builder.configure_pagination(paginationPageSize=25)
    builder.configure_default_column(floatingFilter=True, selectable=False)
    builder.configure_grid_options(domLayout='normal')

    options = builder.build()
    options['rowStyle'] = {'background': 'white'}
    # Row styling is delegated to the project's injected JS hook.
    options['getRowStyle'] = self._aggrid_style_rows()

    # Experimental mode returns the filtered view and reacts to filter changes;
    # otherwise the grid returns data as given and reacts to value edits.
    experimental = self._state.experimental_ok
    return_mode = 'FILTERED' if experimental else 'AS_INPUT'
    update_mode = (GridUpdateMode.FILTERING_CHANGED
                   if experimental else GridUpdateMode.VALUE_CHANGED)

    return AgGrid(
        df,
        gridOptions=options,
        allow_unsafe_jscode=True,
        data_return_mode=return_mode,
        update_mode=update_mode,
        theme='streamlit',
        key='xplore_show_tbl',
    )
def _draw_aggrid_df(self, expander, df):
    """Draw *df* inside *expander*; show an info/error message instead when
    the result is empty or carries an 'error' column."""
    with expander:
        if df.empty:
            st.info('No matching result found')
            return
        if 'error' in df:
            st.error(df['error'][0])
            return

        builder = GridOptionsBuilder.from_dataframe(df)
        builder.configure_pagination(paginationPageSize=25)
        builder.configure_default_column(floatingFilter=True,
                                         editable=False,
                                         selectable=False)
        builder.configure_grid_options(domLayout='normal',
                                       preventDefaultOnContextMenu=True)

        # Scale the grid height with the number of rows.
        rows = df.shape[0]
        if rows == 1:
            height = 150
        elif rows < 4:
            height = 200
        else:
            height = 400

        # A random key forces a fresh grid widget on every rerun.
        AgGrid(
            df,
            height=height,
            gridOptions=builder.build(),
            allow_unsafe_jscode=True,
            update_mode=GridUpdateMode.NO_UPDATE,
            theme='streamlit',
            key=str(randint(1, 10000000)),
        )
def _draw_aggrid_df(self, df):
    """Render *df* in AgGrid, styling any 'error' column via injected JS."""
    builder = GridOptionsBuilder.from_dataframe(df)
    builder.configure_pagination(paginationPageSize=25)
    builder.configure_default_column(floatingFilter=True)
    if 'error' in df.columns:
        # Highlight error cells with the project's JS cell-style hook.
        builder.configure_column("error",
                                 floatingFilter=True,
                                 cellStyle=self._aggrid_style(),
                                 editable=False)
    builder.configure_grid_options(domLayout='normal',
                                   preventDefaultOnContextMenu=True)
    options = builder.build()
    options['getRowStyle'] = self._aggrid_style_rows(df)
    AgGrid(
        df,
        gridOptions=options,
        allow_unsafe_jscode=True,
        update_mode=GridUpdateMode.NO_UPDATE,
        theme='streamlit',
    )
def withinNYC_check(data):
    """Report mapped capital projects located outside the NYC borough boundaries.

    Reads the ``geospatial_check`` table from *data*, decodes each JSON
    ``result`` payload, and displays the ``projects_not_within_NYC`` records
    (if any) in an AgGrid table.
    """
    st.header("Mapped Capital Projects That Are Not in NYC")
    st.markdown("""
    We check whether all mapped capital projects are located within the
    NYC borough boundaries (water included).
    """)

    def fetch_dataframe(geo_check_records, field):
        # Return the records for *field* as a DataFrame. Using next() with a
        # default avoids the IndexError the original `[...][0]` raised when
        # the field was missing from the check results.
        records = next(
            (i["values"] for i in geo_check_records if i["field"] == field),
            None,
        )
        if records:
            return pd.DataFrame(records)
        return pd.DataFrame()

    df = data['geospatial_check']
    df["result"] = df["result"].apply(json.loads)
    geo_check_records = df.to_dict("records")
    if not geo_check_records:
        st.write("No such projects.")
    else:
        # Only the first record's result payload is inspected.
        geo_check = geo_check_records[0]['result']
        df = fetch_dataframe(geo_check, "projects_not_within_NYC")
        if df.empty:
            st.write("No such projects.")
        else:
            count = df.shape[0]
            AgGrid(df)
            st.write(
                f'There are {count} mapped projects that are not within the NYC borough boundaries water included.'
            )
def display_dataframe(self, field):
    """Show the outlier table for *field*, or a placeholder when empty.

    The descriptive markdown always renders first and the info box last,
    exactly as in both branches of the original implementation.
    """
    df = self.fetch_dataframe(field)
    st.markdown(self.markdown_dict[field])
    if df.empty:
        st.write("There are no outliers for this check.")
    else:
        AgGrid(df)
        st.write(f"There are {df.shape[0]} outliers in total.")
    st.info(self.info_dict[field])
def main():
    """Lab #3 UI: limiting state probabilities via the Kolmogorov equations."""
    st.header("Моделирование. Лабораторная работа №3")
    st.write("Предельные вероятности состояний. Уравнения Колмогорова")
    if st.checkbox("Показать ТЗ"):
        show_tz()
    c1, c2 = st.beta_columns(2)
    # N = number of system states; values = how to pre-fill the rate matrix
    # (ones vs. random values).
    N = c1.slider("Задайте количество состояний системы (N):",
                  min_value=1,
                  max_value=10,
                  value=5)
    values = c2.selectbox("Заполнить? (единицами, случайно):",
                          (1, "случайными значениями"))
    df = get_data(N, values)
    st.subheader("Введите значения интенсивности переходов (λ):")
    # Editable grid for the transition-intensity matrix λ.
    grid_return = AgGrid(
        df,
        editable=True,
        # sortable=False,
        # filter=False,
        # resizable=False,
        # defaultWidth=5,
        # fit_columns_on_grid_load=True,
        reload_data=False,
    )
    arr = grid_return["data"].to_numpy()

    # Compute the limiting (steady-state) probabilities.
    probas, start_probas = calc_probas(arr, N)
    st.write("Средний процент времени нахождения системы в предельном режиме в состоянии n:")
    for i in range(N):
        pr = round(probas[i], 2)
        perc = round(pr * 100, 2)
        st.write(f"S_{i} - {perc}%")
    output('Предельные вероятности:', 'p', probas)

    # Compute the stabilization time.
    start_probabilities = get_start_probabilities(N, all_equal=False)
    stabilization_time = calc_stabilization_times(arr.tolist(),
                                                  start_probas.tolist(),
                                                  probas, N)  # TODO fix
    output('Время стабилизации:', 't', stabilization_time)

    # Plot the probabilities as functions of time.
    times, probabilities_over_time = calc_probability_over_time(
        arr, start_probabilities, 5)
    plot_probability_over_time(probas, stabilization_time, times,
                               probabilities_over_time)  # TODO fix

    # Draw the transition graphs.
    G = nx.from_numpy_array(arr, create_using=nx.DiGraph)
    plot_graph(G)
    plot_graph2(G)
def app():
    """File-names metadata step: optionally augment the corpus table with
    metadata parsed from file names, via a kiara pipeline."""
    kiara = st.session_state["kiara"]
    corpus_table = st.session_state.data

    st.markdown(
        'Wait for file preview to be displayed, before proceeding to the next step'
    )
    st.markdown('*Temporary screen for file names metadata step*')
    st.markdown('*This module will be completed at a later stage *')

    choice = st.radio("Do your file names contain metadata?", ("no", "yes"))
    st.write("Supported pattern: '/sn86069873/1900-01-05/'")
    st.write("LCCN title information and publication date (yyyy-mm-dd)")

    if choice == 'no':
        st.session_state.metadata = False
        st.session_state.augmented_data = False
    elif choice == 'yes':
        # Load the augmentation pipeline description shipped with the project
        # and create a workflow from it.
        pipeline_path = os.path.join(
            os.path.dirname(__file__),
            "..",
            "..",
            "..",
            "newspaper_corpora",
            "augment_newspaper_corpora_table.json",
        )
        workflow = kiara.create_workflow(pipeline_path)
        # Feed our table into the workflow, then pull the augmented table out.
        workflow.inputs.set_value("value_id", corpus_table.id)
        augmented = workflow.outputs.get_value_obj("table")
        st.session_state.augmented_data = augmented
        st.session_state.metadata = True
        preview = augmented.get_value_data().to_pandas()
        st.write('Result preview')
        AgGrid(preview.head(50))
def files_select_ag_grid(add_current=False, mode="single"):
    """Show a selectable AgGrid of saved sessions and return the grid response.

    When *add_current* is true, a pseudo-entry for the in-progress analysis is
    prepended. *mode* is the AgGrid selection mode ('single'/'multiple').
    """
    names = [
        entry.replace(".pickle", "")
        for entry in list_dir("saved_sessions")
        if ".pickle" in entry
    ]
    mtimes = [
        get_m_time(os.path.join("saved_sessions", f"{name}.pickle"))
        for name in names
    ]
    # Optional free-text description stored next to each pickle.
    descriptions = []
    for name in names:
        desc_path = os.path.join("saved_sessions", f"{name}.txt")
        descriptions.append(
            open_file(desc_path).read() if file_exists(desc_path) else "")

    if add_current:
        names = ["Análise Atual"] + names
        mtimes = [""] + mtimes
        descriptions = [""] + descriptions

    table = pd.DataFrame(
        list(zip(names, mtimes, descriptions)),
        columns=["Nome", "Ùltima Modificação", "Descrição"],
    )
    builder = GridOptionsBuilder.from_dataframe(table)
    builder.configure_selection(mode, use_checkbox=True)
    return AgGrid(
        table,
        gridOptions=builder.build(),
        update_mode=GridUpdateMode.__members__["MODEL_CHANGED"],
        height=200,
    )
def aggriddf(df):
    """Display *df* in an AgGrid with grouping/sum defaults; return the response."""
    builder = GridOptionsBuilder.from_dataframe(df)
    builder.configure_default_column(
        groupable=True,
        value=True,
        enableRowGroup=True,
        aggFunc='sum',
        editable=False,
    )
    builder.configure_grid_options(domLayout='normal')
    # allow_unsafe_jscode=True permits JS functions inside the grid options.
    return AgGrid(
        df,
        gridOptions=builder.build(),
        height=330,
        width='100%',
        allow_unsafe_jscode=True,
        enable_enterprise_modules=False,
    )
def display_not_applied_correction(self, df):
    """Render per-field counts and the full table of not-applied corrections."""
    if df is None:
        st.write(
            "Manual correction table for not applied records is not available."
        )
        return
    if df.empty:
        st.write(
            "Manual correction table for not applied records is empty.")
        return

    # Per-field summary of how many corrections were not applied.
    per_field = (df.groupby("field")
                 .size()
                 .reset_index()
                 .rename(columns={"field": "Field", 0: "Number of Records"}))
    st.subheader("Numbers of Manual Corrections Not Applied By Field")
    st.write(per_field)

    # Drop the first column and append 'uid' last — presumably 'uid' is the
    # first column, so this moves it to the end; confirm against the schema.
    ordered = df.sort_values(by=["field"])
    ordered = ordered[ordered.columns.tolist()[1:] + ["uid"]]
    st.subheader("Table of Manual Corrections Not Applied")
    AgGrid(ordered)
rowNodes: [rowNode], columns: [col], flashDelay: 10000000000 }); }; """) gb.configure_grid_options(onCellValueChanged=js) go = gb.build() st.markdown(""" ### JsCode injections Cell editions are highlighted here by attaching to ```onCellValueChanged``` of the grid, using JsCode injection ```python js = JsCode(...) gb.configure_grid_options(onCellValueChanged=js) ag = AgGrid(data, gridOptions=gb.build(), key='grid1', allow_unsafe_jscode=True, reload_data=False) ``` """) ag = AgGrid(data, gridOptions=go, key='grid1', allow_unsafe_jscode=True, reload_data=False) st.subheader("Returned Data") st.dataframe(ag['data']) st.subheader("Grid Options") st.write(go)
# Demo: passing an ag-Grid Enterprise license key to the AgGrid component.
import streamlit as st
import numpy as np
import pandas as pd
from st_aggrid import AgGrid, DataReturnMode, GridUpdateMode, GridOptionsBuilder

# NOTE(review): this trial key expired on 2021-03-18; enterprise features will
# show the watermark/warnings again with current ag-Grid builds.
license_key = "For_Trialing_ag-Grid_Only-Not_For_Real_Development_Or_Production_Projects-Valid_Until-18_March_2021_[v2]_MTYxNjAyNTYwMDAwMA==948d8f51e73a17b9d78e03e12b9bf934"


# NOTE(review): st.cache() is a legacy API in modern Streamlit — confirm the
# pinned Streamlit version.
@st.cache()
def get_data_ex3():
    # 20 rows x 5 columns of random integers in [0, 100).
    df = pd.DataFrame(np.random.randint(0, 100, 100).reshape(-1, 5),
                      columns=list("abcde"))
    return df


df = get_data_ex3()
st.subheader("Setting a license")
st.markdown("""
Ag-grid (not this component, which is free) has its own [licensing options](https://www.ag-grid.com/documentation/react/licensing/).
If you do have an license, you can load it though ```license_key``` parameter on grid call.
""")
AgGrid(df, key='grid1', enable_enterprise_modules=True, license_key=license_key)

st.markdown(
    """On this example enterprise features are enabled (advanced column menus) and no watermak is displayed.
However, it will only work until 2021-03-18 (When my trial license used on the code expires)"""
)
# NOTE(review): fragment — relies on `sessions`, `metric_selects`, `pd` and the
# st_aggrid names defined earlier in the file.
# Register the newly configured trainer and advance the running id counter.
sessions.trainer_dict["metrics"].append(metric_selects)
sessions.trainer_dict["id"].append(sessions.id)
sessions.id = sessions.id + 1

# Show every configured trainer row in a multi-selectable grid.
trainer_dataFrame = pd.DataFrame(sessions.trainer_dict)
gb = GridOptionsBuilder.from_dataframe(trainer_dataFrame)
gb.configure_selection('multiple',
                       use_checkbox=False,
                       rowMultiSelectWithClick=False,
                       suppressRowDeselection=True)
gridOptions = gb.build()
grid_response = AgGrid(trainer_dataFrame,
                       gridOptions=gridOptions,
                       height=180,
                       width='100%',
                       data_return_mode=DataReturnMode.FILTERED,
                       update_mode=GridUpdateMode.MODEL_CHANGED,
                       fit_columns_on_grid_load=True,
                       allow_unsafe_jscode=True,
                       enable_enterprise_modules=True)

trainer_res = grid_response['selected_rows']
# Unpack the configuration of each selected trainer row.
for idx, trainer in enumerate(trainer_res):
    trainer_id = trainer["id"]
    trainer_dataloader = trainer["dataloader"]
    trainer_model = trainer["model"]
    trainer_optimizer = trainer["optimizer"]
    trainer_loss = trainer["loss"]
    trainer_scheduler = trainer["scheduler"]
    trainer_metrics = trainer["metrics"]
gb = GridOptionsBuilder.from_dataframe(data) #make all columns editable gb.configure_columns(list('abcde'), editable=False) go = gb.build() st.markdown(""" ### Pinned Rows Pin columns using either pinnedTopRowData or pinnedBottomRowData ``` gb = GridOptionsBuilder.from_dataframe(data) go = gb.build() go['pinnedTopRowData'] = [{'a':'100', 'b':'200'}] go['pinnedBottomRowData'] = [{'a':'pinned', 'b':'down'}] ``` """) go['pinnedTopRowData'] = [{'a': '100', 'b': '200'}] go['pinnedBottomRowData'] = [{'a': 'pinned', 'b': 'down'}] ag = AgGrid(data, gridOptions=go, height=400, fit_columns_on_grid_load=True, key='an_unique_key_xZs151', reload_data=reload_data) st.subheader("Returned Data") st.dataframe(ag['data']) st.subheader("Grid Options") st.write(go)
unsafe_allow_html=True) button = st.sidebar.button("Generate random data") height = st.sidebar.slider('Height', min_value=100, max_value=800, value=400) reload_data = False if button: from streamlit import caching caching.clear_cache() reload_data = True data = get_data() gb = GridOptionsBuilder.from_dataframe(data) #make all columns editable gb.configure_columns(list('abcde'), editable=True) go = gb.build() if use_fixed_key: ag = AgGrid(data, gridOptions=go, height=height, fit_columns_on_grid_load=True, key='an_unique_key', reload_data=reload_data) else: ag = AgGrid(data, gridOptions=go, height=height, fit_columns_on_grid_load=True) st.subheader("Returned Data") st.dataframe(ag['data'])
def main():
    """Build the RADIO365 fan-site page: scrape DJ profiles, list programs,
    and wire up the Streamlit UI (player, photo gallery, data tables).

    NOTE(review): relies on module-level state created elsewhere in the file
    (dj_soup, domain, the dj_* and program_* lists/dicts, program_res,
    program_base, and helper functions) — confirm against the full module.
    """
    # Hide Streamlit's default menu and footer.
    st.markdown("""
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
""",
                unsafe_allow_html=True)
    padding = 0
    # Remove the default page padding.
    st.markdown(f"""
<style>
    .reportview-container .main .block-container{{
        padding-top: {padding}rem;
        padding-right: {padding}rem;
        padding-left: {padding}rem;
        padding-bottom: {padding}rem;
    }}
</style>
""",
                unsafe_allow_html=True)
    COLOR = "black"
    BACKGROUND_COLOR = "#fff"
    max_width = 860
    padding_top = 0
    padding_right = 0
    padding_left = 0
    padding_bottom = 0

    # Parse each DJ profile block (<dl> under .prof) from the scraped page.
    dj_contents = dj_soup.select(".prof dl")
    for i, element in enumerate(dj_contents):
        dj_ids.append(element.select('dt')[0]['id'])
        dj_images_urls.append(element.select('dt')[0].select('img')[0]['src'])
        dj_href = element.select('a')[0]['href']
        if '/programs/' in dj_href:
            dj_hrefs.append(domain + dj_href)
        else:
            dj_hrefs.append(dj_href)
        # Profile text lines look like "label:value"; the first line holds the
        # DJ name, the rest become a per-DJ attribute dict.
        el2 = dj_contents[i].get_text().split('\n')[1:-1]
        key = el2[0].split(':')[1]
        # --
        dj_otherDicts = dict()  # init
        for j in el2[1:]:
            key2 = j.split(':')[0]
            value = j.split(':')[1]
            dj_otherDicts.update({key2: value})
        one_dict = {key: dj_otherDicts}
        dj_dicts.update(one_dict)

    # Flatten the per-DJ attribute dicts into parallel column lists.
    for element in dj_contents:
        el1 = element.get_text().split('\n')[1:-1]
        name = el1[0].split(':')[1]
        dj_names.append(name)
        # -- Program in charge (担当番組)
        contents = dj_dicts[name]
        if '担当番組' in contents:
            dj_haveprograms.append(contents['担当番組'])
        else:
            dj_haveprograms.append('')
        # -- Birthday (誕生日)
        if '誕生日' in contents:
            dj_births.append(contents['誕生日'])
        else:
            dj_births.append('')
        # -- Blood type (血液型)
        if '血液型' in contents:
            dj_bloods.append(contents['血液型'])
        else:
            dj_bloods.append('')
        # -- Height (身長)
        if '身長' in contents:
            dj_heights.append(contents['身長'])
        else:
            dj_heights.append('')
        # -- Hobbies (趣味)
        if '趣味' in contents:
            dj_hobbys.append(contents['趣味'])
        else:
            dj_hobbys.append('')
        # -- Special skills (特技)
        if '特技' in contents:
            dj_skills.append(contents['特技'])
        else:
            dj_skills.append('')

    df = pd.DataFrame({
        'id': dj_ids,
        '名前Name': dj_names,
        '担当番組Program': dj_haveprograms,
        '誕生日Birth': dj_births,
        '血液型Blood': dj_bloods,
        '身長Hight': dj_heights,
        '趣味Hobbys': dj_hobbys,
        '特技Skills': dj_skills,
        '画像URL': dj_images_urls
    })
    # df.index = np.arange(1, len(df)+1)

    # Collect program metadata from the RSS feed entries.
    for cnt, entrie in enumerate(program_res.entries):
        #program_id = entrie['mobileimg']['src'].split('/')[5] #bad error 2021/11/05
        program_id = entrie['link'].split('/')[5]  #new 2021/11/05
        mark = "/" + program_id + "/"
        href = entrie['links'][1]['href']
        program_ids.append(program_id)
        program_titles.append(str(cnt + 1) + ": " + entrie['title'])
        program_subtitles.append(entrie['mobilesubtitle'])
        program_image_urls.append(entrie['mobileimg']['src'])
        program_summarys.append(entrie['summary'])
        program_sound_urls.append(program_base + mark + href)
        datestr = entrie['published']
        program_pubdates.append(
            datetime.datetime.strptime(
                datestr, '%a, %d %b %Y %H:%M:%S %z').strftime('%Y/%m/%d'))
        # Placeholders, filled in later.
        program_djnames.append(' ')
        program_sound_times.append(' ')

    # Fill in program_djnames by matching the program id against the DJ ids.
    for i, dj_id in enumerate(program_ids):
        if dj_id in dj_ids:
            program_djnames[i] = dj_names[dj_ids.index(dj_id)]
        else:
            program_djnames[i] = ''

    df2 = pd.DataFrame({
        'id': program_ids,
        'Program': program_titles,
        '番組名': program_subtitles,
        'DJName': program_djnames,
        '配信日Pubday': program_pubdates,
        '再生時間SoundTime': program_sound_times
    })
    # df.index = np.arange(1, len(df)+1)

    st.title("RADIO365 DJ's Fan site")
    hedder_text = f"""Obtained automatically from [RADIO365](https://www.radio365.net/). [update:2021/11/06]<br>
Click the Play button below the image to play the program. (Please pay attention to the audio)<br>
You can also use the upper left select box to switch programs (from 1 to 100).<br>
[RADIO365](https://www.radio365.net/)から自動取得しています。<br>
画像の下にある再生ボタンで番組が再生されます。(音声にご注意ください)<br>
左上セレクトボックスでも番組(1~100迄)切替ができます。
"""
    st.markdown(hedder_text, unsafe_allow_html=True)
    selector = st.sidebar.selectbox("Select program (1 - 100):",
                                    program_titles)
    # Titles are formatted "<n>: <title>", so the index is n - 1.
    select_indexNumber = int(selector.split(':')[0]) - 1
    # dark_theme = st.sidebar.checkbox("Dark Theme", False)
    # if dark_theme:
    #     BACKGROUND_COLOR = "rgb(17,17,17)"
    #     COLOR = "#fff"
    # NOTE: the '#'-prefixed lines below are INSIDE the CSS string — they are
    # disabled CSS rules, not Python comments.
    st.markdown(
        f"""
<style>
    .reportview-container .main .block-container{{
        max-width: {max_width}px;
#        padding-top: {padding_top}rem;
#        padding-right: {padding_right}rem;
#        padding-left: {padding_left}rem;
#        padding-bottom: {padding_bottom}rem;
    }}
#    .reportview-container .main {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
#    h1 {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
#    h5 {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
#    .css-145kmo2 {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
#    .css-qbe2hs {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
#    .st-ck {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
#    .css-xq1lnh-EmotionIconBase {{
#        color: {COLOR};
#        background-color: {BACKGROUND_COLOR};
#    }}
</style>
""",
        unsafe_allow_html=True,
    )
    markdown_str = "#### " + '<font color="Gray">' + program_subtitles[
        select_indexNumber] + '</font>'
    st.markdown(markdown_str, unsafe_allow_html=True)

    #===== read sidebar image file =======
    img_data = read_image(program_image_urls[select_indexNumber])
    st.image(img_data, caption=selector, use_column_width=True)

    # sound update
    sound_data = read_sound_data(program_sound_urls[select_indexNumber])
    st.audio(sound_data, format='audio/aac')
    st.markdown(program_summarys[select_indexNumber], unsafe_allow_html=True)
    st.write('')

    # DJ photo & profile block
    htmls = set_hrefs(dj_hrefs)
    # 8 photos per row, ~62px per row of thumbnails.
    html_height = (int(len(htmls) / 8) + 1) * 62
    joinhtml = "".join(htmls)
    html = f"""{joinhtml}"""
    with st.expander("DJ Photo (please click to see phot & profile)",
                     expanded=True):
        componentsv1.html(html, height=html_height, scrolling=True)
    st.sidebar.text('')

    # Sidebar DJ photo
    temp_text = str(len(dj_images_urls)) + " DJ member:"
    st.sidebar.text(temp_text)
    dj_img_datas = read_sidebar_photos()
    for i, dj_image_url in enumerate(dj_images_urls):
        st.sidebar.image(dj_img_datas[i],
                         caption=dj_names[i],
                         use_column_width='True')
    # for i, program_sound_url in enumerate(program_sound_urls):
    #     sound_data = read_sound_data(program_sound_urls[i])
    #     program_sound_times[i] = get_sound_time(program_sound_urls[i], sound_data)

    # DJ List
    with st.expander("DJ List (please click to see dj list)", expanded=True):
        # customize gridOptions
        gb = GridOptionsBuilder.from_dataframe(df)
        gb.configure_grid_options(
            pagination=True,
            paginationAutoPageSize=True,
        )
        gridOptions = gb.build()
        AgGrid(df, autosize=True, gridOptions=gridOptions)
        st.markdown(get_table_download_link(df), unsafe_allow_html=True)

    # Program List
    with st.expander('Program List (please click to see program list)',
                     expanded=True):
        gb2 = GridOptionsBuilder.from_dataframe(df2)
        gb2.configure_grid_options(
            pagination=True,
            paginationAutoPageSize=True,
        )
        gridOptions2 = gb2.build()
        AgGrid(df2, autosize=True, gridOptions=gridOptions2)
        st.markdown(get_table_download_link(df2), unsafe_allow_html=True)
# Demo: AgGrid theme selection plus optional pre-selected rows.
import streamlit as st
import pandas as pd
import numpy as np
from st_aggrid import AgGrid, GridOptionsBuilder

# 10 rows x 5 columns of random integers in [0, 100).
df = pd.DataFrame(
    np.random.randint(0, 100, 50).reshape(-1, 5),
    index=range(10),
    columns=list("abcde"),
)

available_themes = ["streamlit", "light", "dark", "blue", "fresh", "material"]
selected_theme = st.selectbox("Theme", available_themes)

gb = GridOptionsBuilder.from_dataframe(df)
if st.checkbox('Pre-select rows 4 and 6 when loading.'):
    # pre_selected_rows is zero-based, hence [3, 5] for rows 4 and 6.
    gb.configure_selection('multiple', pre_selected_rows=[3, 5])

response = AgGrid(df,
                  editable=True,
                  gridOptions=gb.build(),
                  data_return_mode="filtered_and_sorted",
                  update_mode="no_update",
                  fit_columns_on_grid_load=True,
                  theme=selected_theme)
# enables pivoting on all columns, however i'd need to change ag grid to allow export of pivoted/grouped data, however it select/filters groups gb.configure_default_column(enablePivot=True, enableValue=True, enableRowGroup=True) gb.configure_selection(selection_mode="multiple", use_checkbox=True) gb.configure_side_bar() # side_bar is clearly a typo :) should by sidebar gridOptions = gb.build() st.success(f""" 💡 Tip! Hold the shift key when selecting rows to select multiple rows at once! """) response = AgGrid( shows, gridOptions=gridOptions, enable_enterprise_modules=True, update_mode=GridUpdateMode.MODEL_CHANGED, data_return_mode=DataReturnMode.FILTERED_AND_SORTED, fit_columns_on_grid_load=False, ) df = pd.DataFrame(response["selected_rows"]) st.subheader("Filtered data will appear below 👇 ") st.text("") st.table(df) st.text("") try: total_predicted_time = df['predicted_time'].sum() st.text("Total predicted time: " + str(total_predicted_time))
gb.configure_grid_options(domLayout='normal') gridOptions = gb.build() #Display the grid st.header("Streamlit Ag-Grid") st.markdown(""" AgGrid can handle many types of columns and will try to render the most human readable way. On editions, grid will fallback to string representation of data, DateTime and TimeDeltas are converted to ISO format. Custom display formating may be applied to numeric fields, but returned data will still be numeric. """) grid_response = AgGrid( df, gridOptions=gridOptions, height=grid_height, width='100%', data_return_mode=return_mode_value, update_mode=update_mode_value, fit_columns_on_grid_load=fit_columns_on_grid_load, allow_unsafe_jscode=True, #Set it to True to allow jsfunction to be injected enable_enterprise_modules=enable_enterprise_modules) df = grid_response['data'] selected = grid_response['selected_rows'] selected_df = pd.DataFrame(selected).apply(pd.to_numeric, errors='coerce') with st.spinner("Displaying results..."): #displays the chart chart_data = df.loc[:, ['apple', 'banana', 'chocolate']].assign( source='total') if not selected_df.empty:
# NOTE(review): fragment — `date`, `venue`, `toss_w`, `mat_detail`,
# `deliveries`, `mat`, `ind`, `team1` and `team2` are defined earlier in the
# file.
# Build [label, value] rows for the match-summary table.
toss_d = ["Toss Decision", str(list(mat_detail.loc['toss_decision'])[0]).capitalize()]
winner = ["Winner", list(mat_detail.loc['winner'])[0]]
result = ["Result", "Won by " + str(int(list(mat_detail.loc['result_margin'])[0])) + " " + str(list(mat_detail.loc['result'])[0])]
potm = ["Player of the Match", list(mat_detail.loc['player_of_match'])[0]]
u1 = ["Umpire 1", list(mat_detail.loc['umpire1'])[0]]
u2 = ["Umpire 2", list(mat_detail.loc['umpire2'])[0]]
# NOTE(review): this reads 'umpire1' — presumably it should read an
# eliminator flag column instead; confirm against the dataset schema.
eli = ["Eliminator", "No" if list(mat_detail.loc['umpire1'])[0] == 'N' else "Yes"]
final_table = pd.DataFrame(
    [date, venue, toss_w, toss_d, winner, result, potm, u1, u2, eli],
    columns=["", ""])
#tab = tabulate(final_table, tablefmt = 'fancy_grid',numalign="center",showindex=False)
st.dataframe(final_table)

# Full ball-by-ball data for the selected match.
full_data = deliveries[deliveries.id == mat.iloc[ind].id]
AgGrid(full_data)
cur_mat = deliveries[deliveries.id == mat.iloc[ind].id]
# Fractional over number (over 5, ball 3 -> 5.3) for chronological sorting.
cur_mat["cur_over"] = cur_mat.over + (cur_mat.ball / 10)
cur_mat = cur_mat.sort_values(by=['cur_over'])

# Running cumulative score for each innings (last column is 'total_score').
cur_mat_1 = cur_mat[cur_mat.batting_team == team1].reset_index()
cur_mat_1["total_score"] = cur_mat_1.total_runs
for i in range(1, len(cur_mat_1.total_score)):
    cur_mat_1.iloc[i, -1] += cur_mat_1.iloc[i - 1, -1]
cur_mat_2 = cur_mat[cur_mat.batting_team == team2].reset_index()
cur_mat_2["total_score"] = cur_mat_2.total_runs
for i in range(1, len(cur_mat_2.total_score)):
    cur_mat_2.iloc[i, -1] += cur_mat_2.iloc[i - 1, -1]
def status_summary(self):
    """Render the vaccination summary by age group for the latest date.

    Reads ``self.data_age`` (long format: one row per Impfdatum/Altersgruppe/
    Impftyp), shows an AgGrid summary table and a German summary paragraph.
    """
    # Fixed display order of the age groups.
    sort_gruppen = {
        '12-15': 0,
        '16-49': 1,
        '50-64': 2,
        '65-74': 3,
        '> 74': 4,
        'Unbekannt': 5,
        'Impfberechtigte Bevölkerung': 6,
        'Gesamtbevölkerung': 7
    }
    max_datum = self.data_age['Impfdatum'].max()
    st.markdown(
        f"### Geimpfte nach Altersgruppen, Stand {max_datum.strftime('%d.%m.%Y')}"
    )
    df = self.data_age[self.data_age['Impfdatum'] == max_datum]

    def _first(alter, impftyp, column):
        # First matching value for this age group / dose type.
        return df[(df['Altersgruppe'] == alter)
                  & (df['Impftyp'] == impftyp)][column].iloc[0]

    columns = [
        'Altersgruppe', 'Erstimpfung', 'Erstimpfung kumuliert', 'Zweitimpfung',
        'Zweitimpfung kumuliert', 'Bevölkerungszahl', 'Anteil der Geimpften',
        'sort'
    ]
    records = []
    for alter in df['Altersgruppe'].unique():
        records.append({
            'Altersgruppe': alter,
            'Erstimpfung': _first(alter, 1, 'Anzahl'),
            'Erstimpfung kumuliert': _first(alter, 1, 'Anzahl Kumuliert'),
            'Zweitimpfung': _first(alter, 2, 'Anzahl'),
            'Zweitimpfung kumuliert': _first(alter, 2, 'Anzahl Kumuliert'),
            'Bevölkerungszahl': _first(
                alter, 2, 'Bevölkerungzahl der Altersgruppe'),
            'Anteil der Geimpften': "{:,.1f}".format(
                _first(alter, 2, 'Anteil der Geimpften')),
            'sort': sort_gruppen[alter],
        })
    # DataFrame.append was removed in pandas 2.0 (and was quadratic in a
    # loop); build the frame from the record list in one go instead.
    summary_df = pd.DataFrame(records, columns=columns)
    summary_df = summary_df.sort_values(by=['sort'])
    summary_df = summary_df.drop('sort', axis=1)
    AgGrid(summary_df)

    erstimpf = int(_first('Gesamtbevölkerung', 1, 'Anzahl'))
    zweitimpf = int(_first('Gesamtbevölkerung', 2, 'Anzahl'))
    # NOTE(review): Impftyp == -1 looks suspicious next to 1/2 — confirm the
    # encoding used upstream.
    kum_erstimpf = int(_first('Gesamtbevölkerung', -1, 'Anzahl Kumuliert'))
    kum_zweitimpf = int(_first('Gesamtbevölkerung', 2, 'Anzahl Kumuliert'))
    kum_pzt = "{:,.1f}".format(
        _first('Gesamtbevölkerung', 1, 'Anteil der Geimpften'))
    kum_impfwillige_pzt = _first('Impfberechtigte Bevölkerung', 1,
                                 'Anteil der Geimpften')
    text = f"""Am {max_datum.strftime('%d.%m.%Y')} wurden Total {erstimpf + zweitimpf :,.0f} Dosen verimpft;
Davon waren {erstimpf :,.0f} Erstimpfungen und {zweitimpf :,.0f} Zweitimpfungen.
Es wurden bis zu diesem Tag {kum_erstimpf + kum_zweitimpf :,.0f} Personen mindestens einmal geimpft,
davon haben {kum_zweitimpf :,.0f} bereits ihre Zweitimpfung erhalten.
Somit sind heute {kum_pzt}% der Gesamtbevölkerung und {kum_impfwillige_pzt :,.1f}% der Impfberechtigten
(Bevölkerung ab 16 Jahre) mindestens einmal geimpft.
"""
    st.write(text)
def app_run():
    """Titanic demo page: assorted Streamlit widgets plus streamlit-aggrid tests.

    NOTE(review): relies on `load_dataset`, `time`, `px`, and the st_aggrid
    names imported elsewhere in the file.
    """
    titanic_link = 'https://raw.githubusercontent.com/mwaskom/seaborn-data/master/titanic.csv'
    titanic_data = load_dataset(titanic_link)
    st.title("Titanic Data Demo")

    # Basic input widgets driven by the dataset's categorical columns.
    selected_class = st.radio("Select Class", titanic_data['class'].unique())
    st.write("Selected Class:", selected_class)
    # st.write("Selected Class Type:", type(selected_class))
    selected_sex = st.selectbox("Select Sex", titanic_data['sex'].unique())
    st.write(f"Selected Option: {selected_sex!r}")
    selected_decks = st.multiselect("Select Decks",
                                    titanic_data['deck'].unique())
    st.write("Selected Decks:", selected_decks)

    # Age range filter with validation.
    age_columns = st.columns(2)
    age_min = age_columns[0].number_input("Minimum Age",
                                          value=titanic_data['age'].min())
    age_max = age_columns[1].number_input("Maximum Age",
                                          value=titanic_data['age'].max())
    if age_max < age_min:
        st.error("The maximum age can't be smaller than the minimum age!")
    else:
        st.success("Congratulations! Correct Parameters!")
        subset_age = titanic_data[(titanic_data['age'] <= age_max)
                                  & (age_min <= titanic_data['age'])]
        st.write(
            f"Number of Records With Age Between {age_min} and {age_max}: {subset_age.shape[0]}"
        )

    # Fare range filter inside an expander.
    optionals = st.expander("Optional Configurations", True)
    fare_min = optionals.slider("Minimum Fare",
                                min_value=float(titanic_data['fare'].min()),
                                max_value=float(titanic_data['fare'].max()))
    fare_max = optionals.slider("Maximum Fare",
                                min_value=float(titanic_data['fare'].min()),
                                max_value=float(titanic_data['fare'].max()))
    subset_fare = titanic_data[(titanic_data['fare'] <= fare_max)
                               & (fare_min <= titanic_data['fare'])]
    st.write(
        f"Number of Records With Fare Between {fare_min} and {fare_max}: {subset_fare.shape[0]}"
    )

    # Animated progress bar demo.
    progress_bar = st.progress(0)
    progress_text = st.empty()
    for i in range(101):
        time.sleep(0.01)
        progress_bar.progress(i)
        progress_text.text(f"Progress: {i}%")
    st.dataframe(titanic_data)

    with st.echo("below"):
        balloons = st.text_input("Please enter awesome to see some balloons")
        if balloons == "awesome":
            st.balloons()
    st.write("This is a large text area.")
    st.text_area("A very big area", height=300)

    # test aggrid
    st.subheader("streamlit-aggrid test")
    # https://towardsdatascience.com/7-reasons-why-you-should-use-the-streamlit-aggrid-component-2d9a2b6e32f0
    gb = GridOptionsBuilder.from_dataframe(titanic_data)
    gb.configure_pagination()
    # gb.configure_selection(selection_mode="multiple", use_checkbox=True)
    gb.configure_selection(selection_mode="single", use_checkbox=True)
    # gb.configure_side_bar()
    # gb.configure_default_column(groupable=True, value=True, enableRowGroup=True, aggFunc="sum", editable=True)
    # gb.configure_column("embark_town", cellStyle=cellsytle_jscode)
    gridOptions = gb.build()
    grid_data = AgGrid(
        titanic_data,
        key="ag1",
        gridOptions=gridOptions,
        # enable_enterprise_modules=True,
        # allow_unsafe_jscode=True,
        update_mode=GridUpdateMode.
        SELECTION_CHANGED,  # able to detect more than one type of update?
        # for multiselect, how to batch updates?
        input_mode=DataReturnMode.FILTERED  # only one?
    )
    grid_data2 = AgGrid(titanic_data, key="ag2", gridOptions=gridOptions)
    grid_data3 = AgGrid(titanic_data, key="ag3", gridOptions=gridOptions)
    st.write(DataReturnMode.__members__)
    st.write(GridUpdateMode.__members__)
    st.write(grid_data)
    st.write(grid_data2)
    st.write(grid_data3)

    # Plot a bar chart for the rows selected in the first grid.
    selected_rows = grid_data["selected_rows"]
    selected_rows = pd.DataFrame(selected_rows)
    if len(selected_rows) != 0:
        fig = px.bar(selected_rows, "embark_town", color="pclass")
        st.plotly_chart(fig)

    # set your session states here...
    if 'cc1' in st.session_state:
        st.session_state.cc1 = st.session_state.cc1  # this is not useless, it sets the session data...
    if 'cc2' in st.session_state:
        st.session_state.cc2 = st.session_state.cc2
    if 'cc3' in st.session_state:
        st.session_state.cc3 = st.session_state.cc3
    if 'my_hours_per_week' in st.session_state:
        st.session_state.my_hours_per_week = st.session_state.my_hours_per_week
    if 'my_amount' in st.session_state:
        st.session_state.my_amount = st.session_state.my_amount
"minWidth": 150 }, { "field": "duration", "valueFormatter": "x.toLocaleString() + 's'" }, { "field": "switchCode", "minWidth": 150 }, ], "defaultColDef": { "sortable": True, "flex": 1, }, }, # get the rows for each Detail Grid "getDetailRowData": JsCode("""function (params) { console.log(params); params.successCallback(params.data.callRecords); }""").js_code, }, } r = AgGrid(df, gridOptions=gridOptions, height=500, allow_unsafe_jscode=True, enable_enterprise_modules=True, update_mode=GridUpdateMode.SELECTION_CHANGED)
precision=0, aggFunc='sum') # gb.configure_column("Date", type=["dateColumnFilter","customDateTimeFormat"], custom_format_string='dd-MM-yyyy', pivot=True) gb.configure_default_column(groupable=True, value=True, enableRowGroup=True, aggFunc="sum", editable=True) gb.configure_grid_options(domLayout='normal') gridOptions = gb.build() grid_response = AgGrid( grouped_golfers_last_8, gridOptions=gridOptions, # height=grid_height, width='100%', # data_return_mode=return_mode_value, # update_mode=update_mode_value, # fit_columns_on_grid_load=fit_columns_on_grid_load, allow_unsafe_jscode= True, #Set it to True to allow jsfunction to be injected enable_enterprise_modules=True, ) # AgGrid(grouped_golfers_last_8) with st.expander("Player Detail"): st.write('combined', combined.head()) st.write('Find a player') player_names = combined['Name'].unique() names_selected = st.multiselect('Select Player', player_names) st.write((combined.set_index('Name').loc[names_selected, :] ).reset_index().sort_values(by='Week', ascending=False))
# NOTE(review): fragment — `gb`, `df` and `BtnCellRenderer` are defined
# earlier in the file.
gb.configure_default_column(editable=True)
grid_options = gb.build()
# Append a custom button column rendered by the BtnCellRenderer JS class.
grid_options['columnDefs'].append({
    "field": "clicked",
    "header": "Clicked",
    "cellRenderer": BtnCellRenderer,
    "cellRendererParams": {
        "color": "red",
        "background_color": "black",
    },
})
st.title("cellRenderer Class Example")
response = AgGrid(df,
                  theme="streamlit",
                  key='table1',
                  gridOptions=grid_options,
                  allow_unsafe_jscode=True,
                  fit_columns_on_grid_load=True,
                  reload_data=False,
                  try_to_convert_back_to_original_types=False)
st.write(response['data'])
try:
    # The 'clicked' column only exists after the button has been pressed.
    st.write(response['data'][response['data'].clicked == 'clicked'])
except Exception:
    # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    st.write('Nothing was clicked')
# NOTE(review): complete function, but stored with its original newlines collapsed
# to spaces — the `# Grid options` comment now swallows the rest of the stored line,
# and the exact nesting of the later `with action_col:` / second `if len(selected) > 0:`
# blocks (function level vs. inside `with geo_col:`) cannot be recovered from this
# text, so the code is kept verbatim rather than re-indented.
# Visible behavior: a three-column Streamlit "SPOC Verifier" page. It loads species
# records from data/species-records.json into a single-select AgGrid; on selection it
# fetches rendered entity HTML from `{settings.api_url}/api/div/` (keyed by div_enum /
# Paper ID / Instance ID), shows an action panel, then queries
# `{settings.api_url}/api/coordinates/` for the selected Place and plots the returned
# markers on a folium map (or "No places found" when the result is empty).
# The bare triple-quoted strings are Streamlit "magic" markdown — runtime-meaningful,
# not docstrings. `st.beta_columns` is a deprecated API (now `st.columns`) — worth
# confirming which Streamlit version this file targets before touching it.
def verifier_page(): st.set_page_config(page_title="SPOC Verifier", layout="wide") main_col, geo_col, action_col = st.beta_columns([2, 1, 0.5]) species = pd.read_json("data/species-records.json") grid_options = GridOptionsBuilder.from_dataframe(species) # Grid options grid_options.configure_selection("single", use_checkbox=True) grid_options.configure_pagination(paginationAutoPageSize=True) with main_col: """ # SPOC Verifier """ records_grid = AgGrid( species, gridOptions=grid_options.build(), data_return_mode=DataReturnMode.FILTERED, update_mode=GridUpdateMode.MODEL_CHANGED, ) selected = records_grid["selected_rows"] with geo_col: if len(selected) > 0: f""" ## {selected[0]['Species']} """ url_params = url_parse.urlencode({ "div_num": selected[0]["div_enum"], "paper_id": selected[0]["Paper ID"], "species_id": selected[0]["Instance ID"], }) div_url = f"{settings.api_url}/api/div/?{url_params}" result = requests.get(div_url) entities_html = entities_template.render( content=result.json().get("html")) components.html(entities_html, height=500, scrolling=True) with action_col: action_html = action_template.render(status="select") components.html(action_html, height=75, width=65) if len(selected) > 0: """ ## Places """ geo_request = requests.get( f"{settings.api_url}/api/coordinates/?places={selected[0]['Place']}" ) geo_result = geo_request.json() if len(geo_result) < 1: st.text("No places found") else: m = folium.Map( location=[geo_result["lat_mean"], geo_result["long_mean"]], zoom_start=7) for place in geo_result["markers"]: folium.Marker( [place["latitude"], place["longitude"]], popup=place["label"], tooltip=place["label"], ).add_to(m) folium_static(m)
# NOTE(review): truncated snippet — `attributes`, `stats`, `chart_data`,
# `cache_graph`, `configure_grid_stat`, and the return/update mode values are
# defined upstream, and the nested loop at the end is cut off mid-body (the
# accumulated `sub_cond` mask is presumably applied after the visible text —
# TODO confirm). Newlines were collapsed to spaces, so the commented-out
# `# allow_unsafe_jscode=True,` now comments out the rest of the stored line;
# kept verbatim for that reason.
# Visible intent: a two-column layout with a plotly chart of `stats` on the left
# and a clickable AgGrid on the right; clicking rows builds a boolean filter over
# `chart_data` matching every remaining attribute of each selected row.
col1, col2 = st.beta_columns(2) if len(attributes)>1: color=attributes[1] else: color=None fig = cache_graph(stats, x=attributes[0], y='counts',color=color) col1.plotly_chart(fig, use_container_width=True) gridOptions=configure_grid_stat(stats) with col2: selection=AgGrid(stats, data_return_mode=return_mode_value, update_mode=update_mode_value, fit_columns_on_grid_load=True, # allow_unsafe_jscode=True, gridOptions=gridOptions ) st.write(":point_up: Click on the table to see details") condition=False if selection['selected_rows']: for r in selection['selected_rows']: del r["counts"] # st.write(r) sub_cond=True for a in r.keys(): sub_cond&=(chart_data[a]==r[a])
# NOTE(review): truncated snippet, and because the collapsed line starts with `#`,
# the ENTIRE stored line is now one comment — none of the code on it executes as
# stored. It also ends mid-expression (`round(` is cut off). Kept verbatim.
# Visible intent (from the swallowed code): build an editable AgGrid from a forestry
# cash-flow frame `df`, then derive yearly costs/revenue columns, a running `result`,
# and discounted `npv_costs` / `npv_revenue` using `(1 + interest/100)**t`; citation
# is Bullard & Straka 2011, section 4 (financial criteria). The chained-indexing
# writes (`returned_df['costs_yearly'][1:] = ...`) are the source of the copy warning
# the inline TODO mentions.
# df.loc[[20], ['measure', 'costs', 'revenue']] = ['thinning', 0, 300] # df.loc[[40], ['measure', 'costs', 'revenue']] = ['harvest and land sale', 0, 3500] # Bullard & Straka 2011. Basic Concepts in Forest Valuation and Investment Analysis Basic Concepts in Forest Valuation and Investment Analysis # Section 4. Financial criteria – page 4.20 # df.loc[[0], ['measure', 'costs', 'revenue']] = ['reforest', 95, 0] # df.loc[[15], ['measure', 'costs', 'revenue']] = ['precommercial thinning', 0, 550] # df.loc[[25], ['measure', 'costs', 'revenue']] = ['thinning', 0, 1500] # df.loc[[35], ['measure', 'costs', 'revenue']] = ['harvest and land sale', 0, 3350] gb = GridOptionsBuilder(min_column_width=50, editable_columns=True) gb.build_columnsDefs_from_dataframe(df) # gb.enableSideBar() gridOptions = gb.build() returned_df = AgGrid(df, gridOptions=gridOptions) # TODO double use triggers copy warning returned_df['costs_yearly'] = 0 returned_df['costs_yearly'][1:] = yearly_costs returned_df['revenue_yearly'] = 0 returned_df['revenue_yearly'][1:] = yearly_revenue # Set initial result returned_df['result'] = returned_df['revenue'] + returned_df['revenue_yearly'] \ - returned_df['costs'] - returned_df['costs_yearly'] returned_df['npv_costs'] = round( (returned_df['costs'] + returned_df['costs_yearly']) / (1 + interest / 100)**returned_df['t'], 2) returned_df['npv_revenue'] = round(
# NOTE(review): truncated snippet — begins INSIDE a JsCode triple-quoted string
# (a row/cell style callback whose opening is not visible), so it cannot be
# reconstructed; `gb`, `user_Rev`, `Product_Detail`, `mt`, and `userName` are all
# defined upstream. Newlines were collapsed to spaces; the `#Set it to True ...`
# comment now swallows the rest of the stored line. Kept verbatim.
# Visible intent: single-select AgGrid over `user_Rev`; on selection, look up the
# selected `product_id` via `mt.get_ProductDetail(userName, ...)` and render a
# Spanish-language product-detail panel (subheader, category, purchase timestamp).
'color': 'black', 'backgroundColor': 'white' } } }; """) gb.configure_selection('single', use_checkbox=False) gb.configure_grid_options(domLayout='normal') gridOptions = gb.build() grid_response = AgGrid( user_Rev, gridOptions=gridOptions, height=280, width='100%', data_return_mode=DataReturnMode.FILTERED, update_mode=GridUpdateMode.MODEL_CHANGED, fit_columns_on_grid_load=False, allow_unsafe_jscode=True, #Set it to True to allow jsfunction to be injected enable_enterprise_modules=False, ) grid_Selected = grid_response['selected_rows'] with Product_Detail: if grid_Selected: Item = grid_Selected[0] Selected_item = Item.get('product_id') detail, cat = mt.get_ProductDetail(userName, Selected_item) st.subheader(f'Detalles del producto: {Selected_item}') st.write(f'**Categoria del producto**: {cat}') purchase_date = detail['order_purchase_timestamp'].values[0]
import streamlit as st
import pandas as pd
import numpy as np
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode

# Demo: an editable AgGrid where one column uses a rich-select dropdown editor.
# Start from a 10-row frame of empty strings with columns a..e.
df = pd.DataFrame("", index=range(10), columns=list("abcde"))

gb = GridOptionsBuilder.from_dataframe(df)
gb.configure_default_column(editable=True)
# Edits in column 'a' go through a dropdown restricted to these values.
gb.configure_column(
    'a',
    cellEditor='agRichSelectCellEditor',
    cellEditorParams={'values': ['a', 'b', 'c']},
)
# Allow click-and-drag range selection across cells.
gb.configure_grid_options(enableRangeSelection=True)
grid_options = gb.build()

response = AgGrid(
    df,
    gridOptions=grid_options,
    fit_columns_on_grid_load=True,
    allow_unsafe_jscode=True,
    enable_enterprise_modules=True,
)