def dashboard():
    # st.portfolio is assumed to be attached elsewhere (e.g., on session state)
    col1, col2, col3, col4 = st.columns(4)
    col1.metric('Portfolio Value',
                f"${round(st.portfolio.value[-1], 2)}",
                f"${round(st.portfolio.pl[-1], 2)}")
    col2.metric("Daily Change",
                f"${round(st.portfolio.daily_change[-1], 2)}",
                f"{round(st.portfolio.daily_ret[-1] * 100, 2)}%")
    col3.metric("Annual Volatility / Expected Return",
                f"{round(st.portfolio.std * 100, 2)}%",
                f"{round(st.portfolio.exp_ret * 100, 2)}%")  # round after scaling to percent
    col4.metric("Sharpe / Sortino Ratio",
                f"{round(st.portfolio.sharpe, 2)}",
                f"{round(st.portfolio.sortino, 2)}")

    col11, col12 = st.columns(2)
    with col11:
        st.subheader('Portfolio Value')
        st.line_chart(st.portfolio.value)
        st.subheader('Portfolio vs SPY (% return)')
        st.line_chart(st.portfolio.benchmark('SPY') * 100)
    with col12:
        st.subheader('Portfolio Holdings')
        st.line_chart(st.portfolio.holdings)
        st.subheader('Portfolio Cash Flows')
        st.bar_chart(st.portfolio.cash_flows)

def predict_page(self):
    st.write('## Simulator')
    st.write("""Predict the outcome or probability of survival by customizing
    the input data of individuals""")
    st.write('')
    model_list = os.listdir('../output')
    avail_algo = [re.search(r'(?<=model_).+(?=\.pkl)', m).group(0) for m in model_list]

    with st.form(key="predict_form"):
        # New data
        col1, col2, col3 = st.columns(3)
        pclass = col1.radio("Passenger Class", (1, 2, 3), format_func=lambda x: CLASS_NAME[x])
        sex = col2.selectbox("Sex", ('male', 'female'), format_func=lambda x: x.title())
        age = col3.number_input("Age", step=5)
        new_data = pd.DataFrame({'Pclass': [pclass], 'Sex': [sex], 'Age': [age]})

        # Prediction options
        col4, col5 = st.columns([2, 1])
        algorithm = col4.selectbox('Algorithm', avail_algo, format_func=lambda x: ALGO_NAME[x])
        probs = col5.radio('Predict Probability?', (True, False),
                           format_func=lambda x: 'Yes' if x else 'No')
        predict = st.form_submit_button(label='Predict')

    if predict:
        pred = self.predict(new_data, algorithm, probs)
        if probs:
            velocimeter_chart(pred[0])
        else:
            outcome = 'SURVIVED' if pred == 1 else 'DIED'
            st.write(f"""
            The model predicted that this individual would have
            ### {outcome}
            in the tragic Titanic accident
            """)

def build_metrics(area_stats, price_data, national_prices):
    def diff_to_national(area, national):
        """Percent difference of an area-level value relative to the national value."""
        diff = area - national
        return (diff / national) * 100

    median_price = price_data['median_price'].mean()
    median_price_to_national = diff_to_national(
        median_price, national_prices['median_price'].mean())
    mean_price = price_data['mean_price'].mean()
    mean_price_to_national = diff_to_national(
        mean_price, national_prices['mean_price'].mean())
    total_paid_millions = price_data['total_paid'].sum() / 1_000_000
    area_median_age = area_stats.median_age.median()
    diff_area_age_to_uk = area_median_age / 40.5  # ratio to the UK national median age

    # travel_time, travel_mode and address_input are module-level user inputs
    st.markdown(
        f'##### Within **{travel_time}** mins **{travel_mode}** of **{address_input}**:\n')
    st.markdown('###### Population details within area')
    pop_col1, pop_col2, pop_col3 = st.columns(3)
    pop_col1.metric('Approx. Population', f'{area_stats.total_population.sum():,.0f}', None)
    pop_col2.metric('Median Age', f'{area_median_age:,.0f}')
    pop_col3.metric('UK National median age', '40.5', None)

    st.markdown('###### House Prices paid (2019)')
    col1, col2 = st.columns(2)
    col1.metric("Median Price (relative to national)",
                f'£{median_price:,.0f}', f'{median_price_to_national:.2f}%')
    col2.metric("Average Price (relative to national)",
                f'£{mean_price:,.0f}', f'{mean_price_to_national:.2f}%')

def show_graph_for_selection(df_, choice, prijs_per_nacht_fixed):
    st.subheader(f"Occupancy for {choice}")
    if choice != "ALLES":
        df_ = df_[df_["acco_type"] == choice]
    (df_grouped, df_grouped_in_house, df_pivot_number_of_acco,
     df_pivot_arrivals, df_pivot_omzet, df_grouped_date) = group_data(df_)
    make_graph(df_grouped, df_grouped_in_house, df_pivot_number_of_acco,
               df_pivot_arrivals, df_pivot_omzet,
               ["2019", "2021", "2022"], "maand_dag", choice, prijs_per_nacht_fixed)
    if choice == "ALLES":
        # Arrivals per year
        col1, col2 = st.columns(2)
        with col1:
            show_count(df_grouped_date, "arrivals", "2019")
        with col2:
            show_count(df_grouped_date, "arrivals", "2021")
            show_count(df_grouped_date, "arrivals", "2022")

        # Departures per year
        col1, col2 = st.columns(2)
        with col1:
            show_count(df_grouped_date, "departures", "2019")
        with col2:
            show_count(df_grouped_date, "departures", "2021")
            show_count(df_grouped_date, "departures", "2022")

def conversation_metrics_page(state):
    logging.info({"message": "Loading Metrics - Conversations page."})
    st.title(":bar_chart: Metrics - Conversations")
    st.markdown("""
    In this page you can generate conversation-based metrics for your Watson Assistant.
    You need to select a date range to get logs.
    """)

    st.subheader("Parameters")
    col1, col2 = st.columns(2)
    args = {}
    end_date = datetime.datetime.now()
    start_date = end_date - datetime.timedelta(days=7)
    args['logs_date'] = col1.date_input('Logs date', value=(start_date, end_date))
    args['Date'] = col1.selectbox('Datetime variable',
                                  ('request_timestamp', 'response_timestamp'))
    args['Sessions'] = col2.text_input('Conversation id',
                                       value='response.context.conversation_id')
    args['Active users'] = col2.text_input('User variable',
                                           value='response.context.global.system.user_id')

    if st.button("Get logs"):
        with st.spinner("Getting logs..."):
            from src.metrics.conversation import logs_to_dataframe
            from src.connectors.watson_assistant import WatsonAssistant

            wa = WatsonAssistant(apikey=state.watson_args["apikey"],
                                 service_endpoint=state.watson_args["endpoint"],
                                 default_skill_id=state.watson_args["skill_id"])
            query_logs = wa.define_query_by_date(args['logs_date'][0], args['logs_date'][1])
            logs = wa.get_logs(query=query_logs)
            state.logs = logs_to_dataframe(logs, args['Date'])

    if state.logs is not None:
        from src.metrics.conversation import get_metrics, gen_plotly_datetime

        df_logs = state.logs
        metrics = get_metrics(df_logs, args)
        c1, c2, c3, c4 = st.columns(4)
        c1.metric(label="Sessions count", value=metrics["sessions_count"])
        c2.metric(label="Messages count", value=metrics["messages_count"])
        c3.metric(label="Avg messages per session", value=metrics["avg_messages"])
        c4.metric(label="Active users", value=metrics["active_users"])

        # Plotly datetime per day
        fig_date_options = [e for e in args.keys() if e not in ('logs_date', 'Date')]
        fig_date_option = st.selectbox('Options', fig_date_options)
        fig_date = gen_plotly_datetime(df_logs, args, fig_date_option)
        st.plotly_chart(fig_date, use_container_width=True)

    state.sync()

def test_variable_columns(self):
    weights = [3, 1, 4, 1, 5, 9]
    sum_weights = sum(weights)
    st.columns(weights)

    for i, w in enumerate(weights):
        # Pull the delta from the back of the queue, using negative index
        delta = self.get_delta_from_queue(i - len(weights))
        self.assertEqual(delta.add_block.column.weight, w / sum_weights)

def main():
    menu = ["Enter Details", "Search"]
    choice = st.sidebar.selectbox("Menu", menu)

    if choice == "Enter Details":
        st.subheader("Enter Details")
        with st.form(key='my_form1'):
            col1, col2 = st.columns(2)
            dob = col1.date_input('Enter DOB')
            First_name = col1.text_input("First Name")
            Last_name = col1.text_input("Last Name")
            col1.write('Select Skill')
            option_1 = col1.checkbox('Java')
            option_2 = col1.checkbox('Shell script')
            option_3 = col1.checkbox('Python')
            option_4 = col1.checkbox('SQL')
            submit_button = st.form_submit_button(label='Submit')
        if submit_button:
            update_df['dob'] = [dob]
            update_df['First_name'] = [First_name.strip()]
            update_df['Last_name'] = [Last_name.strip()]
            update_df['Java'] = [option_1]
            update_df['Shell Script'] = [option_2]
            update_df['Python'] = [option_3]
            update_df['SQL'] = [option_4]
            sh = gc.open('prototype')
            wks = sh[0]
            df = wks.get_as_df()
            df = pd.concat([df, update_df], ignore_index=True)  # DataFrame.append is deprecated
            wks.set_dataframe(df, (1, 1))

    elif choice == "Search":
        st.subheader("Search")
        with st.form(key='my_form2'):
            col1, col2 = st.columns(2)
            First_name = col1.text_input("First Name")
            Last_name = col1.text_input("Last Name")
            submit_button = st.form_submit_button(label='Submit')
        if submit_button:
            sh = gc.open('prototype')
            wks = sh[0]
            df = wks.get_as_df()
            if First_name != "" and Last_name != "":
                filtered_df = df[(df['First_name'].str.strip() == First_name.strip())
                                 & (df['Last_name'].str.strip() == Last_name.strip())]
            if Last_name == "":
                filtered_df = df[df['First_name'].str.strip() == First_name.strip()]
            if First_name == "":
                filtered_df = df[df['Last_name'].str.strip() == Last_name.strip()]
            st.write(filtered_df.reset_index(drop=True))

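# `gc` above is an already-authorized pygsheets client (sh[0], get_as_df and
# set_dataframe are pygsheets calls). A typical setup might look like the
# following sketch; the service-account key path is illustrative only:
import pygsheets
gc = pygsheets.authorize(service_file='service_account_credentials.json')
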
def runner():
    col11, col22 = st.columns(2)
    with col11:
        st.title("Language Translation")
        st.markdown('Feel the power of Neural Machine Translation')
    with col22:
        st.image('Capture.PNG', use_column_width=True)

    col1, col2 = st.columns(2)
    conversion_list = {"English": "en", "Bengali": "bn", "Hindi": "hi", "French": "fr"}
    with col1:
        source_language = st.selectbox(
            'Select the Source Language',
            ['Default', 'English', 'Bengali', 'Hindi', 'French'])
        input_text = st.text_input('Enter the Input text', 'Enter text here')
    with col2:
        destination_language = st.selectbox(
            'Select the Destination Language',
            ['Default', 'English', 'Bengali', 'Hindi', 'French'])
    with col1:
        button_value = st.checkbox(label='Translate')

    translated_text = ""
    if button_value:
        # Note: 'Default' has no entry in conversion_list, so a real language
        # must be selected on both sides before translating
        if source_language != destination_language:
            print("The Source Language is {}".format(conversion_list[source_language]))
            print("The Destination Language is {}".format(conversion_list[destination_language]))
            translated_text = translate_text(input_text,
                                             conversion_list[source_language],
                                             conversion_list[destination_language])
        else:
            translated_text = input_text
        with col2:
            st.text_input('Translated Text', translated_text)
        print("Translated Text : ", translated_text)
        if destination_language == 'Hindi':
            button_value_text_to_speech = st.checkbox(label='Audio Form')
            if button_value_text_to_speech:
                audio_path = text_to_speech(translated_text)
                audio_file = open(audio_path, 'rb')
                audio_bytes = audio_file.read()
                st.audio(audio_bytes, format='audio/ogg')  # MIME type takes no spaces

def fourier_frame():
    with st.expander("Create a Fourier Series"):
        # st.markdown('# Create a Fourier Series:')
        st.latex(r'\hat{f}(x) = \frac{a_0}{2} + \sum_{n=1}^{\infty}a_n\cos\bigg(\frac{2\pi n}{P}x\bigg) + '
                 r'\sum_{n=1}^{\infty}b_n\sin\bigg(\frac{2\pi n}{P}x\bigg)')
        selection_list = ['Sine', 'Cosine', 'Square', 'Triangle', 'Sawtooth', 'Gaussian']

        st.markdown('## Wave Options')
        # renamed from `type`/`min`/`max` below to avoid shadowing builtins
        wave_type = st.selectbox('Wave Type', selection_list, 0)
        amp = st.slider('Amplitude', 0., 100., 1., 0.1, "%.1f")
        per = st.slider('Period (or Gaussian FWHM)', 0.01, 100., 2 * np.pi, 0.01, "%.2f")
        phase = st.slider('Phase (or Gaussian Mean)', -np.pi, np.pi, 0., 0.01, "%.2f")
        velocity = st.number_input('Velocity (v)', 0, 100, 0, 1, "%d")
        rectify = st.checkbox('Rectify', False)

        st.markdown('## Fourier Series Options')
        ts = st.number_input('Draw Timestamp', 0., 100., 0., 0.1, "%.1f")
        iters = st.number_input('Iterations', 1, 100, 10, 1, "%d")
        cols = st.columns(2)
        interval_min = cols[0].number_input(label='Interval Min', value=-np.pi)
        interval_max = cols[1].number_input(label='Interval Max', value=np.pi)
        interval = (interval_min, interval_max)
        res = st.number_input(label='Resolution', value=1000)

        pchart = build_fourier(wave_type, rectify, amp, per, phase, velocity,
                               ts, iters, interval, res)
        st.plotly_chart(pchart, use_container_width=True)

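# A minimal sketch (an addition for illustration, not part of `build_fourier`)
# of how the a_n and b_n in the formula above can be approximated numerically,
# replacing the integral over one period with a uniform-grid average:
import numpy as np

def fourier_coefficients(f, period, n_terms, resolution=1000):
    """Approximate a_n, b_n for n = 0..n_terms from one period of f."""
    x = np.linspace(0.0, period, resolution, endpoint=False)
    y = f(x)
    a = np.array([2 * np.mean(y * np.cos(2 * np.pi * n * x / period))
                  for n in range(n_terms + 1)])
    b = np.array([2 * np.mean(y * np.sin(2 * np.pi * n * x / period))
                  for n in range(n_terms + 1)])
    return a, b

# Example: for a square wave, b_n -> 4/(pi*n) for odd n and 0 for even n.
a, b = fourier_coefficients(lambda x: np.sign(np.sin(x)), 2 * np.pi, 5)
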
def display_stats_data(collect_field):
    if st.checkbox('Show Raw Sar Data'):
        cols_per_line = Config.cols_per_line
        cols = st.columns(cols_per_line)
        even_lines = len(collect_field) // cols_per_line
        remaining_cols = len(collect_field) % cols_per_line
        empty_cols = cols_per_line - remaining_cols

        for line in range(even_lines):
            for col in cols:
                # Offset by the rows already rendered so each field is shown once
                f_index = line * cols_per_line + cols.index(col)
                col.markdown(f'###### {collect_field[f_index][2]}')
                col.write(collect_field[f_index][0])
                col.write(collect_field[f_index][1])

        if remaining_cols and not even_lines:
            for index in range(remaining_cols):
                col = cols[index]
                col.markdown(f'###### {collect_field[index][2]}')
                col.write(collect_field[index][0])
                # Pad the unused trailing columns so the row stays aligned
                for nindex in range(1, empty_cols + 1):
                    nindex = cols_per_line - nindex
                    cols[nindex].write('')
                col.write(collect_field[index][1])
        elif remaining_cols and even_lines:
            for index in range(1, remaining_cols + 1):
                col = cols[index - 1]
                col.markdown('___')
                f_index = len(collect_field) - index
                col.markdown(f'###### {collect_field[f_index][2]}')
                col.write(collect_field[f_index][0])
                col.write(collect_field[f_index][1])

def cond3_header():
    st.header("Condition 3: Multiplier Goal")
    st.text("In this condition we allow each trial to continue\n"
            "until the entire bankroll is lost or a multiplier goal is met.\n")
    left, right = st.columns(2)
    left.subheader("Spins to Goal or Bankrupt")
    return left, right

def run():
    import streamlit as st
    import pandas as pd
    import plotly.express as px

    df = pd.DataFrame(px.data.gapminder())
    # Continents
    contlist = df['continent'].unique()
    continent = st.selectbox("Select a continent:", contlist)

    col1, col2 = st.columns(2)
    fig = px.line(df[df['continent'] == continent], x="year", y="gdpPercap",
                  title="GDP per Capita", color='country')
    col1.plotly_chart(fig)
    fig = px.line(df[df['continent'] == continent], x="year", y="pop",
                  title="Population", color='country')
    col2.plotly_chart(fig)

def show_library_two_columns(library_name: Text) -> Tuple[Any, Any]:
    """Introduces a library using a two-column layout and i18n.

    Args:
        library_name: The name of the library being introduced.

    Returns:
        The two Streamlit columns (as a tuple) containing the library information.
    """
    logo_column, description_column = streamlit.columns(
        [1, 2]  # Second column will be twice as wide
    )
    logo_column.image(translation(f"libraries.{library_name}_logo"),
                      use_column_width=True)
    description_column.markdown(
        paragraphs(
            "#### {library} `{name}`".format(
                library=translation("libraries.library"),
                name=translation(f"libraries.{library_name}_name")),
            "<{url}>".format(url=translation(f"libraries.{library_name}_url")),
            "> {description}".format(description=translation(
                f"libraries.{library_name}_description"))))
    return logo_column, description_column

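# The `paragraphs` helper is not shown above; a minimal sketch consistent with
# its usage here (joining markdown fragments into one string) might look like
# this -- an assumption, not the project's actual implementation:
def paragraphs(*fragments: str) -> str:
    """Join markdown fragments with blank lines so each renders as a paragraph."""
    return "\n\n".join(fragments)
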
def get_weekly_tweet_analysis_page(shared_state):
    st.header("Top Tweets")
    df_weekly_top_tweets = shared_state.df_weekly_top_tweets
    df_weekly_top_users = shared_state.df_weekly_top_users
    weeks = df_weekly_top_tweets.index.unique(level="WeekDate")
    selected_week_option = st.selectbox(
        "Select a week",
        get_week_options(weeks),
        format_func=lambda x: x["label"],
    )
    COLUMNS = ["handle", "quote_count", "retweet_count", "timestamp", "text"]

    col1, col2 = st.columns(2)
    # `min`, not `max`, enforces the advertised cap of 25 tweets
    tweets_top_n = min(col1.number_input("Show top N tweets (limited to 25)", value=5), 25)
    tweets_sort_column = col2.selectbox("Sort by column",
                                        ["retweet_count", "quote_count"])
    only_media_tweets = st.checkbox("Only show media tweets")

    df_top_tweets = filter_by_week_option_value(
        df_weekly_top_tweets,
        selected_week_option["value"],
        lambda df: df.set_index("datastore_id"),
    )
    if only_media_tweets:
        df_top_tweets = df_top_tweets[df_top_tweets.hasMedia]
    df_top_tweets = df_top_tweets[COLUMNS].nlargest(tweets_top_n, [tweets_sort_column])
    st.table(df_top_tweets)

def render_sliders():
    column1, column2 = st.columns(2)
    with column1:
        radius_mean = st.slider('radius_mean', 6.981000, 28.110000)
        texture_mean = st.slider('texture_mean', 9.710000, 39.280000)
        perimeter_mean = st.slider('perimeter_mean', 43.790000, 188.500000)
        area_mean = st.slider('area_mean', 143.500000, 2501.000000)
        smoothness_mean = st.slider('smoothness_mean', 0.052630, 0.163400)
        compactness_mean = st.slider('compactness_mean', 0.019380, 0.345400)
        concavity_mean = st.slider('concavity_mean', 0.000000, 0.426800)
        radius_worst = st.slider('radius_worst', 7.930000, 36.040000)
        texture_worst = st.slider('texture_worst', 12.020000, 49.540000)
    with column2:
        perimeter_worst = st.slider('perimeter_worst', 50.410000, 251.200000)
        area_worst = st.slider('area_worst', 185.200000, 4254.000000)
        smoothness_worst = st.slider('smoothness_worst', 0.071170, 0.222600)
        compactness_worst = st.slider('compactness_worst', 0.027290, 1.058000)
        concavity_worst = st.slider('concavity_worst', 0.000000, 1.252000)
        concave_point_worst = st.slider('concave_point_worst', 0.000000, 0.291000)
        symmetry_worst = st.slider('symmetry_worst', 0.156500, 0.663800)
        fractal_dimension_worst = st.slider('fractal_dimension_worst', 0.055040, 0.207500)
    return [
        radius_mean, texture_mean, perimeter_mean, area_mean, smoothness_mean,
        compactness_mean, concavity_mean, radius_worst, texture_worst,
        perimeter_worst, area_worst, smoothness_worst, compactness_worst,
        concavity_worst, concave_point_worst, symmetry_worst,
        fractal_dimension_worst
    ]

def st_list_model():
    st.markdown("### Models list")
    columns_checkboxes_beta_columns = st.columns(5)
    columns_checkboxes_beta_columns[0].text("Show columns for")
    show_commit_columns = columns_checkboxes_beta_columns[1].checkbox(
        "commits", value=True)
    show_model_columns = columns_checkboxes_beta_columns[2].checkbox(
        "model parameters", value=True)
    show_train_columns = columns_checkboxes_beta_columns[3].checkbox(
        "train parameters", value=True)
    show_evaluation_columns = columns_checkboxes_beta_columns[4].checkbox(
        "evaluation metrics", value=True)

    def show_column(column: str) -> bool:
        return ((show_commit_columns or not column.startswith("commit."))
                and (show_model_columns or not column.startswith("model."))
                and (show_train_columns or not column.startswith("train."))
                and (show_evaluation_columns or not column.startswith("evaluation.")))

    st.table(
        MODELS_DF.filter(
            items=[column for column in MODELS_DF if show_column(column)]))

def visualize_similarity(
    nlp: spacy.language.Language,
    default_texts: Tuple[str, str] = ("apple", "orange"),
    *,
    threshold: float = 0.5,
    title: Optional[str] = "Vectors & Similarity",
    key: Optional[str] = None,
) -> None:
    """Visualizer for semantic similarity using word vectors."""
    meta = nlp.meta.get("vectors", {})
    if title:
        st.header(title)
    if not meta.get("width", 0):
        st.warning("No vectors available in the model.")
    else:
        cols = st.columns(2)
        text1 = cols[0].text_input("Text or word 1", default_texts[0],
                                   key=f"{key}_similarity_text1")
        text2 = cols[1].text_input("Text or word 2", default_texts[1],
                                   key=f"{key}_similarity_text2")
        doc1 = nlp.make_doc(text1)
        doc2 = nlp.make_doc(text2)
        similarity = doc1.similarity(doc2)
        similarity_text = f"**Score:** `{similarity}`"
        if similarity > threshold:
            st.success(similarity_text)
        else:
            st.error(similarity_text)
        exp = st.expander("Vector information")
        exp.code(meta)

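# A possible usage sketch: the visualizer needs a pipeline that ships with word
# vectors, e.g. the medium English model (installed separately with
# `python -m spacy download en_core_web_md`):
import spacy

nlp = spacy.load("en_core_web_md")
visualize_similarity(nlp, ("cat", "dog"), threshold=0.5, key="demo")
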
def create_cross_validation_form(ui_params):
    col1, col2, col3 = st.columns(3)
    with col1:
        st_cv_initial_days = st.number_input("Initial days", value=730, min_value=1, step=1)
    with col2:
        st_cv_period_days = st.number_input("Period days", value=180, min_value=1, step=1)
    with col3:
        st_cv_horizon_days = st.number_input("Horizon days", value=365, min_value=1, step=1)
    st_validation_metric = st.selectbox("Validation Metric", options=VALIDATION_METRICS, index=3)

    show_cross_validation = st.form_submit_button(label='Cross-Validate')
    # show_cross_validation = st.checkbox("Show Cross-Validation", value=False)
    st.caption("This can take some time.")

    dic_return = DotDict(initial_days=st_cv_initial_days,
                         period_days=st_cv_period_days,
                         horizon_days=st_cv_horizon_days,
                         validation_metric=st_validation_metric,
                         cross_validation=show_cross_validation)
    return dic_return

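# The three day-count inputs mirror Prophet's cross-validation windows. A
# sketch of how the returned dict might be consumed downstream, assuming a
# fitted Prophet `model` and the `prophet` package (neither shown above):
from prophet.diagnostics import cross_validation, performance_metrics

def run_cross_validation(model, params):
    df_cv = cross_validation(model,
                             initial=f"{params.initial_days} days",
                             period=f"{params.period_days} days",
                             horizon=f"{params.horizon_days} days")
    return performance_metrics(df_cv)
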
def function_database():
    global FUNCTIONS
    # Title
    st.header("Function Database")

    # Load Inputs
    st.markdown("## Select Database")
    USERINPUT_Language = UI_SelectDatabases()

    # Functions
    st.markdown("## View Functions")
    st.markdown("### Found <span style=\"color:yellow\">**" +
                str(len(FUNCTIONS[USERINPUT_Language])) +
                "**</span> functions!", unsafe_allow_html=True)
    Functions_DisplayNames = GetFunctionDisplayNames(FUNCTIONS[USERINPUT_Language])
    USERINPUT_FunctionChoice = st.selectbox("Exact Match Functions",
                                            ["Select Function"] + Functions_DisplayNames)
    if USERINPUT_FunctionChoice != 'Select Function':
        USERINPUT_FunctionChoiceIndex = Functions_DisplayNames.index(USERINPUT_FunctionChoice)
        UI_DisplayFunctionDetails(FUNCTIONS[USERINPUT_Language][USERINPUT_FunctionChoiceIndex],
                                  Language=USERINPUT_Language)

    # Add or Remove Existing Functions
    # Retrieve removed cache functions
    AddedFunctions, RemovedFunctions, AddedIndices, RemovedIndices = \
        GetRemovedFunctions(FUNCTIONS[USERINPUT_Language])
    st.markdown("## Add/Remove Functions")
    AddedFunctions_DisplayNames = GetFunctionDisplayNames(AddedFunctions)
    RemovedFunctions_DisplayNames = GetFunctionDisplayNames(RemovedFunctions)
    USERINPUT_AddedFunctionChoiceIndex = -1
    USERINPUT_RemovedFunctionChoiceIndex = -1

    col1, col2, col3 = st.columns([5, 1, 5])
    AddedFuncCount = col1.empty()
    AddedFuncSelect = col1.empty()
    AddedCode = col1.empty()
    USERINPUT_AddedFunctionChoiceIndex = UI_SelectFunction(
        AddedFunctions, name="Added", CodeWindow=AddedCode,
        FuncCount=AddedFuncCount, FuncSelect=AddedFuncSelect,
        Language=USERINPUT_Language)
    RemovedFuncCount = col3.empty()
    RemovedFuncSelect = col3.empty()
    RemovedCode = col3.empty()
    RemovedFuncCount.markdown("**" + str(len(RemovedFunctions_DisplayNames)) + "** Removed Functions")
    USERINPUT_RemovedFunctionChoiceIndex = UI_SelectFunction(
        RemovedFunctions, name="Removed", CodeWindow=RemovedCode,
        FuncCount=RemovedFuncCount, FuncSelect=RemovedFuncSelect,
        Language=USERINPUT_Language)

    col2.markdown("")
    col2.markdown("")
    USERINPUT_Remove = col2.button("->") and (USERINPUT_AddedFunctionChoiceIndex >= 0)
    USERINPUT_Add = col2.button("<-") and (USERINPUT_RemovedFunctionChoiceIndex >= 0)
    if USERINPUT_Remove:
        RemovedIndices.append(AddedIndices[USERINPUT_AddedFunctionChoiceIndex])
    if USERINPUT_Add:
        RemovedIndices.pop(USERINPUT_RemovedFunctionChoiceIndex)
    if USERINPUT_Remove or USERINPUT_Add:
        SetRemovedFunctions(RemovedIndices)
        AddedFunctions, RemovedFunctions, AddedIndices, RemovedIndices = \
            GetRemovedFunctions(FUNCTIONS[USERINPUT_Language])
        USERINPUT_AddedFunctionChoiceIndex = UI_SelectFunction(
            AddedFunctions, name="Added", CodeWindow=AddedCode,
            FuncCount=AddedFuncCount, FuncSelect=AddedFuncSelect,
            Language=USERINPUT_Language)
        USERINPUT_RemovedFunctionChoiceIndex = UI_SelectFunction(
            RemovedFunctions, name="Removed", CodeWindow=RemovedCode,
            FuncCount=RemovedFuncCount, FuncSelect=RemovedFuncSelect,
            Language=USERINPUT_Language)

    # Download Database
    if st.button('Download Database'):
        SaveDatabaseData = {"Name": 'FunctionDatabase_' + USERINPUT_Language,
                            "Functions": AddedFunctions}
        json.dump(SaveDatabaseData, open(DEFAULT_DATABASE_SAVEPATH, 'w'))
        link = DownloadFile(DEFAULT_DATABASE_SAVEPATH,
                            'FunctionDatabase_' + USERINPUT_Language + '.json',
                            'FunctionDatabase_' + USERINPUT_Language + '.json')
        st.markdown(link, unsafe_allow_html=True)

def champ_scores():
    col1, col2 = st.columns(2)

    # Bengals / Chiefs
    col1.title("Bengals @ Chiefs")
    col1.write("**Points for Each Player**")
    col1.dataframe(cin_kc_scorers)
    col1.write("")
    col1.write("")
    col1.write("**Each Scoring Play ... scroll!**")
    col1.dataframe(cin_kc)
    col1.write("")

    # 49ers / Rams
    col2.title("49ers @ Rams")
    col2.write("**Points for Each Player**")
    col2.dataframe(sf_lar_scorers)
    # Empty writes add vertical space so the two "scoring play" tables line up
    for _ in range(7):
        col2.write("")
    col2.write("**Each Scoring Play ... scroll!**")
    col2.dataframe(sf_lar)

def main():
    st.header("Calculate specific and absolute humidity")
    col1, col2, col3 = st.columns(3)
    with col1:
        t = st.number_input("Temperature (Celsius)", None, None, 25)
    with col2:
        rh = st.number_input("Relative humidity (%)", None, None, 36)
    with col3:
        p = st.number_input("Pressure (mbar)", None, None, 1020)

    tekst = (f"<div style='background-color: lightblue;padding:20px;'>"
             f"Specific humidity (q) = <b>{round(rh2q(rh, t, p), 1)}</b> g/kg<br><br>"
             f"Absolute humidity = <b>{round(rh2ah(rh, t), 1)}</b> grams/m<sup>3</sup></div>")
    st.markdown(tekst, unsafe_allow_html=True)

    st.subheader("Formula for specific humidity")
    r'''$$es = 6.112 * e^{\frac{17.67 * t}{t + 243.5}} \quad (e = 2.71828...)\\$$
    $$e = es * \frac{rh}{100}\\$$
    $$q = {\frac{0.622 * e}{p - (0.378 * e)}}*1000$$'''
    link = ('<a href="https://github.com/PecanProject/pecan/blob/master/modules/data.atmosphere/R/metutils.R#L45" target="_blank">source</a>')
    st.markdown(link, unsafe_allow_html=True)

    st.subheader("Formula for absolute humidity")
    r'''$$ah = \frac{6.112 * e^{\frac{17.67 * t}{t + 243.5}} * rh * 2.1674}{273.15 + t} \quad (e = 2.71828...)$$'''
    link2 = ('<a href="https://carnotcycle.wordpress.com/2012/08/04/how-to-convert-relative-humidity-to-absolute-humidity/" target="_blank">source</a>')
    st.markdown(link2, unsafe_allow_html=True)
    st.write("https://imgur.com/gallery/lUSmGHg")

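# `rh2q` and `rh2ah` are not defined in this snippet; below is a sketch derived
# directly from the formulas rendered above (ported from PEcAn's metutils.R).
# Treat it as an illustration rather than the app's actual helpers:
import math

def rh2q(rh, t, p):
    """Relative humidity (%) at t (deg C) and p (mbar) -> specific humidity (g/kg)."""
    es = 6.112 * math.exp((17.67 * t) / (t + 243.5))  # saturation vapour pressure
    e = es * rh / 100.0                               # actual vapour pressure
    return (0.622 * e) / (p - 0.378 * e) * 1000.0

def rh2ah(rh, t):
    """Relative humidity (%) at t (deg C) -> absolute humidity (g/m^3)."""
    return 6.112 * math.exp((17.67 * t) / (t + 243.5)) * rh * 2.1674 / (273.15 + t)
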
def get_params():
    # The Streamlit UI: start by getting the input parameters from the user.
    # TODO: add tooltip
    st.title('Dither sequences for blended acquisition')
    my_expander1 = st.expander("General parameters:", expanded=True)
    with my_expander1:
        col1, col2, col3 = st.columns(3)
        with col1:
            no_src = st.number_input('Number of sources to dither:', 1, 9, 3, 1,
                                     help="This is the number of sources operating in flip-flop-flap mode.")
            nPoints = st.number_input('The number of dithers per source:', 10, 2500, 200, 10,
                                      help='This is how many dithers you want for one source. Typically this number should be > the number of traces in the migration aperture.')
            compute_dithers = st.button("Plot the dithers (for QC), and prepare a file for downloading",
                                        help='Produce nice looking QC plots of the dither sequences.')
            get_help = st.button("Get a ppt that explains the dithering",
                                 help="Download a ppt with a lot of explanation on why you want to use the inverse Irwin-Hall distribution.")
            if get_help:
                # Return a ppt: load the file and offer it for download
                filename = "./dither_explained.pptx"
                with open(filename, 'rb') as f:
                    s = f.read()
                download_button_str = download_button(s, filename, f'Click here to download {filename}')
                st.markdown(download_button_str, unsafe_allow_html=True)
        with col2:
            range_beg = st.number_input('Dither minimum in ms:', -2000, 2000, 0, 4)
            range_end = st.number_input('Dither maximum in ms:', -2000, 2000, 500, 4)
            dither_type = st.selectbox('Select type of dithers (remember that natural dithering adds to this):',
                                       ('Inverse Irwin-Hall', 'Random', 'Halton', 'Poisson'),
                                       help="The IIH (Inverse Irwin-Hall) is the one you should select! The others are for RnD and as illustrations.")
            if dither_type == "Inverse Irwin-Hall":
                st.warning('Contact Legal/IP/RnD before using inverse Irwin-Hall dither on a survey!')
        with col3:
            nLevels = st.number_input('Number of levels (N+1 or N+2):', 1, 2, 1, 1,
                                      help="Keep this as 1 for all normal surveys. In a case where sources are going off very often, it might be advisable to also optimize the dithers for the N+2 shot. Pls contact RnD before using this on a real survey.")
            nBacksteps = st.number_input('Amount of anti-clustering:', 1, 5, 5, 1,
                                         help="Keep this at 5 or less. [3-5] is a good and robust choice. If you use numbers much larger than 5, there is a potential issue with regards to a CGG patent in that the effective distribution becomes close to uniform random. Please contact Legal/IP counsel and RnD before going above 5.")
            user_seed = int(st.text_input("User seed (for the random number generator):", "0",
                                          help="Keeping this at 0 will give different results each time. However, by providing your own seed, for example 123, the same random sequence is produced every time."))
            if user_seed != 0:
                random.seed(a=user_seed)
    return [nBacksteps, no_src, nPoints, range_beg, range_end, nLevels, dither_type, compute_dithers]

def kappa():
    st.title("Cohen's Kappa Calculator")
    st.write("""
    1. Copy the codes of Coder 1 into the Coder 1 text entry field and hit "Enter."
    2. Copy the codes for Coder 2 into the Coder 2 text entry field and hit "Enter."

    ❗ Make sure that the coding decisions between Coder 1 and Coder 2 are the same length.
    """)
    col1 = st.text_input('Coder 1', value='a a b')
    col2 = st.text_input('Coder 2', value='a a b')
    st.caption("📝 This app does not retain user data.")
    try:
        st.subheader('Results')
        c1, c2, c3 = st.columns(3)
        c1.metric('Dataset Length', str(len(col1.split())))
        c2.metric('Accuracy', str(accuracy_score(col1.split(), col2.split())))
        c3.metric('Kappa Score', str(cohen_kappa_score(col1.split(), col2.split())))
        labels = sorted(list(set(col1.split() + col2.split())))
        indices = [str(label) + '_' for label in labels]
        st.write("Confusion Matrix:")
        st.dataframe(
            pd.DataFrame(confusion_matrix(col1.split(), col2.split()),
                         index=indices, columns=labels))
        st.caption('Note: Coder 1 is used as the baseline for evaluation.')
        st.markdown(
            "For a more extended presentation of Cohen's Kappa see Hart-Davidson (2014), "
            "[Using Cohen's Kappa to Gauge Interrater Reliability](https://www.slideshare.net/billhd/kappa870)"
        )
    except ValueError:
        st.markdown('<mark>Error: Data must be the same length</mark>', unsafe_allow_html=True)

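# For reference, cohen_kappa_score computes kappa = (p_o - p_e) / (1 - p_e),
# where p_o is the observed agreement and p_e the agreement expected by chance
# from the marginals. A minimal hand-rolled equivalent over a confusion matrix
# (illustrative only; use sklearn in practice):
import numpy as np

def kappa_from_confusion(cm: np.ndarray) -> float:
    n = cm.sum()
    p_o = np.trace(cm) / n                                  # observed agreement
    p_e = (cm.sum(axis=0) * cm.sum(axis=1)).sum() / n ** 2  # chance agreement
    return (p_o - p_e) / (1 - p_e)
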
def add_petrodc_app():
    st.subheader('Petroleum Data Collector APP')
    st.write("This is a web-based application to access and visualize petroleum-related data from"
             " public databases. This is part of the open source initiative by Pro Well Plan AS.")
    st.info('petrodc is a python package to get datasets from public sources. New sources are '
            'added as they are tested; suggestions and contributions of all kinds are very welcome.')

    c1, c2, c3, c4, c5 = st.columns(5)
    with c1:
        st.markdown("[![Github](https://img.shields.io/badge/source-petrodc-green.svg?logo=github)]"
                    "(https://github.com/pro-well-plan/petrodc)")
    with c2:
        st.markdown("[![PyPI version](https://badge.fury.io/py/petrodc.svg)]"
                    "(https://badge.fury.io/py/petrodc)")

    database = st.selectbox(
        'Select the data source:',
        ('Topo-bathymetry', 'Wellbore data NPD', 'Athabasca well logs', 'Petroleum Deposits')
    )
    if database == 'Topo-bathymetry':
        elevation_app()
    if database == 'Wellbore data NPD':
        npd_app()
    if database == 'Athabasca well logs':
        st.set_option('deprecation.showPyplotGlobalUse', False)
        ags_app()
    if database == 'Petroleum Deposits':
        deposits_app()

def _add_choice(choices, to_ignore, path, val):
    path = list(path)
    if path:
        key = path[-1]
        if isinstance(val, dict):
            if len(path) == 1:
                st.markdown("------")
            h = len(path) + 1 if len(path) < 6 else 6
            st.markdown(f"{h * '#'} {key} :")
        elif isinstance(val, list):
            val = list(map(lambda x: "None" if x is None else x, val))
            val.sort()
            default = val if len(val) == 1 else None
            col1, col2 = st.columns([2, 1])
            choice = col1.multiselect(label=key, options=val, default=default)
            # Widget keys must be strings, so serialize the path
            ignore = col2.checkbox(label="ignore", value=path in default_ignore, key=str(path))
            if ignore:
                to_ignore.append(path)
            else:
                choices.append((path, choice))
        else:
            return 0

def run():
    import streamlit as st
    import pandas as pd
    import plotly.express as px

    df = pd.DataFrame(px.data.gapminder())
    # Countries
    clist = df['country'].unique()
    country = st.selectbox("Select a country:", clist)

    col1, col2 = st.columns(2)
    fig = px.line(df[df['country'] == country], x="year", y="gdpPercap",
                  title="GDP per Capita")
    col1.plotly_chart(fig, use_container_width=True)
    fig = px.line(df[df['country'] == country], x="year", y="pop",
                  title="Population Growth")
    col2.plotly_chart(fig, use_container_width=True)

def writeData(data, name, dataType="value"):
    # e.g. name="Gross Profit", data=financials
    st.subheader(name)
    col1, col2, col3, col4 = st.columns([1, 1, 1, 1])
    cols = [col1, col2, col3, col4]
    sorted_column = data.loc[name].sort_values(ascending=False)
    for i in range(4):
        position = sorted_column.index.get_loc(data.columns[i])
        if dataType == "percent":
            data.loc[name][i] = str(round(data.loc[name][i] * 100, 1)) + "%"
        else:
            # Scale to millions or billions of euros, whichever reads better
            if round(data.loc[name][i] / 1000000000, 2) < 1:
                data.loc[name][i] = str(round(data.loc[name][i] / 1000000, 2)) + " M€"
            else:
                data.loc[name][i] = str(round(data.loc[name][i] / 1000000000, 2)) + " B€"
        with cols[i]:
            st.text(data.columns[i].year)
            # Use markdown so the value can be colored via its CSS class
            st.markdown('<p class="%s">%s</p>' % (text_styles[position], data.loc[name][i]),
                        unsafe_allow_html=True)

def main():
    st.markdown(
        """
        <style>
        [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
            width: 250px;
        }
        [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
            width: 250px;
            margin-left: -250px;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
    header_cols = st.columns(1)  # a single full-width column; (1) was just the int 1
    header_cols[0].title("Welcome to Galaxy Finder")
    header_cols[0].markdown(
        """
        Created by [George Stein](https://georgestein.github.io/)
        """)
    display_method = header_cols[-1].button('Interested in learning how this works?')
    if display_method:
        describe_method()
    else:
        galaxy_search()

def app():
    st.write('## PCA')
    # TODO: functionalize
    sampleData = pd.read_csv("./data/test/SL1344_test/PCA/mageck_14_2_batch.txt",
                             sep="\t", index_col=0)
    countData = pd.read_csv("./data/test/SL1344_test/PCA/test8.normalized.txt",
                            sep="\t", index_col=0).drop("Gene", axis=1)
    countData = np.log2(countData + 0.5)

    c1, c2 = st.columns((4, 1))
    c2.write('### PCA Options')
    numPCs = c2.slider("Select number of Principal Components",
                       min_value=2, max_value=50, value=10)
    numGenes = c2.slider("Number of genes to use", value=500, max_value=countData.shape[0])
    choose_by = c2.selectbox('Choose genes based on highest',
                             ['variance', 'log2FoldChange (not implemented)'])
    pDf, pc_var = find_PCs(countData, sampleData, numPCs, numGenes, choose_by)
    pcX_labels = [f'PC{i}' for i in range(1, numPCs + 1)]
    expVars = [c for c in pDf.columns if c not in pcX_labels]
    pcX = c2.selectbox('X-axis component', pcX_labels)
    pcY = c2.selectbox('Y-axis component', [pc for pc in pcX_labels if pc != pcX])
    pcVar = c2.radio('Variable to highlight', expVars)

    fig = px.scatter(pDf, x=pcX, y=pcY, color=pcVar,
                     labels={pcX: f'{pcX}, {pc_var[pcX]} % Variance',
                             pcY: f'{pcY}, {pc_var[pcY]} % Variance'},
                     height=700, hover_data=expVars, hover_name=pDf.index)
    fig.update_layout(autosize=True, font=dict(size=18), paper_bgcolor='rgba(0,0,0,0)')
    fig.update_traces(marker=dict(size=12, line=dict(width=2, color='DarkSlateGrey')),
                      selector=dict(mode='markers'))
    c1.write(f'### {pcX} vs {pcY}, highlighting {pcVar}')
    c1.plotly_chart(fig, use_container_width=True)

    c3, c4 = st.columns(2)
    pDf_sum = pDf.groupby(pcVar).median()
    varDf = pd.DataFrame.from_dict(pc_var, orient='index').reset_index()
    varDf.columns = ['PC', '% Variance']
    fig2 = px.line(varDf, x='PC', y='% Variance', markers=True, labels={'PC': ''})
    fig2.update_traces(marker=dict(size=12, line=dict(width=2, color='DarkSlateGrey')))
    c3.write('### Scree Plot')
    c3.plotly_chart(fig2)
    c4.write(f'### PCs summarized by {pcVar}')
    c4.plotly_chart(px.imshow(pDf_sum), use_container_width=True)

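# `find_PCs` is not shown here; a minimal sketch of what it plausibly does
# (scikit-learn PCA on the top-variance genes, joined back onto the sample
# metadata) -- an assumption about the helper, not its actual code:
from sklearn.decomposition import PCA
import pandas as pd

def find_PCs(countData, sampleData, numPCs, numGenes, choose_by='variance'):
    # choose_by='log2FoldChange' is not implemented, mirroring the UI hint
    top = countData.loc[countData.var(axis=1).nlargest(numGenes).index]
    pca = PCA(n_components=numPCs)
    scores = pca.fit_transform(top.T)  # samples become rows
    pDf = pd.DataFrame(scores, index=top.columns,
                       columns=[f'PC{i}' for i in range(1, numPCs + 1)])
    pDf = pDf.join(sampleData)  # assumes sampleData is indexed by sample name
    pc_var = {f'PC{i}': round(v * 100, 2)
              for i, v in enumerate(pca.explained_variance_ratio_, start=1)}
    return pDf, pc_var
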
def image_and_header():
    image = "static/personagem2.png"
    col1, mid, col2 = st.columns([30, 10, 40])
    with col1:
        st.image(image, use_column_width=True)
    with col2:
        st.write("# World's most amazing trading bots!!!")