def main():
    # set up a title
    st.title('Select a tool')
    # Once we have the dependencies, add a selector for the app mode on the sidebar.
    model_option = st.selectbox(
        '',
        (
            'Daily geoactivity tool',
            'GPS TEC tool',
            'AMPERE FACs forecast',
            'Substorm onset forecast'
        )
    )
    # we'll need a session state to switch between dates,
    # basically the prev day and next day buttons!
    # session state details for the geo_activity_tool page
    geo_all_param_list = [
        "DSCOVER", "OMNI", "STORM",
        "SUBSTORM", "SUPERDARN"
    ]
    nhours_plot_default = 0
    ndays_plot_default = 1
    inp_start_date = datetime.date(2018, 1, 2)
    inp_start_time = datetime.time(0, 0)
    # session state details for the sson_model page
    data_obj = dwnld_sw_imf_rt.DwnldRTSW()
    url_data = data_obj.dwnld_file()
    if url_data is not None:
        data_obj.read_url_data(url_data)
    # repeat the operations we do with the sson_model calc
    sw_imf_df = data_obj.read_stored_data()
    sw_imf_df.set_index('propagated_time_tag', inplace=True)
    sw_imf_df = sw_imf_df.resample('1min').median()
    # linearly interpolate data
    sw_imf_df.interpolate(method='linear', axis=0, inplace=True)
    omn_end_time = sw_imf_df.index.max()
    # common session state details for all the pages
    state = session_state.get(
        plot_start_date=inp_start_date,
        plot_start_time=inp_start_time,
        plot_param_list=geo_all_param_list,
        plot_nhours_plot=nhours_plot_default,
        plot_ndays_plot=ndays_plot_default,
        date_sson_hist_plot=omn_end_time
    )
    if model_option == 'Daily geoactivity tool':
        geo_activity_page(
            state,
            local_data_store="./geo_tool/data/sqlite3/",
            plot_style="classic",
            inp_start_date=inp_start_date,
            inp_start_time=inp_start_time,
            all_param_list=geo_all_param_list,
            nhours_plot_default=nhours_plot_default,
            ndays_plot_default=ndays_plot_default
        )
    elif model_option == 'GPS TEC tool':
        gps_tec_page()
    elif model_option == 'AMPERE FACs forecast':
        fac_model_page()
    else:
        ss_onset_page(state)
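# Assumed module context for the snippet above (a sketch, not the project's
# actual header): main() relies on these names existing at module level.
# `session_state` and `dwnld_sw_imf_rt` are project modules; the import paths
# for the page functions are hypothetical placeholders.
import datetime

import streamlit as st

import session_state
import dwnld_sw_imf_rt
from geo_tool.geo_activity_page import geo_activity_page  # hypothetical path
from gps_tec_tool.gps_tec_page import gps_tec_page        # hypothetical path
from fac_model.fac_model_page import fac_model_page       # hypothetical path
from sson_model.ss_onset_page import ss_onset_page        # hypothetical path


if __name__ == "__main__":
    main()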
import os
import uuid

import numpy as np
import streamlit as st

from additional_utils import load_augmentations_config
from augmentation import apply_changes, dict_update, select_next_aug
from code_generator import build_code
from files_uploaders import image_uploader
from layout import return_layout
from session_state import get
from state_dict import aug_dict, clear_dict, oneof_dict, state_dict
from string_builders import build_string

# one session-state object per browser session, keyed by a random id
session_state = get(id=uuid.uuid4())
root_path = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(root_path, 'augmentation.json')

clear_dict(session_state)
image_uploader()

st.text('Upload an image, then select a transformation from the list.\n'
        'To apply OneOf, use OneOf at the beginning and StopOneOf to close it.')

if 'image' in list(state_dict.keys()):  # noqa: C901
    st.image(state_dict['image'])
    image_params = {
        'width': state_dict['image_array'].shape[1],
        'height': state_dict['image_array'].shape[0],
    }
        remove_bom=False,
        normalization="NFC",
    )
    stream_iterator = iter(stream)
    # drain the fixed-text stream line by line and write it to the output file
    while stream_iterator:
        try:
            line = next(stream_iterator)
            out_file.write(line)
        except StopIteration:
            break
    out_file.close()


if __name__ == "__main__":
    # Session initialization
    session = get(count=1, expected="", successes=0)

    with st.spinner("Loading neural network..."):
        mnist = Net()
        mnist.load_state_dict(
            torch.load("models/mnist_cnn.pt", map_location="cpu"))

    # Read the text file and rewrite it with a clean encoding
    input_file = open("texts.json", "r")
    output_file = open("texts_unicode.json", "w")
    fix_file_encoding(input_file, output_file)
    with open("texts_unicode.json") as json_file:
        texts = json.load(json_file)
    languages = texts["languages"]

    # language choice
    language = st.sidebar.radio(" ", list(languages.keys()))
# Run the predict page
st.title('Machine Learning Web App - Image Captioning')
st.header("Final Project - Advanced Statistics Topics: ML and DS")
st.header(
    "Click [here](https://github.com/juanse1608/AST-ImageCaptioning/blob/main/README.md) "
    "to know more about the project!"
)
st.write('''Upload a photo and see the predicted caption for it''')

# File uploader allows the user to add their own image
uploaded_file = st.file_uploader(label="Upload Image", type=["png", "jpeg", "jpg"])

# Set up session state to remember the state of the app so a refresh isn't always needed
# See: https://discuss.streamlit.io/t/the-button-inside-a-button-seems-to-reset-the-whole-app-why/1051/11
session_state = session_state.get(pred_button=False)

# Create logic for app flow
if not uploaded_file:
    st.warning("Please upload an image.")
    session_state.pred_button = False
    st.stop()
else:
    session_state.uploaded_image = uploaded_file.read()
    st.image(session_state.uploaded_image, use_column_width=True)
    value = st.selectbox(
        "Select Prediction Type",
        ("Argmax", "Random"),
        help='''__Argmax__ picks the value/token with the highest probability.
__Random__ picks the value/token randomly using the distribution of the predictions.'''
    )
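# Sketch of the flow that typically follows (an assumption, not the project's
# exact code): gate the expensive caption prediction behind a button so the
# model only runs on demand. `predict_caption` is a hypothetical helper
# standing in for the project's captioning model.
pred_button = st.button("Predict")
if pred_button:
    session_state.pred_button = True

if session_state.pred_button:
    with st.spinner("Generating caption..."):
        # hypothetical call: the decode strategy comes from the selectbox above
        caption = predict_caption(session_state.uploaded_image, mode=value)
    st.write(f"Predicted caption: {caption}")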
import streamlit as st

import home
import annotator_tool
import session_state

PAGES = {"Home": home, "Annotation tool": annotator_tool}
session = session_state.get(message_value=None)


def main():
    st.sidebar.title('Navigation')
    selection = st.sidebar.radio("Go to", list(PAGES.keys()))
    page = PAGES[selection]
    with st.spinner(f'Loading {selection} ...'):
        if page == annotator_tool:
            page.main(session)
        else:
            page.main()
    st.sidebar.info('This tool is an example.')


if __name__ == "__main__":
    main()
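# The snippets in this collection share a small `session_state` helper whose
# get(**defaults) call returns an object with attributes that persist across
# Streamlit reruns. Below is a minimal sketch of an equivalent helper (an
# assumption: the original module predates st.session_state and relied on
# Streamlit internals; this version assumes a release that ships
# st.session_state natively).
import streamlit as st


class _State:
    """Attribute-style view over st.session_state."""

    def __getattr__(self, name):
        return st.session_state[name]

    def __setattr__(self, name, value):
        st.session_state[name] = value


def get(**defaults):
    # seed each default only once; later reruns keep whatever was stored
    for key, value in defaults.items():
        st.session_state.setdefault(key, value)
    return _State()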
import streamlit as st
import pandas as pd
import random
import sys
import urllib.parse

from package import visuals
from package.reports import reports

sys.path.append('..')
import session_state
from shared import load_explanation_group

state = session_state.get(sample_id=0)


def download_report_url(report_html, report_name):
    # URL-encode the report so it can be embedded in a data: link
    report_html = urllib.parse.quote(report_html, safe='')
    report_name = f'{report_name}.html'
    href = f'<a href="data:text/html,{report_html}" download="{report_name}">Download Report</a>'
    st.markdown(href, unsafe_allow_html=True)


def show(explanation_group_path):
    explanation_group = load_explanation_group(explanation_group_path)
    sample_id_placeholder = st.sidebar.empty()
    state.sample_id = sample_id_placeholder.text_input(
        label='Select individual (by ID):',
        value=state.sample_id
import streamlit as st
import requests
import time
import subprocess

import session_state

current_session = session_state.get(load=False)

st.title('NYC Taxi Demand Prediction')
st.sidebar.subheader("Predict Option")

month = st.sidebar.number_input('Month', min_value=1, max_value=12, value=1)
day = st.sidebar.number_input('Day', min_value=1, max_value=31, value=14)
weekday = st.sidebar.number_input('weekday', min_value=0, max_value=6, value=2)
hour = st.sidebar.number_input('Hour', min_value=0, max_value=23, value=17)
is_weekend = st.sidebar.number_input('Is Weekend', min_value=0, max_value=1, value=0)
lag_1h_cnt = st.sidebar.number_input('lag_1h_cnt', min_value=0, value=1103)
lag_1d_cnt = st.sidebar.number_input('lag_1d_cnt', min_value=0, value=1189)
lag_7d_cnt = st.sidebar.number_input('lag_7d_cnt', min_value=0, value=1730)
lag_14d_cnt = st.sidebar.number_input('lag_14d_cnt', min_value=0, value=1800)
avg_14d_cnt = st.sidebar.number_input('avg_14d_cnt', min_value=0.0, value=969.17)
avg_21d_cnt = st.sidebar.number_input('avg_21d_cnt', min_value=0.0, value=879.14)
std_14d_cnt = st.sidebar.number_input('std_14d_cnt', min_value=0, value=507)
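# Sketch of how these inputs are typically submitted for prediction (an
# assumption: the button, endpoint URL, payload keys, and response handling
# below are hypothetical, chosen to match the widget names above).
if st.sidebar.button('Predict'):
    payload = {
        'month': month, 'day': day, 'weekday': weekday, 'hour': hour,
        'is_weekend': is_weekend,
        'lag_1h_cnt': lag_1h_cnt, 'lag_1d_cnt': lag_1d_cnt,
        'lag_7d_cnt': lag_7d_cnt, 'lag_14d_cnt': lag_14d_cnt,
        'avg_14d_cnt': avg_14d_cnt, 'avg_21d_cnt': avg_21d_cnt,
        'std_14d_cnt': std_14d_cnt,
    }
    with st.spinner('Requesting prediction...'):
        # hypothetical model-serving endpoint
        response = requests.post('http://localhost:8000/predict', json=payload)
    st.write(response.json())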