# Audio #audio_file = open("examplemusic.mp3").read() #st.audio(audio_file, format = 'mp3') # Widget # Checkbox if (st.checkbox("Show/Hide")): st.text("Showing or Hiding Widget") # Radio Button status = st.radio("What is your status", ("Active", "Inactive")) if (status == 'Active'): st.success("you are Active") else: st.warning("Inactive, Activate") # SelectBox occupation = st.selectbox( "Your Occupation", ["Programmer", "Data Scientist", "Doctor", "Businessman"]) st.write("You Selected this option ", occupation) # MultiSelect location = st.multiselect("Where do you work?",
# front end elements of the web page html_temp = """ <div style ="background-color:pink;padding:13px"> <h1 style ="color:black;text-align:center;">Coronary Heart disease diagnosis App</h1> </div> """ # display the front end aspect st.markdown(html_temp, unsafe_allow_html = True) st.subheader('by Bao xin ') # following lines create boxes in which user can enter data required to make prediction gender = st.radio("Select Gender: ", ('male', 'female')) chestpain = st.selectbox('Chest Pain Type',("Typical angina","Atypical angina","Non-anginal pain","Asymptomatic")) hypertension=st.selectbox('hypertension',['Yes','No']) hpgrade = st.selectbox('hygrade',("level 3","level 2","level 1","prehypertension","normal")) diabete=st.selectbox('diabete',['Yes','No']) smokecurrent=st.selectbox('smokecurrent',['Yes','No']) max_ccach=st.number_input('The maximum double carotid plaque height') max_ccacl=st.number_input('The maximum double carotid plaque length') rccach=st.number_input('right carotid plaque height') ps=st.number_input('The addition of double carotid plaque height') #user_input=preprocess(sex,cp,exang, fbs, slope, thal ) pred=preprocess(gender, hypertension, diabete, smokecurrent, hpgrade,chestpain, rccach, max_ccach, max_ccacl, ps)
def main():
    """Streamlit app: pick a CSV from ./datasets, explore it, and plot columns."""

    def file_selector(folder_path='./datasets'):
        # Offer every file in `folder_path` in a selectbox; return the chosen path.
        filenames = os.listdir(folder_path)
        selected_filename = st.selectbox("Select a file", filenames)
        return os.path.join(folder_path, selected_filename)

    filename = file_selector()
    st.info("You selected {}".format(filename))
    #Read data function
    df = pd.read_csv(filename)
    #Show dataset
    if st.checkbox("Show Dataset"):
        # Second positional argument of number_input is min_value (1 row minimum).
        number = st.number_input("Number of Rows to View", 1)
        st.dataframe(df.head(number))
    #Show columns
    st.write("**Selected dataset column names**")
    if st.button("Column Names"):
        st.write(df.columns)
    #Show shape
    st.write("**Selected dataset shape**")
    if st.checkbox("Shape of Dataset"):
        data_dim = st.radio("Show dimension by", ("Rows", "Columns"))
        if data_dim == 'Rows':
            st.text("Number of Rows")
            st.write(df.shape[0])
        elif data_dim == 'Columns':
            st.text("Number of Columns")
            st.write(df.shape[1])
        else:
            # Unreachable with the two radio options above; kept as a fallback.
            st.write(df.shape)
    #Select columns
    st.write("**Selected dataset columns**")
    if st.checkbox("Select columns to show"):
        all_columns = df.columns.tolist()
        selected_columns = st.multiselect("Select", all_columns)
        new_df = df[selected_columns]
        st.dataframe(new_df)
    #Show datatypes
    st.write("**Selected dataset datatypes**")
    if st.button("Data Types"):
        st.write(df.dtypes)
    #Plot and visualization
    st.subheader("Data Visualization")
    all_columns_names = df.columns.tolist()
    type_of_plot = st.selectbox("Select the type of Plot", ["area", "bar"])
    selected_column_names = st.multiselect("Select Columns to Plot",
                                           all_columns_names)
    if st.button("Generate Plot"):
        st.success("Generating Customizable Plot of {} for {} ".format(
            type_of_plot, selected_column_names))
        #Plots by Streamlit:
        if type_of_plot == 'area':
            cust_data = df[selected_column_names]
            st.area_chart(cust_data)
        elif type_of_plot == 'bar':
            cust_data = df[selected_column_names]
            st.bar_chart(cust_data)
def _show_imputation_report(df_inputado):
    """Render the post-imputation NA report and a download link for *df_inputado*."""
    exploracao_inputado = pd.DataFrame({
        'nomes': df_inputado.columns,
        'tipos': df_inputado.dtypes,
        'NA #': df_inputado.isna().sum(),
        'NA %': (df_inputado.isna().sum() / df_inputado.shape[0]) * 100
    })
    # Object (non-numeric) columns are never imputed, so hide them here.
    st.table(exploracao_inputado[
        exploracao_inputado['tipos'] != 'object']['NA %'])
    st.subheader('Dados Inputados faça download abaixo : ')
    st.markdown(get_table_download_link(df_inputado),
                unsafe_allow_html=True)


def main():
    """Streamlit page: upload a CSV, summarize it, and impute missing numeric values."""
    st.image('logo.png', width=200)
    st.title('AceleraDev Data Science')
    st.subheader('Semana 2 - Pré-processamento de Dados em Python')
    st.image('https://media.giphy.com/media/KyBX9ektgXWve/giphy.gif',
             width=200)
    file = st.file_uploader(
        'Escolha a base de dados que deseja analisar (.csv)', type='csv')
    if file is not None:
        st.subheader('Analisando os dados')
        df = pd.read_csv(file)
        st.markdown('**Número de linhas:**')
        st.markdown(df.shape[0])
        st.markdown('**Número de colunas:**')
        st.markdown(df.shape[1])
        st.markdown('**Visualizando o dataframe**')
        number = st.slider('Escolha o numero de linhas que deseja ver',
                           min_value=1, max_value=20)
        st.dataframe(df.head(number))
        st.markdown('**Nome das colunas:**')
        st.markdown(list(df.columns))
        # Per-column dtype and missing-data statistics.
        exploracao = pd.DataFrame({
            'nomes': df.columns,
            'tipos': df.dtypes,
            'NA #': df.isna().sum(),
            'NA %': (df.isna().sum() / df.shape[0]) * 100
        })
        st.markdown('**Contagem dos tipos de dados:**')
        st.write(exploracao.tipos.value_counts())
        st.markdown('**Nomes das colunas do tipo int64:**')
        st.markdown(list(exploracao[exploracao['tipos'] == 'int64']['nomes']))
        st.markdown('**Nomes das colunas do tipo float64:**')
        st.markdown(list(
            exploracao[exploracao['tipos'] == 'float64']['nomes']))
        st.markdown('**Nomes das colunas do tipo object:**')
        st.markdown(list(exploracao[exploracao['tipos'] == 'object']['nomes']))
        st.markdown('**Tabela com coluna e percentual de dados faltantes :**')
        st.table(exploracao[exploracao['NA #'] != 0][['tipos', 'NA %']])
        st.subheader('Inputaçao de dados númericos :')
        percentual = st.slider(
            'Escolha o limite de percentual faltante limite para as colunas vocë deseja inputar os dados',
            min_value=0, max_value=100)
        # Columns whose missing-data percentage is below the chosen threshold.
        lista_colunas = list(
            exploracao[exploracao['NA %'] < percentual]['nomes'])
        select_method = st.radio('Escolha um metodo abaixo :',
                                 ('Média', 'Mediana'))
        st.markdown('Você selecionou : ' + str(select_method))
        if select_method == 'Média':
            _show_imputation_report(
                df[lista_colunas].fillna(df[lista_colunas].mean()))
        if select_method == 'Mediana':
            # BUG FIX: the original filled with .mean() in this branch too,
            # so choosing "Mediana" silently imputed the mean instead.
            _show_imputation_report(
                df[lista_colunas].fillna(df[lista_colunas].median()))
def _imputation_report(df_inputado):
    """Display the NA report for the imputed frame and a CSV download link."""
    exploracao_inputado = pd.DataFrame({
        'nomes': df_inputado.columns,
        'tipos': df_inputado.dtypes,
        'NA #': df_inputado.isna().sum(),
        'NA %': (df_inputado.isna().sum() / df_inputado.shape[0]) * 100
    })
    # Only numeric columns are imputed; object columns are skipped in the report.
    st.table(exploracao_inputado[
        exploracao_inputado['tipos'] != 'object']['NA %'])
    st.subheader('Dados Inputados faça download abaixo : ')
    st.markdown(get_table_download_link(df_inputado),
                unsafe_allow_html=True)


def main():
    """Streamlit page: upload a CSV, show summaries, impute numeric NAs by mean/median."""
    # Page scaffolding: logo, title, section headers.
    st.image('logo.png', width=200)
    st.title('AceleraDev Data Science')
    st.subheader('Semana 2 - Pré-processamento de dados em Python')
    # Data upload.
    st.subheader('Importação da base de dados')
    file = st.file_uploader(
        'Escolha a base de dados que deseja analisar (formato csv)',
        type='csv')
    if file is not None:
        df = pd.read_csv(file)
        # Number of rows to preview in the views below.
        slider = st.slider('Valores', 0, 100)
        # Data summary.
        st.subheader('Resumo dos dados')
        st.markdown('**Número de linhas:**')
        st.markdown(df.shape[0])
        st.markdown('**Número de colunas:**')
        st.markdown(df.shape[1])
        # Preview as an interactive dataframe.
        st.subheader('Apresentação dos dados')
        st.markdown('Formato: Dataframe')
        st.dataframe(df.head(slider))
        # Preview as a static table.
        st.markdown('Formato: Table')
        st.table(df.head(slider))
        # Per-column dtype and missing-data statistics.
        exploracao = pd.DataFrame({
            'nomes': df.columns,
            'tipos': df.dtypes,
            'NA #': df.isna().sum(),
            'NA %': (df.isna().sum() / df.shape[0]) * 100
        })
        st.markdown('**Contagem dos tipos de dados:**')
        st.write(exploracao.tipos.value_counts())
        st.markdown('Nomes das colunas do tipo int64:')
        st.markdown(list(exploracao[exploracao['tipos'] == 'int64']['nomes']))
        st.markdown('**Nomes das colunas do tipo float64:**')
        st.markdown(list(
            exploracao[exploracao['tipos'] == 'float64']['nomes']))
        st.markdown('**Nomes das colunas do tipo object:**')
        st.markdown(list(exploracao[exploracao['tipos'] == 'object']['nomes']))
        st.markdown('**Tabela com coluna e percentual de dados faltantes :**')
        st.table(exploracao[exploracao['NA #'] != 0][['tipos', 'NA %']])
        st.subheader('Inputaçao de dados númericos :')
        percentual = st.slider(
            'Escolha o limite de percentual faltante limite para as colunas vocë deseja inputar os dados',
            min_value=0, max_value=100)
        # Columns whose missing percentage is below the chosen threshold.
        lista_colunas = list(
            exploracao[exploracao['NA %'] < percentual]['nomes'])
        select_method = st.radio('Escolha um metodo abaixo :',
                                 ('Média', 'Mediana'))
        st.markdown('Você selecionou : ' + str(select_method))
        if select_method == 'Média':
            _imputation_report(
                df[lista_colunas].fillna(df[lista_colunas].mean()))
        if select_method == 'Mediana':
            # BUG FIX: the original imputed with .mean() in the median branch.
            _imputation_report(
                df[lista_colunas].fillna(df[lista_colunas].median()))
# --- Face-editing fragment: load lip/hair masks and translate a chosen feature ---
# NOTE(review): `file_source`, `option_lip`, `select_list`, `label_list`,
# `im_skin`, `im_neck` and `L` are defined earlier in the file (not visible
# in this fragment); the 'nose' branch continues below this excerpt.
filename_llip = file_source + 'l_lip/' + 'l_lip_' + str(option_lip) + '.png'
filename_ulip = file_source + 'u_lip/' + 'u_lip_' + str(option_lip) + '.png'
# Read lower/upper lip masks as single-channel (grayscale) images; keep a
# working copy so the originals stay untouched.
im_llip = cv2.imread(filename_llip, 0)
im_llip_new = im_llip.copy()
im_ulip = cv2.imread(filename_ulip, 0)
im_ulip_new = im_ulip.copy()
option_hair = st.sidebar.selectbox(' Select HairStyle ', select_list)
filename_hair = file_source + 'hair/' + 'hair_' + str(option_hair) + '.png'
im_hair = cv2.imread(filename_hair, 0)
im_hair_new = im_hair.copy()
#radio option to move a particular facial attribute
rad_option = st.radio("select feature to move", label_list, 0)
#moves skin and face simultaneously for user ease
if (rad_option == 'skin'):
    x = st.slider('x_step', -512, 512, 0)
    y = st.slider('y_step', -512, 512, 0)
    # 2x3 affine matrix translating the mask by (x, y) pixels.
    T = np.float32([[1, 0, x], [0, 1, y]])
    im_skin_new = cv2.warpAffine(im_skin, T, (L, L))
    im_neck_new = cv2.warpAffine(im_neck, T, (L, L))
if (rad_option == 'nose'):
    x = st.slider('x_step', -512, 512, 0)
    y = st.slider('y_step', -512, 512, 0)
def main():
    """Streamlit app that scrapes tweets (via twint helpers) and offers a CSV download."""
    st.write("# Generate Tweet Datasets !")
    st.write(
        "[By Régis Amon](https://www.linkedin.com/in/r%C3%A9gis-amon-87669665/)"
    )
    st.write(
        "[Using the wonderfull twint library by twintproject](https://github.com/twintproject/twint/tree/master/twint)"
    )
    # Exactly one of these is set below, depending on the chosen mode; the
    # other stays None so only one of the two download sections renders.
    search = None
    username = None
    search_type = st.radio('User or search', ["user", "search"])
    if search_type == "user":
        username = st.text_input("Enter tweet account")
    elif search_type == "search":
        search = st.text_input("Enter a search or hashtag")
    #username = st.text_input("username")
    limit = st.slider("Limit of tweets", 60, 3000, 500, step=20)
    # `today` is a module-level value defined elsewhere in the file.
    since = st.date_input('Since', max_value=today)
    since = str(since)
    lang = 'en'
    if username is not None:
        #st.button(("Get tweets"))
        if st.button("Get tweets"):
            # The scraping helpers write results to 'none.csv'; remove any
            # stale copy before scraping.
            if os.path.isfile('none.csv'):
                os.remove('none.csv')
            else:
                print("File not exist")
            #st.write("### Word cloud")
            st.write(get_tweet(username, limit, lang, since),
                     use_column_width=True)
            df = pd.read_csv("none.csv")
            st.dataframe(df)
            #st.write("Right click on the image then choose ***Save as*** to download the Wordcloud")
            #st.balloons()
            st.markdown(get_table_download_link(df), unsafe_allow_html=True)
    if search is not None:
        if st.button("Get search"):
            if os.path.isfile('none.csv'):
                os.remove('none.csv')
            else:
                print("File not exist")
            #st.write("### Word cloud")
            st.write(get_tweet_search(search, limit, lang, since),
                     use_column_width=True)
            df = pd.read_csv("none.csv")
            st.dataframe(df)
            #st.write("Right click on the image then choose ***Save as*** to download the Wordcloud")
            #st.balloons()
            st.markdown(get_table_download_link(df), unsafe_allow_html=True)
def test_invalid_value_range(self):
    """An index outside the options tuple must raise StreamlitAPIException."""
    # Two options -> valid default indices are 0 and 1; index 2 is out of range.
    options = ("m", "f")
    with self.assertRaises(StreamlitAPIException):
        st.radio("the label", options, 2)
def test_noneType_option(self):
    """A None option at the default index should be returned as None."""
    selected = st.radio("the label", (None, "selected"), 0)
    self.assertEqual(None, selected)
# st.markdown("---") st.markdown("## **② Paste KWs/Broken links & Run Model **") linesDeduped2 = [] c0, c1, c2, c3, c4, c5, c6, c7 = st.beta_columns([0.8, 0.55, 1, 0.55, 1, 0.55, 1, 1.5]) with c1: st.text("") st.text("") st.text("") with c2: RadioMapWhat = st.radio( "What do you want to map?", options=("Map Broken URLs", "Map Keywords") ) with c3: st.text("") st.text("") st.text("") with c4: RadioMapTo = st.radio( "Where to map them?", options=("To crawled URL", "To crawled titles") ) with c5: st.text("") st.text("")
def test_invalid_value(self):
    """A non-int default index (here a string) must raise StreamlitAPIException."""
    bad_index = "1"
    with self.assertRaises(StreamlitAPIException):
        st.radio("the label", ("m", "f"), bad_index)
elif select == 'Mar': data = data[data[DATE_TIME].dt.month == 3] elif select == 'Apr': data = data[data[DATE_TIME].dt.month == 4] elif select == 'May': data = data[data[DATE_TIME].dt.month == 5] else: data = data hour = st.slider("Hour to look at", 0, 23) data_show = data[data[DATE_TIME].dt.hour == hour] options = st.radio('Select Start-Stop', ('Start', 'Stop', "All")) if options == 'Start': data_1 = data_show[data_show["flag"] == 'Start'] elif options == "Stop": data_1 = data_show[data_show["flag"] == "Stop"] else: data_1 = data_show st.write(data_1) st.subheader("Geo data between %i:00 and %i:00" % (hour, (hour + 1) % 24)) midpoint = (np.average(data_1["latitude"]), np.average(data_1["longitude"])) st.write( pdk.Deck(
st.write("Write Here") st.write("Python Range with WRITE", range(10)) st.text("Display JSON") dico = {'name': "saphiros", 'age': 24} st.json(dico) st.header("TEST BUTTON") st.button("simple button") st.text("checkbox") if st.checkbox("show/hide"): #Do Action st.text("some action") status = st.radio("ton statut", ('active', 'Inactive')) if status == 'active': st.success("tu es active") else: st.warning("non active !") st.text("boite de selection") occupation = st.selectbox("ton poste", ['Développeur', 'analyste', 'Doctor']) st.write("So, you are a ", occupation) st.write("selection multiple") location = st.multiselect("ou est tu ?", ("Paris", "Londre", "New York")) st.write("You Selected", len(location), "location")
def main():
    """Streamlit page: upload a CSV, summarize missing data, impute numeric NAs."""
    st.title('AceleraDev Data Science')
    st.subheader('Semana 2 - Pré-processamento de Dados em Python')
    st.markdown('Desenvolvedora: Natália Araújo')
    file = st.file_uploader(
        'Escolha a base de dados que deseja analisar (.csv)', type='csv')
    if file is not None:
        st.subheader('Analisando os dados')
        df = pd.read_csv(file)
        st.markdown('**Número de linhas:**')
        st.markdown(df.shape[0])
        st.markdown('**Número de colunas:**')
        st.markdown(df.shape[1])
        st.markdown('**Visualizando o dataframe**')
        number = st.slider('Escolha o numero de colunas que deseja ver',
                           min_value=1, max_value=20)
        st.dataframe(df.head(number))
        st.markdown('**Nome das colunas:**')
        st.markdown(list(df.columns))
        # Per-column dtype and missing-data statistics (English column names
        # in this variant: 'names'/'types').
        exploitation = pd.DataFrame({
            'names': df.columns,
            'types': df.dtypes,
            'NA #': df.isna().sum(),
            'NA %': (df.isna().sum() / df.shape[0]) * 100
        })
        st.markdown('**Contagem dos tipos de dados:**')
        st.write(exploitation.types.value_counts())
        st.markdown('**Nomes das colunas do tipo int64:**')
        st.markdown(
            list(exploitation[exploitation['types'] == 'int64']['names']))
        st.markdown('**Nomes das colunas do tipo float64:**')
        st.markdown(
            list(exploitation[exploitation['types'] == 'float64']['names']))
        st.markdown('**Nomes das colunas do tipo object:**')
        st.markdown(
            list(exploitation[exploitation['types'] == 'object']['names']))
        st.markdown('**Tabela com coluna e percentual de dados faltantes :**')
        # BUG FIX: the original indexed the Portuguese column name 'tipos',
        # but this frame's columns are 'names'/'types' -> KeyError at runtime.
        st.table(exploitation[exploitation['NA #'] != 0][['types', 'NA %']])
        st.subheader('Inputaçao de dados númericos :')
        percentual = st.slider(
            'Escolha o limite de percentual faltante limite para as colunas vocë '
            'deseja inputar os dados', min_value=0, max_value=100)
        # Columns whose missing percentage is below the chosen threshold.
        lista_colunas = list(
            exploitation[exploitation['NA %'] < percentual]['names'])
        select_method = st.radio('Escolha um metodo abaixo :',
                                 ('Mean', 'Median'))
        st.markdown('Você selecionou : ' + str(select_method))
        if select_method == 'Mean':
            df_imputed = df[lista_colunas].fillna(df[lista_colunas].mean())
            get_imputation(df_imputed)
        if select_method == 'Median':
            df_imputed = df[lista_colunas].fillna(df[lista_colunas].median())
            get_imputation(df_imputed)
def presentation():
    """Render section 2.4: simulating mode choices with Gumbel-distributed error terms."""
    st.markdown("## 2.4 Simulating Choices ")
    # Commented-out standalone Gumbel-distribution explorer (superseded by the
    # exercise block below):
    # if st.checkbox("Show Gumble Distribution?"):
    #     # Gumbel Distribution
    #     mu = st.slider("Select Mean value", min_value=float(-20), max_value=float(50), value=float(0), step=float(0.1))
    #     beta = st.slider("Select Variance value", min_value=float(0), max_value=float(10), value=float(1), step=float(0.1))
    #     val, plt = sc.gumbel_distrubution(mu=mu, beta=beta, count=10_00_000, plot=True)
    #     st.pyplot(plt)
    # ------------------------- Write Up for the section ------------------------- #
    # ---------------------------------------------------------------------------- #
    st.markdown(""" Here we will be simulating **Mode Choice Model** when origin is **Zone 1** and destination is **Zone 2**. Remember that the choosen alternative according to the MNL model is the utility maximizing alternative, i.e, the alternative $m$ for which: """)
    st.write(""" $$ U_{j,m}^i \gt U_{j,m'}^i \\thinspace \\forall m' \in \{car, pt, slow\} $$ or equally, $$ m = \\max_{m' \in \{car, pt, slow\}} U_{j,m'}^i $$ where, $$ U_{j,m'}^i = V_{j,m'}^i + \epsilon_{j,m'}^i $$ and $\epsilon_{j,m'}^i$ is I.I.D Gumble distributed error term representing uncertinity. """)
    st.markdown(""" One can thus simulate a choice by simulating draws of for respectively alternative and making a choice according to given equation. """)
    # ------------------------------------ EXERCISES ------------------------------#
    # ---------------------------------------------------------------------------- #
    st.markdown("## EXERCISES")
    # ------------------------------------ Ques2 ------------------------------#
    if st.checkbox(
            "(i) Simulating choices for each mode with gumbel distribution"):
        # Number of utility draws (defaults to 1,000,000; 10_00_000 uses
        # Indian-style digit grouping).
        sample_count = st.number_input("Number of samples",
                                       min_value=int(1000),
                                       max_value=int(50_00_000),
                                       value=int(10_00_000),
                                       step=int(1000))
        # Gumbel distribution parameters for the error terms.
        mu = st.slider("Select Mean value", min_value=float(-20),
                       max_value=float(50), value=float(0), step=float(0.1))
        beta = st.slider("Select Variance value", min_value=float(0),
                         max_value=float(10), value=float(1), step=float(0.1))
        if st.checkbox("Plot distribution?"):
            val, plot_gumbel = sc.gumbel_distrubution(mu=mu, beta=beta,
                                                      count=10_00_000,
                                                      plot=True)
            st.pyplot(plot_gumbel)
        # Origin and destination zones; ZONE is a module-level mapping from
        # zone label to zone id (defined elsewhere in the file).
        orig_zone = st.radio("Select Origin Zone", list(ZONE.keys()))
        dest_zone = st.radio("Select Destination Zone", list(ZONE.keys()))
        # Run the sampling model; count_dict holds per-mode win counts.
        with st.spinner("Sampling values"):
            max_U, max_U_index, count_dict = sc.model_sampling(
                num_samples=sample_count, mu=mu, beta=beta,
                origin_zone=ZONE[orig_zone],
                destination_zone=ZONE[dest_zone])
        st.success("Done simulating for {} samples!".format(sample_count))
        st.markdown(
            "### Number of times the mode is selected from {} samples".format(
                sample_count))
        # Counts and empirical choice probabilities, one row per mode
        # (assumes exactly 3 modes, matching MODE — TODO confirm).
        valu = np.array(list(count_dict.values())).reshape(3, 1)
        prob = np.divide(valu, sample_count).reshape(3, 1)
        data = np.hstack((valu, prob)).reshape(3, 2)
        df = pd.DataFrame(data, index=list(MODE.keys()),
                          columns=["COUNT", "PROBABILITY"])
        st.table(df)
        # Histogram of the maximum utilities across draws.
        fig = go.Figure(data=[go.Histogram(x=max_U, nbinsx=30)])
        fig.update_layout(
            barmode='group',
            title="Distrubution of max_U for {} samples".format(sample_count),
            title_font_size=20)
        st.plotly_chart(fig)
hop_length=512) spect = librosa.power_to_db(spect, ref=np.max) spect = np.expand_dims(spect, axis=( -1, 0, )) res = model.predict(np.array(spect)) return res if __name__ == "__main__": st.title("Alpha Ai Solution") st.subheader("Cough Detection Web Application") activites = ["Members", "Chirag", "Garima", "Arshid", "Ritik"] choice = st.sidebar.selectbox("Alpha Team", activites) status = st.radio("Activate the App", ("Start", "Stop")) model = keras.models.load_model('./updated_model.h5') if status == "Start": st.success("its Activated") while (True): FORMAT = pyaudio.paFloat32 CHANNELS = 2 RATE = 44100 CHUNK = 1024 RECORD_SECONDS = 4 WAVE_OUTPUT_FILENAME = "./file.wav" p = pyaudio.PyAudio() stream = p.open(format=pyaudio.paFloat32, channels=2, rate=RATE, input=True,
# --- TOS-clause ranking and summary section ---
# NOTE(review): `df`, `gen_sum`, `text`, `normalize` and `summarize` are
# defined earlier in the file (not visible in this fragment).
df=df.sort_values(by=['score'])
st.header('Ranking of TOS clauses')
# Lowest scores first, so head() shows the best-scoring clauses.
st.subheader('The Best Clauses of the Contract are')
st.dataframe(df.head(),width=900,height=400)
st.subheader('The Questionable Clauses of the Contract include')
st.dataframe(df.tail(),900,400)
#st.dataframe(df.style.highlight_max(axis=0))
st.header('Summary')
if st.checkbox("Summary"):
    #location = st.multiselect("View Summary",("General","Add ratio","Add maximum wordlimit"))
    location = st.radio("View Summary",("General","Add ratio","Add maximum wordlimit"))
    if location=='General':
        gen_sum=normalize(gen_sum)
        st.warning(gen_sum)
    elif location=='Add ratio':
        st.subheader('Add ratio percentage')
        level = st.slider("Select Ratio level",10,100)
        # The summarizer ratio is a fraction of the original text length.
        summ=summarize(text, ratio=level/100.0)
        #print(summ)
        st.warning(summ)
    elif location=='Add maximum wordlimit':
        # Cap the summary at a chosen word count, up to the full text length.
        level = st.slider("Add word limit",10,len(text.split()))
        summ=summarize(text, word_count=level)
def eda(df):
    """Interactive exploratory data analysis of *df*: previews, stats, and plots."""
    # Show Dataset
    if st.checkbox("Show Dataset"):
        # Second positional argument is the min/default value (5 rows).
        number = st.number_input("Numbers of rows to view", 5)
        st.dataframe(df.head(number))
    # Show Columns
    if st.checkbox("Columns Names"):
        st.write(df.columns)
    # Show Shape
    if st.checkbox("Shape of Dataset"):
        st.write(df.shape)
        data_dim = st.radio("Show Dimension by ", ("Rows", "Columns"))
        if data_dim == "Columns":
            st.text("Numbers of Columns")
            st.write(df.shape[1])
        elif data_dim == "Rows":
            st.text("Numbers of Rows")
            st.write(df.shape[0])
        else:
            # Unreachable with the two radio options above; kept as fallback.
            st.write(df.shape)
    # Select Columns
    if st.checkbox("Select Column to show"):
        all_columns = df.columns.tolist()
        selected_columns = st.multiselect("Select Columns", all_columns)
        new_df = df[selected_columns]
        st.dataframe(new_df)
    # Show Value Count
    if st.checkbox("Show Value Counts"):
        all_columns = df.columns.tolist()
        selected_columns = st.selectbox("Select Column", all_columns)
        st.write(df[selected_columns].value_counts())
    # Show Datatypes
    if st.checkbox("Show Data types"):
        st.text("Data Types")
        st.write(df.dtypes)
    # Show Summary
    if st.checkbox("Show Summary"):
        st.text("Summary")
        st.write(df.describe().T)
    # Plot and visualization
    st.subheader("Data Visualization")
    all_columns_names = df.columns.tolist()
    # Correlation Seaborn Plot
    if st.checkbox("Show Correlation Plot"):
        st.success("Generating Correlation Plot ...")
        if st.checkbox("Annot the Plot"):
            st.write(sns.heatmap(df.corr(), annot=True))
        else:
            st.write(sns.heatmap(df.corr()))
        st.pyplot()
    # Count Plot
    if st.checkbox("Show Value Count Plots"):
        x = st.selectbox("Select Categorical Column", all_columns_names)
        st.success("Generating Plot ...")
        if x:
            if st.checkbox("Select Second Categorical column"):
                # Offer every column except `x` itself as the hue.
                hue_all_column_name = df[df.columns.difference([x])].columns
                hue = st.selectbox("Select Column for Count Plot",
                                   hue_all_column_name)
                st.write(sns.countplot(x=x, hue=hue, data=df, palette="Set2"))
            else:
                st.write(sns.countplot(x=x, data=df, palette="Set2"))
            st.pyplot()
    # Pie Chart
    if st.checkbox("Show Pie Plot"):
        # `categorical_column` is a helper defined elsewhere in the file;
        # presumably it returns the categorical column names — verify.
        all_columns = categorical_column(df)
        selected_columns = st.selectbox("Select Column", all_columns)
        if selected_columns:
            st.success("Generating Pie Chart ...")
            st.write(df[selected_columns].value_counts().plot.pie(
                autopct="%1.1f%%"))
            st.pyplot()
    # Customizable Plot
    st.subheader("Customizable Plot")
    type_of_plot = st.selectbox("Select type of Plot",
                                ["area", "bar", "line", "hist", "box", "kde"])
    selected_columns_names = st.multiselect("Select Columns to plot",
                                            all_columns_names)
    if st.button("Generate Plot"):
        st.success("Generating Customizable Plot of {} for {}".format(
            type_of_plot, selected_columns_names))
        custom_data = df[selected_columns_names]
        if type_of_plot == "area":
            st.area_chart(custom_data)
        elif type_of_plot == "bar":
            st.bar_chart(custom_data)
        elif type_of_plot == "line":
            st.line_chart(custom_data)
        elif type_of_plot:
            # hist/box/kde fall through to pandas' matplotlib backend.
            custom_plot = df[selected_columns_names].plot(kind=type_of_plot)
            st.write(custom_plot)
            st.pyplot()
import streamlit as st
import numpy as np
import pandas as pd
import sys
from tts_server_utils import load_taco2, load_waveglow, gen_e2e_taco

st.title("Nicole's TTS Deployed for testing")

# Checkpoint locations are passed on the command line.
taco_path = sys.argv[1]  # path to model checkpoint
wave_path = sys.argv[2]  # path to waveglow


@st.cache(allow_output_mutation=True)
def load_model(taco_path, wave_path):
    """Load Tacotron2 and WaveGlow once; st.cache keeps them across reruns."""
    taco_model = load_taco2(taco_path)
    waveglow, denoiser = load_waveglow(wave_path)
    return taco_model, waveglow, denoiser


data_load_state = st.text("Loading models ....!")
taco_model, wave_model, denoiser = load_model(taco_path, wave_path)
data_load_state.text("Loading models ...... Done!!")

preselect = st.radio("Select or type in the box below!",
                     ("Thank you for contacting us",
                      " I can surely solve your issue today",
                      "I can talk to you in english"))
text_box = st.text_input("Enter Text here ", value="")

# Typed text wins; an empty text box falls back to the radio preset.
sentence = preselect if not text_box else text_box
audio_data = gen_e2e_taco(sentence, taco_model, wave_model, denoiser)
st.audio(audio_data, format='audio/wav', start_time=0)
import streamlit as st st.write("""# **Simple Converter App** """) Menu = st.radio( "What would you like to do?", ("Denary Conversion", "Binary Conversion", "Hexadecimal Conversion")) st.write(""" ____ """) if Menu == "Denary Conversion": User = st.slider("Please select a Denary Number", min_value=1, max_value=255, value=1) st.write(User, "in Binary is", bin(User)[2:], "\n", "\n", User, "in Hexadecimal is", hex(User)[2:]) if Menu == "Binary Conversion": User = st.text_input("Please Enter a Byte of binary") try: Internal = int(User, 2) st.write(User, "in Denary is", int(User, 2)) st.write(User, "in Hexadecimal is", hex(Internal)[2:]) except: st.error("Please Enter a Valid Input.") if Menu == "Hexadecimal Conversion": User = st.text_input("Please input a Hexadecimal String") try:
def view():
    """Streamlit report page: browse tables and run the store's summary queries.

    Relies on the module-level helper `query_db` (executes SQL and returns a
    DataFrame) and the `st`/`datetime` imports at the top of the file.
    """
    '## Read tables'
    # All ordinary (non-system) tables in the PostgreSQL database.
    sql_all_table_names = "select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';"
    all_table_names = query_db(sql_all_table_names)['relname'].tolist()
    table_name = st.selectbox('Choose a table', all_table_names)
    if table_name:
        f'Display the table'
        # NOTE(review): identifier interpolation into SQL; the value comes
        # from a fixed selectbox so injection risk is limited, but quoting
        # the identifier would be safer.
        sql_table = f'select * from {table_name};'
        df = query_db(sql_table)
        st.dataframe(df)

    '## Search for products approaching best used before date'
    today = datetime.date.today()
    start_date = st.date_input('Start date', today)
    if start_date:
        f'Display the table'
        # TODO(review): the original never issued a query here; the
        # best-before-date search is still unimplemented.

    '## Query top N rating products of the grocery store (Group by, Join)'
    top_nratings = st.selectbox('Select N', ('5', '10', '15'), key="selectbox1")
    if top_nratings:
        f'Display the table'
        # Average star rating per producer, best first.
        sql_ratings = f"select producer.producer_name, cast(avg(R.numStar) as Decimal (10,2)) as ratings from rating_rate as R, producer, product_producedBy as P where P.producer=producer.producer_name and R.pid=P.pid group by producer_name order by ratings desc limit {top_nratings};"
        df = query_db(sql_ratings)
        st.dataframe(df)

    '## Query top N popular products of the grocery store (Group by, Join)'
    top_npopular = st.selectbox('Select N', ('5', '10', '15'), key="selectbox2")
    if top_npopular:
        f'Display the table'
        # Most frequently ordered products.
        sql_npopular = f"select p.product_name, count(o.o_quantity) as num from order_history o, product_producedby p where o.pid = p.pid group by p.product_name order by num desc limit {top_npopular} ;"
        df = query_db(sql_npopular)
        st.dataframe(df)

    '## Query sales amount of products(Group by, Join)'
    # Render the radio group horizontally.
    st.write('<style>div.Widget.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
    sales_amount = st.radio('Query sales amount',
                            ('Query all', 'Vegetable', 'Fruit', 'Grocery',
                             'Kitchen', 'Drink', 'Electronic Devices',
                             'Phone', 'Food'),
                            index=0, key="sales_amount")
    f'Display the table'
    if sales_amount == 'Query all':
        # No category filter: total sales value per product.
        sql_sales = "select p.product_name, sum(o.o_quantity * o.o_price) as total_value from order_history o, product_producedby p where o.pid = p.pid group by p.product_name;"
    else:
        # BUG FIX: the original elif chain compared against
        # 'Electronic Product' and 'food', neither of which is a radio
        # option, so those two branches were unreachable. All categories
        # now share one query template; the two mismatched labels are
        # mapped to the category spellings the original queries used
        # (TODO: confirm the DB stores 'Electronic Product' / 'food').
        category = {'Electronic Devices': 'Electronic Product',
                    'Food': 'food'}.get(sales_amount, sales_amount)
        sql_sales = f"select p.product_name, sum(o.o_quantity * o.o_price) as total_value from order_history o, product_producedby p where o.pid = p.pid and p.category = '{category}' group by p.product_name;"
    df = query_db(sql_sales)
    st.dataframe(df)

    '## Show total inventory value of each producer(Group by, Join)'
    sql_in_value = "select P.producer, sum(I.in_quantity*P.cost) as Total_inventory_value from product_producedBy as P, inventory_of as I WHERE I.pid=P.pid group by P.producer order by Total_inventory_value desc"
    df = query_db(sql_in_value)
    st.dataframe(df)

    '## Check the profit of each product(Group by, Join)'
    sql_profit = "select P.product_name, P.producer, sum(O.o_quantity*(O.o_price-P.cost)) as total_profit from Order_history as O, product_producedBy as P WHERE O.pid=P.pid group by P.pid order by total_profit desc"
    df = query_db(sql_profit)
    st.dataframe(df)
def main():
    """Demo tour of core Streamlit features: text, markdown, LaTeX, charts,
    media, widgets, sidebar navigation and progress indicators."""
    # Text/Title
    st.title('StreamLit Concepts')
    # Header/Subheader
    st.header("To Create Header")
    st.subheader("To Create subheader")
    st.subheader("Do you want to build the GUI using web app")
    st.code('pip install streamlit')
    # Plain text
    st.text("hello Streamlit")
    # Raw HTML banner; rendering it requires unsafe_allow_html=True
    html_temp = """
    <div style="background-color:tomato;padding:10px">
    <h2 style="color:white;text-align:center;">Streamlit ML App </h2>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    st.markdown('<i class="material-icons">{}</i>'.format("people"),
                unsafe_allow_html=True)
    # LaTeX rendering
    st.latex(r''' e^{i\pi} + 1 = 0 ''')
    st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''')
    st.write(['st', 'is <', 3])
    # NOTE(review): the original source contained garbled emoji bytes
    # ("✔�"); restored to a plain check mark emoji.
    st.write("✔️ Time up!")
    st.code('s="Happy" for i in range(0,10): print(s)')
    df1 = pd.DataFrame(np.random.randn(50, 5),
                       columns=('col %d' % i for i in range(5)))
    my_table = st.table(df1)
    # st.cache wraps the loader so the remote CSV is fetched only once
    df = st.cache(pd.read_csv)(
        "https://github.com/SurendraRedd/StreamlitProjects/raw/master/lang.csv")
    is_check = st.checkbox("Display Data")
    if is_check:
        st.write(df)
    st.write('Dataframe example')
    st.dataframe(df1)
    # Markdown
    st.markdown("### This is a Markdown")
    st.markdown("### 🎲 Demo on streamlit")
    st.markdown("Streamlit python package is used to develop applications"
                "with out knowing much web application concepts.")
    st.markdown("**♟ Examples ♟**")
    st.markdown("* Happly learning!.")
    # Coloured status-message helpers
    st.success("Successful")
    st.info("Information!")
    st.warning('this is a warning')
    st.error("this is an error Danger")
    data = {'1': "True", '2': "True", '3': "False"}
    st.json(data)
    # Exception display: st.exception expects an Exception instance, not a
    # string describing one (the original passed a str).
    st.exception(IndexError('list out of index'))
    place_holder = st.empty()
    place_holder.text('Replaced!')
    # Help of the function
    st.help(range)
    st.write("Text with write")
    st.write(range(10))
    st.line_chart({"data": [1, 5, 2, 6, 2, 1]})
    st.area_chart({"data": [1, 5, 2, 6, 2, 1]})
    st.bar_chart({"data": [1, 5, 2, 6, 2, 1]})
    arr = np.random.normal(1, 1, size=100)
    fig, ax = plt.subplots()
    ax.hist(arr, bins=20)
    st.pyplot(fig)
    # "Magic" rendering: bare expressions/strings are drawn by Streamlit
    '''
    # Markdown magic
    This is some _**text**_.
    '''
    df = pd.DataFrame({'col1': [1, 2, 3]})
    df  # <-- Draw the dataframe
    x = 10
    'x', x  # <-- Draw the string 'x' and then the value of x
    # Select box
    exp = st.selectbox("Select your experience: ", np.arange(1, 40, 1))
    # Slider (rebinds exp; the selectbox value above is discarded)
    exp = st.slider("Select your experience: ", min_value=1, max_value=40,
                    value=1, step=1)
    # Multiselect
    movies = st.multiselect(
        "Select Balayya Favourite movies?",
        ["SamaraSimhaReddy", "Simha", "NarasimhaNaidu", "Legend"])
    # Will only run once if already cached
    df = load_data()
    st.write(df)
    st.button('Click')
    st.checkbox('Check the checkbox')
    st.radio('Radio Button', [1, 2, 3])
    st.selectbox('Select', [1, 2, 3])
    st.multiselect('Multiselect', [1, 2, 3])
    st.slider('slide', min_value=0, max_value=10)
    st.text_input('Enter Username')
    st.number_input('Enter a Number')
    st.text_area('Enter Text Here!')
    st.date_input('Date Input')
    st.time_input('Time entry')
    st.file_uploader('File Uploader')
    # NOTE(review): beta_color_picker was removed in later Streamlit
    # releases; switch to st.color_picker when upgrading.
    st.beta_color_picker('Select color')
    # st.echo is only useful as a context manager; the stray bare st.echo()
    # call that preceded this block was a no-op and has been removed.
    with st.echo():
        text = 's="Happy Learning!" for i in range(0,10): print(s)'
        st.write(text)
    # Image opening
    #img = Image.open("download.jfif") #open the image stored in specified location
    img = Image.open(urllib.request.urlopen(
        "https://github.com/SurendraRedd/ChallengeDeploy/raw/main/singlefile/Solution.png"
    ))  # Opens the image from the url
    #response = requests.get("https://github.com/SurendraRedd/Techgig/blob/main/images/Solution.png")
    #img = Image.open(BytesIO(response.content))
    #img = Image.open(urllib.request.urlopen("https://github.com/SurendraRedd/Techgig/blob/main/images/Solution.png"))
    st.image(img, width=300, caption="Simple Image")
    # Video playing: read through a context manager so the handle is closed
    # (the original open(...).read() leaked the file object)
    with open("sample-mp4-file.mp4", "rb") as video_file:
        vid_file = video_file.read()
    st.video(vid_file)
    #videoUserDefined("https://www.youtube.com/embed/B2iAodr0fOo")
    # Widgets
    if st.checkbox("Show/hide"):
        st.text("Showing or Hiding Widget")
    # Radio
    status = st.radio("What is your status", ("Married", "Single"))
    if status == 'Married':
        st.success("You are Married")
    # Add a selectbox to the sidebar:
    add_selectbox = st.sidebar.selectbox('Navigation', ('Home', 'About', 'Help'))
    if add_selectbox == 'About':
        st.write('You have selected about page')
    elif add_selectbox == 'Home':
        st.write('you have selected Home page')
    else:
        st.write('you have selected help page')
    # Sample Progress bar
    my_bar = st.progress(0)
    for percent_complete in range(100):
        time.sleep(0.1)
        my_bar.progress(percent_complete + 1)
    with st.spinner('Wait for it...'):
        time.sleep(5)
    st.success('Done!')
    st.balloons()
    st.write('Happy Stream Lite App Learning')
model = load_learner('Shreshth1991/FossilImage/releases/download/v1.0.1/') #model = load_learner('C:\\Users\\H231148\\OneDrive - Halliburton\\Desktop\\models','model.pkl') model.predict(img_test) pred_class, pred_idx, outputs = model.predict(img_test) res = zip(model.data.classes, outputs.tolist()) predictions = sorted(res, key=lambda x: x[1], reverse=True) top_predictions = predictions[0:5] df = pd.DataFrame(top_predictions, columns=["Fossil", "Probability"]) df['Probability'] = df['Probability'] * 100 st.write(df) # Image source selection #option = st.radio('', ['Choose a test image', 'Choose your own image']) option = st.radio('', ['Choose a sample image', 'Choose your own image']) if option == 'Choose a sample image': # Test image selection test_images = os.listdir('FossilImage/test/') #test_images = os.listdir('C:\\Users\\H231148\\OneDrive - Halliburton\\Desktop\\test\\') test_image = st.selectbox('Please select a test image:', test_images) # Read the image file_path = 'FossilImage/test/' + test_image #file_path = 'C:\\Users\\H231148\\OneDrive - Halliburton\\Desktop\\test\\' + test_image img = open_image(file_path) # Get the image to display display_img = mpimg.imread(file_path)
def main(): language = st.sidebar.radio( "Select display language", options=["english", "deutsch"], format_func=str.title ) with open(md_dir / language / "intro.md", mode="r") as f: intro = f.read() st.markdown(intro) plot_data = data.PlotData(out_path=data_dir) df = plot_data.df state_mapper = ( {v: k for k, v in data.STATE_MAPPER.items()} if language == "english" else {v: v for k, v in data.STATE_MAPPER.items()} ) max_activity = ( df[["total_neg_activity", "total_pos_activity"]].abs().max().max() // 50 + 1 ) * 0.5 summary_plot = plotting.combine_summary_plots( df=df, x_var="Meldedatum", x_title="", y_var="relative_growth", y_title="Relative Growth in Total Cases", y_format="%", max_activity=max_activity, ) st.altair_chart(summary_plot, use_container_width=False) state = st.selectbox( "Select a state to see the details", options=list(state_mapper.keys()), format_func=state_mapper.get, ) y_var = st.radio( label="", options=["relative_growth", "absolute_growth"], format_func=lambda x: x.replace("_", " ").title(), ) y_format = "%" if y_var == "relative_growth" else "d" infection_title = f"{state_mapper[state]}: Infections (last updated: {plot_data.infections_last_updated[state].date()})" infection_plot = plotting.plot_infection_details( df=df, state=state, title=infection_title, x_var="Meldedatum", x_title="Date", y_var=y_var, y_format=y_format, y_title=f"{y_var.replace('_', ' ').title()} in Cumulative Cases", max_activity=max_activity, ) st.altair_chart(infection_plot, use_container_width=False) activity_title = f"{state_mapper[state]}: Detailed Mobility Report (last updated: {plot_data.mobility_last_updated[state].date()})" activity_plot = plotting.plot_activity_details( df=df, state=state, title=activity_title, x_var="Meldedatum", x_title="Date", activity_cols=plot_data.activity_cols, max_activity=max_activity, width=830, ) st.altair_chart(activity_plot, use_container_width=False)
label="Momento [kNm]", step=10., value=100., format="%.6f", key="Med", ) with col4_2: Ned = st.number_input( label="Sforzo assiale [kN] (negativo se trazione)", step=10., value=100., format="%.6f", key="Ned", ) section_geometry = st.radio("Scegli la forma della sezione", options=("Rettangolare", "T")) if section_geometry == "Rettangolare": design_constrain = st.radio("Scegli il vincolo per il progetto", options=("Base fissata", "Altezza utile fissata")) col3_1, col3_2, col3_3, col3_4 = st.columns(4) if design_constrain == "Base fissata": with col3_1: beta = st.number_input( label="beta = As/As1", min_value=0., max_value=1.0, step=.1, value=.5, format="%.1f",
model = LinearRegression() if st.button('predict'): progress = st.progress(0) for i in range(100): time.sleep(0.001) progress.progress(i + 1) model.fit(x, y) op = (model.predict([[oxygen1, temp1, humidity1]])) st.text('RESULT : ') op if op > 0.50: st.warning('forrest in danger') else: st.success('forrest in safe') st.balloons() r = st.radio('view data', ['input', 'outout']) if r == 'input': x elif r == 'outout': y elif a == 'AUTOMATED ML MODULE': user_input = st.text_input("ENTER CITY", 'pune') url = 'http://api.openweathermap.org/data/2.5/weather?q={}&appid=ac4073a87a991a712b197b7e8bc04930&units=metric'.format( user_input) res = requests.get(url) data = res.json() temp = data['main']['temp'] humidity = data['main']['temp'] wind_speed = data['wind']['speed']
'Hamsadhvāni'] @st.cache def load_phononet_model(): model = RagaDetector(0, 256) model.load_state_dict(torch.load(MODEL_PATH, map_location='cpu')['net']) model.eval() vis_model = SalientRagaDetector(model) vis_model.eval() return vis_model, model vis_model, model = load_phononet_model() mode = st.radio("How do you want to upload your file?", ('Youtube Link', 'WAV/OGG File Upload')) if mode == 'Youtube Link': youtube_link = st.text_input('Insert Youtube Link') uploaded_file = None if youtube_link is not None and len(youtube_link) > 2: @st.cache def download_youtube(): name = youtube_link.split('=')[1] if not os.path.exists(name + ".mp3"): ydl_opts = { 'format': 'bestaudio/best', 'outtmpl': name + '.%(ext)s', 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',
def eda_analysis():
    """Drive the interactive EDA page: dataset selection, duplicate/missing
    value checks, target selection, and per-type (categorical / date /
    numeric) univariate and bivariate analyses.

    Relies on module-level state (df and its categorical/numeric/date
    splits) shared with helpers such as seperate_features() and
    choose_data_types().
    """
    global documentation_string
    global documentation_substring
    global df
    global df_categorical
    global df_numeric
    global df_date
    # Utilizing a documentation platform to see all the changes we would be using (Useful for pipelining)
    st.write("")
    st.write("")
    st.write(
        "This streamlined EDA shows a high-level analysis of your data, with just a few clicks!"
    )
    st.write(
        "The datasets below have their own unique attributes that touch on specific concepts that I wanted to highlight."
    )
    st.write("")
    st.write('## Data Input')
    #read_file()
    st.info(
        'NOTE: You can also upload your own CSV data to play around with through the <Experimental Reading Data> option below'
    )
    # `files` is a module-level catalogue of bundled datasets; the selectbox
    # shows the display name, the row holds file name / description / target.
    option = st.selectbox('Choose which type of data', files.name)
    st.write("You have chosen " + option)
    option_index = files.index[files['name'] == option]
    # st.write(files.loc[option_index,'file_name'].item())
    option_name = files.loc[option_index, 'file_name'].item()
    st.write(files.loc[option_index, 'description'].item())
    # Either let the user upload a CSV or load the bundled one.
    if (option_name == '<Experimental Reading data>'):
        read_file()
    else:
        df = read_data("", option_name, ",")
    if st.button('1. Initial features'):
        initial_features(df)
    if st.button('2. Check for duplicated values'):
        check_duplicated(df)
    # if st.checkbox('Drop Duplicates?'):
    #     #Drop the duplicate
    #     documentation_substring= f"Dropped {len(df[df.duplicated()])} values\n"
    #     df.drop_duplicates(inplace=True)
    #     logging.info(documentation_substring)
    #     documentation_string+=documentation_substring+'\n'
    #     st.write(documentation_substring)
    # if st.button('Check for duplicated values 2'):
    #     if len(df[df.duplicated()]) > 0:
    #         st.write("No. of duplicated entries: ", len(df[df.duplicated()]))
    #         st.write(df[df.duplicated(keep=False)].sort_values(by=list(df.columns)))
    #     else:
    #         st.write("No duplicated entries found")
    # Function to calculate missing values by column
    if st.button('3a. In-depth analysis on missing values'):
        missing_values = missing_values_table(df)
        st.write("### Missing value rows:")
        st.write(missing_values)
    if st.button('3b. Visualize missing values'):
        # Visualize missing values as a matrix
        # Checks if the missing values are localized
        visualize_missing_values(df)
    # if st.button("(1) Drop Missing Rows"):
    #     st.write(1)
    # if st.button("(1) Drop Missing Rows"):
    #     st.write(1)
    # if st.button("(1) Drop Missing Rows"):
    #     st.write(1)
    if st.button("4. Check the data type of each column with an example"):
        check_data_type(df)
    if st.button('5. Column-wise analysis'):
        column_analysis(df)
    # if st.button("Convert numeric to categorical feature <Pending>"):
    #     pass
    # if st.button("Convert string to datetime feature <Pending>"):
    #     pass
    # if st.button("Overview of summary based on the target variable <Pending>"):
    #     pass
    # if st.button("Rename columns if needed <Pending>"):
    #     pass
    # if st.button("Drop the target variable from the dataframe <Pending>"):
    #     pass
    # Splits df into the module-level categorical / numeric / date frames.
    seperate_features()
    # df_numeric=df.select_dtypes(include=['float64', 'int64'])
    # df_date=df.select_dtypes(include=['datetime64'])
    # df_categorical=df.select_dtypes(exclude=['float64', 'int64','datetime64'])
    if st.button("6. Get implied numeric, categorical and datetime features"):
        get_column_types(df_categorical, df_numeric, df_date)
    # if st.button("Remove extra white space in text columns <Pending>"):
    #     pass
    st.write("### Define the target variable")
    st.write("")
    st.info(
        'Make sure you define the target variable for bivariate classification'
    )
    if st.checkbox('Find the target variable'):
        # For the experimental upload with no preset target, prompt the user
        # to locate it; otherwise show the catalogued target column.
        if (files.loc[option_index, 'name'].item() == "<Experimental Reading data>") and (
                files.loc[option_index, 'target'].item() == "Find your target variable"):
            st.info("Search for the target variable from your dataset")
            st.write(df.head())
        else:
            st.write("For this dataset, it is {0}".format(
                files.loc[option_index, 'target'].item()))
        target_name = st.text_input("Enter the target name",
                                    files.loc[option_index, 'target'].item())
        target = find_target(target_name)
        st.write("Target: ", target_name)
        st.write("Target type: ", type(target))
        st.write("### Overview")
        st.write(target.head())
        st.write(target.value_counts())
        sns.countplot(x=target, data=df)
        st.pyplot()
    st.write("### Finding the data variables")
    st.write(
        "You can manually change the categorical, numeric and date-time variables"
    )
    if (files.loc[option_index, 'name'].item() == "<Experimental Reading data>"):
        st.info(
            "You would need to manually extract the date-time variables yourself"
        )
    if (option == "Cat Shelter information"):
        st.info(
            "The variables: date_of_birth and datetime should be manually changed to date-time variables"
        )
    if st.checkbox("Choose data types"):
        choose_data_types()
    st.markdown("## Categorical columns")
    if st.button("Information on categorical columns"):
        st.write("### Categorical Column names")
        st.write(df_categorical.columns)
        st.write("### Categorical Info")
        # Capture DataFrame.info() (which prints to a buffer) as text.
        buffer = io.StringIO()
        df_categorical.info(buf=buffer)
        s = buffer.getvalue()
        st.text(s)
        #st.write(df_numeric)
    categorical_selector = st.radio(
        "Choose what type of categorical analysis to conduct:", [
            "Select one of the two", "Univariate analysis of categorical feature",
            "Bivariate analysis of categorical feature"
        ])
    categorical_names = df_categorical.columns.tolist()
    categorical_names.append("All columns")
    if (categorical_selector == "Univariate analysis of categorical feature"):
        categorical_option = st.selectbox("Choose which column",
                                          categorical_names)
        if (categorical_option == "All columns"):
            for col in df_categorical.columns:
                categorical_summarized(df_categorical, y=col)
        else:
            categorical_summarized(df_categorical, y=categorical_option)
    if (categorical_selector == "Bivariate analysis of categorical feature"):
        st.info(
            "**Make sure that you have defined the target variable from the checkbox above**"
        )
        # NOTE(review): `target` is only bound inside the 'Find the target
        # variable' checkbox above — if that box was never ticked this
        # branch raises NameError/UnboundLocalError. Confirm intended flow.
        categorical_option = st.selectbox("Choose which column",
                                          categorical_names)
        if (categorical_option == "All columns"):
            for col in df_categorical.columns:
                categorical_summarized(df_categorical, y=col, hue=target)
        else:
            categorical_summarized(df_categorical,
                                   y=categorical_option,
                                   hue=target)
    # if st.button("Categorical Data Imputation <Pending>"):
    #     pass
    # if st.button("Chi square analysis <Pending>"):
    #     pass
    # if st.button("Encoding categorical data <Pending>"):
    #     pass
    if st.button(" View Finalized Categorical columns"):
        st.write(df_categorical.head(10))
    st.markdown("## Date-time columns")
    date_selector = st.radio("Choose what type of Date analysis to conduct:",
                             ["Select one:", "Breakdown of date features"])
    # Coerce all date columns to datetime before any breakdown.
    df_date = df_date.apply(pd.to_datetime)
    date_names = df_date.columns.tolist()
    date_names.append("All columns")
    if date_selector == 'Breakdown of date features':
        date_option = st.selectbox("Choose which column", date_names)
        if (date_option == "All columns"):
            for col in df_date.columns:
                time_summarized(df_date, x=col)
        else:
            time_summarized(df_date, x=date_option)
    st.markdown("## Numeric columns")
    if st.button("Initial numeric features"):
        st.write("### Numeric Overviews")
        st.write(df_numeric.head())
        df_numeric.hist(figsize=(20, 20), bins=10, xlabelsize=8, ylabelsize=8)
        st.pyplot()
    # NOTE(review): colsize is computed but never used in this function.
    colsize = len(df_numeric.columns) - 5
    if st.button("Correlation matrix"):
        plt.figure(figsize=(15, 15))
        sns.heatmap(df_numeric.corr(), annot=True)
        st.pyplot()
    numeric_selector = st.radio(
        "Choose what type of numeric analysis to conduct:", [
            "Select one of the two", "Univariate analysis of numeric feature",
            "Bivariate analysis of numeric feature"
        ])
    numeric_names = df_numeric.columns.tolist()
    numeric_names.append("All columns")
    if (numeric_selector == "Univariate analysis of numeric feature"):
        numeric_option = st.selectbox("Choose which column", numeric_names)
        if (numeric_option == "All columns"):
            for col in df_numeric.columns:
                quantitative_summarized(df_numeric, y=col)
        else:
            quantitative_summarized(df_numeric, y=numeric_option)
    if (numeric_selector == "Bivariate analysis of numeric feature"):
        st.info(
            "**Make sure that you have defined the target variable from the checkbox above**"
        )
        # NOTE(review): same `target` binding caveat as the categorical
        # bivariate branch above.
        numeric_option = st.selectbox("Choose which column", numeric_names)
        if (numeric_option == "All columns"):
            for col in df_numeric.columns:
                quantitative_summarized(dataframe=df_numeric,
                                        y=col,
                                        palette=c_palette,
                                        x=target,
                                        verbose=False)
        else:
            quantitative_summarized(dataframe=df_numeric,
                                    y=numeric_option,
                                    palette=c_palette,
                                    x=target,
                                    verbose=False)
    # if (numeric_selector=="Multivariate variate analysis of numeric feature"):
    #     st.info("**Make sure that you have defined the target variable from the checkbox above**")
    #     st.write(df_numeric.head())
    #     var1 = st.text_input("Enter the first variable")
    #     var2 = st.text_input("Enter the second variable")
    #     quantitative_summarized(dataframe= df_numeric, y = var1, x = var2, hue = target, palette=c_palette3, verbose=False)
    st.write("")
    st.write("")
    if st.button("You're done!! Click here to celebrate"):
        st.balloons()
Job_State_MA = 1 elif state == 'Connecticut': Job_State_CT = 1 elif state == 'New Jersey': Job_State_NJ = 1 elif state == 'New York': Job_State_NY = 1 elif state == 'Washington': Job_State_WA = 1 # st.write(Job_State_CA,Job_State_MA,Job_State_CT,Job_State_NJ,Job_State_NY,Job_State_WA) Seniority_junior, Seniority_na, Seniority_senior = 0, 0, 0 same_state = st.radio( "Would you like to work in the in Same State Headquarters Office ?", ('Yes', 'No'), index=0) if same_state == 'Yes': same_state = 1 elif same_state == 'No': same_state = 0 ## Job Seniority st.subheader("Experience Level") Seniority_junior, Seniority_na, Seniority_senior = 0, 0, 0 seniority = st.radio("Choose Seniority Level", ('NA', 'Junior Level', 'Senior Level'),
def main(): global image_path, image_name # Sidebar st.sidebar.header("Parameters") st.sidebar.markdown("User authorisation") user = st.sidebar.radio("User Type", options=['User1', 'User2', 'User3'], index=0) #n_sprites = 1 # Main Page st.title("Labeling Tool for nonwovens") st.markdown(""" ## Instructions Rate the following image for the presence of nonwovens Press next image once you have rated this image to pass to the next one """) slot = st.empty() st.markdown("**Rate this image**") rating = st.radio("", options=[0, 1, 2, 3], index=0) if st.button("Next"): file = open("last_img.txt", "r") image_name = file.readline()[:-1] image_path = file.readline()[:-1] file.close() save_rating(rating, user, image_name) with st.spinner("Loading image..."): image_name, image_path = load_image(user=user) if image_path is not None: file = open("last_img.txt", "w") file.write(image_name + '\n') file.write(image_path + '\n') file.close() fig = Image.open(image_path) slot.image(fig, use_column_width=True) else: slot.markdown( "**You have processed all available images. Thank you for helping**" ) else: file = open("last_img.txt", "r") image_name = file.readline()[:-1] image_path = file.readline()[:-1] file.close() fig = Image.open(image_path) slot.image(fig, use_column_width=True) #st.pyplot(fig=fig, bbox_inches="tight") st.markdown("**Debug Section**") st.dataframe(user_df) st.empty() st.dataframe(img_df) st.markdown(""" --- This application is made with :heart: Lutz chyetnek fi rasou """)