def main():
    html_temp = """
    <div style="background-color:white;padding:10px">
    <h1 style="color:black;text-align:center;">Random Data Generator App</h1>
    </div>
    """
    # s = get_session(np.random.randint(low=0,
    #                                   high=10000000,
    #                                   size=1))
    st.markdown(html_temp, unsafe_allow_html=True)
    session_state = get(numerical_dict={}, categorical_dict={})

    st.sidebar.title("Data Configuration:")
    data_size = st.sidebar.number_input("Size of Data (N)",
                                        value=10,
                                        step=1,
                                        min_value=1,
                                        max_value=None)

    # EH: Duplicates are handled automatically because we are using a dictionary.
    #     Verify for edge cases.
    st.sidebar.subheader("Numerical Variables Configuration:")
    numerical_variable_name = st.sidebar.text_input("Numerical variable name",
                                                    value="")
    variable_type = st.sidebar.selectbox("Variable type", ("Float", "Integer"))
    if variable_type == "Float":
        lower_bound = st.sidebar.number_input("Lower bound")
        higher_bound = st.sidebar.number_input("Higher bound")
    elif variable_type == "Integer":
        lower_bound = st.sidebar.number_input("Lower bound", value=0, step=1)
        higher_bound = st.sidebar.number_input("Higher bound", value=0, step=1)

    if st.sidebar.button("Add Numerical variable"):
        session_state.numerical_dict[numerical_variable_name] = {
            "lower_bound": lower_bound,
            "higher_bound": higher_bound,
            "variable_type": variable_type
        }
        st.sidebar.success(
            "Added {} to numerical variables".format(numerical_variable_name))

    if session_state.numerical_dict:
        st.write("Summary of Numerical variables:")
        st.dataframe(
            pd.DataFrame.from_dict(session_state.numerical_dict, orient="index"))
    if session_state.categorical_dict:
        st.write("Summary of Categorical variables:")
        st.dataframe(
            pd.DataFrame.from_dict(session_state.categorical_dict, orient="index"))

    # EH: Formatting
    # EH: Duplicate names
    st.sidebar.subheader("Categorical Variables Configuration:")
    categorical_variable_name = st.sidebar.text_input(
        "Categorical variable name", value="")
    levels = st.sidebar.text_input("Input levels (Ex: Low,Med,High)", value="")
    if st.sidebar.button("Add Categorical variable"):
        session_state.categorical_dict[categorical_variable_name] = {
            "levels": levels
        }
        st.sidebar.success("Added {} to categorical variables".format(
            categorical_variable_name))

    if session_state.numerical_dict:
        st.write("Summary of Numerical variables:")
        st.dataframe(
            pd.DataFrame.from_dict(session_state.numerical_dict, orient="index"))
    if session_state.categorical_dict:
        st.write("Summary of Categorical variables:")
        st.dataframe(
            pd.DataFrame.from_dict(session_state.categorical_dict, orient="index"))

    # Prepare dataset
    # Prepare numerical dataset
    numerical_data_dict = {}
    for column in session_state.numerical_dict.keys():
        if session_state.numerical_dict[column]["variable_type"] == "Float":
            feature = np.random.uniform(
                low=session_state.numerical_dict[column]["lower_bound"],
                high=session_state.numerical_dict[column]["higher_bound"],
                size=data_size)
        elif session_state.numerical_dict[column]["variable_type"] == "Integer":
            feature = np.random.randint(
                low=session_state.numerical_dict[column]["lower_bound"],
                high=session_state.numerical_dict[column]["higher_bound"],
                size=data_size)
        numerical_data_dict[column] = feature

    # Prepare categorical dataset
    categorical_data_dict = {}
    for column in session_state.categorical_dict.keys():
        levels_list = np.unique([
            value.strip() for value in
            session_state.categorical_dict[column]["levels"].split(",")
        ]).tolist()
        feature = np.random.choice(levels_list, size=data_size)
        categorical_data_dict[column] = feature

    # Concatenate numerical and categorical data
    result_df = pd.concat([
        pd.DataFrame(numerical_data_dict),
        pd.DataFrame(categorical_data_dict)
    ], axis=1)

    if st.button("Generate"):
        if result_df.shape[0] == 0:
            st.write("Please input variable information")
        else:
            st.success("Random data is generated and saved to csv")
            st.write("Sample data is as shown below:")
            st.dataframe(
                result_df.sample(n=min(10, data_size)).reset_index(drop=True))
            # filename = st.text_input("Enter Sample Data Name", value="")
            # if st.button("Done"):
            tmp_download_link = download_link(
                result_df, 'Random_data.csv', 'Click here to download your data!')
            st.markdown(tmp_download_link, unsafe_allow_html=True)

    if st.sidebar.button("Reset"):
        st.caching.clear_cache()
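# The get() session-state helper and download_link() used above are not defined
# in this snippet. A minimal sketch of a compatible download_link, assuming the
# common pattern of encoding the DataFrame as a base64 CSV data URL:
import base64

def download_link(object_to_download, download_filename, download_link_text):
    # Serialize the DataFrame to CSV and embed it in an <a> tag as a data URL.
    csv = object_to_download.to_csv(index=False)
    b64 = base64.b64encode(csv.encode()).decode()
    return (f'<a href="data:file/csv;base64,{b64}" '
            f'download="{download_filename}">{download_link_text}</a>')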
"schema_name": schema, }, ) else: conn.execute( f""" UPDATE meta.metadata SET config = %(config)s ::jsonb WHERE schema_name=%(schema_name)s; """, { "config": json.dumps(recipe_config), "schema_name": schema, }, ) st.success(f"{schema} is successfully loaded") st.write(recipe_config) elif submit and new: if not edit: archiver.archive_table(recipe_config) conn.execute( f""" INSERT INTO meta.metadata(schema_name, config,last_update) VALUES (%(schema_name)s, %(config)s ::jsonb, now()); """, { "config": json.dumps(recipe_config), "schema_name": schema,
    st.write('\n')
    error_code = 0
    if st.button('Submit'):
        if weight_input.isnumeric():
            weight = int(weight_input)
        else:
            st.error('Invalid weight')
            error_code = 1
        if height_input.isnumeric():
            height = int(height_input)
        else:
            st.error('Invalid height')
            error_code = 1
        if duration_input.isnumeric():
            duration = int(duration_input)
        else:
            st.error('Invalid duration')
            error_code = 1
        if gender_input == 'Male':
            gender = 1
        else:
            gender = 0
        if error_code == 0:
            result = caloriesBurnt(gender, age_input, height, weight, duration)
            st.success(str(result) + ' burnt today, Keep going. ' + '\U0001F525')
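# caloriesBurnt() is not defined in this fragment. A hypothetical placeholder
# using the standard MET estimate (kcal = MET x weight in kg x hours), assuming
# duration is entered in minutes; the real app may instead use a trained model.
def caloriesBurnt(gender, age, height, weight, duration, met=8.0):
    # gender, age and height are accepted to match the call site above but are
    # not used by this rough estimate.
    hours = duration / 60.0
    return str(round(met * weight * hours, 1)) + ' calories'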
    # define model
    regressor = LinearRegression()
    # train model
    regressor.fit(x, y)
    # native test
    # print("model fit is ok")
    xPredict = numpy.array([[userToPredict]])
    # get prediction
    prediction = regressor.predict(xPredict)
    st.success("With 4D Number " + str(xPredict[0][0]) +
               " the Prize Category is: " + str(prediction[0][0]))
    st.info("Here you can see your data (blue), model (red line) and prediction")

    # ============== LR ==============
    # create figure (fixed typo: was "fix, ax")
    fig, ax = plt.subplots()
    # add labels to plots
    ax.set_xlabel("4D Number")
    ax.set_ylabel("Prize Category")
    # dataset
    ax.scatter(x, y)
def main():
    html_temp = """
    <div style="background-color:{};padding:10px;border-radius:10px">
    <h1 style="color:{};text-align:center;">Machine Learning Application</h1>
    </div>
    """
    st.markdown(html_temp.format('royalblue', 'white'), unsafe_allow_html=True)
    today = st.date_input("Date: ", datetime.datetime.now())

    activities = ["Exploratory Statistics", "Plots", "Models", "About"]
    choice = st.sidebar.selectbox("Select an activity: ", activities)

    if choice == 'Exploratory Statistics':
        st.subheader("Exploratory Data Analysis")
        data = st.file_uploader("Upload your data as csv or txt: ",
                                type=["csv", "txt"])
        if data is not None:
            df = pd.read_csv(data)
            st.dataframe(df.head())
            # Defined up front so every checkbox below can use it (the original
            # only defined it inside the first checkbox, causing a NameError).
            all_columns = df.columns.tolist()
            if st.checkbox("Number of rows and columns"):
                st.write(df.shape)
            if st.checkbox("Column names as JSON"):
                st.write(all_columns)
            if st.checkbox("Select one or more variables"):
                selected_columns = st.multiselect("Select variables", all_columns)
                new_df = df[selected_columns]
                st.dataframe(new_df)
            if st.checkbox("Descriptive statistics"):
                st.write(df.describe())
            if st.checkbox("Value counts"):
                st.write(df.iloc[:, -1].value_counts())
            if st.checkbox("Correlation matrix"):
                st.write(sns.heatmap(df.corr(), annot=True))
                st.pyplot()
            if st.checkbox("Pie chart"):
                columns_to_plot = st.selectbox("Select a column", all_columns)
                pie_plot = df[columns_to_plot].value_counts().plot.pie(
                    autopct="%1.1f%%")
                st.write(pie_plot)
                st.pyplot()

    elif choice == 'Plots':
        st.subheader("Data Visualization")
        data = st.file_uploader("Upload your data as csv or txt: ",
                                type=["csv", "txt"])
        if data is not None:
            df = pd.read_csv(data)
            st.dataframe(df.head())
            all_columns_names = df.columns.tolist()
            type_of_plot = st.selectbox("Select the plot type",
                                        ["Area", "Bars", "Line", "Histogram"])
            selected_columns_names = st.multiselect("Select columns to plot",
                                                    all_columns_names)
            if st.button("Generate the plot"):
                st.success("Generating your custom {} plot for {}".format(
                    type_of_plot, selected_columns_names))
                cust_data = df[selected_columns_names]
                if type_of_plot == 'Area':
                    st.area_chart(cust_data)
                elif type_of_plot == 'Bars':
                    st.bar_chart(cust_data)
                elif type_of_plot == 'Line':
                    st.line_chart(cust_data)
                elif type_of_plot == 'Histogram':
                    fig, ax = plt.subplots()
                    ax.hist(cust_data, bins=2)
                    st.pyplot(fig)
                elif type_of_plot:
                    cust_plot = df[selected_columns_names].plot(kind=type_of_plot)
                    st.write(cust_plot)
                    st.pyplot()

    elif choice == 'Models':
        st.subheader("Machine Learning")
        data = st.file_uploader("Upload your data as csv or txt: ",
                                type=["csv", "txt"])
        if data is not None:
            df = pd.read_csv(data)
            st.dataframe(df.head())
            X = df.iloc[:, 0:-1]
            Y = df.iloc[:, -1]
            seed = 7

            models = []
            models.append(("Logistic Regression", LogisticRegression()))
            models.append(("Linear Discriminant Analysis",
                           LinearDiscriminantAnalysis()))
            models.append(("K-Nearest Neighbours", KNeighborsClassifier()))
            models.append(('Decision Trees', DecisionTreeClassifier()))
            models.append(('Naive Bayes Classifier', GaussianNB()))
            models.append(('Support Vector Machine', SVC()))

            model_names = []
            model_mean = []
            model_std = []
            all_models = []
            scoring = 'accuracy'
            for name, model in models:
                kfold = model_selection.KFold(n_splits=10,
                                              random_state=seed,
                                              shuffle=True)
                cv_results = model_selection.cross_val_score(model, X, Y,
                                                             cv=kfold,
                                                             scoring=scoring)
                model_names.append(name)
                model_mean.append(cv_results.mean())
                model_std.append(cv_results.std())
                accuracy_results = {
                    "model": name,
                    "cv accuracy": cv_results.mean(),
                    "standard deviation": cv_results.std()
                }
                all_models.append(accuracy_results)

            if st.checkbox("Results as a table: "):
                st.dataframe(
                    pd.DataFrame(zip(model_names, model_mean, model_std),
                                 columns=["Model name", "CV accuracy",
                                          "Standard deviation"]))
            if st.checkbox("Results as JSON"):
                st.json(all_models)

    elif choice == 'About':
        st.subheader("About")
        st.text("You can use this tool with a comma-separated csv file. Riadh NSIRI")
        html_temp = """
        <div style="background-color:#808080;padding:5px;border-radius:10px;font-weight:bolder">
        <h6 style="color:{};text-align:center;">© 2021 . R. NSIRI</h6>
        </div>
        """
        st.markdown(html_temp.format('royalblue', 'white'), unsafe_allow_html=True)
def main():
    st.sidebar.header("📰 recnn by @awarebayes 👨🔧")
    if st.sidebar.checkbox("Use cuda", torch.cuda.is_available()):
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    st.sidebar.subheader("Choose a page to proceed:")
    page = st.sidebar.selectbox(
        "",
        [
            "🚀 Get Started",
            "📽 ️Recommend me a movie",
            "🔨 Test Recommendation",
            "⛏️ Test Diversity",
            "🤖 Reinforce Top K",
        ],
    )

    if page == "🚀 Get Started":
        render_header()
        st.subheader("If you have cloned this repo, here is some stuff for you:")
        st.markdown("""
            📁 **Downloads** + change the **constants**, so they point to this unpacked folder:

            - [Models](https://drive.google.com/file/d/1goGa15XZmDAp2msZvRi2v_1h9xfmnhz7/view?usp=sharing) **= MODELSPATH**
            - [Data for Streamlit Demo](https://drive.google.com/file/d/1nuhHDdC4mCmiB7g0fmwUSOh1jEUQyWuz/view?usp=sharing) **= DATAPATH**
            - [ML20M Dataset](https://grouplens.org/datasets/movielens/20m/) **= ML20MPATH**

            p.s. ml20m is only needed for links.csv; I couldn't include it in my
            streamlit data because of copyright. This is all the data you need.
            """)

    if page == "🔨 Test Recommendation":
        st.header("Test the Recommendations")
        st.info("Upon the first opening the data will start loading."
                "\n Unfortunately there is no progress verbose in streamlit. "
                "Look in your console.")
        st.success("Data is loaded!")
        models = load_models(device)
        st.success("Models are loaded!")
        state, action, reward, next_state, done = get_batch(device)
        st.subheader("Here is a random batch sampled from testing environment:")
        if st.checkbox("Print batch info"):
            st.subheader("State")
            st.write(state)
            st.subheader("Action")
            st.write(action)
            st.subheader("Reward")
            st.write(reward.squeeze())

        st.subheader("(Optional) Select the state you are getting the recommendations for")
        action_id = np.random.randint(0, state.size(0), 1)[0]
        action_id_manual = st.checkbox("Manually set state index")
        if action_id_manual:
            action_id = st.slider("Choose state index:",
                                  min_value=0, max_value=state.size(0))
        st.write("state:", state[action_id])

        algorithm = st.selectbox("Choose an algorithm", ("ddpg", "td3"))
        metric = st.selectbox(
            "Choose a metric",
            ("euclidean", "cosine", "correlation", "canberra",
             "minkowski", "chebyshev", "braycurtis", "cityblock"),
        )
        topk = st.slider("TOP K items to recommend:",
                         min_value=1, max_value=30, value=7)
        dist = {
            "euclidean": distance.euclidean,
            "cosine": distance.cosine,
            "correlation": distance.correlation,
            "canberra": distance.canberra,
            "minkowski": distance.minkowski,
            "chebyshev": distance.chebyshev,
            "braycurtis": distance.braycurtis,
            "cityblock": distance.cityblock,
        }
        action = models[algorithm].forward(state)
        st.markdown("**Recommendations for state with index {}**".format(action_id))
        st.write(rank(action[action_id].detach().cpu().numpy(), dist[metric], topk))
        st.subheader("Pairwise distances for all actions in the batch:")
        st.pyplot(recnn.utils.pairwise_distances_fig(action))

    if page == "⛏️ Test Diversity":
        st.header("Test the Distances (diversity and pinpoint accuracy)")
        models = load_models(device)
        st.success("Models are loaded!")
        state, action, reward, next_state, done = get_batch(device)
        indexes = get_index()

        def query(index, action, k=20):
            D, I = index.search(action, k)
            return D, I

        def get_err(action, dist, k=5, euc=False):
            D, I = query(indexes[dist], action, k)
            if euc:
                D = D ** 0.5  # l2 -> euclidean
            mean = D.mean(axis=1).mean()
            std = D.std(axis=1).mean()
            return I, mean, std

        def get_action(model_name, action_id):
            gen_action = models[model_name].forward(state)
            gen_action = gen_action[action_id].detach().cpu().numpy()
            return gen_action

        st.subheader("(Optional) Select the state you are getting the recommendations for")
        action_id = np.random.randint(0, state.size(0), 1)[0]
        action_id_manual = st.checkbox("Manually set state index")
        if action_id_manual:
            action_id = st.slider("Choose state index:",
                                  min_value=0, max_value=state.size(0))

        st.header("Metric")
        dist = st.selectbox("Select distance", ["L2", "IP", "COS"])
        ddpg_action = get_action("ddpg", action_id).reshape(1, -1)
        td3_action = get_action("td3", action_id).reshape(1, -1)
        topk = st.slider("TOP K items to recommend:",
                         min_value=1, max_value=30, value=10)
        ddpg_I, ddpg_mean, ddpg_std = get_err(ddpg_action, dist, topk, euc=True)
        td3_I, td3_mean, td3_std = get_err(td3_action, dist, topk, euc=True)

        # Mean error
        st.subheader("Mean error")
        st.markdown("""
            How close are we to the actual movie embedding?
            The closer the better, although higher error may produce more
            diverse recommendations.
            """)
        labels = ["DDPG", "TD3"]
        x_pos = np.arange(len(labels))
        CTEs = [ddpg_mean, td3_mean]
        error = [ddpg_std, td3_std]
        fig, ax = plt.subplots(figsize=(16, 9))
        ax.bar(x_pos, CTEs, yerr=error)
        ax.set_xticks(x_pos)
        ax.grid(False)
        ax.set_xticklabels(labels)
        ax.set_title(dist + " error")
        ax.yaxis.grid(True)
        st.pyplot(fig)

        # Similarities
        st.header("Similarities")
        emb, _, _ = get_embeddings()
        st.markdown("Heatmap of correlation similarities (Gram matrix of actions)"
                    "\n\n"
                    "Higher = more diverse, lower = less diverse. "
                    "You decide what is better...")
        st.subheader("ddpg")
        st.pyplot(recnn.utils.pairwise_distances_fig(torch.tensor(emb[ddpg_I])))
        st.subheader("td3")
        st.pyplot(recnn.utils.pairwise_distances_fig(torch.tensor(emb[td3_I])))

    if page == "📽 ️Recommend me a movie":
        st.header("📽 ️Recommend me a movie")
        st.markdown("""
            **Now, this is probably why you came here. Let's get you some movies suggested.**

            You need to choose 10 movies in the bar below by typing their titles.
            Due to client-side limitations, I am only able to display the top 200 movies.
            P.S. you can type to search.
            """)
        mov_base = get_mov_base()
        mov_base_by_title = {v: k for k, v in mov_base.items()}
        movies_chosen = st.multiselect("Choose 10 movies", list(mov_base.values()))
        st.markdown("**{} chosen, {} to go**".format(len(movies_chosen),
                                                     10 - len(movies_chosen)))
        if len(movies_chosen) > 10:
            st.error("Please select exactly 10 movies, you have selected {}".format(
                len(movies_chosen)))
        if len(movies_chosen) == 10:
            st.success("You have selected 10 movies. Now let's rate them")
        else:
            st.info("Please select 10 movies in the input above")

        if len(movies_chosen) == 10:
            st.markdown("### Rate each movie from 1 to 10")
            ratings = dict([(i, st.number_input(i, min_value=1, max_value=10, value=5))
                            for i in movies_chosen])
            ids = [mov_base_by_title[i] for i in movies_chosen]
            embs = load_mekd()
            state = torch.cat([
                torch.cat([embs[i] for i in ids]),
                torch.tensor(list(ratings.values())).float() - 5,
            ])
            st.write("your state", state)
            state = state.to(device).squeeze(0)
            models = load_models(device)
            algorithm = st.selectbox("Choose an algorithm", ("ddpg", "td3"))
            metric = st.selectbox(
                "Choose a metric",
                ("euclidean", "cosine", "correlation", "canberra",
                 "minkowski", "chebyshev", "braycurtis", "cityblock"),
            )
            dist = {
                "euclidean": distance.euclidean,
                "cosine": distance.cosine,
                "correlation": distance.correlation,
                "canberra": distance.canberra,
                "minkowski": distance.minkowski,
                "chebyshev": distance.chebyshev,
                "braycurtis": distance.braycurtis,
                "cityblock": distance.cityblock,
            }
            topk = st.slider("TOP K items to recommend:",
                             min_value=1, max_value=30, value=7)
            action = models[algorithm].forward(state)
            st.subheader("The neural network thinks you should watch:")
            st.write(rank(action[0].detach().cpu().numpy(), dist[metric], topk))

    if page == "🤖 Reinforce Top K":
        st.title("🤖 Reinforce Top K")
        st.markdown("**Reinforce is a discrete state algorithm, meaning a lot of "
                    "metrics (i.e. error, diversity test) won't be possible.**")
        st.subheader("This page is under construction")
def main(): st.title("Liver disease Predection") html_temp = """ <div style="background-color:teal ;padding:10px"> <h2 style="color:white;text-align:center;">Liver disease Predection</h2> </div> """ st.markdown(html_temp, unsafe_allow_html=True) activities = [ 'Logistic Regression', 'KNN', 'Decision tree', 'Random forest', 'naive bayes', 'SVM', 'Neural networks' ] option = st.sidebar.selectbox('Which model would you like to use?', activities) st.subheader(option) age = st.slider("Enter age", 1, 100) gender = st.radio("Gender", ("Male", "Female")) Total_Bilirubin = st.slider('Total_Bilirubin', 0, 75) Direct_Bilirubin = st.slider('Direct_Bilirubin', 0, 20) Alkaline_Phosphotase = st.slider('Alkaline_Phosphotase ', 0, 2100) Alamine_Aminotransferase = st.slider('Alamine_Aminotransferase ', 0, 2000) Aspartate_Aminotransferase = st.slider('Aspartate_Aminotransferase ', 0, 5000) Total_Protiens = st.slider('Total_Protiens ', 0, 10) Albumin = st.slider('Albumin ', 0, 6) Albumin_and_Globulin_Ratio = st.slider('Albumin_and_Globulin_Ratio ', 0, 5) if gender == "Female": gender1 = 1 gender2 = 0 else: gender1 = 0 gender2 = 1 inputs = [[ age, Total_Bilirubin, Direct_Bilirubin, Alkaline_Phosphotase, Alamine_Aminotransferase, Aspartate_Aminotransferase, Total_Protiens, Albumin, Albumin_and_Globulin_Ratio, gender1, gender2 ]] t = (age != 0 and Total_Bilirubin != 0 and Direct_Bilirubin != 0 and Alkaline_Phosphotase != 0 and Aspartate_Aminotransferase != 0 and Total_Protiens != 0 and Albumin != 0 and Albumin_and_Globulin_Ratio != 0 and Total_Bilirubin > 50) if st.button('Classify'): if option == 'Logistic Regression': st.success(classify(log.predict(inputs), t)) elif option == 'KNN': st.success(classify(knn.predict(inputs), t)) elif option == 'Decision tree': st.success(classify(decisiontree.predict(inputs), t)) elif option == 'Random forest': st.success(classify(randomforest.predict(inputs), t)) elif option == 'naive bayes': st.success(classify(naivebayes.predict(inputs), t)) elif option == 'svm': st.success(classify(svm.predict(inputs), t)) elif option == 'Neural networks': st.success(classify(neural.predict(inputs), t))
    # (fragment) trailing line of the preprocessing() helper defined above this excerpt
    x_test[categorical_cols] = pd.DataFrame(
        encoder.transform(x_test[categorical_cols].applymap(str).values))

st.title("House Pricing Assistant -- Using Artificial Intelligence")
train_data = pd.read_csv("train.csv")
test_data = pd.read_csv("test.csv")
desc = open("data_description.txt", "r")
desc_text = desc.read()

st.subheader("Use the window below to explore the unaltered dataset used to train the AI.")
st.dataframe(train_data)
show_key = st.checkbox('Show more information about training data')
if show_key:
    st.success(desc_text)

x_train = train_data.drop(columns=["SalePrice"])
y_train = train_data["SalePrice"]
x_test = test_data
preprocessing()

train_button = st.checkbox("Visualize Data and Train Model")
if train_button:
    visualize_2D(x_train, y_train)
    # (Next plot made with coding help from
    # https://towardsdatascience.com/how-to-perform-exploratory-data-analysis-with-seaborn-97e3413e841d)
def main():
    html_temp = """
    <div style="background-color:red;padding:10px">
    <h2 style="color:black;text-align:center;">Heart Disease Prediction</h2>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    image = Image.open('h1.png')
    st.image(image, use_column_width=True, format='PNG')

    age = st.slider('Age', 0, 100)
    sex = st.text_input('Sex 1(male)-0(Female)', ' ')
    cp = st.text_input('Chest pain 0,1,2,3', ' ')
    trestbps = st.slider('Resting blood pressure in mm Hg', 90, 200)
    chol = st.slider('Serum cholestoral in mg/dl', 126, 564)
    fbs = st.text_input('Fasting blood sugar > 120 mg/dl, (1 = true; 0 = false)', ' ')
    restecg = st.slider('Resting electrocardiographic results', 0, 2)
    thalach = st.slider('Maximum heart rate achieved', 65, 210)
    exang = st.text_input('Exercise induced angina (1 = Yes; 0 = No)', ' ')
    oldpeak = st.text_input('ST depression induced by exercise relative to rest (0-7)', ' ')
    slope = st.text_input('The slope of the peak exercise ST segment (0-2)', ' ')
    ca = st.text_input('Number of major vessels (0-3) colored by flourosopy', ' ')
    thal = st.text_input('Thal (1 = normal, 2 = fixed defect, 3 = reversable defect)', ' ')

    result = ""
    if st.button('Predict'):
        dataset = [int(age), int(sex), int(cp), int(trestbps), int(chol),
                   int(fbs), int(restecg), int(thalach), int(exang),
                   float(oldpeak), int(slope), int(ca), int(thal)]
        df = pd.DataFrame(dataset)
        # Note: fitting the scaler on a single sample only rescales it against
        # itself; ideally the scaler should be fit on the training data and
        # only transform() applied here.
        df_trans = standardScaler.fit_transform(df)
        df_trans = df_trans.T
        result = model.predict(df_trans)
        if result == 0:
            result = "Hurrah! You are safe, Enjoy"
        else:
            result = "Sorry, you have symptoms of heart disease"
        st.success('{}'.format(result))

    if st.button("About"):
        st.text("A controlled-carbohydrate lifestyle really prevents risk factors for heart disease.")
        st.text("Github link: https://github.com/ayushkesh/Heart-disease-prediction-ML- ")
        html_temp1 = """
        <div style="background-color:#f63366">
        <p style="color:white;text-align:center;">By: <b>Ayush Kumar</b></p>
        </div>
        """
        st.markdown(html_temp1, unsafe_allow_html=True)
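# standardScaler and model are assumed globals in the snippet above. A
# hypothetical setup, assuming a training CSV in the usual UCI heart-disease
# layout with a 'target' column; the author's actual model may differ.
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

heart = pd.read_csv('heart.csv')  # assumed training file
X, y = heart.drop(columns=['target']), heart['target']
standardScaler = StandardScaler().fit(X)
model = LogisticRegression(max_iter=1000).fit(standardScaler.transform(X), y)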
def main(): """Tweet Classifier App with Streamlit """ # Creates a main title and subheader on your page - # these are static across all pages #st.title("Tweet Classifer") #st.subheader("Climate change tweet classification") # Creating sidebar with selection box - # you can create multiple pages this way st.sidebar.title('Navigation') options = [ "Overview", "Home", "About the Data", "Explore the data", "Prediction on the Go" ] selection = st.sidebar.radio("", options) # Building out the "Overview" page st.title("Climate Change Analysis") if selection == "Overview": st.header("Overview") capture_1 = Image.open("capture_1.png") st.image(capture_1, width=800) capture_2 = Image.open("capture_2.png") st.image(capture_2, width=800) # Building out the "Home" page if selection == "Home": st.subheader("Background") st.info( "Many companies are built around lessening one’s environmental impact or carbon footprint. They offer products and services that are environmentally friendly and sustainable, in line with their values and ideals. They would like to determine how people perceive climate change and whether or not they believe it is a real threat. This would add to their market research efforts in gauging how their product/service may be received." ) #pm = st.sidebar.button("Problem Statement",key="pm") #elif status == "Problem Statement": st.subheader("Problem Statement") st.info( "Create a model that determines whether or not a person believes in climate change or not based on their tweet" ) st.subheader("Data requirements") st.info( "The collection of this data was funded by a Canada Foundation for Innovation JELF Grant to Chris Bauch, University of Waterloo. The dataset aggregates tweets pertaining to climate change collected between Apr 27, 2015 and Feb 21, 2018. In total, 43943 tweets were collected. Each tweet is labelled as one of the following classes:" ) # Building out the "Information" page if selection == "About the Data": st.info("General Data Summaries") # You can read a markdown file from supporting resources folder st.markdown("Some information here") if st.checkbox("Preview Data"): status = st.radio(" ", ("First 5 Rows", "Show All Dataset")) if status == "First 5 Rows": st.dataframe(raw.head()) else: st.dataframe(raw) if st.button("Show data summaries"): st.text("Column names") st.write(raw.columns) st.text("Number of columns") st.write(raw.shape[1]) st.text("Number of rows") st.write(raw.shape[0]) st.text("Data types") st.write(raw.dtypes) st.text("Summary") st.write(raw.describe().T) #visualising the data if selection == "Explore the data": st.info( "N.B. it is recommended that the file is removed of all the noise data. For example – language stopwords (commonly used words of a language – is, am, the, of, in etc), URLs or links, social media entities (mentions, hashtags), punctuations and industry specific words. This step deals with removal of all types of noisy entities present in the text. 
To proceed click Preprocessing below" ) if st.button("Preprocessing"): raw['tidy_message'] = np.vectorize(remove_pattern)(raw['message'], "@[\w]*") # remove special characters, numbers, punctuations raw['tidy_message'] = raw['tidy_message'].str.replace( "[^a-zA-Z#]", " ") #remove short words of less than 3 letters in length raw['tidy_message'] = raw['tidy_message'].apply( lambda x: ' '.join([w for w in x.split() if len(w) > 3])) st.subheader("Visualising the data") col_names = raw.columns.tolist() plot_type = st.selectbox("select plot", ["bar", "hist", "box", "kde"]) select_col_names = st.multiselect("select columns to plot", col_names) if st.button("Generate plot"): st.success("{} plot for {}".format(plot_type, select_col_names)) if plot_type == "bar": s_grp = raw.groupby(["sentiment"]).count() st.bar_chart(s_grp) st.pyplot() #elif plot_type == 'area': # plot_data = raw[select_col_names] # st.area_chart(plot_data) elif plot_type == 'hist': plot_data = raw[select_col_names] st.bar_chart(plot_data) elif plot_type: cust_plot = raw[select_col_names].plot(kind=plot_type) st.write(cust_plot) st.pyplot() st.subheader("Visuals of common words used in the tweets") st.markdown( "The most frequent words appear in large size and the less frequent words appear in smaller sizes" ) cpw = st.checkbox("Common Positive Words", key="cpw") #cpw1 = st.text("Positive words: global warming, climate change, believe climate, change real") if cpw: positive_words = ' '.join( [text for text in raw['tidy_message'][raw['sentiment'] == 1]]) positive_words_cloud = WordCloud( width=800, height=500, random_state=21, max_font_size=110).generate(positive_words) plt.axis('off') plt.imshow(positive_words_cloud, interpolation="bilinear") plt.show() st.pyplot() #st.checkbox("Show/Hide") cnw = st.checkbox("Common negative Words", key="cnw") if cnw: negative_words = ' '.join( [text for text in raw['tidy_message'][raw['sentiment'] == -1]]) negative_words_cloud = WordCloud( width=800, height=500, random_state=21, max_font_size=110).generate(negative_words) plt.axis('off') plt.imshow(negative_words_cloud, interpolation="bilinear") plt.show() st.pyplot() #st.checkbox("Show/Hide") cnnw = st.checkbox("Common neutral/normal Words", key="cnnw") if cnnw: normal_words = ' '.join( [text for text in raw['tidy_message'][raw['sentiment'] == 0]]) normal_words_wordcloud = WordCloud( width=800, height=500, random_state=21, max_font_size=110).generate(normal_words) plt.axis('off') plt.imshow(normal_words_wordcloud, interpolation="bilinear") plt.show() st.pyplot() cnnww = st.checkbox("Common News Words", key="cnnww") if cnnww: news_words = ' '.join( [text for text in raw['tidy_message'][raw['sentiment'] == 2]]) news_words_wordcloud = WordCloud( width=800, height=500, random_state=21, max_font_size=110).generate(news_words) plt.axis('off') plt.imshow(news_words_wordcloud, interpolation="bilinear") plt.show() st.pyplot() # Building out the predication page if selection == "Prediction on the Go": st.sidebar.success( "The App allows only text to be entered. It will show any present entities,provides sentiments analysis and classifies the text as per the table on top. 
Enter the text in the text area provided and select the buttons of your choice below the text area" ) st.info("Prediction with ML Models") st.markdown("The table below shows the description of the sentiments") img = Image.open("class.png") st.image(img) # Creating a text box for user input tweet_text = st.text_area("Enter Text", "Type Here") #named entity if st.checkbox("Show Entities"): #if st.subheader("Extract entities from your text"): # ner= st.text_area("Enter your here","Type here",key="ner") # message = ner # if st.button("Extract"): nlp_result = entity_analyzer(tweet_text) st.write(nlp_result) #st.write=entity_analyzer(entity) #sentiment analysis if st.checkbox("Show Sentiment Analysis"): # if st.subheader("Sentiment of your Text"): # sa= st.text_area("Enter your here","Type here",key="sa") # message = sa # if st.button("Analyse"): sid = SentimentIntensityAnalyzer() res_sentiment = sid.polarity_scores(tweet_text) st.json(res_sentiment) if res_sentiment['compound'] == 0: st.write("The sentiment of your text is NEUTRAL") elif res_sentiment['compound'] > 0: st.success("The sentiment of your text is POSITIVE") else: st.warning("The sentiment of your text is NEGATIVE") news_vectorizer = open("resources/tfidfvect.pkl", "rb") tweet_cv = joblib.load(news_vectorizer) if st.checkbox("Classify"): # Transforming user input with vectorizer vect_text = tweet_cv.transform([tweet_text]).toarray() predictor = joblib.load( open(os.path.join("resources/Logistic_regression.pkl"), "rb")) prediction = predictor.predict(vect_text) # When model has successfully run, will print prediction # You can use a dictionary or similar structure to make this output # more human interpretable. st.success("Text Categorized as Class: {}".format(prediction)) Classifier = st.selectbox("Choose Classifier", ['Linear SVC', 'Logistic regression']) if st.button("Classify"): # Transforming user input with vectorizer # Load your .pkl file with the model of your choice + make predictions # Try loading in multiple models to give the user a choice if Classifier == 'Linear SVC': st.text("Using Linear SVC classifier ..") # Vectorizer news_vectorizer = open("resources/vectoriser.pkl", "rb") tweet_cv = joblib.load(news_vectorizer) predictor = joblib.load( open(os.path.join("resources/linearSVC.pkl"), "rb")) elif Classifier == 'Logistic regression': st.text("Using Logistic Regression Classifeir ..") # Vectorizer news_vectorizer = open("resources/tfidfvect.pkl", "rb") tweet_cv = joblib.load(news_vectorizer) predictor = joblib.load( open(os.path.join("resources/Logistic_regression.pkl"), "rb")) results = [] n = 0 while n < len(tweet_text): vect_text = tweet_cv.transform([tweet_text['message'][n] ]).toarray() prediction = predictor.predict(vect_text) results.append((tweet_text['message'][n], prediction)) n += 1 df = pd.DataFrame(results, columns=['Message', 'Sentiment']) #Table that tabulates the results predictions = st.table(df.head(size)) st.success( "Text Categorized as Class: {}".format(predictions))
    it can alert you when the cache is used incorrectly. However, something went
    wrong while performing this check.

    This error can occur when your virtual environment lives in the same folder
    as your project, since that makes it hard for Streamlit to understand which
    files it should check. If you think that's what caused this, please add the
    following to `~/.streamlit/config.toml`:

    ```toml
    [server]
    folderWatchBlacklist = ['foldername']
    ```

    ...where `foldername` is the relative or absolute path to the folder where
    you put your virtual environment.

    Otherwise, please [file a bug here](https://github.com/streamlit/streamlit/issues/new/choose).

    To stop this warning from showing in the meantime, try one of the following:

    * **Preferred:** modify your code to avoid using this type of object.
    * Or add the argument `allow_output_mutation=True` to the `st.cache` decorator.
    """
st.info(text)
st.success(text)
st.warning(text)
st.error(text)
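# A minimal sketch of the workaround the warning above suggests, assuming a
# cached function that returns an unhashable object (load_model and SomeModel
# are illustrative names, not part of the snippet above):
@st.cache(allow_output_mutation=True)
def load_model():
    return SomeModel()  # hypothetical object that triggers the mutation check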
def main():
    root_dir = '/root/caa/rp/monitor/'
    #data_extracted = root_dir + 'Data_Extracted\\'
    cred_path = '/root/caascript/res/cred.csv'
    user = st.text_input("Enter Your name")
    if not os.path.exists(cred_path):
        st.warning("No credentials found \n\nPlease save credentials via the Login Page")
        user = ""
    if user != "":
        monitoring_Sheet = get_data_from_gsheet("RP_monitoring")
        if monitoring_Sheet.shape[0] == 0:
            st.error("Unable To Fetch Data...")
        else:
            assigned_accounts = monitoring_Sheet.loc[
                monitoring_Sheet['intern_name'] == user.title()]
            if assigned_accounts.shape[0] == 0:
                st.warning("Sorry " + user + ", no account has been assigned to you!!")
            else:
                st.write(assigned_accounts)
                left, right = st.beta_columns(2)
                start_date = left.date_input("Enter Start Date", datetime.date.today())
                end_date = right.date_input("Enter End Date", datetime.date.today())
                assigned_accounts_group_environment = assigned_accounts.groupby(
                    "environment")
                current_date = datetime.date.today()
                month = current_date.strftime("%b")
                year = current_date.strftime("%Y")
                path_month_year = str(month) + "_" + str(year)
                working_dir = os.path.join(root_dir, path_month_year)
                if not os.path.exists(working_dir):
                    os.makedirs(working_dir)
                    os.makedirs(os.path.join(working_dir, 'Output_Files',
                                             'DB_Query_files'))
                    os.makedirs(os.path.join(working_dir, 'Output_Files',
                                             'Script_Final_files'))
                    st.success("Directory Created Successfully")
                if st.checkbox(label="Start Monitoring"):
                    with st.spinner("Execution in progress...."):
                        for env, group in assigned_accounts_group_environment:
                            cred = pd.read_csv(cred_path)
                            if env == 'AWS US':
                                ldap_user = cred.Id.values[0]
                                ldap_pass = cred.Ldap_pass.values[0]
                                db_pass = cred.Db_pass.values[0]
                                host = "localhost"
                            elif env == 'AWS EU':
                                ldap_user = cred.Id.values[1]
                                ldap_pass = cred.Ldap_pass.values[1]
                                db_pass = cred.Db_pass.values[1]
                                host = "localhost"
                            elif env == 'AWS GOLD':
                                ldap_user = cred.Id.values[2]
                                ldap_pass = cred.Ldap_pass.values[2]
                                db_pass = cred.Db_pass.values[2]
                                host = "localhost"
                            server_port_group = group.groupby('port')
                            for server_port, server_group_data in server_port_group:
                                local_port = int(
                                    server_group_data['local_port'].values[0])
                                server = SSHTunnelForwarder(
                                    '172.27.128.59',
                                    ssh_username=ldap_user,
                                    ssh_password=ldap_pass,
                                    remote_bind_address=('localhost', server_port),
                                    local_bind_address=('0.0.0.0', local_port))
                                print(f"Establishing connection with destination "
                                      f"server port {str(server_port)} and source "
                                      f"port {str(local_port)} in execution")
                                try:
                                    server.start()
                                    st.success("Connection Successful")
                                    RP_monitoring_script(ldap_user, db_pass, host,
                                                         start_date, end_date,
                                                         server_group_data,
                                                         working_dir)
                                except Exception:
                                    st.error(f"Couldn't connect to {str(env)} "
                                             f"\n\nCheck logs for more details")
                                finally:
                                    server.stop()
                                    st.warning(f"Connection closed {str(server_port)}")
                            rp_monitoring_merging(group, working_dir)
                        st.success("Monitoring Completed")
    else:
        st.error("Please enter your name to continue")
def main():
    st.set_option('deprecation.showfileUploaderEncoding', False)
    st.title("HATI.AI")
    image = Image.open('macroview.jpg')
    #st.image(image, use_column_width=False)
    st.sidebar.image(image)
    st.sidebar.title("Hati.Ai Web App")

    menu = ["Login", "SignUp"]
    choice = st.sidebar.selectbox("Menu", menu)
    if choice == "Login":
        st.subheader("Login Section")
        username = st.sidebar.text_input("User Name")
        password = st.sidebar.text_input("Password", type='password')
        if st.sidebar.checkbox("Login"):
            create_usertable()
            hashed_pswd = make_hashes(password)
            result = login_user(username, check_hashes(password, hashed_pswd))
            if result:
                st.success("Logged In as {}".format(username))

                def process_text(text):
                    processed_data = []
                    # Make all the strings lowercase and remove non-alphabetic characters
                    #text = re.sub('[^A-Za-z]', ' ', text.lower())
                    # Tokenize the text, i.e. split every sentence into a list
                    # of words. Since the text is already split into sentences,
                    # you don't have to call sent_tokenize.
                    tokenized_text = word_tokenize(text)
                    # Append the result into a new list called processed_data
                    processed_data.append(tokenized_text)
                    # Remember, this final output is a list of words
                    return processed_data

                @st.cache(suppress_st_warning=True)
                def load_data(uploaded_file):
                    df = pd.read_csv(uploaded_file)
                    return df

                st.sidebar.subheader("Choose What Do You Want To Do")
                classifier = st.sidebar.selectbox(
                    " ", ("Find new topics automatically",
                          "POWER BI Dashboard",
                          "Interact with our chatbot"))

                if classifier == 'POWER BI Dashboard':
                    st.markdown("""
                        <iframe width="900" height="606" src="https://app.powerbi.com/view?r=eyJrIjoiZTA4NWU4MjYtOTk3Yi00N2ZhLTgwZWQtZWFhMzNkNDk1Zjk3IiwidCI6Ijk5NmQwYTI3LWUwOGQtNDU1Ny05OWJlLTY3ZmQ2Yjk3OTA0NCIsImMiOjEwfQ%3D%3D&pageName=ReportSection06db5928b6af61b2868f" frameborder="0" style="border:0" allowfullscreen></iframe>
                        """, unsafe_allow_html=True)

                if classifier == 'Interact with our chatbot':
                    import pickle
                    with open('tnb_topic_classifier_svm', 'rb') as training_model:
                        topic_model = pickle.load(training_model)
                    import malaya
                    model = malaya.sentiment.transformer(model='albert', size='base')
                    test = pd.DataFrame()
                    test['Positive'] = ''
                    test['Neutral'] = ''
                    test['Negative'] = ''
                    st.title("Sentiment Analyzer")
                    message = st.text_area("Enter Text", "Type Here ..")
                    if st.button("Analyze"):
                        with st.spinner("Analyzing the text ..."):
                            result = model.predict_proba([message])
                            topic = topic_model.predict([message])
                            output = ("result is: " + str(result) +
                                      " topic is: " + str(topic))
                            st.write(output)
                    else:
                        st.warning("Not sure! Try to add some more words")

                from stop_words import get_stop_words
                if classifier == 'Find new topics automatically':
                    uploaded_file = st.file_uploader('Upload CSV file to begin',
                                                     type='csv')
                    # If a file was uploaded, show the sidebar options
                    if uploaded_file is not None:
                        df = load_data(uploaded_file)
                        if st.sidebar.checkbox("Show raw data", False):
                            st.subheader("Uploaded Data Set")
                            st.write(df)
                        st.sidebar.subheader("Text column to analyse")
                        st_ms = st.sidebar.selectbox(
                            "Select Text Columns To Analyse",
                            df.columns.tolist())
                        df_list = list(df)
                        from top2vec import Top2Vec
                        # Initialise an empty dataframe and convert the text
                        # into strings in the new column
                        d1 = pd.DataFrame()
                        d1['text'] = df[st_ms].astype(str)
                        # Initialise the Top2Vec model and fit the text
                        model = Top2Vec(documents=d1['text'], speed="learn",
                                        workers=10)
                        topic_sizes, topic_nums = model.get_topic_sizes()
                        for topic in topic_nums:
                            # Display the generated word clouds
                            st.pyplot(model.generate_topic_wordcloud(topic))
            else:
                st.warning("Incorrect Username/Password")

    elif choice == "SignUp":
        st.subheader("Create New Account")
        new_user = st.text_input("Username")
        new_password = st.text_input("Password", type='password')
        if st.button("Signup"):
            create_usertable()
            add_userdata(new_user, make_hashes(new_password))
            st.success("You have successfully created a valid Account")
            st.info("Go to Login Menu to login")
def main(): st.title("Molecular Solubility Predictor") logo = Image.open("logo.jpeg") st.image(logo, width=375) st.write( "This app predicts the **_Solubility (LogS)_** values of molecules! ") st.write( "**_ > Please Login/Create new account to use the application._**") menu = ["Home", "Login", "Sign Up"] st.sidebar.header("Menu") choice = st.sidebar.selectbox("", menu) if choice == "Home": st.title("Home") #st.text(" This is our B.Tech project.") st.write( "**> Welcome to our Web Application for Solubility prediction of chemicals ! We calculate the most accurate expected value of Solubility for every drug and compound.**" ) st.write("**> What is Solubility ?**") st.write( ">**Solubility** is the ability of a solid, liquid, or gaseous chemical substance (referred to as the solute) to dissolve in solvent (usually a liquid) and form a solution. The solubility of a substance fundamentally depends on the solvent used, as well as temperature and pressure." ) st.write("**> How does it works ?**") st.write( "> We have used a Machine learning model at the backend that has been trained over thousands of different chemicals. First we take **SMILES** of the chemical as input and calculate **Mol logP, Aromatic Proportion, No. of rotatable bonds and Molecular weight** of the compound at the backend. Using these values we predict the solubility of given compound." ) #Tutorial video section st.header("**Watch our tutorial here**") video_file = open('tutorial.mp4', 'rb') video_bytes = video_file.read() st.video(video_bytes) #Developers section st.write(" ") st.header("**About us**") st.markdown("### **_Developers_**") #Prateek profile pk = Image.open("pk.JPEG") st.image(pk, width=150) st.subheader("**> Prateek Khare**") st.write("Undergrad from **Delhi Technological University** (CSE)") st.write( "[**Linkedin**](https://in.linkedin.com/in/prateek-khare-9b69b4137?trk=people-guest_people_search-card)" ) st.write("Mail here : [email protected]") st.write(" ") #Rohan profile ro = Image.open("rohan.JPEG") st.image(ro, width=150) st.subheader("**> Rohan Shekhar Paunikar**") st.write("Undergrad from **Delhi Technological University** (CSE)") st.write( "[**Linkedin**](https://www.linkedin.com/in/rohan-paunikar-a16b001b2)" ) st.write("Mail here : [email protected]") st.write(" ") #Praveen profile pkt = Image.open("pkt.JPEG") st.image(pkt, width=150) st.subheader("**> Praveen Kumar Tiu**") st.write("Undergrad from **Delhi Technological University** (CSE)") st.write("Mail here : [email protected]") st.write(" ") #Privacy policy st.header("**Privacy Policy**") st.write( "We respect the privacy of our users and by signing up you agree to our privacy policy. We collect information to provide better services to all our users. Your data is completely secure at our databases and is not shared at any cost to anyone." ) #Research paper used st.header("**References**") st.write(""" Research paper used [Polysaccharides: Structure and Solubility](https://www.intechopen.com/books/solubility-of-polysaccharides/polysaccharides-structure-and-solubility). """) st.write(""" Data obtained from the John S. Delaney. [ESOL: Estimating Aqueous Solubility Directly from Molecular Structure](https://pubs.acs.org/doi/10.1021/ci034243x). ***J. Chem. Inf. Comput. Sci.*** . 
""") elif choice == "Sign Up": st.title("Sign Up") st.write("**_ > Use the sidebar to create a new account._**") st.sidebar.header("Create Account") st.sidebar.markdown(" ") name = st.sidebar.text_input("Full Name") username = st.sidebar.text_input("Email Id") password = st.sidebar.text_input("Password", type='password') if st.sidebar.button("Sign Up"): create_usertable() add_userdata(name, username, password) st.success("Account Created Successfully !") st.info("Go to Login Menu to Login") elif choice == "Login": st.title("Login") st.write("**_ > Use the sidebar to Login to your account._**") st.sidebar.subheader("Login to your Account") st.sidebar.subheader(" ") st.sidebar.markdown("**User Name**") username = st.sidebar.text_input("", value="Enter Here") st.sidebar.markdown("**Password**") password = st.sidebar.text_input("", type='password') if st.sidebar.checkbox("Login"): create_usertable() result = login_user(username, password) if result: # Here I have to write options for logged in users. # Main functionality for solubility # Developers functionality # Data viewing functionality st.success("Logged In as {} .".format(username)) SMILES_input = "CCC\nNCNC\nNNCC" #st.title("SMILES Input") st.header("**SMILES Input**") st.subheader("Please Enter *SMILES* Notation of Molecules") #st.markdown("Please Enter **SMILES** Notation of Molecules") SMILES = st.text_area("", SMILES_input) SMILES = "C\n" + SMILES #Adds C as a dummy , first item SMILES = SMILES.split('\n') SMILES[1:] #Dummy item removed st.button("Predict") #structure = Chem.MolFromSmiles(SMILES) #mol_list = [] #for smile in SMILES: # struc = Chem.MolFromSmiles(smile) # mol_list.append(struc) #img = Draw.MolsToGridImage(mol_list) #img X = Generate(SMILES) st.header("**Calculated Molecular Descriptors**") X[1:] load_model = pickle.load(open('solubility model.pkl', 'rb')) prediction = load_model.predict(X) st.header("**Predicted Solubility**") prediction[1:] #view database #user_result = view_all_users() #clean_db = pd.DataFrame(user_result, columns = ["Name","User Id","Password"]) #st.dataframe(clean_db) delaney_with_descriptors_url = 'https://raw.githubusercontent.com/dataprofessor/data/master/delaney_solubility_with_descriptors.csv' dataset = pd.read_csv(delaney_with_descriptors_url) st.write("**> Dataset used**") st.dataframe(dataset) st.subheader("**Line charts for the dataset**") st.line_chart(dataset) #st.area_chart(dataset) #st.bar_chart(dataset) newdataset = dataset.drop(['MolWt'], axis=1) st.line_chart(newdataset) else: st.error("Invalid Username or Password !")
def main():
    st.image('https://www.freelogodesign.org/file/app/client/thumb/f5c1e3f9-b1c6-4733-884f-1a447312940a_200x200.png?1600360895791')
    st.title("HISTODOOR")
    menu = ["Home", "Signup", "Login"]
    submenu = ["As a Doctor", "As a Patient"]
    choice = st.sidebar.selectbox("Menu", menu)

    if choice == "Home":
        st.subheader("Home")
        st.markdown(
            '<div class="markdown-text-container stText" style="width: 698px;">'
            '<div style="font-size: medium;"> </div>',
            unsafe_allow_html=True)
        st.text("Dude..!! Here is your medical history..!!")

    elif choice == "Signup":
        st.text("Username")
        new_username = st.text_input("")
        st.text("Password")
        new_password = st.text_input(" ", type='password')
        st.text("Confirm Password")
        confirm_password = st.text_input("  ", type='password')
        if new_password == confirm_password:
            st.success("Password Confirmed")
        else:
            st.warning("Passwords are not the same")
        if st.button("Submit"):
            st.success("Your account was created successfully")
            st.info("Login to get started")

    elif choice == "Login":
        username = st.sidebar.text_input("Username")
        password = st.sidebar.text_input("Password", type='password')
        if st.sidebar.checkbox("Login"):
            if password == "12345":
                st.subheader("Welcome {}".format(username))
                st.text("Activity")
                activity = st.selectbox("", submenu)

                if activity == "As a Doctor":
                    df = pd.read_csv(r'C:\Users\MAHIMA MANIGANDAN\Desktop\Hack\Datasett.csv')
                    table = df['Aadhar number'].values
                    an = 530118574135
                    name = ["Edit the patient's details", "View patient's record"]
                    if an == 530118574135:
                        st.write("Enter the patient's Aadhar Number")
                        num = st.text_input("")
                        activity1 = st.selectbox("", name)
                        if activity1 == "Edit the patient's details":
                            add = st.text_input(" ")
                            if st.button("Save"):
                                st.write("The details are saved successfully")

                            # Reconstructed from a flattened fragment: the helper
                            # definitions and calls below were run together in the
                            # source; get_connection, get_data, URI_SQLITE_DB and
                            # input1 are defined elsewhere in the original app.
                            def init_db(conn: Connection):
                                conn.execute("""CREATE TABLE IF NOT EXISTS test (
                                    INPUT1 STRING
                                );""")
                                conn.execute(
                                    f"INSERT INTO Datasett (INPUT) VALUES ({input1})")
                                conn.commit()

                            def display_data(conn: Connection):
                                if st.checkbox("View newly added details"):
                                    st.dataframe(get_data(conn))

                            conn = get_connection(URI_SQLITE_DB)
                            init_db(conn)
                            display_data(conn)
                            st.write(df[1])
                            st.write(input1)  # fixed typo: was st.wrie(input1)
                        elif activity1 == "View patient's record":
                            # Hard-coded demo record (condensed from the original's
                            # one-variable-per-field assignments)
                            data = pd.DataFrame({
                                'Medical records': [
                                    'Name', 'Gender', 'Aadhar number', 'Age',
                                    'Anaemia', 'Creatinine_phosphokinase',
                                    'Diabetes', 'Ejection_fraction',
                                    'High_blood_pressure', 'Platelets',
                                    'Serum_creatinine', 'Serum_sodium', 'Smoking',
                                    'Pulse Rate', 'Cholesterol', 'Total_Bilirubin',
                                    'Direct_Bilirubin', 'Alkaline_Phosphotase',
                                    'Alamine_Aminotransferase',
                                    'Aspartate_Aminotransferase', 'Total_Protiens',
                                    'Albumin', 'Blood pressure', 'Haemoglobin',
                                    'Wbc Count', 'Rbc Count', 'Sodium', 'Pottasium'
                                ],
                                'Values': [
                                    "Akhilesh", "Male", "530118574135", "62", "No",
                                    "231", "No", "25", "Yes", "253000", "0.9",
                                    "140", "Yes", "10", "275", "2.7", "1.3", "260",
                                    "31", "56", "7.4", "3", "70", "10.8", "4500",
                                    "3.8", "131", "4.2"
                                ]
                            })
                            st.header("History")
                            st.write(data)
                    else:
                        st.write("Enter a valid aadhar number and try again")

                elif activity == "As a Patient":
                    df = pd.read_csv(r'C:\Users\MAHIMA MANIGANDAN\Desktop\Hack\Datasett.csv')
                    table = df['Aadhar number'].values
                    aan = 530118574135
                    if aan == 530118574135:
                        st.write("Enter your Aadhar Number")
                        aan = st.text_input("")
                        if st.button("Submit"):
                            # Same hard-coded demo record, with Weight appended
                            data = pd.DataFrame({
                                'Medical records': [
                                    'Name', 'Gender', 'Aadhar number', 'Age',
                                    'Anaemia', 'Creatinine_phosphokinase',
                                    'Diabetes', 'Ejection_fraction',
                                    'High_blood_pressure', 'Platelets',
                                    'Serum_creatinine', 'Serum_sodium', 'Smoking',
                                    'Pulse Rate', 'Cholesterol', 'Total_Bilirubin',
                                    'Direct_Bilirubin', 'Alkaline_Phosphotase',
                                    'Alamine_Aminotransferase',
                                    'Aspartate_Aminotransferase', 'Total_Protiens',
                                    'Albumin', 'Blood pressure', 'Haemoglobin',
                                    'Wbc Count', 'Rbc Count', 'Sodium', 'Pottasium',
                                    'Weight'
                                ],
                                'Values': [
                                    "Akhilesh", "Male", "530118574135", "62", "No",
                                    "231", "No", "25", "Yes", "253000", "0.9",
                                    "140", "Yes", "10", "275", "2.7", "1.3", "260",
                                    "31", "56", "7.4", "3", "70", "10.8", "4500",
                                    "3.8", "131", "4.2", "68"
                                ]
                            })
                            st.header("History")
                            st.write(data)
def main(): """Web App""" st.title("Diabetes Risk Prediction 💉") st.text("-- By Mrinal Gosain") activites = ["EDA","Plot","Model Building","About"] choice = st.sidebar.selectbox("Select Activity",activites) # Exploratory data analysis! if choice == 'EDA': st.subheader("Exploratory Data Analysis 🔍") data = st.file_uploader("Upload Dataset",type=["csv","txt"]) if data is not None: # If data isn't empty! df = pd.read_csv(data) st.dataframe(df.head()) if st.checkbox("Show shape"): st.write(df.shape) if st.checkbox("Show Columns"): all_columns = df.columns.to_list() st.write(all_columns) if st.checkbox("Select Columns To Show"): selected_columns = st.multiselect("Select Columns",all_columns) new_df = df[selected_columns] st.dataframe(new_df) if st.checkbox("Show Summary"): st.write(df.describe()) if st.checkbox("Show Value Counts"): st.write(df.iloc[:,-1].value_counts()) if st.checkbox("Correlation with Seaborn"): st.write(sns.heatmap(df.corr(),annot=True)) st.pyplot() if st.checkbox("Pie Chart"): all_columns = df.columns.to_list() columns_to_plot = st.selectbox("Select 1 Column ",all_columns) pie_plot = df[columns_to_plot].value_counts().plot.pie(autopct="%1.1f%%") st.write(pie_plot) st.pyplot() # Plotting ! elif choice == 'Plot': st.subheader("Data Visualization 📈") data = st.file_uploader("Upload Dataset",type=["csv","txt"]) if data is not None: df = pd.read_csv(data) st.dataframe(df.head()) all_columns_names = df.columns.tolist() type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box","kde"]) selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names) if st.button("Generate Plot"): st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names)) # Plot By Streamlit if type_of_plot == 'area': cust_data = df[selected_columns_names] st.area_chart(cust_data) elif type_of_plot == 'bar': cust_data = df[selected_columns_names] st.bar_chart(cust_data) elif type_of_plot == 'line': cust_data = df[selected_columns_names] st.line_chart(cust_data) # Custom Plot elif type_of_plot: cust_plot= df[selected_columns_names].plot(kind=type_of_plot) st.write(cust_plot) st.pyplot() # Model building! elif choice == 'Model Building': st.subheader("Building ML Model 👨💻") data = st.file_uploader("Upload Dataset",type=["csv","txt"]) if data is not None: df = pd.read_csv(data) st.dataframe(df.head()) # Model Building X = df.iloc[:,0:-1] Y = df.iloc[:,-1] #Make sure that the predicted column is the last one! 
seed = 42 # Model models = [] models.append(("LR",LogisticRegression())) models.append(("LDA",LinearDiscriminantAnalysis())) models.append(("KNN",KNeighborsClassifier())) models.append(('CART', DecisionTreeClassifier())) models.append(('NB', GaussianNB())) models.append(('SVM', SVC())) # evaluate each model in turn # List model_names = [] model_mean = [] model_std = [] all_models = [] scoring = 'accuracy' for name,model in models: kfold = model_selection.KFold(n_splits=10, random_state=seed) cv_results = model_selection.cross_val_score(model,X,Y,cv=kfold,scoring=scoring) model_names.append(name) model_mean.append(cv_results.mean()) model_std.append(cv_results.std()) accuracy_results = {"model_name":name,"model_accuracy":cv_results.mean(),"standard_deviation":cv_results.std()} all_models.append(accuracy_results) if st.checkbox("Metrics as Table"): st.dataframe(pd.DataFrame(zip(model_names,model_mean,model_std),columns=["Model Name","Model Accuracy","Standard Deviation"])) if st.checkbox("Metrics as JSON"): st.json(all_models) elif choice == 'About': st.subheader("About Me 👨🎓") st.text("Hi, I am Mrinal. I am a data science enthusiast who loves to build machine learning application end to end.") st.text("I take avid interests in breaking down complex problems and leverage data to drive business") if st.button("Github 🔗"): webbrowser.open_new_tab(github_url) if st.button("Linkedin 🔗"): webbrowser.open_new_tab(linkedin_url)
import time

st.set_option('deprecation.showPyplotGlobalUse', False)
st.title("Introduction to Streamlit")
st.subheader("Subheader")
image = Image.open('0.jpg')
st.image(image, use_column_width=True)  # fixed: st.image has no use_color argument
st.write('Write a Text here')
st.markdown('Markdown here')
st.success('Congratulations')
st.info('This is Info')
st.warning('This is Warning')
st.error('This is Error')

dataframe = np.random.rand(50, 20)
st.dataframe(dataframe)
st.text('****' * 22)

df = pd.DataFrame(np.random.rand(10, 20),
                  columns=('col %d' % i for i in range(20)))
st.dataframe(df.style.highlight_max(axis=1))
def main(): st.title("File Content Analysis") st.subheader("Natural Language Processing") message = st.text_area("Enter the text below", "Enter here", key=1) # Tokenization if st.checkbox("Tokens and Lemma"): st.subheader("Showing Tokens and Lemma") if st.button("Analyze", key=14): nlp_result = text_analysis(message) with st.spinner("Waiting"): t = Timer(lambda: text_analysis(message)) time.sleep(t.timeit(number=1)) st.json(nlp_result) # Named Entity if st.checkbox("Named Entities"): st.subheader("Extract Named Entities") #message = st.text_area("Enter the text below", "Enter here", key = 2) if st.button("Extract", key=13): nlp_result = name_entity_analysis(message) with st.spinner("Waiting"): t = Timer(lambda: name_entity_analysis(message)) time.sleep(t.timeit(number=1)) st.json(nlp_result) # Sentiment Analysis if st.checkbox("Sentiment Analysis"): st.subheader("Showing Your Sentiments") #message = st.text_area("Enter the text below", "Enter here", key = 3) if st.button("Analyze", key=10): nlp_result = sentiment_analysis(message) with st.spinner("Waiting"): t = Timer(lambda: sentiment_analysis(message)) time.sleep(t.timeit(number=1)) if nlp_result > 0: st.success("This is a positive reaction") elif nlp_result < 0: st.error("This is a negative reaction") else: st.warning("This is a neutral reaction") # Text Summarizaton if st.checkbox("Text Summarize"): st.subheader("Showing your Summarize text") #message = st.text_area("Enter the text below", "Enter here", key = 4) summr = st.selectbox("Choose the summarizer", ("gensim", "sumy", "TF-IDF")) if summr == 'sumy': level = st.selectbox("Select the Summarization Level", ("1", "2", "3", "4")) if summr == 'TF-IDF': th = st.slider('Size of Summary', 0.0, 2.0, 0.1) if st.button("Summarize", key=11): if summr == 'gensim': nlp_result = gensim_summ(message) st.info("Using Gensim") with st.spinner("Waiting"): t = Timer(lambda: gensim_summ(message)) time.sleep(t.timeit(number=1)) st.success(nlp_result) elif summr == 'sumy': nlp_result = sumy_summ(message, level) st.info("Using Sumy") with st.spinner("Waiting"): t = Timer(lambda: sumy_summ(message, level)) time.sleep(t.timeit(number=1)) st.success(nlp_result) elif summr == 'TF-IDF': nlp_result = run_summarization(message, th) with st.spinner("Waiting"): t = Timer(lambda: run_summarization(message, th)) time.sleep(t.timeit(number=1)) st.success(nlp_result)
def __init__(self):
    self.conn = sqlite3.connect('data/songs.db')
    self.model = load_my_model()
    st.success('Done!')
    self.run()
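# `load_my_model` is not defined in this snippet. A plausible, hypothetical
# shape for it, cached so the model is loaded only once per session (the
# framework and file path are assumptions):
@st.cache(allow_output_mutation=True)
def load_my_model():
    from tensorflow.keras.models import load_model  # assumed framework
    return load_model("models/song_model.h5")       # hypothetical path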
    'Tamil': 'ta',
    'Telugu': 'te',
    'Ukrainian': 'uk',
    'Urdu': 'ur',
    'Vietnamese': 'vi'
}

if choice == 'Language Detection':
    st.subheader('Language Detection App')
    language_detect_text = st.text_area('Enter the text in your language')
    if st.button('Check Language'):
        try:
            text_language = TextBlob(language_detect_text)
            lang_code = text_language.detect_language()
            result = pycountry.languages.get(alpha_2=lang_code).name
            st.success(result)
        except Exception:
            st.error('Sorry, not able to detect 😢')
elif choice == 'Language Translation':
    st.subheader('Language Translation App')
    language_translate_text = st.text_area('Enter the text in your language')
    mylang = st.selectbox('Select language to translate',
                          options=list(languages.keys()))
    if st.button('Translate'):
        # Compare the raw string before wrapping it in a TextBlob,
        # otherwise the empty-input check never matches
        if language_translate_text == '':
            st.warning('Please enter text to translate')
        else:
            try:
                # Assumed completion: the source snippet is truncated at this
                # `try:`; translating via TextBlob to the selected language
                # code is the natural continuation
                blob = TextBlob(language_translate_text)
                translation = blob.translate(to=languages[mylang])
                st.success(str(translation))
            except Exception:
                st.error('Sorry, not able to translate 😢')
time.sleep(60 * 60)  # wait an hour between price checks
al_price = int(al_price)
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
if url.startswith('https://www.flipkart.com'):
    getprice_flipkart(url, receiveremail_id)
elif url.startswith('https://www.amazon.in'):
    getprice_amazon(url, receiveremail_id)

st.title('Price Drop Alerter')
st.header("Enter the URL of the Product")
url = st.text_input('')
if st.button("OK"):
    st.success("Product URL Entered Successfully")

st.header("Enter Email id to get Notified")
receiveremail_id = st.text_input(' ')
if st.button('SUBMIT'):
    st.success(f"Entered email address is {receiveremail_id}")

st.sidebar.header("Enter the expected price-drop value")
al_price = st.sidebar.number_input('')
st.sidebar.slider("Entered Price is", al_price - 1000.0, al_price + 1000.0, al_price)
if st.sidebar.button("Set Price"):
    st.sidebar.success(f"The price set by you is {al_price}")
if al_price == 0.00:
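# `getprice_amazon` and `getprice_flipkart` are not shown in the source. A
# hypothetical sketch of the Amazon variant; the element id is a historical
# Amazon selector that may differ today, and `send_mail` is an assumed
# alerting helper, not from the source:
def getprice_amazon(url, receiveremail_id):
    headers = {'User-Agent': 'Mozilla/5.0'}  # assumed request header
    page = requests.get(url, headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    price_tag = soup.find(id='priceblock_ourprice')  # may have changed
    if price_tag:
        price = float(price_tag.get_text().replace('₹', '').replace(',', '').strip())
        if price <= al_price:
            send_mail(receiveremail_id, url, price)  # hypothetical helper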
def main():
    """Face Expression Detection App"""
    # Set up the app title & sidebar activities
    activities = [
        "Home", "Detect your Facial expressions", "CNN Model Performance",
        "About"
    ]
    choice = st.sidebar.selectbox("Select Activity", activities)

    if choice == 'Home':
        html_temp = """
        <marquee behavior="scroll" direction="left" width="100%;">
        <h2 style="color: #000000; font-family: 'Raleway',sans-serif; font-size: 62px; font-weight: 800; line-height: 72px; margin: 0 0 24px; text-align: center; text-transform: uppercase;">Try your own test!</h2>
        </marquee><br>
        """
        st.markdown(html_temp, unsafe_allow_html=True)
        st.subheader("Video Demo :")
        st.subheader(":smile: :worried: :fearful: :rage: :hushed:")
        st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s")

    # If consulting the CNN model performance
    if choice == 'CNN Model Performance':
        st.title("Face Expression WEB Application :")
        st.subheader(":smile: :worried: :fearful: :rage: :hushed:")
        st.subheader("CNN Model :")
        st.image('images/model.png', width=700)
        st.subheader("FER2013 Dataset from:")
        st.text("https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data")
        st.image('images/dataframe.png', width=700)
        st.subheader("Model training results:")
        st.markdown("Accuracy :chart_with_upwards_trend: :")
        st.image("images/accuracy.png")
        st.markdown("Loss :chart_with_downwards_trend: :")
        st.image("images/loss.png")

    # If detecting facial expressions, let the user upload an image
    if choice == 'Detect your Facial expressions':
        st.title("Face Expression WEB Application :")
        st.subheader(":smile: :worried: :fearful: :rage: :hushed:")
        image_file = st.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg'])

        # If an image is uploaded, display a progress bar and the image
        if image_file is not None:
            our_image = Image.open(image_file)
            st.text("Original Image")
            progress = st.progress(0)
            for i in range(100):
                time.sleep(0.01)
                progress.progress(i + 1)
            st.image(our_image)
        if image_file is None:
            st.error("No image uploaded yet")

        # Face Detection
        task = ["Faces"]
        feature_choice = st.sidebar.selectbox("Find Features", task)
        if st.button("Process"):
            if feature_choice == 'Faces':
                # Progress bar
                progress = st.progress(0)
                for i in range(100):
                    time.sleep(0.05)
                    progress.progress(i + 1)
                # End of progress bar
                result_img, result_faces, prediction = detect_faces(our_image)
                st.image(result_img)  # the original gated on st.image()'s return value, which is unreliable
                st.success("Found {} faces".format(len(result_faces)))
                if prediction == 'Happy':
                    st.subheader("YeeY! You are Happy :smile: today, Always Be!")
                    st.text("Here is your Recommended video to watch:")
                    st.video("https://www.youtube.com/watch?v=4q1dgn_C0AU&t=24s")
                elif prediction == 'Angry':
                    st.subheader("You seem to be angry :rage: today, Take it easy!")
                    st.text("Here is your Recommended video to watch:")
                    st.video("https://www.youtube.com/watch?v=d_5DU5opOFk")
                elif prediction == 'Disgust':
                    st.subheader("You seem to be disgusted :rage: today!")
                    st.text("Here is your Recommended video to watch:")
                    # st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s")
                elif prediction == 'Fear':
                    st.subheader("You seem to be Fearful :fearful: today, Be courageous!")
                    st.text("Here is your Recommended video to watch:")
                    st.video("https://www.youtube.com/watch?v=h_D6HhWiTiI")
                elif prediction == 'Neutral':
                    st.subheader("You seem to be Neutral today, Happy day!")
                    st.text("Here is your Recommended video to watch:")
                    # st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s")
                elif prediction == 'Sad':
                    st.subheader("You seem to be Sad :disappointed: today, Smile and be happy!")
                    st.text("Here is your Recommended video to watch:")
                    st.video("https://www.youtube.com/watch?v=ST97BGCi3-w")
                elif prediction == 'Surprise':
                    st.subheader("You seem to be surprised today!")
                    st.text("Here is your Recommended video to watch:")
                    # st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s")
                else:
                    st.error("Your image does not match the training dataset's images! Try another image!")

    elif choice == 'About':
        st.title("Face Expression WEB Application :")
        st.subheader(":smile: :worried: :fearful: :rage: :hushed:")
        st.subheader("About Face Expression Detection App")
def main():
    """ML App --- """
    activities = ["EDA", "Sweetviz EDA", "EDA Report", "About"]
    choice = st.sidebar.selectbox("Select Activities", activities)

    if choice == "EDA":
        st.subheader("EDA")

        def file_selector(folder_path='./datasets'):
            filenames = os.listdir(folder_path)
            selected_filename = st.selectbox('Select a file', filenames)
            return os.path.join(folder_path, selected_filename)

        filename = file_selector()
        st.write('You selected `%s`' % filename)
        df = pd.read_csv(filename)  # read the selected CSV

        # Show data
        if st.checkbox("Show Data"):
            number = st.number_input("No of Rows to View", 1, 2000)
            st.dataframe(df.head(number))

        # Show column names
        if st.button("Column Names"):
            st.success(df.columns)

        # Show the shape of the dataset
        if st.checkbox("Show Shape"):
            st.warning(df.shape)

        # Show dimensions of the dataset
        data_dim = st.radio("Show Dimensions By", ("Rows", "Columns"))
        if data_dim == "Rows":
            st.text("No of Rows")
            st.write(df.shape[0])
        elif data_dim == "Columns":
            st.text("No of Columns")
            st.write(df.shape[1])

        # Select columns to show
        if st.checkbox("Select columns to show"):
            all_columns = df.columns.to_list()
            select_columns = st.multiselect("Select", all_columns)
            new_df = df[select_columns]
            st.dataframe(new_df)

        # Data types
        if st.button("Data Types"):
            st.write(df.dtypes)

        # Summary of the data
        if st.checkbox("Summary"):
            st.write(df.describe())

        # Value counts
        if st.button("Value Count"):
            st.text("Value counts by Target/Class")
            st.write(df.iloc[:, -1].value_counts())

        # Summary of a column
        if st.checkbox("Summary of a column"):
            dfs = ps.DataFrameSummary(df)
            all_columns = df.columns.to_list()
            select_column = st.selectbox("Select", all_columns)
            new_df = dfs[select_column]
            st.write(new_df)
            st.pyplot()

        # Column data type
        if st.button("Column Data Type"):
            # Build the summary here too, so this works even when the
            # "Summary of a column" checkbox above is not ticked
            dfs = ps.DataFrameSummary(df)
            st.write(dfs.columns_types)

        # Data visualization
        st.header("Data Visualization")

        # Correlation plot (matplotlib)
        if st.checkbox("Correlation plot [matplotlib]"):
            plt.imshow(df.corr(), cmap="viridis")
            plt.colorbar()
            st.pyplot()

        # Correlation plot (seaborn)
        if st.checkbox("Correlation plot [Seaborn]"):
            plt.figure(figsize=(16, 12))
            sns.heatmap(df.corr(), annot=True)
            st.pyplot()

        # Correlation plot (plotly); assumes `import plotly.express as px`
        if st.checkbox("Correlation plot [Plotly]"):
            fig = px.imshow(df.corr())  # st.imshow does not exist
            st.plotly_chart(fig)

        # Draw a count plot
        if st.checkbox("Count plot"):
            st.text("Value count by Target")
            all_columns_name = df.columns.tolist()
            primary_col = st.selectbox("Select Primary column to group by", all_columns_name)
            select_column_name = st.multiselect("Select Columns", all_columns_name)
            if st.button("Plot"):
                st.text("Generating plot for {} and {}".format(primary_col, select_column_name))
                vc_plot = df.groupby(primary_col)[select_column_name].count()
            else:
                vc_plot = df.iloc[:, -1].value_counts()
            st.write(vc_plot.plot(kind="bar"))
            st.pyplot()

        # Draw a pie plot
        if st.checkbox("Pie Plot"):
            st.text("Pie plot of the data")
            all_columns_name = df.columns.tolist()
            primary_col = st.selectbox("Select Column to Draw a Chart", all_columns_name)
            st.write(df[primary_col].value_counts().plot.pie(autopct="%1.2f%%"))
            st.pyplot()

        # Draw a histogram
        if st.checkbox("Histogram"):
            st.text("Histogram of the data")
            all_columns_name = df.columns.tolist()
            primary_col = st.selectbox("Select Column to Draw a Hist plot", all_columns_name)
            plot = df[primary_col].plot.hist()
            if st.button("Generate Plot"):
                st.write(plot)
                st.pyplot()

        # Violin plot
        if st.checkbox("Violin plot"):
            st.text("Violin plot of the data")
            all_columns = df.columns.tolist()
            primary_col = st.selectbox("Please select an x-column to draw a violin plot", all_columns)
            sec_col = st.selectbox("Please select a y-column to draw a violin plot", all_columns)
            violin_plot = sns.catplot(x=primary_col, y=sec_col, kind="violin", data=df)
            if st.button("Generate Plot"):
                st.write(violin_plot)
                st.pyplot()

        # Custom plots
        st.header("Custom plots")
        all_column_name = df.columns.tolist()
        plot_type = st.selectbox("Select the Plot Type",
                                 ["area", "bar", "line", "hist", "box", "kde"])
        select_column = st.multiselect("Select a column to draw", all_column_name)
        if st.button("Generate plot"):
            st.success("Generating a {} plot for {}".format(plot_type, select_column))
            if plot_type == "area":
                st.area_chart(df[select_column])
            elif plot_type == "bar":
                st.bar_chart(df[select_column])
            elif plot_type == "line":
                st.line_chart(df[select_column])
            elif plot_type in ("hist", "box", "kde"):
                st.write(df[select_column].plot(kind=plot_type))
                st.pyplot()

    # Sweetviz EDA
    elif choice == "Sweetviz EDA":
        st.subheader("Quick Analysis")
        data_file = st.file_uploader("Upload CSV", type=['csv'])
        if data_file is not None:
            df = pd.read_csv(data_file)
            st.dataframe(df.head())
            if st.button("Generate Sweetviz Report"):
                report = sv.analyze(df)
                report.show_html()
                st_display_sweetviz("SWEETVIZ_REPORT.html")

    # EDA Report
    elif choice == "EDA Report":
        data_file1 = st.file_uploader("Upload CSV", type=['csv'])
        if data_file1 is not None:
            df = pd.read_csv(data_file1)
            st.dataframe(df.head())
            if st.button("Generate"):
                profile = ProfileReport(df)
                st_profile_report(profile)

    elif choice == "About":
        st.subheader("Made by Archit")
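# `st_display_sweetviz` is referenced above but not defined in the source.
# A minimal sketch that embeds the generated HTML report via Streamlit
# components; the width/height defaults are arbitrary assumptions:
import codecs
import streamlit.components.v1 as components

def st_display_sweetviz(report_html, width=1000, height=500):
    with codecs.open(report_html, 'r') as report_file:
        page = report_file.read()
    components.html(page, width=width, height=height, scrolling=True)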
def main():
    """Hep Mortality Prediction App"""
    st.markdown(html_temp.format('royalblue'), unsafe_allow_html=True)

    menu = ["Home", "Login", "SignUp"]
    submenu = ["Plot", "Prediction", "Metrics"]
    choice = st.sidebar.selectbox("Menu", menu)

    if choice == "Home":
        st.subheader("Home")
        st.markdown(descriptive_message_temp, unsafe_allow_html=True)
        st.image(load_image('images/hepatitis_B.jpg'))

    elif choice == "Login":
        username = st.sidebar.text_input("Username")
        password = st.sidebar.text_input("Password", type='password')
        if st.sidebar.checkbox("Login"):
            create_usertable()
            hashed_pswd = generate_hashes(password)
            result = login_user(username, verify_hashes(password, hashed_pswd))
            if result:
                st.success("Welcome {}".format(username))
                activity = st.selectbox("Activity", submenu)

                if activity == "Plot":
                    st.subheader("Data Vis Plot")
                    df = pd.read_csv("data/clean_hepatitis_dataset.csv")
                    st.dataframe(df)
                    df['class'].value_counts().plot(kind='bar')
                    st.pyplot()

                    # Frequency distribution plot
                    freq_df = pd.read_csv("data/freq_df_hepatitis_dataset.csv")
                    st.bar_chart(freq_df['count'])

                    if st.checkbox("Area Chart"):
                        all_columns = df.columns.to_list()
                        feat_choices = st.multiselect("Choose a Feature", all_columns)
                        new_df = df[feat_choices]
                        st.area_chart(new_df)

                elif activity == "Prediction":
                    st.subheader("Predictive Analytics")
                    age = st.number_input("Age", 7, 80)
                    sex = st.radio("Sex", tuple(gender_dict.keys()))
                    steroid = st.radio("Do You Take Steroids?", tuple(feature_dict.keys()))
                    antivirals = st.radio("Do You Take Antivirals?", tuple(feature_dict.keys()))
                    fatigue = st.radio("Do You Have Fatigue?", tuple(feature_dict.keys()))
                    spiders = st.radio("Presence of Spider Naevi", tuple(feature_dict.keys()))
                    ascites = st.selectbox("Ascites", tuple(feature_dict.keys()))
                    varices = st.selectbox("Presence of Varices", tuple(feature_dict.keys()))
                    bilirubin = st.number_input("Bilirubin Content", 0.0, 8.0)
                    alk_phosphate = st.number_input("Alkaline Phosphate Content", 0.0, 296.0)
                    sgot = st.number_input("Sgot", 0.0, 648.0)
                    albumin = st.number_input("Albumin", 0.0, 6.4)
                    protime = st.number_input("Prothrombin Time", 0.0, 100.0)
                    histology = st.selectbox("Histology", tuple(feature_dict.keys()))

                    feature_list = [
                        age, get_value(sex, gender_dict), get_fvalue(steroid),
                        get_fvalue(antivirals), get_fvalue(fatigue),
                        get_fvalue(spiders), get_fvalue(ascites),
                        get_fvalue(varices), bilirubin, alk_phosphate, sgot,
                        albumin, int(protime), get_fvalue(histology)
                    ]
                    st.write(len(feature_list))
                    st.write(feature_list)
                    pretty_result = {
                        "age": age, "sex": sex, "steroid": steroid,
                        "antivirals": antivirals, "fatigue": fatigue,
                        "spiders": spiders, "ascites": ascites,
                        "varices": varices, "bilirubin": bilirubin,
                        "alk_phosphate": alk_phosphate, "sgot": sgot,
                        "albumin": albumin, "protime": protime,
                        "histology": histology
                    }
                    st.json(pretty_result)
                    single_sample = np.array(feature_list).reshape(1, -1)

                    # ML
                    model_choice = st.selectbox("Select Model", ["LR", "KNN", "DecisionTree"])
                    if st.button("Predict"):
                        if model_choice == "KNN":
                            loaded_model = load_model("models/knn_hepB_model.pkl")
                        elif model_choice == "DecisionTree":
                            loaded_model = load_model("models/decision_tree_clf_hepB_model.pkl")
                        else:
                            loaded_model = load_model("models/logistic_regression_hepB_model.pkl")
                        prediction = loaded_model.predict(single_sample)
                        pred_prob = loaded_model.predict_proba(single_sample)

                        # Class 1 = Die, class 2 = Live
                        if prediction == 1:
                            st.warning("Patient Dies")
                        else:
                            st.success("Patient Lives")
                        pred_probability_score = {
                            "Die": pred_prob[0][0] * 100,
                            "Live": pred_prob[0][1] * 100
                        }
                        st.subheader("Prediction Probability Score using {}".format(model_choice))
                        st.json(pred_probability_score)
                        if prediction == 1:
                            st.subheader("Prescriptive Analytics")
                            st.markdown(prescriptive_message_temp, unsafe_allow_html=True)

                    if st.checkbox("Interpret"):
                        if model_choice == "KNN":
                            loaded_model = load_model("models/knn_hepB_model.pkl")
                        elif model_choice == "DecisionTree":
                            loaded_model = load_model("models/decision_tree_clf_hepB_model.pkl")
                        else:
                            loaded_model = load_model("models/logistic_regression_hepB_model.pkl")

                        # 1 = Die and 2 = Live
                        df = pd.read_csv("data/clean_hepatitis_dataset.csv")
                        feature_names = [
                            'age', 'sex', 'steroid', 'antivirals', 'fatigue',
                            'spiders', 'ascites', 'varices', 'bilirubin',
                            'alk_phosphate', 'sgot', 'albumin', 'protime',
                            'histology'
                        ]
                        x = df[feature_names]
                        class_names = ['Die(1)', 'Live(2)']
                        explainer = lime.lime_tabular.LimeTabularExplainer(
                            x.values,
                            feature_names=feature_names,
                            class_names=class_names,
                            discretize_continuous=True)
                        # Explain this single instance
                        exp = explainer.explain_instance(
                            np.array(feature_list),
                            loaded_model.predict_proba,
                            num_features=13,
                            top_labels=1)
                        # exp.show_in_notebook() only works inside Jupyter, so render the list instead
                        # exp.save_to_file('lime_oi.html')
                        st.write(exp.as_list())
                        new_exp = exp.as_list()
                        label_limits = [i[0] for i in new_exp]
                        label_scores = [i[1] for i in new_exp]
                        plt.barh(label_limits, label_scores)
                        st.pyplot()
                        plt.figure(figsize=(20, 10))
                        fig = exp.as_pyplot_figure()
                        st.pyplot()
            else:
                st.warning("Incorrect Username/Password")

    elif choice == "SignUp":
        new_username = st.sidebar.text_input("Username")
        new_password = st.sidebar.text_input("Password", type='password')
        confirm_password = st.sidebar.text_input("Confirm Password", type='password')
        if new_password == confirm_password:
            st.success("Password Confirmed")
        else:
            st.warning("Passwords do not match")
        if st.button("Submit"):
            create_usertable()
            hashed_new_password = generate_hashes(new_password)
            add_userdata(new_username, hashed_new_password)
            st.success("You have successfully created a new account")
            st.info("Login to Get Started")
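# `load_model` is used for the pickled classifiers above but is not defined
# in this snippet. A minimal sketch assuming joblib-style pickle files:
import joblib

def load_model(model_file):
    with open(model_file, "rb") as f:
        return joblib.load(f)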
def add(self, input_example, output_example):
    from collections import Counter
    counter = Counter()

    # Takes two tensors as input. The shape of each example is HxWxC and is
    # changed into CxHxW; both examples need to have the same resolution.
    image1 = input_example.permute(2, 0, 1)
    image2 = output_example.permute(2, 0, 1)
    image1 = image1.unsqueeze(0)
    image2 = image2.unsqueeze(0)
    unfolded1 = torch.nn.functional.unfold(image1, kernel_size=self.kernel,
                                           stride=self.stride,
                                           padding=self.padding)
    unfolded2 = torch.nn.functional.unfold(image2, kernel_size=self.kernel,
                                           stride=self.stride,
                                           padding=self.padding)
    unfolded1 = unfolded1.squeeze(0)
    unfolded2 = unfolded2.squeeze(0)
    unfolded1 = unfolded1.permute(1, 0)
    unfolded2 = unfolded2.permute(1, 0)

    with st.spinner('TRAINING in progress...'):
        if self.index_pretrain:
            patterns = None
            # Make sure the resolution is the same or the loop will go wrong!
            # TODO: make sure the indexing is correct!
            for i, pattern1 in enumerate(unfolded1):
                pattern1 = pattern1.unsqueeze(0)
                pattern2 = unfolded2[i]
                pattern1 = pattern1.numpy().astype('float32')
                pattern2 = pattern2.numpy().astype('float32')
                self.patterns.append(pattern2)
                if patterns is None:
                    patterns = pattern1
                else:
                    patterns = np.concatenate((patterns, pattern1))
            if not self.mem.is_trained:
                self.mem.train(patterns)
            self.mem.add(patterns)
        else:
            # Make sure the resolution is the same or the loop will go wrong!
            # TODO: make sure the indexing is correct!
            train_progress_bar = st.progress(0)
            for i, pattern1 in enumerate(unfolded1):
                pattern1 = pattern1.unsqueeze(0)
                pattern2 = unfolded2[i].unsqueeze(0)
                pattern1 = pattern1.numpy().astype('float32')
                pattern2 = pattern2.numpy().astype('float32')
                d1, k1 = self.mem.search(pattern1, 1)
                d2, k2 = self.mem2.search(pattern2, 1)
                k1 = int(k1[0][0])
                k2 = int(k2[0][0])
                # Earlier revisions only added a pattern when it was not
                # already present (d > 0); now both are added unconditionally.
                self.mem.add(pattern1)
                k1 = self.mem.ntotal - 1
                self.mem2.add(pattern2)
                k2 = self.mem2.ntotal - 1  # was self.mem.ntotal, but this indexes mem2
                self.patterns[k1] = k2
                train_progress_bar.progress(i / (len(unfolded1) - 1))
    st.success(f'LEARNED: {self.mem.ntotal}\tpatterns!')
    st.write(counter.most_common(10))
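# For reference, torch.nn.functional.unfold turns an (N, C, H, W) image into
# an (N, C*k*k, L) tensor of flattened k-by-k patches, which is why each row
# of `unfolded1`/`unfolded2` above is one patch pattern. A small sketch:
import torch
import torch.nn.functional as F

img = torch.randn(1, 3, 8, 8)                     # N=1, C=3, 8x8 image
patches = F.unfold(img, kernel_size=2, stride=2)  # 16 non-overlapping 2x2 patches
print(patches.shape)                              # torch.Size([1, 12, 16]) -- C*k*k = 12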
def main():
    """Face Detection App with Streamlit"""
    st.title("Face Detection App")
    st.text("Built with Streamlit and OpenCV")

    activities = ["Face Detection", "About"]
    choice = st.sidebar.selectbox("Select Activity", activities)

    if choice == 'Face Detection':
        st.subheader("Face Detection")
        image_file = st.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg'])
        if image_file is not None:
            our_image = Image.open(image_file)
            st.text("Original Image")
            st.image(our_image, width=300)

            enhance_type = st.sidebar.radio("Image Enhance Type", [
                "Original", "Gray-Scale", "Contrast", "Brightness", "Blurring"
            ])
            if enhance_type == 'Gray-Scale':
                new_img = np.array(our_image.convert('RGB'))
                img = cv2.cvtColor(new_img, 1)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                st.image(gray, width=300)
            elif enhance_type == 'Contrast':
                c_rate = st.sidebar.slider("Contrast", 0.5, 3.5)
                enhancer = ImageEnhance.Contrast(our_image)
                img_output = enhancer.enhance(c_rate)
                st.image(img_output, width=300)
            elif enhance_type == 'Brightness':
                c_rate = st.sidebar.slider("Brightness", 0.5, 3.5)
                enhancer = ImageEnhance.Brightness(our_image)
                img_output = enhancer.enhance(c_rate)
                st.image(img_output, width=300)
            elif enhance_type == 'Blurring':
                new_img = np.array(our_image.convert('RGB'))
                blur_rate = st.sidebar.slider("Blur Rate", 0.5, 3.5)  # was mislabeled "Brightness"
                img = cv2.cvtColor(new_img, 1)
                blur_img = cv2.GaussianBlur(img, (11, 11), blur_rate)
                st.image(blur_img, width=300)
            else:
                st.image(our_image, width=300)

            # Face Detection
            task = ["Faces", "Smiles", "Eyes", "Cannize", "Cartonize"]
            feature_choice = st.sidebar.selectbox("Find Features", task)
            if st.button("Process"):
                if feature_choice == 'Faces':
                    result_img, result_faces = detect_faces(our_image)
                    st.image(result_img, width=300)
                    st.success("Found {} faces".format(len(result_faces)))
                elif feature_choice == 'Smiles':
                    result_img = detect_smiles(our_image)
                    st.image(result_img, width=300)
                elif feature_choice == 'Eyes':
                    result_img = detect_eyes(our_image)
                    st.image(result_img, width=300)
                elif feature_choice == 'Cartonize':
                    result_img = cartonize_image(our_image)
                    st.image(result_img, width=300)
                elif feature_choice == 'Cannize':
                    result_canny = cannize_image(our_image)
                    st.image(result_canny, width=300)

    elif choice == 'About':
        st.subheader("About Face Detection App")
        st.text("This app is built with Streamlit and OpenCV")
        st.markdown("#### By: Hrishikesh Malkar")
        st.markdown("###### Reference: [Jesse E. Agbe (JCharisTech)](https://www.jcharistech.com/)")
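# `detect_faces` is not shown in the source. A conventional OpenCV Haar
# cascade sketch under that assumption; the cascade file is the stock one
# shipped with OpenCV, and the exact helper in the original app may differ:
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def detect_faces(our_image):
    new_img = np.array(our_image.convert('RGB'))
    img = cv2.cvtColor(new_img, 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect faces and draw a rectangle around each one
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return img, faces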
st.write("""
# Simple Rental Prediction App

This app predicts the rental price of a property in the Netherlands!
All data is supplied via kamernet.nl with active properties listed for rent from July 2019 - March 2020.
The app was developed using PyCaret with a Random Forest model to predict the price of a property based on user inputs.
""")

@st.cache(allow_output_mutation=True)
def load_data():
    source = 'https://github.com/michael-william/Netherlands-Rental-Prices/raw/master/data/ml_data.csv'
    data = pd.read_csv(source, index_col=0)
    return data

with st.spinner(text="Loading data"):
    data = load_data()
    st.success('Data loaded!')

@st.cache(allow_output_mutation=True)
def run_model():
    clf = py.setup(data, target='rent', silent=True)
    rf_model = py.create_model('rf', fold=5, verbose=False)
    model = py.finalize_model(rf_model)
    return model

def user_input_features():
    square_meters = st.sidebar.slider('Area in square meters', 6, 675, 56)
    locator = Nominatim(user_agent='myGeocoder')
    address = st.sidebar.text_input("Address of property", "Spaarndammerstraat 35 Amsterdam")
    location = locator.geocode(address)
    longitude = np.round(location.longitude, 4)
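# Not shown above: once `run_model` returns the finalized PyCaret model,
# prediction on the user's inputs typically goes through
# pycaret.regression.predict_model. A hedged sketch; the feature column
# names here are assumptions based on the inputs collected above, not the
# dataset's actual schema:
input_df = pd.DataFrame([{
    'area': 56,            # hypothetical column name
    'longitude': 4.8897,   # from the geocoded address
    'latitude': 52.3740,
}])
model = run_model()
prediction = py.predict_model(model, data=input_df)
st.write(prediction)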
def main():
    # st.title("Fake Data Generator")
    stc.html(custom_title)

    menu = ["Home", "Customize", "About"]
    choice = st.sidebar.selectbox("Menu", menu)

    # Locale providers supported by the Faker class
    localized_providers = [
        "ar_AA", "ar_EG", "ar_JO", "ar_PS", "ar_SA", "bg_BG", "bs_BA",
        "cs_CZ", "de", "de_AT", "de_CH", "de_DE", "dk_DK", "el_CY", "el_GR",
        "en", "en_AU", "en_CA", "en_GB", "en_IE", "en_IN", "en_NZ", "en_PH",
        "en_TH", "en_US", "es", "es_CA", "es_ES", "es_MX", "et_EE", "fa_IR",
        "fi_FI", "fil_PH", "fr_CA", "fr_CH", "fr_FR", "fr_QC", "he_IL",
        "hi_IN", "hr_HR", "hu_HU", "hy_AM", "id_ID", "it_CH", "it_IT",
        "ja_JP", "ka_GE", "ko_KR", "la", "lb_LU", "lt_LT", "lv_LV", "mt_MT",
        "ne_NP", "nl_BE", "nl_NL", "no_NO", "or_IN", "pl_PL", "pt_BR",
        "pt_PT", "ro_RO", "ru_RU", "sk_SK", "sl_SI", "sv_SE", "ta_IN", "th",
        "th_TH", "tl_PH", "tr_TR", "tw_GH", "uk_UA", "zh_CN", "zh_TW"
    ]

    if choice == "Home":
        st.subheader("Home")
        number_to_gen = st.sidebar.number_input("Number", 10, 5000)
        locale = st.sidebar.multiselect("Select Locale", localized_providers, default="en_US")
        dataformat = st.sidebar.selectbox("Save Data As", ["csv", "json"])
        df = generate_locale_profile(number_to_gen, locale)
        st.dataframe(df)
        with st.beta_expander("📩: Download"):
            make_downloadable_df_format(df, dataformat)

    elif choice == "Customize":
        st.subheader("Customize Your Fields")
        locale = st.sidebar.multiselect("Select Locale", localized_providers, default="en_US")
        profile_options_list = [
            'username', 'name', 'sex', 'address', 'mail', 'birthdate',
            'job', 'company', 'ssn', 'residence', 'current_location',
            'blood_group', 'website'
        ]  # a comma was missing between 'birthdate' and 'job', silently fusing them
        profile_fields = st.sidebar.multiselect("Fields", profile_options_list, default='username')
        number_to_gen = st.sidebar.number_input("Number", 10, 10000)
        dataformat = st.sidebar.selectbox("Save Data As", ["csv", "json"])

        # Initialize the Faker class
        custom_fake = Faker(locale)
        data = [
            custom_fake.profile(fields=profile_fields)
            for i in range(number_to_gen)
        ]
        df = pd.DataFrame(data)

        # View as dataframe
        st.dataframe(df)
        # View as JSON
        with st.beta_expander("🔍: View JSON "):
            st.json(data)
        with st.beta_expander("📩: Download"):
            make_downloadable_df_format(df, dataformat)

    else:
        st.subheader("About")
        st.success("Built with Streamlit")
        st.info("Jesus Saves @JCharisTech")
        st.text("By Jesse E.Agbe(JCharis)")
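# `make_downloadable_df_format` is not defined in this snippet. A minimal
# sketch that renders a base64 download link for either format; the link
# text and filename are arbitrary choices, not from the source:
import base64

def make_downloadable_df_format(df, dataformat):
    if dataformat == "csv":
        data = df.to_csv(index=False)
    else:
        data = df.to_json()
    b64 = base64.b64encode(data.encode()).decode()
    href = f'<a href="data:file/{dataformat};base64,{b64}" download="fake_data.{dataformat}">Download</a>'
    st.markdown(href, unsafe_allow_html=True)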
# `sgd`, `config`, `MyCallback`, and the train/test splits are defined
# elsewhere in the original script.
model.add(
    Conv2D(10, (5, 5),
           input_shape=(img_width, img_height, 1),
           activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Conv2D(config.layer_2_size, (5, 5), input_shape=(img_width, img_height, 1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(8, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

# Suppress per-epoch terminal output when Streamlit live-saving is enabled
show_terminal_output = not config.get_option("server.liveSave")
model.fit(
    x_train,
    y_train,
    validation_data=(x_test, y_test),
    epochs=epochs,
    verbose=show_terminal_output,
    callbacks=[MyCallback(x_test)],
)
st.success("Finished training!")
# model.save("convnet.h5")
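# `MyCallback` is referenced above but not defined here. A plausible sketch
# of a Keras callback that reports progress to Streamlit after each epoch;
# what the original callback displays is an assumption:
from tensorflow.keras.callbacks import Callback

class MyCallback(Callback):
    def __init__(self, x_test):
        super().__init__()
        self.x_test = x_test

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Stream the epoch metrics into the app as training runs
        st.write(f"Epoch {epoch + 1}: "
                 f"loss={logs.get('loss'):.4f}, "
                 f"val_accuracy={logs.get('val_accuracy'):.4f}")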
def main():
    """Text Analysis App"""
    st.title("Language Detector & Translator")
    image = Image.open("people_speaking.jpg")
    st.sidebar.image(image, caption="Different languages", use_column_width=True)

    activities = ["Detector & Translator", "Voice", "About"]
    choice = st.sidebar.selectbox("Menu", activities)
    df_idiomas = pd.read_csv('lista_idiomas.csv')

    if choice == 'Detector & Translator':
        st.subheader("Text Area")
        lista_modos = ("For 23 languages", "For selected languages")
        modo = st.sidebar.radio("Choose", lista_modos)
        texto_default = 'Text'
        raw_text = st.text_area("Copy&Paste -> Ctrl+Enter", texto_default)
        blob = TextBlob(raw_text)

        if modo == "For selected languages":
            try:
                flag = False
                # The original compared against 1-4 literal spaces; stripping
                # covers the same whitespace-only inputs
                if raw_text.strip() == "":
                    st.error("Please write something in the text area")
                elif raw_text != texto_default and len(raw_text) > 0:
                    dict_idioma_full = lista_idiomas_full()
                    idioma_original = get_value(blob.detect_language(), dict_idioma_full)
                    original_key = get_key(idioma_original, dict_idioma_full)
                    st.success("Original Language: " + idioma_original + " (" + original_key + ")")
                    dict_idioma = lista_idiomas(idioma_original)
                    options = st.multiselect("Choose a language", tuple(dict_idioma.values()))
                    idioma_final = get_key(idioma_original, dict_idioma)
                    for value in options:
                        idioma_final = get_key(value, dict_idioma)
                        if idioma_original != idioma_final:
                            texto_convertido = blob.translate(to=idioma_final)
                            st.success("Language: " + value + " (" + idioma_final + ")")
                            st.markdown(texto_convertido)
                            flag = True
            except:
                if flag != True:
                    st.error("ERROR1: text must be at least 3 letters and the word must exist in the formal language")
        else:
            # "For 23 languages": translate into every available language
            flag = False
            texto_convertido = "apenas teste"
            if raw_text.strip() == "":
                st.error("Please write something in the text area")
            elif raw_text != texto_default and len(raw_text) > 0:
                dict_idioma_full = lista_idiomas_full()
                idioma_original = str(get_value(blob.detect_language(), dict_idioma_full))
                original_key = str(get_key(idioma_original, dict_idioma_full))
                dict_idioma = lista_idiomas(idioma_original)
                idioma_lista = list(dict_idioma.values())
                for value in idioma_lista:
                    idioma_final = get_key(value, dict_idioma)
                    if idioma_original != idioma_final:
                        texto_convertido = blob.translate(to=idioma_final)
                        st.success("Language: " + value + " (" + idioma_final + ")")
                        st.write(str(texto_convertido))
                        flag = True
            # except:
            #     if flag != True:
            #         st.error("ERROR2: text must be at least 3 letters and the word must exist in the formal language")

    elif choice == 'About':
        st.subheader("I hope you enjoy it and use it to learn something")
        st.subheader("Built with Streamlit and TextBlob")
        st.write("Known problems:")
        st.write(" - sometimes the original language can't be detected correctly")
        st.write(" - sometimes the sound will fail to play")
        st.subheader("by Silvio Lima")
        if st.button("Linkedin"):
            js = "window.open('https://www.linkedin.com/in/silviocesarlima/')"
            html = '<img src onerror="{}">'.format(js)
            div = Div(text=html)
            st.bokeh_chart(div)

    else:
        # Voice: detect, translate and play the text as audio
        st.subheader("Text Area")
        texto_default = 'Text'
        raw_text = st.text_area("Copy&Paste -> Ctrl+Enter", texto_default)
        blob = TextBlob(raw_text)
        try:
            if raw_text == texto_default or raw_text.strip() == "":
                st.error("Please write something in the text area")
            else:
                dict_idioma_full = lista_idiomas_full()
                idioma_original = get_value(blob.detect_language(), dict_idioma_full)
                original_key = get_key(idioma_original, dict_idioma_full)
                st.success("Original Language: " + idioma_original + " (" + original_key + ")")
                # Play the original text
                play(raw_text, original_key)
                dict_idioma = lista_idiomas(idioma_original)
                options = st.multiselect("Choose a language", tuple(dict_idioma.values()))
                for value in options:
                    idioma_final_key = get_key(value, dict_idioma)
                    try:
                        if idioma_original != idioma_final_key:
                            texto_convertido = str(blob.translate(to=idioma_final_key))
                            st.success("Language: " + value + " (" + idioma_final_key + ")")
                            st.write(texto_convertido)
                            play(texto_convertido, idioma_final_key)
                    except:
                        st.error("ERROR: some languages will fail to play the sound.")
        except:
            st.error("ERROR3: text must be at least 3 letters and the word must exist in the formal language")
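# `play` (text-to-speech) is not defined in the source. One hypothetical
# implementation using gTTS; the temp filename and the choice of gTTS are
# assumptions:
from gtts import gTTS

def play(text, lang_code):
    tts = gTTS(text=text, lang=lang_code)
    tts.save("tts.mp3")                       # hypothetical temp file
    with open("tts.mp3", "rb") as f:
        st.audio(f.read(), format="audio/mp3")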