st.code("""
@st.cache
def get_data():
    url = "http://data.insideairbnb.com/united-states/ny/new-york-city/2019-09-12/visualisations/listings.csv"
    return pd.read_csv(url)
""", language="python")
st.markdown(
    "_To display a code block, pass in the string to display as code to "
    "[`st.code`](https://streamlit.io/docs/api.html#streamlit.code)_."
)
with st.echo():
    st.markdown(
        "Alternatively, use [`st.echo`](https://streamlit.io/docs/api.html#streamlit.echo)."
    )

st.header("Where are the most expensive properties located?")

st.subheader("On a map")
st.markdown(
    "The following map shows the top 1% most expensive Airbnbs priced at $800 and above."
)
st.map(df.query("price >= 800", engine='python')[["latitude", "longitude"]].dropna(how="any"))

st.subheader("In a table")
st.markdown("Following are the top five most expensive properties.")
st.write(df.query("price >= 800", engine='python').sort_values("price", ascending=False).head())

st.subheader("Selecting a subset of columns")
st.write(
    f"Out of the {df.shape[1]} columns, you might want to view only a subset. "
    "Streamlit has a [multiselect](https://streamlit.io/docs/api.html#streamlit.multiselect) widget for this."
)
st.title('Fuzzy simulation of car speed')
st.markdown("""
A car automatically follows a moving object. The key concern is keeping its
distance by controlling the acceleration relative to the object's movement.
For good, flexible, continuous driving, the car is controlled using fuzzy
reasoning. Two variables in the robot determine the response:

* distance --> the distance to the car ahead
* delta --> the change in distance per unit of time
""")

col1, col2 = st.beta_columns([4, 2])
with col1:
    st.subheader("Rules")
    st.image('./rules.png', use_column_width=True)
with col2:
    st.subheader("Output")
    st.image('./action.png', use_column_width=True)

col3, col4 = st.beta_columns([2, 2])
with col3:
    dataset = {
        'Variabel': ['Distance', 'Delta', 'Action'],
        'Type': ['input', 'input', 'output'],
        'Min-Kondisi': ['very small', 'shrinking fast', 'brake hard'],
        'Min-value': ['0', '-5', '-10'],
        'max-Kondisi': ['very big', 'GrowingFast', 'FloorIt'],
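# The fuzzy rule base itself is not shown in this excerpt. As a rough
# illustration of how the two inputs (distance, delta) and the output (action)
# could be wired up, here is a minimal scikit-fuzzy sketch; the membership
# functions and rules below are invented for the example, not the app's own.
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

distance = ctrl.Antecedent(np.arange(0, 11, 1), 'distance')
delta = ctrl.Antecedent(np.arange(-5, 6, 1), 'delta')
action = ctrl.Consequent(np.arange(-10, 11, 1), 'action')

distance['small'] = fuzz.trimf(distance.universe, [0, 0, 5])
distance['big'] = fuzz.trimf(distance.universe, [5, 10, 10])
delta['shrinking'] = fuzz.trimf(delta.universe, [-5, -5, 0])
delta['growing'] = fuzz.trimf(delta.universe, [0, 5, 5])
action['brake'] = fuzz.trimf(action.universe, [-10, -10, 0])
action['accelerate'] = fuzz.trimf(action.universe, [0, 10, 10])

rules = [
    ctrl.Rule(distance['small'] & delta['shrinking'], action['brake']),
    ctrl.Rule(distance['big'] & delta['growing'], action['accelerate']),
]
sim = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules))
sim.input['distance'] = 3.0   # close to the car ahead
sim.input['delta'] = -2.0     # and the gap is shrinking
sim.compute()
print(sim.output['action'])   # negative value: brake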
def hsv_shift_option(IMAGE_PATH, IMAGE_SIZE):
    img_bgr = cv2.imread(IMAGE_PATH)
    img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
    img_hsv = cv2.resize(img_hsv, (IMAGE_SIZE // 3, IMAGE_SIZE // 3))
    st.write("## HSV Shift")
    op_list = [('hue', 'Hue Shift', 'h_shift'),
               ('sat', 'Saturation Shift', 's_shift'),
               ('val', 'Value', 'v_shift'),
               ('combined', 'na', 'na')]
    sft_dict = {}
    sft_arr_dict = {}
    bg_dict = {}
    for i, (op, title, arg) in enumerate(op_list):
        if title != 'na':
            if op != 'hue':
                sft_dict[op] = st.sidebar.slider(title, min_value=-255, max_value=255, value=(0, 0))
            else:
                sft_dict[op] = st.sidebar.slider(title, min_value=-180, max_value=180, value=(0, 0))
            sft_arr_dict[op] = np.linspace(sft_dict[op][0], sft_dict[op][1], 9)
        bg_dict[op] = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 3)).astype(np.uint8)
        if op != 'combined':
            for j, shift in enumerate(sft_arr_dict[op]):
                row, col = divmod(j, 3)
                bg_dict[op][row * IMAGE_SIZE // 3:row * IMAGE_SIZE // 3 + IMAGE_SIZE // 3,
                            col * IMAGE_SIZE // 3:col * IMAGE_SIZE // 3 + IMAGE_SIZE // 3, :] \
                    = hsv_shift(img_hsv, **{arg: shift})
        else:
            for j in range(9):
                row, col = divmod(j, 3)
                bg_dict[op][row * IMAGE_SIZE // 3:row * IMAGE_SIZE // 3 + IMAGE_SIZE // 3,
                            col * IMAGE_SIZE // 3:col * IMAGE_SIZE // 3 + IMAGE_SIZE // 3, :] \
                    = hsv_shift(img_hsv, sft_arr_dict['hue'][j],
                                sft_arr_dict['sat'][j], sft_arr_dict['val'][j])

    total_background = np.zeros((IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)).astype(np.uint8)
    for i, color in enumerate(('hue', 'sat', 'val', 'combined')):
        row, col = divmod(i, 2)
        total_background[row * IMAGE_SIZE:row * IMAGE_SIZE + IMAGE_SIZE,
                         col * IMAGE_SIZE:col * IMAGE_SIZE + IMAGE_SIZE, :] = bg_dict[color]
    total_background = cv2.cvtColor(total_background, cv2.COLOR_HSV2RGB)
    st.image(total_background)
    st.markdown("* * *")
    st.subheader("Parameters")
    st.write(f"Hue channel: {sft_dict['hue']}")
    st.write(f"Saturation channel: {sft_dict['sat']}")
    st.write(f"Value channel: {sft_dict['val']}")
import os

import numpy as np
import pandas as pd
from numba import jit

# GRAPHING
from matplotlib import pyplot as plt
import seaborn as sns
import altair as alt
import plotly.figure_factory as ff

import streamlit as st

context = os.getcwd()
st.write('Context:{}'.format(context))

# EDA
df1 = pd.read_csv(context + '/data/train/train.csv')
if st.checkbox('Show raw data'):
    st.subheader('Our Training Data')
    st.write(df1)

st.subheader('PLOTS')

@st.cache
@jit
def genhist(df, bins):
    hist_values = np.histogram(df, bins=bins, range=(0, bins))[0]
    return hist_values

df1 = df1.fillna(method='pad')

st.subheader('Filter on Column')
bath = st.slider("How many bathrooms?", int(data.bathrooms.min()),
                 int(data.bathrooms.max()), int(data.bathrooms.mean()))
bed = st.slider("How many bedrooms?", int(data.bedrooms.min()),
                int(data.bedrooms.max()), int(data.bedrooms.mean()))
floor = st.slider("How many floors do you want?", int(data.floors.min()),
                  int(data.floors.max()), int(data.floors.mean()))

# splitting your data
X = data.drop('price', axis=1)
y = data['price']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=45)

# modelling step: import, fit and predict with your model
model = LinearRegression()
model.fit(X_train, y_train)

# RMSE on the test set gives a rough +/- band around the point prediction
errors = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
predictions = model.predict([[sqft_liv, bath, bed, floor]])[0]

# checking the predicted house price
if st.button("Run me!"):
    st.header("Your house price prediction is USD {}".format(int(predictions)))
    st.subheader("Your range of prediction is USD {} - USD {}".format(
        int(predictions - errors), int(predictions + errors)))
def main():
    st.title('Perform EDA with ease...')

    def file_select(folder='./datasets'):
        filelist = os.listdir(folder)
        selectedfile = st.selectbox('Select a default file', filelist)
        return os.path.join(folder, selectedfile)

    if st.checkbox('Select dataset from local machine'):
        data = st.file_uploader('Upload Dataset in .CSV', type=['CSV'])
        if data is not None:
            df = pd.read_csv(data)
    else:
        filename = file_select()
        st.info('You selected {}'.format(filename))
        if filename is not None:
            df = pd.read_csv(filename)

    # show data
    if st.checkbox('Show Dataset'):
        num = st.number_input('No. of Rows', 5, 10)
        head = st.radio('Select Head/Tail', ('Head', 'Tail'))
        if head == 'Head':
            st.dataframe(df.head(num))
        else:
            st.dataframe(df.tail(num))

    # show columns
    if st.checkbox('Columns'):
        st.write(df.columns)

    # show shape
    if st.checkbox('Shape'):
        st.text('(Rows, Columns)')
        st.write(df.shape)

    # select columns
    if st.checkbox('Select Columns to show'):
        collist = df.columns.tolist()
        selcols = st.multiselect("Select", collist)
        newdf = df[selcols]
        st.dataframe(newdf)

    # unique values
    if st.checkbox('Unique Values'):
        st.dataframe(df.nunique())
        selectedcol = st.selectbox('Select column to see unique values', df.columns.tolist())
        st.write(df[selectedcol].unique())

    # data types
    if st.checkbox('Data Types'):
        st.dataframe(df.dtypes)

    # check for null values
    if st.checkbox('Null values'):
        st.dataframe(df.isnull().sum())
        st.write(sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis'))
        st.pyplot()

    # show summary
    if st.checkbox('Summary/Describe'):
        st.write(df.describe())

    # plots and viz.
    st.header('Data Visualization with Seaborn')

    # seaborn correlation plot
    if st.checkbox('Correlation plot'):
        st.write(sns.heatmap(df.corr(), annot=True))
        st.pyplot()

    # univariate distribution
    if st.checkbox('Univariate Distribution'):
        cols = df.columns.tolist()
        plottype = st.selectbox('Select plot type', ['dist', 'hist'])
        selectedcol = st.selectbox('Select columns to plot', cols)
        binnum = st.number_input('No. of bins', 10, 50)
        st.write(sns.distplot(df[selectedcol], bins=binnum))
        st.pyplot()

    # bivariate distribution
    if st.checkbox('Bivariate Distribution'):
        cols = df.columns.tolist()
        plottype = st.selectbox('Select plot type', ['scatterplot', 'jointplot'])
        st.text('Select two columns')
        x = st.selectbox('Select X-axis column to plot', cols)
        y = st.selectbox('Select Y-axis column to plot', cols)
        kindtype = st.selectbox('Select plot kind', ['none', 'reg', 'resid', 'hex', 'kde'])
        if kindtype != 'none':
            st.write(sns.jointplot(df[x], df[y], kind=kindtype))
        else:
            st.write(sns.jointplot(df[x], df[y]))
        st.pyplot()

    # pairwise plot
    if st.checkbox('Pair Plot'):
        cols = df.columns.tolist()
        cols.insert(0, 'none')
        selectedcollist = st.multiselect('Select columns to plot', cols)
        hueval = st.selectbox('Select a hue column', cols)
        if hueval != 'none':
            st.write(sns.pairplot(df[selectedcollist], hue=df[hueval]))
        else:
            st.write(sns.pairplot(df[selectedcollist]))
        st.pyplot()

    # categorical plots
    if st.checkbox('Categorical Scatterplots'):
        cols = df.columns.tolist()
        cols.insert(0, 'none')
        plottype = st.selectbox('Select plot type', ['stripplot', 'swarmplot'])
        x = st.selectbox('Select X-axis (categorical) column to plot', cols)
        y = st.selectbox('Select Y-axis (numerical) column to plot', cols)
        hueval = st.selectbox('Select a hue column (categorical)', cols)
        if plottype == 'stripplot':
            if x != 'none' and hueval != 'none':
                st.write(sns.stripplot(df[x], df[y], hue=df[hueval]))
                st.pyplot()
            elif x != 'none' and hueval == 'none':
                st.write(sns.stripplot(df[x], df[y]))
                st.pyplot()
        else:
            if x != 'none' and hueval != 'none':
                st.write(sns.swarmplot(df[x], df[y], hue=df[hueval]))
                st.pyplot()
            elif x != 'none' and hueval == 'none':
                st.write(sns.swarmplot(df[x], df[y]))
                st.pyplot()

    # categorical distributions
    if st.checkbox('Categorical Distributions'):
        cols = df.columns.tolist()
        cols.insert(0, 'none')
        plottype = st.selectbox('Select plot type', ['box', 'bar', 'violin', 'count', 'point', 'factor'])
        x = st.selectbox('Select X-axis (categorical) column to plot', cols)
        y = st.selectbox('Select Y-axis (numerical) column to plot', cols)
        hueval = st.selectbox('Select a hue column', cols)
        # box plot
        if plottype == 'box':
            if hueval != 'none':
                st.write(sns.boxplot(df[x], df[y], hue=df[hueval]))
            else:
                st.write(sns.boxplot(df[x], df[y]))
            st.pyplot()
        # bar plot
        if plottype == 'bar':
            if hueval != 'none':
                st.write(sns.barplot(df[x], df[y], hue=df[hueval]))
            else:
                st.write(sns.barplot(df[x], df[y]))
            st.pyplot()
        # violin plot
        if plottype == 'violin':
            if hueval != 'none':
                st.write(sns.violinplot(df[x], df[y], hue=df[hueval]))
            else:
                st.write(sns.violinplot(df[x], df[y]))
            st.pyplot()
        # count plot
        if plottype == 'count':
            st.text('Plotting countplot for the selected X column')
            if hueval != 'none':
                st.write(sns.countplot(df[x], hue=df[hueval]))
            else:
                st.write(sns.countplot(df[x]))
            st.pyplot()
        # point plot
        if plottype == 'point':
            if hueval != 'none':
                st.write(sns.pointplot(df[x], df[y], hue=df[hueval]))
            else:
                st.write(sns.pointplot(df[x], df[y]))
            st.pyplot()
        # factor plot
        if plottype == 'factor':
            typekind = st.selectbox('Select plot type for the factor plot',
                                    ['point', 'bar', 'box', 'violin', 'strip', 'swarm'])
            colm = st.selectbox('Select column (col) parameter', cols)
            rows = st.selectbox('Select (only if col is selected) column (row) parameter', cols)
            if hueval != 'none':
                if colm != 'none' and rows != 'none':
                    st.write(sns.factorplot(x=x, y=y, hue=hueval, col=colm, row=rows, data=df, kind=typekind))
                    st.pyplot()
                elif colm != 'none' and rows == 'none':
                    st.write(sns.factorplot(x=x, y=y, hue=hueval, col=colm, data=df, kind=typekind))
                    st.pyplot()
            else:
                if colm != 'none' and rows != 'none':
                    st.write(sns.factorplot(x=x, y=y, col=colm, row=rows, data=df, kind=typekind))
                    st.pyplot()
                elif colm != 'none' and rows == 'none':
                    st.write(sns.factorplot(x=x, y=y, col=colm, data=df, kind=typekind))
                    st.pyplot()

    # linear relationship
    if st.checkbox('Linear Relationship'):
        cols = df.columns.tolist()
        cols.insert(0, 'none')
        xval = st.selectbox('Select X-axis', cols)
        yval = st.selectbox('Select Y-axis', cols)
        hueval = st.selectbox('Select hue column', cols)
        if hueval != 'none':
            st.write(sns.lmplot(x=xval, y=yval, hue=hueval, data=df))
        else:
            st.write(sns.lmplot(x=xval, y=yval, data=df))
        st.pyplot()

    ###########
    st.subheader('Customizable plots')
    cols = df.columns.tolist()
    plottype = st.selectbox('Select plot type', ['bar', 'hist', 'box', 'area', 'line', 'kde'])
    selectedcollist = st.multiselect('Select columns to plot', cols)
    if st.button('Generate plot'):
        st.success('Generating customizable {} plot for {}'.format(plottype, selectedcollist))
        # plot using streamlit
        if plottype == 'area':
            cusdata = df[selectedcollist]
            st.area_chart(cusdata)
        elif plottype == 'bar':
            cusdata = df[selectedcollist]
            st.bar_chart(cusdata)
        elif plottype == 'line':
            cusdata = df[selectedcollist]
            st.line_chart(cusdata)
        elif plottype:
            cusplot = df[selectedcollist].plot(kind=plottype)
            st.write(cusplot)
            st.pyplot()

    if st.button('See who created this!'):
        st.subheader('Project developed by:')
        st.info('Name: K Vikas Reddy')
        st.info('College: SASTRA Deemed to be University')
        st.info('Gmail: [email protected]')
    if st.button('Thanks'):
        st.balloons()
    st.text('')
    st.text('')
    st.warning('Please report any bugs and suggest new features')
    if st.checkbox('About this project'):
        st.write('Exploratory Data Analysis is the first step every data scientist takes in every project.')
        st.write('Many people groan at performing EDA, because the same scripts get written over and over for every project.')
        st.write('If you have felt the same, this project is for you: it helps you perform EDA with ease.')
        st.write("You don't need to write any scripts; EDA can be performed with just a few clicks, which is also very time-efficient.")
        st.write('As data science becomes more popular, aspiring data scientists should also become smart and productive.')
        st.write('Hope this project helps you to some extent.')
def main():
    menu = ['Home', 'About', 'Contact', 'Feedback']
    choice = st.sidebar.selectbox("Menu", menu)

    if choice == "Home":
        # Let's set the title of our awesome web app
        st.title('Title of your Awesome App')
        # Now setting up a header text
        st.subheader("By Your Cool Dev Name")
        # Option to upload an image file with jpg, jpeg or png extensions
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
        # When the user clicks the predict button
        if st.button("Predict"):
            # If the user uploaded an image
            if uploaded_file is not None:
                # Opening our image
                image = Image.open(uploaded_file)
                # Send our image to the database for later analysis
                firebase_bro.send_img(image)
                # Let's see what we got
                st.image(image, use_column_width=True)
                st.write("")
                try:
                    with st.spinner("The magic of our AI has started...."):
                        label = our_image_classifier(image)
                        time.sleep(8)
                    st.success("We predict this image to be: " + label)
                    rating = st.slider("Do you mind rating our service?", 1, 10)
                except:
                    st.error("We apologize, something went wrong 🙇🏽♂️")
            else:
                st.error("Can you please upload an image 🙇🏽♂️")

    elif choice == "Contact":
        # Let's set the title of our Contact page
        st.title('Get in touch')

        def display_team(name, path, affiliation="", email=""):
            '''Display the picture, name, affiliation and email of a creator.'''
            team_img = Image.open(path)
            st.image(team_img, width=350, use_column_width=False)
            st.markdown(f"## {name}")
            st.markdown(f"#### {affiliation}")
            st.markdown(f"###### Email {email}")
            st.write("------")

        display_team("Your Awesome Name", "./assets/profile_pic.png",
                     "Your Awesome Affiliation", "*****@*****.**")

    elif choice == "About":
        # Let's set the title of our About page
        st.title('About us')

        # A function to display the company logo
        def display_logo(path):
            company_logo = Image.open(path)
            st.image(company_logo, width=350, use_column_width=False)

        # Add the necessary info
        display_logo("./assets/profile_pic.png")
        st.markdown('## Objective')
        st.markdown("Write your company's objective here.")
        st.markdown('## More about the company.')
        st.markdown("Write more about your company here.")

    elif choice == "Feedback":
        # Let's set up the feedback page, complete with a form
        st.title("Feel free to share your opinions :smile:")
        first_name = st.text_input('First Name:')
        last_name = st.text_input('Last Name:')
        user_email = st.text_input('Enter Email: ')
        feedback = st.text_area('Feedback')
        # When the user clicks the send feedback button
        if st.button('Send Feedback'):
            # Let's send the data to a database to store it
            firebase_bro.send_feedback(first_name, last_name, user_email, feedback)
            # Share a successful completion message
            st.success("Your feedback has been shared!")
st.sidebar.subheader("Total number of tweets for each airline") each_airline = st.sidebar.selectbox('Visualization type', ['Bar plot', 'Pie chart'], key='2') airline_sentiment_count = data.groupby( 'airline')['airline_sentiment'].count().sort_values(ascending=False) airline_sentiment_count = pd.DataFrame({ 'Airline': airline_sentiment_count.index, 'Tweets': airline_sentiment_count.values.flatten() }) if not st.sidebar.checkbox("Close", True, key='2'): if each_airline == 'Bar plot': st.subheader("Total number of tweets for each airline") fig_1 = px.bar(airline_sentiment_count, x='Airline', y='Tweets', color='Tweets', height=500) st.plotly_chart(fig_1) if each_airline == 'Pie chart': st.subheader("Total number of tweets for each airline") fig_2 = px.pie(airline_sentiment_count, values='Tweets', names='Airline') st.plotly_chart(fig_2) @st.cache(persist=True)
import warnings
warnings.filterwarnings("ignore")
import glob
#import sys,os
import xlrd

# -----------------Design layout, main side-----------------#
trial_path = '//Vn01w2k16v18/data/Copyroom/Test_software/Data/Control plan/Control plan 3000'
#trial_path = '/media/ad/01D6B57CFBE4DB20/1.Linux/Data/Process_control/Control plan 3000'
#trial_path = '/media/ad/01D6B57CFBE4DB20/1.Linux/Data/Process_control/Control plan 2600'
#trial_path = '/media/ad/01D6B57CFBE4DB20/1.Linux/Data/Process_control/Control plan E series 1'

st.markdown('<style>h1{color: green;}</style>', unsafe_allow_html=True)
st.title('Process quality control')
st.subheader('Created by: DNN')
st.header("Information")

text2 = st.text_input("1. Please input the folder name for data analysis (MUST)", trial_path)
path = text2 + '/'
#path = '//Vn01w2k16v18/data/Copyroom/Test_software/Data/Membrane 3000S/'
#st.write('path input: ' + path)
st.write('Accepts any public folder, such as the copy room or QA folder; private folders are currently not allowed')
text3 = st.text_input("2. Please input the folder name for saving (optional)",
                      '//Vn01w2k16v18/data/Copyroom/Test_software/Data/Save')
path3 = text3 + '/'
    'Other': 'Other_Sales',
    'Global': 'Global_Sales'
}
region = reg_dict[roi]
pt = vg[region].values
sales = vg.iloc[:, 5:]
glob = vg.iloc[:, 9:]

# plot_type = st.sidebar.selectbox('Visualization type', ['Bar plot', 'Pie chart'], key='2')
if not st.sidebar.checkbox("Close", True, key='2'):
    data_choice = st.sidebar.selectbox("Data", ['Show Raw Data', 'Show Overall Data'])
    if data_choice == 'Show Raw Data':
        st.subheader('The Total Sales of {} (in Millions) in the {} Region are'.format(name, roi))
        st.write(pt)
    else:
        st.subheader('Overall Data for the selected Game')
        st.write(vg)
    # if plot_type == 'Bar Plot':
    #     st.write('True')
    #     st.subheader("Total No of Sales")
    #     fig = px.bar(vg, x=100, y=100, height=500)
    #     st.plotly_chart(fig)
    # else:
    #     fig = px.pie(vg, values=glob, names='Global_Sales')
    #     st.plotly_chart(fig)

st.sidebar.subheader('Games by Publisher')
    return tsne, imol

# app
file_name = 'small_molecule_drug'
data_load_state = st.text('Loading data...')
data = get_data(file_name)
data_load_state.text('Loading data...done!')

# check the data
if st.sidebar.checkbox('Show All Drugs'):
    st.subheader('All Drugs')
    st.dataframe(data)

# bokeh
tsne, svgs = tsne_cluster(data)
ChangeMoleculeRendering(renderer='PNG')
source = ColumnDataSource(data=dict(x=tsne[:, 0], y=tsne[:, 1], desc=data.Name, svgs=svgs))
on="PSE_ACT_CAT", how="left") return temp.loc[:, [ "Libelle prestations", "Libelle executant", "PRS_PAI_MNT", "PRS_REM_MNT" ]] df = load_data() option_presta = st.sidebar.multiselect('Select act', df['Libelle prestations'].unique()) option_exec = st.sidebar.multiselect('Select category', df['Libelle executant'].unique()) st.subheader('Expenditure and amount reimbursed') chart_data = df.loc[ (df["Libelle prestations"].isin(option_presta)) & (df['Libelle executant'].isin(option_exec)), ["PRS_PAI_MNT", "PRS_REM_MNT"]].stack().reset_index().rename(columns={ "level_1": "Variable", 0: "Amount" }) chart_data = chart_data.loc[:, ["Variable", "Amount"]] chart_data = chart_data.set_index("Variable") st.bar_chart(chart_data, width=1) df.loc[(df["Libelle prestations"].isin(option_presta)) & (df['Libelle executant'].isin(option_exec)), :] 'You selected: ', option_presta, option_exec
def main():
    global number_of_agegroups, names
    names = ["0-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "70-79", "80+"]
    number_of_agegroups = len(names)  # number of age groups
    N = [1756000, 1980000, 2245000, 2176000, 2164000, 2548000, 2141000, 1615000, 839000]
    # h_rivm = [0.00347, 0.000377, 0.000949, 0.00388, 0.00842, 0.0165, 0.0251, 0.0494, 0.0463]  # I -> H
    # ic_opn = [0, 2.8402E-05, 0.000211306, 0.000609427, 0.001481364, 0.003788442, 0.006861962, 0.008609547, 0.00210745]

    # Counts from https://www.rivm.nl/coronavirus-covid-19/grafieken divided by
    # 4,836,661 infections (cumulative prevalence divided by 8)
    ifr_ = [2.04658E-06, 3.78694E-06, 1.76088E-05, 5.45016E-05, 0.000156108,
            0.000558534, 0.002271095, 0.009964733, 0.048248607]
    h_ = [0.0015, 0.0001, 0.0002, 0.0007, 0.0013, 0.0028, 0.0044, 0.0097, 0.0107]     # I -> H
    i1_ = [0.0000, 0.0271, 0.0422, 0.0482, 0.0719, 0.0886, 0.0170, 0.0860, 0.0154]    # H -> IC
    i2_ = [0.0555, 0.0555, 0.0555, 0.0555, 0.0555, 0.0531, 0.0080, 0.0367, 0.0356]    # IC -> H
    d_ = [0.0003, 0.0006, 0.0014, 0.0031, 0.0036, 0.0057, 0.0151, 0.0327, 0.0444]     # H -> D
    dic_ = [0.0071, 0.0071, 0.0071, 0.0071, 0.0071, 0.0090, 0.0463, 0.0225, 0.0234]   # IC -> D
    dhic_ = [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0010, 0.0040, 0.0120, 0.0290]  # IC -> H -> D
    r_ = [0.1263, 0.1260, 0.1254, 0.1238, 0.1234, 0.1215, 0.1131, 0.0976, 0.0872]     # recovery rate from hospital (before IC)
    ric_ = [0.0857, 0.0857, 0.0857, 0.0857, 0.0857, 0.0821, 0.0119, 0.0567, 0.0550]   # recovery rate from hospital (after IC)

    with st.expander('Parameters', expanded=False):
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            initial_exposed_ratio = input_parameters("in. exp. ratio",
                                                     [0.01, 0.06, 0.03, 0.01, 0.0051, 0.001, 0.001, 0.001, 0.001])
        with col2:
            initial_infected_ratio = input_parameters("in. inf. ratio",
                                                      [0.01, 0.06, 0.03, 0.01, 0.0051, 0.001, 0.001, 0.001, 0.001])
        with col3:
            # Relative infectiousness: https://www.pnas.org/content/117/36/22430
            rel_besmh = input_parameters("rel besm", [3, 3, 3, 3, 3, 3, 1, 1, 1])
        with col4:
            # Relative susceptibility, fig 1b, read off by eye from
            # https://www.nature.com/articles/s41591-020-0962-9.
            # Doubled so that all values are >= 1 (otherwise the epidemic dies out in that age group).
            rel_vatbh = input_parameters("rel vatbaarh", [1, 1.5, 2, 5, 8, 10, 16, 16, 16])

        col1x, col2x, col3x, col4x = st.columns(4)
        with col1x:
            correction_per_age = input_parameters("corr age/vax-eff", [1, 1, 1, 1, 1, 1, 1, 1, 1])

    dfromi = []
    for x in range(number_of_agegroups):
        dfromi.append(ifr_[x] - ((h_[x] * d_[x]) + (h_[x] * i1_[x] * dic_[x])))

    df_parameters = pd.DataFrame({
        'Agegroup': names,
        'ifr': ifr_,
        'rel_besmh': rel_besmh,
        'rel_vatbaarh': rel_vatbh,
    })
    total_pop = sum(N)

    I0, E0 = [], []
    # ["0-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "70-79", "80+"]
    for y in range(number_of_agegroups):
        E0.append(N[y] * initial_exposed_ratio[y])
        I0.append(N[y] * initial_infected_ratio[y])
    R0, S0, C0 = [0] * number_of_agegroups, [None] * number_of_agegroups, [0] * number_of_agegroups
    H0, IC0, D0 = [0] * number_of_agegroups, [0] * number_of_agegroups, [0] * number_of_agegroups
    DIFR0, HIC0 = [0] * number_of_agegroups, [0] * number_of_agegroups
    Hcumm0, ICcumm0 = [0] * number_of_agegroups, [0] * number_of_agegroups
    alfa = [0] * number_of_agegroups  # vaccination rate
    for y in range(number_of_agegroups):
        S0[y] = N[y] - E0[y] - I0[y] - R0[y]

    st.sidebar.subheader("Parameters")
    incubationtime = st.sidebar.slider('Incubation time (1/sigma)', 1, 30, 2)
    beta_ = st.sidebar.number_input("Contact rate (beta)", min_value=0.0, max_value=1.0,
                                    step=1e-4, value=0.03100, format="%.4f")
    # R_start_ = st.sidebar.slider('R-naught', 0.0, 5.0, 1.1)
    infectioustime = st.sidebar.slider('Average days infectious (1/gamma)', 1, 30, 2)

    sigma = [1 / incubationtime] * number_of_agegroups  # 1 / incubation time (latent period)
    beta = [beta_] * number_of_agegroups                # contact rate
    gamma = [1 / infectioustime] * number_of_agegroups  # mean recovery rate (1 / recovery days / infectious time)

    # IF YOU WANT TO START FROM AN R-NAUGHT
    # Rstart = [R_start_] * number_of_agegroups
    # beta = []
    # for y in range(number_of_agegroups):
    #     beta.append(Rstart[y] * gamma[y] / (S0[y] / N[y]))

    global rutte_factor
    rutte_factor = st.sidebar.slider('Rutte factor (seasonality; measures (<1), relaxations (>1))', 0.0, 10.0, 1.0)

    what_to_show_options = ["S", "E", "I", "R", "C", "H", "IC", "HIC", "DIFR", "D", "Hcumm", "ICcumm"]
    what_to_show_options_default = ["C"]
    what_to_show = st.sidebar.multiselect("What to show", what_to_show_options, what_to_show_options_default)

    y0 = tuple(S0 + E0 + I0 + R0 + C0 + H0 + IC0 + HIC0 + DIFR0 + D0 + Hcumm0 + ICcumm0)
    parameters = tuple(N + alfa + beta + gamma + sigma + rel_besmh + rel_vatbh + ifr_ + h_ + i1_ +
                       i2_ + d_ + dic_ + dhic_ + r_ + ric_ + correction_per_age + dfromi)

    n = 176  # number of time points
    t = np.linspace(0, n - 1, n)  # time points

    result_odeint = odeint(func, y0, t, parameters, tfirst=True)

    st.subheader("Totals")
    show_result(result_odeint, N)
    # draw_graph_with_all_groups(result_odeint, names, beta, gamma, t, N)
    plot_total(result_odeint, 1, what_to_show)

    with st.expander('Per age group', expanded=False):
        st.subheader("Per age group")
        for name in names:
            plot_single_age_group(name, result_odeint, names, t, N, what_to_show)
    # plot_total_as_ratio(result_odeint, total_pop)

    st.subheader("Contact matrix")
    st.write(get_contact_matrix("2016/-17", "all"))

    show_toelichting()
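# The derivative function `func` passed to odeint above is defined elsewhere in
# the script. For orientation only, here is a minimal single-group SEIR
# right-hand side in the same tfirst=True calling convention; the real `func`
# adds the H/IC/D compartments, nine age groups and the contact matrix, so the
# names and numbers below are illustrative, not the author's code.
import numpy as np
from scipy.integrate import odeint

def seir(t, y, N, beta, sigma, gamma):
    S, E, I, R = y
    dS = -beta * S * I / N              # susceptible -> exposed
    dE = beta * S * I / N - sigma * E   # exposed -> infectious after latency
    dI = sigma * E - gamma * I          # infectious -> recovered
    dR = gamma * I
    return dS, dE, dI, dR

N_demo = 1_000_000
y0_demo = (N_demo - 10, 0, 10, 0)   # S, E, I, R
t_demo = np.linspace(0, 175, 176)   # same 176-day horizon as above
result_demo = odeint(seir, y0_demo, t_demo,
                     args=(N_demo, 0.3, 1 / 2, 1 / 2), tfirst=True)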
fig_count.add_trace(go.Indicator(
    mode="number",
    value=total_tnr_pending,
    title="TNR Pending",
), row=2, col=2)
fig_count.update_layout(template="plotly_dark", font_family="Arial",
                        margin=dict(l=20, r=20, t=20, b=20))
st.plotly_chart(fig_count, use_container_width=True)
# Summary plot for counts - END

# Gender information - START
st.subheader("Is it a girl? A boy? It's a mystery! 😵")
st.markdown('Did you know it is not easy to identify the gender of a kitten? '
            'We sometimes have to wait for the vet visit to get an idea.')
fig_gender = px.pie(
    original_data,
    # values='GENDER',
    names='GENDER',
    # x='USUAL SPOT',
    # y='Count',
    # title='Gender Distribution',
    # color='GENDER',
    # barmode='stack'
    # width=400,
    # height=300,
)
@st.cache
def load_data(nrows):
    data = pd.read_csv(DATA_URL, nrows=nrows)
    data.rename(lambda x: str(x).lower(), axis="columns", inplace=True)
    data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
    return data

data_load_state = st.text("Loading data...")
data = load_data(10000)
data_load_state.text("Loading data...done!")

st.subheader("Number of pickups by hour")
hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0, 24))[0]
st.bar_chart(hist_values)

st.subheader("Map of all pickups")
st.map(data)

# Note: this second cached function shadows the load_data defined above;
# it filters the already-loaded dataframe by pickup hour.
@st.cache
def load_data(hour_to_filter):
    return data[data[DATE_COLUMN].dt.hour == hour_to_filter]

hour_to_filter = st.slider("hour", 0, 23, 17)  # min: 0h, max: 23h, default: 17h
def main(): """Face Expression Detection App""" #setting the app title & sidebar activities = [ "Home", "Detect your Facial expressions", "CNN Model Performance", "About" ] choice = st.sidebar.selectbox("Select Activity", activities) if choice == 'Home': html_temp = """ <marquee behavior="scroll" direction="left" width="100%;"> <h2 style= "color: #000000; font-family: 'Raleway',sans-serif; font-size: 62px; font-weight: 800; line-height: 72px; margin: 0 0 24px; text-align: center; text-transform: uppercase;">Try your own test! </h2> </marquee><br> """ st.markdown(html_temp, unsafe_allow_html=True) st.subheader("Video Demo :") st.subheader(":smile: :worried: :fearful: :rage: :hushed:") st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s") #if choosing to consult the cnn model performance if choice == 'CNN Model Performance': st.title("Face Expression WEB Application :") st.subheader(":smile: :worried: :fearful: :rage: :hushed:") st.subheader("CNN Model :") st.image('images/model.png', width=700) st.subheader("FER2013 Dataset from:") st.text( " https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data" ) st.image('images/dataframe.png', width=700) st.subheader("Model training results:") st.markdown("Accuracy :chart_with_upwards_trend: :") st.image("images/accuracy.png") st.markdown("Loss :chart_with_downwards_trend: : ") st.image("images/loss.png") #if choosing to detect your face exp , give access to upload the image if choice == 'Detect your Facial expressions': st.title("Face Expression WEB Application :") st.subheader(":smile: :worried: :fearful: :rage: :hushed:") image_file = st.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg']) #if image if uploaded,display the progress bar +the image if image_file is not None: our_image = Image.open(image_file) st.text("Original Image") progress = st.progress(0) for i in range(100): time.sleep(0.01) progress.progress(i + 1) st.image(our_image) if image_file is None: st.error("No image uploaded yet") # Face Detection task = ["Faces"] feature_choice = st.sidebar.selectbox("Find Features", task) if st.button("Process"): if feature_choice == 'Faces': #process bar progress = st.progress(0) for i in range(100): time.sleep(0.05) progress.progress(i + 1) #end of process bar result_img, result_faces, prediction = detect_faces(our_image) if st.image(result_img): st.success("Found {} faces".format(len(result_faces))) if prediction == 'Happy': st.subheader( "YeeY! You are Happy :smile: today , Always Be ! " ) st.text("Here is your Recommended video to watch:") st.video( "https://www.youtube.com/watch?v=4q1dgn_C0AU&t=24s" ) elif prediction == 'Angry': st.subheader( "You seem to be angry :rage: today ,Take it easy! " ) st.text("Here is your Recommended video to watch:") st.video("https://www.youtube.com/watch?v=d_5DU5opOFk") elif prediction == 'Disgust': st.subheader("You seem to be Disgust :rage: today! ") st.text("Here is your Recommended video to watch:") #st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s") elif prediction == 'Fear': st.subheader( "You seem to be Fearful :fearful: today ,Be couragous! " ) st.text("Here is your Recommended video to watch:") st.video("https://www.youtube.com/watch?v=h_D6HhWiTiI") elif prediction == 'Neutral': st.subheader( "You seem to be Neutral today ,Happy day! 
") st.text("Here is your Recommended video to watch:") #st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s") elif prediction == 'Sad': st.subheader( "You seem to be Sad :sad: today ,Smile and be happy! " ) st.text("Here is your Recommended video to watch:") st.video("https://www.youtube.com/watch?v=ST97BGCi3-w") elif prediction == 'Surprise': st.subheader("You seem to be surprised today ! ") st.text("Here is your Recommended video to watch:") #st.video("https://www.youtube.com/watch?v=M1uyH-DzjGE&t=46s") else: st.error( "Your image does not match the training dataset's images! Try an other image!" ) elif choice == 'About': st.title("Face Expression WEB Application :") st.subheader(":smile: :worried: :fearful: :rage: :hushed:") st.subheader("About Face Expression Detection App")
def page():
    assets_path = 'webpages/project_pages/gym_pid_assets'
    st.write("""
    ### Gym-Tuned PID Controller
    *By Michael Wu and Christian Reyes*
    """)
    st.video("https://www.youtube.com/watch?v=w2g0dKvVW-c")
    st.write("""
    We decided to use Gym and stable-baselines to facilitate reinforcement learning
    tasks by reducing the amount of infrastructural code we have to write. Gym is a
    toolkit for developing and comparing reinforcement learning algorithms; it
    provides the basic architecture for functions such as **step, reset, render**,
    and it supports built-in callback functions that help with debugging.
    Stable-baselines is a set of improved implementations of reinforcement learning
    algorithms based on the OpenAI Baselines package.

    Specifically, we decided to adopt the Deep Deterministic Policy Gradient (DDPG)
    algorithm because it is a model-free, off-policy algorithm for learning
    continuous actions. A continuous action space is especially important to us
    because we are trying to learn the target speed, the longitudinal K values, and
    the lateral K values of our controller, all of which live in a continuous
    space.

    DDPG has two networks: the actor and the critic. The actor network proposes the
    next action given the current state, while the critic network predicts whether
    an action is good or bad given a state and an action. Our proposed pipeline is
    shown in the picture below: the environment outputs observations to the actor
    and critic modules, the actor uses the observation to produce the next action,
    and the critic evaluates the actor's policy.
    """)
    ac_image = Image.open(f'{assets_path}/actor_critic.png')
    st.image(ac_image, caption='Actor Critic Diagram of our RL System', use_column_width=True)
    st.write("""
    Our first proposal was to make the observation state the concatenation of
    current speed, current throttle, current steering, current transform, and
    target transform. However, we quickly found that this was too much input, since
    it has shape (15, 1). We therefore reduced the observation state to (3, 1):
    just the current speed, current throttle, and current steering.

    With the observation space out of the way, we needed to define a reward
    function such that the DDPG algorithm converges to our desired policy. We want
    to incentivize finishing a lap, reaching the next waypoint, and going faster.
    We also want to disincentivize collisions and staying in the same place, and,
    since we know from expert knowledge that the track can handle speeds greater
    than 80, we disincentivize speeds below 80.
    """)
    rew_image = Image.open(f'{assets_path}/reward_code.png')
    st.image(rew_image, caption='Image of our Reward/Penalty Function', use_column_width=True)

    st.header("Design Questions")
    st.subheader("What design criteria must your project meet? What is the desired functionality?")
    st.write("""
    Specifically for the ROAR Gym project, we decided to use the sensing data (GPS,
    vehicle throttle, steering), feed it into our simple waypoint-following
    planning module, and then into a PID controller.
    """)
    st.subheader("Describe the design you chose.")
    st.write("""
    Specifically for the ROAR Gym project, we went with the Gym environment because
    it is open source, easy to start with, and has a variety of supported examples
    and future adaptations. We chose stable-baselines as our algorithm-specific
    implementation on top of this Gym environment.
    """)
    st.subheader("What design choices did you make when you formulated your design? "
                 "What trade-offs did you have to make?")
    st.write("""
    Specifically for the ROAR Gym environment, we streamed the entire Agent concept
    into the Gym and then wrote customized functions that extract and reformat the
    needed data to feed into the Gym observation and action spaces. An agent knows
    about the camera data, the current vehicle state, the planning module state,
    and everything else streamed into the vehicle. Since we stream this agent into
    the Gym, the Gym environment also knows all of that information. Unfortunately,
    the trade-off we had to make is efficiency: it is not efficient to copy all of
    the data into another module. However, training a reinforcement learning agent
    takes time, and the time it takes to copy the data turns out to be trivial.
    """)
    st.subheader("How do these design choices impact how well the project meets design "
                 "criteria that would be encountered in a real engineering application, "
                 "such as robustness, durability, and efficiency?")
    st.write("""
    Specifically for the ROAR Gym project, we think we met the criteria pretty
    well. The RL-tuned PID controller turned out to be really robust, in the sense
    that it does not collide while still driving decently fast. In fact, we won the
    award for not crashing a single time in this semester's ROAR competition. In
    terms of efficiency, it runs at about 50 FPS. Comparing this with a traditional
    PID controller, which runs at about 55 FPS, we think this simple neural net is
    performing really well.
    """)

    st.header("Implementation Questions")
    st.subheader("Describe any hardware you used or built. Illustrate with pictures and diagrams.")
    st.write("""
    Not applicable.
    """)
    st.subheader("What parts did you use to build your solution?")
    st.write("""
    Not applicable.
    """)
    st.subheader("Describe any software you wrote in detail. Illustrate with diagrams, "
                 "flow charts, and/or other appropriate visuals. This includes launch "
                 "files, URDFs, etc.")
    st.write("""
    See our description above for the implementation details and associated figures.
    """)
    st.subheader("How does your complete system work? Describe each step.")
    st.write("""
    The RL-based PID controller is an infinite while loop, with each iteration doing the following...
    """)
    st_comps.html("""
    In Loop:
    <ol>
    <li>Get the vehicle state (current transform, camera data)</li>
    <li>The planning module produces the next waypoint</li>
    <li>Feed the next waypoint and vehicle state into the RL-based PID controller</li>
    <li>The RL-based PID controller outputs the next throttle and steering</li>
    <li>Stream the throttle and steering into the environment we are currently attached to (either simulation or real hardware)</li>
    </ol>
    """)
def main():
    # global variables to be used in the script
    PRE_TRAINED_MODEL_NAME = 'bert-base-cased'
    class_names = ['negative', 'positive']
    tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
    device = torch.device('cpu')  # inference runs on CPU; also used below in BERT_inference

    class Model(nn.Module):
        def __init__(self, *args, **kwargs):
            super(Model, self).__init__()

    # a class for the Sentiment Classifier
    class SentimentClassifier(nn.Module):
        def __init__(self, n_classes):
            super(SentimentClassifier, self).__init__()
            self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
            self.drop = nn.Dropout(p=0.3)
            self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

        def forward(self, input_ids, attention_mask):
            _, pooled_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
            output = self.drop(pooled_output)
            return self.out(output)

    # Generate a title for our webpage
    st.title('Sentiment analysis and product reviews.')
    # creating a sidebar for our webpage
    st.sidebar.title("Sentiment Analysis Web App")
    # little comment for our sidebar section
    st.sidebar.markdown("😃Is your review positive or negative?😞")

    # Here we load the data into a cache to prevent repeated work
    @st.cache
    def load_data():
        # Function to pull in data from our Amazon S3 bucket
        data = pd.read_csv('https://amazonproductdata.s3-us-west-1.amazonaws.com/train.csv')
        return data

    # let's ingest our raw data here
    df = load_data()

    @st.cache
    def get_model():
        gdown.download("https://drive.google.com/uc?id=1cz41bp4tf37Mky_R31T41qiSN6ucMjGi",
                       "./assets/model_state_dict.bin", quiet=False)

    get_model()

    # A function for loading models in case we include other models later
    def load_model(filepath):
        model = SentimentClassifier(len(class_names))
        model.load_state_dict(torch.load(filepath, map_location=device))
        return model

    # loading the model into memory
    #model = load_model('./model/BERT_trained_model')  # This one works locally!
    model = load_model('./assets/model_state_dict.bin')

    # here we have the ability to plot data metrics
    def plot_metrics(metrics_list):
        if "Confusion Matrix" in metrics_list:
            st.subheader("Confusion Matrix")
            plot_confusion_matrix(model, x_test, y_test, display_labels=class_names)

    # function to provide inference from the BERT model
    def BERT_inference(review_text):
        #tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
        # Now we must encode the user's text
        encoded_review = tokenizer.encode_plus(
            review_text,
            max_length=300,
            add_special_tokens=True,
            return_token_type_ids=False,
            pad_to_max_length=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        input_ids = encoded_review['input_ids'].to(device)
        attention_mask = encoded_review['attention_mask'].to(device)
        output = model(input_ids, attention_mask)
        _, prediction = torch.max(output, dim=1)
        st.write(f'Review text: {review_text}')
        st.write(f'Sentiment : {class_names[prediction]}')

    # sidebar options to add more rich features to our app
    if st.sidebar.checkbox("Show raw data", False):
        st.subheader("Amazon Review Sentiment Analysis. (Polarity Classification)")
        st.table(df.head(10))

    # Generating a textbox for user input
    if st.sidebar.checkbox("Input text for inference", False):
        st.subheader("Amazon Review Dataset for Sentiment Analysis. (Inference Demonstration.)")
        user_input = st.text_area("Please provide a review here.")
        if user_input:
            # Let's process the user's input
            print(user_input)
            BERT_inference(user_input)
# Created by Gloria Apolot
# This file is located in this directory:
# C:\Users\Apolot\Your team Dropbox\Gloria Apolot\Gloria\Apps>

# First import a package (an add-on) called streamlit
from streamlit import title
from streamlit import subheader
from streamlit import write
from streamlit import slider

# Then use streamlit to create a title for the App
title("Gloria Apolot First App")

# Then use streamlit to add a subtitle
subheader("Summing two numbers")

# Then use streamlit to describe our task for this App
write("This is my first app. I am going to add two numbers. z = x + y")

# Then pick a speed and a time with sliders and compute the distance
speed = slider('Select speed', 0, 100, 80, 10)
time = slider('Select time', 0, 24, 8, 2)
distance = speed * time

# Then use streamlit to create a user interface (UI) that displays the values
write(speed)
write(time)
write(distance)
st.sidebar.markdown('Group members')
st.sidebar.markdown('Vikas Bevoor')
st.sidebar.markdown('Sumit Yenugwar')
st.sidebar.markdown('Chandramohan')
st.sidebar.markdown('Harsh Joshi')

st.title('Model Deployment: Logistic Regression')

msg = st.text_input("Please paste the mail content here to check whether it's abusive or not", " ")

df = pd.read_csv("cleandata2.csv")
df.reset_index(inplace=True)

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# Taking input
f = [msg]
fpd = pd.DataFrame(f, columns=['CleanContent'])

# For the input message
f_matrix = emails_bow1.transform(fpd.CleanContent)
f_pred = model.predict(f_matrix)
answer = f_pred[0]

st.subheader('Predicted Result')
st.write(answer)
grade = st.slider('Grade [%]', 0., 30., value=0., step=0.1) / 100
st.plotly_chart(speedVsPowerPlot(w=w_total, G=grade), use_container_width=True)
if grade != 0:
    st.plotly_chart(vamPlot(w=w_total, G=grade), use_container_width=True)

st.header('Strava Segment Analysis')
with open('segmentDict.txt', 'r') as infile:
    db = json.load(infile)
sortedKeys = sorted(db, key=str.lower)
selectedSegments = st.multiselect('Segment(s)', sortedKeys)
for key in selectedSegments:
    # Display segment data
    st.subheader(db[key]['name'])
    st.write('https://www.strava.com/segments/' + str(db[key]['id']))
    st.write('Distance', round(db[key]['distance'] / 1000, 2), ' km = ',
             round(db[key]['distance'] / 1000 * 0.621, 2), ' mi')
    if db[key]['total_elevation_gain'] == 0:
        totElevGain = round(db[key]['average_grade'] * db[key]['distance'] / 100)
    else:
        totElevGain = round(db[key]['total_elevation_gain'])
    st.write('Total elevation gain: ', totElevGain, ' m = ', round(totElevGain * 3.281), 'ft')
    st.write('Average grade: ', db[key]['average_grade'], '%')
    # Display segment on map
f"Test #{sample[2]}: {sample[3]} - Output {sample[0]} is expected to be {sample[1]}" ) st.header("Analysis") labels = 'pass', 'fail' sizes = [n_right, n_wrong] plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90) plt.axis('equal') st.pyplot() elif option == 'Transition Tree': option2 = st.sidebar.selectbox("Which section do you want to view?", ["ATM", "APP Login"]) st.header(option2) if option2 == "ATM": st.subheader("状态图") atm1 = Image.open("./tran_tree/img/ATM1.png") st.image(atm1, "ATM 状态图", use_column_width=True) st.write(tran_tree.state_diagram) st.subheader("Transition Tree") st.code(tran_tree.code, language='python') if st.button("run"): st.write(tran_tree.tran_tree(tran_tree.state_diagram)) atm2 = Image.open("./tran_tree/img/ATM2.png") st.image(atm2, "ATM Transition Tree", use_column_width=True) st.subheader("状态表") st.markdown(tran_tree.md) else: st.subheader("状态图") login1 = Image.open("./tran_tree/img/login.png") st.image(login1, "APP Login 状态图", use_column_width=True)
st.map(origen)

st.title("Step 1: Data acquisition")
origen = get_data()
st.dataframe(origen)

# Start the preprocessing
st.title("Step 2: Preprocessing")
st.text(origen.shape)
st.text(origen.columns)

# Questions to answer
st.title("Questions to answer")
st.subheader("Exercise 1: Which type of lodging is the most common (a room, an entire apartment, etc.)?")
st.dataframe(origen["room_type"].value_counts())

st.subheader("Exercise 2: Which neighbourhoods have the most lodgings?")
sns.countplot(x="neighbourhood_group", data=origen)
plt.tight_layout()
st.pyplot()

st.subheader("Exercise 3: Show the top 5 most-occupied lodgings (tip: the availability_365 column shows how many days each one is available)")
# List the number of days the lodgings are available, from the most to the least frequent
origen["availability_365"].value_counts()
    sepal_width = st.sidebar.slider('Sepal width', 2.0, 4.4, 3.4)
    petal_length = st.sidebar.slider('Petal length', 1.0, 6.9, 1.3)
    petal_width = st.sidebar.slider('Petal width', 0.1, 2.5, 0.2)
    data = {
        'sepal_length': sepal_length,
        'sepal_width': sepal_width,
        'petal_length': petal_length,
        'petal_width': petal_width
    }
    features = pd.DataFrame(data, index=[0])
    return features

df = user_input_features()

st.subheader('User Input parameters')
st.write(df)

iris = datasets.load_iris()
X = iris.data
y = iris.target

clf = RandomForestClassifier()
clf.fit(X, y)

prediction = clf.predict(df)
prediction_proba = clf.predict_proba(df)

st.subheader('Class labels and their corresponding index')
st.write(iris.target_names)
if user_query:
    query_tokens = tokenizer.encode_plus(user_query,
                                         add_special_tokens=True,
                                         max_length=512,
                                         padding='max_length',
                                         return_token_type_ids=True)
    ids = torch.tensor(query_tokens['input_ids']).unsqueeze(0)
    mask = torch.tensor(query_tokens['attention_mask']).unsqueeze(0)
    pred = classifier(ids, mask)
    top_val, top_idx = torch.topk(pred[0], 3, dim=1)
    pred_categories = model_classes[top_idx].tolist()[0]
    pred_topics = [cat_map[cat] for cat in pred_categories]

    st.subheader('Topics of Interest...')
    top1, top2, top3 = st.beta_columns(3)
    with top1:
        st.button(pred_topics[0])
    with top2:
        st.button(pred_topics[1])
    with top3:
        st.button(pred_topics[2])

    # encode the user query
    query_embedding = sentence_transformer.encode(user_query)
    # grab relevant embeddings based on the category prediction
    relevant_docs = df[df['categories'].isin(pred_categories)]
    t0 = float(str_t0)
    st.sidebar.markdown("""
    Example times in the H1 detector:
    * 1126259462.4 (GW150914)
    * 1187008882.4 (GW170817)
    * 933200215 (hardware injection)
    * 1132401286.33 (Koi Fish Glitch)
    """)
else:
    chosen_event = st.sidebar.selectbox('Select Event', eventlist)
    t0 = datasets.event_gps(chosen_event)
    detectorlist = list(datasets.event_detectors(chosen_event))
    detectorlist.sort()
    st.subheader(chosen_event)
    st.write('GPS:', t0)

    # -- Experiment to display masses
    try:
        jsoninfo = fetch_event_json(chosen_event)
        for name, nameinfo in jsoninfo['events'].items():
            st.write('Mass 1:', nameinfo['mass_1_source'], 'M$_{\odot}$')
            st.write('Mass 2:', nameinfo['mass_2_source'], 'M$_{\odot}$')
            #st.write('Distance:', int(nameinfo['luminosity_distance']), 'Mpc')
            st.write('Network SNR:', int(nameinfo['network_matched_filter_snr']))
            eventurl = 'https://gw-osc.org/eventapi/html/event/{}'.format(chosen_event)
            st.markdown('Event page: {}'.format(eventurl))
            st.write('\n')
    except:
        pass
def main():
    image = Image.open('images/wordcloud.png')
    st.sidebar.image(image, width=200)
    st.sidebar.header("NLP demos")
    st.sidebar.text("Select an option and see it in action!")
    st.title("Natural Language Processing demos")
    st.markdown("""
    #### An NLP app for demonstration purposes: analyze your text!
    """)

    # Named Entity Recognition
    if st.sidebar.checkbox("Named Entity Recognition", key='check1'):
        lang_options = st.selectbox("Choose language (EN/PT)", ['EN', 'PT'], key='sel1')
        if lang_options == 'EN':
            lang_model = 'en_core_web_sm'
        else:
            lang_model = 'pt_core_news_sm'
        message = st.text_area("Enter text inside the box...", key='ins1')
        if st.button("Run", key='run1'):
            with st.spinner('Wait for it...'):
                entity_result = entity_analyzer(message, lang_model)
                st.success(st.json(entity_result))

    # Summarization
    if st.sidebar.checkbox("Text Summarization", key='check2'):
        st.subheader("Summarize Your Text")
        message = st.text_area("Enter text (EN only for now) inside the box...", key='ins2')
        ratio_value = st.slider(
            'Select a ratio (%) that determines the proportion of the number of sentences '
            'of the original text to be chosen for the summary', 0, 100, (10))
        if st.button("Run", key='run2'):
            with st.spinner('Wait for it...'):
                summary_result = summarize(message, ratio=ratio_value / 100)
                st.success(summary_result)

    # # Automated Keyword Extraction (YAKE! variant, kept for reference)
    # if st.sidebar.checkbox("Automated Keyword Extraction"):
    #     st.subheader("Extract Keywords")
    #     lang_options = st.selectbox("Choose language (EN/PT)", ['EN', 'PT'])
    #     if lang_options == 'EN':
    #         lang_model = 'en'
    #     elif lang_options == 'PT':
    #         lang_model = 'pt'
    #     else:
    #         lang_model = 'en'
    #     message = st.text_area("Enter text inside the box...")
    #     if st.button("Run"):
    #         with st.spinner('Wait for it...'):
    #             # set YAKE! parameters
    #             language = lang_model
    #             max_ngram_size = 2
    #             deduplication_thresold = 0.2
    #             deduplication_algo = "seqm"
    #             windowSize = 1
    #             numOfKeywords = 10
    #             custom_kw_extractor = yake.KeywordExtractor(
    #                 lan=language,
    #                 n=max_ngram_size,
    #                 dedupLim=deduplication_thresold,
    #                 dedupFunc=deduplication_algo,
    #                 windowsSize=windowSize,
    #                 top=numOfKeywords,
    #                 features=None,
    #             )
    #             keywords = custom_kw_extractor.extract_keywords(message)
    #             keywords = [kw for kw, res in keywords]
    #             st.success('Keywords: ' + (', '.join(sorted(keywords))))

    # Automated Keyword Extraction
    if st.sidebar.checkbox("Automated Keyword Extraction", key='check3'):
        st.subheader("Extract Keywords")
        lang_options = st.selectbox("Choose language (EN/PT)", ['EN', 'PT'], key='sel2')
        if lang_options == 'EN':
            stop_words = en_stopwords
            lang_model = 'en_core_web_sm'
        else:
            lang_model = 'pt_core_news_sm'
            stop_words = pt_stopwords
        # nlp = spacy.load(lang_model)
        message = st.text_area("Enter text inside the box...", key='ins3')
        if st.button("Run", key='run3'):
            with st.spinner('Wait for it...'):
                # corpus = []
                text = ''.join([unidecode.unidecode(accented_string)
                                for accented_string in message])
                corpus = clean_string(text, lang_options)
                tr4w = TextRank4Keyword()
                tr4w.set_stopwords(stopwords=stop_words, lang_model=lang_model)
                # tr4w.set_stopwords(stopwords=stop_words)
                # tr4w.analyze(ppp, candidate_pos=['NOUN', 'PROPN', 'VERB'], window_size=4, lower=False)
                tr4w.analyze(corpus, window_size=4, lower=False, lang_model=lang_model)
                st.success('Keywords: ' + (', '.join(sorted(tr4w.get_keywords(10)))))

    # Data Anonymization (erasing names)
    if st.sidebar.checkbox("Anonymize Personal Data"):
        st.subheader("Anonymize Your Data: Hiding Names")
        lang_options = st.selectbox("Choose language (EN/PT)", ['EN', 'PT'], key='sel3')
        if lang_options == 'EN':
            lang_model = 'en_core_web_sm'
        elif lang_options == 'PT':
            lang_model = 'pt_core_news_sm'
        else:
            lang_model = 'en_core_web_sm'
        message = st.text_area("Enter text inside the box...", key='ins4')
        if st.button("Run", key='run4'):
            with st.spinner('Wait for it...'):
                names_cleaned_result = sanitize_names(message, lang_model)
                st.success(names_cleaned_result)

    # N-grams
    if st.sidebar.checkbox("N-Grams Barplot"):
        st.subheader("Visualize an N-grams barplot")
        lang_option = st.selectbox("Choose language (EN/PT)", ['EN', 'PT'], key='sel4')
        # if lang_options == 'EN':
        #     lang_model = 'english'
        # elif lang_options == 'PT':
        #     lang_model = 'portuguese'
        # else:
        #     lang_model = 'english'
        ngram_option = st.selectbox("Choose N for N-grams (1, 2 or 3)", [1, 2, 3], key='sel5')
        # if ngram_options == 1:
        #     ngrams = 1
        # elif ngram_options == 2:
        #     ngrams = 2
        # else:
        #     ngrams = 3
        message = st.text_area("Let's analyze and get some visuals...", key='ins5')
        if st.button("Run", key='run5'):
            with st.spinner('Wait for it...'):
                corpus = []
                text = ''.join([unidecode.unidecode(accented_string)
                                for accented_string in message])
                corpus.append(clean_string(text, lang_option))
                top3_words = get_top_n_words(corpus, ngram_option, n=20)
                top3_df = pd.DataFrame(top3_words)
                top3_df.columns = ["N-gram", "Freq"]
                fig = px.bar(top3_df, x='N-gram', y='Freq')
                st.plotly_chart(fig)

    # Wordcloud
    if st.sidebar.checkbox("Wordcloud"):
        st.subheader("Visualize a wordcloud")
        lang_option = st.selectbox("Choose language (EN/PT)", ['EN', 'PT'], key='sel6')
        if lang_option == 'EN':
            # lang_model = 'en_core_web_sm'
            stop_words = en_stopwords
        else:
            # lang_model = 'pt_core_news_sm'
            stop_words = pt_stopwords
        message = st.text_area("Let's analyze and get some visuals...", key='ins6')
        if st.button("Run", key='run6'):
            with st.spinner('Wait for it...'):
                corpus = []
                text = ''.join([unidecode.unidecode(accented_string)
                                for accented_string in message])
                corpus.append(clean_string(text, lang_option))
                # Word cloud
                wordcloud = WordCloud(background_color='white',
                                      stopwords=stop_words,
                                      max_words=100,
                                      max_font_size=50,
                                      random_state=42).generate(str(corpus))
                fig = plt.figure(1)
                plt.imshow(wordcloud, interpolation="bilinear")
                plt.axis('off')
                st.pyplot()
import pandas as pd
import streamlit as st
from sklearn.linear_model import LogisticRegression
from pickle import dump
from pickle import load

st.title('Model Deployment: Logistic Regression')

df = pd.read_csv('D:/DATA SCIENCE/Data sets/day15/claimants.csv')
df.drop(["CASENUM"], inplace=True, axis=1)
df = df.dropna().reset_index()
df.drop(["index"], inplace=True, axis=1)

#st.subheader('User Input parameters')
st.write(df)

# load the model from disk
loaded_model = load(open('C:/OneDrive/Desktop/Logistic_Model.sav', 'rb'))
prediction = loaded_model.predict(df)
prediction_proba = loaded_model.predict_proba(df)

#st.subheader('Predicted Result')
#st.write('No' if prediction_proba[0][1] > 0.5 else 'Yes')

st.subheader('Prediction Probability')
st.write(prediction_proba)

output = pd.concat([df, pd.DataFrame(prediction_proba)], axis=1)
output.to_csv('C:/output.csv')
def blur_option(IMAGE_PATH, IMAGE_SIZE):
    img_bgr = cv2.imread(IMAGE_PATH)
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    img_rgb = cv2.resize(img_rgb, (IMAGE_SIZE // 3, IMAGE_SIZE // 3))
    st.write("## Blur")
    op_list = [('gaussian', 'Gaussian Blur', GaussianBlur),
               ('motion', 'Motion Blur', MotionBlur),
               ('median', 'Median Blur', MedianBlur),
               ('combined', 'na', 'na')]
    blur_val_dict = {}
    blur_val_arr_dict = {}
    bg_dict = {}
    for i, (op, title, func) in enumerate(op_list):
        if title != 'na':
            blur_val_dict[op] = st.sidebar.slider(title, min_value=3, max_value=13, step=2, value=(3, 3))
            blur_val_arr_dict[op] = np.linspace(blur_val_dict[op][0], blur_val_dict[op][1], 9)
            # blur kernel sizes must be odd
            blur_val_arr_dict[op] = [int(x) if int(x) % 2 == 1 else int(x) + 1
                                     for x in blur_val_arr_dict[op]]
            print(op)
            print(blur_val_arr_dict[op])
        bg_dict[op] = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 3)).astype(np.uint8)
        if op != 'combined':
            for j, blur_val in enumerate(blur_val_arr_dict[op]):
                # print("blur_val")
                # print(blur_val)
                func_inst = func(blur_limit=(int(blur_val), int(blur_val)), always_apply=True)
                row, col = divmod(j, 3)
                bg_dict[op][row * IMAGE_SIZE // 3:row * IMAGE_SIZE // 3 + IMAGE_SIZE // 3,
                            col * IMAGE_SIZE // 3:col * IMAGE_SIZE // 3 + IMAGE_SIZE // 3, :] \
                    = func_inst(image=img_rgb)['image']
        else:
            for j in range(9):
                func_inst = Compose([
                    GaussianBlur(blur_limit=(int(blur_val_arr_dict['gaussian'][j]),
                                             int(blur_val_arr_dict['gaussian'][j])),
                                 always_apply=True),
                    MedianBlur(blur_limit=(int(blur_val_arr_dict['median'][j]),
                                           int(blur_val_arr_dict['median'][j])),
                               always_apply=True),
                    MotionBlur(blur_limit=(int(blur_val_arr_dict['motion'][j]),
                                           int(blur_val_arr_dict['motion'][j])),
                               always_apply=True)
                ])
                row, col = divmod(j, 3)
                bg_dict[op][row * IMAGE_SIZE // 3:row * IMAGE_SIZE // 3 + IMAGE_SIZE // 3,
                            col * IMAGE_SIZE // 3:col * IMAGE_SIZE // 3 + IMAGE_SIZE // 3, :] \
                    = func_inst(image=img_rgb)['image']

    total_background = np.zeros((IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)).astype(np.uint8)
    for i, effect in enumerate(('gaussian', 'motion', 'median', 'combined')):
        row, col = divmod(i, 2)
        total_background[row * IMAGE_SIZE:row * IMAGE_SIZE + IMAGE_SIZE,
                         col * IMAGE_SIZE:col * IMAGE_SIZE + IMAGE_SIZE, :] = bg_dict[effect]
    st.image(total_background)
    st.markdown("* * *")
    st.subheader("Parameters")
    st.write(f"Gaussian param: {blur_val_dict['gaussian']}")
    st.write(f"Motion param: {blur_val_dict['motion']}")
    st.write(f"Median: {blur_val_dict['median']}")
    petal_width = st.sidebar.slider("Petal width", 0.1, 2.5, 0.2)
    data = {
        'sepal_length': sepal_length,
        'sepal_width': sepal_width,
        'petal_length': petal_length,
        'petal_width': petal_width
    }
    features = pd.DataFrame(data, index=[0])
    return features

df = user_input_features()

st.subheader("User Input Parameters")
st.write(df)

iris = datasets.load_iris()
X = iris.data
Y = iris.target

clf = RandomForestClassifier()
clf.fit(X, Y)

prediction = clf.predict(df)
prediction_proba = clf.predict_proba(df)

st.subheader('Class labels and their corresponding index number')
st.write(iris.target_names)