#     cleaned = re.sub('[?|!|\'|"|#]',' ',sentence)
#     cleaned = re.sub('[.|,|(|)|\|/]',' ',cleaned)
#     return cleaned
def clean_data(tex):
    tex = re.sub(r'@\w+', ' ', tex)             # drop @mentions
    tex = re.sub(r'#', ' ', tex)                # drop the hashtag symbol
    tex = re.sub(r'RT[\s]+', ' ', tex)          # drop retweet markers
    tex = re.sub(r'https?:\/\/\S+', ' ', tex)   # drop URLs (must run before the catch-all below)
    tex = re.sub(r'[^\w]', ' ', tex)            # replace remaining non-word characters with spaces
    tex = tex.lower()

    return tex

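# Usage sketch (illustrative, not from the original file; assumes `import re` and
# `import streamlit as st` appear earlier in the file):
#   clean_data("RT @user check https://example.com #news!")
#   -> the mention, URL, hashtag marker and punctuation are blanked out and the
#      remaining text is lower-cased, leaving roughly " check    news ".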

st.set_page_config(page_title="Review Classifier",
                   page_icon="⚕️",
                   layout="centered",
                   initial_sidebar_state="expanded")


def preprocess(review):

    #Total processing
    def text_preprocessing(final):
        i = 0
        str = ''
        s = ''
        filtered_sentence = []
        final = cleanhtml(final)
        for w in final.split():
            for cleaned_words in clean_data(w).split(
            ):  #after cleanpunc again one word may become two or multiple so they'll be split and stored as different words
Example 2
import streamlit as st
from tensorflow.keras.applications import VGG16, VGG19, InceptionV3, Xception, ResNet50
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
import numpy as np
import cv2
from PIL import Image
from io import BytesIO
import pandas as pd
import urllib

# set page layout
st.set_page_config(
    page_title="Image Classification App",
    page_icon="✨",
    layout="wide",
    initial_sidebar_state="expanded",
)
st.title("Image Classification")
st.sidebar.subheader("Input")
models_list = ["VGG16", "VGG19", "Inception", "Xception", "ResNet"]
network = st.sidebar.selectbox("Select the Model", models_list)

# define a dictionary that maps model names to their classes
# inside Keras
MODELS = {
    "VGG16": VGG16,
    "VGG19": VGG19,
    "Inception": InceptionV3,
    "Xception": Xception,  # TensorFlow ONLY
    "ResNet": ResNet50,
Example 3
import streamlit as st
import pandas as pd
import altair as alt
import numpy as np
from streamlit.elements import markdown
import plotly.express as px
from math import sqrt
from random import seed, randrange
import csv

######################################################################
# Page Styles
######################################################################

st.set_page_config(page_title='TT Analytics',
                   page_icon="🏓",
                   layout='centered',
                   initial_sidebar_state='auto')
hide_streamlit_style = """
							<style>
							#MainMenu {visibility: hidden;}
							footer {visibility: hidden;}
							</style>
							"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.markdown(
    f"""
<style>
    .reportview-container .main .block-container{{
        max-width: 90%;
        padding-top: 5rem;
        padding-right: 5rem;
Example 4
def main():
    st.set_page_config(page_title="Bike rental prediction", layout='wide')

    options = [
        'Home', 'EDA', 'Visualization', 'Model building and evaluation',
        'Prediction'
    ]
    choice = st.sidebar.selectbox('Choose one of the following', options)

    if choice == 'Model building and evaluation':
        st.subheader(
            'Build **AutoML** models with 30 different algorithms and corresponding evaluation'
        )
        uploaded_file = st.file_uploader('', type=['csv'])
        if uploaded_file is not None:
            df = pd.read_csv(uploaded_file)
            with st.beta_expander('Expand dataframe'):
                st.dataframe(df)

            X = df.drop(['cnt', 'instant', 'dteday'], axis=1)
            Y = df['cnt']

            X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                                Y,
                                                                test_size=0.25)
            reg = LazyRegressor(verbose=0,
                                ignore_warnings=False,
                                custom_metric=None)
            models_train, predictions_train = reg.fit(X_train, X_train,
                                                      Y_train, Y_train)
            models_test, predictions_test = reg.fit(X_train, X_test, Y_train,
                                                    Y_test)

            st.subheader('2. Table of Model Performance on Test set')
            st.write(predictions_test)

            st.subheader('3. Plot of Model Performance (Test set)')

            with st.markdown('**R-squared**'):
                plt.figure(figsize=(9, 3))

                ax1 = sns.barplot(x=predictions_test.index,
                                  y="R-Squared",
                                  data=predictions_test)
                ax1.set(ylim=(0, 1))
                plt.xticks(rotation=90)
                st.pyplot(plt)

            with st.markdown('**RMSE (capped at 50)**'):

                plt.figure(figsize=(9, 3))

                ax2 = sns.barplot(x=predictions_test.index,
                                  y="RMSE",
                                  data=predictions_test)
                plt.xticks(rotation=90)
                st.pyplot(plt)

    elif choice == 'Prediction':
        st.subheader('Prediction for unseen data')
        st.sidebar.header('User Input Features')
        uploaded_file = st.sidebar.file_uploader("Upload your input CSV file",
                                                 type=["csv"])
        if uploaded_file is not None:
            input_df = pd.read_csv(uploaded_file)
        else:
            st.sidebar.subheader('Or input your features manually')

            def user_input_features():
                season = st.sidebar.selectbox('Season', np.arange(1, 5))
                yr = st.sidebar.selectbox('Year', np.arange(0, 2))
                month = st.sidebar.selectbox('Month', np.arange(1, 13))
                holiday = st.sidebar.selectbox('Is Holiday', (0, 1))
                weekday = st.sidebar.selectbox('Number of day',
                                               np.arange(1, 8))
                workingday = st.sidebar.selectbox('Is working day', (0, 1))
                weathersit = st.sidebar.selectbox('Weather Number',
                                                  np.arange(1, 5))
                temp = st.sidebar.slider('Temperature', 0.05, 0.86, 0.20)
                atemp = st.sidebar.slider('Atemp', 0.07, 0.84, 0.15)
                hum = st.sidebar.slider('Humidity', 0.0, 0.97, 0.55)
                windspeed = st.sidebar.slider('Windspeed', 0.02, 0.5, 0.08)
                casual = st.sidebar.slider('Casual', 2, 3410, 50)
                registered = st.sidebar.slider('Registered', 20, 6946, 5589)
                data = {
                    'season': season,
                    'yr': yr,
                    'mnth': month,
                    'holiday': holiday,
                    'weekday': weekday,
                    'workingday': workingday,
                    'weathersit': weathersit,
                    'temp': temp,
                    'atemp': atemp,
                    'hum': hum,
                    'windspeed': windspeed,
                    'casual': casual,
                    'registered': registered
                }
                features = pd.DataFrame(data, index=[0])
                return features

            input_df = user_input_features()

            st.subheader('User input features :')
            st.dataframe(input_df)

            if st.button('Start prediction'):
                model = pickle.load(open('LassoLarsIC.pkl', 'rb'))

                pred = model.predict(input_df)
                st.write('The prediction is :', pred)

    elif choice == 'EDA':
        st.subheader('Explanatory data analysis')
        uploaded_file = st.file_uploader('', type=['csv'])
        if uploaded_file is not None:
            df = pd.read_csv(uploaded_file)
            with st.beta_expander('Expand dataframe'):
                st.dataframe(df)

            with st.beta_expander('Full profile information'):
                st_profile_report(ProfileReport(df, explorative=True))

            with st.beta_expander('Display basic summary'):
                st.write(df.describe().T)
            with st.beta_expander('Display data type'):
                st.write(df.dtypes)

    elif choice == 'Visualization':
        st.subheader('Data Visualization')
        uploaded_file = st.file_uploader('', type=['csv'])
        if uploaded_file is not None:
            df = pd.read_csv(uploaded_file)
            with st.beta_expander('Expand dataframe'):
                st.dataframe(df)

            with st.beta_expander('Display bike rental along with time axis'):
                df2 = df.copy(deep=True)
                df2.dteday = pd.to_datetime(df2.dteday)
                df2.set_index('dteday', inplace=True)
                plt.figure(figsize=(20, 6))
                df2['cnt'].plot()
                st.pyplot()
                st.write(
                    'This shows that the bike rental count has seasonality and a clear upward trend.'
                )
            with st.beta_expander('Display heatmap'):
                plt.figure(figsize=(10, 6))
                sns.heatmap(df.corr(), annot=True)
                st.pyplot()
                st.write('There is some multicollinearity.')
            col1, col2 = st.beta_columns(2)
            with col1:
                with st.beta_expander(
                        'Display total bike rental counts with different seasons'
                ):
                    df.groupby('season')['cnt'].sum().plot(kind='bar')
                    st.pyplot()
                    st.write('The maximum bike rental count was in season 3.')
                with st.beta_expander(
                        'Display total bike rental counts along with months and years'
                ):
                    df.groupby(['mnth',
                                'yr'])['cnt'].sum().unstack().plot(kind='bar')
                    st.pyplot()
                    st.write(
                        'This plot shows the total bike rental count for every month of 2011 and 2012'
                    )
                    st.write(
                        'From May to October the total bike rental count was high in both years, and the monthly totals increased from 2011 to 2012'
                    )
                with st.beta_expander(
                        'Display the pie chart of weathersit based on bike rental'
                ):
                    plt.pie(df.groupby('weathersit')['cnt'].sum(),
                            labels=['1', '2', '3'],
                            explode=(0.05, 0, 0),
                            radius=1,
                            autopct='%0.2f%%',
                            shadow=True)
                    plt.tight_layout()
                    plt.legend(loc='upper left')
                    plt.axis('equal')
                    plt.show()
                    st.pyplot()
                    st.write(
                        'Out of the total bike rental count, 68.57% occurred in "Clear, Few clouds, Partly cloudy, Partly cloudy" weather and 30.27% in "Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist" weather.'
                    )
                with st.beta_expander('Display the outliers'):
                    num_var = [
                        'temp', 'atemp', 'hum', 'windspeed', 'casual',
                        'registered'
                    ]
                    for i in num_var:
                        sns.boxplot(y=i, data=df)
                        plt.title('Boxplot of ' + i)
                        plt.show()
                        st.pyplot()
                    st.write(
                        'We have found some outliers in the features: casual, windspeed and humidity.'
                    )
            with col2:
                with st.beta_expander(
                        'Display the relationship between bike rental count and temperature'
                ):
                    sns.scatterplot(x='temp', y='cnt', data=df)
                    st.pyplot()
                    st.write(
                        'We found an almost linear relation between temp and count.'
                    )
                with st.beta_expander(
                        'Display the relationship between bike rental count and windspeed'
                ):
                    sns.scatterplot(x='windspeed', y='cnt', data=df)
                    st.pyplot()
                    st.write('There is not much to interpret here.')
                with st.beta_expander(
                        'Display violin plot of seasons along with bike rental count'
                ):
                    sns.violinplot(x=df.season, y=df.cnt)
                    st.pyplot()
                    st.write(
                        'The count was lowest in season 1, whose distribution is right-skewed; seasons 2, 3 and 4 have no pronounced long tail and roughly similar distributions.'
                    )

    elif choice == 'Home':

        image = Image.open('RentBike_25-09-17_02.jpg')
        st.image(image, use_column_width=True)
        st.title(
            'Bike rental analysis, visualization, model building, evaluation and prediction in a single window'
        )
Example 5
import base64
import json

import pandas as pd
import streamlit as st
st.set_page_config(layout='wide')


def download_link(object_to_download, download_filename, download_link_text):
    """
    Generates a link to download the given object_to_download.

    object_to_download (str, pd.DataFrame):  The object to be downloaded.
    download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt
    download_link_text (str): Text to display for download link.

    Examples:
    download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')
    download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')

    """
    if isinstance(object_to_download, pd.DataFrame):
        object_to_download = object_to_download.to_csv(index=False)

    # some strings <-> bytes conversions necessary here
    b64 = base64.b64encode(object_to_download.encode()).decode()

    return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
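
# Usage sketch (illustrative, not part of the original snippet): render the returned
# anchor tag with st.markdown so the link becomes clickable.
#   df = pd.DataFrame({"a": [1, 2, 3]})   # hypothetical data
#   st.markdown(download_link(df, "data.csv", "Download CSV"), unsafe_allow_html=True)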


# data['cards']: list of {'id', 'name', 'closed', 'desc', 'dueReminder', 'idList', 'idLabels', 'dueComplete'}
def page_config():
    st.set_page_config(page_title="Market Index",
                       layout="centered",
                       initial_sidebar_state="auto")
    st.title("Market Index")
Example 7
def get_co2_data():
    # OWID Data on CO2 and Greenhouse Gas Emissions
    # Creative Commons BY license
    url = 'https://github.com/owid/co2-data/raw/master/owid-co2-data.csv'
    return get_data(url)


@st.cache
def get_warming_data():
    # OWID Climate Change impacts
    # Creative Commons BY license
    url = 'https://raw.githubusercontent.com/owid/owid-datasets/master/datasets/Climate%20change%20impacts/Climate%20change%20impacts.csv'
    return get_data(url).query("Entity == 'World' and Year <=2021")


st.set_page_config(layout="wide")

df_co2 = get_co2_data()

st.markdown("""
# World CO2 emissions
__The graphs below show the CO2 emissions per capita for the entire 
world and individual countries over time.
Select a year with the slider in the left-hand graph and countries 
from the drop down menu in the other one.__

__Scroll down to see charts demonstrating the correlation between 
the level of CO2 and global warming.__

__Hover over any of the charts to see more detail__
Example 8
@st.experimental_memo()
def train_projection(projection, n_components, df):
    if projection == 'PCA':
        projection_model = PCA(n_components=n_components)
    elif projection == 'T-SNE':
        projection_model = TSNE(n_components=n_components)
    elif projection == 'UMAP':
        projection_model = UMAP(n_components=n_components)
    data = projection_model.fit_transform(df.drop(columns=['dominant_topic']))
    return data


if __name__ == '__main__':
    st.set_page_config(page_title='Topic Modeling',
                       page_icon='./data/favicon.png',
                       layout='wide')

    preprocessing_options = st.sidebar.form('preprocessing-options')
    with preprocessing_options:
        st.header('Preprocessing Options')
        ngrams = st.selectbox('N-grams', [None, 'bigrams', 'trigrams'],
                              help='TODO ...')  # TODO ...
        st.form_submit_button('Preprocess')

    visualization_options = st.sidebar.form('visualization-options')
    with visualization_options:
        st.header('Visualization Options')
        collocations = st.checkbox(
            'Enable WordCloud Collocations',
            help='Collocations in word clouds enable the display of phrases.')
Example 9
def page_layout(page_layout):
    st.set_page_config(layout=page_layout)
Example 10
import copy
import datasets
import json
import os
import streamlit as st
import yaml

from dataclasses import asdict
from glob import glob
from os.path import join as pjoin

st.set_page_config(
    page_title="HF Dataset Tagging App",
    page_icon="https://huggingface.co/front/assets/huggingface_logo.svg",
    layout="wide",
    initial_sidebar_state="auto",
)

task_set = json.load(open("task_set.json"))
license_set = json.load(open("license_set.json"))
language_set_restricted = json.load(open("language_set.json"))
language_set = json.load(open("language_set_full.json"))

multilinguality_set = {
    "monolingual": "contains a single language",
    "multilingual": "contains multiple languages",
    "translation": "contains translated or aligned text",
    "other": "other type of language distribution",
}

creator_set = {
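Example 11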
import streamlit as st
import numpy as np
import pandas as pd
import base64
import altair as alt
import scipy.constants as constants
from scipy.integrate import simps, quad
from scipy.interpolate import splrep, splint
from scipy.optimize import fmin


st.set_page_config(
    page_title="PV-limit-calculator",
)

st.write("# PV Thermodynamic Limit Calculator")

st.write("""
    Calculate your solar cell's maximum theoretical efficiency 
    along with other device metrics.
    
    **Plot & download** the J-V curve data 
    as well as the device metrics as a function of bandgap.
    """
)

with st.expander("Written by..."):
    st.write("""
    PV thermodynamic limit calculation part of the code was written by **Mark Ziffer** as a 
    graduate student in **Ginger lab at the University of
    Washington**
Example 12
import streamlit as st
from streamlit_jina import jina
st.set_page_config(page_title="Jina Image Search", )

endpoint = "http://0.0.0.0:45678/api/search"

st.title("Jina Image Search")
st.markdown(
    "You can run Jina's [Pokemon Docker example](https://github.com/jina-ai/examples/tree/master/pokedex-with-bit#tldr-just-show-me-the-pokemon) to test out this search"
)

jina.image_search(endpoint=endpoint)
Example 13
def main():
    ''' The main Web app '''
    # Set Page name and favicon
    ICON = "./assets/favicon.png"
    st.set_page_config(page_title="MonoShot", page_icon=ICON)

    # Title
    TITLE_STYLE = '''
                border: 3px solid #9ab7d6;
                border-radius: 15px;
                text-align: center;
                background: rgb(220, 235, 237);
                color: white;
                font-family: Futura, sans-serif;
                color: pink;
                text-shadow: #000 0px 0px 1px;
            '''
    title = f'''
            <div style="{TITLE_STYLE}">
                <h1> MONOSHOT </h1>
            </div>
            '''
    st.markdown(title, unsafe_allow_html=True)  # Render the HTML/CSS

    # Header
    HEADER = "Just a single shot that's all it takes."
    st.subheader(HEADER)

    # File Upload and Selections
    PROCESSED_DATA_PATH = "./processed_data/"

    uploaded_file = st.file_uploader(
        "Upload your file here:",
        type=[".mp4", ".avi", ".mov", ".jpeg", ".jpg", ".png"])
    if uploaded_file:
        file_data = get_file_data(uploaded_file)
        processor = FileProcessor(
            file_data
        )  # Processor object for applying diff. methods to the media file

        if uploaded_file.type in ("video/mp4", "video/mov", "video/avi"):
            if not os.path.isdir(PROCESSED_DATA_PATH):
                os.mkdir(PROCESSED_DATA_PATH)

            video_duration = processor.get_duration()
            has_required_dim = processor.get_dimensions()

            if video_duration > 30:
                display_msg("Oops! Video too long to be processed!", -1)

            elif not has_required_dim:
                display_msg(
                    "Oops! Allowed Resolutions are: 360p, 480p, 720p and 1080p",
                    -1)

            else:
                # SideBar Widgets
                select_options = ["Enhance Image", "Generate Shot"]
                select_output = st.sidebar.selectbox("Select Enhancement:",
                                                     select_options)

                if select_output:
                    time.sleep(1.5)
                    if select_output == "Enhance Image":
                        time_stamp = st.sidebar.slider(
                            "Choose the time stamp (in seconds)",
                            min_value=1,
                            max_value=video_duration,
                            step=1)
                        brightness_lvl = st.sidebar.slider("Brightness:",
                                                           min_value=0.0,
                                                           max_value=2.0,
                                                           step=0.2,
                                                           value=1.0)
                        sharpness_lvl = st.sidebar.slider("Sharpness:",
                                                          min_value=0.0,
                                                          max_value=2.0,
                                                          step=0.2,
                                                          value=1.0)
                        contrast_lvl = st.sidebar.slider("Contrast:",
                                                         min_value=0.0,
                                                         max_value=2.0,
                                                         step=0.2,
                                                         value=1.0)
                        color_lvl = st.sidebar.slider("Color:",
                                                      min_value=0.0,
                                                      max_value=2.0,
                                                      step=0.2,
                                                      value=1.0)

                        if st.sidebar.button("Generate"):
                            processor.enhanced_img(
                                PROCESSED_DATA_PATH,
                                time_stamp *
                                1000,  # Timestamp should be in milliseconds
                                brightness_lvl,
                                sharpness_lvl,
                                contrast_lvl,
                                color_lvl)
                            display_progress_bar()
                            display_msg(
                                "Enhanced Image has been successfully generated.",
                                1)

                    elif select_output == "Generate Shot":
                        display_msg(
                            "NOTE: It may take a while to generate a shot.", 0)

                        shot_options = [
                            "SlowMo", "TimeLapse", "GIF", "Boomerang"
                        ]
                        shot = st.sidebar.selectbox("Select Shot:",
                                                    shot_options)

                        if shot == "SlowMo" and st.sidebar.button("Generate"):
                            processor.generate_shot(PROCESSED_DATA_PATH,
                                                    slowmo=True)
                            display_progress_bar()
                            display_msg(
                                "SlowMo has been successfully generated.", 1)

                        elif shot == "TimeLapse" and st.sidebar.button(
                                "Generate"):
                            processor.generate_shot(PROCESSED_DATA_PATH,
                                                    timelapse=True)
                            display_progress_bar()
                            display_msg(
                                "TimeLapse has been successfully generated.",
                                1)

                        elif shot == "GIF" and st.sidebar.button("Generate"):
                            processor.generate_shot(PROCESSED_DATA_PATH,
                                                    gif=True)
                            display_progress_bar()
                            display_msg("GIF has been successfully generated.",
                                        1)

                        elif shot == "Boomerang":
                            start_time = st.sidebar.slider(
                                "Choose the start time (in seconds):",
                                min_value=1,
                                max_value=video_duration,
                                step=1)
                            end_time = st.sidebar.slider(
                                "Choose the end time (in seconds):",
                                min_value=start_time + 2,
                                max_value=video_duration,
                                step=1)

                            if st.sidebar.button("Generate"):
                                processor.generate_shot(PROCESSED_DATA_PATH,
                                                        boomerang=(True,
                                                                   start_time,
                                                                   end_time))
                                display_progress_bar()
                                display_msg(
                                    "Boomerang has been successfully generated.",
                                    1)

                else:
                    display_msg(
                        "Please select atleast a single option to proceed.", 0)

        else:
            if not os.path.isdir(PROCESSED_DATA_PATH):
                os.mkdir(PROCESSED_DATA_PATH)

            select_options = [
                "Enhance Resolution", "Apply Filter", "Extract Text"
            ]
            select_output = st.sidebar.selectbox("Select Enhancement:",
                                                 select_options)

            if select_output:
                time.sleep(1.5)
                if select_output == "Enhance Resolution":

                    if st.sidebar.button("Generate"):
                        display_msg(
                            "Please wait. It may take a while to enhance the resolution...",
                            0)
                        processor.enhance_resolution(PROCESSED_DATA_PATH)
                        display_progress_bar()
                        display_msg(
                            "Enhanced Resolution Image has been successfully generated.",
                            1)

                elif select_output == "Apply Filter":
                    select_options = [
                        "Pencil Sketch", "Water Colored", "Faded", "Document",
                        "Cartoonify", "Vigenette", "Phantom", "Negative"
                    ]
                    select_output = st.sidebar.selectbox(
                        "Select a filter:", select_options)

                    if st.sidebar.button("Generate"):
                        processor.apply_filter(PROCESSED_DATA_PATH,
                                               filter=select_output)
                        display_progress_bar()
                        display_msg("Filter has been successfully applied.", 1)

                elif select_output == "Extract Text":
                    if st.sidebar.button("Fetch Text"):
                        txt = processor.extract_txt()

                        if txt:
                            display_msg("Text extraction successful.", 1)
                            text_expander = st.beta_expander("Output Section")
                            with text_expander:
                                st.text("*" * 70)
                                st.write(txt)
                                st.text("*" * 70)
                        else:
                            display_msg("Failed to extract text!", -1)

            else:
                display_msg(
                    "Please select atleast a single option to proceed.", 0)

        file_data.close()  # Delete the temp file after all the operations.
        shutil.rmtree("./temp_files")

        helper_widget = st.empty()
        processed_files = os.listdir(PROCESSED_DATA_PATH)

        if processed_files:
            if helper_widget.button("Proceed to Download"):
                display_processed_file(PROCESSED_DATA_PATH, processed_files)

    display_info_sections()
Example 14
import json
import pandas as pd
import streamlit as st
from datetime import datetime


# Set page layout
st.set_page_config(
    page_title="Travel",
    page_icon="🌍",
    layout="wide",
    initial_sidebar_state="expanded",
)

@st.cache(allow_output_mutation=True)
def read_json(json_file):
    # read the json file
    with open(json_file, 'r') as f:
        data = json.loads(f.read())
    # Flatten the dataframe
    df = pd.json_normalize(data, record_path =['locations'])
    # create a dataframe from the json file
    #df = pd.DataFrame(data)
    # return the dataframe
    return df
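
# Usage sketch (illustrative; the filename is hypothetical):
#   df = read_json("Location History.json")   # e.g. a Google Takeout location export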

# create a function to read timestampms as a column and convert to datetime 
def convert_timestampms(df):
    # convert the timestampms column to datetime
    df['timestampMs'] = pd.to_datetime(df['timestampMs'], unit='ms')
    df['date'] = df['timestampMs'].dt.date
Example 15
#Load libraries
import plotly.express as px
import streamlit as st
import pydeck as pdk
import pandas as pd
import numpy as np
import base64
import os
import re

#Set title and favicon
st.set_page_config(
    page_title='Precios de Apartamentos y Casas en la Ciudad de Guatemala.',
    page_icon=
    "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/lg/57/flag-for-guatemala_1f1ec-1f1f9.png"
)
st.markdown('<html lang="es"><html translate="no">', unsafe_allow_html=True)

#Code to automatically include GTM Container
code = """<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-XXXXXX');</script>
<!-- End Google Tag Manager -->"""

a = os.path.dirname(st.__file__) + '/static/index.html'
with open(a, 'r') as f:
    data = f.read()
    if len(re.findall('GTM-', data)) == 0:
Example 16
from pathlib import Path

# Locals
dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(dirname)
from src.models.methods.metadata import get_parameter_metadata
from src.models.icestupaClass import Icestupa
from src.utils.settings import config
from src.utils import setup_logger


# SETTING PAGE CONFIG TO WIDE MODE
air_logo = os.path.join(dirname, "src/visualization/logos/AIR_logo_circle.png")
st.set_page_config(
    layout="centered",  # Can be "centered" or "wide". In the future also "dashboard", etc.
    initial_sidebar_state="expanded",  # Can be "auto", "expanded", "collapsed"
    page_title="Icestupa",  # String or None. Strings get appended with "• Streamlit".
    page_icon=air_logo,  # String, anything supported by st.image, or None.
)


@st.cache
def vars(df_in):
    input_cols = []
    input_vars = []
    output_cols = []
    output_vars = []
    derived_cols = []
    derived_vars = []
    for variable in df_in.columns:
        v = get_parameter_metadata(variable)
        if v["kind"] == "Input":
Example 17
# A logo, wide mode template for a Streamlit app. Check out docs.streamlit.io for more ideas.
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt

# Set to wide mode, add page title, change layout centering, or add a favicon
st.set_page_config(page_title="Streamlit Template: Big Chart", layout="wide")


# Load your data, decorating your function with @st.cache to cache data for performance
@st.cache
def get_data():
    data = pd.DataFrame(np.random.randn(50, 4), columns=['a', 'b', 'c', 'd'])
    return data


data = get_data()

# Set a title for your app
title_col1, title_col2, title_col3 = st.beta_columns((.25, 2, .5))

with title_col1:
    st.image("thumbnails/logo_crown.png", width=100)

with title_col2:
    st.title("Streamlit Template: Logo Style!")

with title_col3:
    st.write("""
    #
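Example 18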
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 22:17:19 2021

@author: Anurodh Mohapatra
"""

import pickle
import json
import streamlit as st
import pandas as pd

st.set_page_config(page_title='AM',
                   page_icon=None,
                   layout='centered',
                   initial_sidebar_state='auto')

# To hide hamburger (top right corner) and “Made with Streamlit” footer
hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
st.markdown(hide_streamlit_style, unsafe_allow_html=True)

# load the model from disk
model = pickle.load(open('Website/HouseRentSite/model.pkl', 'rb'))
map_add = json.load(open('Website/HouseRentSite/map.json', 'r'))

Example 19
    'company_size_100-500', 'company_size_1000-4999', 'company_size_10000+',
    'company_size_50-99', 'company_size_500-999', 'company_size_5000-9999',
    'company_size_<10', 'company_size_nan', 'company_type_Early Stage Startup',
    'company_type_Funded Startup', 'company_type_NGO', 'company_type_Other',
    'company_type_Public Sector', 'company_type_Pvt Ltd', 'company_type_nan',
    'last_new_job_1', 'last_new_job_2', 'last_new_job_3', 'last_new_job_4',
    'last_new_job_>4', 'last_new_job_nan', 'last_new_job_never'
]

##### Configure web app
PAGE_CONFIG = {
    "page_title": "StColab.io",
    "page_icon": ":smiley:",
    "layout": "centered"
}
st.set_page_config(**PAGE_CONFIG)
st.set_option('deprecation.showImageFormat', False)
st.header("Employee Trajectory: Switch job vs Stay at current company")
image = Image.open(r'employees.jpg')
st.image(image, use_column_width=True, format='JPG')
st.sidebar.write(
    "**Please insert values to determine whether an employee will stay at their current company or switch jobs.**"
)


##### Pull features
def format_cap(text):
    return text.replace('_', ' ').strip().capitalize()


def format_city(city):
Example 20
import utils
import model
# Data Loading and Numerical Operations
import pandas as pd
import numpy as np
# Metrics
from sklearn.metrics import precision_score, recall_score
# Web App
import streamlit as st

st.set_option('deprecation.showPyplotGlobalUse', False)
st.set_page_config(page_title='Manual Parameter Tuner', layout='centered', initial_sidebar_state='expanded')


def main():
    utils.local_css("css/styles.css")
    st.title("Heart Disease Prediction - Manual Parameter Tuner")
    st.sidebar.title("Manual Parameter Tuning")
    st.markdown("### Machine Learning is not only about the algorithms you use but also about the different Parameters "
                "assigned to each of them. The final model is heavily affected by the parameters used for a specific "
                "algorithm. "
                "\nThis interactive web app will help you to explore the various parameters of different ML algorithms."
                "\nThe different ML models presented here are:"
                "\n* Logistic Regression"
                "\n* Support Vector Classifier"
                "\n* k-Nears Neighbour Classifier"
                "\n* Decision Tree Classifier"
                "\n* Random Forest Classifier"
                "\n* Gradient Boosting Classifier"
                "\n* XGBoost Classifier"
                "\n### The dataset used here is the **Framingham** Coronary Heart Disease dataset publicly available "
Example 21
import streamlit as st
from transformers import pipeline
from transformers.tokenization_utils import TruncationStrategy

import tokenizers
import pandas as pd
import requests

st.set_page_config(
     page_title='AlephBERT Demo',
     page_icon="🥙",
     initial_sidebar_state="expanded",
)

st.markdown(
    """
<style>

    .sidebar .sidebar-content {
        background-image: linear-gradient(#3377ff,  #80aaff);
    }

    footer {
        color:white;
        visibility: hidden;
    }
    input {
        direction: rtl;
    }
    .stTextInput .instructions {
        color: grey;
Example 22
import streamlit as st
import pandas as pd
import os, re
from interface.landing_page import get_landing_page
from interface.retweet_graph_analysis import get_retweet_graph_analysis_page
from interface.top_image_analysis import get_top_image_analysis_page
from interface.candidate_analysis import get_candidate_analysis_page
from interface.explore_data import get_explore_data_page
import interface.SessionState as SessionState
# from interface.df_utils import load_pickled_df
from interface.image_urls import bucket_image_urls
import pickle

st.set_page_config(
    page_title="VoterFraud2020 - a Twitter Dataset of Election Fraud Claims",
    page_icon="./interface/img/favicon.ico",
    initial_sidebar_state="expanded",
)


@st.cache
def insert_html_header(name, snippet):
    a = os.path.dirname(st.__file__) + "/static/index.html"
    with open(a, "r") as f:
        data = f.read()
        if snippet not in data:
            print("Inserting {}".format(name))
            with open(a, "w") as ff:
                new_data = re.sub("<head>", "<head>" + snippet, data)
                ff.write(new_data)
        else:
Example 23
def run_app():
    """Function running the streamlit app
    """
    st.set_page_config(layout="wide")
    st.title("Single Parameter Estimation From Random Sample")

    mle_estimate, mcmc_estimate = st.beta_columns(2)

    st.sidebar.title('Simulated Data')
    mu = st.sidebar.slider('\u03BC',
                           min_value=-100,
                           max_value=100,
                           value=0,
                           step=1)
    n = st.sidebar.slider('N', min_value=10, max_value=100, value=30, step=1)

    run_sampler = st.sidebar.button('Run Sampler')
    y = np.random.normal(mu, 1, size=n)

    with st.sidebar.beta_expander('Metropolis Hastings'):
        mu_init_metr = st.slider('\u03BC First Proposal',
                                 min_value=-100,
                                 max_value=100,
                                 value=0,
                                 step=1)
        samples = st.number_input('Samples',
                                  min_value=100,
                                  max_value=10000,
                                  value=1000)
        warm_up = st.number_input('Warm-Up',
                                  min_value=100,
                                  max_value=10000,
                                  value=1000)
        proposal_width = st.slider('Proposal Width',
                                   min_value=0.,
                                   max_value=10.,
                                   value=0.1,
                                   step=0.1)
        mu_prior = st.slider('\u03BC Prior',
                             min_value=-100,
                             max_value=100,
                             value=0,
                             step=1)
        sd_prior = st.slider('\u03C3 Prior',
                             min_value=0,
                             max_value=100,
                             value=10,
                             step=1)

    with st.sidebar.beta_expander('Newton-Raphson'):
        mu_init_newt = st.slider('\u03BC Init',
                                 min_value=-100,
                                 max_value=100,
                                 value=0,
                                 step=1)
        tol = st.number_input('Tolerance',
                              min_value=1e-9,
                              max_value=0.1,
                              value=1e-9)
        maxiter = st.number_input('Maximum Number of Iterations',
                                  min_value=10,
                                  max_value=1000,
                                  value=100)
        boot = st.number_input('Number of Bootstrapped Samples',
                               min_value=1,
                               max_value=100,
                               value=30,
                               step=1)

    if run_sampler:
        mcmc_mu = metropolis_hastings(y=y,
                                      mu_init=mu_init_metr,
                                      warm_up=warm_up,
                                      samples=samples,
                                      proposal_width=proposal_width,
                                      prior_mu=mu_prior,
                                      prior_sigma=sd_prior)
        mcmc_fig = plot_solution(mu=mu, variance=1, approx_solution=mcmc_mu)
        mcmc_estimate.header('Metropolis-Hastings Estimate')
        mcmc_estimate.pyplot(mcmc_fig)

        mle_mu = maximum_likelyhood(y=y,
                                    mu_init=mu_init_newt,
                                    boot=boot,
                                    maxiter=maxiter,
                                    tol=tol)
        mle_fig = plot_solution(mu=mu, variance=1, approx_solution=mle_mu)
        mle_estimate.header('Newton-Raphson Estimate')
        mle_estimate.pyplot(mle_fig)
Example 24
def main():
    st.set_page_config(page_title='Covid-19 Aerosol Transmission Estimator',
                       layout='wide',
                       initial_sidebar_state='expanded')
    st.markdown("***Beta Version***")

    st.markdown("*This is a tool to assist in risk assessment and aid in decision making. Many variables can contribute to creating a safe work place. These estimates are based on current assumptions. This can be used by supervisors, safety managers and employees as a basis for communication and enhanced understanding.*")
    st.title('Covid-19 Aerosol Transmission Estimator')
    hide_streamlit_style = """
    <style>
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    </style>

    """
    st.markdown(hide_streamlit_style, unsafe_allow_html=True) 

    st.markdown("This web application calculates the ***estimated probability of infection*** in an ***indoor environment*** based on several factors.")
    st.markdown("This application interface works best when on a desktop computer.")
    st.markdown("<< You can adjust these factors in the Parameters panel to the left.")

    st.markdown("""As you make changes in the [Parameters](#parameters) panel, new results will be calculated 
                 in the [Overall Results](#overall-results) section.""")

    st.markdown("""You can read further detailed instructions in the [How to use this app](#how-to-use-this-app)
                 section below. You can read about how the calculations are made in the [How this works](#how-this-works) 
                 section below.""")
    st.sidebar.markdown('## Parameters')

    st.sidebar.markdown('### Covid variant')

    variant_dict = {'Original: 1x':1,
                'Alpha (B.1.1.7 UK): 1.3x': 1.3,
                'Beta (B.1.351 South Africa): 1.25x': 1.25,
                'Gamma (P.1 Brazil): 1.4x': 1.4,
                'Delta (B.1.617.2 India): 2x': 2.0}
    variant_select = st.sidebar.selectbox('COVID-19 Variant', 
                               list(variant_dict.keys()),
                               index=4)
    variant_multiplier = variant_dict[variant_select]
    
    st.sidebar.markdown('### Room measurements')
    
    option = st.sidebar.selectbox('Presets',
                                 ('OCIO Video Conference Room, CapGal 4001',
                                  'Freer Staff Library, G203',
                                  'Break Room, SMS 118',
                                  'Small exhibit gallery, Hirshhorn 202',
                                  'Outer Ring Corridor, Hirshhorn 4th floor',
                                  'Break Room, MSC, G2002B',
                                  'LAB Processing Lab, MSC D1015',
                                  'Classroom, NASM Udvar-Hazy 101.06B',
                                  'Mary Baker Engen Restoration Hanger, NASM Udvar-Hazy 113.03',
                                  'Family History Center, NMAAHC 2052',
                                  'Health Services, NMAAHC C3050',
                                  'Museum Shop, NMAAHC 1025',
                                  'S C Johnson Conference Room A, NMAH 1014',
                                  'Collections Workroom, NMAI LL-2144',
                                  'Conservation Scientific Lab, NMAI E-3099',
                                  'Anthropology Library, NMNH 330',  # must match the preset_dict key below
                                  'Fossil Prep Lab NMNH 25',
                                  'LAB Break Room, NMNH W107',
                                  'Education Center Classroom, QUAD 3037',
                                  'Reptile Discovery Center, NZP F100A'),
                                 )
    preset_dict = {'OCIO Video Conference Room, CapGal 4001':{'length':26.7,
                           'width':26.7,
                           'area':714,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Freer Staff Library, G203':{'length':23.1,
                           'width':23.1,
                           'area':535,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Break Room, SMS 118':{'length':12.2,
                           'width':12.2,
                           'area':149,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Small exhibit gallery, Hirshhorn 202':{'length':18.6,
                           'width':18.6,
                           'area':347,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Outer Ring Corridor, Hirshhorn 4th floor':{'length':41.8,
                           'width':41.8,
                           'area':1747,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Break Room, MSC, G2002B':{'length':17.7,
                           'width':17.7,
                           'area':313,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'LAB Processing Lab, MSC D1015':{'length':41.2,
                           'width':41.2,
                           'area':1699,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Classroom, NASM Udvar-Hazy 101.06B':{'length':34.9,
                           'width':34.9,
                           'area':1215,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Mary Baker Engen Restoration Hanger, NASM Udvar-Hazy 113.03':{'length':144.9,
                           'width':144.9,
                           'area':20998,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Family History Center, NMAAHC 2052':{'length':41.5,
                           'width':41.5,
                           'area':1719,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Health Services, NMAAHC C3050':{'length':12.5,
                           'width':12.5,
                           'area':157,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Museum Shop, NMAAHC 1025':{'length':49.5,
                           'width':49.5,
                           'area':2454,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'S C Johnson Conference Room A, NMAH 1014':{'length':30.6,
                           'width':30.6,
                           'area':936,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Collections Workroom, NMAI LL-2144':{'length':40.2,
                           'width':40.2,
                           'area':1618,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Conservation Scientific Lab, NMAI E-3099':{'length':18.4,
                           'width':18.4,
                           'area':339,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Anthropology Library, NMNH 330':{'length':43.0,
                           'width':43.0,
                           'area':1852,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Fossil Prep Lab NMNH 25':{'length':36.3,
                           'width':36.3,
                           'area':1320,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'LAB Break Room, NMNH W107':{'length':13.7,
                           'width':13.7,
                           'area':189,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Education Center Classroom, QUAD 3037':{'length':40.4,
                           'width':40.4,
                           'area':1636,
                           'height':10,
                           'ach':3,
                           'merv':4},
               'Reptile Discovery Center, NZP F100A':{'length':25.5,
                           'width':25.5,
                           'area':648,
                           'height':10,
                           'ach':3,
                           'merv':4}
                }

    b15 = st.sidebar.number_input('Floor area of room (in ft²)', value=preset_dict[option]['area'])
    #b14 = st.sidebar.number_input('Width of room (in ft)', value=preset_dict[option]['width'])
    b16 = st.sidebar.number_input('Height of room (in ft)', value=preset_dict[option]['height'])
    
    ### Calculating room volume
#    e13 = b13 * 0.305
#    e14 = b14 * 0.305
#    e15 = e13 * e14
    e15 = b15 / 10.764
    e16 = b16 * 0.305
#    e17 = e13 * e14 * e16
    e17 = e15 * e16
    
    ach_dict = {'Closed Windows (0.3)':0.3,
                'Open Windows (2.0)': 2.0,
                'Mechanical Ventilation (3.0)': 3.0,
                'Smithsonian Standard (6.0)': 6.0,
                'Better mechanical ventilation (8.0)': 8.0,
                'Laboratory, Restaurant (9.0)': 9.0,
                'Bar (15.0)': 15.0,
                'Hospital/Subway Car (18.0)': 18.0,
                'Airplane (24.0)': 24.0}
    ach_select = st.sidebar.selectbox('Air changes per hour', 
                               list(ach_dict.keys()),
                               index=preset_dict[option]['ach'])
    b28 = ach_dict[ach_select]
    merv_dict = {'MERV 0 (None)':0,
                 'MERV 2 (Res. Window AC)': 2,
                 'MERV 6 (Res./Comm./Industrial)': 6,
                 'MERV 10 (Res./Comm./Hospital)': 10,
                 'MERV 13 (Smithsonian Standard)': 13,
                 'MERV 14 (Hospital & General Surgery)': 14,
                 'MERV 17 (HEPA)': 17}
    merv_select = st.sidebar.selectbox(
                'Filtration System',
                options=list(merv_dict.keys()),
                index=preset_dict[option]['merv'])
    merv_value = merv_dict[merv_select]
    
    recirc_dict = {'None (0)':0,
                'Slow (0.3)': 0.3,
                'Moderate (1.0)': 1.0,
                'Fast (10.0)': 10.0,
                'Airplane (24.0)': 24.0,
                'Subway Car (54.0)': 54.0}
    recirc_select = st.sidebar.selectbox('Recirculation Rate (per hour)', 
                               list(recirc_dict.keys()),
                               index=2)
    recirc_rate = recirc_dict[recirc_select]
    
    st.sidebar.markdown('### Advanced parameters')
    breathing_dict = {'Resting (0.49)': 0.49,
                      'Standing (0.54)': 0.54,
                      'Singing (1.00)': 1.00,
                      'Light Exercise (1.38)': 1.38,
                      'Moderate Exercise (2.35)': 2.35,
                      'Heavy Exercise (3.30)': 3.30}
    breathing_select = st.sidebar.selectbox('Breathing rate of susceptibles (m³/hr)', 
                               list(breathing_dict.keys()),
                               index=0)
    b47 = breathing_dict[breathing_select]
    #b47 = st.sidebar.number_input('Breathing rate of susceptibles (m3/hr)', value=0.72)
    resp_dict = {'Breathing (light) (1.1)': 1.10,
                 'Breathing (normal) (4.2)': 4.20,
                 'Breathing (heavy) (8.8)': 8.80,
                 'Talking (whisper) (29.0)': 29.00,
                 'Talking (normal) (72.0)': 72.00,
                 'Talking (loud) (142.0)': 142.00,
                 'Singing (970.0)': 970.0}
    resp_select = st.sidebar.selectbox('Respiratory Activity: (q/m³)', 
                               list(resp_dict.keys()),
                               index=0)   
    b51 = resp_dict[resp_select] * b47

    st.sidebar.markdown('q/h ='+'{:.2f}'.format(b51))

    b53 = st.sidebar.slider('Mask fit/compliance', 0, 100, value = 100)
    mask_ex_dict = {'None (0%)': 0.0,
                'Face shield (23%)': 23,
                'Cloth mask (50%)': 50.0,
                'Disposable surgical (65%)': 65.0,
                'N95, KN95 masks (90%)': 90.0}
    mask_ex_select = st.sidebar.selectbox(
            'Mask efficiency',
            options=list(mask_ex_dict.keys()),
            index=3)
    b52 = mask_ex_dict[mask_ex_select]
    b54 = b52
    #b52 = st.sidebar.number_input('Exhalation mask efficiency (%)', value=50)
    #b54 = st.sidebar.number_input('Inhalation mask efficiency', value=30)

    st.sidebar.markdown('### Scenario parameters')
    b24 = st.sidebar.number_input('Duration of event (in min)', value=480)
    si_cap = math.floor((b15) / 113)
    six_foot_cap = math.floor((b15) / 36)
#    st.sidebar.markdown(f'*SI recommendations of 113 ft² per person would accomodate **{si_cap}** people in this space.*')
    b38 = st.sidebar.number_input('Total number of people present', value=12)
    b39 = st.sidebar.number_input('Infective people', value=1)
    immune = st.sidebar.number_input('Immune people', value=1)
    suscept = b38 - immune - b39

    ## Calculations


    e24 = b24/60

    ### Calculation aerosol filtration
    # Source: https://www.ashrae.org/technical-resources/filtration-disinfection
    # Table of MERV values corresponding to aerosol filtration efficiency, by different particle sizes (in microns)
    merv_eff_dict = [
        {'merv': 0, '0.3-1': 0, '1-3': 0, '3-10': 0},
        {'merv': 1, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},
        {'merv': 2, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},
        {'merv': 3, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},
        {'merv': 4, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.01},
        {'merv': 5, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.2},
        {'merv': 6, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.35},
        {'merv': 7, '0.3-1': 0.01, '1-3': 0.01, '3-10': 0.50},
        {'merv': 8, '0.3-1': 0.01, '1-3': 0.20, '3-10': 0.70},
        {'merv': 9, '0.3-1': 0.01, '1-3': 0.35, '3-10': 0.75},
        {'merv': 10, '0.3-1': 0.01, '1-3': 0.50, '3-10': 0.80},
        {'merv': 11, '0.3-1': 0.2, '1-3': 0.65, '3-10': 0.85},
        {'merv': 12, '0.3-1': 0.35, '1-3': 0.80, '3-10': 0.90},
        {'merv': 13, '0.3-1': 0.50, '1-3': 0.85, '3-10': 0.90},
        {'merv': 14, '0.3-1': 0.75, '1-3': 0.90, '3-10': 0.95},
        {'merv': 15, '0.3-1': 0.85, '1-3': 0.90, '3-10': 0.95},
        {'merv': 16, '0.3-1': 0.95, '1-3': 0.95, '3-10': 0.95},
        {'merv': 17, '0.3-1': 0.9997, '1-3': 0.9997, '3-10': 0.9997},
        {'merv': 18, '0.3-1': 0.99997, '1-3': 0.99997, '3-10': 0.99997},
        {'merv': 19, '0.3-1': 0.999997, '1-3': 0.999997, '3-10': 0.999997},
        {'merv': 20, '0.3-1': 0.9999997, '1-3': 0.9999997, '3-10': 0.9999997},
    ]
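    # Look up the aerosol filtration efficiency for the chosen MERV rating,
    # using the particle-size bin that contains the assumed aerosol radius
    # (2 µm falls in the 1–3 µm column); eff is presumably consumed by the
    # filtration/recirculation loss term elsewhere in the app.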
    aerosol_radius = 2
    for item in merv_eff_dict:
        if item['merv'] == merv_value:
            if aerosol_radius < 1:
                eff = item['0.3-1']
            elif aerosol_radius < 3:
                eff = item['1-3']
            else:
                eff = item['3-10']

    ### Calculating first order loss rate
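    # First-order loss rate (h⁻¹): ventilation with outdoor air (b28, from the
    # sidebar) + viral decay (b29) + deposition to surfaces (b30) + additional
    # control measures (b31). The labels for the 0.62 and 0.3 constants are
    # inferred from the Jimenez-style estimator whose defaults they match.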
    b29 = 0.62
    b30 = 0.3
    b31 = 0
    b32 = b28 + b29 + b30 + b31

    ### Calculating ventilation rate per person
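    # Assuming e17 is the room volume in m³ (computed earlier): volume ×
    # (outdoor-air + added-control ACH), converted from m³/h to L/s, divided by
    # the number of people present.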
    b34 = e17 * (b28 + b31) * 1000 / 3600 / b38

    ### Calculating quanta
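    # Box-model / Wells-Riley style calculation:
    #   b66 – net quanta emission rate (quanta/h): source term reduced by
    #         exhalation mask efficiency (b52) and fit/compliance (b53), scaled
    #         by the number of infective people and the variant multiplier.
    #   b67 – time-averaged quanta concentration in the room (quanta/m³),
    #         from the first-order loss rate b32 and room volume e17.
    #   b68 – quanta inhaled by one susceptible: concentration × breathing rate
    #         (b47) × event duration (e24), reduced by inhalation mask
    #         efficiency (b54) and fit/compliance (b53).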
    b66 = b51 * (1 - (b52/100) * (b53/100)) * b39 * variant_multiplier
    b67 = b66/b32/e17 * (1-(1/b32/e24) * (1 - math.exp(-1 * b32 * e24)))
    b68 = b67 * b47 * e24 * (1 - (b54/100) * (b53/100))

    b71 = (1 - math.exp(-1 * b68)) * 100  # individual infection probability, in percent
    indiv_prob_formatted = '{:.4f}%'.format(b71)
    # binom.cdf expects a probability in [0, 1], not a percentage
    at_least_one_prob = (1 - stats.binom.cdf(0, suscept, b71 / 100)) * 100
    at_least_one_formatted = '{:.4f}%'.format(at_least_one_prob)
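    # Illustrative check (not executed): with an individual risk of 2% and 10
    # susceptible people, P(at least one infection) = 1 - 0.98**10 ≈ 18.3%.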
    
    st.markdown('## Overall Results')
    st.markdown('*This result will update live as you change parameters in the sidebar.*')
    
    people_option = st.radio("Scenario type",
                             ('If an infected person enters',
                              'Prevalence of infection'))
    st.markdown(f'Each susceptible individual has **{indiv_prob_formatted}** probability of getting infected')
    st.markdown(f'With **{suscept}** susceptible individuals, there is a  **{at_least_one_formatted}** probability that at least one person will get infected')

    # The matplotlib gauge is currently unused (its st.pyplot call is disabled),
    # so skip building it and show only the Plotly bullet chart below.
    #gauge_plot = mpl_gauge.gauge(labels=['Very Low','Low','Medium','High','Very High'],
    #               colors=['#2FCC71','#1F8449','#F4D03F','#F5B041','#C03A2B'],
    #               arrow=2, title='Risk Level')
    #st.pyplot(gauge_plot)
    bullet_plot = plotly_gauge.plotly_bullet(b71)
    bullet_plot.update_layout(height = 250)
    st.plotly_chart(bullet_plot, use_container_width=True, height=250)
    
    #b26 = st.number_input('Number of repetitions of event', value=26)
    #st.write(f'Probability of infection over {b26} repetitions:')


#    st.write('<style>body { margin: 0; font-family: Arial, Helvetica, sans-serif;} .footer{padding: 10px 16px; background: #555; color: #f1f1f1; position:fixed;bottom:0;} .sticky { position: fixed; bottom: 0; width: 100%;} </style><div class="footer sticky" id="sticky-footer"><i>Based on input parameters,</i><br/>Probability of infection: '+prob_formatted+'</div>', unsafe_allow_html=True)

#    with st.beta_expander(label='Intermediate Calculations'):
#        st.write(f'First order loss rate: {b32} h-1')
#        st.write(f'Ventilation rate per person: {b34} L/s/person')

    # io_df = pd.DataFrame([{'Room Area (sq. ft)':b15,
    #                      'Room Height (ft)':b16,
    #                      'Probability of Infection (%)': b71}])
    # st.table(io_df)

    # save_button = st.button('Add scenario to table')

    # saved_df = pd.DataFrame(columns=['Room Length (ft)','Room Width (ft)','Probability of Infection (%)'])
    # state = SessionState.get(saved_df = pd.DataFrame(columns=['Room Length (ft)','Room Width (ft)','Probability of Infection (%)']))
    # if save_button:
    #     state.saved_df = state.saved_df.append(io_df, ignore_index=True)
    # st.dataframe(state.saved_df)
    # st.markdown(get_table_download_link(state.saved_df), unsafe_allow_html=True)  
        
    st.markdown('## How this app works')
    with open('explanation.md','r') as explanation_md:
        explanation_text = explanation_md.read()
        st.markdown(explanation_text)
        st.markdown("[Back to top](#covid-19-aerosol-transmission-estimator)")

    st.markdown('## Preset scenarios')
    with open('scenarios.md','r') as scenarios_md:
        scenarios_text = scenarios_md.read()
        st.markdown(scenarios_text)
        st.markdown("[Back to top](#covid-19-aerosol-transmission-estimator)")            

    st.markdown('## How to use this app')
    with open('instructions.md', 'r') as instructions_md:
        instructions_text = instructions_md.read()
        st.markdown(instructions_text)
        st.markdown("[Back to top](#covid-19-aerosol-transmission-estimator)")
    
    with open('footer.md', 'r') as footer_md:
        footer_text = footer_md.read()
        st.markdown(footer_text)
Esempio n. 25
0
import streamlit as st

# Others
import base64
import json

# Modules of Possible Diagnosis
import os.path
import sys

sys.path.append('pages/')

from diagnosis_main import page as main_page
from rinite_alergica import writereadlists

##############################################################################
# st.set_page_config can only be called once and must be the first Streamlit
# command of the app.
st.set_page_config(page_title='ImmunoPlatform', layout="wide")
###############################################################################
def introduction_page(action):

    if action == 'ON':
        st.header('**IMMUNOPLATFORM - v1.0.0**')
        st.markdown('''
                    Welcome to ImmunoPlatform! \n
                    This platform supports consultations in the field of Immunoallergology
                    by providing interactive *widgets* that make it easier for the physician
                    to fill in the different variables for four distinct diagnoses: \n
                    - Allergic Respiratory Disease \n
                    - Chronic Urticaria \n
Esempio n. 26
0
from dateutil.parser import parse
import base64

import streamlit as st
from streamlit_folium import folium_static
import up42
import graphviz as graphviz
import geopandas as gpd

st.set_page_config(layout="centered",
                   initial_sidebar_state="expanded")  #,page_icon="./logo.png")
st.title("UP42 APP")

####### PROJECT ######

## Auth & project
st.sidebar.markdown(
    "Enter your [UP42 credentials](https://sdk.up42.com/authentication/):")
project_id = st.sidebar.text_input("Project ID",
                                   value='',
                                   max_chars=None,
                                   key=None,
                                   type='default')
project_apikey = st.sidebar.text_input("Project API KEY",
                                       value='',
                                       max_chars=None,
                                       key=None,
                                       type='password')

if not project_id or not project_apikey:
    st.sidebar.warning('Please input your credentials.')
Esempio n. 27
0
import streamlit as st

input_config = 'config.json'

colors = [
    '#7aecec', '#aa9cfc', '#feca74', '#bfe1d9', '#c887fb', '#e4e7d2',
    '#905829', '#dfcd62', '#e8d53d', '#f0c88b', '#b68282', '#799fb2',
    '#c3b489', '#bf9a81', '#a592ae', '#e5aef9', '#f69419', '#a36b42',
    '#e5c3a6', '#4fc6b4', '#d9e69a', '#f76b6b', '#e8d53d', '#61c861',
    '#65a4d9', '#b8ff57', '#779987', '#f69419'
]

st.set_page_config(
    page_title="Object Storage Statistical Scanner App",
    page_icon=":shark:",
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'About':
        "# S3 Statistical File Scanner. This is an *extremely* cool app!"
    })

st.title("Cortx Statistical File Scanner")
st.sidebar.write("Enter Cortx S3 Credentials")
form = st.sidebar.form("aws_credentials")

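# st.session_state persists values across Streamlit reruns; create these keys
# once so they are not reset on every widget interaction.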
if 'job_ids' not in st.session_state:
    st.session_state.job_ids = []

if 'person_entity_data' not in st.session_state:
    st.session_state.person_entity_data = []
Esempio n. 28
0
import streamlit as st
import pandas as pd
import pydeck as pdk

# Helper functions used below (wwConfirmedDataCollection, dataMassaging, etc.)
# are assumed to be defined elsewhere in this project.


def main():
    st.set_page_config(layout="wide")
    st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',
             unsafe_allow_html=True)

    confirmed_df, death_df, recovery_df = wwConfirmedDataCollection()
    st.title("Covid-19 Pandemic Data Visualization")
    displayRawData(confirmed_df, death_df, recovery_df)
    confirmed_df, death_df, recovery_df = dataMassaging(
        confirmed_df, death_df, recovery_df
    )
    full_table = mergeDataAndDataCorrection(confirmed_df, death_df, recovery_df)

    st.write('\nData from "CSSEGISandData POST data massaging"')
    
    user_selectionbox_input = st.selectbox(
        "Select an option", ["Global", "Select from list of countries"]
    )
    min_date_found = full_table["date"].min()
    max_date_found = full_table["date"].max()

    selected_date = st.date_input(
        "Pick a date",
        (min_date_found, max_date_found)
    )
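    # With a (start, end) default, st.date_input returns a tuple that may hold
    # only one value while the user is still picking the second date, so only
    # proceed once both dates are selected.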
    if len(selected_date) == 2:
        
        if user_selectionbox_input == "Select from list of countries":
            full_table = full_table[(full_table['date'] >= selected_date[0]) & (full_table['date'] <= selected_date[1])]
            
            # full_table = full_table[full_table["date"] == (between(selected_date[0], selected_date[1]))]
            list_of_countries = full_table["location"].unique()
            selected_country = st.selectbox("Select country", list_of_countries)

            mask_countries = full_table["location"] == (selected_country)
            full_table = full_table[mask_countries]

            # Adding new cases to the table for graphing
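            # DataFrame.diff(1) gives the day-over-day change; fillna(0) covers
            # the first row, which has no previous day to compare against.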
            full_table["new_confirmed"] = full_table["confirmed"].diff(1).fillna(0)
            full_table["new_recovered"] = full_table["recovered"].diff(1).fillna(0)
            full_table["new_deaths"] = full_table["deaths"].diff(1).fillna(0)
            

            user_input = st.selectbox(
                "Select an option", ["Total Number of Cases", "New Cases Per Day"]
            )
            st.write(full_table)
            if user_input == "New Cases Per Day":
                source = pd.DataFrame(full_table, columns=["date", "new_confirmed", "new_recovered", "new_deaths"])
                title = f"New Cases Per Day for {selected_country}"
            else:
                source = pd.DataFrame(
                    full_table, columns=["date", "confirmed", "deaths", "recovered"]
                )
                title = f"Total reported cases for {selected_country}"
            
            st.altair_chart(altairLineChartGraphing(title, source), use_container_width=True)    

        else:
            full_table = full_table[full_table["date"] == selected_date[1]]
            confirmed_source = pd.DataFrame(full_table, columns=["location", "lat", "lon", "confirmed"])
            

            #Readable values
            confirmed_source["confirmed_readable"] = confirmed_source["confirmed"].apply(human_format)
            display_confirmed_source = pd.DataFrame(confirmed_source, columns=["location", "lat", "lon", "confirmed_readable"]).reset_index(drop=True)
            display_confirmed_source = display_confirmed_source.rename(columns={"confirmed_readable": "confirmed"})
            st.dataframe(display_confirmed_source)

            INITIAL_VIEW_STATE = pdk.ViewState(
                latitude=55.3781,
                longitude=-3.436,
                zoom=1,
                pitch=25,
            )

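            # ColumnLayer draws one extruded column per row, positioned at
            # (lon, lat) and scaled in height by the confirmed-case count.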
            column_layer = pdk.Layer(
                "ColumnLayer",
                data=confirmed_source,
                get_position=["lon", "lat"],
                radius=50000,
                get_elevation="confirmed",
                elevation_scale=0.25,
                get_fill_color=["255,255, confirmed*.01"],
                get_line_color=[255, 255, 255],
                filled=True,
                pickable=True,
                extruded=True,
                auto_highlight=True,
            )
            TOOLTIP = {
                "html": "{location}<br> <b>{confirmed_readable}</b> Confirmed Cases",
                "style": {
                    "background": "grey",
                    "color": "white",
                    "font-family": '"Helvetica Neue", Arial',
                    "z-index": "10000",
                },
            }

            r = pdk.Deck(
                column_layer,
                map_style="mapbox://styles/mapbox/satellite-streets-v11",
                map_provider="mapbox",
                initial_view_state=INITIAL_VIEW_STATE,
                tooltip=TOOLTIP,
            )
            st.write("## Total Number of Confirmed Cases All Time")
            st.pydeck_chart(r)
    else:
        st.write("Select Valid Dates to continue")
Esempio n. 29
0
import streamlit as st
import utils
import pickle
import sys

model = pickle.load(open("phishing_site_detector.sav", "rb"))

st.set_page_config(page_title="Phishing Website Detector")
st.title("Phishing Website Detector")

st.header(
    "App that detects whether a given website URL is legitimate or a phishing site"
)
st.subheader(
    "Phishing is one of the major problems faced by the cyber-world and leads to financial losses for both industries and individuals. Detecting phishing attacks with high accuracy has always been a challenging issue. One of the effective ways of checking whether a website is legitimate or a scam is by examining its URL. This detector uses an ML model to check if the given URL is legitimate."
)

st.text("")

user_input_url = st.text_input(label="Enter URL to check:")

if st.button("Predict"):

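    # Strip the URL scheme before prediction; the feature extraction in utils
    # presumably expects scheme-less URLs.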
    is_http_present = user_input_url.find("http://", 0, 7)
    is_https_present = user_input_url.find("https://", 0, 8)

    if is_http_present != -1:
        user_input_url = user_input_url.replace("http://", "", 1)

    if is_https_present != -1:
        user_input_url = user_input_url.replace("https://", "", 1)
Esempio n. 30
0
import streamlit as st
from PIL import Image
import os
import requests

STYLES = {
    "unet": "U-net-efficientnet",
    "featurepyramidnetwork": "FPN-efficientnet",
    "linknet": "LinkedNet",
}

HOST = "http://154.61.75.187:5000/"

st.set_option("deprecation.showfileUploaderEncoding", False)

st.set_page_config(
    page_title="Segmentor",  # default page title
    initial_sidebar_state="expanded"  # "expanded", "collapsed"
)

st.title("Image Segmentation Tool")
try:
    # Probe the backend and record the real HTTP status instead of assuming 200.
    response = requests.get(HOST)
    status_check = response.status_code
except requests.exceptions.ConnectionError:
    status_check = 0
    print("backend offline")

if status_check == 200:
    print('Backend Server was online')
    st.text(
        'This tool is used to compare 3 segmentation models mentioned below')
    st.text(