Example No. 1
    def __init__(self):
        # Emblem Visualization
        self.emblem = "None"
        self.emblem_amount = 0
        self.emblem_level = 0

        # Type of Emblem
        self.normal_emb = 0
        self.partial_emb = 0
        self.unique_acc_emb = 0
        self.legendary_acc_emb = 0

        # Emblem Stats
        self.emblem_cd = 0
        self.emblem_batk = 0
        self.emblem_atkp = 0

        # SF Stats
        self.sf = 0

        # Equipment Type, Stat & Rank
        self.type = "None"
        self.stat = "None"
        self.stat_amount = 0
        self.rank = "None"

        # Offensive Stats
        self.atk = 0
        self.atkp = 0
        self.dmg = 0
        self.batk = 0
        self.platk = 0
        self.cr = 0
        self.cratk = 0
        self.cd = 0
        self.maxdmg = 0
        self.fd = 0

        # Defensive Stats
        self.pdef = 0
        self.pdefinc = 0
        self.pdefdec = 0
        self.mdef = 0
        self.mdefinc = 0
        self.mdefdec = 0
        self.bdef = 0
        self.pldef = 0
        self.critres = 0
        self.critdmgres = 0

        # Hit Miss Stats
        self.acc = 0
        self.accp = 0
        self.evd = 0
        self.evdp = 0
        self.penrate = 0
        self.block = 0
        self.abnormalstatres = 0
        self.ignore = 0

        # HP MP Stats
        self.hp = 0
        self.mp = 0
        self.hpinc = 0
        self.mpinc = 0
        self.hprec = 0
        self.mprec = 0
        self.hprecp = 0
        self.mprecp = 0
        self.hppotionrecp = 0
        self.mppotionrecp = 0
        self.buffdurationinc = 0

        # Mobility Stats
        self.spd = 0
        self.jmp = 0
        self.kbkres = 0

        # Misc Stats
        self.exp = 0
        self.dr = 0
        self.meso = 0
        self.glincrease = 0
        self.partyexp = 0
        self.feverchargeinc = 0
        self.feverduration = 0
        self.maxfeverchance = 0

        # Shadow Partner Stats
        self.spmulti = 0

        # Set Stats
        self.mempsetcount = 0
        self.aempsetcount = 0
        self.necrosetcount = 0
        self.fafsetcount = 0
        self.bosssetcount = 0
        self.commandersetcount = 0

        # Flame Stats
        self.atklinecount = 0
        self.crlinecount = 0
        self.cdlinecount = 0

        with st.beta_expander("Pet Set"):
            _, pet1, _ = st.beta_columns([0.02, 0.96, 0.02])
            pet_type = st.selectbox("Choose a Combination",
                                    ["M Label", "Wonder Black"])
            self.type = pet_type
            if "Wonder Black" in [pet_type]:
                self.atk += 20
                self.accp += 3
                self.cr += 3
            elif "M Label" in [pet_type]:
                self.atk += 700
                self.accp += 3
                self.batk += 3
                self.bdef += 3
                self.dmg += 3
                self.cr += 3
                self.cd += 3
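The same pattern in isolation, as a minimal sketch: a selectbox inside an expander driving a lookup. Assumes Streamlit >= 0.86, where beta_expander/beta_columns became st.expander/st.columns; the bonus table is a made-up stand-in for the stat updates above.

import streamlit as st

with st.expander("Pet Set"):
    pet_type = st.selectbox("Choose a Combination", ["M Label", "Wonder Black"])
    bonus_atk = {"Wonder Black": 20, "M Label": 700}[pet_type]  # hypothetical stand-in
    st.write(f"Attack bonus: {bonus_atk}")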
Example No. 2
    elif val is False and logo == 'Shaped':
        word = WordCloud(width=wid, height=hei, margin=0, background_color=bc, mask=ma, contour_width=cd,
                         contour_color=cd_c).generate(txt)
    elif val is False and logo == 'Simple':
        word = WordCloud(width=wid, height=hei, margin=0, background_color=bc).generate(txt)
    else:
        word = WordCloud(width=wid, height=hei, margin=0, background_color=bc,
                         color_func=lambda *args, **kwargs: c).generate(txt)
    word.to_file('wordcld.png')
    st.subheader("\nHere is your wordcloud :")
    image = Image.open('wordcld.png')
    st.image(image, caption='Your customized wordcloud')
    st.markdown("____")

# About Us Section
information = st.beta_expander("About Us", False)
information_html = """
    <style>
    div {
        font-family: "Gill Sans", sans-serif; 
        font-size: medium;
        color: #92B2DF;
    }
    img {
        float: right;
    }
    </style>
    <div>We are INPT first-year students:
        <ul>
            <li><a>Chadli Wiam</a></li>
            <li><a>Zoufir Zineb</a></li>
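A minimal sketch of the WordCloud-to-Streamlit handoff from this example, skipping the wordcld.png round trip; it assumes WordCloud.to_array() (which returns an RGB array st.image can render), and the text is a placeholder.

import streamlit as st
from wordcloud import WordCloud

# to_array() avoids writing and re-reading an intermediate PNG.
wc = WordCloud(width=400, height=200, background_color='white').generate('hello world hello streamlit')
st.image(wc.to_array(), caption='Your customized wordcloud')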
Example No. 3
import oect_processing as oectp
from oect_processing.oect_utils import oect_plot
from oect_processing.oect_utils.oect_load import uC_scale

# sys.path.insert(0, os.path.abspath('..'))
# os.chdir('..')
# os.chdir('..')

TEST_DATA = r'oect_processing/notebooks/test_data_manufactured'

st.set_page_config(page_title='OECT Processing')
st.title('OECT processing')
st.header('Rajiv Giridharagopal, Ph.D.')
st.subheader('University of Washington, [email protected]')

with st.beta_expander('Quick Guide'):
    st.write('''
    On the sidebar, enter the Windows path (copy-pasted from Explorer) into "Device Folder".
    The drop-down menu processes individual folders of data. In the third box, select the pixels you want to use for calculating $\mu_C*$;
    you need at least two pixels. The thickness values are taken from the .cfg files in each folder; you can override them below.
    Lastly, if you scroll down, you can right-click any graph and save it locally.''')

st.sidebar.header('Load data')


# device = st.sidebar.file_uploader('Select files', accept_multiple_files=True, help='Select all files for a given device')


def file_selector(folder_path=TEST_DATA):
    filenames = []
    for name in os.listdir(folder_path):
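The listing cuts file_selector off here; below is a hedged sketch of how such a helper is typically finished. The directory filter and widget label are assumptions for illustration, not the author's code.

import os
import streamlit as st

def file_selector_sketch(folder_path='.'):
    # Hypothetical completion: list subfolders and offer them in a selectbox.
    names = [n for n in os.listdir(folder_path)
             if os.path.isdir(os.path.join(folder_path, n))]
    return st.selectbox('Select a folder', names)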
Example No. 4
import streamlit as st
import pandas as pd
import numpy as np


cols = st.beta_columns([1, 3, 1])

cols[1].title("World Unit of Account")


st.write("""
    ## NOT Currency, NOT Money, ONE Unit of Account for All
    """)

# About
expander_bar = st.beta_expander("About")
expander_bar.markdown("""
* **Algorithmic Unit of Account:** Unlike all conventional units of account, WUA uses an algorithm to derive its conversions into currencies.
* **Data source:** [Uniswap](https://uniswap.org) and [Chainlink](https://chain.link). More sources are being considered.
* We are excited to see online native currencies build an open-source, distributed financial realm. We have developed an algorithmic unit of account that best represents the current state of trust in world currencies, taking into account current population and life-expectancy metrics.
""")


hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
Example No. 5
def exp_data(country, data):
    expander = st.beta_expander(f'{country} Analysis')
    # Compute the two gender shares once, instead of inline in the f-string.
    total = data.loc[0]["Number"] + data.loc[1]["Number"]
    pct_0 = round(data.loc[0]["Number"] / total * 100, 1)
    pct_1 = round(data.loc[1]["Number"] / total * 100, 1)
    expander.write(
        f'{country} has {pct_0}% {data.loc[0]["Gender"]} and '
        f'{pct_1}% {data.loc[1]["Gender"]} membership.'
    )
Example No. 6
def main():
    """
    Main is responsible for the visualisation of everything connected with Streamlit.
    It is the web application itself.
    """

    # # Radiobuttons in one row
    # st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)

    # Sets sidebar's header and logo
    sidebar.sidebar_head()

    #
    # # Spectrometer type - BWTek / Renishaw / WITec / Wasatch / Teledyne / Jobin
    #

    spectra_types = [
        'EMPTY', 'BWTEK', 'RENI', 'WITEC', 'WASATCH', 'TELEDYNE', 'JOBIN'
    ]
    spectrometer = st.sidebar.selectbox("Choose spectra type",
                                        spectra_types,
                                        format_func=LABELS.get,
                                        index=0)

    # sidebar separating line
    sidebar.print_widgets_separator()

    # User data loader
    # sidebar.print_widget_labels('Upload your data or try with ours', 10, 0)

    files = st.sidebar.file_uploader(label='Upload your data or try with ours',
                                     accept_multiple_files=True,
                                     type=['txt', 'csv'])

    # Allow example data loading when no custom data are loaded
    if not files:
        if st.sidebar.checkbox("Load example data"):
            if spectrometer == "EMPTY":
                st.sidebar.error('First choose a spectra type')
            else:
                files = utils.load_example_files(spectrometer)

    # Check if data loaded, if yes, perform actions
    delim = None
    if files:
        st.spinner('Uploading data in progress')
        # sidebar separating line
        sidebar.print_widgets_separator()

        from detect_delimiter import detect
        new_files = []
        for file in files:
            file.seek(0)
            lines = file.readlines()

            try:
                lines = [line.decode('utf-8') for line in lines]
            except AttributeError:
                pass

            # lines = str.splitlines(str(text))  # .split('\n')
            # Detect the delimiter from the first 20 lines, then keep only
            # rows whose delimiter count matches the second-to-last data row,
            # dropping malformed header/footer lines.
            first_lines = '\n'.join(lines[:20])

            delim = detect(first_lines)
            colnum = lines[-2].count(delim)

            lines = [i for i in lines if i.count(delim) == colnum]
            text = '\n'.join(lines)
            buffer = io.StringIO(text)
            buffer.name = file.name
            new_files.append(buffer)

        try:
            df = save_read.read_files(spectrometer, new_files, delim)
        except (TypeError, ValueError):
            st.error('Try choosing another type of spectra')
            st.stop()

        main_expander = st.beta_expander("Customize your chart")
        # Choose plot colors and templates
        with main_expander:
            plots_color, template = vis_utils.get_chart_vis_properties()

        # Select chart type
        chart_type = vis_opt.vis_options()

        # sidebar separating line
        sidebar.print_widgets_separator()

        # Select data conversion type
        spectra_conversion_type = vis_opt.convertion_opt()

        # TODO need improvements
        # getting rid of duplicated columns
        df = df.loc[:, ~df.columns.duplicated()]

        #
        # # data manipulation - raw / optimization / normalization
        #

        # TODO delete if not needed
        # Normalization
        # if spectra_conversion_type == LABELS["NORM"]:
        #     df = (df - df.min()) / (df.max() - df.min())

        # Mean Spectra
        if chart_type == 'MS':
            df = df.mean(axis=1).rename('Average').to_frame()

        # columns in main view. Chart, expanders
        # TODO: solve this more cleverly
        normalized = False
        col_left, col_right = st.beta_columns([5, 2])
        if spectra_conversion_type != "RAW":
            col_right = col_right.beta_expander("Customize spectra",
                                                expanded=False)
            with col_right:
                vals = data_customisation.get_deg_win(chart_type,
                                                      spectra_conversion_type,
                                                      df.columns)
                if st.checkbox("Data Normalization"):
                    normalized = True
                    df = (df - df.min()) / (df.max() - df.min())
                else:
                    normalized = False

        # For grouped spectra we sometimes want to shift the spectra apart; that happens here:
        with main_expander:
            # TODO the code below needed?
            # trick to better fit sliders in expander
            # _, main_expander_column, _ = st.beta_columns([1, 38, 1])
            # with main_expander_column:

            shift_col, _, trim_col = st.beta_columns([5, 1, 5])
            with shift_col:
                if chart_type == 'GS':
                    shift = data_customisation.separate_spectra(normalized)
                elif chart_type == 'SINGLE':
                    col = st.selectbox('spectrum to plot', df.columns)
                    df = df[[col]]
                else:
                    shift = None
            with trim_col:
                df = vis_utils.trim_spectra(df)

        # data conversion end
        if spectra_conversion_type in {'OPT'}:
            baselines = pd.DataFrame(index=df.index)
            baselined = pd.DataFrame(index=df.index)
            flattened = pd.DataFrame(index=df.index)
            for col in df.columns:
                baselines[col] = peakutils.baseline(df[col], vals[col][0])
                baselined[col] = df[col] - baselines[col]
                flattened[col] = baselined[col].rolling(window=vals[col][1],
                                                        min_periods=1,
                                                        center=True).mean()

        #
        # # Plotting
        #

        # Grouped spectra
        if chart_type == 'GS':
            shifters = [(i + 1) * shift for i in range(len(df.columns))]
            plot_df = df if spectra_conversion_type == 'RAW' else flattened
            plot_df = plot_df + shifters

            figs = [
                px.line(plot_df,
                        x=plot_df.index,
                        y=plot_df.columns,
                        color_discrete_sequence=plots_color)
            ]

        # Mean spectra
        elif chart_type == 'MS':
            if spectra_conversion_type == 'RAW':
                plot_df = df
                figs = [
                    px.line(plot_df,
                            x=plot_df.index,
                            y=plot_df.columns,
                            color_discrete_sequence=plots_color)
                ]

            elif spectra_conversion_type in {'OPT'}:
                columns = [
                    'Average', 'Baseline', 'BL-Corrected',
                    'Flattened + BL-Corrected'
                ]
                plot_df = pd.concat([df, baselines, baselined, flattened],
                                    axis=1)
                plot_df.columns = columns

                fig1 = px.line(plot_df,
                               x=plot_df.index,
                               y=columns[-1],
                               color_discrete_sequence=plots_color[3:])
                fig2 = px.line(plot_df,
                               x=plot_df.index,
                               y=plot_df.columns,
                               color_discrete_sequence=plots_color)
                figs = [(fig1, fig2)]
            else:
                raise ValueError(
                    'Unknown conversion type for Mean spectrum chart')
        # 3D spectra
        elif chart_type == 'P3D':
            plot_df = flattened if spectra_conversion_type in {"OPT"} else df

            plot_df = plot_df.reset_index().melt('Raman Shift',
                                                 plot_df.columns)
            fig = px.line_3d(plot_df,
                             x='variable',
                             y='Raman Shift',
                             z='value',
                             color='variable')

            camera = dict(eye=dict(x=1.9, y=0.15, z=0.2))
            fig.update_layout(
                scene_camera=camera,
                width=1200,
                height=1200,
                margin=dict(l=1, r=1, t=30, b=1),
            )
            figs = [fig]

        # Single spectra
        elif chart_type == 'SINGLE':
            if spectra_conversion_type == 'RAW':
                plot_df = df
                figs = [
                    px.line(plot_df[col], color_discrete_sequence=plots_color)
                    for col in plot_df.columns
                ]
            else:
                columns = [
                    'Average', 'Baseline', 'BL-Corrected',
                    'Flattened + BL-Corrected'
                ]
                figs = []

                plot_df = pd.concat([df, baselines, baselined, flattened],
                                    axis=1)
                plot_df.columns = columns

                fig1 = px.line(plot_df,
                               x=plot_df.index,
                               y=columns[-1],
                               color_discrete_sequence=plots_color[3:]
                               )  # trick for color consistency
                fig2 = px.line(plot_df,
                               x=plot_df.index,
                               y=plot_df.columns,
                               color_discrete_sequence=plots_color)
                fig_tup = (fig1, fig2)
                figs.append(fig_tup)
        else:
            raise ValueError("Something unbelievable has been chosen")

        with col_left:
            charts.show_charts(figs, plots_color, template)

        with col_left:
            st.markdown('')
            link = utils.download_button(plot_df.reset_index(),
                                         f'spectrum.csv',
                                         button_text='Download CSV')
            st.markdown(link, unsafe_allow_html=True)

    else:
        manual.show_manual()

    authors.show_developers()
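The 'OPT' branch above in miniature, on synthetic data: fit a polynomial baseline with peakutils, subtract it, then smooth with a centered rolling mean. A sketch assuming only peakutils, numpy, and pandas.

import numpy as np
import pandas as pd
import peakutils

# Toy spectrum: a sine riding on a drifting baseline.
y = pd.Series(np.sin(np.linspace(0, 6, 200)) + np.linspace(0, 2, 200))
baseline = pd.Series(peakutils.baseline(y.values, deg=3), index=y.index)
baselined = y - baseline
flattened = baselined.rolling(window=5, min_periods=1, center=True).mean()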
Example No. 7
net = PatchNet(input_size=TRAINING_IMAGE_SIZE, kernel_size=KERNEL_SIZE)
optimizer = optim.AdamW(params=net.parameters(), lr=0.03)

# with st.beta_expander("FAST AND ROBUST IMAGE STYLETRANSFER AND COLORIZATION", expanded=True):
#     # header1 = st.write('## FAST AND ROBUST IMAGE STREETCARS AND COLORIZATION')
#     header2 = st.markdown('#### by providing input and output example image pairs and by using similarity search')
#     header3 = st.markdown('##### Transfer the style of images by providing input and output example images.')
#     header4 = st.markdown('##### Colorize images by providing black-white or grayscale input and colored output example images(like grayscale photo as input example and colored photo as output example for training)')

# video_file = open('tutorial.webm', 'rb')
# video_bytes = video_file.read()
# st.video(video_bytes)

with st.beta_expander(
        "LEARN A SINGLE PATTERN FROM ONE IMAGE OR MULTIPLE IMAGES THAT WILL REPRESENT THAT PARTICULAR IMAGE OR IMAGE CLASS",
        expanded=True):
    pass

col1_1, col1_2 = st.beta_columns(2)
input_ph = st.empty()
train_int_col, train_out_col = st.beta_columns(2)
input_col, output_col = st.beta_columns(2)
output_col = output_col.empty()
rand_input_col, rand_output_col = st.beta_columns(2)
loss_ph = st.empty()

uploaded_file = input_ph.file_uploader("Choose input image",
                                       type=['png', 'jpg'])

if uploaded_file is not None:
Example No. 8
st.sidebar.write(
    "[Sambhavi Dhanabalan](https://www.linkedin.com/in/sambhavi-dhanabalan/)")

st.sidebar.title("Tools used")
st.sidebar.write(
    ":one: Pytorch :two: Google Colab Pro :three: Streamlit  :four: FastAPI :five: Heroku"
)

st.sidebar.title("Git links")
st.sidebar.write(
    "[Streamlit App](https://github.com/SambhaviPD/droughtwatch/tree/master)")
st.sidebar.write(
    "[Training Notebooks](https://github.com/SambhaviPD/droughtwatch/tree/main)"
)

expander = st.beta_expander("1. Introduction")
expander.write(
    ":trophy: Public benchmarks in Weights & Biases encourage collaboration from the community on a variety of problem statements. The one I worked on as part of FSDL's final course project was Drought Watch. The intent of this project is to predict drought severity from satellite imagery and ground-level photos. Complete details about the project from Weights & Biases can be found here: [Drought Watch](https://arxiv.org/pdf/2004.04081.pdf)"
)

expander = st.beta_expander("2. Project Goals")
expander.write(
    ":up: 77.8 is the top accuracy on the project's leaderboard. Surpassing it is my primary goal; doing so requires an initial understanding of satellite imagery, followed by the right model architecture."
)
expander.write(
    ":rain_cloud: A secondary goal is to isolate images obscured by clouds. Cloud-detection algorithms need to be studied and applied to the input data set. If I am able to isolate them successfully, I then need to study what impact that has on improving the accuracy."
)

expander = st.beta_expander("3. Stretch Goals")
expander.write(
    ":clipboard: If I am able to enter the leaderboard, submitting a report to W&B is a goal that I'd planned for."
Example No. 9
st.subheader("How quickly do we merge PRs?")
c = alt.Chart(df[df.is_pull_request.eq(True)]).mark_circle().encode(
    x='created_at', y='hrs_until_issue_closed', size='hrs_until_issue_closed',
    tooltip=['number', 'created_at', 'hrs_until_issue_closed'],
    href='url')
st.altair_chart(c, use_container_width=True)

# Month by month average
c = alt.Chart(df[df.is_pull_request.eq(True)]).mark_bar().encode(
    x='yearmonth(created_at):O', y='average(hrs_until_issue_closed)')
st.altair_chart(c, use_container_width=True)


# Could I pull in issue labels?
st.subheader("How quickly do we close issues?")

c = alt.Chart(df).mark_circle().encode(
    x='created_at', y='hrs_until_issue_closed',
    tooltip=['number', 'created_at', 'hrs_until_issue_closed', 'comments_count'],
    href='url')
st.altair_chart(c, use_container_width=True)

with st.beta_expander("Raw data"):
    df

# TODO:
# . PR turnaround time
# . PR comment turnaround time
# . average PRs per week
# . LOC changed per PR
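The month-by-month average above in isolation, with made-up data; Altair's yearmonth() timeUnit bins the timestamps and average() aggregates within each bin.

import altair as alt
import pandas as pd
import streamlit as st

df = pd.DataFrame({
    'created_at': pd.to_datetime(['2021-01-05', '2021-01-20', '2021-02-10']),
    'hrs_until_issue_closed': [4, 10, 7],
})
c = alt.Chart(df).mark_bar().encode(x='yearmonth(created_at):O',
                                    y='average(hrs_until_issue_closed):Q')
st.altair_chart(c, use_container_width=True)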
Example No. 10
import streamlit as st
import time

st.title('Streamlit: A Quick Introduction')
st.write('Displaying a progress bar')
'Start!!'

latest_iteration = st.empty()
bar = st.progress(0)

for i in range(100):
    latest_iteration.text(f'Iteration {i+1}')
    bar.progress(i + 1)
    time.sleep(0.1)

'Done!!'

left_column, right_column = st.beta_columns(2)
button = left_column.button('Show text in the right column')

if button:
    right_column.write('This is the right column.')

expander = st.beta_expander('Contact')
expander.write('Write your inquiry here')
Example No. 11
                    'id': gpu_idx,
                    'mem_used': gpu_mem_used,
                    'mem_total': gpu_mem_total
                })
            gpu_df = pd.DataFrame(gpu_df_records)
            st.table(gpu_df)

    # Show CPU stats.
    if st.checkbox('show CPUs and RAM'):
        st.write(psutil.virtual_memory().available / 1024 / 1024 / 1024)  # available RAM in GiB
        st.write(psutil.cpu_count())
        st.write(psutil.cpu_percent())

    # Tensorboard stats.
    ports_to_use = list(range(8701, 8710))  # Ports 8701-8709.
    if st.beta_expander("Tensorboard"):
        output = subprocess.run('pgrep -u $(whoami) tensorboard',
                                shell=True,
                                text=True,
                                capture_output=True)

        def get_pid_info(pid: int) -> Tuple[int, str]:
            port = subprocess.run(
                f"ss -lp | grep pid={pid} | awk '{{print $5}}' | awk -F ':' '{{print $2}}'",
                shell=True,
                capture_output=True,
                text=True).stdout
            cmd = subprocess.run(f'ps -p {pid} -o args | tail -n 1',
                                 capture_output=True,
                                 text=True,
                                 shell=True).stdout
Example No. 12
    radiologist_labels_df)
hospital_records_df, hospital_records_logging = hospital_records_cleaning(
    hospital_records_df)

st.write("""
The following metrics are provided after selecting a single row to represent _perfect_ duplicates (i.e. multiple 
rows with identical values accross columns) and the removal of rows with different data but the same accession numbers. 
This is based on the assumption that there is no way to confidently determine what data should be kept and which removed. 
""")

# left_column, right_column = st.beta_columns(2)
# pressed = left_column.button('Filter NAs')
# if pressed:
#     pas

data_cleaning = st.beta_expander("Data Checks & Cleaning")
data_cleaning.write(f"""
* Length of model-outputs before any cleaning **{model_outputs_logging[0]}**. \n
* Length of model-outputs after removing _perfect_ duplicates **{model_outputs_logging[1]}**. \n
* Length of model-outputs after removing corrupt data (duplicate accession numbers) **{model_outputs_logging[2]}**. \n 
* Length of radiologist-labels before any cleaning **{radiologist_labels_logging[0]}**. \n
* Length of radiologist-labels after removing _perfect_ duplicates **{radiologist_labels_logging[1]}**. \n
* Length of radiologist-labels after removing corrupt data (duplicate accession numbers) **{radiologist_labels_logging[2]}**. \n
* Length of hospital-records before any cleaning **{hospital_records_logging[0]}**. \n
* Length of hospital-records after removing _perfect_ duplicates **{hospital_records_logging[1]}**. \n
* Length of hospital-records after removing corrupt data (duplicate accession numbers) **{hospital_records_logging[2]}**. \n
""")


def select_institute(df, institute):
    if institute != 'All':
Example No. 13
st.set_option('deprecation.showPyplotGlobalUse', False)

st.markdown('''
# <img style="float: left;" src="https://mario.wiki.gallery/images/thumb/a/a6/Super_Mushroom_Artwork_-_Super_Mario_3D_World.png/1200px-Super_Mushroom_Artwork_-_Super_Mario_3D_World.png" width = 60>   The [mushroom dataset](https://archive.ics.uci.edu/ml/datasets/Mushroom) <img style="float: right;" src="https://i.pinimg.com/originals/a8/ff/3e/a8ff3ed1011dbabc869ab8ea401ace4e.png" width=80>
''', unsafe_allow_html=True)

df_mush = pd.read_csv('mushrooms.csv')

selected_view = st.selectbox(
    'Would you like to explore the data or do some modeling?',
    ('Let\'s explore!', 'Model away!')
)

if selected_view == 'Let\'s explore!':

    with st.beta_expander("Show intro?"):
        st.markdown('''
    The [mushroom data set](https://archive.ics.uci.edu/ml/datasets/Mushroom) was contributed to the UCI Machine Learning Repository over 30 years ago. From the authors' description:

    > This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. **The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like "leaflets three, let it be" for Poisonous Oak and Ivy.**

    Each mushroom is characterized by 22 distinct features:

    * cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s
    * cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s
    * cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y
    * bruises: bruises=t,no=f
    * odor: almond=a,anise=l,creosote=c,fishy=y,foul=f,musty=m,none=n,pungent=p,spicy=s
    * gill-attachment: attached=a,descending=d,free=f,notched=n
    * gill-spacing: close=c,crowded=w,distant=d
    * gill-size: broad=b,narrow=n
Example No. 14
                      key="sel1")

'You selected: ', option

option = st.sidebar.selectbox('Which number do you like best?',
                              df['first column'],
                              key="sel2")

'You selected: ', option

left_column, right_column = st.beta_columns(2)
pressed = left_column.button('Press me?')
if pressed:
    right_column.write("Woohoo!")

expander = st.beta_expander("FAQ")
expander.write(
    "Here you could put in some really, really long explanations...")

''
text_label = st.empty()
text_label.text('Starting a long computation...')
# Add a placeholder
latest_iteration = st.empty()
bar = st.progress(0)
for i in range(100):
    # Update the progress bar with each iteration.
    latest_iteration.text(f'Iteration {i+1}')
    bar.progress(i + 1)
    time.sleep(0.1)
text_label.text('...and now we\'re done!')
Example No. 15
def page_work(state_container, page_flip: bool):
    '''The main workhorse routine for the Xplore page'''

    if not state_container.xploreSessionState:
        state_container.xploreSessionState = XploreSessionState()
        state = state_container.xploreSessionState
        state.columns = ['default']
    else:
        state = state_container.xploreSessionState

    url_params = st.experimental_get_query_params()
    page = url_params.pop('page', '')
    if get_title() in page:
        if url_params and any(url_params.values()):
            for key in url_params:
                if key == 'columns':
                    # This needs to be a list
                    continue
                val = url_params.get(key, '')
                if isinstance(val, list):
                    val = val[0]
                    url_params[key] = val
                if key == '':
                    if val == 'True':
                        url_params[key] = True
                    else:
                        url_params[key] = False
            state.__init__(**url_params)

    sqobjs = state_container.sqobjs
    # All the user input is preserved in the state vars
    xplore_sidebar(state, sqobjs)

    if state.table != "tables":
        df = gui_get_df(sqobjs[state.table],
                        _table=state.table,
                        namespace=state.namespace.split(),
                        hostname=state.hostname.split(),
                        start_time=state.start_time,
                        end_time=state.end_time,
                        view=state.view,
                        columns=state.columns)
        if state.table == "device" and 'uptime' in df.columns:
            df.drop(columns=['uptime'], inplace=True)
    else:
        df = gui_get_df(sqobjs[state.table],
                        _table=state.table,
                        namespace=state.namespace.split(),
                        hostname=state.hostname.split(),
                        start_time=state.start_time,
                        end_time=state.end_time,
                        view=state.view)

    query_str = ''
    if not df.empty:
        if 'error' in df.columns:
            st.error(df.iloc[0].error)
            st.experimental_set_query_params(**asdict(state))
            st.stop()
        if state.query:
            try:
                show_df = df.query(state.query)
                query_str = state.query
            except Exception:
                st.warning('Query string throws an exception, ignoring')
                show_df = df
                query_str = ''
        else:
            show_df = df
    else:
        show_df = df

    if state.table != "tables":
        summ_df = xplore_run_summarize(sqobjs[state.table],
                                       namespace=state.namespace.split(),
                                       hostname=state.hostname.split(),
                                       start_time=state.start_time,
                                       end_time=state.end_time,
                                       query_str=query_str)
    else:
        summ_df = pd.DataFrame()

    if not show_df.empty:
        dfcols = show_df.columns.tolist()
        if (state.table == 'routes' and 'prefix' in dfcols
                and 'prefixlen' not in dfcols):
            dfcols.append('prefixlen')

        dfcols = sorted((filter(lambda x: x not in ['index', 'sqvers'],
                                dfcols)))

        grid1 = st.beta_container()
        headercol, uniq_col = st.beta_columns(2)
        with grid1:
            with headercol:
                st.write(
                    f'<h2 style="color: darkblue; font-weight: bold;">{state.table} View</h2>',
                    unsafe_allow_html=True)
                if show_df.shape[0] > 256:
                    st.write(
                        f'Showing first 256 of {show_df.shape[0]} rows, use query to filter'
                    )
            with uniq_col:
                if state.table != "tables":
                    if (not state.uniq_clicked
                            or state.uniq_clicked not in dfcols):
                        if 'hostname' in dfcols:
                            selindex = dfcols.index('hostname') + 1
                        else:
                            selindex = 1
                    elif state.uniq_clicked in dfcols:
                        selindex = dfcols.index(state.uniq_clicked) + 1

                    state.uniq_clicked = st.selectbox('Distribution Count of',
                                                      options=['-'] + dfcols,
                                                      index=selindex,
                                                      key='distcount')

        scol1, scol2 = st.beta_columns(2)

        if state.table != "tables" and state.uniq_clicked != '-':
            uniq_df = xplore_run_unique(show_df, columns=state.uniq_clicked)
        else:
            uniq_df = pd.DataFrame()

        if state.assert_clicked:
            assert_df = xplore_run_assert(sqobjs[state.table],
                                          start_time=state.start_time,
                                          end_time=state.end_time,
                                          namespace=state.namespace.split())
        else:
            assert_df = pd.DataFrame()

        if not summ_df.empty:
            with scol1:
                st.subheader('Summary Information')
                st.dataframe(data=summ_df)

        if not uniq_df.empty:
            with scol2:
                if uniq_df.shape[0] > 32:
                    st.warning(
                        f'{state.uniq_clicked} has cardinality > 32. Displaying top 32'
                    )
                    chart = alt.Chart(
                        uniq_df.head(32),
                        title=f'{state.uniq_clicked} Distribution') \
                        .mark_bar(color='purple', tooltip=True) \
                        .encode(y=alt.Y(f'{state.uniq_clicked}:N',
                                        sort='-x'),
                                x='count')
                else:

                    chart = alt.Chart(
                        uniq_df, title=f'{state.uniq_clicked} Distribution') \
                        .mark_bar(color='purple', tooltip=True) \
                        .encode(y=alt.Y(f'{state.uniq_clicked}:N',
                                        sort='-x'),
                                x='count')
                st.altair_chart(chart)

        if state.table in ['interfaces', 'ospf', 'bgp', 'evpnVni']:
            if assert_df.empty:
                expand_assert = False
            else:
                expand_assert = True
            validate_expander = st.beta_expander('Assert',
                                                 expanded=expand_assert)
            with validate_expander:
                if not assert_df.empty:
                    st.dataframe(data=assert_df)
                elif state.assert_clicked:
                    st.write('Assert passed')
                else:
                    st.write('Assert not run')

    expander = st.beta_expander('Table', expanded=True)
    with expander:
        if not show_df.empty:
            convert_dict = {
                x: 'str'
                for x in df.select_dtypes('category').columns
            }
            st.dataframe(data=sq_gui_style(
                show_df.head(256).astype(convert_dict), state.table),
                         height=600,
                         width=2500)
        else:
            st.warning('No Data from query')

        st.experimental_set_query_params(**asdict(state))
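A minimal sketch of the query-parameter round trip page_work relies on, under the same pre-1.0 experimental API: values come back as lists of strings and need unwrapping.

import streamlit as st

params = st.experimental_get_query_params()    # e.g. {'table': ['routes']}
table = params.get('table', ['tables'])[0]     # unwrap the single value
st.experimental_set_query_params(table=table)  # persist state back into the URL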
Example No. 16
def update_dashboard(log_directory: str):
    st.warning(f'**Displaying logs from:**\n{log_directory}')

    # Endpoint Response Time [ms]
    if enable_performance_metrics:
        with st.beta_expander("Endpoint Response Time [ms]", expanded=True):
            df_endpoint_performance = load_endpoint_performance(log_directory)
            if not df_endpoint_performance.empty:
                # Altair seems unable to handle column names containing
                # brackets, so copy the values into a bracket-free column.
                df_endpoint_performance[
                    'Measurement'] = df_endpoint_performance[
                        'Measurement [ms]']

                endpoint_performance_chart = \
                    alt.Chart(df_endpoint_performance)\
                    .mark_line(color="#FF5A00")\
                    .encode(x=alt.X("Timestamp", axis=alt.Axis(labelOverlap=True)), y="Measurement") + \
                    alt.Chart(df_endpoint_performance)\
                    .mark_line()\
                    .transform_window(rolling_mean=f"mean(Measurement)",frame=[-30, 0])\
                    .encode(x='Timestamp:T', y='rolling_mean:Q')
                st.altair_chart(endpoint_performance_chart,
                                use_container_width=True)

    # Distribution of Input Features
    if enable_input_analysis:
        with st.beta_expander("Distribution of Input Features", expanded=True):
            df_input_features = load_input_features(log_directory)
            if not df_input_features.empty:
                num_last_20pct = int(0.2 * len(df_input_features))
                st.write(
                    f"**Blue:** all except the last 20%, **orange:** the most recent {num_last_20pct} samples (20%)"
                )
                features_dist_col1, features_dist_col2, features_dist_col3 = st.beta_columns(
                    3)
                for idx, feature in enumerate(df_input_features.columns[1:]):
                    dist_chart = \
                        alt.Chart(df_input_features[:-num_last_20pct])\
                        .transform_density(feature, as_=[feature, 'density'])\
                        .mark_area(opacity=0.5)\
                        .encode(x=f"{feature}:Q", y='density:Q') + \
                        alt.Chart(df_input_features[-num_last_20pct:])\
                        .transform_density(feature, as_=[feature, 'density'])\
                        .mark_area(opacity=0.5, color='#FF5A00')\
                        .encode(x=f"{feature}:Q", y='density:Q')
                    if idx % 3 == 0:
                        features_dist_col1.altair_chart(
                            dist_chart, use_container_width=True)
                    elif idx % 3 == 1:
                        features_dist_col2.altair_chart(
                            dist_chart, use_container_width=True)
                    else:
                        features_dist_col3.altair_chart(
                            dist_chart, use_container_width=True)

    # Target variable analysis (Regression)
    if enable_target_analysis:
        df_regression_target_value = load_regression_target_value(
            log_directory)
        with st.beta_expander("Target variable analysis (Regression)",
                              expanded=True):
            if not df_regression_target_value.empty:
                num_last_20pct = int(0.2 * len(df_regression_target_value))
                target_name_col_name = df_regression_target_value.columns[1]
                st.write(
                    f"**Blue:** all except the last 20%, **orange:** the most recent {num_last_20pct} samples (20%)"
                )

                regression_target_value_chart = \
                    alt.Chart(df_regression_target_value[:-num_last_20pct])\
                    .mark_point()\
                    .encode(x=alt.X("Timestamp", axis=alt.Axis(labelOverlap=True)), y=target_name_col_name) + \
                    alt.Chart(df_regression_target_value[-num_last_20pct:])\
                    .mark_point(color='#FF5A00')\
                    .encode(x=alt.X("Timestamp", axis=alt.Axis(labelOverlap=True)), y=target_name_col_name)
                # alt.Chart(df_regression_target_value).mark_line(color='orange', size=1).transform_window(
                #     rolling_mean=f"mean({target_name_col_name})",frame=[-5, 0]).encode(x='Timestamp:T', y='rolling_mean:Q')
                st.altair_chart(regression_target_value_chart,
                                use_container_width=True)


                regression_target_dist_chart = \
                    alt.Chart(df_regression_target_value[:-num_last_20pct]).transform_density(
                        target_name_col_name, as_=[target_name_col_name, 'density']).mark_area(
                            opacity=0.5).encode(x=f"{target_name_col_name}:Q", y='density:Q',) + \
                    alt.Chart(df_regression_target_value[-num_last_20pct:]).transform_density(
                        target_name_col_name, as_=[target_name_col_name, 'density']).mark_area(
                            opacity=0.5, color='#FF5A00').encode(x=f"{target_name_col_name}:Q", y='density:Q',)
                st.altair_chart(regression_target_dist_chart,
                                use_container_width=True)
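The rolling-mean overlay used throughout this dashboard, reduced to toy data: two alt.Chart layers combined with +, where transform_window with frame=[-30, 0] yields a trailing 30-sample mean.

import altair as alt
import pandas as pd
import streamlit as st

df = pd.DataFrame({'Timestamp': pd.date_range('2021-01-01', periods=100, freq='T'),
                   'Measurement': range(100)})
raw = alt.Chart(df).mark_line(color='#FF5A00').encode(x='Timestamp:T', y='Measurement:Q')
smooth = (alt.Chart(df).mark_line()
          .transform_window(rolling_mean='mean(Measurement)', frame=[-30, 0])
          .encode(x='Timestamp:T', y='rolling_mean:Q'))
st.altair_chart(raw + smooth, use_container_width=True)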
Example No. 17
    combined_df_2_selection = combined_df_2[[
        'user', 'target', 'combined_score', 'sum_score_std',
        'sentiment_score_std', 'sum_score', 'sentiment_score', 'text',
        'created_at'
    ]]
    if use_time:
        st.write(combined_df_2_selection[
            combined_df_2_selection['created_at'] >= str(created_at)])
    else:
        st.write(combined_df_2_selection)

#######################################################################################################

# CLIQUES
#######################################################################################################
expander_cliques = st.beta_expander('Cliques Detection')
communities = list(
    communities_df.groupby('group')['source'].agg(lambda x: ', '.join(x)))
for i in range(len(communities)):
    str_comm = communities[i]
    if len(str_comm.split(',')) > 1:
        expander_cliques.text('Group {}'.format(i + 1))
        expander_cliques.text(str_comm)
expander_cliques.subheader("Cliques")
expander_cliques.write(
    cliques[['source', 'target', 'text', 'clique', 'clique_size']])
expander_cliques.subheader("Source of Cliques")
expander_cliques.write(source_cliques[['Source', 'Counts']])
expander_cliques.subheader("Target of Cliques")
expander_cliques.write(target_cliques[['Target', 'Counts']])
#######################################################################################################
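The groupby-join that builds the community member lists above, on toy data:

import pandas as pd

edges = pd.DataFrame({'group': [1, 1, 2], 'source': ['a', 'b', 'c']})
members = list(edges.groupby('group')['source'].agg(lambda x: ', '.join(x)))
print(members)  # ['a, b', 'c']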
Example No. 18
button2 = left_column.button('Show text in the right column', key='button2')
if button2:
    right_column.write('—')
    right_column.write('This is right column 2.')
```
"""
left_column, right_column = st.beta_columns(2)
button1 = left_column.button('Show text in the right column', key='button1')
if button1:
    right_column.write('This is right column 1.')
    right_column.write('—')

button2 = left_column.button('Show text in the right column', key='button2')
if button2:
    right_column.write('—')
    right_column.write('This is right column 2.')
"""
## ・expander
```python
expander = st.beta_expander('Inquiry 1')
expander.text_area('Inquiry details', key='toiawase1')
expander.write('Note: please double-check your entry for errors')
```
"""
expander = st.beta_expander('Inquiry 1')
expander.text_area('Inquiry details', key='toiawase1')
expander.write('Note: please double-check your entry for errors')

expander = st.beta_expander('Inquiry 2')
expander.text_area('Inquiry details', key='toiawase2')
expander.write('Note: please double-check your entry for errors')
Example No. 19
      # Workforce Sentiment Analysis
      ### Hello :wave: and welcome to your **monitoring dashboard** :chart_with_upwards_trend:
      ### :white_check_mark:  When your employees are happy, they feel invested in the organisation's goals and more committed to their work
      ### :white_check_mark:  Find out about your employees' happiness
      ### :white_check_mark:  Improve and boost your working environment :rocket:
  """)

space = '''<br>'''

components.html(space, height=50, width=1200)

# ----------------------------------
#     File import
# ----------------------------------

my_expander = st.beta_expander('File upload')
my_expander.info('Please upload a *CSV* file :open_file_folder:')


def try_read_df(f):
    try:
        return pd.read_csv(f)
    except Exception:
        return pd.read_excel(f)


df = pd.DataFrame()
uploaded_file = my_expander.file_uploader("Choose a file")
try:
    if uploaded_file is not None:
        df = try_read_df(uploaded_file)
Example No. 20
def main():

    image = Image.open("src/image/header.png")
    st.image(image, use_column_width=True)

    def display_sidebar_settings():
        desc_check = st.sidebar.checkbox("ℹ️ About")
        desc_markdown = read_markdown_file("src/desc_markdown.md")
        st.sidebar.markdown("---")
        st.sidebar.subheader("Configuration")

        if desc_check:
            st.sidebar.markdown(desc_markdown, unsafe_allow_html=True)

    display_sidebar_settings()

    # Load and sort data
    df = load_data()
    dose_mtx_df = df.loc[  # create a dose_mtx df
        df["Sample_type"] == "Dose_MTX",
        ["Patient_id", "Sample_time", "Result"]]
    dose_mtx_df["Result"] = dose_mtx_df["Result"].apply(
        _to_number)  # apply to_number to remove the '<0.0x
    level_mtx_df = df.loc[  # create a level_mtx df
        df["Sample_type"] == "Level_MTX",
        ["Patient_id", "Sample_time", "Result"]]
    level_mtx_df["Result"] = level_mtx_df["Result"].apply(
        _to_number)  # apply to_number to remove the '<0.0x

    # Set assumption for start treatment
    HOUR_FIRST_SAMPLE_TREATMENT = st.sidebar.slider(
        f"Assumption: first blood sample is ~hour:",
        0,
        48,
        23,
        1,
    )

    # Compute new treatment IDs
    start_treatment_threshold = st.sidebar.slider(
        "Start treatment threshold: ", 10, 150, 20, 5)
    level_mtx_df["next_result"] = level_mtx_df.groupby(
        "Patient_id")["Result"].shift(-1)
    level_mtx_df["start_streak"] = (
        level_mtx_df["Result"] > start_treatment_threshold) & (
            level_mtx_df["Result"] >= level_mtx_df["next_result"])
    level_mtx_df["treatment_id"] = (level_mtx_df.groupby("Patient_id")
                                    ["start_streak"].cumsum().astype(str))

    # Build hour difference within treatment
    level_mtx_df["hour_difference"] = level_mtx_df.groupby([
        "Patient_id", "treatment_id"
    ])["Sample_time"].apply(lambda x: x - x.min()) / np.timedelta64(1, "h")
    level_mtx_df["hour_difference"] = (level_mtx_df["hour_difference"] +
                                       HOUR_FIRST_SAMPLE_TREATMENT)

    # Compute DME diagnostic
    hour_confidence_bound = st.sidebar.slider(
        "Select 42 + x hour for DME analysis: ", 1, 48, 4, 1)
    dme_hour_42_threshold = st.sidebar.slider(
        f"Conc. µM for DME between hour 42 -> 42+{hour_confidence_bound}: ",
        0.0,
        10.0,
        1.0,
        0.1,
    )
    level_mtx_df["is_dme"] = (
        (level_mtx_df["hour_difference"] >= 42)
        & (level_mtx_df["hour_difference"] < 42 + hour_confidence_bound)
        & (level_mtx_df["Result"] >= dme_hour_42_threshold))
    is_patient_streak_dme = (level_mtx_df.groupby(
        ["Patient_id",
         "treatment_id"])["is_dme"].max().reset_index(name="dme_diagnostic"))
    level_mtx_df = level_mtx_df.merge(is_patient_streak_dme,
                                      on=["Patient_id", "treatment_id"],
                                      how="left")

    with st.beta_expander("Data preview"):
        st.dataframe(level_mtx_df.head(50))

    with st.beta_expander("Study particular patient"):
        all_patients = (level_mtx_df["Patient_id"].value_counts().sort_values(
            ascending=False).index.values)
        st.write(f"Number of patients in data: {len(all_patients)}")
        selected_patient = st.selectbox("Choose patient: ", all_patients)
        patient_level_df = level_mtx_df[level_mtx_df["Patient_id"] ==
                                        selected_patient]
        patient_dose_df = dose_mtx_df[dose_mtx_df["Patient_id"] ==
                                      selected_patient]
        patient_level_df["is_dose"] = False
        patient_dose_df["is_dose"] = True
        patient_df = patient_level_df.copy().append(patient_dose_df)

        use_log_x_scale = st.checkbox("Use logarithmic scale for y", True)
        fig_timeline = px.scatter(
            patient_df,
            x="Sample_time",
            y="Result",
            color="is_dose",
            log_y=use_log_x_scale,
            title="Timeline",
        )
        st.plotly_chart(fig_timeline, use_container_width=True)
        use_hour_difference = st.checkbox(
            "Superimpose treatments per hour difference", False)
        fig_per_sequence = px.scatter(
            patient_level_df,
            x="hour_difference" if use_hour_difference else "Sample_time",
            y="Result",
            color="treatment_id",
            log_y=use_log_x_scale,
            title="Level per sequence",
        )
        st.plotly_chart(fig_per_sequence, use_container_width=True)

    with st.beta_expander("Study DME patients"):
        dme_patients = (
            level_mtx_df.loc[level_mtx_df["dme_diagnostic"] == True,
                             "Patient_id"].value_counts().sort_values(
                                 ascending=False).index.values)
        if len(dme_patients) == 0:
            st.info("No DME detected")
            return
        st.info(
            f"Number of DME patients detected: {len(dme_patients)}")
        selected_patient = st.selectbox("Choose patient: ", dme_patients)
        patient_level_df = level_mtx_df[level_mtx_df["Patient_id"] ==
                                        selected_patient]

        if patient_level_df["treatment_id"].nunique() == 1:
            st.warning(
                "Beware, this patient only has one long treatment, maybe it's actually a mix of treatments"
            )
        st.plotly_chart(
            px.scatter(
                patient_level_df,
                x="hour_difference",
                y="Result",
                color="treatment_id",
                symbol="dme_diagnostic",
                title="Has patient DME?",
            ),
            use_container_width=True,
        )
        # FIXME: add why DME detection was triggered
        dme_patients_series = pd.Series(dme_patients)
        st.write(dme_patients_series)

    with st.beta_expander("Generate download link"):
        st.markdown(generate_download(dme_patients_series),
                    unsafe_allow_html=True)
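The treatment-id trick above (a boolean "streak starts here" column, cumsum'd per patient) on toy data:

import pandas as pd

s = pd.DataFrame({'pid': [1, 1, 1, 1], 'Result': [90, 5, 80, 3]})
s['next_result'] = s.groupby('pid')['Result'].shift(-1)
s['start_streak'] = (s['Result'] > 20) & (s['Result'] >= s['next_result'])
s['treatment_id'] = s.groupby('pid')['start_streak'].cumsum().astype(str)
print(s['treatment_id'].tolist())  # ['1', '1', '2', '2']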
Example No. 21
def submain1():

    st.subheader('Section and route calculation')

    cols = st.beta_columns(2)

    with cols[0]:
        volume_maximo = st.number_input('Maximum volume per section in m³',
                                        value=1.0 * 1.2 * 1.5)
    with cols[1]:
        multiplicador_volume = st.number_input('Volume correction factor',
                                               value=1.1)
    df_layout = pd.read_csv('dados/LAYOUT.csv')
    n_secoes = df_layout['seção'].max()

    cols = st.beta_columns(3)

    with cols[0]:
        pedidos = st.file_uploader('Orders for the day',
                                   type=['xlsx', 'xls', 'csv'])
    with cols[1]:
        recebimentos = st.file_uploader('Receipts for the day',
                                        type=['xlsx', 'xls', 'csv'])
    with cols[2]:
        veiculos = st.file_uploader('Vehicles', type=['xlsx', 'xls', 'csv'])

    if st.button('Calculate'):
        try:
            df_pedidos = pd.read_csv(pedidos)
        except Exception:
            df_pedidos = pd.read_excel(pedidos)
        try:
            df_recebimentos = pd.read_csv(recebimentos)
        except Exception:
            df_recebimentos = pd.read_excel(recebimentos)
        try:
            df_veiculos = pd.read_csv(veiculos)
        except Exception:
            df_veiculos = pd.read_excel(veiculos)

        df_pedidos1 = get_pedidos(df_pedidos, multiplicador_volume)
        df_recebimentos1 = get_recebimentos(df_recebimentos,
                                            multiplicador_volume)

        with st.beta_expander('Overall results'):

            cols = st.beta_columns(3)

            cubagem_pedidos = round(df_pedidos1.VOLUME_TOTAL.sum(), 2)
            cubagem_recebimentos = round(df_recebimentos1.VOLUME_TOTAL.sum(),
                                         2)

            peso_pedidos = round(df_pedidos1.PESO_TOTAL.sum(), 2)
            peso_recebimentos = round(df_recebimentos1.PESO_TOTAL.sum(), 2)

            volumes_pedidos = df_pedidos1['Soma de QUANT'].sum()
            volumes_recebimentos = df_recebimentos1['Total'].sum()

            with cols[0]:
                st.write(
                    '__Total cubage__: ', "{:,} m³".format(
                        round(cubagem_pedidos + cubagem_recebimentos, 2)))
                st.write('- __Total cubage of orders__: ',
                         "{:,} m³".format(cubagem_pedidos))
                st.write('- __Total cubage of receipts__: ',
                         "{:,} m³".format(cubagem_recebimentos))

            with cols[1]:
                st.write(
                    '__Total weight__: ',
                    "{:,} kg".format(round(peso_pedidos + peso_recebimentos,
                                           2)))
                st.write('- __Total weight of orders__: ',
                         "{:,} kg".format(peso_pedidos))
                st.write('- __Total weight of receipts__: ',
                         "{:,} kg".format(peso_recebimentos))

            with cols[2]:
                st.write('__Total packages__: ',
                         "{}".format(volumes_pedidos + volumes_recebimentos))
                st.write('- __Total packages in orders__: ',
                         "{}".format(volumes_pedidos))
                st.write('- __Total packages in receipts__: ',
                         "{}".format(volumes_recebimentos))

        n_secoes_recebimentos = np.ceil(cubagem_recebimentos / volume_maximo)

        with st.spinner('Calculating...'):
            n_recebimento = int(np.ceil(n_secoes_recebimentos))
            results_secoes = solve_secoes(df_pedidos1, df_recebimentos1,
                                          n_secoes, n_recebimento,
                                          volume_maximo)
            results_veiculos = solve_veiculos(df_pedidos1, df_veiculos)
            results_secoes['QUANTIDADE'] = pd.to_numeric(
                results_secoes['QUANTIDADE'])
            # st.components.v1.html('<hr>', height=10)
            with st.beta_expander('Detailed results'):
                st.subheader('Allocation for receipts')
                st.write(
                    f'- Reserve sections 1 to {n_recebimento} for receiving'
                )
                cols = st.beta_columns(2)
                with cols[0]:
                    st.subheader('Allocation for orders')
                    # print(results_secoes.columns)
                    for rota in results_secoes['ROTA'].unique():
                        st.markdown(f'__ROUTE {rota}:__')
                        for secao in results_secoes.loc[
                                results_secoes['ROTA'] == rota,
                                'SECAO'].unique():
                            st.markdown(f"* __Section {secao}__")
                            temp = results_secoes.loc[
                                (results_secoes.ROTA == rota)
                                & (results_secoes.SECAO == secao)].groupby(
                                    'NOTA')['QUANTIDADE'].sum().reset_index()
                            for index, row in temp.iterrows():
                                st.markdown(
                                    f"* Order {row.NOTA} | number of packages = {row['QUANTIDADE']}"
                                )
                with cols[1]:
                    st.subheader('Truck allocation')
                    print(results_veiculos.columns)
                    for rota in results_veiculos['ROTA'].unique():
                        st.write(f'__ROUTE {rota}:__')
                        for veiculo in results_veiculos.loc[
                                results_veiculos.ROTA == rota,
                                'VEICULO'].unique():
                            st.write(f"* __Vehicle {veiculo}__")
                            # for index, row in results_veiculos.loc[results_veiculos['ROTA'] == rota].iterrows():
                            #     st.write(f"* Vehicle {row.VEICULO} ")
            with st.beta_expander('Section layout view'):
                df_layout = pd.read_csv('dados/LAYOUT.csv')
                show_layout(df_layout, n_secoes_recebimentos,
                            results_secoes['SECAO'].max())
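The receipts-section rule above in isolation: reserve ceil(total volume / max section volume) sections.

import numpy as np

cubagem_recebimentos = 7.3  # m³, example value
volume_maximo = 1.8         # m³ per section (1.0 * 1.2 * 1.5)
print(int(np.ceil(cubagem_recebimentos / volume_maximo)))  # 5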
Example No. 22
def write():
    st.markdown("""
        # SugarTime
        ### Forecasting
        What has the model learned about the exogenous variables? Use
        this page to explore this question.
        """)
    with st.beta_expander("CLICK HERE to expand discussion"):
        st.markdown("""
            NOTE: while the Model Performance page showed the performance
            of a multioutput model with a horizon of 1 hour (12 time steps),
            this page uses a dynamic forecasting approach (due to the
            insertion of future exogenous data that must be considered
            in the forecast). This approach iteratively forecasts just one
            step (5 minutes) ahead and then uses that predicted blood glucose
            value to forecast the next step, etc.

            You can see that this particular model has learned that
            whether you eat carbohydrates or
            inject insulin, you see an increase in blood glucose
            followed by a decrease.

            This is inaccurate, of course;
            in reality, insulin brings blood glucose down.
            So why did the model fail to learn correctly?
            I haven't dug into this particular question too deeply yet,
            but my first hypothesis would be this: unless
            a patient is *very good* at balancing their blood glucose without
            insulin, insulin will necessarily be administered quite frequently
            on a reactive basis. In other words, there is often an increase in
            blood sugar before the patient uses insulin to bring it down. And
            since insulin takes about 15-30 minutes to start bringing blood
            glucose down, the model will often see insulin ⟶ increasing blood
            sugar ⟶ decreasing blood sugar. In this sense, it isn't surprising
            that this is what the model thinks insulin is doing.

            Similarly, why did the
            model mistakenly learn that carbs will naturally drive blood sugar
            upward (which is true) and then downward (which is false)?
            Here is another possible explanation (that is not mutually
            exclusive with the first): it is possible
            that this patient had enough scenarios where insulin and carbs
            were given in close proximity that the model was unable to
            temporally disentangle their effects. Consequently, it attributed
            their combined effects to both of them. That's a theory, anyway.

            One possible way to test these ideas would be to train a model
            only on stretches of time where just insulin is given. You
            could then further train that model on stretches of time where only
            carbs are present. You could then test the model on data where both
            insulin and carbs are present. If this resulted in the model
            learning the true effects of insulin and carbs, it would be strong
            evidence that this current model was unable to tease them apart
            because of their temporal correlation.
            """)
    st.markdown("""
        *Instructions:*
        Use the UI on the sidebar to input the timing and the amount of
        carbohydrates eaten and/or insulin injected in the near future.
        Then observe the model's predicted effect of these interventions
        on future blood glucose.

        ***
        """)

    # load patient data and fit model
    vm = load_saved_model()
    patient = vm.patient

    # set some time variables
    current_time = patient.Xtest.index[-1]
    time_list = list(range(5, 61, 5))

    # get meal information from user
    st.sidebar.markdown("***")
    st.sidebar.markdown("# Forecasting Options")
    st.sidebar.markdown("***")
    st.sidebar.markdown("## Carbohydrates")
    st.sidebar.markdown("Current time is {}".format(current_time))
    meal_t = st.sidebar.select_slider(
        label="How many minutes until your next meal?", options=time_list)
    carb_time = current_time + timedelta(minutes=meal_t)
    carbs = st.sidebar.number_input("How many carbs in your next meal?")

    # get bolus information from user
    st.sidebar.markdown("***")
    st.sidebar.markdown("## Insulin")
    bolus_t = st.sidebar.select_slider(
        label="How many minutes until your next insulin bolus?",
        options=time_list)
    bolus_time = current_time + timedelta(minutes=bolus_t)
    units = st.sidebar.number_input("How many units of insulin in this bolus?")

    # do the forecast
    start_time = current_time + timedelta(minutes=5)
    inserts = {
        "carb_grams": {
            carb_time: carbs
        },
        "all_insulin": {
            bolus_time: units
        }
    }
    ypred = vm.dynamic_forecast(vm.patient.Xtest, vm.patient.ytest, start_time,
                                inserts)

    # plot the model forecast for the optimal bolus
    fig = core.plot_forecast(patient.ytest, ypred, return_flag=True)
    fig = core.plot_optimal_boundaries(patient, fig)
    st.plotly_chart(fig)

    # display info
    st.markdown("""
        ### **Predicted glucose with:**
        ### {} units of insulin at {}
        ### and
        ### {} grams of carbs at {}.
        """.format(units, bolus_time.time(), carbs, carb_time.time()))
Exemplo n.º 23
0
def linear_regression():
    """Main function to run the entire chapter."""

    # Load data
    data = load_data()
    cols = data.columns

    st.sidebar.markdown("**Choose X and y**")
    y = st.sidebar.selectbox("Choose y", options=cols, index=len(cols) - 1)
    X = st.sidebar.selectbox("Choose X", options=[c for c in cols if c != y])

    df = data[[y, X]].copy()  # copy to avoid SettingWithCopyWarning on df[X] below
    standardize = st.sidebar.checkbox("Standardize X?", value=True)
    if standardize:
        standardized = (df[X] - df[X].mean()) / df[X].std()
        df[X] = standardized
    b0, b1 = 5., 0.
    betas = [b0, b1]
    df["yhat"] = betas[0] + betas[1] * df[X]
    df["yhat_best"], bbest0, bbest1 = _ols_best(df)

    st.sidebar.markdown('---')
    st.sidebar.markdown('**Choose coefficients**')
    scaler = bbest0
    MIN0, MAX0 = float(np.round(bbest0 - scaler,
                                3)), float(np.round(bbest0 + scaler, 3))
    b0 = st.sidebar.slider("Intercept (b0)",
                           float(MIN0),
                           float(MAX0),
                           value=float(MIN0),
                           step=0.001)
    scaler = bbest1
    MIN1, MAX1 = float(np.round(bbest1 - scaler,
                                3)), float(np.round(bbest1 + scaler, 3))
    b1 = st.sidebar.slider("Slope (b1)", MIN1, MAX1, value=MIN1, step=0.001)

    st.sidebar.markdown('---')
    st.sidebar.markdown('**Choose plot options**')
    show_regression = st.sidebar.checkbox(
        "Show yhat \n(based on coefficients)")
    show_errors = st.sidebar.checkbox("Show errors")
    show_best = st.sidebar.checkbox("Show yhat (based on optimization)")

    st.header("Simple linear regression")

    # Data
    show_data = st.beta_expander("Show data")
    with show_data:
        st.dataframe(df[[y, X]])

    # Specification
    show_specification = st.beta_expander("Show model specification")
    with show_specification:
        st.markdown(r'''
        Relationship is defined as 
        $$
        \hat{y}  = \beta_0 + \beta_1 X 
        $$''')
        st.write(f"In our case this means:")
        st.write(f"${y}$ = `{b0:.2f}`+ `{b1:.2f}`${X}$")
        st.write(f'''

        **where**
        
        ${y}$ = units of sales (in thousands),  
        ${X}$ = EUR of advertisement (in thousands)

        ''')

    # Plot
    betas = [b0, b1]
    df["yhat"] = betas[0] + betas[1] * df[X]
    df["yhat_best"], bbest0, bbest1 = _ols_best(df)
    show_plot = st.beta_expander("Show data plot")
    with show_plot:
        fig = plot(df,
                   show_line=show_regression,
                   show_errors=show_errors,
                   show_best=show_best)
        st.pyplot(fig)

    # Assessing accuracy
    show_accuracy = st.beta_expander("Assess model accuracy")
    ytrue, yhat, ybest = df[y], df["yhat"], df["yhat_best"]
    with show_accuracy:

        col1, col2 = st.beta_columns(2)
        with col1:
            col1.markdown("**Chosen model**")
            st.write(r'$\beta_0$ = ', np.round(b0, 3))
            st.write(r'$\beta_1$ = ', np.round(b1, 3))
            rss = _rss(ytrue, yhat)
            st.write(r'$\text{RSS}$ = ', np.round(rss, 3))
            r2 = _r2(ytrue, yhat)
            st.write(r'$R^2$ = ', np.round(r2, 3))
            F = _F(ytrue, yhat)
            st.write(r'$F\text{-statistic}$ = ', np.round(F, 3))

        with col2:
            col2.markdown("**Optimized model**")
            st.write(r'$\beta_0$ = ', np.round(bbest0, 3))
            st.write(r'$\beta_1$ = ', np.round(bbest1, 3))
            rss = _rss(ytrue, ybest)
            st.write(r'$\text{RSS}$ = ', np.round(rss, 3))
            r2 = _r2(ytrue, ybest)
            st.write(r'$R^2$ = ', np.round(r2, 3))
            F = _F(ytrue, ybest)
            st.write(r'$F\text{-statistic}$ = ', np.round(F, 3))

    show_losses = st.beta_expander("Show RSS")
    with show_losses:
        col1, col2 = st.beta_columns(2)
        angle1 = col1.slider("Vertical", 0, 360, value=24, step=1)
        angle2 = col2.slider("Horizontal", 0, 360, value=318, step=1)
        st.markdown("**RSS vs. coefficients**")
        xtrue = df[X].values
        ytrue_np = ytrue.values
        w0s = np.linspace(MIN0, MAX0, 50)
        w1s = np.linspace(MIN1, MAX1, 50)
        xx, yy, losses = _loss_grid(xtrue, ytrue_np, w0s, w1s)
        col1, col2 = st.beta_columns(2)

        fig, ax = plot_surface(xx, yy, losses, b0, b1, _rss(ytrue, yhat))
        ax.view_init(angle1, angle2)
        st.pyplot(fig)
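The chapter calls several helpers (`_ols_best`, `_rss`, `_r2`, `_F`, `_loss_grid`) that are defined elsewhere in the module. Minimal sketches consistent with how they are called above might look like the following; the exact implementations are assumptions, written for a simple regression with one predictor.

import numpy as np

def _rss(ytrue, yhat):
    """Residual sum of squares."""
    return float(((ytrue - yhat) ** 2).sum())

def _r2(ytrue, yhat):
    """Coefficient of determination: 1 - RSS/TSS."""
    tss = float(((ytrue - ytrue.mean()) ** 2).sum())
    return 1 - _rss(ytrue, yhat) / tss

def _F(ytrue, yhat, p=1):
    """F-statistic for a regression with p predictors."""
    n = len(ytrue)
    tss = float(((ytrue - ytrue.mean()) ** 2).sum())
    rss = _rss(ytrue, yhat)
    return ((tss - rss) / p) / (rss / (n - p - 1))

def _ols_best(df):
    """Closed-form simple OLS of the first column on the second.
    Returns fitted values, intercept, and slope."""
    y, x = df.iloc[:, 0], df.iloc[:, 1]
    b1 = ((x - x.mean()) * (y - y.mean())).sum() / ((x - x.mean()) ** 2).sum()
    b0 = y.mean() - b1 * x.mean()
    return b0 + b1 * x, b0, b1

def _loss_grid(x, y, w0s, w1s):
    """RSS evaluated over a grid of intercepts (w0s) and slopes (w1s)."""
    xx, yy = np.meshgrid(w0s, w1s)
    resid = y[None, None, :] - (xx[..., None] + yy[..., None] * x[None, None, :])
    return xx, yy, (resid ** 2).sum(axis=-1)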
Exemplo n.º 24
0
if uploaded_file is not None:
    video_bytes = uploaded_file.getvalue()

    result = pipe.predict_test(video_bytes)

    my_bar = st.progress(0)
    for percent_complete in range(100):
        time.sleep(0.1)
        my_bar.progress(percent_complete + 1)

    if result[0][0] >= 0.5:
        st.balloons()
        st.markdown(
            '### ***CONGRATS, YOU ARE A SQUATPRO!! YOUR BACK & KNEES ARE THANKFUL!!***'
        )
        with st.beta_expander(
                "Still Feel Like You Could Use Some Tips? Click here:"):

            st.markdown("### STARTING POSITION")
            st.markdown(
                "##### * Stand with your feet shoulder-width apart. Your knees and feet should be pointing in the same direction."
            )
            st.markdown(
                '##### * Raise your arms out in front of you for balance (or you can leave them by your side and raise them as you descend).'
            )

            st.markdown("### EXECUTION")
            st.markdown(
                "##### * Keep your head up and torso upright as you squat.")
            st.markdown(
                "##### * Bending your hips back, make sure to descend until your thighs are parallel with the floor."
            )
Exemplo n.º 25
0
def main():
    pio.templates.default = 'plotly_dark'
    try:
        df = load_data('src/data/data.csv')
    except Exception:
        st.error('Could not load src/data/data.csv')
        st.stop()  # halt the app rather than looping on a missing file
    granularity = st.sidebar.selectbox("Granularity",
                                       ["Worldwide", "Continent", "Country"])
    if granularity == "Worldwide":
        viz = [
            'Global Member Distribution', 'Gender Distribution',
            'Membership Type', 'Membership Level'
        ]
        choice = st.sidebar.selectbox('Choose Visualization', viz)
        st.title("Member Distribution")
        if choice == 'Global Member Distribution':
            conti = pd.DataFrame(
                df.groupby('Continent')['Gender'].value_counts()).rename(
                    columns={'Gender': 'Numbers'})
            conti = conti.unstack(level=0)
            conti.columns = conti.columns.droplevel([0])
            conti = conti.rename_axis([None], axis=1).reset_index()
            with st.beta_container():
                with st.spinner('Rendering chart...'):
                    world_map = folium.Map(tiles="OpenStreetMap",
                                           zoom_start=2)
                    marker_cluster = MarkerCluster().add_to(world_map)
                    data = df[['Latitude', 'Longitude', 'Country',
                               'Gender']].dropna()
                    for i in range(len(data)):
                        lat = data.iloc[i]['Latitude']
                        long = data.iloc[i]['Longitude']
                        radius = 5
                        # popup_text = f"Country : {data.iloc[i]['Country']}<br>%of Users : {}<br>"
                        # popup_text = popup_text.format(data.iloc[i]['Country'], data.iloc[i]['Gender'])
                        folium.CircleMarker(location=[lat, long],
                                            radius=radius,
                                            fill=True).add_to(marker_cluster)

                    # call to render Folium map in Streamlit
                    folium_static(world_map)
                if st.checkbox('Show raw data'):
                    st.write(conti)
                expander = st.beta_expander('Analysis')
                # Europe's share below uses 142 members, inferred as the
                # remainder of the 1707 total; this count is an assumption.
                expander.write(
                    f'Africa has the highest membership rate at {round(((669+708)/1707)*100, 2)}%, followed by North America {round(((71+95)/1707)*100, 2)}%, Europe {round((142/1707)*100, 2)}%, Asia {round((20/1707)*100, 2)}% and finally Oceania {round((2/1707)*100, 2)}%'
                )
        elif choice == 'Gender Distribution':
            with st.beta_container():
                st.subheader('Worldwide Member Gender Distribution')
                data = cat_numbers(df, 'Gender')
                pie_chart(data, 'Gender', 'Global Member Gender Distribution')
                expander = st.beta_expander('Analysis')
                expander.write(
                    f"The Room's membership consists of 52.1% males and 47.9% females."
                )
        elif choice == 'Membership Type':
            mem = pd.DataFrame(
                df.groupby('Gender')['Membership Type'].value_counts()).rename(
                    columns={'Membership Type': 'Number'})
            mem = mem.unstack(level=1)
            mem.columns = mem.columns.droplevel([0])
            mem = mem.rename_axis([None], axis=1).reset_index()
            mem = mem.drop(columns='NONE')  # assign the result; drop() is not in-place
            with st.beta_container():
                fig = px.bar(mem,
                             x='Gender',
                             y=[
                                 'Founding Member', 'Free Trial',
                                 'Premium Paying', 'Staff Membership'
                             ])
                fig.update_layout(barmode='group')
                st.plotly_chart(figure_or_data=fig)
                expander = st.beta_expander('Analysis')
                expander.write(
                    f'A majority, 55.5%, of members hold Free Trial accounts (53.4% of females and 57.3% of males). '
                    f'11.6% are on the Premium Paying plan, 20.3% are Founding Members and 12.1% are on Staff '
                    f'Membership.')
        elif choice == 'Membership Level':
            mlevel = pd.DataFrame(
                df.groupby('Membership Level')['Gender'].value_counts()
            ).rename(columns={'Membership Level': 'Number'})
            mlevel = mlevel.unstack(level=1)
            mlevel.columns = mlevel.columns.droplevel([0])
            mlevel = mlevel.rename_axis([None], axis=1).reset_index()
            mlevel['male%'] = round(
                mlevel['male'] / (mlevel['male'] + mlevel['female']) * 100, 2)
            mlevel['female%'] = round(
                mlevel['female'] / (mlevel['male'] + mlevel['female']) * 100,
                2)
            with st.beta_container():
                fig = px.bar(mlevel,
                             x='Membership Level',
                             y=['female%', 'male%'])
                fig.update_layout(
                    title_text="Membership Level Distribution",
                    barmode="stack",
                    uniformtext=dict(mode="hide", minsize=10),
                )
                st.plotly_chart(figure_or_data=fig)
                expander = st.beta_expander('Analysis')
                # 46.01% below is assumed as the complement of 53.99%, since
                # the two shares of a level must sum to 100%.
                expander.write(
                    f'A majority of the members, {round(363/(363+242+200)*100, 2)}%, are Mid-career Leaders. '
                    f'However, gender distribution across the membership levels is relatively even, '
                    f'with males dominating membership by a small margin:\n'
                    '* Young Leader: 50.98% male and 49.02% female\n'
                    '* Mid-career Leader: 51.21% male and 48.79% female\n'
                    '* Senior Leader: 53.99% male and 46.01% female')
    if granularity == "Country":
        country = st.sidebar.selectbox('country', df['Country'].unique())
        st.title(country)
        graph_type = st.selectbox("Choose visualization", [
            'Gender', 'Membership Type Distribution',
            'Membership Level Distribution'
        ])
        if graph_type == "Gender":
            st.subheader("Gender Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Country'] == country], 'Gender')
                st.write(data)
                pie_chart(data, 'Gender', 'Gender Distribution')
                expander = st.beta_expander(f'{country} Analysis')
                expander.write(
                    f'{country} has {round((data.loc[0]["Number"] / (data.loc[0]["Number"] + data.loc[1]["Number"]))*100,1)}% {data.loc[0]["Gender"]} and {round((data.loc[1]["Number"] / (data.loc[0]["Number"] + data.loc[1]["Number"]))*100,1)}% {data.loc[1]["Gender"]} membership.'
                )
                # exp_data(f'{country}', data)
        elif graph_type == "Membership Type Distribution":
            st.subheader("Membership Type Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Country'] == country],
                                   'Membership Type')
                pie_chart(data, 'Membership Type',
                          'Membership Type Distribution')
        elif graph_type == "Membership Level Distribution":
            st.subheader("Membership Level Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Country'] == country],
                                   'Membership Level')
                pie_chart(data, 'Membership Level',
                          'Membership Level Distribution')
    elif granularity == "Continent":
        continent = st.sidebar.selectbox('continent',
                                         df['Continent'].dropna().unique())
        st.title(continent)
        graph_type = st.selectbox("Choose visualization", [
            'Gender', 'Membership Type Distribution',
            'Membership Level Distribution'
        ])
        if graph_type == "Gender":
            st.subheader("Gender Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Continent'] == continent], 'Gender')
                pie_chart(data, 'Gender', 'Gender Distribution')
        elif graph_type == "Membership Type Distribution":
            with st.beta_container():
                data = cat_numbers(df[df['Continent'] == continent],
                                   'Membership Type')
                pie_chart(data, 'Membership Type',
                          'Membership Type Distribution')
        elif graph_type == "Membership Level Distribution":
            with st.beta_container():
                data = cat_numbers(df[df['Continent'] == continent],
                                   'Membership Level')
                pie_chart(data, 'Membership Level',
                          'Membership Level Distribution')
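Example 25 leans on three helpers that are not shown: `load_data`, `cat_numbers`, and `pie_chart`. Plausible minimal versions are sketched below; the implementations are assumptions, guided only by how the helpers are called and by the 'Number' column name used in the analysis strings.

import pandas as pd
import plotly.express as px
import streamlit as st

@st.cache
def load_data(path):
    """Read the member CSV once and cache it across Streamlit reruns."""
    return pd.read_csv(path)

def cat_numbers(df, column):
    """Count occurrences of each category in `column`; the result keeps the
    category column and adds a 'Number' count column."""
    counts = df[column].value_counts().reset_index()
    counts.columns = [column, 'Number']
    return counts

def pie_chart(data, column, title):
    """Render a pie chart of the category counts."""
    fig = px.pie(data, names=column, values='Number', title=title)
    st.plotly_chart(figure_or_data=fig)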
Exemplo n.º 26
0
                                 interactive=False,
                                 started=False,
                                 vec=None,
                                 current_query="")

sents, ids, id2ind, ind2id = load_sents_and_ids()

print("len sents", len(sents))

index = load_index(similarity, pooling)
bert = load_bert()
bert_all_seq = load_bert_all_seq()
bert_alignment_supervised = load_bert_alignment_supervised()
pca = load_pca(pooling)

my_expander = st.beta_expander("How to query?")
my_expander.markdown(
    """Start by writing a query that aims to capture a relation between two entities. 
<ul>
  <li>Use <b><font color='blue'>$</font></b> or <b><font color='blue'>:[w]</font></b> to mark words that <b>must appear</b>. </li>
  <li>Mark the <b>arguments</b> with <b><font color='orange'>a1:</font></b> and <b><font color='orange'>a2:</font></b> </li>
  <li>Mark with <b><font color='brown'>:</font></b> additional captures that fix the required syntactic structure. </li>
</ul>  
For instance, in the query '<b><font color='orange'>a1:[w]</font></b>COVID-19 <b><font color='blue'>$</font></b>causes <b><font color='orange'>a2:</font></b>pain', we search for sentences where the syntactic relation between the first and second argument is the same as the relation between `COVID-19` and `pain` in this sentence (subject-object relation). We further request an exact match for the word `causes` and the argument `COVID-19`. <br> For more details on the query language, check out 
<a href="https://spike.covid-19.apps.allenai.org/datasets/covid19/search/help">this</a> tutorial.""",
    unsafe_allow_html=True)
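# A few illustrative query strings in the syntax described above. These are
# assumed examples for orientation, not queries taken from the app:
#   "a1:[w]COVID-19 $causes a2:pain"        -> exact word 'causes', exact argument 'COVID-19'
#   "a1:aspirin $reduces a2:fever"          -> same subject-object structure, new arguments
#   "a1:smoking $increases :risk a2:cancer" -> ':' pins an extra syntactic anchor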
#st.write("Uses {}-dimensional vectors".format(pca.components_.shape[0]))
#st.write("Number of indexed sentences: {}".format(len(sents)))
print("Try accessing the demo under localhost:8080 (or the default port).")

if mode == "Start with Query":
Exemplo n.º 27
0
import streamlit as st


st.title('Streamlit 超入門')  # "Streamlit: an absolute beginner's guide"
st.write('Interactive Widgets')

left_column, right_column = st.beta_columns(2)
button = left_column.button('右カラムに文字を表示')  # "Show text in the right column"

if button:
    right_column.write('ここは右カラム')  # "This is the right column"

expander1 = st.beta_expander('問い合わせ1')  # "Inquiry 1"
expander1.write('問い合わせ1の回答')  # "Answer to inquiry 1"
expander2 = st.beta_expander('問い合わせ2')  # "Inquiry 2"
expander2.write('問い合わせ2の回答')  # "Answer to inquiry 2"
expander3 = st.beta_expander('問い合わせ3')  # "Inquiry 3"
expander3.write('問い合わせ3の回答')  # "Answer to inquiry 3"
Exemplo n.º 28
0
def test_nested_expanders(self):
    level1 = st.beta_expander("level 1")
    with self.assertRaises(StreamlitAPIException):
        level2 = level1.beta_expander("level 2")
Exemplo n.º 29
0
def main():
    st.set_page_config(page_title="Cloze Test Generation",
                       page_icon="🦈",
                       layout="wide")
    col1, col2, col3 = st.beta_columns(3)
    image = Image.open("logo.jpg")
    with col1:
        st.image(image, )
    with col2:
        st.image(image, )
    with col3:
        st.image(image, )

    st.markdown(
        "<h1 style='text-align: center;'> Cloze Test Generation</h1>",
        unsafe_allow_html=True,
    )
    # st.title('Cloze Test Generation')
    st.header("Paste, Generate and Test!")

    user_name = st.text_input("Enter your name: ")
    if user_name:
        st.info(
            f"""Hi, {user_name}. This application is based on the following libraries.
            \n - [Transformers](https://github.com/huggingface/transformers)
            \n - [Stanza](https://github.com/stanfordnlp/stanza)
            \n - [Tensorflow](https://www.tensorflow.org/)
            \n - [NumPy](https://numpy.org/)
            """)

        st.write("""### We'll prepare the toolkits before we start.""")
        st.write("""This process could take a while for the first time.""")
        st.warning("Preparing Stanza toolkits...")
        try:
            nlp = stanza.Pipeline(lang="en",
                                  processors="tokenize,pos",
                                  verbose=0,
                                  use_gpu=False)
        except Exception:
            stanza.download("en")
            nlp = stanza.Pipeline(lang="en",
                                  processors="tokenize,pos",
                                  verbose=0,
                                  use_gpu=False)

        st.warning("Preparing BERT tokenzier...")
        tokenizer = download_bert_tokenizer()

        st.warning("Preparing BERT model...")
        model = download_bert_model()

        st.success("Done")

        col1, col2 = st.beta_columns(2)
        with col1:
            st.write(
                """### Paste your article below. We'll do the rest for you!""")
            text = st.text_area(
                "Here is a default text. Change it to whatever you want and press Generate!",
                """In the 1980s the number of giant pandas in China hovered around 1,100. Now, after decades of focused conservation, giant pandas have been crossed off the endangered list. Habitat preservation, anti-poaching efforts, and advances in captive-breeding programs can offer a lifeline to the most endangered members of the biosphere.""",
            )

        with col2:
            generate_btn = st.button("Generate")
            if generate_btn:
                questions, answers, options, paragraph = generate(
                    nlp, text, tokenizer, model)

                st.text_area("Result: ", paragraph)

                for option in options:
                    st.text(option)

                with st.beta_expander("Answer Section"):
                    for i, answer in enumerate(answers, start=1):
                        st.text(f"{i}. " + answer)
Exemplo n.º 30
0
    body{
    background: url('data:image/jpeg;base64,%s');
    }
    .sidebar .sidebar-content{
    background: url('data:image/jpeg;base64,%s');
    }
    </style>
    ''' % (bin_str, bin_str_2)

    st.markdown(page_bg_img, unsafe_allow_html=True)
    return
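
# A minimal sketch of the base64 helper that set_png_as_page_bg relies on;
# `get_base64_of_bin_file` is an assumed name, not shown in the snippet above.
import base64

def get_base64_of_bin_file(bin_file):
    # Read a binary file and return its base64-encoded contents as a string.
    with open(bin_file, 'rb') as f:
        return base64.b64encode(f.read()).decode()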


set_png_as_page_bg('C:/ML Web App/static/background_4.jpg', 'C:/ML Web App/static/sidebar_3.jpg')

st.title('Machine Learning Web App Beta Version')

my_expander = st.beta_expander('View Dataset')
with my_expander:
    st.subheader('Dataset')
    data_file = st.sidebar.file_uploader('Upload your datafile', type=['csv'])
    if data_file is not None:
        st.write(type(data_file))
        file_details = {'FileName:': data_file.name,
                        'Filetype:': data_file.type,
                        'Filesize:': data_file.size}
        st.write(file_details)
        df = pd.read_csv(data_file)
        col = df.columns
        st.dataframe(df)
    else:
        st.write('Upload your datafile from here. ')
target_var = st.sidebar.text_input('Specify your target variable')