Exemplo n.º 1
0
"""



# Number of quiz questions to render on this page run.
question_num = 1

# Sample every question index once, up front.
# Fix: the original regenerated this array on EVERY loop iteration and then
# indexed it with q, so repeated runs of the loop drew from freshly sampled
# arrays and could repeat or skip questions.
random_index = np.random.randint(low=0, high=len(sentence), size=question_num)

for q in range(question_num):
    # Word to quiz on, plus its answers.
    # NOTE(review): assumes `answer` is index-aligned with `sentence` and
    # that `word_dict[question_word]` is also present in `answer` — confirm.
    question_word = sentence[random_index[q]]
    correct_answer = word_dict[question_word]
    c_answer = answer[random_index[q]]

    st.header(question_word)

    # Build three distractors from the remaining answer pool.
    answer_copy = answer.copy()
    answer_copy.remove(correct_answer)
    wrong_answers = random.sample(answer_copy, 3)

    # Shuffle the correct answer in among the distractors.
    answer_options = [correct_answer] + wrong_answers
    random.shuffle(answer_options)

    st.subheader(answer_options)

    # NOTE(review): st.beta_expander was renamed st.expander in newer
    # Streamlit releases — kept as-is for the pinned version in use.
    expander = st.beta_expander('答えを表示する')
    expander.header(c_answer)


button = st.button('次の問題を表示する')
Exemplo n.º 2
0
def main():
    """Render the Wikipedia NER demo page.

    Collects a Wikipedia page title from the user and, when the
    Recognize button is pressed, hands it to the module-level `sel`
    helper to perform entity recognition.
    """
    st.title('Named Entity Recognition')
    st.subheader('Using Wikipedia content to perform NER')
    user_input = st.text_area("Enter Title of Wikipedia Page", "Type here")
    recognize_clicked = st.button("Recognize")
    if recognize_clicked:
        sel(user_input)
Exemplo n.º 3
0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import streamlit as st
from datetime import time
from datetime import date

# Shared choices for the radio (w5) and selectbox (w7) widgets below.
options = ("female", "male")

# Checkbox, checked by default.
w1 = st.checkbox("I am human", True)

# Integer slider: range 0-100, default 25, step 1; echoed right below.
w2 = st.slider("Age", 0, 100, 25, 1)
st.write("Value 1:", w2)

# Multi-line text area with a default value.
w3 = st.text_area("Comments", "Streamlit is awesomeness!")

# Button; w4 is True only on the run where it was clicked.
w4 = st.button("Click me")

# Radio group, default index 1 ("male").
w5 = st.radio("Gender", options, 1)

# Single-line text input with a default value.
w6 = st.text_input("Text input widget", "i iz input")

# Selectbox, default index 1 ("male").
w7 = st.selectbox("Options", options, 1)

# Time picker defaulting to 08:45.
w8 = st.time_input("Set an alarm for", time(8, 45))

# Date picker defaulting to 2019-07-06.
w9 = st.date_input("A date to celebrate", date(2019, 7, 6))
def main():
    """Spotify skip-prediction page.

    Collects one widget per model feature (sliders for continuous values,
    selectboxes for 0.0/1.0 indicator features) and, on "Predict", feeds
    the assembled feature vector to the module-level `lbm_model` and shows
    the label produced by the module-level `classify` helper.
    """
    import streamlit as st
    # Page banner rendered as raw HTML.
    html_temp = """
    <div style="background-color:#ADD8E6">
    <div style="background-color:Black">
    <h2 style="color:white;text-align:center;"> Spotify Skip Prediction </h2>
    </div>

    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    sp = st.slider('Select session_position', 0, 20)
    er = st.selectbox('Select hist_user_behavior_reason_end_remote',
                      (0.0, 1.0))
    v = st.slider('Select valence', 0.0, 1.0)
    a0 = st.slider('Select acoustic_vector_0', -1.0, 1.0)
    a1 = st.slider('Select acoustic_vector_1', -1.0, 1.0)
    a2 = st.slider('Select acoustic_vector_2', -1.0, 1.0)
    a3 = st.slider('Select acoustic_vector_3', -1.0, 1.0)
    a4 = st.slider('Select acoustic_vector_4', -1.0, 1.0)
    a5 = st.slider('Select acoustic_vector_5', -1.0, 1.0)
    a6 = st.slider('Select acoustic_vector_6', -1.0, 1.0)
    a7 = st.slider('Select acoustic_vector_7', -1.0, 1.0)
    cc = st.selectbox('Select context_type_charts', (0.0, 1.0))
    ep = st.selectbox('Select context_type_editorial_playlist', (0.0, 1.0))
    pp = st.selectbox('Select context_type_personalized_playlist', (0.0, 1.0))
    tr = st.selectbox('Select context_type_radio', (0.0, 1.0))
    uc = st.selectbox('Select context_type_user_collection', (0.0, 1.0))
    sb = st.selectbox('Select hist_user_behavior_reason_start_backbtn',
                      (0.0, 1.0))
    sc = st.selectbox('Select hist_user_behavior_reason_start_clickrow',
                      (0.0, 1.0))
    sf = st.selectbox('Select hist_user_behavior_reason_start_fwdbtn',
                      (0.0, 1.0))

    sr = st.selectbox('Select hist_user_behavior_reason_start_remote',
                      (0.0, 1.0))
    trackdone = st.selectbox(
        'Select hist_user_behavior_reason_start_trackdone', (0.0, 1.0))
    ee = st.selectbox('Select hist_user_behavior_reason_end_endplay',
                      (0.0, 1.0))
    ef = st.selectbox('hist_user_behavior_reason_end_fwdbtn', (0.0, 1.0))
    el = st.selectbox('hist_user_behavior_reason_end_logout', (0.0, 1.0))
    s = st.slider('Select speechiness', 0.0, 1.0)
    o = st.slider('Select organism', 0.0, 1.0)
    et = st.selectbox('Select hist_user_behavior_reason_end_trackdone',
                      (0.0, 1.0))
    m = st.slider('Select mechanism', 0.0, 1.0)
    sl = st.slider('Select session_length', 10, 20)
    cs = st.selectbox('Select context_switch', (0.0, 1.0))
    bp = st.selectbox('Select no_pause_before_play', (0.0, 1.0))
    sbp = st.selectbox('Select short_pause_before_play', (0.0, 1.0))
    seek_f = st.slider('Select hist_user_behavior_n_seekfwd', 0.0, 60.0)
    seek_b = st.slider('Select hist_user_behavior_n_seekback', 0.0, 150.0)
    shuffle = st.selectbox('Select hist_user_behavior_is_shuffle', (0.0, 1.0))
    hr = st.slider('Select hour_of_day', 0.0, 24.0)
    mode = st.selectbox('Select mode', (0.0, 1.0))
    duration = st.slider('Select duration', 30.0, 1800.0)

    pr = st.selectbox('Select premium', (0.0, 1.0))
    est = st.slider('Select us_popularity_estimate', 90.0, 100.0)
    loud = st.slider('Select loudness', -25.0, 0.0)
    live = st.slider('Select liveness', 0.0, 1.0)
    instrument = st.slider('Select instrumentalness', 0.0, 1.0)
    flat = st.slider('Select flatness', 0.5, 1.0)
    yr = st.slider("Select release_year:", 1950, 2019)
    energy = st.slider('Select energy', 0.0, 1.0)
    dance = st.slider('Select danceability', 0.0, 1.0)
    bounce = st.slider('Select bounciness', 0.0, 1.0)
    beat_str = st.slider('Select beat_strength', 0.0, 1.0)
    acous = st.slider('Select acousticness', 0.0, 1.0)
    rangemean = st.slider('Select dyn_range_mean', 0.0, 20.0)
    tempo = st.slider('Select tempo', 50.0, 220.0)
    st_end = st.selectbox('Select hist_user_behavior_reason_start_endplay',
                          (0.0, 1.0))
    st_play = st.selectbox('Select hist_user_behavior_reason_start_playbtn',
                           (0.0, 1.0))

    # Single-row feature matrix in the order expected by lbm_model.
    # NOTE(review): `sb` and `sc` (start_backbtn / start_clickrow) are
    # collected above but NOT included here — confirm the model's feature
    # order intentionally omits them before "fixing" this.
    inputs = [[
        sp, er, v, a0, a1, a2, a3, a4, a5, a6, a7, cc, ep, pp, tr, uc, sf, sr,
        trackdone, ee, ef, el, s, o, et, m, sl, cs, bp, sbp, seek_f, seek_b,
        shuffle, hr, mode, duration, pr, est, loud, live, instrument, flat, yr,
        energy, dance, bounce, beat_str, acous, rangemean, tempo, st_end,
        st_play
    ]]

    if st.button('Predict'):
        st.success(classify(lbm_model.predict(inputs)))
Exemplo n.º 5
0
def main():
    """Invasive Ductal Carcinoma Detection Using CNN.

    Streamlit entry point with three areas selected from the sidebar: a
    Home page describing the IDC dataset, a Login area gating the analysis
    and prediction activities, and a Signup form. Relies on helpers defined
    elsewhere in this app (create_usertable, login_user, generate_hashes,
    verify_hashes, show_image, feature_of, preprocessed_image, add_userdata)
    plus Keras model utilities (load_model, Model, SGD, preprocess_input).
    """
    st.title("Invasive Ductal Carcinoma Detection Using CNN")

    menu = ["Home", "Login", "Signup"]
    submenu = ["Plot", "Visualisasi IDC", "Feature Maps", "Prediction"]

    choice = st.sidebar.selectbox("Menu", menu)
    if choice == "Home":
        # Static dataset description; bare string literals are rendered by
        # Streamlit's "magic" write feature.
        st.subheader("What is Invasive Ductal Carcinoma (IDC)?")
        st.markdown("#### Context")
        """
        Invasive Ductal Carcinoma (IDC) is the most common subtype of all breast cancers. To assign 
        an aggressiveness grade to a whole mount sample, pathologists typically focus on the regions 
        which contain the IDC. As a result, one of the common pre-processing steps for automatic 
        aggressiveness grading is to delineate the exact regions of IDC inside of a whole mount slide.
        """
        st.markdown("#### Content")
        """
        The original dataset consisted of 162 whole mount slide images of Breast Cancer (BCa) specimens 
        scanned at 40x. From that, 277,524 patches of size 50 x 50 were extracted (198,738 IDC negative 
        and 78,786 IDC positive). Each patch’s file name is of the format: uxXyYclassC.png — > example 
        10253idx5x1351y1101class0.png . Where u is the patient ID (10253idx5), X is the x-coordinate of 
        where this patch was cropped from, Y is the y-coordinate of where this patch was cropped from, 
        and C indicates the class where 0 is non-IDC and 1 is IDC.
        """
        st.markdown("#### Acknowledgements")
        """
        The original files are located here: http://gleason.case.edu/webdata/jpi-dl-tutorial/IDC_regular_ps50_idx5.zip
        Citation: https://www.ncbi.nlm.nih.gov/pubmed/27563488 and http://spie.org/Publications/Proceedings/Paper/10.1117/12.2043872
        """
        st.markdown("#### Inspiration")
        """
        Breast cancer is the most common form of cancer in women, and invasive ductal carcinoma (IDC) is 
        the most common form of breast cancer. Accurately identifying and categorizing breast cancer 
        subtypes is an important clinical task, and automated methods can be used to save time and reduce error.
        """
    elif choice == "Login":
        username = st.sidebar.text_input("Username")
        password = st.sidebar.text_input("Password", type='password')
        if st.sidebar.checkbox("Login"):
            create_usertable()
            # NOTE(review): this hashes the just-entered password and then
            # verifies that same password against its own hash, so the
            # verify step is presumably always true; the effective check is
            # whatever login_user does against the DB — confirm intent.
            hashed_pwsd = generate_hashes(password)
            result = login_user(username, verify_hashes(password, hashed_pwsd))
            if result:
                st.success("Welcome {}".format(username))

                activity = st.selectbox("Activity", submenu)
                if activity == "Plot":
                    st.subheader("Data Plot")

                    status = st.radio("Data Distribution",
                                      ("Data raw", "Data preprocessed"))

                    # Show pre-rendered class-distribution plots for the
                    # raw or preprocessed train/test splits.
                    if status == 'Data raw':
                        img = Image.open(
                            os.path.join("data/sns.countplot(y_train).jpeg"))
                        st.image(img, width=300, caption="Data Train")

                        img = Image.open(
                            os.path.join("data/sns.countplot(y_test).jpeg"))
                        st.image(img, width=300, caption="Data Test")
                    else:
                        img = Image.open(
                            os.path.join("data/sns.countplot(y_train2).jpeg"))
                        st.image(img, width=300, caption="Data Train")

                        img = Image.open(
                            os.path.join("data/sns.countplot(y_test2).jpeg"))
                        st.image(img, width=300, caption="Data Test")

                elif activity == "Visualisasi IDC":
                    st.subheader("Visualisasi IDC(-/+)")
                    sample_gambar = st.radio(
                        "Few example of IDC with its coordinate",
                        ("IDC (-)", "IDC (+)"))
                    # Directory 0 holds IDC-negative patches, 1 positive.
                    if sample_gambar == 'IDC (-)':
                        figure_path = glob.glob("gambar visual/0/*.png",
                                                recursive=True)
                        figure = show_image(figure_path)
                        st.pyplot(figure)
                    else:
                        figure_path = glob.glob("gambar visual/1/*.png",
                                                recursive=True)
                        figure = show_image(figure_path)
                        st.pyplot(figure)

                elif activity == "Feature Maps":
                    st.subheader("Feature Maps")
                    feature_maps = st.radio(
                        "Visualization Feature Maps from hidden layer",
                        ("VGG16", "5 Layers Conv2d"))
                    if feature_maps == 'VGG16':
                        model_ = load_model(
                            os.path.join(
                                "models/vgg-model-weights-improvement-the-best.h5"
                            ))
                        # Truncate to the first conv layer so its activations
                        # can be visualized.
                        model_baru = model_.layers[0]  # VGG16-specific
                        model_baru = Model(inputs=model_baru.inputs,
                                           outputs=model_baru.layers[1].output)
                        model_baru.summary()

                        img = Image.open(
                            os.path.join(
                                "gambar visual/0/9178_idx5_x2651_y1251_class0.png"
                            ))
                        img = preprocessed_image(img)
                        img = preprocess_input(img)
                        feature_maps = model_baru.predict(img)

                        figure = feature_of(feature_maps, 8)
                        st.pyplot(figure)
                    else:
                        model_ = load_model(
                            os.path.join(
                                "models/weights-improvement-the-best.h5"))
                        model_baru = model_
                        model_baru = Model(inputs=model_baru.inputs,
                                           outputs=model_baru.layers[1].output)
                        model_baru.summary()

                        img = Image.open(
                            os.path.join(
                                "gambar visual/0/9178_idx5_x2651_y1251_class0.png"
                            ))
                        img = preprocessed_image(img)
                        img = preprocess_input(img)
                        feature_maps = model_baru.predict(img)

                        figure = feature_of(feature_maps, 5)
                        st.pyplot(figure)

                elif activity == "Prediction":
                    st.subheader("Predictive Analytics")

                    # Upload Image
                    image_file = st.file_uploader("Upload Image",
                                                  type=['jpg', 'png', 'jpeg'])

                    # Preprocessed image tensor; stays None until a file is
                    # uploaded so the Predict handler can guard against it.
                    image_test = None
                    if image_file is not None:
                        our_image = Image.open(image_file)
                        st.text("Image Uploaded!")
                        st.image(our_image)

                        # Processed Image
                        image_test = preprocessed_image(our_image)
                    else:
                        st.warning("Please upload the image!")

                    # ML / Predict Image
                    model_choice = st.selectbox("Select Model",
                                                ["VGG16", "5 Layers Conv2d"])
                    if st.button("Predict"):
                        if image_test is None:
                            # Fix: predicting before any image was uploaded
                            # previously raised NameError on image_test.
                            st.warning("Please upload the image!")
                        else:
                            # NOTE(review): SGD's `lr` argument was renamed
                            # `learning_rate` in newer Keras — pinned
                            # version assumed.
                            if model_choice == "VGG16":
                                model_ = load_model(
                                    os.path.join(
                                        "models/vgg-model-weights-improvement-the-best.h5"
                                    ))
                                opt = SGD(lr=0.001, momentum=0.9)
                                model_.compile(optimizer=opt,
                                               loss='categorical_crossentropy',
                                               metrics=['accuracy'])
                                prediction = model_.predict(image_test)
                                prediction_result = np.argmax(prediction[0])

                            elif model_choice == "5 Layers Conv2d":
                                model_ = load_model(
                                    os.path.join(
                                        "models/weights-improvement-the-best.h5"))
                                opt = SGD(lr=0.001, momentum=0.9)
                                model_.compile(optimizer=opt,
                                               loss='categorical_crossentropy',
                                               metrics=['accuracy'])
                                prediction = model_.predict(image_test)
                                prediction_result = np.argmax(prediction[0])

                            # Class 1 is IDC-positive, 0 negative.
                            if prediction_result == 1:
                                st.warning("Patient's positive IDC!")
                                st.error(
                                    "Please seek for treatment and keep healthy lifestyle!"
                                )
                            else:
                                st.success("It's negative!")
            else:
                st.warning("Incorrect Username/Password")

    elif choice == "Signup":
        new_username = st.text_input("user name")
        new_password = st.text_input("Password", type='password')

        confirm_password = st.text_input("Confirm Password", type='password')
        # Live feedback while typing; submission below is not blocked on it.
        if new_password == confirm_password:
            st.success("Password Confirmed")
        else:
            st.warning("Passwords not the same")

        if st.button("Submit"):
            create_usertable()
            hashed_new_password = generate_hashes(new_password)
            add_userdata(new_username, hashed_new_password)
            st.success("You have successfully created a new account")
            st.info("Login to Get Started ")
Exemplo n.º 6
0
                     title_opts=opts.TitleOpts(title="球队防守对比"),
                 ))
        st_pyecharts(c)

    if '组织' in MoreStats:
        c = (Bar().add_xaxis(
            ["助攻", "创造绝佳机会", "场均关键传球", "前场传球成功率",
             "场均夺回球权"]).add_yaxis(option1, get_stats(team1, 3)).add_yaxis(
                 option2, get_stats(team2, 3)).set_global_opts(
                     xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(
                         rotate=-10)),
                     title_opts=opts.TitleOpts(title="球队组织对比"),
                 ))
        st_pyecharts(c)

    if st.button('开始预测比赛'):
        '⏳⏳⏳...'

        # Add a placeholder
        latest_iteration = st.empty()
        bar = st.progress(0)

        for i in range(100):
            # Update the progress bar with each iteration.
            latest_iteration.text(f'Iteration {i + 1}')
            bar.progress(i + 1)
            time.sleep(0.1)

        '...预测结束,输出比赛结果'
        x_data = ['主队胜', '平局', '客队胜']
        y_data = [24.29, 53.78, 21.93]
Exemplo n.º 7
0
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import streamlit as st

# Demo image URL used by every column below.
CAT_IMAGE = "https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80"

# Clicking re-runs the script; the column layout below should keep its
# widths stable across re-runs (manual regression check for layout shift).
if st.button("Layout should not shift when this is pressed"):
    st.write("Pressed!")

# Same-width columns: three equal columns, one image each.
c1, c2, c3 = st.columns(3)
c1.image(CAT_IMAGE)
c2.image(CAT_IMAGE)
c3.image(CAT_IMAGE)

# Variable-width columns with relative widths 1:2:3:4.
for c in st.columns((1, 2, 3, 4)):
    c.image(CAT_IMAGE)
Exemplo n.º 8
0
def solution_page():
    """Render the task-solving page.

    The user picks an input source (file, database preset, or random
    generation), then a solving method, and the chosen condition is solved
    via the module-level `show_answer` helper. Relies on SessionState,
    generate_random_condition, get_presets_conditions, file_selector,
    parse_condition_csv and draw_graphic_of_condition defined elsewhere.
    """
    st.title('Сторінка з розв\'язанням задачі')

    # Shared method-selector options; the original duplicated this literal
    # in every input-type branch (and again inside the "both" branch).
    method_label = 'Оберіть метод вирішення задачі'
    methods = [
        'Метод динамічного програмування',
        'Жадібний алгоритм + рекурсивний покращувач', 'Обидва метода'
    ]

    session_state = SessionState.get(choose_button=False,
                                     input_type='',
                                     random='',
                                     file='',
                                     db='')
    session_state.input_type = st.selectbox('Оберіть спосіб вхідних даних',
                                            ['File', 'Data Base', 'Random'])

    if session_state.input_type == 'Random':
        # Parameters for generating a random problem instance.
        quantity = st.number_input('Кількість експертів',
                                   step=1,
                                   value=50,
                                   min_value=1,
                                   max_value=500)
        min_val = st.number_input('Мінімальне значеня',
                                  step=1,
                                  value=1,
                                  min_value=1,
                                  max_value=99999)
        max_val = st.number_input('Максимальне значеня',
                                  step=1,
                                  value=1000,
                                  min_value=1,
                                  max_value=99999)
        max_len = st.number_input('Максимальна тривалість роботи експерта',
                                  step=1,
                                  value=200,
                                  min_value=1,
                                  max_value=99999)
        distribution = st.selectbox('Оберіть розподіл випадкових велечин', [
            'Рівномірний', 'Усічений нормальний',
            'Рівномірний для відрізків обмеженної довжини'
        ])

        method = st.selectbox(method_label, methods)

        if st.button('Розв\'язати'):
            condition = generate_random_condition(quantity, min_val, max_val,
                                                  distribution, max_len)
            st.write('Згенерували наступну умову: {}'.format(condition))
            st.bokeh_chart(draw_graphic_of_condition(condition))

            if method == methods[2]:
                # "Both methods": run each solver for comparison.
                show_answer(condition, methods[0])
                show_answer(condition, methods[1])
            else:
                show_answer(condition, method)

    if session_state.input_type == 'Data Base':
        conditions = get_presets_conditions()
        st.table(conditions)
        session_state.condition_id2solve = st.number_input(
            'Введіть ID',
            step=1,
            value=1,
            min_value=1,
            max_value=len(conditions))

        # Look up the chosen preset by its task id.
        # NOTE(review): still raises IndexError when no row matches the
        # entered ID — confirm task_ids are contiguous 1..len(conditions).
        matching = [
            cond for cond in conditions
            if cond.get('task_id') == session_state.condition_id2solve
        ]
        condition2solve = matching[0]
        method = st.selectbox(method_label, methods)

        if st.button('Розв\'язати'):
            show_answer(condition2solve.get('experts', []), method)

    if session_state.input_type == 'File':
        filename = file_selector()
        st.write('Ви обрали `%s`' % filename)
        condition = parse_condition_csv(filename)
        st.bokeh_chart(draw_graphic_of_condition(condition))
        method = st.selectbox(method_label, methods)
        if st.button('Розв\'язати'):
            show_answer(condition, method)
Exemplo n.º 9
0
# Let the user pick feature columns to discard before clustering.
cols_drop = st.multiselect("Select columns to drop", data.columns)
# Drop the selected columns (no-op when nothing is selected).
data = data.drop(cols_drop, axis=1)

# Show the resulting (feature-engineered) dataframe.
st.text("Once features have been engineered, it looks like this:")
st.dataframe(data)

# Number of clusters for KMeans, chosen interactively (2-10).
k = st.slider("Select the number of clusters", 2, 10)

# Run clustering only when the button is pressed.
if st.button("Cluster Results"):
    # Drop the label column so only features feed the clusterer.
    # NOTE(review): assumes `data` still contains a 'diagnosis' column at
    # this point — dropping it above via the multiselect would raise here.
    X = data.drop('diagnosis', axis=1).values
    # Standardize features to zero mean / unit variance.
    X = StandardScaler().fit_transform(X)

    # KMeans with k-means++ initialization and 10 restarts.
    km = KMeans(n_clusters=k, init="k-means++", n_init=10)

    # Fit and get a cluster label per row.
    km_pred = km.fit_predict(X)

    # Two side-by-side plots sharing the Y axis (plotting continues in
    # code beyond this excerpt).
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
Exemplo n.º 10
0
def main():
    """ Around The World - Travel Management System.

    Streamlit employee portal: each sidebar choice renders one data-entry /
    view panel backed by module-level DB helpers (viewAllCustomers,
    addCustomerData, addPreferenceData, ...) and a module-level `cnxn`
    database connection.
    """
    st.sidebar.title("Select Required Action")
    choice = st.sidebar.radio(label="View and Update Details",
                              options=[
                                  "Home", "Customer Registration",
                                  "Customer Preference",
                                  "Customer Preferred Destinations",
                                  "Visa Status", "Booking", "Payment",
                                  "Customer Feedback"
                              ])

    if choice == "Home":
        # Landing banner; per-page background colors are injected as CSS.
        st.markdown(
            "<h1 style='text-align: center; color: black;'>Around The World - Employee Portal</h1>",
            unsafe_allow_html=True)
        st.markdown(
            "<h4 style='text-align: center; color: black;'>The portal FETCHES, INSERTS and UPDATES data in <i>real-time<i>.</h4>",
            unsafe_allow_html=True)
        with open("style.css") as f:
            st.markdown('<style>{}</style>'.format(f.read()),
                        unsafe_allow_html=True)
            st.markdown('<style>body{background-color: #CAFFCA;}</style>',
                        unsafe_allow_html=True)

        # NOTE(review): hardcoded absolute Windows path — breaks on any
        # other machine; consider a path relative to the app directory.
        image = Image.open("C:\\Users\\Logo.png")
        st.image(image, caption='', width=None)

    elif choice == "Customer Registration":
        st.markdown('<style>body{background-color: #FFC9C9;}</style>',
                    unsafe_allow_html=True)
        st.subheader("Check existing customers")
        if st.button("View customers"):
            viewAllCustomers()
        # Registration form: one input per customer attribute.
        st.subheader("New Customer Registration: Enter Customer's Details")
        CustFirstName = st.text_input("First Name")
        CustLastName = st.text_input("Last Name")
        CustBirthDate = st.date_input("Birth Date")
        CustPhoneNo = st.text_input("Customer Phone No")
        CustStreetName = st.text_input("Street Name")
        CustZipCode = st.text_input("ZipCode")
        CustEmail = st.text_input("Customer Email")
        CustPassword = st.text_input("Customer Password", type='password')
        CustAge = st.number_input("Age", 5)
        CustGender = st.radio("Gender", tuple(gender.keys()))
        if st.button("Register"):
            addCustomerData(CustFirstName, CustLastName, CustBirthDate,
                            CustPhoneNo, CustStreetName, CustZipCode,
                            CustEmail, CustPassword, CustAge, CustGender)
            st.success("Customer was successfully registered!")

    elif choice == "Customer Preference":
        st.markdown('<style>body{background-color: #C9C9FF;}</style>',
                    unsafe_allow_html=True)
        st.subheader("Check existing customer preferences")
        if st.button("View preferences"):
            viewAllCustomerPreferences()
        st.subheader(
            "New Preference Registration: Enter Customer's Preference")
        CustID = st.number_input("Customer ID", 1)
        CustBudget = st.number_input("Budget", 100.00)
        PrefPackageType = st.text_input("Package Type")  ##change to drop-down
        if st.button("Save Preference"):
            addPreferenceData(CustID, CustBudget, PrefPackageType)
            st.success("Preference successfully recorded!")

    elif choice == "Customer Preferred Destinations":
        st.markdown('<style>body{background-color: #CAFFCA;}</style>',
                    unsafe_allow_html=True)
        st.subheader("Check existing customer preferred destinations")
        if st.button("View destination preferences"):
            viewAllCustomerPrefCities()
        st.subheader(
            "New Destination Registration: Provide Minimum 2 Preferred Destinations"
        )
        CustPrefID1 = st.number_input("Customer Preference ID", 1)
        CityID1 = st.number_input("First Preferred City ID", 1)
        CustPrefID2 = st.number_input("Re-enter Customer Preference ID", 1)
        CityID2 = st.number_input("Second Preferred City ID", 1)
        if st.button("Save Destinations"):
            addPreferenceCityData(CustPrefID1, CityID1, CustPrefID2, CityID2)
            st.success("Destinations successfully recorded!")
            st.info("Re-enter IDs to provide more preferences.")

    # NOTE(review): commits and CLOSES the shared connection on every
    # script run — a second run would use a closed connection unless cnxn
    # is recreated elsewhere; confirm connection lifecycle.
    cnxn.commit()
    cnxn.close()
Exemplo n.º 11
0
# Path of the per-session saved image; stays None until a source image is
# chosen (upload or example selection), which gates the block below.
saved_image = None

if uploaded_image is not None:
    # Only PNG uploads are supported; anything else halts this script run.
    if uploaded_image.type != 'image/png':
        st.text("File type {} not supported".format(uploaded_image.type))
        st.stop()

    source_image = Image.open(uploaded_image)
    # Per-session file name so concurrent users don't overwrite each other.
    # NOTE(review): presumably ctx.session_id is the Streamlit session id —
    # confirm against where `ctx` is created.
    saved_image = os.path.join('./data/uploads',
                               "{}-currentimg.png".format(ctx.session_id))
elif selected_image_path != '':
    # Fall back to a bundled example image.
    source_image = Image.open(os.path.join('./examples/', selected_image_path))
    saved_image = os.path.join('./data/uploads',
                               "{}-currentimg.png".format(ctx.session_id))

if saved_image:
    # Apply the user-configured transformation pipeline, then scale the
    # result to a fixed 500px width, preserving the aspect ratio.
    transform = transformations.to_pytorch()
    transformed_image = transform(source_image)
    width, height = transformed_image.size
    transformed_image = transformed_image.resize(
        (500, int(height * 500 / width)))
    # TODO check if there are transformed_images
    transformed_image.save(saved_image)

    if len(state.applied_transforms) > 0:
        if st.button("Regenerate"):
            # Clicking re-runs the script, which re-applies the transforms
            # above; no explicit work is needed in the handler.
            pass

    st.image(saved_image)
Exemplo n.º 12
0
def main():
    """Semi Automated ML App with Streamlit.

    Three activities selected from the sidebar: Data Exploration (summary
    stats and column inspection), Data Quality Validation (missing values,
    outliers, distribution shape), and Data Visualization (correlation
    heatmap plus user-configurable charts). Each activity reads its own
    uploaded dataset.
    """
    st.title("Data Analysis App")
    activities = ["Data Exploration","Data Quality Validation","Data Visualization"]
    choice = st.sidebar.selectbox("Select Activities",activities)

    if choice == 'Data Exploration':
        st.subheader("Data Exploration")

        data = st.file_uploader("Upload a Dataset", type=["csv", "txt"])
        if data is not None:
            df = pd.read_csv(data)
            st.dataframe(df)

            if st.checkbox("Top 5 and last 5 records"):
                st.write(df.head())
                st.write(df.tail())

            if st.checkbox("Data Dimensions"):
                st.write("Number of Rows: "+str(df.shape[0]))
                st.write("Number of Columns: "+str(df.shape[1]))

            if st.checkbox("Column names"):
                all_columns = df.columns.to_list()
                st.write(all_columns)

            if st.checkbox("Statistical summary"):
                st.write(df.describe())

            if st.checkbox("Show Selected Columns"):
                all_columns = df.columns.to_list()
                selected_columns = st.multiselect("Select Columns",all_columns)
                new_df = df[selected_columns]
                st.dataframe(new_df)

            if st.checkbox("Numeric columns"):
                st.write(df.select_dtypes(include=['int16', 'int32', 'int64', 'float16', 'float32', 'float64']).columns.tolist())

            if st.checkbox("Categorical columns"):
                st.write(df.select_dtypes(exclude=['int', 'float']).columns)

            if st.checkbox("Categories in categorical columns"):
                df1 = df.select_dtypes(exclude=['int', 'float'])
                for col in df1.columns:
                    st.write(df1[col].value_counts())

    elif choice == 'Data Quality Validation':
        st.subheader("Data quality validation")
        data = st.file_uploader("Upload a Dataset", type=["csv", "txt", "xlsx"])
        if data is not None:
            # Fix: .xlsx uploads were previously parsed with read_csv,
            # which fails on Excel's binary format.
            if data.name.lower().endswith((".xlsx", ".xls")):
                df = pd.read_excel(data)
            else:
                df = pd.read_csv(data)
            st.dataframe(df.head())

            if st.checkbox("Missing Value Counts"):
                st.write(df.isnull().sum())

            if st.checkbox("Outliers count in each column"):
                # IQR rule: values beyond 1.5*IQR from the quartiles.
                Q1 = df.quantile(0.25)
                Q3 = df.quantile(0.75)
                IQR = Q3 - Q1
                st.write(((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR))).sum()) 

            if st.checkbox("Distribution details"):
                df1 = df.select_dtypes(include=['int', 'float'])
                all_columns = df1.columns.to_list()
                column_to_plot = st.selectbox("Select 1 Column",all_columns)
                st.write("Skewness: %f" % df[column_to_plot].skew())
                st.write("Kurtosis: %f" % df[column_to_plot].kurt())
                st.write('***If skewness is less than -1 or greater than 1, the distribution is highly skewed.If skewness is between -1 and -0.5 or between 0.5 and 1, the distribution is moderately skewed.If skewness is between -0.5 and 0.5, the distribution is approximately symmetric.A standard normal distribution has kurtosis of 3 ***')   

    elif choice == 'Data Visualization':
        st.subheader("Data Visualization")
        data = st.file_uploader("Upload a Dataset", type=["csv", "txt", "xlsx"])
        if data is not None:
            # Fix: dispatch on extension so Excel uploads parse correctly
            # (read_csv cannot read .xlsx).
            if data.name.lower().endswith((".xlsx", ".xls")):
                df = pd.read_excel(data)
            else:
                df = pd.read_csv(data)
            st.dataframe(df.head())

            if st.checkbox("Correlation Plot(Seaborn)"):
                # NOTE(review): df.corr() on a frame with non-numeric
                # columns needs numeric_only=True on newer pandas — confirm
                # the pinned pandas version.
                st.write(sns.heatmap(df.corr(),annot=True))
                st.pyplot()

            # Customizable Plot
            all_columns_names = df.columns.tolist()
            type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box"])
            selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names)

            if st.button("Generate Plot"):
                st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names))

                # Plot By Streamlit
                if type_of_plot == 'area':
                    cust_data = df[selected_columns_names]
                    st.area_chart(cust_data)

                elif type_of_plot == 'bar':
                    cust_data = df[selected_columns_names]
                    st.bar_chart(cust_data)

                elif type_of_plot == 'line':
                    cust_data = df[selected_columns_names]
                    st.line_chart(cust_data)

                # Custom Plot: hist/box fall through to pandas plotting.
                elif type_of_plot:
                    cust_plot= df[selected_columns_names].plot(kind=type_of_plot)
                    st.write(cust_plot)
                    st.pyplot()
Exemplo n.º 13
0
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Simple demo page: show the head of iris.csv and one scatter plot after
# the user presses "submit". `file` is collected but the script reads the
# local iris.csv regardless — NOTE(review): presumably intentional for the
# demo; confirm whether the upload should be used instead.
name = st.text_input("User Name", "Indra Neela")
file = st.file_uploader("select a file")
click = st.button("submit")
if click:
    st.write("Output 1 - Top 5 rows of the data frame")
    df = pd.read_csv("iris.csv")
    # (A bare `df.head()` expression here did nothing; only the st.write
    # call actually renders the preview.)
    st.write(df.head())

    st.write("Output 2 - Display the scatter plot between petal_length and sepal_length")
    fig = plt.figure()

    plt.scatter(df["petal_length"], df["sepal_length"])

    plt.xlabel("Petal Length")
    plt.ylabel("Sepal Length")

    st.write(fig)
def main():
    """Iris Dataset EDA app.

    Loads the iris CSV from a public gist (cached across reruns), then
    renders optional sections: raw-data preview, column list, summary
    statistics, single-column view, plots, and per-species images.
    """

    # Title and Subheader
    st.title("Iris Dataset EDA App")
    st.subheader("EDA Web App with Streamlit ")

    DATA_URL = (
        'https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv'
    )

    @st.cache(persist=True, show_spinner=True)
    def load_data():
        # Download once and normalise the header names used below.
        data = pd.read_csv(DATA_URL)
        data.columns = ('sepal_length', 'sepal_width', 'petal_length',
                        'petal_width', 'species')
        return data

    # Placeholder text, replaced once the (cached) download finishes.
    data_load_state = st.text('Loading data...')
    data = load_data()
    data_load_state.text('Loading data...Completed!')

    # Show Entire Dataframe
    if st.checkbox("View DataFrame"):
        st.dataframe(data)

    # Show column names
    if st.checkbox("View All Column Names"):
        st.text("Columns:")
        st.write(data.columns)

    if st.checkbox("Show Summary of Dataset"):
        st.write(data.describe())
        st.write("There are", len(data), "rows and", data.shape[1],
                 "columns in the dataset")

    # Single-column selection
    if st.checkbox("View Single Column's Data"):

        species_option = st.selectbox(
            'Select Columns', ('sepal_length', 'sepal_width', 'petal_length',
                               'petal_width', 'species'))
        # The options are exactly the dataframe's columns, so one lookup
        # replaces the original if/elif chain over every column name.
        if species_option in data.columns:
            st.write(data[species_option])
        else:
            st.write("Select A Column")

    # Show Plots
    if st.checkbox("Show Plots"):
        st.write("_" * 10)
        data.plot(kind='scatter', x='sepal_length', y='sepal_width')
        st.pyplot()
        st.write(
            "---------------- 2D Scatter Plot of Sepal_length vs Sepal_width for all the Species ---------------- "
        )
        st.write("_" * 10)
        # Bug fix: seaborn renamed pairplot's `size` argument to `height`;
        # the old keyword raises TypeError on current seaborn releases.
        st.write(sns.pairplot(data, hue="species", height=3))
        # Use Matplotlib to render seaborn
        st.pyplot()
        st.write(
            "---------------- Pairplot of different species ----------------")
        st.write("_" * 10)

        # Bug fix: st.bar_chart cannot render a DataFrameGroupBy object
        # (the original passed `data.groupby('species')` directly); chart
        # the per-species row counts instead.
        v_counts = data['species'].value_counts()
        st.bar_chart(v_counts)
        st.write(
            "---------------- Bar Plot of Groups or Counts ----------------")
        st.write("_" * 10)

    # Iris Image Manipulation
    @st.cache
    def load_image(img):
        # NOTE(review): Image / ImageEnhance / os are assumed to be
        # imported at the top of the file (PIL) — confirm.
        im = Image.open(os.path.join(img))
        return im

    # Image Type
    if st.checkbox("Show/Hide Images"):

        species_type = st.radio(
            'Have a look at the images of different Iris Species!',
            ('Setosa', 'Versicolor', 'Virginica'))

        if species_type == 'Setosa':
            st.text("Showing Setosa Species")
            my_image = load_image('images/setosa.png')
        elif species_type == 'Versicolor':
            st.text("Showing Versicolor Species")
            my_image = load_image('images/versicolor.png')
        elif species_type == 'Virginica':
            st.text("Showing Virginica Species")
            my_image = load_image('images/virginica.png')

        # Contrast/zoom controls; fixed defaults when the box is unchecked.
        if st.checkbox("Enhance Image"):
            enh = ImageEnhance.Contrast(my_image)
            num = st.slider("Contrast", 1.0, 2.0)
            img_width = st.slider(
                "Zoom in the Image (Set Image Width in Pixels)", 300, 700)
            st.image(enh.enhance(num), width=img_width)
        else:
            img_width = 300
            num = 1.2
            enh = ImageEnhance.Contrast(my_image)
            st.image(enh.enhance(num), width=img_width)

    # About
    if st.button("About App"):
        st.subheader("Iris Dataset EDA App - Developed by Deepankar Kotnala")
        st.text("Built with Streamlit")
Exemplo n.º 15
0
import torch
import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Demo page: sample text continuations from GPT-2 for a user-supplied seed.
st.title('Natural Language Generation with GPT-2')
st.markdown(
    "A [simple demonstration](https://github.com/CaliberAI/streamlit-get-stories-aylien) of using [Streamlit](https://streamlit.io/) with [HuggingFace's GPT-2](https://github.com/huggingface/transformers/)."
)

seed = st.text_input('Seed', 'The dog jumped')
num_return_sequences = st.number_input('Number of generated sequences', 1, 100,
                                       20)
max_length = st.number_input('Length of sequences', 5, 100, 20)
go = st.button('Generate')

if go:
    try:
        model = GPT2LMHeadModel.from_pretrained('gpt2')
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        # Batch of one: (1, seq_len) token ids for the seed text.
        input_ids = torch.tensor(tokenizer.encode(seed)).unsqueeze(0)
        output = model.generate(input_ids=input_ids,
                                max_length=max_length,
                                num_return_sequences=num_return_sequences,
                                do_sample=True,
                                length_penalty=10)
        # Bug fix: the original nested loop decoded one token id at a time,
        # producing one entry per *token* instead of per sequence. Decode
        # each generated sequence whole, then display the results.
        sequences = []
        for generated in output:
            sequences.append(
                tokenizer.decode(generated.tolist(),
                                 skip_special_tokens=True))
        for sequence in sequences:
            st.write(sequence)
    except Exception as error:
        # Bug fix: the original `try` had no matching `except` clause
        # (a SyntaxError). Surface download/generation failures in the UI.
        st.error('Generation failed: {}'.format(error))
Exemplo n.º 16
0
def write():
    """Render the bank-loan approval prediction page.

    Collects applicant features from sidebar widgets, one-hot encodes them
    to match the RFE-selected feature columns, then predicts Accept/Reject
    with a user-chosen pre-trained classifier.
    """

    with st.spinner("Loading prediction ..."):

        st.title('Prediction')
        st.write('Will the bank loan be accepted or declined?...')

        # Columns chosen earlier by RFE; the pre-trained models expect
        # exactly this column set.
        X_fs = pd.read_csv('dataset/SelectedFeatures_dataframe.csv')
        topFeaturesRFE = np.array(X_fs.columns)
        '''
		### Predict - with Pre-trained Model

		'''
        st.sidebar.title("Input for prediction")

        def user_input_features():
            # Build a single-row DataFrame from all sidebar widget values.
            Employment_Type = st.sidebar.selectbox(
                "Choose your employment type.",
                [
                    'employer', 'self_employed', 'government', 'employee',
                    'fresh_graduate'
                ],
            )

            More_Than_One_Products = st.sidebar.selectbox(
                "Do you have more than one bank product?",
                ['yes', 'no'],
            )

            Credit_Card_types = st.sidebar.selectbox(
                "Choose your credit card type.",
                ['platinum', 'normal', 'gold'],
            )

            Property_Type = st.sidebar.selectbox(
                "Choose your property type.",
                ['condominium', 'bungalow', 'terrace', 'flat'],
            )

            State = st.sidebar.selectbox(
                "Choose a your state.",
                [
                    'Johor', 'Selangor', 'Kuala Lumpur', 'Penang',
                    'Negeri Sembilan', 'Sarawak', 'Sabah', 'Terengganu',
                    'Kedah'
                ],
            )

            Credit_Card_Exceed_Months = st.sidebar.slider(
                'Credit card exceed months', min_value=1, max_value=7, step=1)
            Loan_Tenure_Year = st.sidebar.slider('Loan tenure year',
                                                 min_value=10.0,
                                                 max_value=24.0,
                                                 step=1.0)
            Number_of_Dependents = st.sidebar.slider('Number of dependents',
                                                     min_value=2,
                                                     max_value=6,
                                                     step=1)
            Years_to_Financial_Freedom = st.sidebar.slider(
                'Year to financial freedom',
                min_value=5.0,
                max_value=19.0,
                step=1.0)
            Number_of_Credit_Card_Facility = st.sidebar.slider(
                'Number of credit card facility',
                min_value=2.0,
                max_value=6.0,
                step=1.0)
            Number_of_Properties = st.sidebar.slider('Number of properties',
                                                     min_value=2.0,
                                                     max_value=5.0,
                                                     step=1.0)
            Number_of_Bank_Products = st.sidebar.slider(
                'Number of bank products',
                min_value=1.0,
                max_value=5.0,
                step=1.0)
            Number_of_Loan_to_Approve = st.sidebar.slider(
                'Number of loan to approve', min_value=1, max_value=3, step=1)
            Years_for_Property_to_Completion = st.sidebar.slider(
                'Years for property to completion',
                min_value=10.0,
                max_value=13.0,
                step=1.0)
            Number_of_Side_Income = st.sidebar.slider('Number of side income',
                                                      min_value=1.0,
                                                      max_value=3.0,
                                                      step=1.0)

            Loan_Amount = st.sidebar.slider('Loan amount',
                                            min_value=0.0,
                                            max_value=1000000.0,
                                            step=1.0)
            Monthly_Salary = st.sidebar.slider('Monthly salary',
                                               min_value=0.0,
                                               max_value=100000.0,
                                               step=1.0)
            Total_Sum_of_Loan = st.sidebar.slider('Total sum of loan',
                                                  min_value=0.0,
                                                  max_value=10000000.0,
                                                  step=1.0)
            Total_Income_for_Join_Application = st.sidebar.slider(
                'Total income for join application',
                min_value=0.0,
                max_value=100000.0,
                step=1.0)
            Score = st.sidebar.slider('Score ',
                                      min_value=6,
                                      max_value=9,
                                      step=1)

            data = {
                'Credit_Card_Exceed_Months': Credit_Card_Exceed_Months,
                'Employment_Type': Employment_Type,
                'Loan_Amount': Loan_Amount,
                'Loan_Tenure_Year': Loan_Tenure_Year,
                'More_Than_One_Products': More_Than_One_Products,
                'Credit_Card_types': Credit_Card_types,
                'Number_of_Dependents': Number_of_Dependents,
                'Years_to_Financial_Freedom': Years_to_Financial_Freedom,
                'Number_of_Credit_Card_Facility':
                Number_of_Credit_Card_Facility,
                'Number_of_Properties': Number_of_Properties,
                'Number_of_Bank_Products': Number_of_Bank_Products,
                'Number_of_Loan_to_Approve': Number_of_Loan_to_Approve,
                'Property_Type': Property_Type,
                # Bug fix: this key previously mapped to the literal string
                # 'Years_for_Property_to_Completion' instead of the widget
                # value, so the slider input was silently discarded.
                'Years_for_Property_to_Completion':
                Years_for_Property_to_Completion,
                'State': State,
                'Number_of_Side_Income': Number_of_Side_Income,
                'Monthly_Salary': Monthly_Salary,
                'Total_Sum_of_Loan': Total_Sum_of_Loan,
                'Total_Income_for_Join_Application':
                Total_Income_for_Join_Application,
                'Score': Score
            }
            features = pd.DataFrame(data, index=[0])
            return features

        input_df = user_input_features()

        # Bug fix: one-hot encode the input row and align it *by column
        # name* to the model's expected feature columns (missing dummy
        # columns become 0). The original iterated over raw dummy values
        # and compared them to column names, which zeroed every feature.
        dummies = pd.get_dummies(input_df)
        input_X_test = dummies.reindex(columns=list(topFeaturesRFE),
                                       fill_value=0)

        classifier_user = st.selectbox(
            "Choose desire classifier.",
            [
                'DecisionTreeClassifier', 'RandomForestClassifier',
                'GradientBoostingClassifier'
            ],
        )
        '''
		### Predict Result: 
		'''
        import joblib

        if st.button('Predict!'):
            # joblib.load accepts a path directly; the original wrapped it
            # in an unclosed open(...) and leaked the file handle.
            loaded_model = joblib.load('pretrained_classifier/' +
                                       classifier_user + ".sav")
            result = loaded_model.predict(input_X_test)
            result_proba = loaded_model.predict_proba(input_X_test)
            st.text("The probability for Reject is : {} ".format(
                result_proba[0][0]))
            st.text("The probability for Accept is : {} ".format(
                result_proba[0][1]))
            st.text("Hence, the predicted result is: {} ".format(
                list(map(lambda x: 'Accept'
                         if (x == 1) else 'Reject', result))[0]))
    key = st.sidebar.selectbox('Choose a musical key', keys)

 #   st.sidebar.write('key = {}'.format(key))

    if (key == 'C'):
        transposition = 0
    else:
        transposition = 12 - keys.index(key)  # transpose leftward
    transposed_generated_music = transpose_sequence(generated_music, \
                                        transposition = transposition)
    transposed_seed_music = transpose_sequence(seed_music, transposition = \
                                           transposition)

    bpm = st.sidebar.number_input('Set the bpm (beats per minute) in the '\
                        'range [20, 180]', min_value = 20, max_value = 180,\
                         value = 60, key = 'bpm')

 #  st.sidebar.write('bpm = {}'.format(bpm))

    for music_type in ['seed', 'generated']:                                                             
        if (st.button('Create MIDI File for the ' + music_type.title() + \
                      ' Music', key = music_type)):
            fpath = './midi_output/{0}_{1}.mid'.format(music_type, session_id)
            exec('convert_to_midi(transposed_{0}_music, '.format(music_type) + \
                 'bpm = bpm, output_file = fpath)')
            st.markdown(get_binary_file_downloader_html(fpath, 'MIDI'), unsafe_allow_html = True)


    

Exemplo n.º 18
0
# Class labels (Korean dish names) in the order the classifier outputs them.
class_names = [
    '후라이드치킨', '간장게장', '갈비구이', '갈비찜', '갈비탕', '갈치구이', '갈치조림', '감자전', '감자조림',
    '감자채볶음', '감자탕', '갓김치', '건새우볶음', '경단', '계란국', '계란말이', '계란찜', '계란후라이',
    '고등어구이', '고등어조림', '고사리나물', '고추장진미채볶음', '고추튀김', '곱창구이', '곱창전골', '과메기', '김밥',
    '김치볶음밥', '김치전', '김치찌개', '김치찜', '깍두기', '깻잎장아찌', '꼬막찜', '꽁치조림', '꽈리고추무침',
    '꿀떡', '나박김치', '누룽지', '닭갈비', '닭계장', '닭볶음탕', '더덕구이', '도라지무침', '도토리묵', '동그랑땡',
    '동태찌개', '된장찌개', '두부김치', '두부조림', '땅콩조림', '떡갈비', '떡꼬치', '떡만두국', '떡볶이', '라면',
    '라볶이', '막국수', '만두', '매운탕', '멍게', '메추리알장조림', '멸치볶음', '무국', '무생채', '물냉면',
    '물회', '미역국', '미역줄기볶음', '불고기', '전복죽'
]

# Breakfast section: show the sample photo, then classify it on demand.
st.subheader('아침')
breakfast_photo = Image.open("./Img_test_omelet.jpg")
st.image(breakfast_photo, width=400, caption="입력 데이터 : 계란말이")

if st.button("아침 분석"):
    # Reload the image at the model's expected input size (180x180),
    # build a batch of one, and run the saved Keras model.
    sample_path = "./Img_test_omelet.jpg"
    sample = keras.preprocessing.image.load_img(sample_path,
                                                target_size=(180, 180))
    batch = tf.expand_dims(keras.preprocessing.image.img_to_array(sample),
                           0)  # Create a batch
    classifier = keras.models.load_model('./kf_model.h5')
    scores = tf.nn.softmax(classifier.predict(batch)[0])

    # Report the top class and its softmax confidence.
    st.text("예측값: {}(정확도 {:.2f}%)".format(class_names[np.argmax(scores)],
                                          100 * np.max(scores)))

st.subheader('점심')
    text = link.split('=')[1]
    return f'<a target="_blank" href="{link}">{text}</a>'


if __name__ == '__main__':

    # Normalise the free-text query into valid Stack Overflow tags.
    stack_tags = stack.read_stackoverflow_tags()
    searchTerms = st.text_input('Search something in stackoverflow:')
    tags = stack.clean_tags(searchTerms)
    tags = stack.remove_invalid_tags(tags, stack_tags)
    st.write("Stackoverflow Tags:", tags)

    # AND = only documents containing every term; OR = any term.
    operator = 'OR'
    if st.checkbox('Search only documents with ALL terms'):
        operator = 'AND'

    if st.button("Start search"):
        try:
            questions_df = stack.refresh_stackoverflow(tags,
                                                       tab='Frequent',
                                                       pages=2)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt; catch ordinary errors only.
            st.write(
                "Oops... Não conseguimos atualizar os dados do stackoverflow sobre este tema :( "
            )

        # Context manager so the index file is closed promptly
        # (the original open(...) handle was never closed).
        with open("stackoverflow_InvertedIndex.json") as index_file:
            invertedList = json.load(index_file)
        docs_index = stack.simple_lookup_query(searchTerms, invertedList)
Exemplo n.º 20
0
# Placeholder for a token-frequency table; nothing in this section assigns
# it — NOTE(review): confirm where it is populated later in the file.
freq_count = None


@st.cache(allow_output_mutation=True)
def get_positive_data():
    """Return the cached (tweets, labels) pair for positive examples.

    `allow_output_mutation` lets the cached lists act as app-wide mutable
    storage: callers append to them in place across Streamlit reruns.
    """
    tweets, labels = [], []
    return tweets, labels


@st.cache(allow_output_mutation=True)
def get_negative_data():
    """Return the cached (tweets, labels) pair for negative examples.

    `allow_output_mutation` lets the cached lists act as app-wide mutable
    storage: callers append to them in place across Streamlit reruns.
    """
    tweets, labels = [], []
    return tweets, labels


# NOTE(review): `positive_div` / `negative_div` are presumably layout
# containers created earlier in the file — confirm.
with positive_div:
    positive_input_text = st.text_input(label="Enter positve tweet")
    add_pos_tweet = st.button("Add +ve tweet")

with negative_div:
    negative_input_text = st.text_input(label="Enter negative tweet")
    add_neg_tweet = st.button("Add -ve tweet")

create_vocabulary = st.button("Create vocabulary")

# Each button press appends the typed tweet and its sentiment label
# ("+" or "-") to the cached, mutable lists shared across reruns.
if add_pos_tweet and positive_input_text:
    get_positive_data()[0].append(positive_input_text)
    get_positive_data()[1].append("+")

if add_neg_tweet and negative_input_text:
    get_negative_data()[0].append(negative_input_text)
    get_negative_data()[1].append("-")
Exemplo n.º 21
0
from os import path
import streamlit as st
import os

def main(keyword, max_tweet, date):
    """Scrape tweets for *keyword* and write a sentiment summary to Excel.

    If an extraction for this keyword already exists, ask the user before
    scraping again.

    Bug fix: the original ignored the `keyword` parameter and read the
    global `search_keyword` instead, so calling it with any other argument
    silently analysed the wrong term.
    """
    filename = f'tweets_extracted/clean_tweets_{keyword}.csv'
    if path.exists(filename):
        st.warning("You already did a sentiment for this keyword, would like to do it again")
        continue_button = st.button("Continue")
        if continue_button:
            tweet_scrape(keyword, max_tweet, date)
    else:
        tweet_scrape(keyword, max_tweet, date)

    sentiment_summary_to_excel(filename, keyword)

if __name__ == '__main__':
    # Collect the scraping parameters, then run the analysis on demand,
    # creating the output directory first if it does not exist yet.
    st.title("Twitter Sentiment Analysis")
    search_keyword = st.text_input('Enter the keyword for tweets search')
    max_tweet = st.number_input(" Enter the max tweets you want to extract",min_value=1,max_value=50000, step=1, help='Max tweets is 50 000')
    date = str(st.date_input('Enter the start date, if no start date'))
    directory = 'tweets_extracted'
    if st.button('Proceed'):
        if not path.exists(directory):
            os.mkdir(directory)
        main(search_keyword, max_tweet, date)
Exemplo n.º 22
0
    lines = (line.strip() for line in text.splitlines())
    # break multi-headlines into a line each
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    # drop blank lines
    text = '\n'.join(chunk for chunk in chunks if chunk)
    print(text)
    loaded_model = pickle.load(open('model_trainned.sav', 'rb'))
    resultado = loaded_model.predict([text])
    resultado1 = loaded_model.predict_proba([text])
    print(resultado)
    print(100 * round(resultado1[0][1], 2), '%')


# Landing page: explain sentiment analysis, then run the analysis for a
# user-supplied term and tweet count.
st.title('Fake News Classifiers')
st.header('Introducing Sentiment Analysis')
st.markdown(
    '''Also known as "Opinion Mining", Sentiment Analysis refers to the use of Natural Language Processing to determine the attitude, opinions and emotions of a speaker, writer, or other subject within an online mention.\n
**Essentially, it is the process of determining whether a piece of writing is positive or negative. This is also called the Polarity of the content.**\n
As humans, we are able to classify text into positive/negative subconsciously. For example, the sentence "The kid had a gorgeous smile on his face", will most likely give us a positive sentiment. In layman’s terms, we kind of arrive to such conclusion by examining the words and averaging out the positives and the negatives. For instance, the words "gorgeous" and "smile" are more likely to be positive, while words like “the”, “kid” and “face” are really neutral. Therefore, the overall sentiment of the sentence is likely to be positive.\n
A common use for this technology comes from its deployment in the social media space to discover how people feel about certain topics, particularly through users’ word-of-mouth in textual posts, or in the context of Twitter, their tweets.
''')
st.header('Demo Application')
st.markdown('''
Below you enter a term and the amount of tweets passed to be analyzed. The application applies sentiment analysis and brings several insisghts about the term.'''
            )
termo = st.text_input('What term you want to verify?')
numero = st.text_input('How many past tweets do you want to analyse?')
# Bug fix: the button's return value was discarded, so the analysis ran on
# every rerun as soon as both fields were filled, button or no button.
verificar = st.button('Verify the term')
if verificar and termo and numero:
    main(termo, numero)
Exemplo n.º 23
0
# Feature-matrix construction: one fingerprint bit per column plus the
# solubility-class target. NOTE(review): `mol2fp`, `nBits`, `train_mols`,
# `test_mols` and `prop_dict` are defined earlier in the file — confirm.
columns = [f'fp_{idx}' for idx in range(nBits)] + ['target']
train_x = np.array([mol2fp(m, nBits=nBits) for m in train_mols])
test_x = np.array([mol2fp(m, nBits=nBits) for m in test_mols])
# Targets come from each molecule's 'SOL_classification' property,
# mapped to integer class labels via prop_dict.
train_target = np.array([prop_dict[m.GetProp('SOL_classification')] for m in train_mols]).reshape(-1,1)
test_target = np.array([prop_dict[m.GetProp('SOL_classification')] for m in test_mols]).reshape(-1,1)
# Append the target as the final column and wrap in DataFrames.
train_data = np.concatenate([train_x, train_target], axis=1)
test_data = np.concatenate([test_x, test_target], axis=1)
train_df = pd.DataFrame(train_data, columns=columns)
test_df = pd.DataFrame(test_data, columns=columns)

# Sidebar intro
st.sidebar.title("Model sandbox")
st.sidebar.markdown("**Choose a machine learning algorithm from below to see details about each**")

# Sidebar model comparison example
if st.button('Start here with the example dataset'):

    # Model comparison intro
    st.header("Model Comparison")
    st.write("After running all the default classifiers included in PyCaret, a dataframe appears below which summerizes the metrics of each classfier.")

    # Initiating model comparison setup and pulling model data
    def model_setup(train_df):
        # PyCaret setup: 'target' is the label column; silent=True skips
        # the interactive confirmation prompt.
        setup(data=train_df, target='target', silent = True, session_id=123)

    def model_comparison():
        # Train/compare all default PyCaret classifiers and pull the
        # resulting metrics table for display.
        best_model = compare_models()
        results = pull(best_model)
        return results

    # execute example functions
Exemplo n.º 24
0
def _insert_purchase_row(df, position, data_compra, ticker, quantidade,
                         preço):
    """Insert a purchase ("C") row for *ticker* at *position* in *df* and
    persist the updated dataframe to df.csv."""
    line = pd.DataFrame(
        {
            'Data Negócio': data_compra,
            'C/V': "C",
            'Código': ticker,
            'Quantidade': quantidade,
            'Preço (R$)': preço,
            'Valor Total (R$)': preço * quantidade
        },
        index=[position])
    df = pd.concat([df.iloc[:position], line,
                    df.iloc[position:]]).reset_index(drop=True)
    df.sort_index(inplace=True)
    df.to_csv("df.csv")


def add(fail):
    """Ask the user for the purchase data implied by *fail* and insert the
    corresponding "C" (buy) row into df.csv.

    *fail* carries (ticker, first_sell_date, first_sell_index, reason),
    where reason is 'before' (a sale appears with no earlier purchase) or
    'less' (more shares sold than purchased overall). The duplicated
    insert/persist logic of the two branches now lives in
    `_insert_purchase_row`.
    """
    df = pd.read_csv("df.csv", index_col=0)
    df['Data Negócio'] = pd.to_datetime(df['Data Negócio'], dayfirst=True)

    # NOTE(review): relies on the insertion order of fail.values() —
    # confirm the producer builds this dict in exactly this order.
    ticker, first_sell_date, first_sell_index, reason = fail.values()

    if reason == 'before':
        st.subheader('AVISO')
        st.markdown(
            f'''No arquivo tem informação sobre a venda da ação **{ticker}** na data **{first_sell_date.date()}** 
        mas está faltando informação sobre a sua compra nos dias anteriores''')

        # Purchase must predate the first sale.
        data_compra = st.date_input(f"A data da compra da ação {ticker}",
                                    value=first_sell_date -
                                    datetime.timedelta(days=1),
                                    max_value=first_sell_date)
        data_compra = pd.to_datetime(data_compra)
        quantidade_compra = st.number_input(
            "Quantidade", min_value=df.iloc[first_sell_index]['Quantidade'])
        preço_compra = st.number_input(label="Preço", min_value=0.)

        if st.button(label="Click 2X", key=1):
            _insert_purchase_row(df, first_sell_index, data_compra, ticker,
                                 quantidade_compra, preço_compra)

    if reason == 'less':
        # More shares sold than ever purchased: ask for the difference.
        total_sold = df[(df["C/V"] == "V")
                        & (df["Código"] == ticker)]['Quantidade'].sum()
        total_purchased = df[(df["C/V"] == "C")
                             & (df["Código"] == ticker)]['Quantidade'].sum()
        different = total_sold - total_purchased
        st.subheader('AVISO')
        st.markdown(
            f"No arquivo tem mais {different} venda da ação {ticker} do que compra. Por favor informa a data e o preço da compra."
        )

        data_compra = st.date_input(f"A data da compra da ação {ticker}",
                                    value=first_sell_date -
                                    datetime.timedelta(days=1))
        data_compra = pd.to_datetime(data_compra)
        quantidade_compra = st.number_input("Quantidade", min_value=different)
        preço_compra = st.number_input(label="Preço", min_value=0.)

        if st.button(label="Enter 2X ", key=2):
            _insert_purchase_row(df, first_sell_index, data_compra, ticker,
                                 quantidade_compra, preço_compra)
Exemplo n.º 25
0
                temperature_in_C = st.text_input(key='firstwall',
                                                 label=material +
                                                 ' temperature (C)')
                fw_material['temperature_in_C'] = temperature_in_C

        if len(fw_materials) > 1:
            volume_fraction = st.text_input(key='firstwall',
                                            label=material +
                                            ' volume fraction')
            fw_material['volume_fraction'] = volume_fraction
        else:
            fw_material['volume_fraction'] = 1

        firstwall_materials.append(fw_material)

# Run the neutronics model once the user has filled in every material form.
if st.button('Simulate model'):

    # Widget values arrive as strings; coerce the numeric fields to float
    # (only those actually present on each material dict).
    for material in breeder_materials:
        for float_key in [
                'enrichment_fraction', 'packing_fraction', 'volume_fraction',
                'temperature_in_C'
        ]:
            if float_key in material.keys():
                material[float_key] = float(material[float_key])

    # NOTE(review): `materials` and `Material` are defined earlier in the
    # file (the latter presumably from a neutronics package) — confirm.
    if len(materials) == 1:
        for material in breeder_materials:
            breeder_material = Material(**material).neutronics_material

    else:
        multimaterials = []
Exemplo n.º 26
0
def app():
    """Streamlit page: real-time sentiment analysis of Twitter data.

    Offers five activities (collect, clean, summarize, K-means clustering,
    keyword search) selected from a sidebar radio.

    Relies on module-level globals configured elsewhere in this file:
    ``api`` (authenticated tweepy API), ``st``, ``pd``, ``re``, ``tweepy``,
    ``TextBlob``, ``WordCloud``, ``plt``, ``sns``, ``Image``,
    ``CountVectorizer`` and ``KMeans``.
    """
    # Decorating the Nav Bar which is left side of APP
    st.sidebar.header("NAVIGATION")
    st.title("REAL TIME SENTIMENT ANALYSIS ON TWITTER ✨")
    activities = [
        "Collect The Data", "Clean The Data",
        "Summary View of Tweets Collected", "Tweet Analyzer Using K Means",
        "Tweet Analyser with search keyword"
    ]

    # choice = st.sidebar.selectbox("Type Of your Activity", activities)
    choice = st.sidebar.radio("Select Your Activity", activities)
    st.sidebar.header("CONFIGURATION NAV BAR")
    No_Of_Tweets = st.sidebar.slider("No of Tweets", 100, 5000)
    No_Of_Tweets_In_String = str(No_Of_Tweets)
    UserID = st.text_area(
        "Enter the exact twitter User Id of the User you want to Analyze (without @)"
    )
    st.markdown(
        "🔴 Note--> Don't move to next Navbar Section without completing this section."
    )
    TweetsData = pd.DataFrame()

    def cleanTxt(text):
        """Strip mentions, '#' marks, RT markers and links from one tweet.

        Defined once here and shared by every activity branch (the original
        duplicated this helper five times).
        """
        # Removing @mentions.  NOTE(review): the character class contains an
        # en dash ('0–9', not '0-9'); kept byte-identical to preserve the
        # original matching behaviour — confirm whether '0-9' was intended.
        text = re.sub('@[A-Za-z0–9]+', '', text)
        text = re.sub('#', '', text)  # Removing '#' hash tag
        text = re.sub('RT[\s]+', '', text)  # Removing RT
        text = re.sub('https?:\/\/\S+', '', text)  # Removing hyperlink
        return (text)

    def collectData():
        """Fetch the configured number of tweets from UserID's timeline."""
        posts = tweepy.Cursor(api.user_timeline,
                              screen_name=UserID,
                              tweet_mode="extended").items(No_Of_Tweets)
        TweetsData = pd.DataFrame([tweet.full_text for tweet in posts],
                                  columns=['Tweets'])
        return (TweetsData)

    # Step 1
    if (choice == "Collect The Data"):
        st.subheader("Analyze the tweets of your favourite User 👦👧:")
        st.subheader("This tool performs the following tasks as given below:")
        st.write(
            "1. Fetches the 5 most recent tweets from the given twitter handel"
        )
        st.write(
            "2. It also collects the tweets which are tweeted by a userID")
        st.write(
            "3. In this section you just collect the Real Time data and can be used for analyses purpose in next section."
        )

        if (st.button("Collect The Data")):
            st.success("Data is being Collected wait for some time")
            # Reuse collectData() instead of duplicating the Cursor call.
            TweetsData = collectData()
            st.write(TweetsData)

    # Step 2
    elif (choice == "Clean The Data"):
        TweetsData = collectData()

        # Clean the tweets
        TweetsData['Tweets'] = TweetsData['Tweets'].apply(cleanTxt)
        st.write(TweetsData)

    # Step 3
    elif (choice == "Summary View of Tweets Collected"):
        TweetsData = collectData()

        # Clean the tweets
        TweetsData['Tweets'] = TweetsData['Tweets'].apply(cleanTxt)

        st.subheader("Analyze the tweets of your favourite User 👦👧:")
        st.subheader("This tool performs the following tasks as given below:")
        st.write(
            "1. Fetches the 5 most recent tweets from the given twitter handel"
        )
        st.write("2. Generates a Word Cloud")
        st.write(
            "3. Performs Sentiment Analysis a displays it in form of a Bar Graph"
        )

        Analyzer_choice = st.selectbox("Select the Activities", [
            "Show Recent Tweets", "Generate WordCloud",
            "Visualize the Sentiment Analysis"
        ])

        if (st.button("Analyze")):
            if (Analyzer_choice == "Show Recent Tweets"):
                st.success("Fetching last 5 Tweets")

                def Show_Recent_Tweets(raw_text):
                    """Return the first five cleaned tweets.

                    Was range(0, 6), which returned six tweets although the
                    UI promises five.  ``raw_text`` is unused; kept for
                    interface compatibility.
                    """
                    rl = []
                    for i in range(5):
                        rl.append(TweetsData['Tweets'][i])
                    return (rl)

                recent_tweets = Show_Recent_Tweets(UserID)
                st.write(recent_tweets)

            elif (Analyzer_choice == "Generate WordCloud"):
                st.success("Generating Word Cloud")

                def gen_wordcloud():
                    """Render a word cloud of all tweets via a temp JPG file."""
                    df = TweetsData
                    # word cloud visualization
                    allWords = ' '.join([twts for twts in df['Tweets']])
                    wordCloud = WordCloud(width=500,
                                          height=300,
                                          random_state=21,
                                          max_font_size=110).generate(allWords)
                    plt.imshow(wordCloud, interpolation="bilinear")
                    plt.axis('off')
                    plt.savefig('WC.jpg')
                    img = Image.open("WC.jpg")
                    return (img)

                img = gen_wordcloud()
                st.image(img)

            else:

                def Plot_Analysis():
                    """Attach Subjectivity/Polarity/Analysis columns via TextBlob."""
                    st.success(
                        "Generating Visualisation for Sentiment Analysis of the User Given."
                    )
                    df = TweetsData

                    # Tweets were already cleaned above; re-applying the
                    # shared helper is idempotent and kept for safety.
                    df['Tweets'] = df['Tweets'].apply(cleanTxt)

                    # Create a function to get the subjectivity
                    def getSubjectivity(text):
                        return (TextBlob(text).sentiment.subjectivity)

                        # Create a function to get the polarity
                    def getPolarity(text):
                        return (TextBlob(text).sentiment.polarity)

                    # Create two new columns 'Subjectivity' & 'Polarity'
                    df['Subjectivity'] = df['Tweets'].apply(getSubjectivity)
                    df['Polarity'] = df['Tweets'].apply(getPolarity)

                    def getAnalysis(score):
                        """Map a polarity score to a sentiment label.

                        Fixed the "Netural" typo so the label matches the
                        "Neutral" spelling used by the keyword-search branch.
                        """
                        if (score < 0):
                            return ("Negative")
                        elif (score == 0):
                            return ("Neutral")
                        else:
                            return ("Positive")

                    df['Analysis'] = df['Polarity'].apply(getAnalysis)
                    return (df)

                df = Plot_Analysis()
                st.write(sns.countplot(x=df["Analysis"], data=df))
                st.pyplot(use_container_width=True)
    elif (choice == "Tweet Analyzer Using K Means"):
        st.subheader(
            "This tool fetches the last " + No_Of_Tweets_In_String +
            " tweets from the twitter handel & Performs the following tasks")
        st.write("1. Converts it into a DataFrame")
        st.write("2. Cleans the text")
        st.write(
            "3. Analyzes Subjectivity of tweets and adds an additional column for it"
        )
        st.write(
            "4. Analyzes Polarity of tweets and adds an additional column for it"
        )
        st.write(
            "5. Analyzes Sentiments of tweets and adds an additional column for it"
        )

        if (st.button("Analyze")):
            st.success("Please wait while working on K means......")
            TweetsData = collectData()
            st.success(
                "Collecting the data once again to verify we got the right data or not..."
            )

            # Clean the tweets
            st.success("Cleaning the data ...")
            TweetsData['Tweets'] = TweetsData['Tweets'].apply(cleanTxt)

            df = TweetsData
            # TFIDF Approach
            # NOTE(review): despite the name, this is a CountVectorizer
            # (raw term counts), not a TF-IDF transform — confirm intent.
            tf_idf_vect = CountVectorizer(analyzer='word',
                                          ngram_range=(1, 1),
                                          stop_words='english',
                                          min_df=0.0001)
            tf_idf_vect.fit(df['Tweets'])  # Here the formula internally works
            desc_matrix = tf_idf_vect.transform(df["Tweets"])
            st.success("Done with TFIDF Approach ...")
            st.success("working on K means...")
            # K Means Clustering
            num_clusters = 3
            km = KMeans(n_clusters=num_clusters)
            km.fit(desc_matrix)
            clusters = km.labels_.tolist()

            st.success("Done with Clustering see the below results...")

            # create DataFrame films from all of the input files.
            st.subheader(
                "KMeans Clustring which have 3 clusters 1,0 and -1 as values:")
            tweets = {'Tweet': df["Tweets"].tolist(), 'Cluster': clusters}
            frame = pd.DataFrame(tweets, index=[clusters])
            st.write(frame)

            st.subheader(
                "Showing the Different Clusters which are +ve, -ve and Neutral:"
            )
            # NOTE(review): KMeans cluster numbers are arbitrary and change
            # between runs; mapping 1/2/0 to positive/neutral/negative is
            # not guaranteed by the algorithm — confirm before relying on it.
            st.write("Positive Cluster:")
            st.write(frame[frame['Cluster'] == 1])  # positive cluster
            st.write("Neutral cluster:")
            st.write(frame[frame['Cluster'] == 2])  # Neutral cluster
            st.write("Negative cluster:")
            st.write(frame[frame['Cluster'] == 0])  # Negative cluster
            st.write("Final summary:")
            # neutral =
            st.write(frame['Cluster'].value_counts())

    else:
        st.subheader(
            "This tool fetches the last " + No_Of_Tweets_In_String +
            " tweets from the twitter handel & Performs the following tasks")
        st.write("1. Converts it into a DataFrame")
        st.write("2. Cleans the text")
        st.write(
            "3. Analyzes Subjectivity of tweets and adds an additional column for it"
        )
        st.write(
            "4. Analyzes Polarity of tweets and adds an additional column for it"
        )
        st.write(
            "5. Analyzes Sentiments of tweets and adds an additional column for it"
        )

        SearchKeyword = st.text_area(
            "*Enter the keyword to be search to collect the data*")
        st.markdown(
            "<--------Also Do checkout the another cool tool from the sidebar")
        date_since = "2010-1-1"

        def get_data(SearchKeyword):
            """Search recent tweets for a keyword and score their sentiment."""
            st.success("Fetching Last " + No_Of_Tweets_In_String +
                       " Tweets Please wait........")
            posts = tweepy.Cursor(api.search,
                                  q=SearchKeyword,
                                  lang="en",
                                  since=date_since).items(No_Of_Tweets)
            st.success("Collected the data and converting it to Data frame.")
            df = pd.DataFrame([tweet.text for tweet in posts],
                              columns=['Tweets'])
            st.success("converted to data frame of collected Tweets")

            # Clean the tweets (shared helper defined at the top of app()).
            df['Tweets'] = df['Tweets'].apply(cleanTxt)
            st.success("Finished with cleaning the data")

            def getSubjectivity(text):
                return (TextBlob(text).sentiment.subjectivity)

            # Create a function to get the polarity
            def getPolarity(text):
                return (TextBlob(text).sentiment.polarity)

            # Create two new columns 'Subjectivity' & 'Polarity'
            df['Subjectivity'] = df['Tweets'].apply(getSubjectivity)
            df['Polarity'] = df['Tweets'].apply(getPolarity)
            st.success(
                "Assainged the values of subjectivity and polarity for each tweet."
            )

            def getAnalysis(score):
                if (score < 0):
                    return ("Negative")
                elif (score == 0):
                    return ("Neutral")
                else:
                    return ("Positive")

            st.success("Final step please wait.........")
            df['Analysis'] = df['Polarity'].apply(getAnalysis)
            return (df)

        if (st.button("Show Data")):
            df = get_data(SearchKeyword)
            st.write(df)
    st.subheader(
        ' ---------------Created By :  Project 304 @KL University --------------- :sunglasses:'
    )
Exemplo n.º 27
0
def cifar10_func():
    """Streamlit page: explore the CIFAR-10 dataset and train a small CNN.

    Lets the user pick activation, padding, dropout, optimizer, epochs and
    batch size, trains on CIFAR-10, reports test loss/accuracy, saves the
    model to ``models/cifar10.h5`` and shows predictions on random samples.

    Relies on module-level globals imported elsewhere in this file: ``st``,
    ``np``, ``plt``, ``to_categorical`` and the Keras classes
    (``Sequential``, ``Conv2D``, ``BatchNormalization``, ``MaxPooling2D``,
    ``Dropout``, ``Flatten``, ``Dense``).
    """
    st.title("CIFAR-10")
    st.write(
        "The dataset is a collection of images that are commonly used to train machine learning and computer vision algorithms. This dataset is used for training a multiclass classification model that can classify or recognize images belonging to 10 different classes."
    )

    from keras.datasets import cifar10

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    classes = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]

    st.write(" ")
    st.markdown('**Shape**')
    st.write('\nTraining dataset :', x_train.shape, "\nTesting dataset :",
             x_test.shape)

    st.write("**Data** ")

    # Preview: 14 random training images in a 2x7 grid with class labels.
    rand_14 = np.random.randint(0, x_train.shape[0], 14)
    sample_digits = x_train[rand_14]
    sample_labels = y_train[rand_14]

    num_rows, num_cols = 2, 7
    f, ax = plt.subplots(num_rows,
                         num_cols,
                         figsize=(12, 5),
                         gridspec_kw={
                             'wspace': 0.03,
                             'hspace': 0.01
                         },
                         squeeze=True)

    for r in range(num_rows):
        for c in range(num_cols):
            image_index = r * 7 + c
            ax[r, c].axis("off")
            ax[r, c].imshow(sample_digits[image_index], cmap='gray')
            ax[r, c].set_title('%s' % classes[int(sample_labels[image_index])])
    plt.show()
    st.pyplot(clear_figure=False)

    st.write("**Classes** ")
    # Comma-separated class list; every name but the last is title-cased
    # (kept to match the original page output exactly).
    s = ""
    for i in range(len(classes)):
        # Was `i is not (len(classes) - 1)`: identity comparison on ints only
        # works by accident of CPython small-int caching; use `!=`.
        if i != (len(classes) - 1):
            s += str(classes[i]).title()
            s += ","
            s += " "
        else:
            s += str(classes[i])
    st.write(s)

    image_height = x_train.shape[1]
    image_width = x_train.shape[2]
    num_channels = 3

    # Min-max normalize each image per channel to [0, 1].
    x_train_min = x_train.min(axis=(1, 2), keepdims=True)
    x_train_max = x_train.max(axis=(1, 2), keepdims=True)
    x_train = (x_train - x_train_min) / (x_train_max - x_train_min)

    x_test_min = x_test.min(axis=(1, 2), keepdims=True)
    x_test_max = x_test.max(axis=(1, 2), keepdims=True)
    x_test = (x_test - x_test_min) / (x_test_max - x_test_min)

    # No-op for CIFAR-10 (data already has this shape); kept for clarity.
    x_train = np.reshape(
        x_train, (x_train.shape[0], image_height, image_width, num_channels))
    x_test = np.reshape(
        x_test, (x_test.shape[0], image_height, image_width, num_channels))

    y_train, y_test = to_categorical(y_train), to_categorical(y_test)

    st.write("")
    st.write("**Build Model**")

    act = st.selectbox("Choose the type of Activation Function ",
                       ('relu', 'sigmoid', 'tanh'))

    pad = st.selectbox("Choose the Padding ", ('same', 'valid'))

    dropout = st.checkbox("Dropout")

    opt = st.selectbox("Choose the type of Optimizer ",
                       ("adam", "sgd", "rmsprop", "adagrad"))

    val = st.checkbox('Validation Set')
    epoch = st.slider("Epochs", 0, 250, step=1)
    b_s = st.slider("Batch Size", 32, 1024, step=32)

    st.write("")
    st.write("")

    if st.button("Train Model"):
        # Small VGG-style CNN: three conv stages with batch norm and
        # max-pooling, then a dense classifier head.
        model = Sequential()

        model.add(
            Conv2D(32, kernel_size=3, activation=act, input_shape=(32, 32, 3)))
        model.add(BatchNormalization())
        if dropout:
            model.add(Dropout(0.2))
        model.add(
            Conv2D(64, kernel_size=3, strides=1, activation=act, padding=pad))
        model.add(BatchNormalization())
        model.add(MaxPooling2D((2, 2)))
        model.add(
            Conv2D(128, kernel_size=3, strides=1, padding=pad, activation=act))
        model.add(BatchNormalization())
        model.add(MaxPooling2D((2, 2)))
        model.add(Conv2D(64, kernel_size=3, activation=act))
        model.add(BatchNormalization())
        model.add(MaxPooling2D((2, 2)))
        if dropout:
            model.add(Dropout(0.2))
        model.add(Flatten())
        model.add(Dense(512, activation="relu"))
        model.add(Dropout(0.2))
        model.add(Dense(len(classes), activation="softmax"))
        model.compile(loss="categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])
        with st.spinner(
                'Training may take a while, so grab a cup of coffee, or better, go for a run!'
        ):
            if val:
                result = model.fit(x_train,
                                   y_train,
                                   batch_size=int(b_s),
                                   epochs=int(epoch),
                                   validation_split=0.2)
            else:
                result = model.fit(x_train,
                                   y_train,
                                   batch_size=int(b_s),
                                   epochs=int(epoch))
        st.success("Model Trained.")
        results = model.evaluate(x_test, y_test, batch_size=128)
        st.write("Loss: ", results[0])
        st.write("Accuracy: ", results[1])
        model.save("models/cifar10.h5")
        st.write("**Predictions** (Random Test Samples)")
        Images = []
        pred = ""

        for i in range(5):
            r = np.random.randint(0, len(x_test))
            # Keep the full (32, 32, 3) RGB image.  The original reshaped to
            # (height, width), which raises ValueError: 3072 values cannot
            # fill a (32, 32) = 1024-element array.
            Images.append(x_test[r])
            pred += str(classes[model.predict(x_test[r].reshape(
                -1, x_train.shape[1], x_train.shape[2], 3)).argmax()])
            pred += " "

        st.image(Images, width=100)
        st.write(pred)
Exemplo n.º 28
0
def main():

	

		
	menu = ["Team","Lieu d'intérêt", "Evénement", "Produit", "Itinéraire","Maintenance"]
	choice = st.sidebar.radio("", menu)


	if choice == "Team":

		


#		image = Image.open('DATAtourisme.png')
		st.image('DATAtourisme.png', use_column_width = True, output_format = 'PNG')


		st.markdown("<h1 style='text-align: center; font-size:15px; color:#A11F40;'>Qu'est ce que DATAtourisme ?</h1>", unsafe_allow_html=True)   

		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'></h1>", unsafe_allow_html=True)   
		

		col1, col2, col3 = st.beta_columns((1,13,1))   		
   				
		with col1:
   		   
   		   st.markdown("")

		with col2:
			st.markdown("**DATAtourisme** est un dispositif national visant à **faciliter l’accès aux données publiques d’information touristique** produites à travers les territoires par les offices de tourisme et les comités départements ou régionaux du tourisme. Il se matérialise par une plateforme de collecte, de normalisation et de diffusion de données en open data, directement reliée aux bases de données territoriales, et repose sur l’animation d’une communauté d’utilisateurs. Le dispositif est **copiloté par** la **Direction générale des entreprises** et la **fédération ADN Tourisme**. Les données collectées sont relatives au recensement de l’ensemble des événements et points d’intérêt touristiques de France (musées, monuments, sites naturels, activités, itinéraires, expos et concerts, etc)", unsafe_allow_html=True)   


		with col3:
			st.markdown("")



		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'></h1>", unsafe_allow_html=True)   


		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'></h1>", unsafe_allow_html=True)


		st.markdown("<h1 style='text-align: center; font-size:15px; color:#A11F40;'>Qui sommes-nous ?</h1>", unsafe_allow_html=True)   

		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'></h1>", unsafe_allow_html=True)   
		


		col1, col2, col3 = st.beta_columns((1,13,1))   		
   				
		with col1:
   		   
   		   st.markdown("")

		with col2:
			st.markdown("Dans le cadre de notre formation professionnelle de Data Analyst, **notre équipe** de 5 s'est alliée à **ADN Tourisme** pour proposer un **état des lieux** constamment à jour du **projet DATAtourisme** (qualité, quantité, points d'amélioration), ce qui n'existait pas jusqu'alors. Notre script Python effectue un **travail important en amont** pour récupérer, nettoyer et constuire la donnée présentée ici.", unsafe_allow_html=True)   

		with col3:
			st.markdown("")   		   



		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'></h1>", unsafe_allow_html=True)   
		

		col1, col2, col3 = st.beta_columns((3,0.72,3))

		with col1:
			st.markdown("")

		with col2:
			if st.button("Team"):
			   st.balloons()

		with col3:
			st.markdown("")


		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'></h1>", unsafe_allow_html=True)   
		





#		st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'>Team</h1>", unsafe_allow_html=True)   

		



		col1, col2, col3, col4, col5 = st.beta_columns((1,1,1,1,1))   		
   				
		with col1:
   		   
   		   st.image("cm1.jpg", use_column_width=True)
   		   st.markdown("""**Carla&#8239;Moreno**""")
   		   st.markdown("""*Scrum Master*""")
   		   st.markdown(link1, unsafe_allow_html=True)

		with col2:
   		   st.image("ab.jpg", use_column_width=True)
   		   st.markdown("""**Amar&#8239;Barache**""")
   		   st.markdown("""*Equipe Tech*""")
   		   st.markdown(link5, unsafe_allow_html=True)
		

		with col3:
   		   
   		   st.image("Yvanne.jpg", use_column_width=True)	
   		   st.markdown("""**Yvanne&#8239;Euchin**""")
   		   st.markdown("""*Equipe Tech*""")
   		   st.markdown(link2, unsafe_allow_html=True)



		with col4:
		   st.image("md.jpg", use_column_width=True)
		   st.markdown("""**Michael&#8239;Desforges**""")
		   st.markdown("""*Equipe Tech*""")
		   st.markdown(link, unsafe_allow_html=True)


		with col5:
   		   st.image("cc.jpg", use_column_width=True)
   		   st.markdown("""**Corentin&#8239;Guillo**""")
   		   st.markdown("""*Product Owner*""")
   		   st.markdown(link4, unsafe_allow_html=True)



#		image = Image.open('WCS.png')
		st.image('WCS.png', use_column_width = True, output_format = 'PNG')


		page_bg_img = '''
		<style>
		body {
		background-color: #FFFCFC;
		background-size: cover;
		}
		</style>
		'''

		st.markdown(page_bg_img, unsafe_allow_html=True)


	if choice == "Produit":

		data = data1




#		image = Image.open('DATAtourisme.png')
		st.image('DATAtourisme.png', use_column_width = True, output_format = 'PNG')

		st.markdown("<h1 style='text-align:center; font-size:29px; color: #57565B;'>Produits</h1>", unsafe_allow_html=True)




		if st.checkbox('voir dataframe'):
		   st.write(data)
#		   st.write(data.iloc[0:100,:])
		 
				 


		f" POI : **{len(data1.index)}** sur **{len(data1.index)+len(data2.index)+len(data3.index)+len(data4.index)}** au total"

		f" Créateurs de données : **{len(data1.createur_donnée.unique())}**"

		f" Fournisseurs : **{len(data1.fournisseur.unique())}**"

		f" Villes : **{len(data1.ville.unique())}**"

		f" POI avec photo :  **{int(round(data1.photo.sum()/len(data1.photo.index)*100))}%**"


				 

		st.markdown(""" # **Densité de POI** """)

		fig = px.density_mapbox(data, lat='latitude', lon='longitude', radius=4,
                        center={"lat": 46.037763, "lon": 4.4}, zoom=4, color_continuous_midpoint = 5,
                        mapbox_style='carto-positron', color_continuous_scale=['grey','darkgrey','grey','red','red'])
		fig.update_layout(coloraxis_showscale=False,margin=dict( l=0, r=0, b=0, t=0, pad = 4 ))
		fig.update_traces(hoverinfo='skip', hovertemplate=None)
		st.plotly_chart(fig)

		



		st.markdown("""# **Par départements**""")


		fig = px.choropleth_mapbox(data, 
                           geojson=france_regions_geo, 
                           color=data.code_departement.value_counts(),
                           locations=data.code_departement.value_counts().index.tolist(), 
                           featureidkey='properties.code',
                           opacity=1,
                           center={"lat": 46.037763, "lon": 2.062783},
                           mapbox_style="carto-positron", zoom=4)

		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
		st.plotly_chart(fig)


		st.markdown(""" # **Répartition des sous-categories** """)


		x = list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index[0:17])#.drop("HébergementProduit",axis=0).index[0:17])
		y=list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc[0:17])#.drop("HébergementProduit",axis=0).iloc[0:17])
		fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
		st.plotly_chart(fig)



		

#		x = list(data.sous_categorie.str.split(',',expand = True).stack().explode().value_counts().drop("HébergementProduit",axis=0).index)
#		y=list(data.sous_categorie.str.split(',',expand = True).stack().explode().value_counts().drop("HébergementProduit",axis=0))
#		fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
#		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
#		st.plotly_chart(fig)

		

#		image = Image.open('WCS.png')
#		st.image(image, use_column_width = True, output_format = 'PNG')





	elif choice == "Evénement":

		data = data2



#		image = Image.open('DATAtourisme.png')
		st.image('DATAtourisme.png', use_column_width = True, output_format = 'PNG')

		st.markdown("<h1 style='text-align:center; font-size:29px; color: #57565B;'>Evénements</h1>", unsafe_allow_html=True)



		if st.checkbox('voir dataframe'):
		   st.write(data.iloc[0:100,:])
		 

		f" POI : **{len(data2.index)}** sur **{len(data1.index)+len(data2.index)+len(data3.index)+len(data4.index)}** au total"

		f" Créateurs de données : **{len(data2.createur_donnée.unique())}**"

		f" Fournisseurs : **{len(data2.fournisseur.unique())}**"

		f" Villes : **{len(data2.ville.unique())}**"

		f" POI avec photo :  **{int(round(data2.photo.sum()/len(data2.photo.index)*100))}%**"
			
		

		st.markdown(""" # **Densité de POI** """)

		fig = px.density_mapbox(data, lat='latitude', lon='longitude', radius=4,
                        center={"lat": 46.037763, "lon": 4.4}, zoom=4, color_continuous_midpoint = 5,
                        mapbox_style='carto-positron', color_continuous_scale=['grey','darkgrey','grey','red','red'])
		fig.update_layout(coloraxis_showscale=False,margin=dict( l=0, r=0, b=0, t=0, pad = 4 ))
		fig.update_traces(hoverinfo='skip', hovertemplate=None)
		st.plotly_chart(fig)

		st.markdown("""# **Par départements**""")

		fig = px.choropleth_mapbox(data, 
                           geojson=france_regions_geo, 
                           color=data.code_departement.value_counts(),
                           locations=data.code_departement.value_counts().index.tolist(), 
                           featureidkey='properties.code',
                           opacity=1,
                           center={"lat": 46.037763, "lon": 2.062783},
                           mapbox_style="carto-positron", zoom=4)

		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
		st.plotly_chart(fig)


		st.markdown(""" # **Répartition des sous-categories** """)


		x = list(data2.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index)
		y=list(data2.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc)
		fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
		st.plotly_chart(fig)





#		x = list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index)
#		y=list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts())
#		fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
#		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
#		st.plotly_chart(fig)


		

#		image = Image.open('WCS.png')
#		st.image(image, use_column_width = True, output_format = 'PNG')



	elif choice == "Lieu d'intérêt":

		data = data3



#		image = Image.open('DATAtourisme.png')
		st.image('DATAtourisme.png', use_column_width = True, output_format = 'PNG')


		# --- "Lieux d'intérêt" (points of interest) dashboard page ---
		st.markdown("<h1 style='text-align:center; font-size:29px; color: #57565B;'>Lieux d'intérêt</h1>", unsafe_allow_html=True)



		# Optional raw-data preview, capped at the first 100 rows to keep rendering fast.
		if st.checkbox('voir dataframe'):
		   st.write(data.iloc[0:100,:])




		# KPI summary: these bare f-strings are rendered as markdown by
		# Streamlit "magic" (standalone expressions are auto-displayed).
		# NOTE(review): the KPIs read data3 while the charts below read data —
		# presumably data was assigned data3 earlier in this branch; confirm.
		f" POI : **{len(data3.index)}** sur **{len(data1.index)+len(data2.index)+len(data3.index)+len(data4.index)}** au total"

		f" Créateurs de données : **{len(data3.createur_donnée.unique())}**"

		f" Fournisseurs : **{len(data3.fournisseur.unique())}**"

		f" Villes : **{len(data3.ville.unique())}**"

		f" POI avec photo :  **{int(round(data3.photo.sum()/len(data3.photo.index)*100))}%**"



		st.markdown(""" # **Densité de POI** """)

		# POI density heat map over France; scale is mostly grey with red
		# highlights so dense areas stand out, and hover tooltips are disabled.
		fig = px.density_mapbox(data, lat='latitude', lon='longitude', radius=4,
                        center={"lat": 46.037763, "lon": 4.4}, zoom=4, color_continuous_midpoint = 5,
                        mapbox_style='carto-positron', color_continuous_scale=['grey','darkgrey','grey','red','red'])
		fig.update_layout(coloraxis_showscale=False,margin=dict( l=0, r=0, b=0, t=0, pad = 4 ))
		fig.update_traces(hoverinfo='skip', hovertemplate=None)
		st.plotly_chart(fig)

		st.markdown("""# **Par départements**""")

		# Choropleth of POI counts per département, joined to the GeoJSON via
		# the département code ('properties.code').
		fig = px.choropleth_mapbox(data, 
                           geojson=france_regions_geo, 
                           color=data.code_departement.value_counts(),
                           locations=data.code_departement.value_counts().index.tolist(), 
                           featureidkey='properties.code',
                           opacity=1,
                           center={"lat": 46.037763, "lon": 2.062783},
                           mapbox_style="carto-positron", zoom=4)

		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
		st.plotly_chart(fig)

		st.markdown(""" # **Répartition des sous-categories** """)

		# Bar chart of the 17 most frequent sub-categories; the column holds
		# comma-separated values, hence the split/stack/explode chain.
		x = list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index[0:17])
		y=list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc[0:17])
		fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
		st.plotly_chart(fig)



#		image = Image.open('WCS.png')
#		st.image(image, use_column_width = True, output_format = 'PNG')



	elif choice == "Itinéraire":

		# --- "Itinéraires" (routes) dashboard page: same layout as the other
		# category pages, driven by the data4 subset. ---
		data = data4




#		image = Image.open('DATAtourisme.png')
		st.image('DATAtourisme.png', use_column_width = True, output_format = 'PNG')


		st.markdown("<h1 style='text-align:center; font-size:29px; color: #57565B;'>Itinéraires</h1>", unsafe_allow_html=True)



		# Optional raw-data preview, capped at the first 100 rows.
		if st.checkbox('voir dataframe'):
		   st.write(data.iloc[0:100,:])


		# KPI summary rendered through Streamlit "magic" (bare f-strings are
		# auto-displayed as markdown).
		f" **POI** : **{len(data4.index)}** sur **{len(data1.index)+len(data2.index)+len(data3.index)+len(data4.index)}** au total"

		f" Créateurs de données : **{len(data4.createur_donnée.unique())}**"

		f" Fournisseurs : **{len(data4.fournisseur.unique())}**"

		f" Villes : **{len(data4.ville.unique())}**"

		f" POI avec photo :  **{int(round(data4.photo.sum()/len(data4.photo.index)*100))}%**"



		st.markdown(""" # **Densité de POI** """)

		# POI density heat map (hover tooltips disabled; grey scale with red highlights).
		fig = px.density_mapbox(data, lat='latitude', lon='longitude', radius=4,
                        center={"lat": 46.037763, "lon": 4.4}, zoom=4, color_continuous_midpoint = 5,
                        mapbox_style='carto-positron', color_continuous_scale=['grey','darkgrey','grey','red','red'])
		fig.update_layout(coloraxis_showscale=False,margin=dict( l=0, r=0, b=0, t=0, pad = 4 ))
		fig.update_traces(hoverinfo='skip', hovertemplate=None)
		st.plotly_chart(fig)

		st.markdown("""# **Par départements**""")

		# Choropleth of POI counts per département, joined to the GeoJSON via
		# the département code ('properties.code').
		fig = px.choropleth_mapbox(data, 
                           geojson=france_regions_geo, 
                           color=data.code_departement.value_counts(),
                           locations=data.code_departement.value_counts().index.tolist(), 
                           featureidkey='properties.code',
                           opacity=1,
                           center={"lat": 46.037763, "lon": 2.062783},
                           mapbox_style="carto-positron", zoom=4)

		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
		st.plotly_chart(fig)

		st.markdown(""" # **Répartition des sous-categories** """)

		# Top-17 sub-categories; the column is comma-separated, hence
		# split/stack/explode before value_counts.
		x = list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index[0:17])
		y=list(data.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc[0:17])
		fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
		fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
		st.plotly_chart(fig)



#		image = Image.open('WCS.png')
#		st.image(image, use_column_width = True, output_format = 'PNG')



	# NOTE(review): plain `if` (not `elif`) breaks the branch chain above, so
	# the trailing `else` below runs for EVERY choice other than "Maintenance",
	# rendering an (empty) subheader even after another page has been drawn.
	# Harmless as written, but confirm whether `elif` was intended.
	if choice == "Maintenance":

		# --- Data-quality / maintenance page, gated behind a trivial password. ---


#		image = Image.open('DATAtourisme.png')
		st.image('DATAtourisme.png', use_column_width = True, output_format = 'PNG')


		# Access gate: the page unlocks when the user types "bonjour". This is
		# obfuscation only, not security — the password is in the source and
		# the hint below gives it away on purpose.
		mdp = st.text_input("Mot de passe ?",type="password")
		st.write('Dites "bonjour" ;)')

		# st.write() with no arguments is a no-op; appears to be a leftover.
		st.write()
		if mdp == "bonjour":
			if st.checkbox('voir dataframe'):
				st.write(data_erreur)
				st.markdown("")
				download = st.button('télécharger')
				if download:
					# Build a data-URI download link: CSV -> base64 -> <a href="data:...">.
					csv = data_erreur.to_csv(index=False)
					b64 = base64.b64encode(csv.encode()).decode()  
					linko= f'<a href="data:file/csv;base64,{b64}" download="data_erreur.csv">Download csv file</a>'
					st.markdown(linko, unsafe_allow_html=True)


			# Data-quality KPIs (bare f-strings rendered by Streamlit "magic").
			f" Départements sans fournisseurs : **{data_erreur[data_erreur.fournisseur.isna()].code_departement.unique()}**"
			f" Départements sans créateur : **{data_erreur[data_erreur.createur_donnée.isna()].code_departement.unique()}**"
			f" Fournisseurs sans région : **{data_erreur[data_erreur.region.isna()].fournisseur.unique()}**"
			st.markdown("")
			st.markdown(""" # **Carte des erreurs latitude & longitude** """)
			st.markdown("")
			# Plot only the rows whose coordinates are suspect.
			st.map(data_erreur_map)

#			st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'>Répartition des sous-categories de la categorie Lieu d'intérêt</h1>", unsafe_allow_html=True)
#			x = list(data3.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index)
#			y=list(data3.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc)
#			fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
#			fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
#			st.plotly_chart(fig)

#			st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'>Répartition des sous-categories de la categorie Evénement</h1>", unsafe_allow_html=True)
#			x = list(data2.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index[0:17])
#			y=list(data2.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc[0:17])
#			fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
#			fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
#			st.plotly_chart(fig)

#			st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'>Répartition des sous-categories de la categorie Produit</h1>", unsafe_allow_html=True)
#			x = list(data1.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index[0:17])#.drop("HébergementProduit",axis=0).index[0:17])
#			y=list(data1.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc[0:17])#.drop("HébergementProduit",axis=0).iloc[0:17])
#			fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
#			fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
#			st.plotly_chart(fig)

#			st.markdown("<h1 style='text-align: center; font-size:29px; color:#57565B;'>Répartition des sous-categories de la categorie Itinéraire</h1>", unsafe_allow_html=True)
#			x = list(data4.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().index[0:17])
#			y=list(data4.sous_categorie.str.split(', ',expand = True).stack().explode().value_counts().iloc[0:17])
#			fig = px.bar(x=x,y=y,color_discrete_sequence =['#A11F40'])
#			fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0},showlegend=False,yaxis=dict(title=None), xaxis=dict(title=None,type="category"))
#			st.plotly_chart(fig)





#		image = Image.open('WCS.png')
#		st.image(image, use_column_width = True, output_format = 'PNG')




	else:
		st.subheader("""  """)
# --- Sentiment-analysis demo (Streamlit front end for a local prediction API). ---

# NOTE(review): `global` at module level is a no-op; kept only for fidelity
# with the original script.
global authValue
authValue = False

# Sidebar navigation between the two pages of the app.
radio = st.sidebar.radio("Select from options!",
                         ("Welcome Page", "Sentiment Score"))

# Page-wide background colour via injected CSS.
st.markdown('<style>body{background-color: #FFF2C2;}</style>',
            unsafe_allow_html=True)

if radio == 'Welcome Page':

    st.title('**_Welcome to Sentiment Analysis System!_**')
    image = Image.open('download.jpg')
    st.image(image, caption='', use_column_width=True)

# The radio options are mutually exclusive, so `elif` (was a second `if`).
elif radio == 'Sentiment Score':

    st.title('Sentiment Analysis of the file')

    st.subheader('_Please enter sentence_')
    # User picks one of the pre-anonymised data files to score.
    filename = st.selectbox(
        "Choose the anonymize data file to analyze :",
        ('No Selection', 'abcde.txt', 'abc11.txt', 'abc1.txt', 'abc51.txt'))

    if st.button('Check Sentiment'):

        # Fix: interpolate the selected file name into the query string —
        # the placeholder had been lost, so the API always received a
        # literal, non-existent file name.
        # NOTE(review): 'No Selection' is also posted verbatim; consider
        # guarding against it before calling the API.
        response = requests.post(
            f"http://127.0.0.1:8000/predict?filename={filename}")

        st.subheader(response.json())
# Exemplo n.º 30
# 0
#width = 800
#ratio = width / im.width
#height = int(im.height * ratio) #(5)
#im_resized = im.resize((width, height))
#im_resized.save('img1.jpg')
# Header image for the page.
st.image("IMG.JPG")

# File upload (CSV); st.file_uploader also supports images, audio and video.
uploaded_file = st.file_uploader("ファイルの取り込み", type='csv')

# Everything below runs only once a file has been uploaded.
if uploaded_file is not None:
    # Shift-JIS decoding — the expected inputs are Japanese-locale CSV files.
    df =pd.read_csv(uploaded_file ,encoding="SHIFT-JIS")
    
    # Correlation check.
    if st.button('相関関係を確認'):
        # Placeholder so the "please wait" message can be replaced in place
        # by the completion message once df.corr() is done.
        comment = st.empty()
        comment.write('相関確認を確認してます。少々お待ちください。')

        df1 = df.corr()
        st.dataframe(df1)
        
        comment.write('相関確認完了')
       
    # Run the analysis / prediction.
    if st.button('予測を開始'):
    
        comment = st.empty()
        comment.write('分析を開始してます。少々お待ちください。')

        # Assign the explanatory variables actually used for prediction to X2.