Example #1
def recommenderengine():
    caching.clear_cache()
    st.write('')
    st.header('Recommended Artist Collaborations')
    st.write(
        '-----------------------------------------------------------------------'
    )
    st.write('')
    st.subheader("Genre Classification of Nyoy's songs")

    nyoygenre = {
        'Genre': ['Acoustic', 'Rock', 'R&B', 'Classical'],
        'Number of Songs': ['55', '18', '6', '5']
    }
    st.table(nyoygenre)
    st.subheader('Top artists for Nyoy to collaborate with.')
    option = st.selectbox('Select genre', ('Acoustic', 'Rock', 'R&B'))
    st.write(
        'Under the genre ', option,
        ', it is highly recommended for Nyoy to collaborate with the following artists.'
    )
    if option == 'Acoustic':
        st.write('Acoustic')
    elif option == 'Rock':
        st.write('Rock')
    elif option == 'R&B':
        st.write('R&B')
Example #2
def csv_to_json(file, db):
    try:
        field_names = []
        # this is of class bytes. It is the WHOLE file, not just the name.
        data = file.readlines()
        decoded = [x.decode("utf-8").strip().split(",") for x in data]
        field_names = decoded[0]
        n_fields = len(field_names)

        data_points = []
        for i in decoded[1:]:
            d = defaultdict()
            for y in range(n_fields):
                d[field_names[y]] = i[y]
            d["_id"] = ObjectId()
            d["timestamp"] = datetime.datetime.now()
            data_points.append(dict(d))

        insertions = store.insert_multiple_values(data_points, db)
        return [str(_id) for _id in insertions.inserted_ids]
    except BulkWriteError as error:
        # TODO: is there a way to remove the items that WERE successfully inserted before retrying the insert?
        print("encountered a bulk write error. clearing cache.")
        caching.clear_cache()
        print("cleared cache. Retrying bulk insert")
        insertions = store.insert_multiple_values(data_points, db)
        return [str(_id) for _id in insertions.inserted_ids]
    except Exception as error:
        print(f"Error in converting csv to json: {error}")
Example #3
def contributors():
    caching.clear_cache()
    st.write('')
    st.header('Contributors')
    st.write('-----------------------------------------------------------------------') 
    st.write('')

    st.subheader('Edward Nataniel Apostol')
    st.markdown('- Email: [[email protected]](mailto:[email protected])')
    st.markdown('- LinkedIn: [https://www.linkedin.com/in/edward-apostol/](https://www.linkedin.com/in/edward-apostol/)')

    st.subheader('Eric Vincent Magno')
    st.markdown('- Email: [[email protected]](mailto:[email protected])')
    st.markdown('- LinkedIn: [https://www.linkedin.com/in/ericxmagno/](https://www.linkedin.com/in/ericxmagno/)')

    st.subheader('Fatima Grace Santos')
    st.markdown('- Email: [[email protected]](mailto:[email protected])')
    st.markdown('- LinkedIn: [https://www.linkedin.com/in/fatima-grace-santos/](https://www.linkedin.com/in/fatima-grace-santos/)')

    st.subheader('Joseph Figuracion')
    st.markdown('- Email: [[email protected]](mailto:[email protected])')
    st.markdown('- LinkedIn: [https://www.linkedin.com/in/josephfiguracion/](https://www.linkedin.com/in/josephfiguracion/)')

    st.subheader('John Barrion - Mentor')
    st.markdown('- Email: [email protected]')
    st.markdown('- LinkedIn: [https://www.linkedin.com/in/johnbarrion/](https://www.linkedin.com/in/johnbarrion/)')
Example #4
    def test_clear_cache(self):
        """Clear cache should do its thing."""
        foo_vals = []

        @st.cache
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache
        def bar(x):
            bar_vals.append(x)
            return x

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2], bar_vals)

        # Clear the cache and access our original values again. They
        # should be recomputed.
        caching.clear_cache()

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2, 0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2, 0, 1, 2], bar_vals)
Example #5
def select_period_input_cache():
    DATE_FORMAT = "%m/%d/%Y"
    start_ = "2021-01-01"
    today = datetime.today().strftime("%Y-%m-%d")
    from_ = st.sidebar.text_input("startdate (yyyy-mm-dd)", start_)

    try:
        FROM = dt.datetime.strptime(from_, "%Y-%m-%d").date()
    except ValueError:
        st.error("Please make sure that the startdate is in format yyyy-mm-dd")
        st.stop()

    until_ = st.sidebar.text_input("enddate (yyyy-mm-dd)", today)

    try:
        UNTIL = dt.datetime.strptime(until_, "%Y-%m-%d").date()
    except ValueError:
        st.error("Please make sure that the enddate is in format yyyy-mm-dd")
        st.stop()

    if FROM >= UNTIL:
        st.warning("Make sure that the end date is not before the start date")
        st.stop()

    if until_ == "2023-08-23":
        st.sidebar.error("Do you really, really, wanna do this?")
        if st.sidebar.button("Yes I'm ready to rumble"):
            caching.clear_cache()
            st.success("Cache is cleared, please reload to scrape new values")
Example #6
    def main(self):
        try:
            caching.clear_cache()
            [self.min_cluster_size, self.assignments, self.assign_prob, self.soft_assignments] = \
                load_clusters(self.working_dir, self.prefix)
            st.markdown(
                '**_CHECK POINT_**: Done assigning labels for **{}** instances in **{}** D space. Move on to __create '
                'a model__.'.format(self.assignments.shape,
                                    self.sampled_embeddings.shape[1]))
            st.markdown(
                'Your last saved run range was __{}%__ to __{}%__'.format(
                    self.min_cluster_size[0], self.min_cluster_size[-1]))
            if st.checkbox('Redo?', False, key='cr'):
                caching.clear_cache()
                self.slider(min_=float(self.min_cluster_size[0]),
                            max_=float(self.min_cluster_size[-1]))
                self.hierarchy()
                self.save()
            if st.checkbox("Show first 3D UMAP enhanced clustering plot?",
                           True,
                           key='cs'):
                self.show_classes()
        except (AttributeError, FileNotFoundError):
            self.slider()
            self.hierarchy()
            self.save()
            if st.checkbox("Show first 3D UMAP enhanced clustering plot?",
                           True,
                           key='cs'):
                self.show_classes()
Example #7
def hire():
    caching.clear_cache()

    df = pd.read_csv("NSW_Locations.csv")

    st.sidebar.write(' ')
    #Select bar for region
    region = st.sidebar.selectbox("Select region", [
        "Choose an option", "Halifax Regional Municipality", "Cape Breton",
        "Northern", "South Shore/Valley"
    ])

    #Calls display function
    if region:
        if region == "Halifax Regional Municipality":
            reg = "HRM"
            display_NSW(df, reg)
        elif region == "Cape Breton":
            reg = "CB"
            display_NSW(df, reg)
        elif region == "Northern":
            reg = "Northern"
            display_NSW(df, reg)
        elif region == "South Shore/Valley":
            reg = "South Shore/Valley"
            display_NSW(df, reg)
Example #8
def candr():
    caching.clear_cache()
    st.write('')
    st.header('Conclusions and Recommendations')
    st.write(
        '-----------------------------------------------------------------------'
    )
    st.write('')

    st.subheader('Conclusion:')
    st.markdown(
        '- Regions with high average number of schools in the **best metric group** are **CAR, Region VIII, and Region I**. Conversely, regions with a high average number of schools in the **worst metric group** are **NCR, Region IV-A, and Region XI.**'
    )
    st.markdown(
        '- **Leyte** is the province with the most schools in the **best metric group**, while **Cebu** is the province with most schools in the **worst metric group**.'
    )

    st.write('')

    st.subheader('Recommendations:')
    st.markdown(
        '- Take a look at the **reasons for schools being in the best and worst metric group** (e.g. overpopulation in the area, school density, funding in the region in comparison to its population)'
    )
    st.markdown(
        '- Take a look at **how metric groups affect NAT scores** (how do the metrics affect nat scores - do low metric schools have low nat scores?)'
    )
    st.markdown(
        '- Take a look at how **different clustering algorithms** affect how the schools were clustered based on the metrics'
    )
Example #9
def insertintotable():
    caching.clear_cache()
    nm = request.form.get("nm")
    stock_symbol = nm
    get_stock_data(stock_symbol)
    df = pd.read_csv(stock_symbol + '.csv')
    today_stock = df.tail(1)
    df = df[["Close"]]

    if len(df) == 0:
        return render_template('index.html', not_found=True, symbol=stock_symbol)
    else:
        get_today_live_stock_data(stock_symbol)
        p_values = [2, 4, 6]
        d_values = [1]
        q_values = range(0, 3)
        next_seven_days_forecast = next_seven_days_stock_forecast(stock_symbol, df, p_values, d_values, q_values)
        suggestion, tw_list, tw_pol = get_stock_sentiment(stock_symbol)

    return render_template('results.html',
                           quote=stock_symbol,
                           open_s=today_stock['Open'].to_string(index=False),
                           close_s=today_stock['Close'].to_string(index=False),
                           adj_close=today_stock['Adj Close'].to_string(index=False),
                           high_s=today_stock['High'].to_string(index=False),
                           low_s=today_stock['Low'].to_string(index=False),
                           vol=today_stock['Volume'].to_string(index=False),
                           tw_list=tw_list,
                           tw_pol=tw_pol,
                           decision=suggestion,
                           forecast_set=next_seven_days_forecast,
                           arima_pred=next_seven_days_forecast[0])
Example #10
def write():
    hist = session_state["mrkt"].history(start=session_state["begin"], end=session_state["end"],)
    hist = hist.reset_index()

    data = pd.DataFrame(data={'ds': hist['Date'], 'y': hist['Open']})

    m = get_model(data)

    future = m.make_future_dataframe(periods=session_state["days"])

    forecast = m.predict(future)

    st.title("Results")
    st.write(
        """
        Welcome to the results! You can click the graphs to expand them.
        """
    )

    st.pyplot(m.plot(forecast))
    st.write(
        """
        In the graph above you can find the expected forecast. All the black dots are data points that have been used to
        calculate the prediction. The darker blue line is the model's prediction. This is also calculated for previous
        days to show the accuracy of the model. The lighter blue "bands" around the prediction line are the expected
        upper and lower bounds.
        """
    )


    st.pyplot(m.plot_components(forecast))
    st.write(
        """
        This model can also give you some intuition on how the stock fluctuates within a year (fig 2.3) or even within a
        week (fig 2.2). The first graph shows the trend line. This is the same as in the first graph.
        """
    )

    # st.write(
    #     """
    #     want to know what the model predicts on a specific date? Select the date below and find out!
    #     """
    # )
    # date = st.date_input("select date")
    # forecast = m.predict(future)
    # st.write(f"The predicted value of stock {session_state['mrkt_val']} on {date} is: {forecast}")




    st.write(
        """
        Are you satisfied with the results? \n
        If you want to change some answers on a specific topic you can just go to the topic, hit save and return to the Results page to see the new results. \n
        Or if you want to retry from the start, hit the \"Try again\" button below!
        """
    )
    if st.button("Try again"):
        caching.clear_cache()
Example #11
def conclusion():
    caching.clear_cache()
    st.write('')
    st.header('Recommended Business Strategies')
    st.write(
        '-----------------------------------------------------------------------'
    )
    st.write('')
Example #12
def main():
    st.header("COG OWID")
    df_getdata = get_data().copy(deep=False)
    df = rename_columns(df_getdata)
    #df = df.fillna(0)
    continent_list_ = df["continent"].drop_duplicates().sort_values().tolist()
    continent_list = ["All"] + continent_list_
    print(continent_list)
    #continent_list =  continent_list_
    continent = st.sidebar.selectbox("Continent", continent_list, index=0)
    #df = df[len(df["iso_code"]) <= 3]
    print(df.dtypes)
    df = df.sort_values("Country")
    print(df)
    #df = df[df['continent'].apply(lambda x: left(x,4) !="OWID")]
    if continent != "All":
        df = df[df["continent"] == continent]
    else:
        df = df[(df["continent"] == 'Africa') | (df["continent"] == 'Asia') |
                (df["continent"] == 'Europe') |
                (df["continent"] == 'North America') |
                (df["continent"] == 'Oceania') |
                (df["continent"] == 'South America')]

    print(df)
    #df.dropna(subset=[ "Trust in Politicians"])
    columnlist = df.columns.tolist() + ["Clear_cache"]

    #st.write(df["Trust in Politicians"])
    #st.write(df["people_vaccinated_per_hundred"])
    what_to_show_left = st.sidebar.selectbox("X as", columnlist, index=157)
    if what_to_show_left == "Clear_cache":
        st.sidebar.error("Do you really, really, wanna do this?")
        if st.sidebar.button("Yes I'm ready to rumble"):
            caching.clear_cache()
            st.success("Cache is cleared, please reload to scrape new values")
        st.stop()

    what_to_show_right = st.sidebar.selectbox("Y as", columnlist, index=425)
    #st.write("For vacc.grade choose -Percentage_vaccinated_sop-")
    #try:
    make_scatterplot(df, what_to_show_left, what_to_show_right, "continent",
                     "location", None)

    st.subheader("Source for QoG data")
    st.write(
        "Dahlberg, Stefan,  Aksel Sundström, Sören Holmberg, Bo Rothstein, Natalia Alvarado Pachon & Cem Mert Dalli. 2021. The Quality of Government Basic Dataset, version Jan21. University of Gothenburg: The Quality of Government Institute, http://www.qog.pol.gu.se doi:10.18157/qogbasjan21"
    )
    st.subheader("Source for Vaccination rates")
    st.write(
        "https://www.sortiraparis.com/news/coronavirus/articles/240384-vaccine-in-the-world-as-of-datadatestodayfrlatest-the-percentage-of-people-vacci/lang/en dd 18/10/2021"
    )
    st.subheader("Source for Our World In Data-data")
    st.write(
        "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv"
    )
    st.header("Fields")
    st.write(columnlist)
Example #13
def get_state(name):
    caching.clear_cache()
    diffusion = Diffusion.from_pretrained(name)
    state = {
        "x": diffusion.denoise(1, n_steps=0),
        "curr_step": diffusion.num_timesteps,
        "diffusion": diffusion
    }
    return state
Example #14
def clear_on_change(key, value):
    state = get_state(key)
    state["current"] = value
    if state["previous"] is not None and state["current"] != state["previous"]:
        print(state)
        caching.clear_cache()
        state = get_state(key)
        state["current"] = value
    state["previous"] = state["current"]
Example #15
def interface(df):
    DATE_FORMAT = "%m/%d/%Y"
    start_ = "2022-1-31"
    #today = datetime.today().strftime("%Y-%m-%d")
    today = "2022-1-31"
    from_ = st.sidebar.text_input("date (yyyy-mm-dd)", start_)
    try:
        FROM = dt.datetime.strptime(from_, "%Y-%m-%d").date()
    except ValueError:
        st.error("Please make sure that the startdate is in format yyyy-mm-dd")
        st.stop()

    until_ = st.sidebar.text_input("enddate (yyyy-mm-dd)", today)
    try:
        UNTIL = dt.datetime.strptime(until_, "%Y-%m-%d").date()
    except ValueError:
        st.error("Please make sure that the enddate is in format yyyy-mm-dd")
        st.stop()

    if FROM > UNTIL:
        st.warning("Make sure that the end date is not before the start date")
        st.stop()

    if from_ == "2023-08-23":
        st.sidebar.error("Do you really, really, wanna do this?")
        if st.sidebar.button("Yes I'm ready to rumble"):
            caching.clear_cache()
            st.success("Cache is cleared, please reload to scrape new values")



    lijst = df.columns.tolist()
    del lijst[0:5]

    for i,x in enumerate(lijst):
        print (f"{i} - {x}")

    what_to_show_l = st.sidebar.selectbox(
        "What to show X-axis", lijst, index=37 #37 (pple fully vacc per hundred)
    )
    what_to_show_r = st.sidebar.selectbox(
        "What to show Y-axis", lijst, index=8 #10 (new_deaths_smoothed_per_million)
    )

    log_x = st.sidebar.selectbox(
        "X-ax as log", [True, False], index=1)
    log_y = st.sidebar.selectbox(
        "Y-ax as log", [True, False], index=1)

    if FROM != UNTIL:
        method_x = st.sidebar.selectbox("Method X-ax", ["mean", "perc_delta_min_max", "perc_delta_first_last", "first", "last", "lowest", "highest"], index=0)
        method_y = st.sidebar.selectbox("Method Y-ax", ["mean", "perc_delta_min_max", "perc_delta_first_last", "first", "last", "lowest", "highest"], index=0)
    else:
        method_x, method_y = None, None
    return FROM, UNTIL, lijst, what_to_show_l, what_to_show_r, log_x, log_y, method_x, method_y
Example #16
    def handle_clear_cache_request(self):
        """Clear this report's cache.

        Because this cache is global, it will be cleared for all users.

        """
        # Setting verbose=True causes clear_cache to print to stdout.
        # Since this command was initiated from the browser, the user
        # doesn't need to see the results of the command in their
        # terminal.
        caching.clear_cache()
Example #17
def sws_feedgen():
    yield StreamwebSite(
        "test_id",
        "TEST Web Site Name",
        "TEST A description of the web site.",
        {"name": "test"},
        "test",
        "test",
        "prd",
    )
    caching.clear_cache()
Example #18
def eda():
    caching.clear_cache()
    st.write('')
    st.header('Exploratory Data Analysis')
    st.write(
        '-----------------------------------------------------------------------'
    )
    st.subheader('Popular Genres in the Philippines')
    st.write(
        '- Acoustic, Rock, and R&B are the top three performing genres in the Philippines across all years.'
    )
    st.write('- Classical music is not as popular in the Philippines.')
    image = Image.open('graphs/Monthly Streams per Genre.PNG').convert('RGB')
    st.image(image, caption='', width=400, height=200)
    st.subheader(
        'Distribution of audio features across different genres for tracks belonging to the Top 200 Daily PH Chart.'
    )
    st.write(
        'Loudness, Energy, and Danceability appear to be the main distinguishing audio features among the selected genres.'
    )
    option = st.selectbox(
        'Select audio feature',
        ('acousticness', 'danceability', 'energy', 'instrumentalness',
         'liveness', 'loudness', 'speechiness', 'valence', 'tempo'))
    if option == 'acousticness':
        image = Image.open('graphs/acousticness.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'danceability':
        image = Image.open('graphs/danceability.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'energy':
        image = Image.open('graphs/energy.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'instrumentalness':
        image = Image.open('graphs/instrumentalness.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'liveness':
        image = Image.open('graphs/liveness.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'loudness':
        image = Image.open('graphs/loudness.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'popularity':
        image = Image.open('graphs/popularity.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'speechiness':
        image = Image.open('graphs/speechiness.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'tempo':
        image = Image.open('graphs/tempo.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    if option == 'valence':
        image = Image.open('graphs/valence.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
Example #19
def test_data():

    # clear streamlit cache because load_data uses cache decorator
    from streamlit import caching
    caching.clear_cache()

    df1, df2 = data_processing.load_data()

    assert df1.shape[0] > 0
    assert df1.shape[1] > 0
    assert df1[df1['youtube_url'].isna()].shape[0] == 0
    assert df2.shape[0] > 0
    assert df2.shape[1] > 0
Example #20
def contributors():
    caching.clear_cache()
    st.write('')
    st.header('Contributors')
    st.write(
        '-----------------------------------------------------------------------'
    )
    st.write('')

    st.write('Emerson Fili Chua - Mentor')
    st.write('Generoso Roberto')
    st.write('Kaye Janelle Yao')
    st.write('Rodel Arenas')
    st.write('Tyron Rex Frago')
Example #21
def main(phrase="Tell EVE something!"):

    # Instantiating class object for this conversation
    a = Actions(phrase)

    # st.text(respond(a.utter_greet()))

    intents, user_input, history_df, end = conversation(a)
    print(end)

    if st.sidebar.button("Show backend"):
        backend_dash(intents, user_input, history_df)

    if end == False:
        caching.clear_cache()
        conversation(Actions("Could you please rephrase?"))
Example #22
def genre_classification():
    caching.clear_cache()
    st.write('')
    st.header('Song Genre Classification')
    st.write(
        '-----------------------------------------------------------------------'
    )
    st.write('')
    st.write(
        'Tracks obtained from various playlists of different genres were trained for genre classification.'
    )
    st.subheader('Chosen Genres:')
    st.write('- Acoustic, Classical, Reggae, Rock, R&B')
    st.subheader('Chosen Features:')
    st.write(
        '- Danceability, Energy, Key, Loudness, Mode, Speechiness, Acousticness, Instrumentalness, Valence, Tempo'
    )
    st.subheader('Selected Model')
    st.write('- XGBoost')
    st.write('- Accuracy : 77.93%')
    st.subheader('Results of other models')

    option = st.selectbox('Select Model',
                          ('kNN', 'SVM Linear', 'SVM Polynomial', 'SVM RBF',
                           'Decision Trees', 'Random Forest', 'XGBoost'))
    st.write('Here are the results obtained from the', option, 'model.')
    if option == 'kNN':
        image = Image.open('graphs/knn.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
    elif option == 'SVM Linear':
        image = Image.open('graphs/svm linear.PNG').convert('RGB')
        st.image(image, caption='', width=500, height=300)
    elif option == 'SVM Polynomial':
        image = Image.open('graphs/svm polynomial.PNG').convert('RGB')
        st.image(image, caption='', width=550, height=300)
    elif option == 'SVM RBF':
        image = Image.open('graphs/svm rbf.PNG').convert('RGB')
        st.image(image, caption='', width=500, height=300)
    elif option == 'Decision Trees':
        image = Image.open('graphs/decision tree.PNG').convert('RGB')
        st.image(image, caption='', width=500, height=300)
    elif option == 'Random Forest':
        image = Image.open('graphs/random forest.PNG').convert('RGB')
        st.image(image, caption='', width=550, height=300)
    elif option == 'XGBoost':
        image = Image.open('graphs/xgboost.PNG').convert('RGB')
        st.image(image, caption='', width=400, height=200)
Example #23
    def main(self):
        try:
            [self.sampled_features, self.sampled_embeddings] = \
                load_embeddings(self.working_dir, self.prefix)
            st.markdown(
                '**_CHECK POINT_**: Done non-linear transformation of **{}** instances '
                'from **{}** D into **{}** D. Move on to __Identify and '
                'tweak number of clusters__'.format(
                    *self.sampled_features.shape,
                    self.sampled_embeddings.shape[1]))
            if st.checkbox('Redo?', False, key='er'):
                caching.clear_cache()
                self.subsample()
                self.compute()
        except FileNotFoundError:
            self.subsample()
            self.compute()
Example #24
def main():
    caching.clear_cache()
    st.title("Sentiment Analysis Demo")
    activities = ["Show Instructions", "Sentiment", "Text Analysis of URL"]
    choice = st.sidebar.selectbox("Activities", activities)

    if choice == "Show Instructions":
        filename = 'instruct1.md'
        try:
            with open(filename) as input:
                st.subheader(input.read())
        except FileNotFoundError:
            st.error('File not found')
        st.sidebar.success('To continue select one of the activities.')

    elif choice == "Sentiment":
        st.subheader("Sentiment Analysis")
        input_choice_flow()

    elif choice == "Text Analysis of URL":
        st.subheader("Analysis of Text from URL")

        raw_url = st.text_input("Enter URL")
        text_limit = st.slider("Length of Text to Preview", 50, 100)

        if st.button("Analyze"):
            result = get_text(raw_url)
            blob = TextBlob(result)
            len_of_full_text = len(result)
            len_of_short_text = round(len(result) / text_limit)
            st.info("Full Text Length: {}".format(len_of_full_text))
            st.info("Short Text Length: {}".format(len_of_short_text))
            st.write(result[:len_of_short_text])

            all_sentences = [sent for sent in blob.sentences]
            all_sentiment = [
                sent.sentiment.polarity for sent in blob.sentences
            ]

            new_df = pd.DataFrame(zip(all_sentences, all_sentiment),
                                  columns=["Sentences", "Sentiment"])
            st.dataframe(new_df)

            st.write(new_df.head(21).plot(kind='bar'))
            st.pyplot()
Example #25
def main():
    if st.button("**GUARDAR CALIFICACION**"):
        dni = documento
        ids = indice
        polaridad = polaridad1
        score = sccore
        emocion = emocion2
        score_emotion = sccore2
        result = firebase.post(
            '/respuesta', {
                'id': int(ids),
                'polaridad': polaridad,
                'score': int(score),
                'emocion': emocion,
                'score_emotion': int(score_emotion),
                'dni': dni
            })
        caching.clear_cache()
Example #26
def change_background():

    change = st.sidebar.button("Change Background of App")

    if (change):
        caching.clear_cache()
        img_url, img = get_url("best landscape photos")

        st.write("Photo By " + img['credits'] + " on Unsplash")
        page_bg_img = '''
    <style>
    body {
    background-image: url(''' + img_url + ''');
    background-size: cover;
    }
    </style>
    '''
        st.markdown(page_bg_img, unsafe_allow_html=True)
Example #27
def candr():
    caching.clear_cache()
    st.write('')
    st.header('Conclusions and Recommendations')
    st.write('-----------------------------------------------------------------------') 
    st.write('')

    st.subheader('Conclusions:')
    st.markdown('- Compose songs that are **higher tempo**, **lower danceability**, and that are **energetic.**')
    st.markdown('- Recommended **Pinoy Indie** artists to collaborate with are **Markki Stroem, Brisom, Gabe Bondoc, TALA, Jensen & The Flips**.')
    st.markdown('- Recommended **Pinoy Rock** artists to collaborate with are **6cyclemind, Bamboo, Shamrock, Cinderell, Fred Engay**.')

    st.write('')

    st.subheader('Recommendations:')
    st.markdown('- Explore different machine learning algorithms for classification.')
    st.markdown('- Take a look at how seasonality affects pinoy indie/rock overall streams.')
    st.markdown('- Explore different evaluation metrics of success other than charting in the Top 200 Philippines playlist.')
Example #28
def main():
    caching.clear_cache()
    st.title("OCR Demo")
    activities = ["Show Instructions", "OCR"]
    choice = st.sidebar.selectbox("Activities", activities)

    if choice == "Show Instructions":
        filename = 'instruct1.md'
        try:
            with open(filename) as input:
                st.subheader(input.read())
        except FileNotFoundError:
            st.error('File not found')
        st.sidebar.success('To continue select one of the activities.')

    elif choice == "OCR":
        st.subheader("Optical Character Recognition")
        input_choice_flow()
Example #29
def recommendation_engine():
    caching.clear_cache()
    st.write('')
    st.header('Recommendation Engine')
    st.write('-----------------------------------------------------------------------') 
    st.write('')

    st.subheader("Tracks Most Similar to 'Pwede Ba' (Pinoy Indie)")
    if st.checkbox('Show code', value=False, key="1"):
        st.code("""
        #get top 10 nearest to seed_track_data
        recommendation_df = tracks_df[tracks_df['predicted_genre']=='pinoy indie']\
                                            [tracks_df['track_id']!=seed_track_data['track_id']]\
                                            .sort_values('cosine_dist')[:10]
        recommendation_df[['track_name','artist_name','cosine_dist','predicted_genre']+feature_cols]
        recommendation_df.to_csv('../data/recommended_pinoy_indie.csv')
        """, language="python")
    df_indie = pd.read_csv('data/recommended_pinoy_indie.csv')
    st.write(df_indie.set_index('track_id').drop(columns=['Unnamed: 0']))
    st.write('')
    st.subheader('Top Artists with a Similar "Pinoy Indie" Sound:')
    st.write('Markki Stroem, Brisom, Gabe Bondoc, TALA, Jensen & The Flips')
    st.write('-----------------------------------------------------------------------') 
    st.write('')
    

    st.subheader("Tracks Most Similar to 'Pwede Ba' (Pinoy Rock)")
    if st.checkbox('Show code', value=False, key="2"):
        st.code("""
        #get top 10 nearest to seed_track_data
        recommendation_df = tracks_df[tracks_df['predicted_genre']=='pinoy rock']\
                                            [tracks_df['track_id']!=seed_track_data['track_id']]\
                                            .sort_values('cosine_dist')[:10]
        recommendation_df[['track_name','artist_name','cosine_dist','predicted_genre']+feature_cols]
        recommendation_df.to_csv('../data/recommended_pinoy_rock.csv')
        """, language="python")
    df_rock = pd.read_csv('data/recommended_pinoy_rock.csv')
    st.write(df_rock.set_index('track_id').drop(columns=['Unnamed: 0']))
    st.write('')
    st.subheader('Top Artists with a Similar "Pinoy Rock" Sound:')
    st.write('6cyclemind, Bamboo, Shamrock, Cinderell, Fred Engay')
Example #30
    def main(self):
        try:
            [self.x_test, self.y_test, self.validate_clf, self.clf, self.validate_score, self.predictions] = \
                load_classifier(self.working_dir, self.prefix)
            st.markdown('**_CHECK POINT_**: Done training random forest classifier '
                        'mapping **{}** features to **{}** assignments. Move on to '
                        '__Generate video snippets for interpretation__.'.format(
                            self.features.shape[0], self.predictions.shape[0]))
            if st.checkbox('Redo?', False, key='mr'):
                caching.clear_cache()
                self.randomforest()
            if st.checkbox("Show confusion matrix on test?", False, key='ms'):
                self.show_confusion_matrix()
            if st.checkbox("Show cross-validated accuracy on test?", False, key='mss'):
                self.show_crossval_score()
        except FileNotFoundError:
            self.randomforest()
            if st.checkbox("Show confusion matrix on test?", False, key='ms'):
                self.show_confusion_matrix()
            if st.checkbox("Show cross-validated accuracy on test?", False, key='mss'):
                self.show_crossval_score()