Example #1
import pandas as pd
import streamlit as st


def app():
    st.title('Orientation')

    st.write(
        '**This page will show the graphs and tables based on the Faculty Participation in Orientation**'
    )

    data = st.file_uploader("Upload the relevant CSV file")
    if data is None:
        st.stop()
    df = pd.read_csv(data)

    page_names = ["Department", "Faculty"]
    page = st.radio("View by", page_names, index=0)

    if page == "Department":
        col1, col2, col3, col4 = st.beta_columns(4)
        with col1:
            temp1 = st.button("Semester wise")
        with col2:
            temp2 = st.button("Year wise")
        with col3:
            temp3 = st.button("Venue")
        with col4:
            temp7 = st.button("University wise")

    if page == "Department":
        if temp1:
            df['Date'] = df['Date'].astype('datetime64[ns]')

            col = sorted(df['Date'].dt.year.unique())

            df1 = pd.DataFrame(data=None, columns=['Count'])
            for i in col:
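                # ODD semester: 15 June to 15 December of year i;
                # EVEN semester: 15 December of year i to 15 June of year i + 1.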
                mask = (df['Date'] > str(i) + '0615') & (df['Date'] <=
                                                         str(i) + '1215')
                mask1 = (df['Date'] > str(i) + '1215') & (df['Date'] <=
                                                          str(i + 1) + '0615')
                test5 = df.loc[mask]
                test6 = df.loc[mask1]
                c1 = test5['Date']
                c2 = test6['Date']
                t1 = c1.shape[0]
                t2 = c2.shape[0]
                t1 = pd.DataFrame([t1],
                                  columns=['Count'],
                                  index=['ODD sem ' + str(i)])
                df1 = df1.append(pd.DataFrame(t1))
                t2 = pd.DataFrame([t2],
                                  columns=['Count'],
                                  index=['EVEN sem ' + str(i)])
                df1 = df1.append(pd.DataFrame(t2))

            st.write('**Table based on Sem-Wise Count for whole Department**')

            st.table(df1)
            st.write('**Graph based on Sem-Wise Count for whole Department**')

            st.bar_chart(df1)

    if page == "Department":
        if temp2:

            df['Date'] = df['Date'].astype('datetime64[ns]')
            col = sorted(df['Date'].dt.year.unique())
            df2 = pd.DataFrame(data=None, columns=['Count'])
            for i in col:
                mask = (df['Date'] > str(i) + '0615') & (df['Date'] <=
                                                         str(i + 1) + '0615')
                test5 = df.loc[mask]
                c1 = test5['Date']
                t1 = c1.shape[0]
                t1 = pd.DataFrame([t1],
                                  columns=['Count'],
                                  index=[str(i) + '-' + str(i + 1)])
                df2 = df2.append(pd.DataFrame(t1))

            st.write('**Table based on Year-Wise Count for whole Department**')

            st.table(df2)
            st.write(
                '**Graph based on Year-Wise Count for whole Department**')

            st.bar_chart(df2)

    if page == "Department":
        if temp3:
            df2 = pd.DataFrame(data=None, columns=['Count'])

            totalr = len(df[df['Venue'] == 'RAIT'])
            totall = len(df['Venue'])
            totald = totall - totalr

            t1 = pd.DataFrame([totalr], columns=['Count'], index=['RAIT'])
            df2 = df2.append(pd.DataFrame(t1))

            t1 = pd.DataFrame([totald],
                              columns=['Count'],
                              index=['Other Universities'])
            df2 = df2.append(pd.DataFrame(t1))

            st.write(
                '**Table based on Venue-Wise Count for whole Department**')

            st.table(df2)
            st.write(
                '**Graph based on Venue-Wise Count for whole Department**')

            st.bar_chart(df2)

    if page == "Department":
        if temp7:

            df2 = pd.DataFrame(data=None, columns=['Count'])

            totalm = len(df[df['NameofUniversity'] == 'University of Mumbai'])
            totalr = len(df[df['NameofUniversity'] == 'RAIT'])
            totall = len(df['NameofUniversity'])

            totald = totall - totalm - totalr

            t1 = pd.DataFrame([totalm],
                              columns=['Count'],
                              index=['Mumbai University'])
            df2 = df2.append(pd.DataFrame(t1))

            t1 = pd.DataFrame([totalr], columns=['Count'], index=['RAIT'])
            df2 = df2.append(pd.DataFrame(t1))

            t1 = pd.DataFrame([totald],
                              columns=['Count'],
                              index=['Other Universities'])
            df2 = df2.append(pd.DataFrame(t1))

            st.write(
                '**Table based on University-Wise Count for whole Department**'
            )

            st.table(df2)
            st.write(
                '**Graph based on University-Wise Count for whole Department**'
            )

            st.bar_chart(df2)

    if page == "Faculty":

        faculties = sorted(df['NameOfFaculty'].unique())
        faculty = st.selectbox('Select the name of the faculty:', faculties)

        df2 = pd.DataFrame(data=None, columns=['Count'])

        df['Date'] = df['Date'].astype('datetime64[ns]')

        col = sorted(df['Date'].dt.year.unique())

        for i in col:
            mask = ((df['Date'] > str(i) + '0615') &
                    (df['Date'] <= str(i + 1) + '0615') &
                    (df['NameOfFaculty'] == faculty))

            test5 = df.loc[mask]
            c1 = test5['Date']
            t1 = c1.shape[0]
            t1 = pd.DataFrame([t1],
                              columns=['Count'],
                              index=[str(i) + '-' + str(i + 1)])
            df2 = df2.append(pd.DataFrame(t1))
        st.write('**Table based on Year-Wise Count for Faculty**')

        st.table(df2)
        st.write('**Graph based on Year-Wise Count for Faculty**')

        st.bar_chart(df2)

        df4 = pd.DataFrame(data=None, columns=['Count'])

        for i in col:
            mask = ((df['Date'] > str(i) + '0615') &
                    (df['Date'] <= str(i) + '1215') &
                    (df['NameOfFaculty'] == faculty))
            mask1 = ((df['Date'] > str(i) + '1215') &
                     (df['Date'] <= str(i + 1) + '0615') &
                     (df['NameOfFaculty'] == faculty))
            test5 = df.loc[mask]
            test6 = df.loc[mask1]
            c1 = test5['Date']
            c2 = test6['Date']
            t1 = c1.shape[0]
            t2 = c2.shape[0]
            t1 = pd.DataFrame([t1],
                              columns=['Count'],
                              index=['ODD sem ' + str(i)])
            df4 = df4.append(pd.DataFrame(t1))
            t2 = pd.DataFrame([t2],
                              columns=['Count'],
                              index=['EVEN sem ' + str(i)])
            df4 = df4.append(pd.DataFrame(t2))
        st.write('**Table based on Sem-Wise Count for Faculty**')

        st.table(df4)
        st.write('**Graph based on Sem-Wise Count for Faculty**')

        st.bar_chart(df4)

        data1 = df.loc[df['NameOfFaculty'] == faculty]

        df5 = pd.DataFrame(data=None, columns=['Count'])

        totalm = len(
            data1[data1['NameofUniversity'] == 'University of Mumbai'])
        totalr = len(data1[data1['NameofUniversity'] == 'DYPU'])
        totall = len(data1['NameofUniversity'])

        totald = totall - totalm - totalr

        t1 = pd.DataFrame([totalm],
                          columns=['Count'],
                          index=['Mumbai University'])
        df5 = df5.append(pd.DataFrame(t1))

        t1 = pd.DataFrame([totalr], columns=['Count'], index=['RAIT'])
        df5 = df5.append(pd.DataFrame(t1))

        t1 = pd.DataFrame([totald],
                          columns=['Count'],
                          index=['Other Universities'])
        df5 = df5.append(pd.DataFrame(t1))

        st.write('**Table based on University-Wise Count for Faculty**')

        st.table(df5)
        st.write('**Graph based on University-Wise Count for Faculty**')

        st.bar_chart(df5)

        df6 = pd.DataFrame(data=None, columns=['Count'])

        data1 = df.loc[df['NameOfFaculty'] == faculty]

        totalr = len(data1[data1['Venue'] == 'RAIT'])
        totalo = len(data1[data1['Venue'] == 'Online'])
        totalr = totalr + totalo

        totall = len(data1['Venue'])

        totald = totall - totalr

        t1 = pd.DataFrame([totalr], columns=['Count'], index=['RAIT'])
        df6 = df6.append(pd.DataFrame(t1))

        t1 = pd.DataFrame([totald],
                          columns=['Count'],
                          index=['Other Universities'])
        df6 = df6.append(pd.DataFrame(t1))

        st.write('**Table based on Venue-Wise Count for Faculty**')

        st.table(df6)
        st.write('**Graph based on Venue-Wise Count for Faculty**')

        st.bar_chart(df6)
Example #2
                              'y': 0.9,
                              'x': 0.45,
                              'xanchor': 'center',
                              'yanchor': 'top'
                          })
        st.plotly_chart(fig)
        st.subheader(f'{title} Data')
        data = data.set_index('Date')
        st.table(data[states][-10:])

    try:
        states = st.multiselect('  ',
                                lis,
                                default=['Rajasthan', 'Delhi', 'Maharashtra'])
    except Exception:
        st.write('Select State')

    if fetch == 'Confirmed Cases':
        figure_plot_state(confirm, states, 'Confirmed Cases', 'blue')
    elif fetch == 'Deaths':
        figure_plot_state(deaths, states, 'Deaths', 'red')
    elif fetch == 'Recoveries':
        figure_plot_state(recovery, states, 'Recovery', 'green')
    elif fetch == 'Tests':
        figure_plot_state(test, states, 'Total Tests', 'blue')

if radio == 'For Particular Time':
    st.markdown("""     <ul>
                        <li>End Date should be greater than Start Date</li>
                        <li>Select Checkbox to Visualize and Analyze</li>
                        </ul>
Example #3
def check_if_cached():
    global cache_was_hit
    cache_was_hit = False


@st.cache
def my_func(arg1, arg2=None, *args, **kwargs):
    return random.randint(0, 2**32)


check_if_cached()

if cache_was_hit:
    st.warning('You must clear your cache before you run this script!')
    st.write('''
        To clear the cache, press `C` then `Enter`. Then press `R` on this page
        to rerun.
    ''')
else:
    st.warning('''
        IMPORTANT: You should test rerunning this script (to get a failing
        test), then clearing the cache with the `C` shortcut and checking that
        the test passes again.
    ''')

    st.subheader('Test that basic caching works')
    u = my_func(1, 2, dont_care=10)
    v = my_func(1, 2, dont_care=10)
    if u == v:
        st.success('OK')
    else:
        st.error('Fail')
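
    # A minimal extension (assumption, not part of the original test): st.cache
    # keys on a function's arguments, so calling with different args should
    # miss the cache and (almost surely) return a different random value.
    st.subheader('Test that different args produce a cache miss')
    w = my_func(1, 3, dont_care=10)
    if u != w:
        st.success('OK')
    else:
        st.error('Fail')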
Example #4
    def calculate_descriptors(self):
        st.markdown("## **Molecular descriptors**")
        if st.checkbox('Calculate Mordred descriptors (slower, more options)'):
            self.write_mordred_descriptors('.metadata/smiles.smi',
                                           '.metadata/csv/mordred.csv',
                                           self.data)
            # Read MORDRED descriptors
            descriptors = pd.read_csv('.metadata/csv/mordred.csv.gz',
                                      compression='gzip',
                                      low_memory=False)
            descriptors.rename(columns={'name': 'CID'}, inplace=True)
            self.calc = 'Mordred'  # control variable
        elif st.checkbox(
                'Calculate RDKit descriptors (faster, fewer options)'):
            self.write_rdkit_descriptors('.metadata/smiles.smi',
                                         '.metadata/csv/rdkit.csv', self.data)
            # Read RDKit descriptors
            descriptors = pd.read_csv('.metadata/csv/rdkit.csv.gz',
                                      compression='gzip')
            self.calc = 'RDKit'  # control variable
        else:
            file = st.file_uploader('or Upload descriptors file')
            show_file = st.empty()

            if not file:
                show_file.info("Please upload a file of type: .csv")
                self.copyright_note()
                st.stop()
            else:
                descriptors = pd.read_csv(file)
                if 'CID' not in descriptors.columns:
                    st.error('Compounds must be identified by "CID".')
                    self.copyright_note()
                    st.stop()
            file.close()
            self.calc = 'External file'  # control variable

        # Keep only numeric columns
        numeric = descriptors.select_dtypes(
            include=[int, float]).columns.tolist()
        descriptors = descriptors[['CID'] + numeric]
        # Drop NaN and zero-only columns
        descriptors.dropna(axis=1, inplace=True)
        descriptors = descriptors.loc[:, (descriptors != 0).any(axis=0)]

        st.markdown(f'#### Calculated descriptors (_{self.calc}_)')
        st.dataframe(descriptors.head())

        self.descriptors_cols = descriptors.columns.tolist()[1:]
        selected = st.multiselect(
            label="Select descriptors",
            options=(['Select all ({})'.format(len(self.descriptors_cols))] +
                     self.descriptors_cols))
        if 'Select all ({})'.format(len(self.descriptors_cols)) in selected:
            selected = self.descriptors_cols
        st.write("You have selected", len(selected), "features")

        if not selected:
            self.copyright_note()
            st.stop()

        descriptors = descriptors[['CID'] + selected]
        return descriptors
Example #5
    def train_test_scores(self, model_name):
        import pickle
        try:
            with open(f'pickle/{model_name}.pickle', 'rb') as file:
                self.pipeline = pickle.load(file)
        except OSError as e:
            st.error(f"""Oops! It seems the model hasn't been trained yet.     
Detailed error: {str(e)}""")
            self.copyright_note()
            st.stop()

        from sklearn.metrics import roc_curve, auc
        fig, ax = pyplot.subplots()

        try:
            self.test_proba = self.pipeline.predict_proba(self.X_test)[:, 1]
            self.train_proba = self.pipeline.predict_proba(self.X_train)[:, 1]
        except ValueError as e:
            st.error(
                f'''Expected features do not match the given features, please train the model again.     
Detailed error: {str(e)}''')
            self.copyright_note()
            st.stop()

        fpr, tpr, _ = roc_curve(self.y_test, self.test_proba)
        auc_test = auc(fpr, tpr)
        ax.plot(fpr, tpr, label=f'Test set: {auc_test:>.2f}')

        fpr, tpr, _ = roc_curve(self.y_train, self.train_proba)
        auc_train = auc(fpr, tpr)
        ax.plot(fpr, tpr, label=f'Training set: {auc_train:>.2f}')

        pyplot.xlabel('False Positive Rate')
        pyplot.ylabel('True Positive Rate')
        pyplot.title(model_name)
        pyplot.legend()

        if not os.path.isdir('.metadata/roc'):
            os.makedirs('.metadata/roc')
        pyplot.savefig(f'.metadata/roc/{model_name}.png', dpi=200)

        st.markdown(
            '_* The dataset split into training and test sets is done randomly._'
        )
        st.markdown(
            '_** The training set accounts for 80% of the original dataset, and the test set accounts for the remaining 20%._'
        )
        st.markdown('### Receiver Operating Characteristic')
        st.pyplot(fig)

        if st.checkbox('Show ROC of the previous models'):
            _, _, filenames = next(os.walk('.metadata/roc'))
            filenames.remove(f'{model_name}.png')
            if not filenames:
                st.warning(
                    'No model to compare! You can test other classifiers if you wish to compare their performances.'
                )
            for clf in filenames:
                st.image(f'.metadata/roc/{clf}')

        from sklearn.metrics import f1_score
        from imblearn.metrics import geometric_mean_score

        y_pred = self.pipeline.predict(self.X_test)
        y_pred_train = self.pipeline.predict(self.X_train)
        scores = [
            model_name,
            f1_score(self.y_test, y_pred),
            geometric_mean_score(self.y_test, y_pred), auc_test,
            f1_score(self.y_train, y_pred_train),
            geometric_mean_score(self.y_train, y_pred_train), auc_train
        ]

        scores_data = pd.DataFrame([scores],
                                   columns=[
                                       'Classifier', 'test_f1',
                                       'test_geometric_mean', 'test_roc_auc',
                                       'train_f1', 'train_geometric_mean',
                                       'train_roc_auc'
                                   ])
        if os.path.isfile('.metadata/scores.csv'):
            scores_data = pd.concat(
                [scores_data, pd.read_csv('.metadata/scores.csv')])
            scores_data.drop_duplicates(subset=['Classifier'],
                                        inplace=True,
                                        keep='last')
            scores_data.reset_index(drop=True, inplace=True)

        scores_data.to_csv('.metadata/scores.csv', index=False)
        st.write('### Scoring metrics')
        st.write(scores_data)
Example #6
indicator_bb = BollingerBands(df['Close'])

bb = df.copy()
bb['bb_h'] = indicator_bb.bollinger_hband()
bb['bb_l'] = indicator_bb.bollinger_lband()
bb = bb[['Close', 'bb_h', 'bb_l']]

macd = MACD(df['Close']).macd()

rsi = RSIIndicator(df['Close']).rsi()
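
# For reference, a minimal sketch (assuming ta's defaults: a 20-period window
# and 2 standard deviations) of what the band columns above contain:
sma = df['Close'].rolling(20).mean()
std = df['Close'].rolling(20).std(ddof=0)
manual_bb_h, manual_bb_l = sma + 2 * std, sma - 2 * std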

###################
# Set up main app #
###################

st.write('Stock Bollinger Bands')

st.line_chart(bb)

progress_bar = st.progress(0)

# https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py

st.write('Stock Moving Average Convergence Divergence (MACD)')
st.area_chart(macd)

st.write('Stock RSI ')
st.line_chart(rsi)

st.write('Recent data ')
st.dataframe(df.tail(10))
Example #7
    pace = st.number_input("Average pace in minutes", step=0.1)

    if race_length != 0 and fuel_consumption != 0 and pace != 0:
        race_length_in_seconds = race_length * 60
        pace_in_seconds = decrease_time_unit(pace)
        fuel_with_formation_lap = compute_fuel_to_add(race_length_in_seconds,
                                                      pace_in_seconds,
                                                      fuel_consumption)
        fuel_without_formation_lap = compute_fuel_to_add(
            race_length_in_seconds, pace_in_seconds, fuel_consumption, False)
        st.header(
            f"You should add {fuel_with_formation_lap}L with a formation lap \
             or {fuel_without_formation_lap}L without")
        st.write("""
            The number of lap is estimated as the length of the stint divided by the average pace, rounded up.
            The fuel to add is the fuel consumption per lap times the number of expected laps + 3% of uncertainty, rounded up.
            An extra lap is added in case of formation lap.
        """)

if tool == tools[1]:
    col1, col2, col3 = st.beta_columns(3)
    with col1:
        start_of_race = st.time_input("Start of race")
    with col2:
        race_length = st.number_input("Length in hours",
                                      value=0,
                                      format="%d",
                                      step=1)
    with col3:
        stint_length = st.number_input("Stint length in minutes",
                                       value=0,
Example #8
import pandas as pd
import pickle
import streamlit as st

st.set_page_config(page_icon='', initial_sidebar_state='expanded')

st.title('Beyonce or Rihanna Lyrics Classifier')

st.write('Use the sidebar to select a page to view.')

page = st.sidebar.selectbox('Page', ('About', 'EDA', 'Make a prediction'))


@st.cache
def load_data():
    ladies = pd.read_csv('lyrics_and_artist.csv', encoding='latin-1')
    return ladies


if page == 'About':
    st.subheader('About this project')
    st.markdown(
        '''This is a Streamlit app that hosts my Beyonce vs. Rihanna SVM model, with count vectorized text.
    For each of these artists, I gathered the lyrics for their top 75 most popular songs (not including remixes & features), according to the Genius song lyric site.



| Top 75 most popular songs for Beyonce         | Top 75 most popular songs for Rihanna         |
|-----------------------------------------------|-----------------------------------------------|
| Song 1: "Drunk in Love"                       | Song 1: "Work"                                |
| Song 2: "Formation"                           | Song 2: "Love on the Brain"                   |
| Song 3: "Partition"                           | Song 3: "Needed Me"                           |
Example #9
    doc_clean_new = []
    for line in doc_clean:
        result = replace_all(line, dic)
        doc_clean_new.append(result)

    prediksi_ini = doc_clean_new
    prediksi_ini = pd.Series(prediksi_ini)
    prediksi_ini = vectorizer.transform(prediksi_ini.values.ravel())
    prediksi_ini = prediksi_ini.toarray()

    #hasil = grid_NB.predict(prediksi_ini)
    hasil = modellin_sigmoid.predict(prediksi_ini)
    yokbisa = mapping_nama(y, y_trans)

    st.text('Category classification result:')

    with st.spinner(text='In progress'):
        st.write(yokbisa[hasil[0]])
        st.write(hasil[0])
        st.success('Classification Success')

if selected3 == "Contact":
    st.header('About Me')
    st.subheader('Muhammad Bondan Vitto Ramadhan')
    st.subheader('NRP : 06211840000086')
    st.image('Foto_Di_Ketua_DPR_2.jpg')
    st.text('Contact Me Through : ')
    st.text('Email      : [email protected]')
    st.text('LinkedIn   : linkedin.com/in/bondanvitto')
    st.text('Instagram  : bondanvitto')
Example #10
import requests
import streamlit as st
import plotly.graph_objects as go
from plotly.subplots import make_subplots


def run(data):
    """
    50- and 200-day Exponential Moving Averages trend indicator.
    """

    st.markdown("<h3 style='text-align:center;'> Trend Indicator </h3>",
                unsafe_allow_html=True)
    st.write("""
        Trend indicators analyze whether a market is moving up, down, or sideways over time; this is a lagging measure
        as it looks at how historical data led to the current security price.
        
        Specifically, this indicator looks at the 50- and 200-day Exponential Moving Average of a security; in general,
        the 50-day EMA is used to measure the average intermediate price of a security, while the 200-day EMA measures
        the average long term price. 
        
        *SIGNALS*
        * **SELL:** The 50-day EMA crossing below the 200-day EMA signifies a bearish change in trend
        * **BUY:** The 50-day EMA crossing above the 200-day EMA signifies a bullish change in trend
        
        Example (Investopedia):
        """)
    st.markdown(
        '<center><img src="https://www.investopedia.com/thmb/W53XvEGTcsv5QywLWP4gkvAdWLE=/4888x3964/filters:no_'
        'upscale():max_bytes(150000):strip_icc():format(webp)/dotdash_Final_Top_Technical_Indicators_for_'
        'Rookie_Traders_Sep_2020-01-65454aefbc9042ef98df266def257fa3.jpg" height="250"/></center>',
        unsafe_allow_html=True)
    st.write()
    st.subheader('Stock Data')

    token = data['token']
    ticker = data['ticker']
    start_date = data['startDate']
    end_date = data['endDate']
    candles = data['candles']

    fifty = requests.get(url='https://finnhub.io/api/v1/indicator?symbol=' +
                         ticker + '&resolution=D&' + 'from=' +
                         str(int(start_date.timestamp())) + '&to=' +
                         str(int(end_date.timestamp())) +
                         '&indicator=ema&timeperiod=50&token=' + token).json()

    two_hundo = requests.get(
        url='https://finnhub.io/api/v1/indicator?symbol=' + ticker +
        '&resolution=D&' + 'from=' + str(int(start_date.timestamp())) +
        '&to=' + str(int(end_date.timestamp())) +
        '&indicator=ema&timeperiod=200&token=' + token).json()

    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(go.Candlestick(x=candles.index,
                                 open=candles['Open'],
                                 high=candles['High'],
                                 low=candles['Low'],
                                 close=candles['Close'],
                                 name='Candlestick'),
                  secondary_y=True)
    fig.add_trace(go.Scatter(x=candles.index[50:],
                             y=fifty['ema'][50:],
                             mode='lines',
                             line={'color': 'rgb(203,213,232)'},
                             name='50-Day EMA'),
                  secondary_y=True)
    fig.add_trace(go.Scatter(x=candles.index[200:],
                             y=two_hundo['ema'][200:],
                             mode='lines',
                             line={'color': 'rgb(253,205,172)'},
                             name='200-Day EMA'),
                  secondary_y=True)
    fig.add_trace(go.Bar(x=candles.index,
                         y=candles['Volume'],
                         marker={'color': 'rgb(0,0,0)'},
                         name='Volume'),
                  secondary_y=False)
    fig.update_xaxes(
        rangeslider_visible=True,
        rangeselector=dict(buttons=list([
            dict(count=1, label="1m", step="month", stepmode="backward"),
            dict(count=6, label="6m", step="month", stepmode="backward"),
            dict(count=1, label="YTD", step="year", stepmode="todate"),
            dict(count=1, label="1y", step="year", stepmode="backward"),
            dict(step="all")
        ])),
        rangebreaks=[dict(bounds=["sat", "sun"])],
        ticklabelmode="period")
    fig.layout.yaxis2.showgrid = False
    fig.layout.title = 'Trend Indicator Graph'

    st.plotly_chart(fig)
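
# A minimal sketch (assumption, not part of the original app) of deriving the
# SELL/BUY signals described above from the two EMA series:
import pandas as pd


def ema_cross_signals(ema_fast, ema_slow):
    """Return +1 where the fast EMA crosses above the slow one (BUY)
    and -1 where it crosses below (SELL)."""
    above = (pd.Series(ema_fast, dtype=float) >
             pd.Series(ema_slow, dtype=float)).astype(int)
    return above.diff().fillna(0).astype(int)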
Example #11
    def _run_deeplc(self):
        """Run CALLC given user input, and show results."""
        # Parse user config
        config = self._parse_user_config(self.user_input)

        logger.info("Run requested // %s // compounds %i", datetime.now(),
                    len(config["input_df"]))

        # Run CALLC
        st.header("Running CALLC")
        status_placeholder = st.empty()
        status_placeholder.info(":hourglass_flowing_sand: Running CALLC...")

        if "\t" in config["input_df"][0].decode("utf-8"):
            struct_dict = dict([(v.decode("utf-8").split("\t")[0],
                                 v.decode("utf-8").split("\t")[1])
                                for v in config["input_df"]])
        else:
            struct_dict = dict([(v.decode("utf-8").split(",")[0],
                                 v.decode("utf-8").split(",")[1])
                                for v in config["input_df"]])

        try:
            preds_l3_train, preds_l3_test, plot_setups, preds_l1_test, coefs = make_preds(
                reference_infile=config["input_df_calibration"],
                pred_infile=config["input_df"],
                num_jobs=4,
                GUI_obj=None,
                ch_size=100000)
            #print(plot_setups)
        except Exception as e:
            status_placeholder.error(":x: CALLC ran into a problem")
            st.exception(e)
        else:
            status_placeholder.success(":heavy_check_mark: Finished!")

            # Add predictions to input DataFrame
            #result_df = pd.read_csv(self.user_input["input_csv"])

            result_df = preds_l3_test

            # Show head of result DataFrame
            st.header("Results")
            st.subheader("Selection of predicted retention times")
            st.dataframe(result_df.head(100))

            st.subheader("Coefficients of 'Layer 3'")

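            # Only report models whose blending coefficient is at least 2.5%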
            coef_str = ""
            for m, coef in coefs:
                if coef < 0.025:
                    continue
                coef_str = coef_str + "%s -> %s \n\r" % (m.replace(
                    "+RtGAM", ""), coef)
            st.write(coef_str)

            # Plot results
            self._plot_results(preds_l3_train, preds_l3_test,
                               preds_l1_test.loc[:, plot_setups.index],
                               struct_dict)

            # Download link
            st.subheader("Download predictions")
            filename = os.path.splitext(config["input_filename"])[0]
            self._df_download_href(result_df,
                                   filename + "_callc_predictions.csv")
Example #12
                col2.subheader(
                    'Number of Beds in Hospitals throughout the years in UAE:')
                hospitals_beds_count_bar = sns.barplot(
                    x="year",
                    y="beds",
                    palette='Blues_d',
                    hue='sector',
                    data=hospitals_beds_count)
                hospitals_beds_count_bar.set(
                    xlabel='Year', ylabel='Number of Beds in Hospitals')
                hospitals_beds_count_bar.set_title(
                    'Number of Beds in Hospitals in UAE based on Sector',
                    y=1.02)
                col2.pyplot()

                st.write(
                    'Based on the above plots, we can see that the number of private hospitals has grown faster than that of government hospitals over the years, nearly doubling it by 2018, while government hospitals still had more beds even in 2018.'
                )

                st.write(hospitals)

            elif select1 == 'Staff Analysis':

                st.write('carlos')

elif password != '123456':
    st.write(
        'Please Enter the Correct Password from the Sidebar to display the Analysis'
    )
Example #13
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model


discriminator = make_discriminator_model()
decision = discriminator(generated_image)
st.write(decision)

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)


def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)

    total_loss = real_loss + fake_loss

    return total_loss


def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)
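
# A minimal usage sketch (assumption: `real_images` is a batch of training
# images defined elsewhere) combining the two losses above:
real_output = discriminator(real_images, training=True)
fake_output = discriminator(generated_image, training=True)
st.write('Discriminator loss:',
         float(discriminator_loss(real_output, fake_output)))
st.write('Generator loss:', float(generator_loss(fake_output)))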
Example #14
def main():
    menu = ["Home", "About"]
    choice = st.sidebar.selectbox("Menu", menu)

    st.title("DevDeeds -Search Jobs")

    if choice == "Home":
        st.subheader("Home")

        # Nav  Search Form
        with st.form(key='searchform'):
            nav1, nav2, nav3 = st.beta_columns([3, 2, 1])

            with nav1:
                search_term = st.text_input("Search Job")
            with nav2:
                location = st.text_input("Location")

            with nav3:
                st.text("Search ")
                submit_search = st.form_submit_button(label='Search')

        st.success("You searched for {} in {}".format(search_term, location))

        # Results
        col1, col2 = st.beta_columns([2, 1])

        with col1:
            if submit_search:
                # Create Search Query
                search_url = base_url.format(search_term, location)
                # st.write(search_url)
                data = get_data(search_url)

                # Number of Results
                num_of_results = len(data)
                st.subheader("Showing {} jobs".format(num_of_results))
                # st.write(data)

                for i in data:
                    job_title = i['title']
                    job_location = i['location']
                    company = i['company']
                    company_url = i['company_url']
                    job_post_date = i['created_at']
                    job_desc = i['description']
                    job_howtoapply = i['how_to_apply']
                    st.markdown(JOB_HTML_TEMPLATE.format(
                        job_title, company, job_location, job_post_date),
                                unsafe_allow_html=True)

                    # Description
                    with st.beta_expander("Description"):
                        stc.html(JOB_DES_HTML_TEMPLATE.format(job_desc),
                                 scrolling=True)

                    # How to Apply
                    with st.beta_expander("How To Apply"):
                        # stc.html(job_howtoapply) # For White Theme
                        stc.html(JOB_DES_HTML_TEMPLATE.format(job_howtoapply),
                                 scrolling=True)  # For Dark Theme

        with col2:
            with st.form(key='email_form'):
                st.write("Be the first to get new jobs info")
                email = st.text_input("Email")

                submit_email = st.form_submit_button(label='Subscribe')

                if submit_email:
                    st.success("A message was sent to {}".format(email))

    else:
        st.subheader("About")
Example #15

data_load_state = st.text('Loading data...')

df = load_Data('regioni')
df_prov = load_Data('province')
df_prov = df_prov[(df_prov[['lat', 'long']] != 0).all(axis=1)]

df.rename(columns={'long': 'lon'}, inplace=True)
df_prov.rename(columns={'long': 'lon'}, inplace=True)

data_load_state.text('Loading data...done!')

if st.checkbox('View the data'):
    st.subheader('Raw data')
    st.write(df)
    st.write(df_prov)

st.sidebar.markdown(
    ("All the open-source code can be found here  ") +
    "[GitHub](https://github.com/LorenzoFramba/CoronaVirus_in_Italy).")

st.sidebar.markdown((
    "This site is the result of individual, self-funded work. If you are able to make a donation, I would really appreciate it!  "
) + "[PayPal](paypal.me/rappasta).")

st.success((
    "This site is the result of individual, self-funded work. If you are able to make a donation, I would really appreciate it!  "
) + "[PayPal](paypal.me/rappasta).")

Example #16
                    progress_bar.progress(min(counter / length, 1.0))
    # Finally, we remove these visual elements by calling .empty().
    finally:
        if weights_warning is not None:
            weights_warning.empty()
        if progress_bar is not None:
            progress_bar.empty()


import cv2
import streamlit as st
from streamlit_webrtc import (ClientSettings, VideoTransformerBase,
                              webrtc_streamer)

WEBRTC_CLIENT_SETTINGS = ClientSettings(
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    media_stream_constraints={"video": True, "audio": True},
)


st.title("Face Detection systme")
st.write("Maza Aaya")
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

class VideoTransformer(VideoTransformerBase):
    def transform(self, frame):
        # Convert the incoming frame to a BGR numpy array
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(30, 30))
        # Draw a green box around each detected face
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        return img


webrtc_streamer(key="example",
                client_settings=WEBRTC_CLIENT_SETTINGS,
                video_transformer_factory=VideoTransformer)
Example #17
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import numpy as np
import requests

# Writing title & description
st.write("""
# Memphis Parks
Here follow the first WEB App created with _Streamlit_.
The web app will show the location of different parks according to the user options that have been selected, accordying by ZipCode.

> Due for quickly development (and understading of the package), not all the Features are going to be used, and also, the data provided that show up in the map, are only the **ZipCode** and not the real postion of the park. 

> Video Tutorial followed from YouTube channell _SATSifaction_ : https://www.youtube.com/watch?v=vKRj7GiaiTY&t=21s

> Documentation consulted from _Streamlit_:https://docs.streamlit.io/en/stable/main_concepts.html#layout
""")


# Creating a function to load the data and cache it
@st.cache
def load_data():
    df = pd.read_csv('Memphis_Park.csv')
    return df


df = load_data()

# Creating sidebar ######################################
st.sidebar.header('User Input Options')
Example #18
    return df


df = load_data(99794)

page = st.sidebar.selectbox("Choose a page",
                            ['Homepage', 'Exploration', 'Prediction'])

if page == 'Homepage':
    st.title('Bank Loan Payment Classifier')

    data_load_state = st.text('Loading data...')
    data_load_state.text('Loading data...done!')

    if st.checkbox('Show the dataframe'):
        st.write(df.head(5))

elif page == 'Exploration':
    st.markdown('### Analysing column relations')
    st.write('Correlations:')
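    # Ten features most positively correlated with the target column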
    rel = df.drop(columns=['Loan Status']).corrwith(df['Loan Status'])
    rel = rel.sort_values(ascending=False)
    rel = rel[:10]
    if st.checkbox('Show the Analysing column relations'):
        st.write(rel)

    st.markdown('### Analysing the Target Variable')
    st.text('visualizing Loan Status')
    fig, axs = plt.subplots(1, 2, figsize=(14, 7))
    sns.countplot(x='Loan Status', data=df, ax=axs[0])
    axs[0].set_title("Frequency of each Loan Status")
Example #19
File: app.py  Project: p2327/flower_app
]

test_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv"

test_fp = tf.keras.utils.get_file(fname=os.path.basename(test_url),
                                  origin=test_url)

# load data 'flower_app/data/iris_test.csv'
iris_test_data = pd.read_csv(test_fp, skiprows=[0], names=column_names)
# encode 0, 1, 2 label classes to string names
# np.where(condition, [x, y])
iris_test_data['species'] = np.where(
    iris_test_data['species'] == 0, 'setosa',
    np.where(iris_test_data['species'] == 1, 'versicolor', 'virginica'))

st.write(iris_test_data.head())


def show_random_iris():
    choices = ['setosa', 'versicolor', 'virginica']
    random_flower = random.choice(choices)
    flowers = {
        'setosa':
        'https://upload.wikimedia.org/wikipedia/commons/thumb/5/56/Kosaciec_szczecinkowaty_Iris_setosa.jpg/440px-Kosaciec_szczecinkowaty_Iris_setosa.jpg',
        'versicolor':
        'https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Iris_versicolor_3.jpg/440px-Iris_versicolor_3.jpg',
        'virginica':
        'https://upload.wikimedia.org/wikipedia/commons/thumb/9/9f/Iris_virginica.jpg/440px-Iris_virginica.jpg'
    }
    random_url = flowers[random_flower]
    single_type = iris_test_data[(iris_test_data['species'] == random_flower)]
Example #20
import streamlit as st
from asr import record_and_predict, spell_check

st.header("Transcription vocale - FormaScience")

st.subheader("Essayer le modèle en temps réel")

st.sidebar.title("Paramètres")
duration = st.sidebar.slider("Durée de l'enregistrement", 0.0, 10.0, 5.0)

if st.button("Commencer l'enregistrement"):
    with st.spinner("Recording..."):
        prediction = record_and_predict(duration=duration)
        st.write("**Prediction**: ", prediction[0])
        st.write("**Spell Check**: ", spell_check(prediction[0]))
Example #21
import yfinance as yf
import streamlit as st
import plotly.graph_objects as go

tickerSymbolOption = st.sidebar.selectbox('Symbol',
                                          options=('PETR4', 'BBDC3', 'BBDC4',
                                                   'ITUB4'),
                                          index=0)
graphType = st.sidebar.radio('Graph Type', ('Line', 'Candlestick'))
st.write(f"""
# Stock Price Viewer App :sunglasses:
Shown stock closing price and volume of: **{tickerSymbolOption}** 
""")

tickerData = yf.Ticker(tickerSymbolOption + '.SA')
tickerDf = tickerData.history(period='1d', start='2010-1-1', end='2020-12-31')
# Open	High	Low	Close	Volume	Dividends	Stock Splits
if graphType == 'Line':
    st.line_chart(tickerDf.Close)
    st.line_chart(tickerDf.Volume)
else:
    fig = go.Figure(data=[
        go.Candlestick(x=tickerDf.index,
                       open=tickerDf.Open,
                       high=tickerDf.High,
                       low=tickerDf.Low,
                       close=tickerDf.Close)
    ])
    st.plotly_chart(fig)
Example #22
import mpl_toolkits
from datetime import datetime
import os, time, sys
import pandas as pd
from pandas import DataFrame

from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn import tree

st.title('Waka Waka Seattle Home Prices')
st.write("""
Our app predicts the **Seattle House Price**!
""")
st.write('---')

from PIL import Image
img = Image.open("seattle.jpg")
st.image(img)
st.write('---')
st.subheader("""
Home Prices Predicted Using Machine Learning
""")

df = pd.read_csv('clean1.csv')


#read and display csv
Example #23
    def calculate_pca(self):
        max_value = len(self.descriptors_cols)
        default = 0.9
        n_components = st.number_input(
            f'Please enter the number of components to select [0, {max_value}]: ',
            value=default,
            min_value=0.0,
            max_value=float(max_value))
        st.markdown(
            f'''\* If the input number is less than 1, then it will correspond to the percentage of the explained 
variance. E.g. the default value corresponds to an explained variance of {default * 100}%.'''
        )
        if n_components > 1:
            n_components = int(n_components)

        from sklearn.decomposition import PCA
        from sklearn.preprocessing import StandardScaler
        from imblearn.pipeline import make_pipeline

        # Split training set into X and y
        y = self.merged_data['activity']
        X = self.merged_data[self.descriptors_cols].copy()

        pca = make_pipeline(StandardScaler(), PCA(n_components=n_components))

        state = st.text('Running PCA...')
        # Fit and transform the training data
        X_pca = pca.fit_transform(X)
        self.pca = pca

        state.text('PCA completed!')
        variance_total = sum(pca['pca'].explained_variance_ratio_)
        if pca['pca'].n_components_ < 51:
            fig, ax = pyplot.subplots(figsize=(12, 4))
            sns.barplot(x=[i for i in range(1, pca['pca'].n_components_ + 1)],
                        y=pca['pca'].explained_variance_ratio_,
                        ax=ax)
            ax.set(xlabel='Principal Component',
                   ylabel='Explained variance ratio',
                   title=f'Variance explained by {variance_total * 100:.1f}%')
            st.pyplot(fig)
        else:
            st.write(f'Explained variance: {variance_total * 100:.1f}%')

        # Reassign the data to the new transformed data
        pca_data = pd.DataFrame(X_pca)
        pca_features = [
            f'PCA_{i:02d}' for i in range(1, pca['pca'].n_components_ + 1)
        ]
        pca_data.columns = pca_features
        pca_data['CID'] = self.merged_data['CID'].tolist()
        pca_data['activity'] = y.tolist()
        # Rearrange the columns
        cols = pca_data.columns.tolist()
        cols = cols[-2:] + cols[:-2]
        pca_data = pca_data[cols]

        self.merged_data = pca_data
        self.descriptors = pca_data[['CID'] + pca_features]

        st.write('### Principal Components')
        st.write(self.descriptors.head())
Example #24
def main():
    data = fetch_data()
    X_train, X_test, y_train, y_test = preprocessing(data)

    #Show data
    if st.checkbox("Show the Data We Used"):
        st.subheader("Home Sales From 2014 to 2015")
        st.dataframe(data)
        if st.checkbox("Quick View of Features Histogram"):
            data.hist(bins=50, figsize=(15, 15))
            st.pyplot()
    st.write('---')

    #Side bar
    if (st.sidebar.button("Back to Homepage")):
        import webbrowser
        webbrowser.open(
            'https://hungnw.github.io/seattle-house-prediction.github.io/')
    st.sidebar.header("Menu")
    st.sidebar.selectbox("Choose a City", ["Seattle"])
    ml_model = st.sidebar.selectbox("Choose a Model to Predict Home Prices", [
        "Random Forest Regressor", "Multiple Linear Regression", "Coming Soon"
    ])
    viz = st.sidebar.selectbox(
        "Visualization",
        ['None', 'Feature Importance -RF Only', 'Tree- RF Only'])

    #Add line space for Home buttom
    # if(st.sidebar.button("Home")):
    #     import webbrowser
    #     webbrowser.open('https://hungnw.github.io/seattle-house-prediction.github.io/')

    #RF Model
    if (ml_model == "Random Forest Regressor"):
        st.subheader("Random Forest Model")
        score, regressor = randomForest(X_train, X_test, y_train, y_test)
        txt = "Accuracy of Random Forest Model is: " + str(round(score,
                                                                 2)) + "%"
        st.success(txt)
        st.write("---")

        if (viz == 'Feature Importance -RF Only'):
            feature_names = X_train.columns
            score, regressor = randomForest(X_train, X_test, y_train, y_test)
            importance = sorted(zip((regressor.feature_importances_),
                                    feature_names),
                                reverse=True)
            importance_df = DataFrame(
                importance, columns=['Feature_importances', 'Feature_names'])
            importance_df.set_index('Feature_names', inplace=True)
            importance_df.sort_values(by='Feature_importances',
                                      ascending=True,
                                      inplace=True)
            # st.markdown("Randon Forest feature importance")
            # fig = px.bar(importance_df, x='Feature_importances')
            # st.plotly_chart(fig)
            # st.bar_chart(importance_df)
            importance_df.plot(kind='barh')
            plt.title('Random Forest feature importance')
            plt.legend(loc='lower right')
            st.pyplot()
            st.write('---')

        elif (viz == 'Tree- RF Only'):
            # from sklearn.tree import export_graphviz
            # # Export as dot file
            # export_graphviz(regressor.estimators_[3],
            #     max_depth=3,
            #     out_file='tree.dot',
            #     feature_names = list(X.columns),
            #     class_names = data.price,
            #     rounded = True, proportion = False,
            #     precision = 2, filled = True)
            st.markdown("Qucik view of decision tree no.3")
            tree = open('tree.txt')
            st.graphviz_chart(tree.read(), use_container_width=True)
            st.write('---')

        try:
            if (st.checkbox("Start a Search")):
                user_input_prediction = user_input_RF()
                pred = regressor.predict(user_input_prediction)
                error = 94784
                if (st.button("Submit")):
                    st.write('Mean Absolute Error: ', int(error))
                    txt = 'The Predicted Home Price is: $' + str(
                        int(pred)) + ' \u00B1 $' + str(error)
                    st.success(txt)

                feedback = st.radio(
                    "Waka Waka value your feedback, please rate from 1-5, (5) being excellent:",
                    ('Please choose from below', '1', '2', '3', '4', '5'))
                if feedback == 'Please choose from below':
                    st.text('')
                elif feedback == '1':
                    st.error("This option is not valid")
                elif feedback == '2':
                    st.warning("We have tried our best!")
                elif feedback == '3':
                    st.success("Thank you for your feedback.")
                elif feedback == '4':
                    st.success("Glad you like it!")
                elif feedback == '5':
                    st.success("Waka Waka agrees with you. Have a nice day!")
        except Exception:
            pass

    #LR Model
    if (ml_model == "Mutilple Linear Regression"):
        st.subheader('Linear Regression Model')
        score, model = linearRegression(X_train, X_test, y_train, y_test)
        txt = "Accuracy of Linear Regression Model is: " + str(round(
            score, 2)) + "%. Proceed with caution"
        st.warning(txt)
        st.write('---')

        try:
            if (st.checkbox("Start a Search")):
                user_input_prediction = user_input_LR()
                pred = model.predict(user_input_prediction)
                if (st.button("Submit")):
                    txt = 'The Predicted Home Price is: $' + str(int(pred))
                    st.success(txt)

                feedback = st.radio(
                    "Waka Waka value your feedback, please rate from 1-5, (5) being excellent:",
                    ('Please choose from below', '1', '2', '3', '4', '5'))
                if feedback == 'Please choose from below':
                    st.text('')
                elif feedback == '1':
                    st.error("This option is not valid")
                elif feedback == '2':
                    st.warning("We have tried our best!")
                elif feedback == '3':
                    st.success("Thank you for your feedback.")
                elif feedback == '4':
                    st.success("Glad you like it!")
                elif feedback == '5':
                    st.success("Waka Waka agrees with you. Have a nice day!")
        except Exception:
            st.write('error')

    #Coming Soon
    elif (ml_model == "Coming Soon"):
        text = "Coming  Soon..."
        i = 0
        while i < len(text):
            st.write(text[i])
            time.sleep(0.3)
            i += 1
Example #25
    def upload_new_compounds(self):
        st.markdown('## Classify new compounds')
        file = st.file_uploader('Upload file *')
        show_file = st.empty()
        st.markdown('''\* File must contain the following columns:   
1 - "SMILES": SMILES structures of the compounds     
2 - "CID": compounds ID''')

        if not file:
            show_file.info("Please upload a file of type: .csv")
            self.copyright_note()
            st.stop()

        self.new_data = pd.read_csv(file)
        columns = self.new_data.columns.tolist()
        if 'SMILES' in columns and 'CID' in columns:
            self.new_data = self.new_data[columns]
        else:
            if 'SMILES' not in columns:
                st.error('Input file missing "SMILES"')
            else:
                st.error('Input file missing "CID"')
            self.copyright_note()
            st.stop()

        st.markdown('#### New compounds')
        st.write(self.new_data.head())
        file.close()

        self.write_smiles(self.new_data, '.metadata/smiles2.smi')
        if self.calc == 'Mordred':
            self.write_mordred_descriptors('.metadata/smiles2.smi',
                                           '.metadata/csv/mordred2.csv',
                                           self.new_data)
            # Read MORDRED descriptors
            descriptors = pd.read_csv('.metadata/csv/mordred2.csv.gz',
                                      compression='gzip')
            descriptors.rename(columns={'name': 'CID'}, inplace=True)
        elif self.calc == 'RDKit':
            self.write_rdkit_descriptors('.metadata/smiles2.smi',
                                         '.metadata/csv/rdkit2.csv',
                                         self.new_data)
            # Read RDKit descriptors
            descriptors = pd.read_csv('.metadata/csv/rdkit2.csv.gz',
                                      compression='gzip')
        else:
            file = st.file_uploader(
                'Upload the descriptors file for the new compounds')
            show_file = st.empty()

            if not file:
                show_file.info("Please upload a file of type: .csv")
                self.copyright_note()
                st.stop()
            else:
                descriptors = pd.read_csv(file)
                if 'CID' not in descriptors.columns:
                    st.error('Compounds must be identified by "CID".')
                    self.copyright_note()
                    st.stop()
            file.close()
            try:
                tmp = pd.merge(self.new_data,
                               descriptors[['CID'] + self.descriptors_cols],
                               on=['CID'])
            except KeyError as e:
                st.error(
                    '''Expected features do not match the given features.     
Please make sure the input file contains the same descriptors used for training the model.'''
                )
                self.copyright_note()
                st.stop()

        descriptors.dropna(subset=self.descriptors_cols, inplace=True)

        if self.pca is not None:
            X = descriptors[self.descriptors_cols]
            X_new = self.pca.transform(X)
            # Reassign the data to the new transformed data
            pca_data = pd.DataFrame(X_new)
            pca_features = [
                f'PCA_{i:02d}'
                for i in range(1, self.pca['pca'].n_components_ + 1)
            ]
            pca_data.columns = pca_features
            pca_data['CID'] = descriptors['CID'].tolist()
            # Rearrange the columns
            cols = pca_data.columns.tolist()
            cols = cols[-1:] + cols[:-1]
            pca_data = pca_data[cols]
            self.new_data = pca_data
        else:
            self.new_data = pd.merge(self.new_data,
                                     descriptors[['CID'] +
                                                 self.descriptors_cols],
                                     on=['CID'])
Example #26
data = data[data['date/time'].dt.hour == hour]

st.markdown("Vehicle Collison between %i:00 and %i:00" % (hour,
                                                          (hour + 1) % 24))
midpoint = (np.average(data['latitude']), np.average(data['longitude']))

st.write(
    pdk.Deck(map_style="mapbox://styles/mapbox/light-v9",
             initial_view_state={
                 "latitude": midpoint[0],
                 "longitude": midpoint[1],
                 "zoom": 11,
                 "pitch": 50,
             },
             layers=[
                 pdk.Layer(
                     "HexagonLayer",
                     data=data[['date/time', 'latitude', 'longitude']],
                     get_position=['longitude', 'latitude'],
                     radius=100,
                     extruded=True,
                     pickable=True,
                     elevation_scale=4,
                     elevation_range=[0, 1000],
                 ),
             ]))

st.subheader("breakdown by minute between %i:00 and %i:00" % (hour,
                                                              (hour + 1) % 24))
filtered = data[(data['date/time'].dt.hour >= hour)
                & (data['date/time'].dt.hour < (hour + 1))]
hist = np.histogram(filtered['date/time'].dt.minute, bins=60, range=(0, 60))[0]
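
# A minimal sketch (assumption, not part of the original snippet) of charting
# the per-minute histogram computed above:
chart_data = pd.DataFrame({'minute': range(60), 'crashes': hist})
st.bar_chart(chart_data.set_index('minute'))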
Example #27
import streamlit as st
import os
import pandas as pd
import numpy as np

#add_selectbox=st.sidebar.radio("Select the type of Search Method ",("FAISS"))

#os.chdir('C:/Users/rishv/OneDrive/Northeastern/SEM3/Algorithmic Digital Marketing/Assignments/Assignment3/Streamlit')

#if add_selectbox=='FAISS':
st.title("FACEBOOK ADD SIMILARITY SEARCH")
st.write("----------------------------------------------------")


def get_data():
    return pd.read_csv('Faiss.csv')


n = 1
df = get_data()
images = df['0'].unique()
st.subheader("Select an image from drop down menu :")
pic = st.selectbox('Choices:', images)
st.write("**You selected**")
st.image(pic, width=None)

#Displaying output
z = st.slider('How many images do you want to see?', 1, 10, 5)
st.write("---------------------------------------------------------")
st.subheader("Output:")
st.write('**Images similar to the image selected by you:**')
Example #28
                                                   method=index_type,
                                                   n_results=10)
if action in [0, 3]:
    answer, support_list = answer_question(
        question_doc,
        s2s_model,
        s2s_tokenizer,
        min_len=min_len,
        max_len=int(max_len),
        sampling=(sampled == "sampled"),
        n_beams=n_beams,
        top_p=top_p,
        temp=temp,
    )
    st.markdown("### The model generated answer is:")
    st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
    st.markdown(
        "--- \n ### The model is drawing information from the following Wikipedia passages:"
    )
    for i, res in enumerate(support_list):
        wiki_url = "https://en.wikipedia.org/wiki/{}".format(
            res[0].replace(" ", "_"))
        sec_titles = res[1].strip()
        if sec_titles == "":
            sections = "[{}]({})".format(res[0], wiki_url)
        else:
            sec_list = sec_titles.split(" & ")
            sections = " & ".join([
                "[{}]({}#{})".format(sec.strip(), wiki_url,
                                     sec.strip().replace(" ", "_"))
Example #29
import streamlit as st
import pandas as pd
import numpy as np
import glob
import cv2
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from PIL import Image

st.title("Data Exploration Engine")

st.write("Lets have a look at the data:")


@st.cache
def load_images(paths, color="gray"):
    image_files = []

    # Collect the image files from the first path only
    for f in paths[0:1]:
        image_files.append(sorted(glob.glob(f + "/*")))

    images = []
    if color == "gray":
        for ff in image_files:
            curr = []
            for f in ff:
                im = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
                curr.append(im)
            images.append(curr)
Example #30
import numpy as np
import pandas as pd
import streamlit as st

"""
## Plotting a map
"""
df = pd.DataFrame(np.random.randn(100, 2) / [50, 50] + [35.69, 139.70],
                  columns=['lat', 'lon'])
st.map(df)
"""
## チャートを描く
"""
df = pd.DataFrame(np.random.rand(20, 3), columns=['a', 'b', 'c'])
st.line_chart(df)
st.area_chart(df)
st.bar_chart(df)
"""
## データフレームを表示
"""

df = pd.DataFrame({'Column 1': [1, 2, 3, 4], 'Column 2': [10, 20, 30, 40]})
st.write('write')
st.write(df)
st.write('dataframe')
st.dataframe(df.style.highlight_max(axis=0), width=200, height=300)
st.write('table')
st.table(df)
"""
マジックコマンド
# 章
## 節
### 項
---
"""