Code Example #1
def main():
    st.set_page_config(page_title="Breast Cancer Detection", page_icon="🎗️")  # the original emoji is garbled in the source; a ribbon is assumed
    st.header("Breast Cancer Detection Application")
    st.caption('Multiple predictive Machine Learning models are trained\
               on Breast Cancer data to help distinguish\
               between malignant and benign tumors.')
    st.subheader("Sample Images")
    st.image("./Data/sample.png")
    st.markdown("---")
    st.subheader("Feature Selection")

    attrs = render_sliders()

    model_selected = st.selectbox("Select Machine Learning Model",
                                  options=[
                                      "Logistic Regressor",
                                      "Decision Tree Classifier",
                                      "Support Vector Classifier",
                                      "Random Forest Classifier"
                                  ])
    model = MODELS[model_selected]
    predict_selection = st.button("Predict")
    if predict_selection:
        with st.spinner("Predicting...."):
            prediction = predict(model, attrs)
            if not prediction:
                st.success("Benign Tumor")
            else:
                st.error("Malignant Tumor")
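The helpers `MODELS`, `render_sliders`, and `predict` are defined elsewhere in this project and are not shown. A minimal sketch of what they might look like, assuming scikit-learn models and a slider per feature (all names and ranges here are illustrative, not the original code):

import streamlit as st
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier

# In the real app these would be trained on the Breast Cancer data or
# loaded from disk; untrained instances are shown only for the mapping.
MODELS = {
    "Logistic Regressor": LogisticRegression(),
    "Decision Tree Classifier": DecisionTreeClassifier(),
    "Support Vector Classifier": SVC(),
    "Random Forest Classifier": RandomForestClassifier(),
}

# Hypothetical feature ranges; the real app exposes one slider per feature.
FEATURES = {"mean radius": (6.0, 30.0), "mean texture": (9.0, 40.0)}

def render_sliders():
    # One slider per feature, returned in a fixed order for the model.
    return [st.slider(name, min_value=lo, max_value=hi, value=(lo + hi) / 2)
            for name, (lo, hi) in FEATURES.items()]

def predict(model, attrs):
    # scikit-learn expects a 2-D array: a single row of feature values.
    return model.predict([attrs])[0]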
Code Example #2
def main():
    # st.title("Early Stage DM Risk Data App (DEMO)")
    st.title("Diyabet Risk Tespit Aplikasyonu (DEMO)")
    st.caption(
        "Mevcut uygulama tanı amaçlı değildir. Teşhis için lütfen doktorunuza başvurunuz."
    )
    # stc.html(html_temp)
    st.sidebar.image('togaylogogri.jpg')

    menu = [
        "Ana", "EDA-Keşfedici Veri Analizi", "ML-Makine Öğretisi", "Hakkında"
    ]  # "Home", "EDA - Exploratory Data Analysis", "ML - Machine Learning", "About"
    choice = st.sidebar.selectbox("Menu", menu)

    if choice == "Ana":
        st.subheader("Ana")
        st.write(desc_temp)

    elif choice == "EDA-Keşfedici Veri Analizi":
        run_eda_app()

    elif choice == "ML-Makine Öğretisi":
        run_ml_app()

    else:
        st.subheader("Hakkında")
        st.text('10-02-2021 - Togay Tunca')
Code Example #3
def kappa():
    st.title("Cohen's Kappa Calculator")
    st.write("""
    1. Copy the codes of Coder 1 into the Coder 1 text entry field and hit "Enter." 
    2. Copy the codes for Coder 2 into the Coder 2 text entry field and hit "Enter."
    ❗ Make sure that the coding decisions between Coder 1 and Coder 2 are the same length.
       """)

    col1 = st.text_input('Coder 1', value='a a b')
    col2 = st.text_input('Coder 2', value='a a b')
    st.caption("📝 This app does not retain user data.")

    try:
        st.subheader('Results')
        c1, c2, c3 = st.columns(3)
        c1.metric('Dataset Length', str(len(col1.split())))
        c2.metric('Accuracy', str(accuracy_score(col1.split(), col2.split())))
        c3.metric('Kappa Score',
                  str(cohen_kappa_score(col1.split(), col2.split())))

        labels = sorted(list(set(col1.split() + col2.split())))
        indices = [str(label) + '_' for label in labels]
        st.write("Confusion Matrix:")
        st.dataframe(
            pd.DataFrame(confusion_matrix(col1.split(), col2.split()),
                         index=indices,
                         columns=labels))
        st.caption('Note: Coder 1 is used as the baseline for evaluation.')
        st.markdown(
            "For an extended presentation of Cohen's Kappa, see Hart-Davidson (2014), [Using Cohen's Kappa to Gauge Interrater Reliability](https://www.slideshare.net/billhd/kappa870)"
        )
    except ValueError:
        st.markdown('<mark>Error: Data must be the same length</mark>',
                    unsafe_allow_html=True)
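Like the other calculators in this collection, the snippet above relies on imports that are not shown; it presumably needs at least:

import pandas as pd
import streamlit as st
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix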
Code Example #4
File: ui_params.py, Project: kavehbc/market-analyzer
def create_cross_validation_form(ui_params):
    col1, col2, col3 = st.columns(3)
    with col1:
        st_cv_initial_days = st.number_input("Initial days",
                                             value=730,
                                             min_value=1,
                                             step=1)
    with col2:
        st_cv_period_days = st.number_input("Period days",
                                            value=180,
                                            min_value=1,
                                            step=1)
    with col3:
        st_cv_horizon_days = st.number_input("Horizon days",
                                             value=365,
                                             min_value=1,
                                             step=1)

    st_validation_metric = st.selectbox("Validation Metric",
                                        options=VALIDATION_METRICS,
                                        index=3)
    show_cross_validation = st.form_submit_button(label='Cross-Validate')
    # show_cross_validation = st.checkbox("Show Cross-Validation", value=False)
    st.caption("This can take some time.")

    dic_return = DotDict(initial_days=st_cv_initial_days,
                         period_days=st_cv_period_days,
                         horizon_days=st_cv_horizon_days,
                         validation_metric=st_validation_metric,
                         cross_validation=show_cross_validation)

    return dic_return
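`st.form_submit_button` is only valid inside an `st.form` context, so the caller must wrap this function in a form. A sketch of a plausible call site (the form key is an assumption; `ui_params` is unused in the body shown above, so `None` stands in for it here):

import streamlit as st

with st.form(key="cross_validation"):
    cv_params = create_cross_validation_form(ui_params=None)

if cv_params.cross_validation:
    st.write(f"Cross-validating: initial={cv_params.initial_days}d, "
             f"period={cv_params.period_days}d, horizon={cv_params.horizon_days}d")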
Code Example #5
def main():
    st.title("Streamlit ECharts Demo")
    
    with st.sidebar:
        st.header("Configuration")
        api_options = ("echarts", "pyecharts")
        selected_api = st.selectbox(
            label="Choose your preferred API:", options=api_options,
        )

        page_options = (
            list(ST_PY_DEMOS.keys())
            if selected_api == "pyecharts"
            else list(ST_DEMOS.keys())
        )
        selected_page = st.selectbox(
            label="Choose an example", options=page_options,
        )
        demo, url = (ST_DEMOS[selected_page] if selected_api == "echarts"
                     else ST_PY_DEMOS[selected_page])

        if selected_api == "echarts":
            st.caption("""ECharts demos are extracted from https://echarts.apache.org/examples/en/index.html,
            by copying/formatting the 'option' JSON object into st_echarts.
            Definitely check the ECharts examples page: convert the JSON specs to Python dicts and you should get a nice viz.""")
        if selected_api == "pyecharts":
            st.caption("""Pyecharts demos are extracted from https://github.com/pyecharts/pyecharts-gallery,
            by copying the pyecharts object into st_pyecharts.
            Pyecharts still uses ECharts 4 underneath, which is why the theming differs between st_echarts and st_pyecharts.""")
        
    demo()

    sourcelines, _ = inspect.getsourcelines(demo)
    with st.beta_expander("Source Code"):
        st.code(textwrap.dedent("".join(sourcelines[1:])))
    st.markdown(f"Credit: {url}")
Code Example #6
def main():
    st.title("Titanic: Machine Learning from Disaster")

    df = load_data()
    target = "Survived"
    features = [c for c in df.columns.values if c != target]

    with st.beta_expander("About Titanic"):
        c1, c2 = st.beta_columns(2)
        description = read_markdown_file("pages/titanic.md")
        c1.markdown(f"{description}", unsafe_allow_html=True)
        c2.image("images/titanic.jpg")

    st.header("Data preview")
    st.caption(f"Shape of dataset : {df.shape[0]} rows, {df.shape[1]} columns")
    st.dataframe(df.describe())
    cols_to_style = st.multiselect("Choose columns to apply BG gradient",
                                   features)
    st.dataframe(
        df.style.background_gradient(subset=cols_to_style, cmap="BuGn"))
    st.markdown("---")

    st.header("Plot distribution")
    col = st.selectbox("Choose a column to display", features)
    with_target = st.checkbox("Separate per target?")
    chart = (alt.Chart(df).mark_bar().encode(
        alt.X(f"{col}:Q", bin=alt.Bin(maxbins=10)),
        alt.Y("count()"),
        tooltip=[col, "count()"],
    ).interactive())
    if with_target:
        chart = chart.encode(color=f"{target}:N")
    st.altair_chart(chart, use_container_width=True)
    st.markdown("---")

    st.header("Correlation")
    fig, ax = plt.subplots()
    sns.heatmap(df.corr(), ax=ax)
    st.pyplot(fig)
    st.markdown("---")

    st.header("Classification")
    with st.form(key="classify"):
        c1, c2 = st.beta_columns(2)
        n_estimators = c1.number_input("Choose number of trees:", 1, 1000, 100)
        max_depth = c2.number_input("Max depth:", 1, 100, 5)
        button_pressed = st.form_submit_button(label="Train model")

    if button_pressed:
        with st.spinner("Training..."):
            clf, confusion_matrix, importance_plot, force_plot = train_rf(
                df, n_estimators, max_depth)
            st.balloons()
            st.pyplot(confusion_matrix)
            st.pyplot(importance_plot)
            st_shap(force_plot, 400)
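`train_rf` and `st_shap` are project helpers that are not shown. `st_shap` commonly follows the well-known pattern of embedding SHAP's JavaScript force plot through `streamlit.components.v1`; a sketch under that assumption:

import shap
import streamlit.components.v1 as components

def st_shap(plot, height=None):
    # shap.getjs() returns the script bundle the force plot needs;
    # plot.html() serializes the plot itself.
    shap_html = f"<head>{shap.getjs()}</head><body>{plot.html()}</body>"
    components.html(shap_html, height=height)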
Code Example #7
    def display():

        with st.beta_expander('Movie Reviews'):
            st.caption('Reviews are displayed in last-in-first-out order, meaning the latest review appears at the top.')
            result = Database.getAllReviews('movie-reviews')
            st.dataframe(Format.tableFormat(result))

        with st.beta_expander('Short-film reviews'):
            st.caption('Reviews are displayed in last-in-first-out order, meaning the latest review appears at the top.')
            result = Database.getAllReviews('short-film-reviews')
            st.dataframe(Format.tableFormat(result)) 
Code Example #8
def geometry_visualization_report(data: dict):

    st.header(f"Visualize Geometries")

    st.caption(f"""
        The intent of these maps is to guide engineers in figuring out if the shapefiles are corrupted or had been loaded improperly; indicators of this include points or polygons falling outside the NYC boundary, geometries appearing to be oversimplified (i.e. roadbeds look like "noodles"), and spatial data simply not existing.
        These maps are meant to be used in addition to the Mapped Capital Projects That Are Not in NYC table, as they may help identify which source spatial data files might be causing the issues. 
        Historically spatial data issues have been introduced when loading spatial files into data library, specifically when the source data projection has changed.
        If records are falling outside of NYC from a specific agency, that might indicate an issue upstream with how the data is being uploaded into data library
        """)

    st.pyplot(data["cpdb_dcpattributes_pts"].plot(
        markersize=5, color=COLOR_SCHEME[0]).figure)

    st.pyplot(
        data["cpdb_dcpattributes_poly"].plot(color=COLOR_SCHEME[0]).figure)
Code Example #9
def show():
    st.image(
        "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/279/joystick_1f579-fe0f.png",
        width=100,
    )
    st.write("""
        # Try out Session State!

        One of the most highly requested Streamlit features is finally here! Session 
        state allows you to preserve information throughout a browser session. 
        Below are some ideas for how to use it. 
        
        More info in the [blog post](https://blog.streamlit.io/session-state-for-streamlit/).
        """)

    st.write("---")
    counter.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/counter.py)"
    )

    st.write("---")
    tic_tac_toe.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/tic_tac_toe.py)"
    )

    st.write("---")
    todo_list.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/todo_list.py)"
    )

    st.write("---")
    labelling.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/labelling.py) – Images from [Kaggle Dogs vs. Cats](https://www.kaggle.com/c/dogs-vs-cats/overview)"
    )

    st.write("---")
    pagination.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/pagination.py)"
    )
Code Example #10
File: main.py, Project: cmjn14/streamlit
def request_works(concept_name):
    if len(concept_name) != 0:
        search_works = requests.get('https://api.openalex.org/works?search=' +
                                    concept_name.replace(" ", "%20") +
                                    '&filter=is_paratext:false' +
                                    polite).json()['results']
        for work in search_works:
            st.markdown("---")
            st.markdown(f"##### {work['display_name']}")

            authors_list = []
            for authorship in work['authorships']:
                author = authorship['author']
                # Link each author's name to their ORCID profile when one exists.
                if author['orcid'] is None:
                    author_display_name = author['display_name']
                else:
                    author_display_name = f"[{author['display_name']}]({author['orcid']})"
                authors_list.append(author_display_name)
            st.markdown(", ".join(authors_list))

            st.caption(
                f"Published on **{work['publication_date']}** in ***{work['host_venue']['display_name']}*** ({work['host_venue']['publisher']})"
                .replace("in ***None***", ""))

            # Flag open-access works and append the venue's license when known.
            oa_info = "🟩 **Open access**" if work['open_access']['is_oa'] else ""
            if work['host_venue']['license']:
                oa_info += " (" + work['host_venue']['license'].upper() + ")"
            st.markdown(oa_info)

            st.caption(urllib.parse.quote(work['doi'], safe=':/'))

            st.caption(f"{work['cited_by_count']} citations")

            with st.expander("Other sources"):
                for source in work["alternate_host_venues"]:
                    st.caption(
                        f"- [{source['display_name']}]({source['url']})")

            with st.expander("Related concepts"):
                for work_concept in work['concepts']:
                    st.caption(work_concept['display_name'])
                    st.progress(float(work_concept['score']))
    return True
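`polite` is defined elsewhere in main.py. OpenAlex's "polite pool" works by appending a `mailto` query parameter, so it is plausibly something like the following (the address is a placeholder):

# Appended to an existing query string, hence the leading "&".
polite = "&mailto=you@example.com"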
Code Example #11
File: main.py, Project: cmjn14/streamlit
def make_zip(zip_name, files_list):
    st.write("Creating " + zip_name + " with " + str(len(files_list)) +
             " files ...")
    try:
        with ZipFile(zip_name, 'w') as zip_file:
            for f in files_list:
                f_name = os.path.basename(f.name)
                try:
                    zip_file.write(f_name)
                    if os.path.exists(f_name):
                        os.remove(f_name)
                except Exception:
                    # Should retry; failure is probably due to timing.
                    st.caption(f"Could not add {f_name} to zip.")
                    logging.error(traceback.format_exc())
        return zip_file
    except Exception:
        st.error("Could not create zip file.")
        logging.error(traceback.format_exc())
        return False
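The "Should retry" note suggests transient failures, e.g. the uploaded file not yet flushed to disk. A hedged sketch of a bounded retry helper the inner `try` could call instead of writing directly (not part of the original project):

import time

def write_with_retry(zip_file, f_name, attempts=3, delay=0.5):
    # Try a few times before giving up; returns True on success.
    for _ in range(attempts):
        try:
            zip_file.write(f_name)
            return True
        except OSError:
            time.sleep(delay)
    return False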
Code Example #12
def caption_demo():
    st.image(
        "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/279/ant_1f41c.png",
        width=100,
    )

    st.write("""
        # Try out `st.caption`!

        Ever wanted to add some small text to your streamlit app? Now you can, with `st.caption`:
        """)
    with st.echo():
        st.write("This is normal text")
        st.caption("This is small text 🐜")
    st.write("---")

    st.write("And `st.caption` also supports markdown:")
    with st.echo():
        st.caption(
            "Make it *italic* or **bold** or add [a link](https://streamlit.io/)"
        )
    st.write("---")

    st.write("It's also great to add a caption to a plot:")
    with st.echo():
        st.line_chart([2, 4, 3])
        st.caption("Just an example plot")
Code Example #13
def app():
    st.write("""
        # Try out Session State!

        One of the most highly requested Streamlit features is finally here! Session 
        state allows you to preserve information throughout a browser session. 
        Below are some ideas for how to use it. 
        
        More info in the [blog post](https://blog.streamlit.io/session-state-for-streamlit/).
        """)

    st.write("---")
    counter.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/counter.py)"
    )

    st.write("---")
    tic_tac_toe.show()
    st.caption(
        "[View code](https://github.com/streamlit/release-demos/blob/0.84/0.84/demos/tic_tac_toe.py)"
    )
Code Example #14
    def __init__(self) -> None:
        self.__indicador = PontoDePivot()
        self.__db = BancoDeDados(getenv('DATABASE'))

        self.config = st.set_page_config(
            page_title='Ponto de Pivot',  # "Pivot Point"
            page_icon=':chart_with_upwards_trend:',
            layout="centered")
        self.titulo = st.title('Ponto de Pivot')
        self.caption = st.caption(
            'Esta ferramenta utiliza o método LeandroStormer.')  # "This tool uses the LeandroStormer method."

        self.stock_name = st.text_input(
            'CÓDIGO DA AÇÃO:',  # "STOCK TICKER:"
            value='',
            help='Não são aceitas ações fracionárias.',  # "Fractional shares are not accepted."
            placeholder='EX: VALE3',
            key='stock').strip().upper()

        self.criar_botao()
Code Example #15
def chi_file_upload():
    """
    Python code adapted from Brownlee (June 15, 2018)
    """
    st.title('chi2 Test of Homogeneity')
    st.write(
        'This chi2 calculator assumes that your data is in the form of a contingency table:'
    )

    st.markdown("""
    |values|sample 1|sample 2|
    |-------|------|------|
    |val1|30|30|
    |val2|20|30|
    |val3|40|15|
    |val4|24|20|
    
    """)

    st.write('To use the chi2 calculator:')
    st.write("""
    1. Input the significance value.
    2. Upload your frequency table as an .csv or .xlsx file. Make sure that the column names for your two samples are "sample 1" and "sample 2."
       """)
    significance = float(
        st.text_input('Input significance value (default/max value is .05)',
                      value='.05'))

    uploaded = st.file_uploader('Upload your .csv or .xlsx file.')
    st.caption("📝 This app does not retain user data.")
    if uploaded is not None:
        if uploaded.name.endswith('csv'):
            df = pd.read_csv(uploaded)
            s1 = [int(c) for c in df['sample 1']]
            s2 = [int(c) for c in df['sample 2']]

            chi, p_val, dof, ex = chi2_contingency([s1, s2], correction=False)
            p = 1 - significance
            crit_val = chi2.ppf(p, dof)

            st.subheader('Results')
            st.write('Uploaded Contingency Table:')
            st.write(df)
            c1 = st.container()
            c2, c3, c4, c5 = st.columns(4)

            c1.metric('p-value', str(p_val))
            c2.metric('# of Samples', str(len(s1)))
            c3.metric('Degrees of Freedom', "{:.2f}".format(dof))
            c4.metric('\n chi2 test statistic', "{:.5f}".format(chi))
            c5.metric('critical value', "{:.5f}".format(crit_val))
            st.write(
                "For an extended discussion of using chi2 tests for homogeneity for qualitative coding, see [Geisler and Swarts (2019)](https://wac.colostate.edu/docs/books/codingstreams/chapter9.pdf)"
            )
        elif uploaded.name.endswith('xlsx'):
            df = pd.read_excel(uploaded)
            s1 = [int(c) for c in df['sample 1']]
            s2 = [int(c) for c in df['sample 2']]

            chi, p_val, dof, ex = chi2_contingency([s1, s2], correction=False)
            p = 1 - significance
            crit_val = chi2.ppf(p, dof)

            st.subheader('Results')
            st.write('Uploaded Contingency Table:')
            st.write(df)

            c1 = st.container()
            c2, c3, c4, c5 = st.columns(4)

            c1.metric('p-value', str(p_val))
            c2.metric('# of Samples', str(len(s1)))
            c3.metric('Degrees of Freedom', "{:.2f}".format(dof))
            c4.metric('\n chi2 test statistic', "{:.5f}".format(chi))
            c5.metric('critical value', "{:.5f}".format(crit_val))
            st.write(
                "For an extended discussion of using chi2 tests for homogeneity for qualitative coding, see [Geisler and Swarts (2019)](https://wac.colostate.edu/docs/books/codingstreams/chapter9.pdf)"
            )
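The .csv and .xlsx branches above are identical except for the reader, so the snippet could be tightened with a small helper; a sketch that preserves the original behavior:

import pandas as pd

def read_samples(uploaded):
    # Pick the reader by extension; both paths yield the same DataFrame shape.
    if uploaded.name.endswith('csv'):
        return pd.read_csv(uploaded)
    return pd.read_excel(uploaded)

The chi2 computation and metrics rendering would then run once on the returned frame.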
Code Example #16
def chi():
    """
    Python code adapted from Brownlee (June 15, 2018)
    """
    st.title('chi2 Test of Homogeneity')
    st.write(
        'This chi2 calculator assumes that your data takes the form of a frequency table:'
    )

    st.markdown("""
    |sample|value1|value2|value3|
    |------|------|------|------|
    |sample1|10|20|30|
    |sample2|10|15|25|
    """)
    st.caption("In this case, you copy and paste row-wise values")
    st.markdown('Or')
    st.markdown("""
    |value|sample 1|sample 2|
    |-----|--------|--------|
    |value1|10|10|
    |value2|20|15|
    |value3|30|25|
    """)
    st.caption("In this case, you copy and paste column-wise values")

    st.write(
        'The chi2 calculator accepts the first row of your data in the Sample 1 field and the second row of your data in the Sample 2 field.'
    )

    st.write('To use the chi2 calculator:')
    st.write("""
    1. Input the significant value (default/max value is .05)
    2. Copy the values of your first sample and paste into the Sample 1 text entry field and hit "Enter." 
    3. Copy the values for your second sample and paste into the Sample 2 text entry field and hit "Enter."
    ❗Samples 1 and Sample 2 must be numerical values. 
       """)
    significance = float(
        st.text_input('Input significance value (default/max value is .05)',
                      value='.05'))
    col1 = st.text_input('Sample 1', value='10 20 30')
    col2 = st.text_input('Sample 2', value='10 15 25')
    st.caption("📝 This app does not retain user data.")

    s1 = [int(c) for c in col1.split()]
    s2 = [int(c) for c in col2.split()]

    chi, p_val, dof, ex = chi2_contingency([s1, s2], correction=False)
    p = 1 - significance
    crit_val = chi2.ppf(p, dof)
    st.subheader('Results')
    c1 = st.container()
    c2, c3, c4, c5 = st.columns(4)

    c1.metric('p-value', str(p_val))
    c2.metric('Dataset Length', str(len(s1)))
    c3.metric('Degrees of Freedom', "{:.2f}".format(dof))
    c4.metric('\n chi2 test statistic', "{:.5f}".format(chi))
    c5.metric('critical value', "{:.5f}".format(crit_val))
    st.write(
        "For an extended discussion of using chi2 tests for homogeneity for qualitative coding, see [Geisler and Swarts (2019)](https://wac.colostate.edu/docs/books/codingstreams/chapter9.pdf)"
    )
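The chi2 examples in this collection lean on SciPy's stats module for the test statistic and critical value; the imports they assume are presumably:

import pandas as pd
import streamlit as st
from scipy.stats import chi2, chi2_contingency, chisquare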
Code Example #17
File: app.py, Project: jstock29/dealnodeal
def main():
    st.set_page_config(page_title="DEAL OR NO DEAL",
                       page_icon="🤑",
                       initial_sidebar_state="expanded")
    st.sidebar.title('DEAL OR NO DEAL')
    selected_banker = st.sidebar.selectbox(
        'Pick your Banker', ['Random Forest', 'LightGBM', 'XGBoost'], 0)
    if st.sidebar.button('New Game'):
        new_game()
    st.sidebar.markdown("""
    This is a simulation of the Deal or No Deal Banker's offers. The code for this project can be found at [my Github](https://github.com/jstock29/dealnodeal) and the data that I painstakingly collected from over 100 episodes of the show is on [my Kaggle](https://www.kaggle.com/jaredstock/deal-or-no-deal-game-data).
    
    You can see what the RoboBanker will offer by simulating a board at various rounds. Each round you should pick the correct number of values from the board:
    
    1. Pick 6 - 6 Total
    2. Pick 5 - 11 Total
    3. Pick 4 - 15 Total
    4. Pick 3 - 18 Total
    5. Pick 2 - 20 Total
    6. Pick 1 - 21 Total
    7. Pick 1 - 22 Total
    8. Pick 1 - 23 Total
    9. Pick 1 - 24 Total
    10. Pick 1 - 25 Total
    
    After each round you can see what my RoboBanker is offering you and decide if that's a deal you want to take or not. I will not give you that money though.
    
    FYI: Anonymous game data is sent to my database so I can maybe do stuff with it later. I don't know why that would sketch you out, this is all fake, but there you go.
    """)

    st.sidebar.caption('Jared Stock | NYC | 2021')

    app_state = st.experimental_get_query_params()
    game_id = app_state['game_id'][0]
    round_number = int(app_state['round_number'][0])
    prev_offer = float(app_state['prev_offer'][0])
    offer = 0.

    # st.write(app_state)
    st.header('Board')
    st.write('')
    col1, col2, col3 = st.beta_columns(3)
    l_cols = VALUES[:len(VALUES) // 2]
    r_cols = VALUES[len(VALUES) // 2:]
    model = joblib.load(f'bankers/{selected_banker}.pkl')

    with col1:
        values_1 = [
            st.checkbox(str(val), key=session.run_id)
            for val in VALUES[:len(VALUES) // 2]
        ]
        left_sum = sum(
            [val for i, val in enumerate(l_cols) if not values_1[i]])
    with col2:
        values_2 = [
            st.checkbox(str(val), key=session.run_id)
            for val in VALUES[len(VALUES) // 2:]
        ]
        right_sum = sum(
            [val for i, val in enumerate(r_cols) if not values_2[i]])
    values = values_1 + values_2
    choices = [val for i, val in enumerate(VALUES) if values[i]]
    remaining = [val for i, val in enumerate(VALUES) if not values[i]]
    remaining_bigs = [_ for _ in remaining if _ in BIG_VALUES]

    average = np.average(remaining)
    _max = max(remaining)

    if right_sum == 0:
        balance = (left_sum / L_SUM)
    elif left_sum == 0:
        balance = (right_sum / R_SUM)
    else:
        balance = (right_sum / R_SUM) / (left_sum / L_SUM)
    ev = expected_value(remaining)

    with col3:
        st.subheader('Info')
        st.write(f'Round: {round_number}')
        st.write(f'Picked: {len(choices)}')
        st.write(f'Previous Offer: {prev_offer}')
        st.write(f'Expected Value: {round(ev, 0)}')
        st.write(
            f'Probability of having a big value: {round(len(remaining_bigs) / len(remaining) * 100, 1)}%'
        )

    st.subheader('Banker Offer')

    if len(choices) > 5:
        X = pd.DataFrame({
            'Round': [round_number],
            'Board Average': [ev],
            'Previous Offer': [prev_offer]
        })

        p = model.predict(X)
        offer = float(p[0])

        st.write(f'Offer: ${round(float(offer), 2)}')

        if offer / ev <= 1:
            st.progress(offer / ev)
        else:
            st.progress(1.0)  # st.progress treats a bare int as a 0-100 percentage, so pass a float
        st.caption(
            f'Offer % of Expected Value: {round((offer / ev) * 100, 2)}%')

    else:
        st.info('Pick values to see offers')

    col14, col15 = st.beta_columns(2)
    if len(choices) == 6 or len(choices) == 11 or len(choices) == 15 or len(
            choices) == 18 or len(choices) >= 20:
        with col14:
            if st.button('Deal!'):
                round_data = {
                    "Game ID": game_id,
                    "Round": round_number,
                    "Remaining Values": str(remaining),
                    "Board Value": sum(remaining),
                    "Board Average": round(average, 0),
                    "Board Balance": round(balance, 3),
                    "Probability of Big Value": round(len(remaining_bigs) / len(remaining), 3),
                    "Previous Offer": prev_offer,
                    "Offer": round(offer, 0),
                    "Offer Percent of Average": round(offer / average, 4),
                    "model": selected_banker,
                    "datetime": datetime.datetime.now(),
                    "Deal": True
                }
                df = pd.DataFrame(round_data, index=[0])
                populate_round(df, 'player_games')
        with col15:
            if st.button('No Deal!'):
                round_data = {
                    "Game ID": game_id,
                    "Round": round_number,
                    "Remaining Values": str(remaining),
                    "Board Value": sum(remaining),
                    "Board Average": round(average, 0),
                    "Board Balance": round(balance, 3),
                    "Probability of Big Value": round(len(remaining_bigs) / len(remaining), 3),
                    "Previous Offer": prev_offer,
                    "Offer": round(offer, 0),
                    "Offer Percent of Average": round(offer / average, 4),
                    "model": selected_banker,
                    "datetime": datetime.datetime.now(),
                    "Deal": False
                }
                round_number += 1
                st.experimental_set_query_params(round_number=round_number,
                                                 game_id=game_id,
                                                 prev_offer=round(offer, 0))
                df = pd.DataFrame(round_data, index=[0])
                populate_round(df, 'player_games')
    data = get_data('player_games')
    data = data.loc[data['Game ID'] == game_id]
    if st.checkbox('Show data'):
        st.write(data)

    visualization.single_line(data, game_id, width=600, height=400)
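`expected_value`, `new_game`, `populate_round`, and `get_data` are project helpers that are not shown. Since every remaining case is equally likely, `expected_value` is presumably just the mean of the remaining board values; a sketch:

import numpy as np

def expected_value(remaining):
    # With each case equally likely, expected winnings are the board mean.
    return float(np.mean(remaining))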
Code Example #18
# NOTE: this snippet begins mid-call; the widget that produces `selected_field`
# (presumably a slider over `fieldrange`) is not shown.
        options=fieldrange.tolist())
    st.write("Field value equivalent to",
             str(round(selected_field * paramters.mu0, 3)), "[T]")

    s_index = fieldrange.tolist().index(selected_field)

    figtraj = graphm(
        timeEvol[s_index], Mx[s_index], My[s_index], Mz[s_index], "time [ns]",
        r'$m_i$', "Evolution at " +
        str(round(selected_field * paramters.mu0, 3)) + "[T]")

    st.pyplot(figtraj)

#st.write(r"As can be noted in the magnetization dynamics for a given external field value, the system quickly gets its magnetization direction according to the applied AC current. However, if we just employ a single period for the time integration, the result of the Fourier integral may differ from the actual coefficient, as the first time steps do not have a pure wave behavior.")

st.caption("Computing the harmonics")

#st.write(r"Therefore, in order to accurately compute the integral, each time integration of the LLG equation, for each $H_{\text{ext,x}}$ value, is performed over 4 complete periods $t_f=4/f$. Then, for computing the Fourier integral, the initial period of the time integration of the LLG equation is ommited from the computation. Furthermore, to improve the accuracy of the calculated harmonic component of the voltage, the remaining three periods are integrated and the normalization factor of the Fourier integral is adjusted accordingly. Finally, the integral is numerically approximated by the following sum:")
#st.latex(r'''
#V^{xy}_{ \omega} \approx \frac{2}{t_f(3/4)} \sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \text{AHE} }) \sin(\omega t_i) (\Delta t)_i \\ \: \\
#V^{xy}_{2\omega} \approx \frac{2}{t_f(3/4)} \sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \text{AHE} }) \cos(2\omega t_i) (\Delta t)_i
#''')
#st.write(r'Where $i$ represents an index of the elements of the lists containing the values of each step of the simulation (_Note that one period has been split into 1000 equidistant steps_). Inside the simulation the voltage is computed as $V^{xy}(t)=J_x(t) m_z(t) R_{AHE} \sigma$, where $\sigma$ is the cross section area of the conducting element. In our case $\sigma=(2 \mu m \times 6 \text{nm})$ ')

#st.write("Lastly, the resulting transfer curves using the Fourier series integral definition are: ")

figv1w = graph(fieldrangeT, signalw, r'$\mu_0 H_x$ (T)', r'$V_{\omega} [V]$ ',
               "Vw", "First harmonic voltage")
figv2w = graph(fieldrangeT, signal2w, r'$\mu_0 H_x$ (T)',
               r'$V_{2\omega} [V]$ ', "V2w", "Second harmonic voltage")
Code Example #19
def chi_goodness_file_upload():
    """
    
    """
    st.title('chi2 Goodness of Fit Test')
    st.write(
        'This chi2 calculator assumes that your data consists of a single frequency distribution in a .csv or .xlsx file:'
    )

    st.markdown("""
    
    |value|sample|
    |-----|--------|
    |value1|37|
    |value2|75|
    |value3|98|
    """)
    st.caption("Required table format.")

    st.write('To use the chi2 calculator:')
    st.write("""
    1. Input the significance value (default is .05)
    2. Upload your .csv or .xlsx file. Ensure that you name your column of values "sample" like the example above. 
   
    ❗By default, expected frequencies are equally likely across classes. 
       """)
    significance = float(
        st.text_input('Input significance value (default is .05)',
                      value='.05'))
    st.caption("Significance values are often set to 0.005, 0.05, and 0.1")

    uploaded = st.file_uploader('Upload your .csv or .xlsx file.')
    st.caption("📝 This app does not retain user data.")

    if uploaded is not None:
        if uploaded.name.endswith('csv'):
            df = pd.read_csv(uploaded)
            s1 = df['sample']

            chi, p_val = chisquare(s1)
            p = 1 - significance
            crit_val = chi2.ppf(p, len(s1) - 1)
            st.subheader('Results')
            c1 = st.container()
            c2, c3, c4, c5 = st.columns(4)

            c1.metric('p-value', str(p_val))
            c2.metric('Dataset Length', str(len(s1)))
            c3.metric('Degrees of Freedom', "{:.2f}".format(len(s1) - 1))
            c4.metric('\n chi2 test statistic', "{:.5f}".format(chi))
            c5.metric('critical value', "{:.5f}".format(crit_val))
            st.write(
                "For an extended discussion of using chi2 goodness of fit tests for qualitative coding, see [Geisler and Swarts (2019)](https://wac.colostate.edu/docs/books/codingstreams/chapter9.pdf)"
            )

        elif uploaded.name.endswith('xlsx'):
            df = pd.read_excel(uploaded)
            s1 = df['sample']

            chi, p_val = chisquare(s1)
            p = 1 - significance
            crit_val = chi2.ppf(p, len(s1) - 1)
            st.subheader('Results')
            c1 = st.container()
            c2, c3, c4, c5 = st.columns(4)

            c1.metric('p-value', str(p_val))
            c2.metric('Dataset Length', str(len(s1)))
            c3.metric('Degrees of Freedom', "{:.2f}".format(len(s1) - 1))
            c4.metric('\n chi2 test statistic', "{:.5f}".format(chi))
            c5.metric('critical value', "{:.5f}".format(crit_val))
            st.write(
                "For an extended discussion of using chi2 goodness of fit tests for qualitative coding, see [Geisler and Swarts (2019)](https://wac.colostate.edu/docs/books/codingstreams/chapter9.pdf)"
            )
        else:
            st.write('Please upload a .csv or .xlsx file')
Code Example #20
    data_slice = simulated_data[simulated_data['Net profit options'] >
                                simulated_data['Net profit market']]
    if len(data_slice.index) > 0:
        lowest_better_idx = min(data_slice.index)
        return simulated_data.iloc[lowest_better_idx, :][[
            'Stock price', 'Price ratio %'
        ]].to_dict()
    return {'Stock price': 'Range too limited', 'Price ratio %': '-'}


st.set_page_config(page_title='Call option vs market price',
                   page_icon=None,
                   layout='wide')

st.write('## Call option vs market price purchasing: a simulated comparison')
st.caption('by *dpollozhani*')

with st.form('Input parameters'):
    firstrow_col1, firstrow_col2, firstrow_col3 = st.beta_columns(3)
    secondrow_col1, secondrow_col2, secondrow_col3 = st.beta_columns(3)

    option_price = firstrow_col1.number_input('Option price',
                                              min_value=1,
                                              value=10)
    no_of_options = firstrow_col2.number_input('# of options',
                                               min_value=1,
                                               value=6000)
    subsidy = firstrow_col3.number_input(
        'Subsidy (of which half is assumed taxable):',
        min_value=0.0,
        max_value=1.0,
Code Example #21
# specify the number of times each keyword scraping is run
numberOfTimes = st.slider(
    'How many times do you want this keyword scraping to be run?', 1, 100, 10)
listOfKeywords = ["nft", "crypto", "etf"]

col1, col2 = st.columns(2)
with col1:
    chosen_keywords = st_tags(label="Add Keywords here!",
                              text="press enter to add more",
                              value=listOfKeywords,
                              suggestions=['insurance', 'loans', 'blockchain'],
                              maxtags=10,
                              key='aljnf')
with col2:
    st.caption('Current List of Keywords')
    st.write(chosen_keywords)

submitted = st.button("Submit")
if submitted:
    st.write('Google Ads scraping for the following keywords:',
             str(chosen_keywords), ' for ', numberOfTimes, ' times.')

    resultDict = adScraper(numberOfTimes, chosen_keywords)
    rawOutput = jsonToDataFrame(resultDict, chosen_keywords)
    rawOutput.to_csv('AdScraperResult.csv', index=False)
    # resultJson = st.json(resultDict)
    # resultdf = st.dataframe(rawOutput)

displayResult = st.button("Display Result")
if displayResult:
Code Example #22
import numpy as np
from st_aggrid import AgGrid
from cornellbaseball import ncaa_scrape
from cornellbaseball import boydsworld_scraper as bd
from cornellbaseball import batting_metrics
from cornellbaseball import win_pct
import plotly.express as px
import plotly.graph_objects as go
import altair as alt
import time

st.set_page_config(page_title="Scouting Report: UVA", layout="wide")

player = st.sidebar.selectbox('Player', options=['Nate Savino'])
st.header(player)
st.caption("LHP / Jr / 6'3\" / 210lbs")

st.subheader("Basic Statisics")
basic_stats = pd.DataFrame(
    {
        'G': [16, 4],
        'GS': [10, 3],
        'IP': [50.4, 10.2],
        'Pitches': [814, 187],
        'FIP': [0, 0],
        'wOBA': [0, 0],
        'OPS': [0.736, 0.568],
        'OBP': [0.335, 0.306],
        'SLG': [0.401, 0.262],
        'ERA': [3.79, 3.38],
        'WHIP': [0, 0],
Code Example #23
reader = load_model() #load model

if image is not None:

    input_image = Image.open(image)  # read the uploaded image
    st.image(input_image)  # display it

    with st.spinner("Loading..."):
        result = reader.readtext(np.array(input_image))

        result_text = []  # collect the recognized strings
        for text in result:
            # each EasyOCR result is a (bounding box, text, confidence) tuple
            result_text.append(text[1])

        st.write(result_text)
    #st.success("Here you go!")
    st.balloons()
else:
    st.write("Upload an Image")

st.caption("Congrats it works")
Code Example #24
File: ui_params.py, Project: kavehbc/market-analyzer
def create_ui_params():
    st.title("Market Forecast")
    st.warning(
        ":warning: **Warning:** This tool neither recommends nor guarantees the performance of the given symbol. "
        "Use this tool and its forecasts at your own risk.")
    st.caption("Raw data is extracted from `Yahoo! Finance`.")
    st.caption(
        "The app usage is tracked using [statcounter.com](https://statcounter.com/),"
        " and it does not contain any personal information, since we never ask you any personal info."
        " The symbol names searched are stored for the auto-complete future."
        " That locally stored database can be accessed via `Popular Symbols` menu option."
        " This is an open-source application, and for more information you can check the `About app` section."
        " By using this app, you agreed with these terms and conditions.")
    st_ml_model = st.sidebar.selectbox("Predictive Model",
                                       options=list(ML_MODELS.keys()),
                                       index=0,
                                       format_func=lambda x: ML_MODELS[x])
    st_crypto_stock = st.sidebar.radio("Symbol Type", options=TICKER_TYPE)
    if st_crypto_stock == TICKER_TYPE[0]:
        st_crypto_name = st.sidebar.selectbox("Crypto Symbol",
                                              options=list(CRYPTOS.keys()),
                                              format_func=format_crypto)
        st_currency_name = st.sidebar.selectbox("Currency", options=CURRENCIES)

        if st_crypto_name is None:
            st_ticker_name = None
        else:
            st_ticker_name = st_crypto_name + "-" + st_currency_name

    elif st_crypto_stock == TICKER_TYPE[1]:
        st_ticker_name_list = st_tags.st_tags_sidebar(
            label='Stock Symbol',
            text='Press enter',
            value='',
            suggestions=get_top_tickers(n=100),
            maxtags=1,
            key='1')
        if len(st_ticker_name_list) > 0:
            st_ticker_name = st_ticker_name_list[0].upper()
        else:
            st_ticker_name = None

        # st_ticker_name = st.sidebar.text_input("Stock Symbol", value="MSFT").upper()
        st.sidebar.caption("Add `.TO` for the symbol in TSX")
    st_period = st.sidebar.selectbox("Period (History)",
                                     options=list(PERIODS.keys()),
                                     index=7,
                                     format_func=lambda x: PERIODS[x])
    st_interval = st.sidebar.selectbox("Interval",
                                       options=list(INTERVALS.keys()),
                                       index=8,
                                       format_func=lambda x: INTERVALS[x])
    st_price_column = st.sidebar.selectbox("Price",
                                           options=TICKER_DATA_COLUMN,
                                           index=3)
    st_future_days = st.sidebar.number_input("Future Days",
                                             value=365,
                                             min_value=1,
                                             step=1)
    st_future_volume = st.sidebar.number_input("Future Volume Assumption",
                                               value=0,
                                               min_value=0,
                                               step=1)
    st.sidebar.caption("Set Volume to 0 to ignore")
    st_training_percentage = st.sidebar.slider("Training Percentage",
                                               min_value=0.0,
                                               max_value=1.0,
                                               step=0.1,
                                               value=0.8)
    st_yearly_seasonality = st.sidebar.selectbox("Yearly Seasonality",
                                                 options=SEASONALITY_OPTIONS,
                                                 index=0)
    st_weekly_seasonality = st.sidebar.selectbox("Weekly Seasonality",
                                                 options=SEASONALITY_OPTIONS,
                                                 index=0)
    st_daily_seasonality = st.sidebar.selectbox("Daily Seasonality",
                                                options=SEASONALITY_OPTIONS,
                                                index=0)
    st_holidays = st.sidebar.selectbox("Holidays",
                                       options=list(HOLIDAYS.keys()),
                                       index=0,
                                       format_func=lambda x: HOLIDAYS[x])

    if st_crypto_stock == TICKER_TYPE[0]:
        st_seasonality_mode_index = 0
    else:
        st_seasonality_mode_index = 1
    st_seasonality_mode = st.sidebar.selectbox(
        "Seasonality Mode",
        options=SEASONALITY_MODE_OPTIONS,
        index=st_seasonality_mode_index)

    dic_return = DotDict(model=st_ml_model,
                         ticker_name=st_ticker_name,
                         period=st_period,
                         interval=st_interval,
                         future_days=st_future_days,
                         price_column=st_price_column,
                         future_volume=st_future_volume,
                         training_percentage=st_training_percentage,
                         yearly_seasonality=st_yearly_seasonality,
                         weekly_seasonality=st_weekly_seasonality,
                         daily_seasonality=st_daily_seasonality,
                         holidays=st_holidays,
                         seasonality_mode=st_seasonality_mode)
    return dic_return
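`DotDict` (also used in code example #4, from the same project) lets the returned settings be read as attributes, e.g. `params.ticker_name`. A minimal sketch of the usual pattern (an assumption about the project's helper, not its actual source):

class DotDict(dict):
    """A dict whose keys can also be read as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(name) from exc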
Code Example #25
def cpdb():
    st.title("Capital Projects Database QAQC")
    branch = st.sidebar.selectbox("select a branch", ["main"])
    agency_label = {
        "sagency": "Sponsoring Agency",
        "magency": "Managing Agency"
    }
    agency_type = st.sidebar.selectbox(
        "select an agency type",
        ["sagency", "magency"],
        format_func=lambda x: agency_label.get(x),
    )
    agency_type_title = agency_label[agency_type]

    view_type = st.sidebar.selectbox(
        "select to view by number of projects or values of commitments in dollars",
        ["projects", "commitments"],
    )
    view_type_title = view_type.capitalize()
    view_type_unit = ("Number of Projects" if view_type == "projects" else
                      "Commitments Amount (USD)")

    subcategory = st.sidebar.selectbox(
        "choose a subcategory or entire portfolio",
        ["all categories", "fixed assets"])

    data = get_data(branch=branch)

    st.markdown(body="""
        
        ### About the Capital Projects Database

        The Capital Projects Database (CPDB), a data product produced by the New York City (NYC) Department of City Planning (DCP) Data Engineering team, captures key data points on potential, planned, and ongoing capital projects sponsored or managed by a capital agency in and around NYC.
        Information reported in the Capital Commitment Plan published by the NYC Office of Management and Budget (OMB) three times per year is the foundation that CPDB is built from.  Therefore, only capital projects that are in the Capital Commitment Plan are reflected in CPDB. Additional data sources are incorporated to map the capital projects.
        CPDB enables Planners to better understand and communicate New York City's capital project portfolio within and across particular agencies. While not comprehensive, CPDB's spatial data provides a broad understanding of what projects are taking place within a certain area, and is a starting point for discovering opportunities for strategic neighborhood planning.

        ### About the QAQC Reports

        The QAQC page is designed to highlight key measures that can indicate potential data issues in a CPDB build. These graphs report summary statistics at the agency level and there are 3 ways to filter and view the data (w/ additional variation at the graph level):

        1. Agency type: Sponsoring agency OR Managing agency
        2. Aggregation type: the total number of projects OR the total sum ($) of all commitments
        3. Category type: Include projects in all categories (fixed asset, lump sum, ITT, Vehicles & equipment) OR include only projects that are categorized as Fixed Asset

        Additionally, there are basic geographic checks to facilitate the QAQC process of the disparate source data we receive from various city agencies. These checks are not meant to be comprehensive, rather they are intended to provide an indication if spatial data is outside of the NYC spatial boundaries or incorrect in some way.

        ### Key CPDB QAQC terms: 

        - **Mapped**: refers to a project record that has a geometry / spatial data associated with it.
        - Agency type: The **Managing** agency is the NYC Agency that is overseeing the construction of a project.  The **Sponsoring** agency is the NYC Agency that is funding the project.  The managing agency and sponsoring agency can be, but are not always, the same.
        - Category type: **Fixed Assets** are projects that are place-specific and have an impact on the surrounding area, visible or not, such as park improvements or sewer reconstruction.  Projects are categorized by DCP based on key words in the project description.  Other categories include Lump Sum and ITT, Vehicles, and Equipment​.
        - A **project** is a discrete capital investment, and is defined as a record that has a unique FMS ID.  
        - A **commitment** is an individual contribution to fund a portion of a project.  When looking at the "commitment" view you're looking at the sum of commitments.
        
        #### Additional Resources
        - [CPDB Github Repo Wiki Page](https://github.com/NYCPlanning/db-cpdb/wiki) 
        - [Medium Blog on CPDB](https://medium.com/nyc-planning-digital/welcome-to-the-world-dcps-capital-projects-database-693a8b9782ac)

        """)

    df = data["cpdb_summarystats_" + agency_type].set_index(agency_type +
                                                            "acro")
    df_pre = data["pre_cpdb_summarystats_" +
                  agency_type].set_index(agency_type + "acro")
    if view_type == "commitments":
        st.header(
            f"Dollar ($) Value of Commitments by {agency_type_title} for {subcategory} (Mapped vs Unmapped)"
        )
        df = df[get_commit_cols(df)]
        df_pre = df_pre[get_commit_cols(df_pre)]
    else:
        st.header(
            f"Number of Projects by {agency_type_title} for {subcategory} (Mapped vs Unmapped)"
        )
        df.drop(labels=get_commit_cols(df), axis=1, inplace=True)
        df_pre.drop(labels=get_commit_cols(df_pre), axis=1, inplace=True)

    # sort the values based on projects/commitments and get the top ten agencies
    df_bar = sort_base_on_option(df,
                                 subcategory,
                                 view_type,
                                 map_option=0,
                                 ascending=False)
    # print(df_bar.index)
    fig1 = px.bar(
        df_bar,
        x=df_bar.index,
        y=VIZKEY[subcategory][view_type]["values"],
        labels=dict(sagencyacro="Sponsoring Agency",
                    magencyacro="Managing Agency"),
        barmode="group",
        width=1000,
        color_discrete_sequence=COLOR_SCHEME,
    )

    fig1.update_yaxes(title=view_type_unit)

    fig1.update_layout(legend_title_text="Variable")

    st.plotly_chart(fig1)

    st.caption(
        f"""This graph reports the {view_type_unit} (both mapped and unmapped) by {agency_type_title} for {subcategory}. 
        Typically, large city agencies including DPR (Dept. Parks and Rec.), DEP (Dept. of Environmental Protection), DOT (Dept. of Transportation), and DCAS (Dept of Citywide Admin. Services) have the largest count of projects and, generally, the highest capital expenditure.
        Some agencies (e.g. HPD [Housing Preservation & Development]) often have fewer total projects but high capital expenditure because of the nature of their projects which are related to building housing across NYC.
        The purpose of this graph is to give an overview of the distribution of projects or commitments by agency, and a sense of what portion of these are mapped."""
    )

    # ----- 2nd Graph
    st.header(
        f"Compare the Total {view_type_unit} in the Previous Version vs. the Latest Version of CPDB by {agency_type_title}"
    )

    map_options = {0: f"all {view_type}", 1: f"mapped {view_type} only"}
    map_option = st.radio(
        label=
        f"Choose to compare either all {view_type} or mapped {view_type} only.",
        options=[0, 1],
        format_func=lambda x: map_options.get(x),
    )
    map_title_text = "Mapped and Unmapped" if map_option == 0 else "Mapped Only"
    # get the difference dataframe
    diff = get_diff_dataframe(df, df_pre)
    df_bar_diff = sort_base_on_option(diff,
                                      subcategory,
                                      view_type,
                                      map_option=map_option)
    fig2 = go.Figure([
        go.Bar(
            name="Difference",
            x=df_bar_diff[VIZKEY[subcategory][view_type]["values"]
                          [map_option]],
            y=df_bar_diff.index,
            orientation="h",
        ),
        go.Bar(
            name="Latest Version",
            x=df[VIZKEY[subcategory][view_type]["values"][map_option]],
            y=df.index,
            orientation="h",
            visible="legendonly",
        ),
        go.Bar(
            name="Previous Version",
            x=df_pre[VIZKEY[subcategory][view_type]["values"][map_option]],
            y=df_pre.index,
            orientation="h",
            visible="legendonly",
        ),
    ])
    fig2.update_layout(
        barmode="group",
        width=1000,
        height=1000,
        title_text=
        f"Total {view_type_unit} by Version and {agency_type_title} ({map_title_text})",
        colorway=COLOR_SCHEME,
    )

    fig2.update_xaxes(title=f"Total {view_type_unit} ({map_title_text})")

    fig2.update_yaxes(title=agency_type_title)

    st.plotly_chart(fig2)

    st.caption(f"""  
        This graph visualizes the difference in the {view_type_unit} by {agency_type_title} between the current (aka latest) and the previous version of CPDB. 
        While the underlying Capital Commitment Plan data changes between versions, any drastic changes between CPDB versions that are illustrated by this graph can indicate if there is a specific agency or source dataset to look into further that may have introduced these anomalies.
        Anomalies include, but are not limited to, no projects being mapped for a given agency when there were mapped projects in the previous version, the number of projects doubling for an agency between versions, or the total sum of commitments halving for an agency between versions.
        This chart also gives the viewer the flexibility to change between all projects by Number of Projects (both mapped and unmapped) along with an option to just view the mapped (geolocated) projects. Click the "Latest Version" and "Previous Version" labels in the legend to display the total Number of Projects for each.
        """)

    #### ----- 3rd Graph
    st.header(
        f"Compare Mapping of {view_type.capitalize()} between Previous and Latest Versions by {agency_type_title}"
    )

    diff_perc = get_map_percent_diff(df, df_pre,
                                     VIZKEY[subcategory][view_type])

    fig3 = go.Figure([
        go.Bar(
            name="Difference",
            x=diff_perc.diff_percent_mapped,
            y=diff_perc.index,
            orientation="h",
        ),
        go.Bar(
            name="Latest Version",
            x=diff_perc.percent_mapped,
            y=diff_perc.index,
            orientation="h",
            visible="legendonly",
        ),
        go.Bar(
            name="Previous Version",
            x=diff_perc.pre_percent_mapped,
            y=diff_perc.index,
            orientation="h",
            visible="legendonly",
        ),
    ])

    fig3.update_layout(
        width=1000,
        height=1000,
        title_text=
        f"Percentage of {view_type_title} Mapped by Version and {agency_type_title}",
        colorway=COLOR_SCHEME,
    )

    fig3.update_xaxes(title=f"Percentage", tickformat=".2%")
    fig3.update_yaxes(title=agency_type_title)
    st.plotly_chart(fig3)

    st.caption(f"""
        This graph shows another important cut of the data in which we higlight the percentage of {view_type} succesfully mapped (geocoded) by {agency_type_title} between the last two verions of CPDB along with the pct. difference between those verions. 
        Typically, we'd expect a similar pct. of records to be mapped by {agency_type_title} between versions and any significant change should be looked at more closely.
        Click the "Latest Version" and "Previous Version" labels in the legend to display the percentage mapped for each.
        
        """)

    adminbounds(data)

    withinNYC_check(data)

    geometry_visualization_report(data)
Code Example #26
def kappa_file_upload():
    st.title("Cohen's Kappa Calculator")
    st.markdown("""
    Upload your .csv or .xlsx file. 
    
    Your files should feature the following format:
       """)

    dff = pd.DataFrame({
        'Coder 1': ['a', 'a', 'b'],
        'Coder 2': ['a', 'a', 'b']
    })
    st.dataframe(dff)

    uploaded_file = st.file_uploader("Upload your data as .csv or .xlsx")
    st.caption("📝 This app does not retain user data.")
    if uploaded_file is not None:
        if str(uploaded_file.name).endswith('csv'):
            df = pd.read_csv(uploaded_file)

            try:
                st.subheader('Results')

                col1 = df['Coder 1'].tolist()
                col2 = df['Coder 2'].tolist()

                c1, c2, c3 = st.columns(3)
                c1.metric('Dataset Length', str(len(col1)))
                c2.metric('Accuracy', str(accuracy_score(col1, col2)))
                c3.metric('Kappa Score', str(cohen_kappa_score(col1, col2)))

                labels = sorted(list(set(col1 + col2)))
                indices = [str(label) + '_' for label in labels]
                st.write("Confusion Matrix:")
                st.dataframe(
                    pd.DataFrame(confusion_matrix(col1, col2),
                                 index=indices,
                                 columns=labels))
                st.caption('Note: Coder 1 is used as the baseline for evaluation.')

                st.markdown(
                    "For an extended presentation on Cohen's Kappa see Hart-Davidson (2014), [Using Cohen's Kappa to Gauge Interrater Reliability](https://www.slideshare.net/billhd/kappa870)"
                )
            except ValueError:
                st.markdown('<mark>Error: Data must be the same length</mark>',
                            unsafe_allow_html=True)
        elif str(uploaded_file.name).endswith('xlsx'):
            df = pd.read_excel(uploaded_file)
            col1 = df['Coder 1'].tolist()
            col2 = df['Coder 2'].tolist()

            st.subheader('Results')
            c1, c2, c3 = st.columns(3)
            c1.metric('Dataset Length', str(len(col1)))
            c2.metric('Accuracy', str(accuracy_score(col1, col2)))
            c3.metric('Kappa Score', str(cohen_kappa_score(col1, col2)))

            labels = sorted(list(set(col1 + col2)))
            indices = [str(label) + '_' for label in labels]
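            # A trailing underscore keeps the row (Coder 1) labels visually
            # distinct from the identical column (Coder 2) labels.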

            st.write("Confusion Matrix:")
            st.dataframe(
                pd.DataFrame(confusion_matrix(col1, col2),
                             index=indices,
                             columns=labels))
            st.caption('Note: Coder 1 is used as the baseline for evaluation.')

            st.markdown(
                "For an extended presentation on Cohen's Kappa see Hart-Davidson (2014), [Using Cohen's Kappa to Gauge Interrater Reliability](https://www.slideshare.net/billhd/kappa870)"
            )
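
As a sanity check on the Kappa Score metric above, the following minimal sketch shows what cohen_kappa_score computes: observed agreement corrected for chance agreement. kappa_by_hand is an illustrative helper under that assumption, not part of either app.

from collections import Counter

def kappa_by_hand(coder1, coder2):
    # Observed agreement: fraction of positions where the coders agree.
    n = len(coder1)
    p_o = sum(a == b for a, b in zip(coder1, coder2)) / n
    # Chance agreement: product of each coder's marginal label frequencies.
    c1, c2 = Counter(coder1), Counter(coder2)
    p_e = sum(c1[label] * c2[label] for label in set(coder1) | set(coder2)) / (n * n)
    return (p_o - p_e) / (1 - p_e)

# Perfect agreement yields 1.0; chance-level agreement yields 0.0.
print(kappa_by_hand(['a', 'a', 'b'], ['a', 'a', 'b']))  # 1.0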
Code example #27
def chi_goodness():
    """
    Render a chi-square goodness-of-fit calculator for a single observed
    frequency distribution.
    """
    st.title('chi2 Goodness of Fit Test')
    st.write(
        'This chi2 calculator assumes that your data consists of a single frequency distribution:'
    )

    st.markdown("""
    |sample|value1|value2|value3|
    |------|------|------|------|
    |sample|37|75|98|
    
    """)
    st.caption("In this case, you copy and paste row-wise values")
    st.markdown('Or')
    st.markdown("""
    |value|sample|
    |-----|--------|
    |value1|37|
    |value2|75|
    |value3|98|
    """)
    st.caption("In this case, you copy and paste column-wise values")

    st.write('To use the chi2 calculator:')
    st.write("""
    1. Input the significant value (default is .05)
    2. Copy the values of your sample and paste into the Sample text entry field and hit "Enter." 
    
    ❗By default, expected frequencies are equally likely. 
       """)
    significance = float(
        st.text_input('Input significance value (default is .05)',
                      value='.05'))
    st.caption("Significance values are often set to 0.005, 0.05, and 0.1")

    col1 = st.text_input('Sample', value='37 75 98')
    st.caption("📝 This app does not retain user data.")
    s1 = [int(c) for c in col1.split()]
    E = sum(s1) / len(s1)
    chis = [(s - E)**2 / E for s in s1]

    chi, p_val = chisquare(s1)
    p = 1 - significance
    crit_val = chi2.ppf(p, len(s1) - 1)
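    # The critical value is the chi2 quantile at 1 - significance with
    # len(s1) - 1 degrees of freedom; the null hypothesis is rejected when
    # the test statistic exceeds it (equivalently, when p_val < significance).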
    st.subheader('Results')
    c1 = st.container()
    c2, c3, c4, c5 = st.columns(4)

    c1.metric('p-value', str(p_val))
    c2.metric('Dataset Length', str(len(s1)))
    c3.metric('degree of freedom', "{:.2f}".format(len(s1) - 1))
    chart_df = pd.DataFrame(chis, columns=['(O-E)**2/E'])
    st.bar_chart(chart_df)
    c4.metric('chi2 test statistic', "{:.5f}".format(chi))
    c5.metric('critical value', "{:.5f}".format(crit_val))

    st.write(
        "For an extended discussion of using chi2 goodness of fit tests for qualitative coding, see [Geisler and Swarts (2019)](https://wac.colostate.edu/docs/books/codingstreams/chapter9.pdf)"
    )
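
A quick worked check of the statistic above, using the calculator's default sample (37 75 98); this snippet is illustrative and not part of the app:

from scipy.stats import chi2, chisquare

s1 = [37, 75, 98]
E = sum(s1) / len(s1)                        # expected count per cell: 70.0
stat = sum((o - E) ** 2 / E for o in s1)     # (33**2 + 5**2 + 28**2) / 70 ≈ 27.114
assert abs(stat - chisquare(s1).statistic) < 1e-9
print(chi2.ppf(0.95, len(s1) - 1))           # critical value ≈ 5.991 at significance .05

Since the statistic (≈27.11) exceeds the critical value (≈5.99), the default sample departs significantly from equal frequencies.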
Code example #28
        'Magazine': weapon_mag,
        'Handle': weapon_handle,
        'Stock': weapon_stock
    }
    val = {'weapon': weapon, 'nickname': 'Temp ' + weapon, 'equipped_attachments': attachments,
           'rarity': rarity, 'pap': pap, 'accuracy': None, 'critical': None}
    gun_lst.append(val)

# Build Analyze Class
if len(gun_lst) >= 1:
    analysis = Analyze(damage_profile=damage_profile, zombie_info=zom, weapon_dic_lst=gun_lst)
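
    # Analyze (defined elsewhere in this project) appears to expose a
    # per-weapon series for each plot type via
    # _compare_info_for_plots[weapon][plot]; the loop below gathers those
    # series into one DataFrame per selected plot.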

    st.header('Visualizations:')
    plots = st.multiselect('Plots to Display', ['Damage Per Max Ammo', 'Damage Per Clip', 'Damage Per Second',
                                                'Time To Kill', 'Shots To Kill'])
    for plot in plots:
        st.subheader(plot)
        dps_df = pd.DataFrame()
        for weapon in weapon_lst:
            if weapon != 'None':
                dps_df[weapon] = analysis._compare_info_for_plots[weapon][plot]
        st.line_chart(data=dps_df)
        if 'Damage' in plot:
            st.caption('Damage Value vs Range (Meters)')
        elif 'Time' in plot:
            st.caption('Seconds vs Range (Meters)')
        elif 'Shots' in plot:
            st.caption('Shots vs Range (Meters)')
        elif 'Ratio' in plot:
            st.caption('Ratio vs Range (Meters)')
Code example #29
    min_value=1,
    max_value=14,
    value=1,
    step=1,
)
user_plot = st.button('Plot forecast')
p1 = st.empty()
p2 = st.empty()
p3 = st.empty()

# Build Sidebar
with st.sidebar:
    st.title('Customize model')
    user_country = st.text_input('Insert ISO code of Country', value='BE')
    st.text('Take local holidays into account when training a model.')
    st.caption('List of available ISO codes here:')
    st.caption('https://github.com/dr-prodigy/python-holidays')
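    # Assumption: user_country feeds the model's holiday configuration,
    # e.g. Prophet's add_country_holidays(country_name=user_country); the
    # training code is not shown in this excerpt.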
    st.subheader('Change with caution:')
    with st.form(key='Model Parameters'):
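        # These sliders expose standard Prophet regularization knobs:
        # changepoint_prior_scale controls trend flexibility, while the
        # seasonality and holidays prior scales control how strongly those
        # components are allowed to fit the data.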
        user_chps = st.slider('Changepoint Prior Scale',
                              min_value=0.001,
                              max_value=0.050,
                              value=0.001,
                              step=0.001,
                              format='%.3f')
        user_sps = st.slider('Seasonality Prior Scale',
                             min_value=0.01,
                             max_value=10.00,
                             value=10.00,
                             step=0.01)
        user_hps = st.slider('Holidays Prior Scale',
Code example #30
import streamlit as st
from PIL import Image

st.title('Streamlit Text Sizing')
st.title('This is a title')
st.header('This is a header')
st.subheader('This is a subheader')

st.write('This is a st.write call')
st.caption('This is small text')

st.markdown('---')
st.title('Markdown Heading Sizes')

st.markdown('# Heading Level One')
st.markdown('## Heading Level Two')
st.markdown('### Heading Level Three')
st.markdown('#### Heading Level Four')
st.markdown('##### Heading Level Five')
st.markdown('###### Heading Level Six')
st.caption('This is small text')

st.markdown('---')
image = Image.open('dog.jpg')
st.image(
    image,
    caption=
    'This is the default image caption: Photo by Marliese Streefland on Unsplash'
)
st.caption('Photo by Marliese Streefland on Unsplash')
st.write('Above: A caption without any tags')