def show_topic_co_occurrences(corpus, number_of_topics, number_of_chunks=100):
    st.header("Topic co-occurrences")
    if corpus is None:
        st.markdown("Please upload a corpus first")
    else:
        with st.expander("Help"):
            st.markdown('''
                We consider two topics to co-occur in the same document if the weights of
                both topics for that document are greater than *minimum weight*. The
                thickness of an edge in the co-occurrence graph indicates how often two
                topics co-occur in a document (at least *minimum edges* times). Each node
                represents a topic. Node size reflects the total weight of the topic.
            ''')
        min_weight = st.sidebar.slider("Minimum weight", 0.0, 0.5, value=0.1, step=0.05)
        min_edges = st.sidebar.slider("Minimum number of edges", 1, 10, value=1)
        graph_container = st.empty()
        with st.expander("Settings"):
            library_to_use = st.radio("Visualization library to use", ("VisJS", "GraphViz"), index=0)
            if library_to_use == "VisJS":
                smooth_edges = st.checkbox("Draw with smooth edges", value=False)
        if library_to_use == "VisJS":
            graph_pyvis = topic_coocurrence_graph_pyvis(
                topic_model(corpus, number_of_topics, number_of_chunks),
                corpus, number_of_topics, min_weight, min_edges, smooth_edges)
            graph_pyvis.show("topic-graph.html")
            with graph_container.container():
                components.html(open("topic-graph.html", 'r', encoding='utf-8').read(), height=625)
        else:
            graph = topic_coocurrence_graph(
                topic_model(corpus, number_of_topics, number_of_chunks),
                corpus, number_of_topics, min_weight, min_edges)
            with graph_container.container():
                st.graphviz_chart(graph)
        with st.expander("Topic sentences"):
            show_topic_sentences(corpus, number_of_topics, number_of_chunks, min_weight)
def Main():
    with st.sidebar.expander("DVD_HK"):
        st.info('''
            Dividends: Hong Kong Listed Stocks only (for now...)
        ''')
    default_tickers = get_index_tickers(
        st_asset=st.sidebar.expander('Load an Index', expanded=True))
    tickers = tickers_parser(
        st.text_input('enter stock ticker(s) [space separated]', value=default_tickers),
        return_list=True)
    timeframe_params = get_timeframe_params(
        st_asset=st.sidebar.expander("Timeframe"),
        default_tenor='50y')
    if tickers:
        # dividend history with ex-date true range vs div amount / true range
        df = get_dvd(tickers, ex_date_after=timeframe_params['end_date'], aggregate=True)
        show_upcoming_div(df,
                          st_asset=st.expander("View Upcoming Dividend", expanded=True),
                          timeframe_params=timeframe_params, atr_period=22)
        df_all = get_dvd(tickers, aggregate=True)
        show_past_div(df_all,
                      st_asset=st.expander("View Past Dividend"),
                      timeframe_params=timeframe_params, atr_period=22)
def show_plot_macd(df, choice, x_as_label, ma1, ma2):
    df["ma_short"] = ema(df["Close"], ma1, 0)
    df["ma_long"] = ema(df["Close"], ma2, 0)
    df["MACD_Line"] = df["ma_short"] - df["ma_long"]
    df["Signal_Line"] = ema(df["MACD_Line"], 9, 26)
    df["MACD_Histogram"] = df["MACD_Line"] - df["Signal_Line"]

    buy_price, sell_price, macd_signal, signal_macd = implement_macd_strategie_1(
        df['Close'], df['ma_short'], df['ma_long'])
    if signal_macd == 1:
        plot_macd(df, choice, buy_price, sell_price, macd_signal, x_as_label)
    else:
        with st.expander(f"Moving averages - {choice} not interesting"):
            plot_macd(df, choice, buy_price, sell_price, macd_signal, x_as_label)

    buy_price, sell_price, macd_signal, signal_macd = implement_macd_strategie_2(
        df['Close'], df['MACD_Line'], df['Signal_Line'])
    if signal_macd == 1:
        plot_macd_2(df, choice, buy_price, sell_price, macd_signal, x_as_label)
    else:
        with st.expander(f"MACD 2 - {choice} not interesting"):
            plot_macd_2(df, choice, buy_price, sell_price, macd_signal, x_as_label)
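# `ema` is an external helper assumed by show_plot_macd and not shown here.
# A minimal sketch using pandas' exponentially weighted mean, assuming the
# second argument is the span and the third argument is unused padding:
import pandas as pd

def ema(series: pd.Series, span: int, _offset: int = 0) -> pd.Series:
    # adjust=False gives the recursive EMA commonly used for MACD lines
    return series.ewm(span=span, adjust=False).mean()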
def multi_stocks():
    # tickers = ["9988.HK", "3311.HK", "0005.HK"]
    ticker_mode = st.sidebar.selectbox("Ticker mode or Keyword mode", ("Keyword", "Ticker"))
    tickers_input = st.sidebar.text_input("Enter the stock tickers, separated by ',' or ';'")
    tickers = tickers_input.replace(';', ',').split(",")
    st.sidebar.write("Search multiple stocks or indexes by keywords or tickers.")
    if tickers != [""]:
        if ticker_mode == "Ticker":
            pctDf, dates, prices = get_pct_changes(tickers)
            st.dataframe(pctDf)
            st.markdown(get_table_download_link(pctDf), unsafe_allow_html=True)
        elif ticker_mode == "Keyword":
            kw_list = []
            for kw in tickers:
                kw_list.append(name_convert(kw))
            pctDf, dates, prices = get_pct_changes(kw_list)
            st.dataframe(pctDf)
            st.markdown(get_table_download_link(pctDf), unsafe_allow_html=True)
        with st.expander("Data"):
            st.dataframe(prices)
            st.markdown(get_table_download_link(prices), unsafe_allow_html=True)
        with st.expander("Plot"):
            st.write("""To be improved""")
            st.plotly_chart(px.line(prices), template=template)
        with st.expander("Dates"):
            try:
                for d in dates:
                    st.write(f"""{d}: {dates[d].strftime('%d/%m/%Y')}""")
                st.write("""Dates might be shifted slightly when they fall on a holiday.""")
                st.write("""These dates are taken from the dataframe of the last asset in your search list.""")
            except Exception:
                pass
        with st.expander("Delay Information"):
            delay = pd.read_html(
                "https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html")[0]
            st.dataframe(delay)
            st.markdown(
                "[Yahoo Finance](https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html)",
                unsafe_allow_html=True)
        with st.expander("Percentage Change Calculation"):
            st.write("""To be implemented""")
            start_date_pct = st.text_input(
                "Start Date", f'{(today - datetime.timedelta(90)).strftime(format_date)}')
            end_date_pct = st.text_input(
                "End Date", f'{(today + datetime.timedelta(1)).strftime(format_date)}')
    return None
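# `get_table_download_link` is a project helper assumed to return an HTML anchor
# with the dataframe embedded as base64-encoded CSV, following the common pattern
# that predates st.download_button. A minimal sketch:
import base64

def get_table_download_link(df, filename: str = "data.csv") -> str:
    csv_bytes = df.to_csv(index=False).encode()
    b64 = base64.b64encode(csv_bytes).decode()
    return f'<a href="data:file/csv;base64,{b64}" download="{filename}">Download csv file</a>'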
def index():
    index_name = st.sidebar.text_input("Index", "")
    st.sidebar.write("Search a stock market index by keyword or ticker")
    if index_name != "":
        index_symbol = name_convert(index_name)
        indexData = yf.Ticker(index_symbol)
        name = indexData.info["shortName"]
        indexDf = indexData.history(period="max")
        index_fig = px.line(indexDf["Close"], template="simple_white",
                            title='Historical Performance of {}'.format(name))
        index_fig.update_xaxes(
            rangeslider_visible=False,
            rangeselector=dict(buttons=list([
                dict(count=1, label="1 Month", step="month", stepmode="backward"),
                dict(count=6, label="6 Months", step="month", stepmode="backward"),
                dict(count=1, label="YTD", step="year", stepmode="todate"),
                dict(count=1, label="1 Year", step="year", stepmode="backward"),
                dict(step="all")
            ])))
        st.plotly_chart(index_fig, template=template)
        gainers, laggards = index_performance(index_name)
        with st.expander('Gainers'):
            st.dataframe(gainers)
        with st.expander('Laggards'):
            st.dataframe(laggards)
        # with st.expander('Percentage Change'):
        #     st.markdown(f"{index_symbol.replace(r'%5E', r'^')}")
        #     index_pct, index_dates = get_pct_changes(index_symbol.replace(r"%E5", r"^"))
        #     st.dataframe(index_pct)
        #     with st.expander("Dates"):
        #         try:
        #             for d in index_dates:
        #                 st.write(f"""{d}: {index_dates[d].strftime('%d/%m/%Y')}""")
        #             st.write("""Dates might be shifted slightly when they fall on a holiday.""")
        #             st.write("""These dates are taken from the dataframe of the last asset in your search list.""")
        #         except Exception:
        #             pass
    return None
def main():
    init_sessions()
    st.write(st.session_state)

    # expander
    optionals = st.expander("Search And Filter", False)
    optionals.checkbox("Active")
    optionals.radio("Pick Your Favourite", ["Apples", "Bananas", "Oranges"])
    name_cols = optionals.columns(3)
    first_name = name_cols[0].text_input("First Name")
    last_name = name_cols[1].text_input("Last Name")
    middle_name = name_cols[2].text_input("Middle Name")

    st.sidebar.success("Menu")
    page = st.sidebar.radio("Choose An Item", ['Demos', 'Titanic', 'NYC Car Accidents'])
    if page == "Demos":
        app_run_demo()
        logger.info('Demos')
    elif page == "Titanic":
        app_run_titanic()
        logger.info('Titanic')
    elif page == "NYC Car Accidents":
        app_run_car_accidents()
        logger.info('NYC Car Accidents')
def fourier_frame():
    with st.expander("Create a Fourier Series"):
        st.latex(r'\hat{f}(x) = \frac{a_0}{2} + \sum_{n=1}^{\infty}a_n\cos\bigg(\frac{2\pi n}{P}x\bigg) + '
                 r'\sum_{n=1}^{\infty}b_n\sin\bigg(\frac{2\pi n}{P}x\bigg)')
        selection_list = ['Sine', 'Cosine', 'Square', 'Triangle', 'Sawtooth', 'Gaussian']
        st.markdown('## Wave Options')
        type = st.selectbox('Wave Type', selection_list, 0)
        amp = st.slider('Amplitude', 0., 100., 1., 0.1, "%.1f")
        per = st.slider('Period (or Gaussian FWHM)', 0.01, 100., 2 * np.pi, 0.01, "%.2f")
        phase = st.slider('Phase (or Gaussian Mean)', -np.pi, np.pi, 0., 0.01, "%.2f")
        velocity = st.number_input('Velocity (v)', int(0), int(100), int(0), int(1), "%d")
        rectify = st.checkbox('Rectify', False)
        st.markdown('## Fourier Series Options')
        ts = st.number_input('Draw Timestamp', 0., 100., 0., 0.1, "%.1f")
        iters = st.number_input('Iterations', int(1), int(100), int(10), int(1), "%d")
        cols = st.columns(2)
        min = cols[0].number_input(label='Interval Min', value=-np.pi)
        max = cols[1].number_input(label='Interval Max', value=np.pi)
        interval = (min, max)
        res = st.number_input(label='Resolution', value=1000)
        pchart = build_fourier(type, rectify, amp, per, phase, velocity, ts, iters, interval, res)
        st.plotly_chart(pchart, use_container_width=True)
def visualize_similarity(
    nlp: spacy.language.Language,
    default_texts: Tuple[str, str] = ("apple", "orange"),
    *,
    threshold: float = 0.5,
    title: Optional[str] = "Vectors & Similarity",
    key: Optional[str] = None,
) -> None:
    """Visualizer for semantic similarity using word vectors."""
    meta = nlp.meta.get("vectors", {})
    if title:
        st.header(title)
    if not meta.get("width", 0):
        st.warning("No vectors available in the model.")
    else:
        cols = st.columns(2)
        text1 = cols[0].text_input("Text or word 1", default_texts[0], key=f"{key}_similarity_text1")
        text2 = cols[1].text_input("Text or word 2", default_texts[1], key=f"{key}_similarity_text2")
        doc1 = nlp.make_doc(text1)
        doc2 = nlp.make_doc(text2)
        similarity = doc1.similarity(doc2)
        similarity_text = f"**Score:** `{similarity}`"
        if similarity > threshold:
            st.success(similarity_text)
        else:
            st.error(similarity_text)
        exp = st.expander("Vector information")
        exp.code(meta)
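# Example usage, assuming a spaCy model that ships with word vectors (e.g.
# en_core_web_md); small models without vectors trigger the warning above:
#
#     import spacy
#     nlp = spacy.load("en_core_web_md")
#     visualize_similarity(nlp, ("cat", "dog"), threshold=0.5, key="demo")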
def main():
    plots = {
        "Plot Single Learning Curve": utils.plot_learning_curve,
        "Plot Learning Curve Confidence": utils.plot_learning_curve_conf,
        "Plot Learning Curve Sweep": utils.plot_learning_curve_sweep,
        "Plot Experiment": utils.plot_experiment,
        "Plot Mean Result": utils.plot_mean_result,
        "Plot Mean Result with Confidence": utils.plot_mean_conf,
        "Plot Sweep Result with Confidence": utils.plot_sweep_conf,
        "Plot QTable Values": utils.plot_values,
        "Plot QTable Visits": utils.plot_visits,
    }
    for k, f in plots.items():
        with st.expander(k):
            loc = st.text_input("Path to experiment", key=k)
            if loc:
                try:
                    fig = f(loc, return_fig=True)
                    if isinstance(fig, list):
                        for g in fig:
                            st.plotly_chart(g)
                    else:
                        st.plotly_chart(fig)
                except Exception:
                    st.write("Incorrect folder structure for plot")
def line_chart_pivot(df_, field, title):
    """Make a line chart from a pivoted table, with each column as a separate line,
    smoothed with a 3-period simple moving average.

    Args:
        df_ (pd.DataFrame): source table
        field (str): column to pivot on
        title (str): chart title
    """
    df = make_pivot(df_, field)
    fig = go.Figure()
    columns = df.columns.tolist()
    columnlist = columns[1:]
    for col in columnlist:
        col_sma = col + "_sma"
        df[col_sma] = df[col].rolling(window=3, center=False).mean()
        fig.add_trace(go.Scatter(x=df["einddag_week"], y=df[col_sma], mode='lines', name=col))
    fig.update_layout(
        title=dict(text=title + " (SMA 3)", x=0.5, y=0.85,
                   font=dict(family="Arial", size=14, color='#000000')),
        xaxis_title="Einddag vd week",
        yaxis_title="VE")
    st.plotly_chart(fig, use_container_width=True)
    with st.expander(f"dataframe pivottable {title}"):
        df_temp = df.astype(str).copy(deep=True)
        st.write(df_temp)
def get_params():
    # the streamlit stuff
    st.title('Dither sequences for blended acquisition')
    # start by getting the input parameters from the user. TODO: add tooltip
    my_expander1 = st.expander("General parameters:", expanded=True)
    with my_expander1:
        col1, col2, col3 = st.columns(3)
        with col1:
            no_src = st.number_input('Number of sources to dither:', 1, 9, 3, 1,
                help="This is the number of sources operating in flip-flop-flap mode.")
            nPoints = st.number_input('The number of dithers per source:', 10, 2500, 200, 10,
                help='This is how many dithers you want for one source. Typically this number should be > the number of traces in the migration aperture.')
            compute_dithers = st.button("Plot the dithers (for QC), and prepare a file for downloading",
                help='Produce nice looking QC plots of the dither sequences.')
            get_help = st.button("Get a ppt that explains the dithering",
                help="Download a ppt with a lot of explanation on why you want to use the inverse Irwin-Hall distribution.")
            if get_help:
                # return a ppt: load the file and offer it for download
                filename = "./dither_explained.pptx"
                with open(filename, 'rb') as f:
                    s = f.read()
                download_button_str = download_button(s, filename, f'Click here to download {filename}')
                st.markdown(download_button_str, unsafe_allow_html=True)
        with col2:
            range_beg = st.number_input('Dither minimum in ms:', -2000, 2000, 0, 4)
            range_end = st.number_input('Dither maximum in ms:', -2000, 2000, 500, 4)
            dither_type = st.selectbox(
                'Select type of dithers (remember that natural dithering adds to this):',
                ('Inverse Irwin-Hall', 'Random', 'Halton', 'Poisson'),
                help="The IIH (Inverse Irwin-Hall) is the one you should select! The others are for RnD and as illustrations.")
            if dither_type == "Inverse Irwin-Hall":
                st.warning('Contact Legal/IP/RnD before using inverse Irwin-Hall dither on a survey!')
        with col3:
            nLevels = st.number_input('Number of levels (N+1 or N+2):', 1, 2, 1, 1,
                help="Keep this at 1 for all normal surveys. In a case where sources are going off very often, it might be advisable to also optimize the dithers for the N+2 shot. Please contact RnD before using this on a real survey.")
            nBacksteps = st.number_input('Amount of anti-clustering:', 1, 5, 5, 1,
                help="Keep this at 5 or less; [3-5] is a good and robust choice. If you use numbers much larger than 5, there is a potential issue with regards to a CGG patent, in that the effective distribution becomes close to uniform random. Please contact Legal/IP counsel and RnD before going above 5.")
            user_seed = int(st.text_input("User seed (for the random number generator):", "0",
                help="Keeping this at 0 will give different results each time. By providing your own seed, for example 123, the same random sequence is produced every time."))
            if user_seed != 0:
                random.seed(a=user_seed)
    return [nBacksteps, no_src, nPoints, range_beg, range_end, nLevels, dither_type, compute_dithers]
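# `download_button` is a project helper assumed to wrap raw file bytes in a
# base64 data-URI anchor (newer Streamlit versions offer a native
# st.download_button that replaces this pattern). A minimal sketch:
import base64

def download_button(data: bytes, filename: str, label: str) -> str:
    b64 = base64.b64encode(data).decode()
    return (f'<a href="data:application/octet-stream;base64,{b64}" '
            f'download="{filename}">{label}</a>')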
def show(self):
    new_names = []
    to_keep = []
    with st.expander("Configurations list"):
        for name in self.config_names:
            new_name = st.text_input("", name, key=f"{name} {self.key}")
            new_names.append(new_name)
            to_keep.append(st.checkbox("Show", True, key=f"Show {name} {self.key}"))
    self.data = list(compress(self.data, to_keep))
    self.config_names = list(compress(new_names, to_keep))
    self.headers = copy.deepcopy(self.data)
    list(map(lambda d: d.pop("results"), self.headers))
    for idx, header in enumerate(self.headers):
        header_view = RunHeaderView(header)
        with st.expander(self.config_names[idx]):
            header_view.show()
def show_plot_bollinger(choice, x_as_label, df, z1, z2, wdw, center_boll):
    df = do_bollinger(df, z1, z2, wdw, center_boll)
    buy_price, sell_price, bb_signal, signal_bb = implement_bb_strategy(
        df['Close'], df['boll_low_1'], df['boll_high_1'])
    if signal_bb == 1:
        plot_boll(df, choice, buy_price, sell_price, bb_signal, x_as_label)
    else:
        with st.expander(f"Bollinger - {choice} not interesting"):
            plot_boll(df, choice, buy_price, sell_price, bb_signal, x_as_label)
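# `do_bollinger` and `implement_bb_strategy` are project helpers not shown here.
# A minimal sketch of the band computation, assuming z1 and z2 are standard
# deviation multipliers, wdw the rolling window, and center_boll the pandas
# rolling(center=...) flag; the column names are inferred from the call above:
import pandas as pd

def do_bollinger(df: pd.DataFrame, z1: float, z2: float, wdw: int, center_boll: bool) -> pd.DataFrame:
    ma = df["Close"].rolling(window=wdw, center=center_boll).mean()
    sd = df["Close"].rolling(window=wdw, center=center_boll).std()
    df["boll_low_1"] = ma - z1 * sd   # inner band, used by the strategy
    df["boll_high_1"] = ma + z1 * sd
    df["boll_low_2"] = ma - z2 * sd   # outer band (assumed meaning of z2)
    df["boll_high_2"] = ma + z2 * sd
    return df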
def show_legend(ais_data):
    with st.expander("Legend", expanded=False):
        body = []
        for item in ais_data:
            empire_id = item["empire_id"]
            player = item["player_name"]
            span = colored_span(player, to_hex_color(item["color"]))
            body.append(f"- **{empire_id}**: {span}")
        st.markdown("\n".join(body), unsafe_allow_html=True)
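# `colored_span` and `to_hex_color` are small helpers assumed elsewhere in the
# project; a sketch consistent with how show_legend calls them:

def to_hex_color(color) -> str:
    # assumes an (r, g, b) tuple or list of 0-255 ints
    r, g, b = color[:3]
    return f"#{r:02x}{g:02x}{b:02x}"

def colored_span(text: str, hex_color: str) -> str:
    # rendered via st.markdown(..., unsafe_allow_html=True)
    return f'<span style="color:{hex_color}">{text}</span>'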
def show(self):
    st.header(self.name)
    if self.is_single:
        st.dataframe(self.data["data"]["search"]["results"])
    else:
        for i in range(min(len(self.data), 5)):
            with st.expander(f"Experiment: id={self.data[i]['id']}"):
                st.dataframe(self.data[i]["data"]["search"]["results"])
def accumulate_first_rows(df, x):
    """Accumulate the first x rows of a table into a single row.

    The first column is assumed to hold dates; the remaining columns are summed
    over the first rows. Note: the parameter x is currently unused; the number
    of rows to accumulate is taken from the sidebar slider instead.

    Args:
        df (pd.DataFrame): table with a date column followed by numeric columns

    Returns:
        pd.DataFrame: table with the first rows accumulated into one row
    """
    nr_of_columns = len(df.columns)
    nr_of_rows = len(df)
    column_list = df.columns.tolist()
    number_of_first_rows = st.sidebar.slider("Eerste x aantal dagen samenvoegen", 0, 21, 7)

    data = []
    first_row_sums = [0.0] * nr_of_columns
    # the accumulated row gets the date of row number_of_first_rows
    first_row_data = [df.iat[number_of_first_rows - 1, 0]]
    for r in range(nr_of_rows):
        if r < number_of_first_rows:
            for c in range(1, nr_of_columns):
                first_row_sums[c] += df.iat[r, c]
            if r == number_of_first_rows - 1:
                first_row_data.extend(first_row_sums[1:])
                data.append(first_row_data)
        else:
            data.append([df.iat[r, c] for c in range(nr_of_columns)])

    df_accumulated = pd.DataFrame(data, columns=column_list)
    with st.expander('First rows accumulated', expanded=False):
        st.subheader(f"The first {number_of_first_rows} rows accumulated")
        st.write(df_accumulated)
    return df_accumulated
def visualize_ner(
    pipe,  # NLU component pipeline
    text: str,
    ner_tags: Optional[List[str]] = None,
    show_label_select: bool = True,
    show_table: bool = False,
    title: Optional[str] = "Named Entities",
    sub_title: Optional[str] = "Recognize various `Named Entities (NER)` in text entered and filter them. You can select from over `100 languages` in the dropdown.",
    colors: Dict[str, str] = {},
    show_color_selector: bool = False,
    set_wide_layout_CSS: bool = True,
    generate_code_sample: bool = False,
    key="NLU_streamlit",
    model_select_position: str = 'side',
    show_model_select: bool = True,
    show_text_input: bool = True,
    show_infos: bool = True,
    show_logo: bool = True,
):
    StreamlitVizTracker.footer_displayed = False
    if set_wide_layout_CSS:
        _set_block_container_style()
    if show_logo:
        StreamlitVizTracker.show_logo()
    if show_model_select:
        model_selection = Discoverer.get_components('ner', include_pipes=True)
        model_selection.sort()
        if model_select_position == 'side':
            ner_model_2_viz = st.sidebar.selectbox(
                "Select a NER model", model_selection,
                index=model_selection.index(pipe.nlu_ref.split(' ')[0]))
        else:
            ner_model_2_viz = st.selectbox(
                "Select a NER model", model_selection,
                index=model_selection.index(pipe.nlu_ref.split(' ')[0]))
        pipe = pipe if pipe.nlu_ref == ner_model_2_viz else StreamlitUtilsOS.get_pipe(ner_model_2_viz)
    if title:
        st.header(title)
    if show_text_input:
        text = st.text_area("Enter text you want to visualize NER classes for below", text, key=key)
    if sub_title:
        st.subheader(sub_title)
    if generate_code_sample:
        st.code(get_code_for_viz('NER', StreamlitUtilsOS.extract_name(pipe), text))
    if ner_tags is None:
        ner_tags = StreamlitUtilsOS.get_NER_tags_in_pipe(pipe)

    if not show_color_selector:
        if show_label_select:
            exp = st.expander("Select entity labels to highlight")
            label_select = exp.multiselect(
                "These labels are predicted by the NER model. Select which ones you want to display",
                options=ner_tags, default=list(ner_tags))
        else:
            label_select = ner_tags
        pipe.viz(text, write_to_streamlit=True, viz_type='ner',
                 labels_to_viz=label_select, viz_colors=colors, streamlit_key=key)
    else:  # TODO WIP color select
        cols = st.columns(3)
        exp = cols[0].expander("Select entity labels to display")  # was beta_expander, removed in newer Streamlit
        color = st.color_picker('Pick A Color', '#00f900', key=key)
        color = cols[2].color_picker('Pick A Color for a specific entity label', '#00f900', key=key)
        tag2color = cols[1].selectbox('Pick a ner tag to color', ner_tags, key=key)
        colors[tag2color] = color
    if show_table:
        st.write(pipe.predict(text, output_level='chunk'), key=key)
    if show_infos:
        # VizUtilsStreamlitOS.display_infos()
        StreamlitVizTracker.display_model_info(pipe.nlu_ref, pipes=[pipe])
        StreamlitVizTracker.display_footer()
def test_just_label(self):
    """Test that it can be called with no params."""
    expander = st.expander("label")
    with expander:
        pass

    expander_block = self.get_delta_from_queue()
    self.assertEqual(expander_block.add_block.expandable.label, "label")
    self.assertEqual(expander_block.add_block.expandable.expanded, False)
def request_works(concept_name):
    if len(concept_name) != 0:
        search_works = requests.get(
            'https://api.openalex.org/works?search=' + concept_name.replace(" ", "%20")
            + '&filter=is_paratext:false' + polite).json()['results']
        for work in search_works:
            st.markdown("---")
            st.markdown(f"##### {work['display_name']}")
            authors_list = []
            for authorship in work['authorships']:
                author = authorship['author']
                author_display_name = (author['display_name'] if author['orcid'] is None
                                       else f"[{author['display_name']}]({author['orcid']})")
                authors_list.append(author_display_name)
            st.markdown(", ".join(authors_list))
            st.caption(
                f"Published on **{work['publication_date']}** in ***{work['host_venue']['display_name']}*** ({work['host_venue']['publisher']})"
                .replace("in ***None***", ""))
            oa_info = "🟩 **Open access**" if work['open_access']['is_oa'] else ""
            oa_info += (" (" + work['host_venue']['license'].upper() + ")"
                        if len(work['host_venue']['license'] or "") != 0 else "")
            st.markdown(oa_info)
            st.caption(urllib.parse.quote(work['doi'], safe=':/'))
            st.caption(f"{work['cited_by_count']} citations")
            with st.expander("Other sources"):
                for source in work["alternate_host_venues"]:
                    st.caption(f"- [{source['display_name']}]({source['url']})")
            with st.expander("Related concepts"):
                for work_concept in work['concepts']:
                    st.caption(work_concept['display_name'])
                    st.progress(float(work_concept['score']))
    return True
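# `polite` is a module-level query-string fragment assumed to opt into the
# OpenAlex "polite pool" by identifying the caller. A hypothetical value:
#
#     polite = "&mailto=you@example.com"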
def harmonic_frame():
    with st.expander("Look at Spherical Harmonics"):
        st.latex(r'Y_\ell^m(\theta, \phi) = \sqrt{\frac{(2\ell + 1)}{4\pi}\frac{(\ell - m)!}{(\ell + m)!}}'
                 r'e^{im\phi}P_\ell^m(\cos\theta)')
        st.latex(r'\ell \in \mathbb{N}_0 \ \ \ \ \ m \in \{-\ell,-\ell+1,...,\ell-1, \ell\}')
        st.markdown('## Harmonic Options')
        lH = st.number_input("l ", int(0), int(100), int(0), int(1), "%d")
        mH = st.number_input("m ", int(-lH), int(lH), int(0), int(1), "%d")
        fig = build_harmonic(lH, mH)
        st.plotly_chart(fig, use_container_width=True)
def do_the_rudi(df_):
    """Calculate the fractions per age group, then express each fraction's change
    relative to day 0 as a percentage of day 0.

    Made for Rudi Lousberg, inspired by Ian Denton
    https://twitter.com/IanDenton12/status/1407379429526052866

    Args:
        df_ (pd.DataFrame): table with numbers

    Returns:
        pd.DataFrame: table with the percentage change of the fractions
    """
    df__ = accumulate_first_rows(df_, 7)
    df = df__.copy(deep=False)
    df["sum"] = df.sum(axis=1)
    # from 2021-01-01T00:00:00+01:00 to yyyy-mm-dd
    df['Date_statistics'] = df['Date_statistics'].dt.date

    nr_of_columns = len(df.columns)
    nr_of_rows = len(df)
    column_list = df.columns.tolist()

    # make a new df with the fraction of each age group, row-wise
    data = []
    for r in range(nr_of_rows):
        row_data = []
        for c in range(nr_of_columns):
            try:
                row_data.append(round((df.iat[r, c] / df.at[r, "sum"] * 100), 2))
            except Exception:  # date column
                row_data.append(df.iat[r, c])
        data.append(row_data)
    df_fractions = pd.DataFrame(data, columns=column_list)
    with st.expander('The fractions', expanded=False):
        st.subheader("The fractions")
        st.write(df_fractions)

    # calculate the percentage change of the fractions relative to day 0
    data = []
    for r in range(nr_of_rows):
        row_data = []
        for c in range(nr_of_columns):
            try:
                row_data.append(round(
                    ((df_fractions.iat[r, c] - df_fractions.iat[0, c])
                     / df_fractions.iat[0, c] * 100), 2))
            except Exception:  # date column
                row_data.append(df_fractions.iat[r, c])
        data.append(row_data)
    return pd.DataFrame(data, columns=column_list)
def try_expander(expander_name, sidebar=True):
    if sidebar:
        try:
            return st.sidebar.expander(expander_name)
        except AttributeError:
            return st.sidebar.beta_expander(expander_name)
    else:
        try:
            return st.expander(expander_name)
        except AttributeError:
            return st.beta_expander(expander_name)
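# Example usage — the fallback keeps the app working on Streamlit versions from
# before st.beta_expander was renamed to st.expander:
#
#     with try_expander("Advanced settings", sidebar=False):
#         st.write("Only shown when expanded")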
def hermite_frame():
    with st.expander("Look at Hermite Polynomials"):
        st.latex(r'H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n}e^{-x^2}')
        st.latex(r'n \in \mathbb{N}_0')
        st.markdown('## Hermite Options')
        collect_nums = lambda x: [int(i) for i in re.split("[^0-9]", x) if i != ""]
        filter_nums = lambda _list: [item for item in _list if 0 <= item <= 100]
        nums = st.text_input("n [type any number of values to plot]", "0, 1, 2, 3, 4")
        nH = filter_nums(collect_nums(nums))
        fig = build_hermite(nH)
        st.plotly_chart(fig, use_container_width=True)
def visualize(data):
    streamlit.markdown('''## Runtime: {}'''.format(data[KEY_RUNTIME]))
    with streamlit.expander('Description'):
        streamlit.write(data[KEY_LONG_DESSCR])
    with streamlit.expander('Notes'):
        streamlit.write(data[KEY_NOTES])
    with streamlit.expander('Hyperparameters'):
        streamlit.write(data[KEY_HYPERPARAMETERS])
    for idx, (name, log) in enumerate(data[KEY_LOGS_PROCESSED].items()):
        streamlit.markdown('''## {}'''.format(name))
        slider_episodes = False
        slider_frames = False
        c1, c2, c3, c4 = streamlit.columns(4)
        if c1.button(f'Download High Resolution ID{idx}'):
            download_high_res(name, data[KEY_LOGS_RAW][name])
        if c2.checkbox(f'Episode Slider ID{idx}'):  # if plot type in ['histogram', 'histogram2d']
            slider_episodes = True
        if c3.checkbox(f'Frame Slider ID{idx}'):
            slider_frames = True
            slider_episodes = False
        c4.markdown('''Compression Factor: x{}'''.format(log[KEY_COMPRESSION]))
        figure = compute_figure(name, log, slider_episodes, slider_frames)
        if figure:
            streamlit.altair_chart(figure, use_container_width=FILL_BROWSER_WIDTH)
        else:
            streamlit.write('No data for this partition, how can this happen?')
def display_FTE_count(proj_data):
    c1, c2, c3, c4 = st.columns([1.2, 1, 1, 1])
    with c1:
        with st.expander("Count of Associates"):
            st.write(len(proj_data))
    with c2:
        with st.expander("Total FTE"):
            st.write(proj_data['FTE'].sum().round(2))
    with c3:
        with st.expander("Onsite FTE"):
            on_filter = (proj_data['Offshore_Onsite'] == 'Onsite')
            st.write(proj_data[on_filter]['FTE'].sum().round(2))
    with c4:
        with st.expander("Offshore FTE"):
            off_filter = (proj_data['Offshore_Onsite'] == 'Offshore')
            st.write(proj_data[off_filter]['FTE'].sum().round(2))
    return proj_data
def laguerre_frame():
    with st.expander("Look at Associated Laguerre Functions"):
        st.latex(r'L_q^p(x) = (-1)^p \frac{d^p}{dx^p}L_{p+q}(x)')
        st.latex(r'L_q(x) = \frac{e^x}{q!}\frac{d^q}{dx^q}(e^{-x}x^q)')
        st.latex(r'q \in \mathbb{N}_0 \ \ \ \ \ p \in \{0,1,...,q\}')
        st.markdown('## Laguerre Options')
        collect_nums = lambda x: [int(i) for i in re.split("[^0-9]", x) if i != ""]
        filter_nums = lambda _list: [item for item in _list if 0 <= item <= 100]
        nums = st.text_input("q [type any number of values to plot]", "0, 1, 2, 3, 4")
        q = filter_nums(collect_nums(nums))
        # guard against empty input: min() on an empty list would raise
        p_max = min(q) if q else 0
        p = st.number_input('p', int(0), int(p_max), int(0), int(1), "%d")
        fig = build_laguerre(q, p)
        st.plotly_chart(fig, use_container_width=True)
def bessel_frame():
    with st.expander("Look at Bessel Functions"):
        st.latex(r'j_\ell(x) = (-x)^\ell \bigg(\frac{1}{x}\frac{d}{dx}\bigg)^\ell \frac{\sin(x)}{x}')
        st.latex(r'n_\ell(x) = -(-x)^\ell \bigg(\frac{1}{x}\frac{d}{dx}\bigg)^\ell \frac{\cos(x)}{x}')
        st.latex(r'\ell \in \mathbb{N}_0')
        st.markdown('## Bessel Options')
        collect_nums = lambda x: [int(i) for i in re.split("[^0-9]", x) if i != ""]
        filter_nums = lambda _list: [item for item in _list if 0 <= item <= 70]
        nums = st.text_input("l [type any number of values to plot] ", "0, 1, 2, 3, 4")
        type = st.selectbox("Plot Type", ["Spherical Bessel", "Spherical Neumann"], 0)
        lB = filter_nums(collect_nums(nums))
        fig = build_bessel(lB, type)
        st.plotly_chart(fig, use_container_width=True)
def show_keyword_co_coccurrences(corpus, number_of_topics, number_of_chunks):
    st.header("Keyword co-occurrences")
    if corpus is None:
        st.markdown("Please upload a corpus first")
    else:
        with st.expander("Help"):
            st.markdown('''
                Summarize the top documents in a given topic as a graph. Its nodes are
                keywords in the documents (excluding language-specific, but not
                user-defined stopwords), and its edges indicate that two keywords appear
                in the same sentence. The thickness of an edge indicates how often two
                keywords occur together (at least *minimum edges* times).
            ''')
        navigate_topics_by_weight, keywords_selected_topic = topic_slider(number_of_topics)
        keywords_cut_off = st.sidebar.slider("Minimum topic weight", 0.0, 1.0, value=0.8, step=0.05)
        keywords_min_edges = st.sidebar.slider("Minimum number of edges", 1, 15, value=5)
        topic = keywords_selected_topic
        if navigate_topics_by_weight:
            topic_order = sort_topics(topic_model(corpus, number_of_topics, number_of_chunks), corpus)
            topic = topic_order[topic]
        graph, nodes, top_documents = keyword_coocurrence_graph(
            topic_model(corpus, number_of_topics, number_of_chunks),
            corpus, topic, keywords_min_edges, keywords_cut_off)
        show_topic_info(corpus, number_of_topics, number_of_chunks, topic)
        keywords = topic_keywords(topic_model(corpus, number_of_topics, number_of_chunks), topic)
        if len(nodes) == 0:
            st.markdown("No graph. Use less restrictive criteria.")
        else:
            graph.show("keyword-graph.html")
            components.html(open("keyword-graph.html", 'r', encoding='utf-8').read(), height=625)
            st.markdown("Top-ranked documents for this topic")
            top_documents_df = pd.DataFrame(corpus.documents).iloc[top_documents]
            for i, row in top_documents_df.iterrows():
                with st.expander(str(row["name"])):
                    document = annotated_document(corpus, row["content"], keywords)
                    st.markdown(document, unsafe_allow_html=True)
            download_link(top_documents_df, "top-documents-{}.csv".format(topic), "Download top documents")
def bonds():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
    }
    r = requests.get("https://www.investing.com/rates-bonds/world-government-bonds", headers=headers)
    tables = pd.read_html(r.text)
    for keys in table_num:
        with st.expander(f"{keys.replace('_', ' ')}"):
            st.dataframe(tables[table_num[keys]].drop(['Unnamed: 0', 'Unnamed: 9'], axis=1))
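# `table_num` is a module-level dict assumed to map section names to the index
# of the matching table scraped from the page. A hypothetical example (the
# actual indices depend on the page layout, so verify against pd.read_html):
#
#     table_num = {"Major_10Y": 0, "Americas": 1, "Europe": 2}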
def legendre_frame():
    with st.expander("Look at Associated Legendre Functions"):
        st.latex(r'P_{\ell}(x) = \frac{1}{2^\ell \ell!}\frac{d^\ell}{dx^\ell}(x^2-1)^\ell')
        st.latex(r'P_{\ell}^m(x) = (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}P_{\ell}(x);\ \ P_{\ell}^{-m}(x) = (-1)^m\frac{(\ell - m)!}{(\ell+m)!}P_{\ell}^{m}(x)')
        st.latex(r'\ell \in \mathbb{N}_0 \ \ \ \ \ m \in \{-\ell,-\ell+1,...,\ell-1, \ell\}')
        st.markdown('## Legendre Options')
        collect_nums = lambda x: [int(i) for i in re.split("[^0-9]", x) if i != ""]
        filter_nums = lambda _list: [item for item in _list if 0 <= item <= 100]
        nums = st.text_input("l [type any number of values to plot]", "0, 1, 2, 3, 4")
        lP = filter_nums(collect_nums(nums))
        # guard against empty input: min() on an empty list would raise
        l_min = min(lP) if lP else 0
        mP = st.number_input('m ', int(-l_min), int(l_min), int(0), int(1), "%d")
        type = st.selectbox("Plot Type", ["Cartesian (x)", "Polar (cos x)"], 0)
        fig = build_legendre(lP, mP, type)
        st.plotly_chart(fig, use_container_width=True)