def layout_func():
    """Build the leaderboard page: a table of methods with win counts,
    proportions, and links from each method name to a pre-filtered search."""
    leaderboard = get_leaderboard_df(self.series_list)
    leaderboard["Proportion"] = leaderboard["Total"] / leaderboard["Total"].sum()

    table = dbc.Table.from_dataframe(leaderboard, index=True, index_label="Method")

    # Rewrite each method name in the table body's index column into a
    # link to the search page pre-filtered on that method.
    for body_row in table.children[1].children:
        method_cell = body_row.children[0]
        query = urlencode({"methods": [method_cell.children]}, doseq=True)
        method_cell.children = html.A(method_cell.children, href=f"/search/?{query}")

    page_container = dbc.Container(
        [
            breadcrumb_layout([("Home", "/"), (f"{self.title}", "")]),
            html.H2(self.title),
            table,
        ]
        + footer()
    )
    return html.Div(header() + [dcc.Location(id="url", refresh=False), page_container])
def setup(self):
    """Build the search/filter page: the static layout plus three callbacks
    that (1) populate the filter panel from the URL, (2) mirror the filter
    state back into the URL query string, and (3) render matching series.
    """
    self.config.suppress_callback_exceptions = True

    # Dynamically load tags. Use a context manager so the file handle is
    # closed even if json.load raises (the original leaked the handle on
    # a parse error).
    with open("../shared_config/data_sources.json") as data_sources_json_file:
        self.series_list = json.load(data_sources_json_file)

    self.layout = html.Div(header() + [
        dcc.Location(id="url", refresh=False),
        dbc.Container(
            [
                breadcrumb_layout([("Home", "/"), ("Filter", "")]),
                dbc.Row([
                    dbc.Col(id="filter_panel", lg=3, sm=3),
                    dbc.Col(
                        [
                            html.H4("Results"),
                            dcc.Loading(html.Div(id="filter_results")),
                        ],
                        lg=9,
                        sm=9,
                    ),
                ]),
            ] + footer(),
        ),
    ])

    def filter_panel_children(params, tags, methods):
        # Form controls for the left-hand filter column. apply_default_value
        # pre-fills each control from the parsed URL parameters.
        children = [
            html.H4("Filters"),
            dbc.FormGroup([
                dbc.Label("Name"),
                apply_default_value(params)(dbc.Input)(
                    id="name",
                    placeholder="Name of a series...",
                    type="search",
                    value="",
                ),
                dbc.FormText("Type something in the box above"),
            ]),
            dbc.FormGroup([
                dbc.Label("Tags"),
                apply_default_value(params)(dbc.Checklist)(
                    options=[{
                        "label": t,
                        "value": t
                    } for t in tags],
                    value=[],
                    id="tags",
                ),
            ]),
            dbc.FormGroup([
                dbc.Label("Method"),
                apply_default_value(params)(dbc.Checklist)(
                    options=[{
                        "label": m,
                        "value": m
                    } for m in methods],
                    value=[],
                    id="methods",
                ),
            ]),
        ]
        return children

    component_ids = ["name", "tags", "methods"]

    @self.callback(Output("filter_panel", "children"),
                   [Input("url", "href")])
    @location_ignore_null([Input("url", "href")], "url")
    def filter_panel(value):
        """Build the filter panel, defaulting controls from the URL."""
        parse_result = parse_state(value)

        all_tags = []
        for series_dict in self.series_list:
            all_tags.extend(series_dict["tags"])
        all_tags = sorted(set(all_tags))

        # Dynamically load methods
        stats = get_forecast_data("statistics")
        all_methods = sorted(stats["models_used"])

        return filter_panel_children(parse_result, all_tags, all_methods)

    @self.callback(
        Output("url", "search"),
        inputs=[Input(i, "value") for i in component_ids],
    )
    @dash_kwarg([Input(i, "value") for i in component_ids])
    def update_url_state(**kwargs):
        """Serialise the current filter values into the query string so the
        page state is shareable and bookmarkable."""
        state = urlencode(kwargs, doseq=True)
        return f"?{state}"

    @self.callback(
        Output("filter_results", "children"),
        [Input(i, "value") for i in component_ids],
    )
    @dash_kwarg([Input(i, "value") for i in component_ids])
    def filter_results(**kwargs):
        """Render thumbnails for every series matching ALL active filters."""
        # Fix up name: it arrives as a list when restored from the URL.
        # isinstance replaces the original `type(...) == list` comparison.
        if isinstance(kwargs["name"], list):
            kwargs["name"] = "".join(kwargs["name"])

        # Filtering by AND-ing conditions together
        forecast_series_dicts = {}
        for series_dict in self.series_list:
            try:
                forecast_series_dicts[
                    series_dict["title"]] = get_forecast_data(
                        series_dict["title"])
            except FileNotFoundError:
                # No forecast generated yet for this series; skip it.
                continue

        filters = {
            "name": match_names,
            "tags": match_tags,
            "methods": match_methods,
        }

        list_filter_matches = []
        for filter_key, filter_fn in filters.items():
            matched_series_names = filter_fn(forecast_series_dicts,
                                             kwargs[filter_key])
            list_filter_matches.append(matched_series_names)

        # sorted() already returns a list; the original wrapped it in list().
        unique_series_titles = sorted(
            set.intersection(*list_filter_matches))

        if len(unique_series_titles) > 0:
            results_list = []
            for item_title in unique_series_titles:
                series_data = forecast_series_dicts[item_title]
                url_title = urlencode({"title": item_title})
                thumbnail_figure = get_thumbnail_figure(series_data)
                results_list.append(
                    html.Div([
                        html.A(
                            [
                                html.H5(item_title),
                                dcc.Graph(
                                    figure=thumbnail_figure,
                                    config={"displayModeBar": False},
                                ),
                            ],
                            href=f"/series?{url_title}",
                        ),
                        html.Hr(),
                    ]))
            results = [
                html.
                P(f"{len(unique_series_titles)} result{'s' if len(unique_series_titles) > 1 else ''} found"
                  ),
                html.Div(results_list),
            ]
        else:
            results = [html.P("No results found")]

        return results
def setup(self):
    """Build the single-series page.

    Lays out the series graph, a forecast-method dropdown, a metadata
    list and a forecast table, then registers the callbacks that keep
    them in sync with the URL's "title" query parameter and the
    selected model.
    """
    self.layout = html.Div(header() + [
        dcc.Location(id="url", refresh=False),
        dbc.Container([
            breadcrumb_layout([("Home", "/"), ("Series", "")]),
            dcc.Loading(dbc.Row([dbc.Col(id="series_graph", lg=12)])),
            dbc.Row([
                dbc.Col(
                    [
                        dbc.FormGroup([
                            dbc.Label("Forecast Method"),
                            # Options/value are filled by update_model_selector.
                            dcc.Dropdown(
                                id="model_selector",
                                clearable=False,
                            ),
                        ]),
                        dcc.Loading(html.Div(id="meta_data_list")),
                    ],
                    lg=6,
                ),
                dbc.Col(
                    [
                        dbc.FormGroup([
                            # Chooses which forecast columns the table shows.
                            dcc.Dropdown(
                                options=[
                                    {
                                        "label": "Forecast",
                                        "value": "Forecast",
                                    },
                                    {
                                        "label": "50% CI",
                                        "value": "CI_50",
                                    },
                                    {
                                        "label": "75% CI",
                                        "value": "CI_75",
                                    },
                                    {
                                        "label": "95% CI",
                                        "value": "CI_95",
                                    },
                                ],
                                value="Forecast",
                                clearable=False,
                                id="forecast_table_selector",
                            ),
                        ]),
                        dcc.Loading(html.Div(id="forecast_table")),
                    ],
                    lg=6,
                ),
            ]),
        ] + footer()),
    ])

    def series_input(inputs, location_id="url"):
        """Decorator factory: resolve the URL's "title" parameter into
        series data.

        The wrapped callback receives the loaded series data dict as its
        first argument instead of the raw location value; all other
        callback inputs are passed through as keyword arguments. Raises
        PreventUpdate when the URL carries no "title" parameter.
        """
        def accept_func(func):
            @wraps(func)
            def wrapper(*args):
                # Re-associate positional callback args with their
                # component ids so the location value can be found.
                input_names = [item.component_id for item in inputs]
                kwargs_dict = dict(zip(input_names, args))
                parse_result = parse_state(kwargs_dict[location_id])
                if "title" in parse_result:
                    title = parse_result["title"][0]
                    series_data_dict = get_forecast_data(title)
                    # The location value is consumed here; drop it so the
                    # wrapped function only sees the remaining inputs.
                    del kwargs_dict[location_id]
                    return func(series_data_dict, **kwargs_dict)
                else:
                    raise PreventUpdate

            return wrapper

        return accept_func

    inputs = [Input("url", "href")]

    @self.callback(Output("breadcrumb", "children"), inputs)
    @location_ignore_null(inputs, location_id="url")
    @series_input(inputs, location_id="url")
    def update_breadcrumb(series_data_dict):
        # Prefer the data source's short title when one is provided.
        return (series_data_dict["data_source_dict"]["short_title"]
                if "short_title" in series_data_dict["data_source_dict"]
                else series_data_dict["data_source_dict"]["title"])

    @self.callback(
        Output("series_graph", "children"),
        inputs + [Input("model_selector", "value")],
    )
    @location_ignore_null(inputs, location_id="url")
    @series_input(inputs + [Input("model_selector", "value")],
                  location_id="url")
    def update_series_graph(series_data_dict, **kwargs):
        """Render the main series figure for the selected model."""
        model_name = kwargs["model_selector"]
        series_figure = get_series_figure(series_data_dict, model_name)
        series_graph = dcc.Graph(
            figure=series_figure,
            config={
                # Trim the plotly mode bar to the tools that make sense
                # for a time-series view.
                "modeBarButtonsToRemove": [
                    "sendDataToCloud",
                    "autoScale2d",
                    "hoverClosestCartesian",
                    "hoverCompareCartesian",
                    "lasso2d",
                    "select2d",
                    "toggleSpikelines",
                ],
                "displaylogo": False,
            },
        )
        return series_graph

    @self.callback(
        [
            Output("model_selector", "options"),
            Output("model_selector", "value"),
        ],
        inputs,
    )
    @location_ignore_null(inputs, location_id="url")
    @series_input(inputs, location_id="url")
    def update_model_selector(series_data_dict):
        """Populate the model dropdown and pre-select the best model."""
        best_model_name = select_best_model(series_data_dict)
        stats = get_forecast_data("statistics")
        all_methods = sorted(stats["models_used"])
        # Map each method name to its display label; the best model gets
        # a distinguishing suffix.
        all_methods_dict = dict(zip(all_methods, all_methods))
        all_methods_dict[
            best_model_name] = f"{best_model_name} - Best Model"
        model_select_options = [{
            "label": v,
            "value": k
        } for k, v in all_methods_dict.items()]
        return model_select_options, best_model_name

    @self.callback(
        Output("meta_data_list", "children"),
        inputs + [Input("model_selector", "value")],
    )
    @location_ignore_null(inputs, location_id="url")
    @series_input(inputs + [Input("model_selector", "value")],
                  location_id="url")
    def update_meta_data_list(series_data_dict, **kwargs):
        """List model details, timestamps and the data source link."""
        model_name = kwargs["model_selector"]
        model_description = series_data_dict["all_forecasts"][model_name][
            "model_description"]
        # A description equal to the model name adds no information.
        if model_description == model_name:
            model_description = ""
        model_cv_score = series_data_dict["all_forecasts"][model_name][
            "cv_score"]
        return dbc.ListGroup([
            dbc.ListGroupItem([
                dbc.ListGroupItemHeading("Model Details"),
                dbc.ListGroupItemText([
                    html.P(model_name),
                    html.P(model_description),
                    html.P("CV score: %f" % model_cv_score),
                ]),
            ]),
            dbc.ListGroupItem([
                dbc.ListGroupItemHeading("Forecast Updated At"),
                dbc.ListGroupItemText(
                    series_data_dict["forecasted_at"].strftime(
                        "%Y-%m-%d %H:%M:%S")),
            ]),
            dbc.ListGroupItem([
                dbc.ListGroupItemHeading("Data Collected At"),
                dbc.ListGroupItemText(
                    series_data_dict["downloaded_dict"]
                    ["downloaded_at"].strftime("%Y-%m-%d %H:%M:%S")),
            ]),
            dbc.ListGroupItem([
                dbc.ListGroupItemHeading("Data Source"),
                dbc.ListGroupItemText([
                    html.A(
                        series_data_dict["data_source_dict"]["url"],
                        href=series_data_dict["data_source_dict"]["url"],
                    )
                ]),
            ]),
        ])

    @self.callback(
        Output("forecast_table", "children"),
        inputs + [
            Input("forecast_table_selector", "value"),
            Input("model_selector", "value"),
        ],
    )
    @location_ignore_null(inputs, location_id="url")
    @series_input(
        inputs + [
            Input("forecast_table_selector", "value"),
            Input("model_selector", "value"),
        ],
        location_id="url",
    )
    def update_forecast_table(series_data_dict, **kwargs):
        """Tabulate the selected model's forecast or CI bounds."""
        # Maps the selector value to the forecast_df columns to display.
        selected_column_map = {
            "Forecast": ["Forecast"],
            "CI_50": ["LB_50", "UB_50"],
            "CI_75": ["LB_75", "UB_75"],
            "CI_95": ["LB_95", "UB_95"],
        }
        model_name = kwargs["model_selector"]
        dataframe = series_data_dict["all_forecasts"][model_name][
            "forecast_df"]
        # Normalise the lowercase "forecast" column to match the
        # selector's "Forecast" key.
        column_name_map = {"forecast": "Forecast"}
        dataframe = dataframe.rename(
            column_name_map, axis=1)[selected_column_map[
                kwargs["forecast_table_selector"]]].round(4)
        # assumes forecast_df has a DatetimeIndex — TODO confirm upstream.
        dataframe.index = dataframe.index.strftime("%Y-%m-%d %H:%M:%S")
        table = dbc.Table.from_dataframe(dataframe,
                                         index=True,
                                         index_label="Date")
        return table
def setup(self):
    """Build the static About page (mission, contributors, research team)."""
    # Load both profile groups from a single read. The original called
    # json.load(open(...)) twice, opening the file twice and never
    # closing either handle.
    with open("static_files/profiles.json") as profiles_file:
        profiles = json.load(profiles_file)
    contributors = profiles["contributors"]
    research_team = profiles["research_team"]

    self.layout = html.Div(
        header()
        + [
            dcc.Location(id="url", refresh=False),
            dbc.Container(
                [
                    breadcrumb_layout(
                        [("Home", "/"), (f"{self.title}", "")]
                    ),
                    dbc.Row(
                        [
                            dbc.Col(
                                [
                                    html.H2("About"),
                                    html.P(
                                        [
                                            """ This website is an intuitive tool that makes business forecasting accessible to the wider community. You can easily obtain predictions of commonly used variables together with the uncertainty around them. The website implements classical forecasting models as well as the novel models and methods developed by the members of the Time Series and Forecasting (TSF) research group in the University of Sydney Business School. The website visualizes and summarizes the forecasting results in an easy-to-understand manner. The forecasts are updated daily and include the latest publicly available information. It is an open-source project under the AGPL license, see """,
                                            html.A("https://github.com/forecastlab/forecast_dash",
                                                   href="https://github.com/forecastlab/forecast_dash"
                                                   ),
                                            " .",
                                        ]
                                    ),
                                ],
                                lg=6,
                            ),
                            dbc.Col(
                                [
                                    dbc.Jumbotron(
                                        [
                                            html.H1("Our Mission"),
                                            html.Ol(
                                                [
                                                    html.Li(
                                                        "To make forecasting models accessible to everyone."
                                                    ),
                                                    html.Li(
                                                        "To provide the latest financial and economic forecasts of the commonly used time series."
                                                    ),
                                                ]
                                            ),
                                        ]
                                    )
                                ],
                                lg=6,
                            ),
                        ]
                    ),
                    dbc.Row(
                        dbc.Col(
                            html.H2(
                                "Powered By",
                                style={"margin-bottom": "32px"},
                            ),
                            lg=12,
                        )
                    ),
                    dbc.Row(
                        parse_poweredby("static_files/poweredby.json")
                    ),
                    dbc.Row(
                        dbc.Col(
                            html.H2(
                                "Core Contributors",
                                style={"margin-bottom": "32px"},
                            ),
                            lg=12,
                        )
                    ),
                    dbc.Row(parse_people(contributors)),
                    dbc.Row(
                        dbc.Col(
                            html.H2(
                                "Research Group Leaders",
                                style={"margin-bottom": "32px"},
                            ),
                            lg=12,
                        )
                    ),
                    dbc.Row(parse_people(research_team)),
                ]
                + footer(),
                style={"margin-bottom": "64px"},
                className="mb-5",
            ),
        ]
    )
def setup(self):
    """Build the blog index page: paginated post previews with
    previous/earlier navigation links driven by the ?page= parameter."""
    self.layout = html.Div(header() + [
        dcc.Location(id="url", refresh=False),
        dbc.Container(
            [
                breadcrumb_layout([("Home", "/"), ("Blog", "")]),
                dbc.Row([
                    dbc.Col([html.H1("Recent posts"), html.Hr()]),
                ]),
                html.Div(id="body"),
            ] + footer(),
            style={"margin-bottom": "64px"},
        ),
    ])

    @self.callback(Output("body", "children"), [Input("url", "href")])
    @location_ignore_null([Input("url", "href")], "url")
    def body(value):
        """Render one page of post previews plus bottom navigation."""
        # Find page number; default to the first page.
        parse_result = parse_state(value)
        if "page" not in parse_result:
            parse_result["page"] = ["1"]
        page_int = int(parse_result["page"][0])

        # Load blog posts. The dot before "md" is escaped — the original
        # pattern r".*.md" let the dot match any character, so a file
        # named e.g. "notes_md" would have matched too.
        filenames = glob_re(r".*\.md", "../blog")
        n_posts = len(filenames)

        blog_posts = []
        for filename in filenames:
            fm_dict = Frontmatter.read_file("../blog/" + filename)
            fm_dict["filename"] = filename.split(".md")[0]
            blog_posts.append(fm_dict)

        # Sort by date, newest first.
        blog_posts = sorted(blog_posts,
                            key=lambda x: x["attributes"]["date"],
                            reverse=True)

        # Render post previews: strip links and shorten to ~280 chars.
        h = html2text.HTML2Text()
        h.ignore_links = True

        n_posts_per_page = 5
        start = (page_int - 1) * n_posts_per_page
        end = min((page_int) * n_posts_per_page, n_posts)

        body = []
        for i in range(start, end):
            blog_post = blog_posts[i]
            # Posts flagged type=html in their frontmatter skip the
            # markdown conversion.
            if ("type" in blog_post["attributes"]
                    and blog_post["attributes"]["type"] == "html"):
                body_html = blog_post["body"]
            else:
                body_html = markdown2.markdown(blog_post["body"],
                                               extras=markdown_extras)

            preview = textwrap.shorten(h.handle(body_html),
                                       280,
                                       placeholder="...")

            body.append(
                dbc.Row(
                    dbc.Col(
                        [
                            html.A(
                                html.H2(
                                    blog_post["attributes"]["title"],
                                    style={"padding-top": "8px"},
                                ),
                                href=f"post?title={blog_post['filename']}",
                                id=blog_post["filename"],
                            ),
                            html.P(
                                [
                                    " by ",
                                    blog_post["attributes"]["author"],
                                    ", ",
                                    humanize.naturaltime(datetime.now(
                                    ) - datetime.strptime(
                                        blog_post["attributes"]["date"],
                                        "%Y-%m-%d",
                                    )),
                                ],
                                className="subtitle mt-0 text-muted small",
                            ),
                            html.Div(preview,
                                     style={"padding-bottom": "8px"}),
                            html.A(
                                html.P(
                                    html.Strong(
                                        "Read more",
                                        className="text-left",
                                    ),
                                    style={"padding-bottom": "24px"},
                                ),
                                href=f"post?title={blog_post['filename']}",
                            ),
                            html.Hr(),
                        ],
                        lg=8,
                    )))

        # Add bottom navigation
        # Previous | Page X of Y | Earlier
        n_pages = math.ceil(n_posts / n_posts_per_page)
        body.append(
            dbc.Row([
                dbc.Col(
                    html.A(
                        html.P("< Previous Posts"),
                        id="previous_link",
                        href=f"?page={page_int+1}",
                        className="text-left",
                    ) if page_int < n_pages else [],
                    lg=2,
                ),
                dbc.Col(
                    html.P(
                        f"Page {page_int} of {n_pages}",
                        className="text-center",
                    ),
                    lg=4,
                ),
                dbc.Col(
                    html.A(
                        html.P("Earlier Posts >"),
                        # Copy-paste bug fix: this link previously reused
                        # id="previous_link", creating duplicate element
                        # ids whenever both navigation links rendered.
                        # NOTE(review): this link goes to page-1 (newer
                        # posts) despite the "Earlier" label — confirm the
                        # wording is intentional before changing it.
                        id="earlier_link",
                        href=f"?page={page_int-1}",
                        className="text-right",
                    ) if page_int > 1 else [],
                    lg=2,
                ),
            ]))
        return body
def setup(self):
    """Build the single blog-post page and its breadcrumb/content callbacks."""
    self.layout = html.Div(header() + [
        dcc.Location(id="url", refresh=False),
        dbc.Container(
            [
                breadcrumb_layout([("Home", "/"), ("Blog", "/blog"),
                                   ("Post", "")]),
                dbc.Row(dbc.Col(id="post", lg=12)),
            ] + footer(),
            style={"margin-bottom": "64px"},
        ),
    ])

    inputs = [Input("url", "href")]

    def _load_post(url):
        """Resolve the URL's "title" parameter to a parsed blog post.

        Shared by both callbacks below (the original duplicated this
        lookup). Raises PreventUpdate when the title is absent or no
        matching markdown file exists, so the current output is kept.
        """
        parse_result = parse_state(url)
        if "title" not in parse_result:
            # The original fell through and implicitly returned None
            # here; PreventUpdate keeps the existing children instead.
            raise PreventUpdate
        title = parse_result["title"][0]
        try:
            # glob_re returns a list of matches; [0] raises IndexError
            # when nothing matched. The original used a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.
            filename = glob_re(f"{title}.md", "../blog")[0]
        except IndexError:
            raise PreventUpdate
        post = Frontmatter.read_file("../blog/" + filename)
        post["filename"] = filename.split(".md")[0]
        return post

    @self.callback(Output("breadcrumb", "children"), inputs)
    @location_ignore_null(inputs, location_id="url")
    def update_breadcrumb(url):
        """Show the post's title in the breadcrumb."""
        return _load_post(url)["attributes"]["title"]

    @self.callback(Output("post", "children"), inputs)
    @location_ignore_null(inputs, location_id="url")
    def update_content(url):
        """Render the full post: title link, byline, and body."""
        blog_post = _load_post(url)
        return [
            html.A(
                html.H2(blog_post["attributes"]["title"]),
                href=f"/blog?post={blog_post['filename']}",
                id=blog_post["filename"],
            ),
            html.Hr(),
            html.P(
                [
                    " by ",
                    blog_post["attributes"]["author"],
                    ", ",
                    humanize.naturaltime(
                        datetime.now() - datetime.strptime(
                            blog_post["attributes"]["date"], "%Y-%m-%d")),
                ],
                className="subtitle mt-0 text-muted small",
            ),
            # type=html posts are injected verbatim; others are rendered
            # as markdown.
            dash_dangerously_set_inner_html.DangerouslySetInnerHTML(
                blog_post["body"])
            if "type" in blog_post["attributes"]
            and blog_post["attributes"]["type"] == "html" else
            dcc.Markdown(blog_post["body"]),
        ]