def run_main():
    """Entry point for the PyHDX panel server.

    Seeds the RNGs for reproducible fitting, verifies the configured Dask
    cluster, configures per-day file logging, then serves ``APP_DICT``.
    Returns silently when no valid cluster is found.
    """
    # Fixed seeds so fitting results are reproducible across server restarts
    np.random.seed(43)
    torch.manual_seed(43)

    cluster = ConfigurationSettings().cluster
    if not verify_cluster(cluster):
        # No valid Dask cluster: do nothing (mirrors the sibling run_apps guard)
        return

    print("Welcome to the PyHDX server!")

    # Per-day log directory, e.g. ~/.pyhdx/logs/20240101
    log_root_dir = Path.home() / '.pyhdx' / 'logs'
    log_dir = log_root_dir / datetime.datetime.now().strftime('%Y%m%d')
    log_dir.mkdir(parents=True, exist_ok=True)  # catch error when log dir does not exist

    # One shared format for both file handlers (was duplicated verbatim)
    formatter = logging.Formatter(
        '%(asctime)s %(name)s [%(levelname)s]: %(message)s', "%Y-%m-%d %H:%M:%S")

    root_log = logging.getLogger('pyhdx')
    root_log.setLevel(logging.DEBUG)
    fh = logging.FileHandler(log_dir / 'pyhdx_logs.txt')
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)
    root_log.addHandler(fh)
    root_log.info('Starting PyHDX server')

    tornado_logger = logging.getLogger('tornado.application')
    fh = logging.FileHandler(log_dir / 'tornado_logs.txt')
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)  # was the magic number 10 (== logging.DEBUG)
    tornado_logger.addHandler(fh)

    pn.serve(APP_DICT, static_dirs={'pyhdx': STATIC_DIR})
def serve(
    data_dir: pathlib.Path = typer.Argument(
        "data",
        file_okay=False,
        dir_okay=True,
        resolve_path=True,
        help="The directory that contains the output that will be visualized",
    ),
    port: int = typer.Option(
        0,
        help="Specify the port on which the server will be listening to. A random one is chosen by default."
    ),
    websocket_origin: str = typer.Option(
        "localhost", help="The host that can connect to the websocket"),
    show: bool = typer.Option(
        True, help="Whether to open the server in a new browser tab on start"),
) -> None:
    """Serve the visualization panels for the model output found in ``data_dir``."""
    # Move imports in here for improved performance
    import panel

    from .panels import elevation_max
    from .panels import elevation
    from .panels import video
    from .panels import grid
    from .panels import about
    from .panels import time_series

    panel.serve(
        # Lambdas so each session gets a fresh panel bound to data_dir
        panels={
            "About": lambda: about(data_dir),
            "Mesh": lambda: grid(data_dir),
            "Max_Elevation": lambda: elevation_max(data_dir),
            "Elevation": lambda: elevation(data_dir),
            "Animation": lambda: video(data_dir),
            "Stations": lambda: time_series(data_dir)
        },
        title={
            "About": "General Info",
            "Mesh": "Display grid",
            "Max_Elevation": "Interactive map with the maximum elevation in the next 72hours",
            "Elevation": "Interactive maps with hourly elevation for the next 72hours",
            "Animation": "Video with the evolution of elevation data",
            # Fixed user-facing typo: "guage" -> "gauge"
            "Stations": "Tide gauge Time Series",
        },
        port=port,
        index=(paths.TEMPLATES / "index.html").resolve().as_posix(),
        show=show,
        websocket_origin=websocket_origin,
    )
def run_apps():
    """Entry point for the PyHDX web application.

    Seeds the RNGs for reproducible fitting, verifies that the configured
    Dask scheduler is reachable (printing a message and returning if not),
    configures per-day file logging, ensures the assets dir exists, and
    serves ``APP_DICT``.
    """
    # Fixed seeds so fitting results are reproducible across restarts
    np.random.seed(43)
    torch.manual_seed(43)

    scheduler_address = cfg.get("cluster", "scheduler_address")
    if not verify_cluster(scheduler_address):
        print(
            f"No valid Dask scheduler found at specified address: '{scheduler_address}'"
        )
        return

    # Per-day log directory, e.g. ~/.pyhdx/logs/20240101
    log_root_dir = Path.home() / ".pyhdx" / "logs"
    log_dir = log_root_dir / datetime.datetime.now().strftime("%Y%m%d")
    log_dir.mkdir(parents=True, exist_ok=True)  # catch error when log dir does not exist

    # One shared format for both file handlers (was duplicated verbatim)
    formatter = logging.Formatter(
        "%(asctime)s %(name)s [%(levelname)s]: %(message)s", "%Y-%m-%d %H:%M:%S")

    root_log = logging.getLogger("pyhdx")
    root_log.setLevel(logging.DEBUG)
    fh = logging.FileHandler(log_dir / "pyhdx_logs.txt")
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)
    root_log.addHandler(fh)
    root_log.info("Starting PyHDX server")

    tornado_logger = logging.getLogger("tornado.application")
    fh = logging.FileHandler(log_dir / "tornado_logs.txt")
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)  # was the magic number 10 (== logging.DEBUG)
    tornado_logger.addHandler(fh)

    # TODO Clean assets dir from pdb files
    Path(cfg.assets_dir).mkdir(exist_ok=True, parents=True)

    print("Welcome to the PyHDX server!")
    pn.serve(
        APP_DICT,
        static_dirs={
            "pyhdx": STATIC_DIR,
            "assets": str(cfg.assets_dir)
        },
        index=str(STATIC_DIR / "index.html"),
    )
async def start_server(self, loop, address, http_port):
    """Launch the Panel server for this template and keep its handle on self."""
    serve_options = dict(
        address=address,
        port=http_port,
        loop=loop,
        title="AMI",
        show=False,
    )
    self.server = pn.serve(self.template, **serve_options)
def display_results(dset_file: Path, *preds_file: Path, show: bool = True):
    """Display accidents according to their severity and compare with predictions

    :param dset_file: CAS dataset .csv file
    :param preds_file: predictions .csv file for one method
    :param show: open the server in a new browser tab on start
    """
    crashes = pd.read_csv(dset_file, usecols=["X", "Y", "injuryCrash", "fold"])
    crashes["injuryCrash"] = crashes["injuryCrash"].astype(float)

    if not preds_file:
        # No predictions supplied: show the ground truth alone
        pane = pn.panel(plot_map(crashes, "injuryCrash", "Ground truth"))
    else:
        file_selector = pn.widgets.Select(
            name="Predictions file",
            options=list(preds_file),
            margin=(20, 20, 0, 20),
            width=400,
        )

        @pn.depends(filename=file_selector.param.value)
        def plot_crash_n_results(filename):
            # Re-read predictions each time a new file is selected
            crashes["predictions"] = pd.read_csv(filename)
            crashes["error"] = crashes["injuryCrash"] - crashes["predictions"]
            truth_map = plot_map(crashes, "injuryCrash", "Ground truth", clim=(0, 1))
            preds_map = plot_map(crashes, "predictions", "Predictions", clim=(0, 1))
            error_map = plot_map(crashes, "error", "Errors", cmap="seismic", clim=(-1, 1))
            hv_maps = pn.panel(truth_map + preds_map + error_map)
            return pn.Column(hv_maps[1][0][0], hv_maps[0])

        pane = pn.Column(file_selector, plot_crash_n_results)

    pn.serve(pane, show=show)
def statistics_pipeline(info, pipeline_):
    """Serve the statistics of a pipeline run.

    Resolves the training run of ``pipeline_``, downloads its split-statistics
    artifact into the per-app cache directory, parses the TFRecord statistics
    for every split, and serves the rendered HTML with Panel.

    :param info: client/session info dict (holds active user and workspace)
    :param pipeline_: pipeline identifier to resolve runs for
    """
    p_uuid, r_uuid = utils.resolve_pipeline_runs(
        info, pipeline_, run_type=PipelineRunTypes.training.name)

    utils.notice('Generating statistics for the pipeline run ID {}. If your '
                 'browser opens up to a blank window, please refresh '
                 'the page once.'.format(utils.format_uuid(r_uuid)))

    api = ce_api.PipelinesApi(utils.api_client(info))
    stat_artifact = utils.api_call(
        api.get_pipeline_artifacts_api_v1_pipelines_pipeline_id_runs_pipeline_run_id_artifacts_component_type_get,
        pipeline_id=p_uuid,
        pipeline_run_id=r_uuid,
        component_type=GDPComponent.SplitStatistics.name)

    ws_id = info[info[constants.ACTIVE_USER]][constants.ACTIVE_WORKSPACE]
    # Cache downloaded artifacts under the per-app data directory
    path = Path(click.get_app_dir(constants.APP_NAME), 'statistics',
                str(ws_id), p_uuid, r_uuid)
    utils.download_artifact(artifact_json=stat_artifact[0].to_dict(), path=path)

    # Heavy imports deferred until statistics are actually requested
    import tensorflow as tf
    from tensorflow_metadata.proto.v0 import statistics_pb2
    import panel as pn

    result = {}
    for split in os.listdir(path):
        stats_path = os.path.join(path, split, 'stats_tfrecord')
        serialized_stats = next(tf.compat.v1.io.tf_record_iterator(stats_path))
        stats = statistics_pb2.DatasetFeatureStatisticsList()
        stats.ParseFromString(serialized_stats)
        dataset_list = statistics_pb2.DatasetFeatureStatisticsList()
        # was `for i, d in enumerate(...)` with the index never used
        for d in stats.datasets:
            d.name = split
            dataset_list.datasets.append(d)
        result[split] = dataset_list
    h = utils.get_statistics_html(result)
    pn.serve(panels=pn.pane.HTML(h, width=1200), show=True)
def serve(self, static_dirs=None, **kwargs):
    """Wrapper for pn.serve, with the inclusion of the required static assets."""
    if static_dirs is None:
        static_dirs = {}
    # Directory with the bundled elvis assets (../assets relative to this file)
    elvis_assets_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, 'assets'))
    kwargs.setdefault('title', self.title)
    css, js = self._get_assets("", self.theme)
    pn.extension(css_files=css, js_files=js)
    # Caller-supplied dirs take precedence over the bundled assets entry
    combined_dirs = {'assets': elvis_assets_dir}
    combined_dirs.update(static_dirs)
    return pn.serve(self.app, static_dirs=combined_dirs, **kwargs)
def start_server(self):
    """Serve the comparison tool on the current Tornado IO loop and block
    until the server is shut down."""
    # IOLoop.current() is a classmethod; the original `IOLoop().current()`
    # needlessly constructed an extra loop instance before asking for the
    # current one.
    loop = IOLoop.current()
    server = pn.serve(self.r, show=False, title='Prior Comparison Tool',
                      loop=loop, start=False)
    # nest_asyncio required because if opening in jupyter notebooks, IOloop is already in use
    nest_asyncio.apply()
    return server.run_until_shutdown()
def view_statistics(artifact_uri, magic: bool = False):
    """View statistics in HTML.

    Args:
        artifact_uri (Text): URI of the statistics artifact to render.
        magic (bool): If True, display inline in a Jupyter notebook instead
            of serving a standalone page.
    """
    stats_dict = get_statistics_dataset_dict(artifact_uri)
    h = get_statistics_html(stats_dict)

    if not magic:
        # Standalone mode: serve the HTML through Panel
        pn.serve(panels=pn.pane.HTML(h, width=1200), show=True)
        return

    import sys
    if 'ipykernel' not in sys.modules:
        raise EnvironmentError('The magic functions are only usable '
                               'in a Jupyter notebook.')
    from IPython.core.display import display, HTML
    display(HTML(h))
def markdown_server_session():
    """Yield a served Markdown pane plus a pulled Bokeh session; stop the
    server afterwards (tolerating servers already closed by the test)."""
    pane = Markdown('#Title')
    server = serve(pane, port=6001, show=False, start=False)
    session_url = "http://localhost:{:d}/".format(server.port)
    session = pull_session(session_id='Test', url=session_url,
                           io_loop=server.io_loop)
    yield pane, server, session
    try:
        server.stop()
    except AssertionError:
        pass  # tests may already close this
def html_server_session():
    """Yield a served HTML pane plus a pulled Bokeh session; stop the server
    afterwards (tolerating servers already closed by the test)."""
    pane = HTML('<h1>Title</h1>')
    server = serve(pane, port=5006, show=False, start=False)
    session_url = "http://localhost:{:d}/".format(server.port)
    session = pull_session(session_id='Test', url=session_url,
                           io_loop=server.io_loop)
    yield pane, server, session
    try:
        server.stop()
    except AssertionError:
        pass  # tests may already close this
def create_sessions(slugs, titles):
    """Serve two Markdown apps under the given slugs and return one pulled
    session per app; the server is recorded in the module-level `servers`
    list for later cleanup."""
    app1_slug, app2_slug = slugs
    apps = {
        app1_slug: Markdown('First app'),
        app2_slug: Markdown('Second app'),
    }
    server = serve(apps, port=5008, title=titles, show=False, start=False)
    servers.append(server)
    base_url = f"http://localhost:{server.port:d}"
    return (
        pull_session(url=f"{base_url}/app1", io_loop=server.io_loop),
        pull_session(url=f"{base_url}/app2", io_loop=server.io_loop),
    )
def serve(self, static_dirs=None, **kwargs):
    """Wrapper for pn.serve(), with the inclusion of the required static assets.

    :static_dirs: Specify directories with static assets in addition to the
        standard elvis assets.
    :kwargs: key word arguments that are passed on to pn.serve
    """
    extra_dirs = static_dirs if static_dirs is not None else {}
    # Directory with the bundled elvis assets (../assets relative to this file)
    elvis_assets_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, 'assets'))
    # NOTE(review): the backslash prefix looks Windows-specific — confirm
    # whether _set_assets expects it on all platforms
    self._set_assets("assets\\", self.theme)
    # Caller-supplied dirs take precedence over the bundled assets entry
    combined_dirs = {'assets': elvis_assets_dir}
    combined_dirs.update(extra_dirs)
    return pn.serve(self.app, static_dirs=combined_dirs, **kwargs)
class MyApp(param.Parameterized):
    """Small Panel app: a bounded integer parameter whose value drives a data
    readout, with a loading spinner toggled around the (synchronous) update."""

    # User-adjustable input, rendered as a widget via self.param.value
    value = param.Integer(default=0, bounds=(0, 10))
    # Declared but never read in this class — presumably reserved; TODO confirm
    data = param.Integer()

    def __init__(self, **params):
        print(__name__)
        super().__init__(**params)
        self.data_panel = pn.pane.Str()
        self.loading_spinner = pn.widgets.indicators.LoadingSpinner(
            width=25, height=25, sizing_mode="fixed")
        self.view = pn.Column(self.loading_spinner, self.param.value,
                              self.data_panel, max_width=500)
        # Populate the data panel once at construction time
        self._update_data()

    @param.depends("value", watch=True)
    def _update_data(self):
        # Spinner on while _get_data runs; note it is NOT reset if _get_data
        # raises (no try/finally)
        self.loading_spinner.value = True
        self.data_panel.object = f"Data: {_get_data(self.value)}"
        self.loading_spinner.value = False


# Mark the view servable when launched through `bokeh serve` / `panel serve`
if __name__.startswith("bokeh"):
    MyApp().view.servable()

# Launch a standalone server when run as a script
if __name__ == "__main__":
    pn.serve({"": MyApp().view})
from pyhdx.panel.base import DEFAULT_COLORS, STATIC_DIR
from pyhdx.panel.data_sources import DataSource
import panel as pn
import numpy as np
from pathlib import Path

# Build the single-app template/controller pair (helper defined elsewhere)
tmpl, ctrl = _single_app()

# Read a fitted-result test file as raw bytes and feed it through the
# single-mapping file-input control as dataset 'DS1'
directory = Path(__file__).parent
fpath = directory / 'test_data' / 'ecSecB_torch_fit.txt'
with open(fpath, 'rb') as f_obj:
    file_binary = f_obj.read()

f_input = ctrl.control_panels['SingleMappingFileInputControl']
f_input.widget_dict['input_file'].filename = str(fpath)
f_input.input_file = file_binary
f_input.dataset_name = 'DS1'
f_input._action_add_dataset()

# s_ctrl = ctrl.control_panels['SingleControl']
# s_ctrl.dataset_name = 'DS1_deltaG'
# s_ctrl.quantity = 'deltaG'
# s_ctrl._action_add_dataset()

# Point the protein view at PDB entry 1QYN
pv_ctrl = ctrl.control_panels['ProteinViewControl']
pv_ctrl.rcsb_id = '1qyn'

if __name__ == '__main__':
    pn.serve(tmpl, show=False, static_dirs={'pyhdx': STATIC_DIR})
        # Tail of a plotting method (its `def` line is outside this chunk):
        # optionally restrict the dataframe to the selected date range
        if self.date:
            date_range = (df.index > self.date[0]) & (df.index < self.date[1])
            date_range_str = f"- {self.date[0].strftime('%Y-%m-%d')} - {self.date[1].strftime('%Y-%m-%d')}"
            df_filtered = df[date_range][self.column]
        else:
            date_range_str = ""
            df_filtered = df[self.column]
        df_filtered.plot(ax=ax, title=f"Column - {self.column} {date_range_str}")
        # Close the pyplot figure so it is not rendered twice in the notebook
        plt.close()
        return fig

    def panel(self):
        # Parameter widgets on the left, the reactive plot on the right
        return pn.Row(self.param, self.plot)


test = Test()
avocdo = Avocado()

# Server Config
routes = {
    # route/url : panel application / layout
    'avocado': avocdo.panel,
    'test': test.panel
}

pn.serve(routes, port=8080, show=False, websocket_origin="127.0.0.1:8080")
***********************************************************************/
"""
# time consuming
# Build the prediction-period date index and run the advanced forecaster
PeriodTime_study = pd.date_range(
    start=Start__PreddateD, end=End__PreddateD)  #After test period of time
PTime = pd.DataFrame(data=PeriodTime_study, columns=['date'])

Time_series_forecasting_advanced(train, test, DATE_cut, index_cut, wn,
                                 ts_name, PTime, 'D')

"""
/**********************************************************************
Financial analysis
***********************************************************************/
"""

# Serve the financial-analysis dashboard for the selected ticker
app_col = create_layout(tickerSymbol)
pn.serve(app_col)

"""
/**********************************************************************
Stock Index
***********************************************************************/
"""

# Download daily index data for the training window
Start__traindateD = '2019-01-01'
End__traindateD = End_realdata

aord = Data_sia_reader_auto_index('^AORD', Start__traindateD,
                                  End__traindateD, '1d')
nikkei = Data_sia_reader_auto_index('1360.T', Start__traindateD,
                                    End__traindateD, '1d')
hsi = Data_sia_reader_auto_index('^HSI', Start__traindateD,
                                 End__traindateD, '1d')
The awesome_panel.application framework provides

- Templates: One or more Templates to layout your app(s). A template might
  provide `main`, `sidebar`, `topbar` layouts where you can put your content.
- Components: Smaller constituents used to create the Template or PageComponents
- Views: Layout+Styling of Components
- Services: Services that can be used by the Template and components. For
  example a progress_service
- Models: Like Application, Page, Author, Tag, Progress etc.
"""
import os
import platform

import panel as pn

# We need to import the application module to get the applications added to the site
from application import pages  # pylint: disable=unused-import
from application.config import site

# for app in sorted(site.applications, key=lambda x: x.name):
#     print(f'<a href="{app.url}">{app.name}</a>')

if __name__ == "__main__":
    address = os.getenv("BOKEH_ADDRESS", "0.0.0.0")
    # One route per registered application
    APP_ROUTES = {app.url: app.view for app in site.applications}
    if platform.system() == "Windows":
        # Windows branch omits num_procs — presumably because multi-process
        # serving needs fork; TODO confirm
        pn.serve(APP_ROUTES, port=80, dev=False, title="Awesome Panel", address=address)
    else:
        pn.serve(
            APP_ROUTES,
            port=80,
            dev=False,
            title="Awesome Panel",
            address=address,
            num_procs=4,
        )
    # Holds the active AnnotationMission instance (None until a mission is chosen)
    app = param.Parameter()

    @param.depends("mission_id")
    def show_mission(self):
        """Build the annotation app for the selected mission (cached on self.app)."""
        if not self.mission_id:
            return None
        titles, signals, cpr = get_data(self.mission_id)
        # Reuse previously saved annotations when present; otherwise start
        # from the default (empty) annotation frame
        try:
            annotations = pd.read_csv(pth_df, index_col=0)
        except FileNotFoundError:
            annotations = df_default
        app = AnnotationMission(cpr=cpr, signals=signals, titles=titles,
                                mission_id=self.mission_id,
                                annotations=annotations)
        self.app = app
        return app.render()

    def render(self):
        """Return the selector UI; sync annotations back from the sub-app first."""
        if self.app:
            self.annotations = self.app.annotations
        return pn.Column(
            pn.Param(self.param, parameters=["mission_id"]),
            self.show_mission,
        )


selector = SelectMission()
server = pn.serve(selector.render)
import pycovid19usAltair
import pycovid19globalAltair
import panel as pn

us_app = pycovid19usAltair.us_app
global_app = pycovid19globalAltair.global_app

#us_app.show(host='localhost', port=8889, websocket_origin='localhost:8889', open=False)

# Route both dashboards from a single server instance
dashboards = {
    'By_Country': global_app,
    'US_Only': us_app,
}
pn.serve(dashboards, port=8890, websocket_origin='localhost:8890', show=False)
import holoviews as hv
import panel as pn
import numpy as np

hv.extension('bokeh')


def sine(frequency, phase, amplitude):
    """Return an 800-px-wide curve of amplitude * sin(frequency*x + phase)
    sampled on [0, 4*pi]."""
    xs = np.linspace(0, np.pi * 4)
    ys = np.sin(frequency * xs + phase) * amplitude
    return hv.Curve((xs, ys)).options(width=800)


if __name__ == '__main__':
    # Slider ranges for the three kdims plus the y axis
    ranges = dict(frequency=(1, 5), phase=(-np.pi, np.pi),
                  amplitude=(-2, 2), y=(-2, 2))
    dmap = hv.DynamicMap(
        sine, kdims=['frequency', 'phase', 'amplitude']).redim.range(**ranges)
    pn.serve(dmap, port=5006, allow_websocket_origin=["localhost:5000"],
             show=False)
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

import panel as pn
from flask import Flask

flask_app = Flask(__name__)


@flask_app.route('/app')
def hello_world():
    """Trivial Flask endpoint mounted under /flask/app."""
    return 'Hello, World!'


def panel_app():
    """Panel app served next to the Flask app."""
    selector = pn.widgets.Select(options=['Africa', 'Asia', 'Europe'],
                                 value='Asia')
    banner = "# This Panel app runs alongside flask, access the flask app at [here](./flask/app)"
    return pn.Column(banner, selector)


# Serve both apps from a single Panel server
pn.serve({'/flask': flask_app, '/app': panel_app}, port=5001)
import panel as pn
from bokeh.embed import server_document
from fastapi import FastAPI, Request
from fastapi.templating import Jinja2Templates

from sliders.pn_app import createApp

app = FastAPI()
templates = Jinja2Templates(directory="templates")


@app.get("/")
async def bkapp_page(request: Request):
    """Render the base template with a script tag embedding the Panel app."""
    script = server_document('http://127.0.0.1:5000/app')
    context = {"request": request, "script": script}
    return templates.TemplateResponse("base.html", context)


# Panel server on 5000; the FastAPI front-end on 8000 is the allowed origin
pn.serve(
    {'/app': createApp},
    port=5000,
    allow_websocket_origin=["127.0.0.1:8000"],
    address="127.0.0.1",
    show=False,
)
import panel as pn

from mortgage_calculator.layout import layout

# Serve the mortgage-calculator layout with Panel's default server settings
pn.serve(layout)
import awesome_panel_extensions
import panel as pn
from awesome_panel_extensions.widgets.button import AwesomeButton

button = AwesomeButton()


def view():
    """Return the layout served at the route root.

    The original built the Column but never returned it, so the served
    page rendered nothing; the missing ``return`` is the fix.
    """
    return pn.Column(pn.pane.Markdown("Hello World"), )


PREFIX = "sub/subsub"
ROUTES = {"": view}

pn.serve(ROUTES, port=5007, prefix=PREFIX)
import panel as pn
from lambda_stock_ui import SessionManager
from app_manager import ConfigAppManager
from stock_forecasting import StockForecasting
# from financial_analysis import FinancialAnalysis
import os

# Working bucket consumed by the forecasting backend via the environment
os.environ['working_bucket'] = 'crcdal-well-data'

# Point the forecasting class at the S3 bucket holding its data
base_s3_bucket = 's3://stock-forecasting-sia/'
StockForecasting.s3_bucket_path = base_s3_bucket
# FinancialAnalysis.s3_bucket_path = base_s3_bucket

# Start a session and obtain the app object — presumably a Panel layout;
# TODO confirm against SessionManager
app = SessionManager().start_session(ConfigAppManager)

# Serve the app
app.servable()
pn.serve(app)
# Read the fit-result test file as raw bytes (fpath defined earlier in this script)
with open(fpath, 'rb') as f_obj:
    file_binary = f_obj.read()

# Load the same file twice, as datasets DS1 and DS2, so they can be compared
f_input = ctrl.control_panels['MappingFileInputControl']
f_input.widget_dict['input_file'].filename = str(fpath)
f_input.input_file = file_binary
f_input.dataset_name = 'DS1'
f_input._action_add_dataset()

f_input.widget_dict['input_file'].filename = str(fpath)
f_input.input_file = file_binary
f_input.dataset_name = 'DS2'
f_input._action_add_dataset()

# Configure a deltaG comparison between the two (identical) datasets
diff = ctrl.control_panels['DifferenceControl']
diff.dataset_1 = 'DS1'
diff.dataset_2 = 'DS2'
comparison_name = 'Diff_ds1_ds2'
diff.comparison_name = comparison_name
diff.comparison_quantity = 'deltaG'
diff._action_add_comparison()

# Point the protein view at PDB entry 1QYN
pv_ctrl = ctrl.control_panels['ProteinViewControl']
pv_ctrl.rcsb_id = '1qyn'

if __name__ == '__main__':
    pn.serve(tmpl, show=True)
# Fitting hyperparameters for the fit control panel (fit_control defined earlier)
fit_control.r1 = 0.05
fit_control.r2 = 0.1
fit_control.epochs = 200000
fit_control.stop_loss = 0.001
fit_control.patience = 100
fit_control.learning_rate = 100

# ngl = ctrl.views['protein']
# ngl._ngl.pdb_string = Path(test_dir / '1qyn.pdb').read_text()
# ctrl.views['protein'].object = pdb_string

# fit_result = load_fitresult(fitresult_dir)
# src.add(fit_result, 'fit_1')

# Add the default differential comparison
diff = ctrl.control_panels['DifferentialControl']
diff._action_add_comparison()

#if __name__ == '__main__':
#pn.state.onload(reload_dashboard)
#pn.state.onload(reload_tables)

# Populate the dashboard once the frontend has loaded
pn.state.onload(init_dashboard)

if __name__ == '__main__':
    # Standalone: serve with pyhdx static files and the assets directory
    pn.serve(tmpl, show=True,
             static_dirs={'pyhdx': STATIC_DIR, 'assets': cfg.assets_dir})
elif __name__.startswith('bokeh_app'):
    # Launched via `panel serve` / `bokeh serve`
    tmpl.servable()
# control_panels.append(DeveloperControl) # # figure_panels = [ # ImageFigure, # ProteinFigure, # LoggingFigure # ] # # elvis = GoldenElvis(ExtendedGoldenTemplate, ExtendedGoldenDarkTheme, title=VERSION_STRING_SHORT) # ctrl = ComparisonController(control_panels, figure_panels, cluster=cluster) # ctrl.logger.addHandler(get_default_handler(sys.stdout)) # elvis.compose(ctrl, # elvis.column( # elvis.stack( # elvis.view(ctrl.figure_panels['ProteinFigure']) # ), # elvis.row( # elvis.stack( # elvis.view(ctrl.figure_panels['ImageFigure']), # ), # elvis.view(ctrl.figure_panels['LoggingFigure']), # ) # )) # # return ctrl if __name__ == '__main__': ctrl = main_app() pn.serve(ctrl.template, static_dirs={'pyhdx': STATIC_DIR})
# Settings dict describing the previous session to reload from test data
dic = {}
directory = os.path.dirname(__file__)
dic['file_path'] = os.path.join(directory, 'test_data', 'simulated_data_uptake.csv')
dic['norm_mode'] = 'Theory'
dic['be_percent'] = 0.
dic['exp_state'] = 'state1'

#todo this should be moved to reload_previous function
# src_file = os.path.join(directory, 'test_data', 'fit_simulated_pfact.txt')
# array = np_from_txt(src_file)
# src_dict = {name: array[name] for name in array.dtype.names}
# src_dict['y'] = src_dict['log_P']
# src_dict['color'] = np.full_like(array, fill_value=DEFAULT_COLORS['pfact'], dtype='<U7')
#
# dic['sources'] = {}
# dic['sources']['pfact'] = src_dict
#
# with open(os.path.join(directory, 'test_data', 'fit_simulated_pfact.pick'), 'rb') as f:
#     fit_result = pickle.load(f)
#
# dic['fit_results'] = {}
# dic['fit_results']['fr_pfact'] = fit_result

# Restore the controller state from the settings dict (ctrl defined earlier)
ctrl = reload_previous(dic, ctrl)

if __name__ == '__main__':
    pn.serve(tmpl)