Code example #1
File: plot.py  Project: yimuchen/coffea
def bokeh_plot(histo, jup_url="http://127.0.0.1:8889"):
    if not isnotebook():
        raise NotImplementedError("Only usable in jupyter notebook")
    import bokeh.plotting.figure as bk_figure
    from bokeh.io import show
    from bokeh import palettes
    from bokeh.layouts import row, column
    from bokeh.models import ColumnDataSource
    from bokeh.models.widgets import RadioButtonGroup, CheckboxButtonGroup
    from bokeh.models.widgets import RangeSlider, Div
    from bokeh.io import output_notebook  # enables plot interface in Jupyter notebook
    import numpy  # used below for bin edges, intersections, and zero arrays

    # init bokeh

    from bokeh.application import Application
    from bokeh.application.handlers import FunctionHandler

    from bokeh.core.validation import silence
    from bokeh.core.validation.warnings import EMPTY_LAYOUT

    silence(EMPTY_LAYOUT, True)

    output_notebook()

    # Set up widgets
    cfg_labels = ["Ghost"]
    wi_config = CheckboxButtonGroup(labels=cfg_labels, active=[0])
    wi_dense_select = RadioButtonGroup(
        labels=[ax.name for ax in histo.dense_axes()], active=0
    )
    wi_sparse_select = RadioButtonGroup(
        labels=[ax.name for ax in histo.sparse_axes()], active=0
    )

    # Dense widgets
    sliders = {}
    for ax in histo.dense_axes():
        edge_vals = (histo.axis(ax.name).edges()[0], histo.axis(ax.name).edges()[-1])
        _smallest_bin = numpy.min(numpy.diff(histo.axis(ax.name).edges()))
        sliders[ax.name] = RangeSlider(
            title=ax.name,
            value=edge_vals,
            start=edge_vals[0],
            end=edge_vals[1],
            step=_smallest_bin,
            name=ax.name,
        )

    # Cat widgets
    togglers = {}
    for ax in histo.sparse_axes():
        togglers[ax.name] = CheckboxButtonGroup(
            labels=[i.name for i in ax.identifiers()], active=[0], name=ax.name
        )

    # Toggles for all widgets
    configers = {}
    for ax in histo.sparse_axes():
        configers[ax.name] = CheckboxButtonGroup(
            labels=["Display", "Ghost"], active=[0, 1], name=ax.name
        )
    for ax in histo.dense_axes():
        configers[ax.name] = CheckboxButtonGroup(
            labels=["Display"], active=[0], name=ax.name
        )

    # Figure
    fig = bk_figure(
        title="1D Projection",
        plot_width=500,
        plot_height=500,
        min_border=20,
        toolbar_location=None,
    )
    fig.yaxis.axis_label = "N"
    fig.xaxis.axis_label = "Quantity"

    # Iterate over possible overlays
    _max_idents = 0  # Max number of simultaneous histograms
    for ax in histo.sparse_axes():
        _max_idents = max(_max_idents, len([i.name for i in ax.identifiers()]))

    # Data source list
    sources = []
    sources_ghost = []
    for i in range(_max_idents):
        sources.append(ColumnDataSource(dict(left=[], top=[], right=[], bottom=[])))
        sources_ghost.append(
            ColumnDataSource(dict(left=[], top=[], right=[], bottom=[]))
        )

    # Hist list
    hists = []
    hists_ghost = []
    for i in range(_max_idents):
        if _max_idents < 10:
            _color = palettes.Category10[min(max(3, _max_idents), 10)][i]
        else:
            _color = palettes.magma(_max_idents)[i]
        hists.append(
            fig.quad(
                left="left",
                right="right",
                top="top",
                bottom="bottom",
                source=sources[i],
                alpha=0.9,
                color=_color,
            )
        )
        hists_ghost.append(
            fig.quad(
                left="left",
                right="right",
                top="top",
                bottom="bottom",
                source=sources_ghost[i],
                alpha=0.05,
                color=_color,
            )
        )

    def update_data(attrname, old, new):
        sparse_active = wi_sparse_select.active
        sparse_name = [ax.name for ax in histo.sparse_axes()][sparse_active]
        sparse_other = [ax.name for ax in histo.sparse_axes() if ax.name != sparse_name]

        dense_active = wi_dense_select.active
        dense_name = [ax.name for ax in histo.dense_axes()][dense_active]
        dense_other = [ax.name for ax in histo.dense_axes() if ax.name != dense_name]

        # Apply cuts in projections
        _h = histo.copy()
        for proj_ax in sparse_other:
            _idents = histo.axis(proj_ax).identifiers()
            _labels = [ident.name for ident in _idents]
            if 0 in configers[proj_ax].active:
                _h = _h.integrate(
                    proj_ax, [_labels[i] for i in togglers[proj_ax].active]
                )
            else:
                _h = _h.integrate(proj_ax)

        for proj_ax in dense_other:
            _h = _h.integrate(
                proj_ax, slice(sliders[proj_ax].value[0], sliders[proj_ax].value[1])
            )

        for cat_ix in range(_max_idents):
            # Update histo for each toggled overlay
            if cat_ix in togglers[sparse_name].active:
                cat_value = histo.axis(sparse_name).identifiers()[cat_ix]
                h1d = _h.integrate(sparse_name, cat_value)

                # Get shown histogram
                values = h1d.project(dense_name).values()
                if values != {}:
                    h = values[()]
                    bins = h1d.axis(dense_name).edges()

                    # Apply cuts on shown axis
                    bin_los = bins[:-1][bins[:-1] > sliders[dense_name].value[0]]
                    bin_his = bins[1:][bins[1:] < sliders[dense_name].value[1]]
                    new_bins = numpy.intersect1d(bin_los, bin_his)
                    bin_ixs = numpy.searchsorted(bins, new_bins)[:-1]
                    h = h[bin_ixs]

                    sources[cat_ix].data = dict(
                        left=new_bins[:-1],
                        right=new_bins[1:],
                        top=h,
                        bottom=numpy.zeros_like(h),
                    )
                else:
                    sources[cat_ix].data = dict(left=[], right=[], top=[], bottom=[])

                # Add ghosts
                if 0 in wi_config.active:
                    h1d = histo.integrate(sparse_name, cat_value)
                    for proj_ax in sparse_other:
                        _idents = histo.axis(proj_ax).identifiers()
                        _labels = [ident.name for ident in _idents]
                        if 1 not in configers[proj_ax].active:
                            h1d = h1d.integrate(
                                proj_ax, [_labels[i] for i in togglers[proj_ax].active]
                            )
                        else:
                            h1d = h1d.integrate(proj_ax)
                    values = h1d.project(dense_name).values()
                    if values != {}:
                        h = h1d.project(dense_name).values()[()]
                        bins = h1d.axis(dense_name).edges()
                        sources_ghost[cat_ix].data = dict(
                            left=bins[:-1],
                            right=bins[1:],
                            top=h,
                            bottom=numpy.zeros_like(h),
                        )
                    else:
                        sources_ghost[cat_ix].data = dict(
                            left=[], right=[], top=[], bottom=[]
                        )
            else:
                sources[cat_ix].data = dict(left=[], right=[], top=[], bottom=[])
                sources_ghost[cat_ix].data = dict(left=[], right=[], top=[], bottom=[])

        # Cosmetics
        fig.xaxis.axis_label = dense_name

    for name, slider in sliders.items():
        slider.on_change("value", update_data)
    for name, toggler in togglers.items():
        toggler.on_change("active", update_data)
    for name, configer in configers.items():
        configer.on_change("active", update_data)
    # Button
    for w in [wi_dense_select, wi_sparse_select, wi_config]:
        w.on_change("active", update_data)

    from bokeh.models.widgets import Panel, Tabs

    layout = row(
        fig,
        column(
            Div(
                text="<b>Overlay Axis:</b>",
                style={"font-size": "100%", "color": "black"},
            ),
            wi_sparse_select,
            Div(
                text="<b>Plot Axis:</b>", style={"font-size": "100%", "color": "black"}
            ),
            wi_dense_select,
            Div(
                text="<b>Categorical Cuts:</b>",
                style={"font-size": "100%", "color": "black"},
            ),
            *[toggler for name, toggler in togglers.items()],
            Div(
                text="<b>Dense Cuts:</b>", style={"font-size": "100%", "color": "black"}
            ),
            *[slider for name, slider in sliders.items()]
        ),
    )

    # Config prep
    incl_lists = [[], [], []]
    for i, key in enumerate(list(configers.keys())):
        incl_lists[i // max(5, len(list(configers.keys())) / 3)].append(
            Div(
                text="<b>{}:</b>".format(key),
                style={"font-size": "70%", "color": "black"},
            )
        )
        incl_lists[i // max(5, len(list(configers.keys())) / 3)].append(configers[key])

    layout_cfgs = column(
        row(
            column(
                Div(
                    text="<b>Configs:</b>",
                    style={"font-size": "100%", "color": "black"},
                ),
                wi_config,
            )
        ),
        Div(
            text="<b>Axis togglers:</b>", style={"font-size": "100%", "color": "black"}
        ),
        row(
            column(incl_lists[0]),
            column(incl_lists[1]),
            column(incl_lists[2]),
        ),
    )

    # Update active buttons
    def update_layout(attrname, old, new):
        active_axes = [None]
        for name, wi in configers.items():
            if 0 in wi.active:
                active_axes.append(name)
        for child in layout.children[1].children:
            if child.name not in active_axes:
                child.visible = False
            else:
                child.visible = True

    for name, configer in configers.items():
        configer.on_change("active", update_layout)

    tab1 = Panel(child=layout, title="Projection")
    tab2 = Panel(child=layout_cfgs, title="Configs")
    tabs = Tabs(tabs=[tab1, tab2])

    def modify_doc(doc):
        doc.add_root(row(tabs, width=800))
        doc.title = "Sliders"

    handler = FunctionHandler(modify_doc)
    app = Application(handler)

    show(app, notebook_url=jup_url)
    update_data("", "", "")
Code example #2
    minimum = 20
    combinationSelection = findBestCombinations(minimum)
    tabList = []
    for combination in combinationSelection:
        country = combination[0]
        product = combination[1]
        productionProduct = getLinkedProduct(product)[0]
        p = figure(title='Relation between ' + str(product) + ' price and ' +
                   str(productionProduct) + ' production')
        p.grid.grid_line_color = 'Black'
        p.background_fill_color = "#eeeeee"
        scatterLists = findScatterLists(productionDf, priceDf, country,
                                        product)
        productions = scatterLists[0]
        prices = scatterLists[1]
        mscatter(p, productions, prices, "circle")
        tab = Panel(child=p, title=str(country) + '/' + str(product))
        tabList.append(tab)
    tabs = Tabs(tabs=tabList)

    js, tag = autoload_static(tabs, CDN, "components/scatterplot.js")

    # the javascript code is written to a file, in this case components/scatterplot.js
    with open("components/scatterplot.js", "w+") as f:
        f.write(js)

    # place this tag where the plot should be on a page
    # for now, just copy-paste it from the terminal
    # IMPORTANT: if this code is run again, the tag needs to be replaced
    print(tag)
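The comments above describe the autoload_static embed workflow; below is a hedged, self-contained miniature of the same pattern with a stand-in figure and hypothetical output paths, so the moving parts are visible in one place.

from bokeh.embed import autoload_static
from bokeh.plotting import figure
from bokeh.resources import CDN

# Stand-in plot instead of the price/production scatter tabs
p = figure(title='Stand-in plot')
p.circle([1, 2, 3], [4, 6, 5], size=10)

# autoload_static returns the JS payload and the <script> tag that loads it
js, tag = autoload_static(p, CDN, "components/standin_plot.js")
with open("components/standin_plot.js", "w") as f:
    f.write(js)

# Paste `tag` into the page template where the plot should appear;
# regenerate and replace it whenever the JS file is rewritten.
print(tag)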
Code example #3
from os.path import dirname, join

import pandas as pd

from bokeh.io import curdoc
from bokeh.models.widgets import Tabs

from scripts.histogram import histogram_tab
from scripts.density import density_tab
from scripts.table import table_tab
from scripts.draw_map import map_tab
from scripts.routes import route_tab

# Using included state data from Bokeh for map
from bokeh.sampledata.us_states import data as states

# Read data into dataframes
flights = pd.read_csv(join(dirname(__file__), 'data', 'flights.csv'),
                      index_col=0).dropna()

# Formatted Flight Delay Data for map
map_data = pd.read_csv(join(dirname(__file__), 'data', 'flights_map.csv'),
                       header=[0, 1],
                       index_col=0)

# Create each of the tabs
tab1 = histogram_tab(flights)
tab2 = density_tab(flights)
tab3 = table_tab(flights)
tab4 = map_tab(map_data, states)
tab5 = route_tab(flights)

# Put all the tabs into one application
tabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5])

# Put the tabs in the current document for display
curdoc().add_root(tabs)
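The tab-builder functions imported above are not shown; a hedged sketch of the shape each one is assumed to have follows: build a figure (plus any widgets) from the dataframe and return it wrapped in a Panel. The example_tab name and the 'arr_delay' column are illustrative assumptions.

import numpy as np
from bokeh.models.widgets import Panel
from bokeh.plotting import figure

def example_tab(flights):
    # Histogram of a numeric column, wrapped in a Panel so Tabs can display it
    counts, edges = np.histogram(flights['arr_delay'].dropna(), bins=50)
    p = figure(plot_width=700, plot_height=400, title='Arrival delays')
    p.quad(top=counts, bottom=0, left=edges[:-1], right=edges[1:])
    return Panel(child=p, title='Example')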
Code example #4
def plot_hail_file_metadata(
    t_path: str,
) -> Optional[Union[Grid, Tabs, bokeh.plotting.Figure]]:
    """
    Takes path to hail Table or MatrixTable (gs://bucket/path/hail.mt), outputs Grid or Tabs, respectively.
    Or if an unordered Table is provided, a Figure with file sizes is output.
    If metadata file or rows directory is missing, returns None.
    """
    panel_size = 600
    subpanel_size = 150

    files = hl.hadoop_ls(t_path)
    rows_file = [x["path"] for x in files if x["path"].endswith("rows")]
    entries_file = [x["path"] for x in files if x["path"].endswith("entries")]
    # cols_file = [x['path'] for x in files if x['path'].endswith('cols')]
    success_file = [
        x["modification_time"] for x in files if x["path"].endswith("SUCCESS")
    ]

    data_type = "Table"

    metadata_file = [
        x["path"] for x in files if x["path"].endswith("metadata.json.gz")
    ]
    if not metadata_file:
        logger.warning("No metadata file found. Exiting...")
        return None

    with hl.hadoop_open(metadata_file[0], "rb") as f:
        overall_meta = json.loads(f.read())
        rows_per_partition = overall_meta["components"]["partition_counts"][
            "counts"]

    if not rows_file:
        logger.warning("No rows directory found. Exiting...")
        return None
    rows_files = hl.hadoop_ls(rows_file[0])

    if entries_file:
        data_type = "MatrixTable"
        rows_file = [
            x["path"] for x in rows_files if x["path"].endswith("rows")
        ]
        rows_files = hl.hadoop_ls(rows_file[0])
    row_partition_bounds, row_file_sizes = get_rows_data(rows_files)

    total_file_size, row_file_sizes, row_scale = scale_file_sizes(
        row_file_sizes)

    if not row_partition_bounds:
        logger.warning("Table is not partitioned. Only plotting file sizes")
        row_file_sizes_hist, row_file_sizes_edges = np.histogram(
            row_file_sizes, bins=50)
        p_file_size = figure(plot_width=panel_size, plot_height=panel_size)
        p_file_size.quad(
            right=row_file_sizes_hist,
            left=0,
            bottom=row_file_sizes_edges[:-1],
            top=row_file_sizes_edges[1:],
            fill_color="#036564",
            line_color="#033649",
        )
        p_file_size.yaxis.axis_label = f"File size ({row_scale}B)"
        return p_file_size

    all_data = {
        "partition_widths":
        [-1 if x[0] != x[2] else x[3] - x[1] for x in row_partition_bounds],
        "partition_bounds":
        [f"{x[0]}:{x[1]}-{x[2]}:{x[3]}" for x in row_partition_bounds],
        "spans_chromosome": [
            "Spans chromosomes" if x[0] != x[2] else "Within chromosome"
            for x in row_partition_bounds
        ],
        "row_file_sizes":
        row_file_sizes,
        "row_file_sizes_human":
        [f"{x:.1f} {row_scale}B" for x in row_file_sizes],
        "rows_per_partition":
        rows_per_partition,
        "index":
        list(range(len(rows_per_partition))),
    }

    if entries_file:
        entries_rows_files = hl.hadoop_ls(entries_file[0])
        entries_rows_file = [
            x["path"] for x in entries_rows_files if x["path"].endswith("rows")
        ]
        if entries_rows_file:
            entries_files = hl.hadoop_ls(entries_rows_file[0])
            entry_partition_bounds, entry_file_sizes = get_rows_data(
                entries_files)
            total_entry_file_size, entry_file_sizes, entry_scale = scale_file_sizes(
                entry_file_sizes)
            all_data["entry_file_sizes"] = entry_file_sizes
            all_data["entry_file_sizes_human"] = [
                f"{x:.1f} {entry_scale}B" for x in row_file_sizes
            ]

    title = f"{data_type}: {t_path}"

    msg = f"Rows: {sum(all_data['rows_per_partition']):,}<br/>Partitions: {len(all_data['rows_per_partition']):,}<br/>Size: {total_file_size}<br/>"
    if success_file[0]:
        msg += success_file[0]

    source = ColumnDataSource(pd.DataFrame(all_data))
    p = figure(tools=TOOLS, plot_width=panel_size, plot_height=panel_size)
    p.title.text = title
    p.xaxis.axis_label = "Number of rows"
    p.yaxis.axis_label = f"File size ({row_scale}B)"
    color_map = factor_cmap(
        "spans_chromosome",
        palette=Spectral8,
        factors=list(set(all_data["spans_chromosome"])),
    )
    p.scatter(
        "rows_per_partition",
        "row_file_sizes",
        color=color_map,
        legend="spans_chromosome",
        source=source,
    )
    p.legend.location = "bottom_right"
    p.select_one(HoverTool).tooltips = [(x, f"@{x}") for x in (
        "rows_per_partition",
        "row_file_sizes_human",
        "partition_bounds",
        "index",
    )]

    p_stats = Div(text=msg)
    p_rows_per_partition = figure(x_range=p.x_range,
                                  plot_width=panel_size,
                                  plot_height=subpanel_size)
    p_file_size = figure(y_range=p.y_range,
                         plot_width=subpanel_size,
                         plot_height=panel_size)

    rows_per_partition_hist, rows_per_partition_edges = np.histogram(
        all_data["rows_per_partition"], bins=50)
    p_rows_per_partition.quad(
        top=rows_per_partition_hist,
        bottom=0,
        left=rows_per_partition_edges[:-1],
        right=rows_per_partition_edges[1:],
        fill_color="#036564",
        line_color="#033649",
    )
    row_file_sizes_hist, row_file_sizes_edges = np.histogram(
        all_data["row_file_sizes"], bins=50)
    p_file_size.quad(
        right=row_file_sizes_hist,
        left=0,
        bottom=row_file_sizes_edges[:-1],
        top=row_file_sizes_edges[1:],
        fill_color="#036564",
        line_color="#033649",
    )

    rows_grid = gridplot([[p_rows_per_partition, p_stats], [p, p_file_size]])

    if "entry_file_sizes" in all_data:
        title = f"Statistics for {data_type}: {t_path}"

        msg = f"Rows: {sum(all_data['rows_per_partition']):,}<br/>Partitions: {len(all_data['rows_per_partition']):,}<br/>Size: {total_entry_file_size}<br/>"
        if success_file[0]:
            msg += success_file[0]

        source = ColumnDataSource(pd.DataFrame(all_data))
        panel_size = 600
        subpanel_size = 150
        p = figure(tools=TOOLS, plot_width=panel_size, plot_height=panel_size)
        p.title.text = title
        p.xaxis.axis_label = "Number of rows"
        p.yaxis.axis_label = f"File size ({entry_scale}B)"
        color_map = factor_cmap(
            "spans_chromosome",
            palette=Spectral8,
            factors=list(set(all_data["spans_chromosome"])),
        )
        p.scatter(
            "rows_per_partition",
            "entry_file_sizes",
            color=color_map,
            legend="spans_chromosome",
            source=source,
        )
        p.legend.location = "bottom_right"
        p.select_one(HoverTool).tooltips = [(x, f"@{x}") for x in (
            "rows_per_partition",
            "entry_file_sizes_human",
            "partition_bounds",
            "index",
        )]

        p_stats = Div(text=msg)
        p_rows_per_partition = figure(x_range=p.x_range,
                                      plot_width=panel_size,
                                      plot_height=subpanel_size)
        p_rows_per_partition.quad(
            top=rows_per_partition_hist,
            bottom=0,
            left=rows_per_partition_edges[:-1],
            right=rows_per_partition_edges[1:],
            fill_color="#036564",
            line_color="#033649",
        )
        p_file_size = figure(y_range=p.y_range,
                             plot_width=subpanel_size,
                             plot_height=panel_size)

        row_file_sizes_hist, row_file_sizes_edges = np.histogram(
            all_data["entry_file_sizes"], bins=50)
        p_file_size.quad(
            right=row_file_sizes_hist,
            left=0,
            bottom=row_file_sizes_edges[:-1],
            top=row_file_sizes_edges[1:],
            fill_color="#036564",
            line_color="#033649",
        )
        entries_grid = gridplot([[p_rows_per_partition, p_stats],
                                 [p, p_file_size]])

        return Tabs(tabs=[
            Panel(child=entries_grid, title="Entries"),
            Panel(child=rows_grid, title="Rows"),
        ])
    else:
        return rows_grid
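A hypothetical usage sketch: whichever of the three return types comes back (Figure, Grid, or Tabs), bokeh.io.show can render it; the bucket path below is illustrative only.

from bokeh.io import output_file, show

result = plot_hail_file_metadata("gs://my-bucket/path/my_table.ht")  # illustrative path
if result is not None:
    output_file("hail_metadata.html")
    show(result)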
Code example #5
def portfolio_page():
    if request.method=='GET':
        return render_template('portfolio.html')
    else:
        app_quantfy.vars={} # This is a dictionary
        # Define the variables. This is a local variable, but in Flask it will be passed to the plot route I guess
        
        app_quantfy.vars['sym'] = request.form['sym'].upper().strip(';').split(';') # 'sym' should be defined in html file as name
        
        if (app_quantfy.vars['sym'][0]=='') :  # sym is a list delimited by ;
            return render_template('portfolio.html',error_sym='<font size="3" color="red" > Provide at least one ticker symbol </font>') 
        
        
        if len(request.form['start_date'])!=0: # The start_date and end_date keys are present even when their values are empty
            try:
                app_quantfy.vars['start_date']=dt.datetime.strptime(request.form['start_date'],'%m/%d/%Y')
            except ValueError:
                return render_template('portfolio.html',error_start_date='<font size="3" color="red" > Wrong date format </font>')
        else:
            # Default to one year before the current date
            app_quantfy.vars['start_date']=dt.datetime.today()-dt.timedelta(days=365) # Approximate: 365 days, ignoring leap years
        
        
        if  len(request.form['end_date'])!=0:
            try:
                app_quantfy.vars['end_date']=dt.datetime.strptime(request.form['end_date'],'%m/%d/%Y')
            except ValueError:
                return render_template('portfolio.html',error_end_date='<font size="3" color="red" > Wrong date format </font>')
        else:
            # Take today as the default date
            app_quantfy.vars['end_date']=dt.datetime.today()
        
        #print app_quantfy.vars
        if 'bench_sym' in request.form: 
            app_quantfy.vars['bench_sym']=request.form['bench_sym']
        else:
            app_quantfy.vars['bench_sym']='SPY'
        
        symbols=list(app_quantfy.vars['sym']); # Create a new list as we are doing insert operation next
        symbols.insert(0,app_quantfy.vars['bench_sym']); # Insert the default symbol in the symbols list
        
        # Here just get the data for the 'Adj. Close'
        full_data=[(sym, apicall_data.get_data_from_quandl(symbol=sym, features=['Adj. Close'], start_dt=app_quantfy.vars['start_date'],end_dt=app_quantfy.vars['end_date'])
                        ) for sym in symbols]
        
        # Convert this to required format
        
        df_all_sym=util.get_data(full_data)
        
        app_quantfy.vars['guess_alloc']=request.form['guess_alloc'].strip(';').split(';')
        
        
        
        app_quantfy.vars['start_value']=float(request.form['start_value']); # It has a default value
        
        if len(app_quantfy.vars['guess_alloc']) !=0 and (app_quantfy.vars['guess_alloc'][0]!='') : # app_quantfy.vars['guess_alloc'] is a list because of the strip function
            # print app_quantfy.vars['guess_alloc']
            # print len(app_quantfy.vars['guess_alloc'])
            app_quantfy.vars['guess_alloc']=[float(i) for i in app_quantfy.vars['guess_alloc']]
            try:
                assert len(app_quantfy.vars['guess_alloc'])==len(app_quantfy.vars['sym'])
            except AssertionError:
                return render_template('portfolio.html',error_alloc='<font size="3" color="red" > Number of allocations should be same as symbols   </font>')
            # Sum should be equal to one
            print(app_quantfy.vars['guess_alloc'])
            
            try:
                assert abs(sum(app_quantfy.vars['guess_alloc'])-1.0)<=1e-5 # Sometimes the rounding does not work correctly
            except AssertionError:
                return render_template('portfolio.html',error_alloc='<font size="3" color="red" > Sum should be 1   </font>')

            
        else:
            # Generate random numbers
            allocs=np.random.random(len(app_quantfy.vars['sym']))
            allocs /=allocs.sum()
            app_quantfy.vars['guess_alloc']=allocs
            #print allocs
        
        cr,adr,sddr,sr,ev,normalized_plot_df=optimization.access_portfolio(df_all_sym, app_quantfy.vars['bench_sym'], 
                                                                           app_quantfy.vars['guess_alloc'],
                                                                           sv=app_quantfy.vars['start_value'])
        
        #print cr,adr,sddr,sr,ev
        
        param_not_opt=pd.DataFrame([cr,adr,sddr,sr,ev],index=['Cumulative Return','Average Daily Return','Stand. Deviation Daily return',
                                                          'Sharpe Ratio','End value'], columns=['Unoptimized'])
        
        script_not_opt_table,div_not_opt_table=convert_pd_bokeh_html(param_not_opt)
        
        # print normalized_plot_df.head()
        hover=HoverTool(
            tooltips=[
                ("Portfolio",'$y')
                
                
            ]
        )
        TOOLS='pan,wheel_zoom,box_zoom,reset,save,box_select,crosshair'
        not_opt_p = figure(width=900, height=500, x_axis_type="datetime",tools=[TOOLS,hover])
        
        colors=['blue','red','green','#cc3300']
        
        for (i,ftr) in enumerate(normalized_plot_df):
            not_opt_p.line(normalized_plot_df.index,normalized_plot_df[ftr],legend=ftr,color=colors[i],line_width=2)
        
        #not_opt_p.line(normalized_plot_df)
        
        not_opt_p.title.text = "Un-optimized portfolio"
        not_opt_p.legend.location = "top_left"
        not_opt_p.xaxis.axis_label = 'Date'
        not_opt_p.yaxis.axis_label = 'Relative portfolio value'
        
        tab_not_opt=Panel(child=not_opt_p,title='Un-optimized portfolio')
        
        # script_not_opt, div_not_opt=components(not_opt_p)
        
        # print script_not_opt,div_not_opt
        # Now run optimized
        
        cr,adr,sddr,sr,ev,normalized_plot_df,optimal_alloc=optimization.optimize_portfolio(df_all_sym,app_quantfy.vars['bench_sym'],
                                                                             app_quantfy.vars['start_value'])
        
        
        # print cr,adr,sddr,sr,ev,optimal_alloc
        
        # print normalized_plot_df.head()
        hover=HoverTool(
            tooltips=[
                ("Portfolio",'$y')
                
                
            ]
        )
        
        opt_p = figure(width=900, height=500, x_axis_type="datetime",tools=[TOOLS,hover])
              
        for (i,ftr) in enumerate(normalized_plot_df):
            opt_p.line(normalized_plot_df.index,normalized_plot_df[ftr],legend=ftr,color=colors[i],line_width=2)
        
        
        # print normalized_plot_df
        opt_p.title.text = "Optimized portfolio value"
        opt_p.legend.location = "top_left"
        opt_p.xaxis.axis_label = 'Date'
        opt_p.yaxis.axis_label = 'Relative portfolio value'
        
        tab_opt=Panel(child=opt_p,title='Optimized portfolio')
        
        tabs=Tabs(tabs=[tab_not_opt,tab_opt])
        
        script_opt, div_opt=components(tabs)
        
        
        param_opt=pd.DataFrame([cr,adr,sddr,sr,ev],index=['Cumulative Return','Average Daily Return','Stand. Deviation Daily return',
                                                          'Sharpe Ratio','End value'], columns=['Optimized'])
        
        all_params=param_not_opt.join(param_opt)
        
        script_opt_table,div_opt_table=convert_pd_bokeh_html(all_params)
        
        
              
        alloc_df=pd.DataFrame([app_quantfy.vars['guess_alloc'],list(optimal_alloc)],index=['Random/Guess allocations','Optimized allocations'],columns=app_quantfy.vars['sym'])
        
        #str_opt_alloc='Optimal allocations: '+', '.join([str(i) for i in optimal_alloc])
        script_alloc_df,div_alloc_df=convert_pd_bokeh_html(alloc_df)
        
        # script_not_opt_table=script_not_opt_table,div_not_opt_table=div_not_opt_table,
        return render_template('portfolio.html',script_opt_table=script_opt_table, div_opt_table=div_opt_table,
                               script_alloc_df=script_alloc_df,div_alloc_df=div_alloc_df,
                                script_opt=script_opt,plot_opt=div_opt
                               )
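convert_pd_bokeh_html is referenced but not defined in this excerpt; a plausible minimal stand-in (an assumption, not the project's actual helper) renders the dataframe as a Bokeh DataTable and returns its embeddable script/div pair via components().

from bokeh.embed import components
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn

def convert_pd_bokeh_html(df):
    df = df.reset_index()  # expose the row labels as a regular column
    source = ColumnDataSource(df)
    columns = [TableColumn(field=str(c), title=str(c)) for c in df.columns]
    table = DataTable(source=source, columns=columns, width=900, height=200)
    return components(table)  # (script, div) pair for render_template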
Code example #6
def plot_params(list_data_tuples):
    
    #script_el_param=''
    #div_el_param='<h4> Time-series plot for computed parameters <table style="width 50%">  <tr>'
    
    list_plots=[]
       
      
    # Here we need to compute the following:
    # (1) Daily returns (2)  Rolling standard deviation (3) Bollinger bands (4) Rolling std
    
    # Generate plot for each symbols
    
    df=util.get_data(list_data_tuples)
    # Normalize the data
    df=df/df.iloc[0,:]
       
    daily_returns = util.compute_daily_returns(df)
    rolling_mean=util.get_rolling_mean(df, window=20)
    # drop the rows where the values are 0, for instance for window 20, the first 20 are zeros
    rolling_mean=rolling_mean[(rolling_mean.sum(axis=1)!=0)]   
    rolling_std=util.get_rolling_std(df, window=20)
    rolling_std=rolling_std[(rolling_std.sum(axis=1)!=0)]
    
    u_bollinger_bnd,l_bollinger_bnd=util.get_bollinger_bands(rolling_mean, rolling_std)
    
    param_dict={'Rolling mean':rolling_mean,'Rolling SD': rolling_std,'Bollinger Bands':(u_bollinger_bnd,l_bollinger_bnd),'Daily returns':daily_returns}
    
    colors=viridis(len(daily_returns.columns));
    TOOLS='pan,wheel_zoom,box_zoom,reset,save,box_select,crosshair'
   

    for param in param_dict.keys():
        hover=HoverTool(
            tooltips=[
                ("Metric",'$y')
                ]
            )
        p = figure(width=1200, height=500, x_axis_type="datetime",tools=[TOOLS,hover])
    
        if param =="Bollinger Bands":
            upper_band=param_dict[param][0]
            lower_band=param_dict[param][1]
            for (i,sym) in enumerate(upper_band):
                p.line(upper_band.index,upper_band[sym],legend=sym,color=colors[i],line_width=2)
            
            for (i,sym) in enumerate(lower_band):
                p.line(lower_band.index,lower_band[sym],legend=sym,color=colors[i],line_width=2,line_dash='dashed')   
        else:
            for (i,sym) in enumerate(param_dict[param]):
                p.line(param_dict[param].index,param_dict[param][sym],legend=sym,color=colors[i],line_width=2)
                
        p.title.text = param
        p.legend.location = "top_left"
        p.xaxis.axis_label = 'Date'
        p.yaxis.axis_label = param
       
        tab=Panel(child=p,title=param)
        
        list_plots.append(tab)
    
    if len(list_plots)!=0:
        script_el_param, div_el_param=components(Tabs(tabs=list_plots))
    else:
        script_el_param=''
        div_el_param=''
    
    return script_el_param, div_el_param
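The util helpers used above are not included in this excerpt; a hedged sketch of how they are assumed to behave follows, using pandas rolling windows (Bollinger bands taken as the rolling mean plus/minus two rolling standard deviations).

def get_rolling_mean(df, window=20):
    return df.rolling(window=window).mean().fillna(0)

def get_rolling_std(df, window=20):
    return df.rolling(window=window).std().fillna(0)

def get_bollinger_bands(rolling_mean, rolling_std):
    upper_band = rolling_mean + 2 * rolling_std
    lower_band = rolling_mean - 2 * rolling_std
    return upper_band, lower_band

def compute_daily_returns(df):
    return df.pct_change().fillna(0)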
Code example #7
               legend='top_right')

# output_file("histogram_color.html")

show(p2)

# In[47]:

from bokeh.models.widgets import Panel, Tabs
from bokeh.io import output_file, show
from bokeh.plotting import figure

tab1 = Panel(child=p1, title="Frequency of Score")
tab2 = Panel(child=p2, title="By Grade")

tabs = Tabs(tabs=[tab1, tab2])
output_file("tabs.html")

show(tabs)

# # Using Plotly

# In[48]:

import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
init_notebook_mode(connected=True)

# In[49]:

x = mRests['GRADE']
Code example #8
File: plot.py  Project: fagan2888/Project_Q
    # Get the current slider values
    x_start = datetime.fromtimestamp(date_slider.value[0] / 1000)
    x_end = datetime.fromtimestamp(date_slider.value[1] / 1000)
    x_start = pd.to_datetime(x_start)
    x_end = pd.to_datetime(x_end)
    #print(x_start)
    #print(x_end)
    # Generate new data
    new_df = df[(df['x'] >= x_start) & (df['x'] <= x_end)]

    new_df.loc[:, 'port'] = (new_df['port'].pct_change().fillna(0) +
                             1).cumprod() * 100
    new_df.loc[:, 'bm'] = (new_df['bm'].pct_change().fillna(0) +
                           1).cumprod() * 100
    new_df.loc[:, 'longOnly'] = (new_df['longOnly'].pct_change().fillna(0) +
                                 1).cumprod() * 100
    new_df.loc[:, 'ER_port'] = new_df['port'] - new_df['bm']
    new_df.loc[:, 'ER_long'] = new_df['port'] - new_df['longOnly']
    new_df.loc[:, 'dd'] = drawdown(new_df['port'].values).values
    new_df = new_df.reset_index().iloc[:, 1:]
    newdata = ColumnDataSource(new_df)
    source.data = newdata.data


date_slider.on_change('value', update_data)
plots = column(p1, p2, date_slider)
panel_1 = Panel(child=plots, title='Panel 1')
tabs = Tabs(tabs=[panel_1, panel_2])
curdoc().add_root(tabs)
curdoc().title = "DateRangeSlider Example"
Code example #9
# No figure will be generated in this exercise. Instead, you will use these panels in the next exercise to build and display a tabbed layout.

# Import Panel from bokeh.models.widgets
from bokeh.models.widgets import Panel

# Create tab1 from plot p1: tab1
tab1 = Panel(child=p1, title='Latin America')

# Create tab2 from plot p2: tab2
tab2 = Panel(child=p2, title='Africa')

# Create tab3 from plot p3: tab3
tab3 = Panel(child=p3, title='Asia')

# Create tab4 from plot p4: tab4
tab4 = Panel(child=p4, title='Europe')

# Displaying tabbed layouts
# Tabbed layouts are collections of Panel objects. Using the figures and Panels from the previous two exercises, you'll create a tabbed layout to change the region in the fertility vs female literacy plots.
# Your job is to create the layout using Tabs() and assign the tabs keyword argument to your list of Panels. The Panels have been created for you as tab1, tab2, tab3 and tab4.
# After you've displayed the figure, explore the tabs you just added! The "Pan", "Box Zoom" and "Wheel Zoom" tools are also all available as before.
# Import Tabs from bokeh.models.widgets
from bokeh.models.widgets import Tabs

# Create a Tabs layout: layout
layout = Tabs(tabs=[tab1, tab2, tab3, tab4])

# Specify the name of the output_file and show the result
output_file('tabs.html')
show(layout)
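A self-contained miniature of the same Panel/Tabs pattern, with stand-in figures so it runs outside the course environment.

from bokeh.io import output_file, show
from bokeh.models.widgets import Panel, Tabs
from bokeh.plotting import figure

p1 = figure(title='Latin America')
p1.circle([1, 2, 3], [3, 1, 2])
p2 = figure(title='Africa')
p2.line([1, 2, 3], [2, 3, 1])

layout = Tabs(tabs=[Panel(child=p1, title='Latin America'),
                    Panel(child=p2, title='Africa')])
output_file('tabs.html')
show(layout)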
Code example #10
File: plotters.py  Project: iancrossfield/PandExo
def create_component_jwst(result_dict):
    """Generate front end plots JWST
    
    Function that is responsible for generating the front-end interactive plots for JWST.

    Parameters 
    ----------
    result_dict : dict 
        the dictionary returned from a PandExo run
    
    Returns
    -------
    tuple 
        A tuple containing `(script, div)`, where the `script` is the
        front-end javascript required, and `div` is a dictionary of plot
        objects.
    """
    noccultations = result_dict['timing']['Number of Transits']

    # select the tools we want
    TOOLS = "pan,wheel_zoom,box_zoom,resize,reset,save"

    #Define units for x and y axis
    punit = result_dict['input']['Primary/Secondary']
    p = 1.0
    if punit == 'fp/f*': p = -1.0
    else: punit = '(' + punit + ')^2'

    if result_dict['input']['Calculation Type'] == 'phase_spec':
        x_axis_label = 'Time (secs)'
    else:
        x_axis_label = 'Wavelength [microns]'

    flux_out = result_dict['RawData']['flux_out']
    flux_in = result_dict['RawData']['flux_in']
    var_tot = result_dict['RawData']['var_out'] + result_dict['RawData'][
        'var_in']

    x = result_dict['FinalSpectrum']['wave']
    y = result_dict['FinalSpectrum']['spectrum_w_rand']
    err = result_dict['FinalSpectrum']['error_w_floor']

    y_err = []
    x_err = []
    for px, py, yerr in zip(x, y, err):
        x_err.append((px, px))
        y_err.append((py - yerr, py + yerr))

    source = ColumnDataSource(data=dict(x=x,
                                        y=y,
                                        y_err=y_err,
                                        x_err=x_err,
                                        err=err,
                                        flux_out=flux_out,
                                        flux_in=flux_in,
                                        var_tot=var_tot,
                                        p=var_tot * 0 + p,
                                        nocc=var_tot * 0 + noccultations))
    original = ColumnDataSource(data=dict(x=x,
                                          y=y,
                                          y_err=y_err,
                                          x_err=x_err,
                                          err=err,
                                          flux_out=flux_out,
                                          flux_in=flux_in,
                                          var_tot=var_tot))

    ylims = [
        min(result_dict['OriginalInput']['model_spec']) -
        0.1 * min(result_dict['OriginalInput']['model_spec']),
        0.1 * max(result_dict['OriginalInput']['model_spec']) +
        max(result_dict['OriginalInput']['model_spec'])
    ]
    xlims = [
        min(result_dict['FinalSpectrum']['wave']),
        max(result_dict['FinalSpectrum']['wave'])
    ]

    plot_spectrum = Figure(
        plot_width=800,
        plot_height=300,
        x_range=xlims,
        y_range=ylims,
        tools=TOOLS,  #responsive=True,
        x_axis_label=x_axis_label,
        y_axis_label=punit,
        title="Original Model with Observation")

    plot_spectrum.line(result_dict['OriginalInput']['model_wave'],
                       result_dict['OriginalInput']['model_spec'],
                       color="black",
                       alpha=0.5,
                       line_width=4)

    plot_spectrum.circle('x', 'y', source=source, line_width=3, line_alpha=0.6)
    plot_spectrum.multi_line('x_err', 'y_err', source=source)

    callback = CustomJS(args=dict(source=source, original=original),
                        code="""
            // Grab some references to the data
            var sdata = source.get('data');
            var odata = original.get('data');

            // Create copies of the original data, store them as the source data
            sdata['x'] = odata['x'].slice(0);
            sdata['y'] = odata['y'].slice(0);

            sdata['y_err'] = odata['y_err'].slice(0);
            sdata['x_err'] = odata['x_err'].slice(0);
            sdata['err'] = odata['err'].slice(0);

            sdata['flux_out'] = odata['flux_out'].slice(0);
            sdata['flux_in'] = odata['flux_in'].slice(0);
            sdata['var_tot'] = odata['var_tot'].slice(0);

            // Create some variables referencing the source data
            var x = sdata['x'];
            var y = sdata['y'];
            var y_err = sdata['y_err'];
            var x_err = sdata['x_err'];
            var err = sdata['err'];
            var p = sdata['p'];
            var og_ntran = sdata['nocc']

            var flux_out = sdata['flux_out'];
            var flux_in = sdata['flux_in'];
            var var_tot = sdata['var_tot'];

            var f = wbin.get('value');
            var ntran = ntran.get('value');

            var wlength = Math.pow(10.0,f);

            var ind = [];
            ind.push(0);
            var start = 0;


            for (i = 0; i < x.length-1; i++) {
                if (x[i+1] - x[start] >= wlength) {
                    ind.push(i+1);
                    start = i;
                }
            }

            if (ind[ind.length-1] != x.length) {
                ind.push(x.length);
            }

            var xout = [];


            var foutout = [];
            var finout = [];
            var varout = [];

            var xslice = []; 


            var foutslice = [];
            var finslice = [];
            var varslice = [];

            function add(a, b) {
                return a+b;
            }

            for (i = 0; i < ind.length-1; i++) {
                xslice = x.slice(ind[i],ind[i+1]);

                foutslice = flux_out.slice(ind[i],ind[i+1]);
                finslice = flux_in.slice(ind[i],ind[i+1]);
                varslice = var_tot.slice(ind[i],ind[i+1]);

                xout.push(xslice.reduce(add, 0)/xslice.length);
                foutout.push(foutslice.reduce(add, 0));
                finout.push(finslice.reduce(add, 0));
                varout.push(varslice.reduce(add, 0));

                new_err = 1.0;
                xslice = [];
                foutslice = [];
                finslice = [];
                varslice = [];
            }

            for (i = 0; i < x.length; i++) {
                new_err = Math.sqrt(varout[i]*og_ntran[i]/ntran)
                y[i] = p[i]*(foutout[i]-finout[i]+ (new_err*(Math.random()-Math.random())))/foutout[i]; 
                x[i] = xout[i];
                x_err[i][0] = xout[i];
                x_err[i][1] = xout[i];
                y_err[i][0] = y[i] + (new_err/foutout[i]);
                y_err[i][1] = y[i] -(new_err/foutout[i]);            
            }

            source.trigger('change');
        """)

    sliderWbin = Slider(title="binning",
                        value=np.log10(x[1] - x[0]),
                        start=np.log10(x[1] - x[0]),
                        end=np.log10(max(x) / 2.0),
                        step=.05,
                        callback=callback)
    callback.args["wbin"] = sliderWbin
    sliderTrans = Slider(title="Num Trans",
                         value=noccultations,
                         start=1,
                         end=50,
                         step=1,
                         callback=callback)
    callback.args["ntran"] = sliderTrans
    layout = column(row(sliderWbin, sliderTrans), plot_spectrum)

    #out of transit 2d output
    out = result_dict['PandeiaOutTrans']

    # Flux 1d
    x, y = out['1d']['extracted_flux']
    x = x[~np.isnan(y)]
    y = y[~np.isnan(y)]

    plot_flux_1d1 = Figure(tools=TOOLS,
                           x_axis_label='Wavelength [microns]',
                           y_axis_label='Flux (e/s)',
                           title="Out of Transit Flux Rate",
                           plot_width=800,
                           plot_height=300)
    plot_flux_1d1.line(x, y, line_width=4, alpha=.7)
    tab1 = Panel(child=plot_flux_1d1, title="Total Flux")

    # BG 1d
    x, y = out['1d']['bg']
    y = y[~np.isnan(y)]
    x = x[~np.isnan(y)]
    plot_bg_1d1 = Figure(tools=TOOLS,
                         x_axis_label='Wavelength [microns]',
                         y_axis_label='Flux (e/s)',
                         title="Background",
                         plot_width=800,
                         plot_height=300)
    plot_bg_1d1.line(x, y, line_width=4, alpha=.7)
    tab2 = Panel(child=plot_bg_1d1, title="Background Flux")

    # SNR 1d accounting for number of occultations
    x = out['1d']['sn'][0]
    y = flux_out / np.sqrt(result_dict['RawData']['var_out'])
    x = x[~np.isnan(y)]
    y = y[~np.isnan(y)]
    #y = y*np.sqrt(noccultations)
    plot_snr_1d1 = Figure(tools=TOOLS,
                          x_axis_label=x_axis_label,
                          y_axis_label='SNR',
                          title="SNR Out of Trans",
                          plot_width=800,
                          plot_height=300)
    plot_snr_1d1.line(x, y, line_width=4, alpha=.7)
    tab3 = Panel(child=plot_snr_1d1, title="SNR")

    # Error bars (ppm)

    x = result_dict['FinalSpectrum']['wave']
    y = result_dict['FinalSpectrum']['error_w_floor'] * 1e6
    x = x[~np.isnan(y)]
    y = y[~np.isnan(y)]
    ymed = np.median(y)

    plot_noise_1d1 = Figure(
        tools=TOOLS,  #responsive=True,
        x_axis_label=x_axis_label,
        y_axis_label='Error on Spectrum (PPM)',
        title="Error Curve",
        plot_width=800,
        plot_height=300,
        y_range=[0, 2.0 * ymed])
    ymed = np.median(y)
    plot_noise_1d1.circle(x, y, line_width=4, alpha=.7)
    tab4 = Panel(child=plot_noise_1d1, title="Error")

    #Not happy? Need help picking a different mode?
    plot_spectrum2 = Figure(plot_width=800,
                            plot_height=300,
                            x_range=xlims,
                            y_range=ylims,
                            tools=TOOLS,
                            x_axis_label=x_axis_label,
                            y_axis_label=punit,
                            title="Original Model",
                            y_axis_type="log")

    plot_spectrum2.line(result_dict['OriginalInput']['model_wave'],
                        result_dict['OriginalInput']['model_spec'],
                        line_width=4,
                        alpha=.7)
    tab5 = Panel(child=plot_spectrum2, title="Original Model")

    #create set of five tabs
    tabs1d = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5])

    # Detector 2d
    data = out['2d']['detector']

    xr, yr = data.shape

    plot_detector_2d = Figure(
        tools="pan,wheel_zoom,box_zoom,resize,reset,hover,save",
        x_range=[0, yr],
        y_range=[0, xr],
        x_axis_label='Pixel',
        y_axis_label='Spatial',
        title="2D Detector Image",
        plot_width=800,
        plot_height=300)

    plot_detector_2d.image(image=[data],
                           x=[0],
                           y=[0],
                           dh=[xr],
                           dw=[yr],
                           palette="Spectral11")

    #2d tabs

    #2d snr
    data = out['2d']['snr']
    data[np.isinf(data)] = 0.0
    xr, yr = data.shape
    plot_snr_2d = Figure(tools=TOOLS,
                         x_range=[0, yr],
                         y_range=[0, xr],
                         x_axis_label='Pixel',
                         y_axis_label='Spatial',
                         title="Signal-to-Noise Ratio",
                         plot_width=800,
                         plot_height=300)

    plot_snr_2d.image(image=[data],
                      x=[0],
                      y=[0],
                      dh=[xr],
                      dw=[yr],
                      palette="Spectral11")

    tab1b = Panel(child=plot_snr_2d, title="SNR")

    #saturation

    data = out['2d']['saturation']
    xr, yr = data.shape
    plot_sat_2d = Figure(tools=TOOLS,
                         x_range=[0, yr],
                         y_range=[0, xr],
                         x_axis_label='Pixel',
                         y_axis_label='Spatial',
                         title="Saturation",
                         plot_width=800,
                         plot_height=300)

    plot_sat_2d.image(image=[data],
                      x=[0],
                      y=[0],
                      dh=[xr],
                      dw=[yr],
                      palette="Spectral11")

    tab2b = Panel(child=plot_sat_2d, title="Saturation")

    tabs2d = Tabs(tabs=[tab1b, tab2b])

    result_comp = components({
        'plot_spectrum': layout,
        'tabs1d': tabs1d,
        'det_2d': plot_detector_2d,
        'tabs2d': tabs2d
    })

    return result_comp
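A hypothetical usage sketch: result_dict is assumed to be the dictionary returned by a PandExo run. components() yields one shared script block plus one div per key, and both are meant to be dropped into a page template that also loads the BokehJS resources.

script, divs = create_component_jwst(result_dict)  # result_dict from a PandExo run (assumed)
for name in ("plot_spectrum", "tabs1d", "det_2d", "tabs2d"):
    print(name, divs[name])  # each <div> anchors one plot or tab group in the page
print(script[:200])  # shared <script> block (truncated for display)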
Code example #11
          legend_label='fourier series'
          )

plot.patch('x_patch', 'y_patch', source=source_interval_patch, alpha=.2)
plot.line('x_min', 'y_minmax', source=source_interval_bound)
plot.line('x_max', 'y_minmax', source=source_interval_bound)

sample_controls = widgetbox(sample_function_type)

default_controls = column(default_function_input,default_function_period_start,default_function_period_end)

# Panels for sample functions or default functions
sample_funs = Panel(child=sample_controls, title='sample function')
default_funs = Panel(child=default_controls, title='default function')
# Add panels to tabs
fun_tabs = Tabs(tabs=[sample_funs, default_funs])
fun_tabs.on_change('active', type_input_change)  # add callback for panel tabs

# lists all the controls in our app
controls = column(degree,fun_tabs)

# initialize data
source_view.data = my_bokeh_utils.get_user_view(plot)
function_change()

# regularly update user view
curdoc().add_periodic_callback(automatic_update, 1000)
# make layout
curdoc().add_root(row(plot, controls, height=600, width=800))
curdoc().title = split(dirname(__file__))[-1].replace('_',' ').replace('-',' ')  # get path of parent directory and only use the name of the Parent Directory for the tab name. Replace underscores '_' and minuses '-' with blanks ' '
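A minimal, generic sketch of the periodic-callback pattern used above (stand-in names; not this app's actual automatic_update or layout): a callback registered with add_periodic_callback streams new points into a ColumnDataSource every 1000 ms.

import random

from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure

source = ColumnDataSource(data=dict(x=[0], y=[0.0]))
fig = figure()
fig.line('x', 'y', source=source)

def automatic_update():
    # append one new point; Bokeh pushes the change to the browser
    new_x = source.data['x'][-1] + 1
    source.stream(dict(x=[new_x], y=[random.random()]))

curdoc().add_periodic_callback(automatic_update, 1000)  # period in milliseconds
curdoc().add_root(fig)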
Code example #12
                    classes_checkbox,
                    sagittal_slice_slider,
                    coronal_slice_slider,
                    horizontal_slice_slider,
                    atlas_msg_div,
                    name='figure_control')
final = row(control, all_class_figures, name='main_layout')
atlas_tab = Panel(child=final, title='Atlas Projection')

# ANOVA tab
anova_plot = plot_anova(anova_categorical_select.value)
anova_show_message('Ready!', style={'color': 'green'})
anova_control = column(row(widgetbox(anova_categorical_select),
                           widgetbox(anova_statistic_cb)),
                       anova_msg_div,
                       name='anova_control')
anova_layout = column(anova_control, anova_plot, name='anova_layout')
anova_tab = Panel(child=anova_layout, title='ANOVA')

# Linear model tab
lm_plot = plot_linear_model_across_regions(lm_measurement_select.value)
lm_show_message('Ready!', style={'color': 'green'})
lm_control = widgetbox(lm_measurement_select, lm_msg_div, name='lm_control')
lm_layout = column(lm_control, lm_plot, name='lm_layout')
lm_tab = Panel(child=lm_layout, title='Linear Models')

tabs = Tabs(
    tabs=[subjects_tab, summary_stats_tab, atlas_tab, anova_tab, lm_tab])

curdoc().add_root(tabs)
Code example #13
plot_events_usage_tab1.toolbar.active_drag = None
plot_events_usage_tab1.toolbar_location = "left"
plot_events_usage_layout1 = gridplot([[plot_events_usage_tab1]],
                                     toolbar_options={'logo': None})
row1 = row([plot_events_usage_layout1, table1_layout])
row2 = row([plot_bar_chart_events, map_plot_layout, tools_layout])
col = column([row1, row2, image_layout])
tab2_row1 = row([plot_events_usage_tab2])

# heatmap layout
heat_map_ = gridplot([p_heat_map, p_outliers],
                     ncols=1,
                     plot_width=1300,
                     plot_height=300,
                     toolbar_location='left')

summary_layout = column([div_header_summary, text_box_summary])
table_textbox = column([div_header_table2, data_table_tab2, summary_layout])
heat_map_layout = row([heat_map_, table_textbox])
tab2_final_layout = column([tab2_row1, heat_map_layout])

# define tabs
tab1 = Panel(child=col, title="Events")
tab2 = Panel(child=tab2_final_layout, title="Usage")
tab3 = Panel(child=get_water_balance_plot(plot=0), title="Water balance")
tabs = Tabs(tabs=[tab1, tab2, tab3])

#final layout
final_layout = gridplot([[tabs]], plot_width=2500, plot_height=650)
curdoc().add_root(final_layout)
Code example #14
def affichage_proba(proba_textmining, proba_clf_inception,
                    proba_clf_svm_inception, label_proba_textmining,
                    label_proba_clf_inception, label_proba_clf_svm_inception):

    proba_et_label_text = pd.DataFrame({
        'proba_Text': proba_textmining[0],
        'label_Text': label_proba_textmining
    })
    proba_et_label_inception = pd.DataFrame({
        'proba_inception':
        proba_clf_inception[0],
        'label_inception':
        label_proba_clf_inception
    })
    proba_et_label_svm_inception = pd.DataFrame({
        'proba_SVM_inception':
        proba_clf_svm_inception[0],
        'label_SVM_inception':
        label_proba_clf_svm_inception
    })

    text = ColumnDataSource(proba_et_label_text)
    inception = ColumnDataSource(proba_et_label_inception)
    svm_inception = ColumnDataSource(proba_et_label_svm_inception)

    hover_text = HoverTool(tooltips=[("probabilité ", "@proba_Text")])
    hover_inception = HoverTool(tooltips=[("probabilité ",
                                           "@proba_inception")])
    hover_SVM_inception = HoverTool(tooltips=[("probabilité ",
                                               "@proba_SVM_inception")])

    fig1 = figure(plot_width=1000,
                  plot_height=400,
                  x_range=label_proba_textmining)
    fig1.vbar(x='label_Text',
              top='proba_Text',
              source=text,
              width=0.5,
              fill_color='#45A7E2',
              line_color='#45A7E2')
    fig1.xaxis.major_label_orientation = 0.7
    fig1.add_tools(hover_text)
    tab1 = Panel(child=fig1, title='Text_mining_proba')

    fig2 = figure(plot_width=1000,
                  plot_height=400,
                  x_range=label_proba_clf_inception)
    fig2.vbar(x='label_inception',
              top='proba_inception',
              source=inception,
              width=0.5,
              fill_color='#E74C3C',
              line_color='#E74C3C')
    fig2.xaxis.major_label_orientation = 0.7
    fig2.add_tools(hover_inception)
    tab2 = Panel(child=fig2, title='Inception_proba')

    fig3 = figure(plot_width=1000,
                  plot_height=400,
                  x_range=label_proba_clf_svm_inception)
    fig3.vbar(x='label_SVM_inception',
              top='proba_SVM_inception',
              source=svm_inception,
              width=0.5,
              fill_color='#2ECC71',
              line_color='#2ECC71')
    fig3.xaxis.major_label_orientation = 0.7
    fig3.add_tools(hover_SVM_inception)
    tab3 = Panel(child=fig3, title='SVM_inception_proba')

    onglet = Tabs(tabs=[tab1, tab2, tab3])
    show(onglet)
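A hypothetical call sketch (inputs invented for illustration): each proba_* argument is assumed to be a 2-D array-like whose first row holds the class probabilities, and each label_* a matching list of class names used for the categorical x-axis.

import numpy as np

labels = ['class_a', 'class_b', 'class_c']
affichage_proba(np.array([[0.6, 0.3, 0.1]]),  # text-mining probabilities
                np.array([[0.2, 0.5, 0.3]]),  # Inception probabilities
                np.array([[0.1, 0.2, 0.7]]),  # SVM-on-Inception probabilities
                labels, labels, labels)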
Code example #15
File: view.py  Project: rlcjj/Packages
strategy_select_tb7 = Select(title='strategy', value=strategy_data_provider.all_strategies[0],
                             options=strategy_data_provider.all_strategies)
benchmark_select_tb7 = Select(title='benchmark', value="SH000905",
                              options=["SH000905", "SH000300"])
startdate_tb7 = DatePicker(title='Start', min_date=datetime(2010, 1, 1), max_date=datetime.now(),
                           value=datetime(2010, 1, 1))
enddate_tb7 = DatePicker(title='End', min_date=datetime(2010, 1, 1), max_date=datetime.now(),
                         value=datetime(2010, 1, 1))
calculate_button_tb7 = Button(label="Calculate")
calculate_button_tb7.on_click(update_data_tb7)
brinson_attr = ColumnDataSource()
data_columns = ['类别', '收益额', '贡献度(%)@组合', '贡献度(%)@基准', '权重(%)@组合', '权重(%)@基准',
                 '权重(%)@超配', '涨幅(%)@组合', '涨幅(%)@基准',
                 '涨幅(%)@超额', '配置收益(%)', '选择收益(%)', '交互收益(%)', '超额收益(%)']
brinson_columns = []
for i, c in enumerate(data_columns):
    if i <= 1:
        brinson_columns.append(TableColumn(field=c, title=c))
    else:
        brinson_columns.append(TableColumn(field=c, title=c, formatter=NumberFormatter(format='0.00')))
brinson_table = DataTable(source=brinson_attr, columns=brinson_columns, width=1800, height=600)
widgets_tb7 = row(column(startdate_tb7, enddate_tb7), column(strategy_select_tb7, benchmark_select_tb7), calculate_button_tb7)
tab7 = Panel(child=column(widgets_tb7, brinson_table), title="Brinson")

# set layout
tabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5, tab6, tab7])
# tabs = Tabs(tabs=[tab7])
curdoc().add_root(tabs)
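update_data_tb7 is referenced above but not shown; a hedged sketch of the expected callback shape follows, where compute_brinson_attribution is a hypothetical stand-in that returns a DataFrame whose columns match data_columns.

def update_data_tb7():
    # compute_brinson_attribution is a hypothetical placeholder for the real calculation
    df = compute_brinson_attribution(
        strategy=strategy_select_tb7.value,
        benchmark=benchmark_select_tb7.value,
        start=startdate_tb7.value,
        end=enddate_tb7.value,
    )
    brinson_attr.data = ColumnDataSource.from_df(df)  # push results into the DataTable source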


Code example #16
    index_x=xp,
    C1=ffit100[0].transpose(),
    C2=ffit100[1].transpose(),
    C3=ffit100[2].transpose(),
    C4=ffit100[3].transpose(),
    C5=ffit100[4].transpose(),
    C6=ffit100[5].transpose(),
    C7=ffit100[6].transpose(),
    C8=ffit100[7].transpose(),
    C9=ffit100[8].transpose(),
    C10=ffit100[9].transpose(),
    C11=ffit100[10].transpose(),
    C12=ffit100[11].transpose(),
    C13=ffit100[12].transpose(),
    C14=ffit100[13].transpose(),
    C15=ffit100[14].transpose(),
    C16=ffit100[15].transpose(),
    C17=ffit100[16].transpose(),
    C18=ffit100[17].transpose(),
    C19=ffit100[18].transpose(),
    C20=ffit100[19].transpose(),
    C21=ffit100[20].transpose(),
    C22=ffit100[21].transpose(),
))
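# Hedged aside (not part of the original snippet): the 22 hand-written C1..C22
# keyword arguments above could be generated programmatically. A minimal sketch,
# assuming the truncated call above builds a plain data dict and that ffit100
# holds 22 coefficient arrays:
#
#     data_vis3 = {'index_x': xp}
#     data_vis3.update({'C%d' % (i + 1): ffit100[i].transpose()
#                       for i in range(22)})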
p_v3 = Line(data_collection_vis3, index='index_x',
            title="Polynomial Regression Model for fitting Human Genome Data analyzed using GSAF 2.0",
            xlabel='Position in Eigen Value Set', ylabel='Magnitude',
            width=vis_common_width, height=vis_common_height,
            legend=True, tools=vis_common_tab_tools)
tab3 = Panel(child=p_v3, title="Visualization 3 - Polynomial Regression")

tabs = Tabs(tabs=[tab1, tab2, tab3])  # Set Tabs
# show(tabs)  # Show Visualization Output

Code example #17
0
def NLD_processing(df):
    # get original column names from df
    list_columns_names = df.columns

    # change the column labels from strings to integers
    list_columns_int = []

    for number in range(0, len(df.index.values)):
        list_columns_int.append(number)

    df.columns = list_columns_int

    # change the rows labels/ indexes from strings to integers
    df['index'] = list_columns_int
    df.set_index("index", inplace=True)

    # Map edge weights onto a color palette
    color_palette = list(reversed(Viridis11[:8]))
    w_max = df.values.max()
    w_min = df.values.min()
    step = (w_max-w_min)/(len(color_palette)-1)


    colors = []
    # Create a graph with 1-way edges for faster painting
    g = nx.DiGraph()
    for row in df.index.values:
        g.add_node(row)
        for column in df.index.values:
            if  row < column:
                if (df[row][column] > 0):
                    color_index = int((df[row][column] - w_min) / step)
                    g.add_edge(row, column, weight=df[row][column], color=color_palette[color_index])
                    colors.append(color_palette[color_index])

    weights = []
    # Create a separate graph with 2-way edges only to calculate weights
    g_w = nx.DiGraph()
    for row in df.index.values:
        g_w.add_node(row)
        for column in df.index.values:
            if row != column:
                if (df[row][column] > 0):
                    color_index = int((df[row][column] - w_min) / step)
                    g_w.add_edge(row, column, weight=df[row][column], color=color_palette[color_index])
                    weights.append(df[row][column])

    # do not draw edges with different widths if the max weight is too big
    if max(weights) > 30:
        for index, w in enumerate(weights):
            weights[index] = 1

    # loop over all nodes to find neighbors and set min, max, sum and average of edge weights connected to each node
    node_w_dict = {}
    for n in list_columns_int:
        node_weight_list = []
        for nb in nx.neighbors(g_w, n):
            node_weight_list.append(nx.get_edge_attributes(g_w,'weight')[n, nb])
        len_list = len(node_weight_list)
        if len_list != 0:
            node_min_weight = min(node_weight_list)
            node_max_weight = max(node_weight_list)
            node_sum_weight = sum(node_weight_list)
            node_avr_weight = node_sum_weight / len_list
        else:
            node_min_weight = 0
            node_max_weight = 0
            node_sum_weight = 0
            node_avr_weight = 0
        node_w_dict.update({n:{'minweight':node_min_weight, 'maxweight':node_max_weight, 'avrweight':node_avr_weight, 'sumweight':node_sum_weight}})
    nx.set_node_attributes(g, node_w_dict)

    # Map node degree onto a node size between 5 and 30
    deg_node_size_list = list(range(5, 31))
    deg_max = max(list(list(zip(*g.degree))[1]))
    deg_min = min(list(list(zip(*g.degree))[1]))
    deg_step = (deg_max-deg_min)/(len(deg_node_size_list)-1)
    node_s_dict = {}
    for node, degree in g.degree:
        deg_index = int((degree - deg_min) / deg_step)
        node_s_dict.update({node: {'nodesize': deg_node_size_list[deg_index]}})
    nx.set_node_attributes(g, node_s_dict)


    # build a dictionary mapping the integer node labels back to the original column names
    mapping = {old_label:new_label for old_label, new_label in itertools.zip_longest(sorted(g.nodes()), list_columns_names, fillvalue=1)}

    # relabel the names of the nodes from integers back to strings
    nx.relabel_nodes(g, mapping, copy=False)


    # Organize common layouts' size for NLD
    NLD_width  = 730
    NLD_height = 690

    color_mapper = LinearColorMapper(palette=color_palette, low=w_min, high=w_max)
    color_bar = ColorBar(color_mapper = color_mapper, border_line_color = None, location = (0,0))


    # circular layout
    plot_circle = Plot(plot_width=NLD_width, plot_height=NLD_height,
                x_range=Range1d(-1.1, 1.1), y_range=Range1d(-1.1, 1.1))

    graph_circle = NLD_pocessing_graph(g, weights, colors, nx.circular_layout)

    NLD_add_tools(plot_circle)

    plot_circle.add_layout(color_bar, 'right')

    plot_circle.renderers.append(graph_circle)


    # spring layout
    plot_spring = Plot(plot_width=NLD_width, plot_height=NLD_height,
                x_range=Range1d(-1.1, 1.1), y_range=Range1d(-1.1, 1.1))

    graph_spring = NLD_pocessing_graph(g, weights, colors, nx.spring_layout)

    NLD_add_tools(plot_spring)

    plot_spring.add_layout(color_bar, 'right')

    plot_spring.renderers.append(graph_spring)


    # force-directed layout
    plot_fd = Plot(plot_width=NLD_width, plot_height=NLD_height,
                x_range=Range1d(-1.1, 1.1), y_range=Range1d(-1.1, 1.1))

    graph_fd = NLD_FD_pocessing_graph(g, weights, colors)

    NLD_add_tools(plot_fd)

    plot_fd.add_layout(color_bar, 'right')

    plot_fd.renderers.append(graph_fd)


    # random layout
    plot_random = Plot(plot_width=NLD_width, plot_height=NLD_height,
                x_range=Range1d(-0.1, 1.1), y_range=Range1d(-0.1, 1.1))

    graph_random = NLD_random_processing_graph(g, weights, colors, nx.random_layout)

    NLD_add_tools(plot_random)

    plot_random.add_layout(color_bar, 'right')

    plot_random.renderers.append(graph_random)


    # Create panels for each layout
    circle_panel = Panel(child=plot_circle, title='Circle layout')
    spring_panel = Panel(child=plot_spring, title='Spring layout')
    random_panel = Panel(child=plot_random, title='Random layout')
    fd_panel     = Panel(child=plot_fd,     title='Force-Directed layout')

    # Assign NLD panels to Tabs
    tabsNLD_int = Tabs(tabs=[circle_panel, spring_panel, fd_panel, random_panel])
    return tabsNLD_int
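# Hedged aside: NLD_pocessing_graph, NLD_FD_pocessing_graph, NLD_random_processing_graph
# and NLD_add_tools are not shown in this example. A minimal, self-contained sketch of
# the Bokeh/NetworkX idiom they presumably wrap (laying out a graph and rendering it as
# a GraphRenderer) is given below; the demo graph and styling are illustrative only.
import networkx as nx
from bokeh.io import show
from bokeh.models import Circle, MultiLine, Plot, Range1d
from bokeh.plotting import from_networkx  # older Bokeh: bokeh.models.graphs.from_networkx

demo_g = nx.karate_club_graph()  # any small example graph
demo_plot = Plot(plot_width=400, plot_height=400,
                 x_range=Range1d(-1.1, 1.1), y_range=Range1d(-1.1, 1.1))
demo_renderer = from_networkx(demo_g, nx.circular_layout, scale=1, center=(0, 0))
demo_renderer.node_renderer.glyph = Circle(size=8, fill_color='steelblue')
demo_renderer.edge_renderer.glyph = MultiLine(line_color='gray', line_alpha=0.6)
demo_plot.renderers.append(demo_renderer)
# show(demo_plot)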
Code example #18
0
                          menu=n_menu,
                          value="10")
top_n_clusters_dropdown = Dropdown(label="Show Top 100 Clusters",
                                   button_type="warning",
                                   menu=n_clusters_menu,
                                   value="100")
tfidf_slider = Slider(start=0,
                      end=1,
                      value=0,
                      step=.05,
                      title="Informativeness")
cval_slider = Slider(start=0, end=1, value=0, step=.05, title="Completeness")
freq_slider = Slider(start=0, end=1, value=0, step=.05, title="Linguistical")
filter_topics_tab = Panel(child=filter_topics_table, title="Filter Topics")
filter_custom_tab = Panel(child=filter_custom_table, title="Custom Trends")
filter_tabs = Tabs(tabs=[filter_topics_tab, filter_custom_tab])
search_input_box = TextInput(title="Search:", value="", width=300)
search_button = Button(label="Go",
                       button_type="success",
                       width=50,
                       css_classes=['search_button'])
clear_button = Button(label="Clear",
                      button_type="success",
                      width=50,
                      css_classes=['clear_button'])
buttons_layout = column(Div(height=0), Row(search_button, clear_button))
analyse_button = Button(label="re-analyze", button_type="success")
filter_label = Div(text="",
                   style={
                       'color': 'red',
                       'padding-bottom': '0px',
Code example #19
0
def produce_doc(doc):

    ############################################################################
    ############################ USER INTERFACE ################################

    choice = buttonbox('Click on what you want to plot.', 'Graphing Code',
        ('Proton', 'TrueBeam', 'Gulmay', 'Flexitron'))



    ############################################################################
    ###################### CONNECT TO THE DATABASE #############################

    # Tell code where the database is saved
    DatabaseLocation =  '\\\\mpb-dc101\\rtp-share$\\protons\\Work in Progress\\Christian\\Database\\Photon\\PhysicsQA_beCopy25022020.mdb'

    # Connect to the database. This connection will then be passed on to the tab
    # scripts to allow for reading from the database. Keeping it in the main
    # script to minimise redundant code.
    # Note that there may be issues here if 64-bit python tries to run a 32-bit
    # MS Access Driver.
    conn = pypyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};'
                            r'DBQ=' + DatabaseLocation + ';'
                            # A password argument may be needed here for the database
                            )

    # With the connection made run a check to find out how long it took.
    endconn = time.time()
    print('\nConnection made in: ' + str(endconn - endlib) + 'sec')


    ############################################################################
    ######################## CREATE EACH OF THE TABS ###########################

    if choice == 'TrueBeam':
        # Create each tab by running the relevant scripts
        tab1 = Photon_Output_Graph(conn)
        tab2 = Electron_Energy_Graph_Old(conn)
        tab3 = JawTravel(conn)
        # Put all the tabs into one application
        tabs = Tabs(tabs = [tab1, tab2, tab3])
    elif choice == 'Proton':
        tab1 = Photon_Output_Graph(conn)
        tab2 = Electron_Energy_Graph(conn)
        tab3 = Flexitron_Output_Graph(conn)
        tab4 = Gulmay_Output_Graph(conn)
        tab5 = JawTravel(conn)
        # Put all the tabs into one application
        tabs = Tabs(tabs = [tab1, tab2, tab3, tab4, tab5])
    elif choice == 'Gulmay':
        tab1 = Gulmay_Output_Graph(conn)
        # Put all the tabs into one application
        tabs = Tabs(tabs = [tab1])
    elif choice == 'Flexitron':
        tab1 = Photon_Output_Graph(conn)
        tab2 = Electron_Energy_Graph(conn)
        tab3 = Gulmay_Output_Graph(conn)
        tab4 = hello(conn)
        tab5 = JawTravel(conn)
        # Put all the tabs into one application
        tabs = Tabs(tabs = [tab1, tab2, tab3, tab4, tab5])
    else:
        msgbox('Error')
        exit()

    # Put all of the tabs into the document
    doc.add_root(tabs)

    # With the tabs made run a check to find out how long it took.
    endconn = time.time()
    print('\nTabs made in: ' + str(endconn - endlib) + 'sec')

    return doc
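# Hedged usage sketch (an assumption, not shown in this file): a document factory
# like produce_doc is typically served through a Bokeh Application wrapping a
# FunctionHandler, roughly:
#
#     from bokeh.application import Application
#     from bokeh.application.handlers import FunctionHandler
#     from bokeh.server.server import Server
#
#     server = Server({'/qa_graphs': Application(FunctionHandler(produce_doc))})
#     server.start()
#     server.io_loop.start()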
Code example #20
0
def plot_symbols(list_data_tuples,usr_price_features,data_src):
    """
    Input: List of tuples where (x[0] is a symbol, x[1] is a dict) and data-source
    This function returns the div element of all symbols, for given features.
    So this div element contains plots==len(symbols), 
    """
    #script_el_data=''
    #div_el_data='<h4> Time-series plot for all symbols with chosen features </h4><table style="width 50%"> <tr>'
    list_plots=[]
    TOOLS='pan,wheel_zoom,box_zoom,reset,save,box_select,crosshair'
    colors=['blue','red','green','#cc3300']
    for tpl in list_data_tuples:
        
        if 'error' not in tpl[1]:
            full_df,features=tpl[1]['data'],tpl[1]['features']
            
            # Before plotting remove the features that are not necessary (i.e, use the features that user requested for)
            # Make sure all the user requested features are there in the dataframe.
            #print usr_price_features, features,tpl[0]
            
            plot_price_features=list(set(usr_price_features).intersection(set(features)))
            
            df=full_df[plot_price_features]
            hover=HoverTool(
            tooltips=[
                ("Price",'$y')
                ]
            )
            
            
            p = figure(width=900, height=500, x_axis_type="datetime",tools=[TOOLS,hover])
            
            
            assert(len(colors)>=len(features))
            
            for (i,ftr) in enumerate(plot_price_features):
                p.line(df.index,df[ftr],legend=ftr,color=colors[i])
            
            p.title.text = "Data for %s from %s data source"%(tpl[0],data_src)
            p.legend.location = "top_left"
            p.xaxis.axis_label = 'Date'
            p.yaxis.axis_label = 'Price'
            
            tab=Panel(child=p,title=tpl[0])
            
            list_plots.append(tab)
        
        else:
            print(tpl[0])
            
            #div_el_data+='<td>'+'Ticker symbol %s not found in the database'%(tpl[0])+'</td>'

    #div_el_data+='</tr></table>'
    
    #print script_el_data, div_el_data       
    if len(list_plots)!=0:
       script_el_data, div_el_data=components(Tabs(tabs=list_plots))
    else:
        script_el_data=''
        div_el_data=''
    
    
    return script_el_data, div_el_data
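# Hedged usage sketch (feature names and data source below are made up): the
# script/div pair returned above follows the bokeh.embed.components pattern and
# is meant to be dropped into an HTML template, roughly:
#
#     script_el, div_el = plot_symbols(list_data_tuples, ['open', 'close'], 'quandl')
#     page = '<html><head>{script}</head><body>{div}</body></html>'.format(
#         script=script_el, div=div_el)
#     # the page must also include the BokehJS js/css (see bokeh.resources.CDN)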
Code example #21
0
def gen_tseries(yvar_str='deficit_gdp',
                start_year='min',
                main_df=main_df,
                recession_df=recession_df,
                note_text_list=[],
                fig_title_str='',
                fig_path=''):
    """
    This function creates a three-panel time-series plot--one panel for each
    definition of party control--for a particular variable as a percent of GDP.
    The particular variable is either deficits, receipts, or non-interest
    spending.

    Inputs:
        yvar_str (string): either 'deficit_gdp', 'receipts_gdp', or
            'spend_nonint_gdp'
    """
    # Create Variables for min and max values
    recession_data_length = len(recession_df['Peak'])
    if start_year == 'min':
        min_year = main_df['year'].min()
    else:
        min_year = int(start_year)
    main_df = main_df[main_df['year'] >= min_year]
    max_year = main_df['year'].max()
    min_yvar = main_df[yvar_str].min()
    max_yvar = main_df[yvar_str].max()

    # Create entire time series column data source for main and recession df's
    main_cds = ColumnDataSource(main_df)

    # Create Full control (WH + Sen + HouseRep) Republican control elements
    cntrl_all_rep_df = \
        main_df[(main_df['president_party'] == 'Republican') &
                (main_df['dem_senate_maj'] == 0) &
                (main_df['dem_house_maj'] == 0)]
    cntrl_all_rep_cds = ColumnDataSource(cntrl_all_rep_df)

    # Create Full control (WH + Sen + HouseRep) Democrat control elements
    cntrl_all_dem_df = \
        main_df[(main_df['president_party'] == 'Democrat') &
                (main_df['dem_senate_maj'] == 1) &
                (main_df['dem_house_maj'] == 1)]
    cntrl_all_dem_cds = ColumnDataSource(cntrl_all_dem_df)

    # Create Full control (WH + Sen + HouseRep) split control elements
    cntrl_all_split_df = \
        main_df[((main_df['president_party'] == 'Republican') &
                ((main_df['dem_senate_maj'] == 1) |
                (main_df['dem_house_maj'] == 1))) |
                ((main_df['president_party'] == 'Democrat') &
                ((main_df['dem_senate_maj'] == 0) |
                (main_df['dem_house_maj'] == 0)))]
    cntrl_all_split_cds = ColumnDataSource(cntrl_all_split_df)

    # Create Senate control (WH + Sen) Republican control elements
    cntrl_whsen_rep_df = \
        main_df[(main_df['president_party'] == 'Republican') &
                (main_df['dem_senate_maj'] == 0)]
    cntrl_whsen_rep_cds = ColumnDataSource(cntrl_whsen_rep_df)

    # Create Senate control (WH + Sen) Democrat control elements
    cntrl_whsen_dem_df = \
        main_df[(main_df['president_party'] == 'Democrat') &
                (main_df['dem_senate_maj'] == 1)]
    cntrl_whsen_dem_cds = ColumnDataSource(cntrl_whsen_dem_df)

    # Create Senate control (WH + Sen) split control elements
    cntrl_whsen_split_df = \
        main_df[((main_df['president_party'] == 'Republican') &
                (main_df['dem_senate_maj'] == 1)) |
                ((main_df['president_party'] == 'Democrat') &
                (main_df['dem_senate_maj'] == 0))]
    cntrl_whsen_split_cds = ColumnDataSource(cntrl_whsen_split_df)

    # Create House control (WH + HouseRep) Republican control elements
    cntrl_whhou_rep_df = \
        main_df[(main_df['president_party'] == 'Republican') &
                (main_df['dem_house_maj'] == 0)]
    cntrl_whhou_rep_cds = ColumnDataSource(cntrl_whhou_rep_df)

    # Create House control (WH + HouseRep) Democrat control elements
    cntrl_whhou_dem_df = \
        main_df[(main_df['president_party'] == 'Democrat') &
                (main_df['dem_house_maj'] == 1)]
    cntrl_whhou_dem_cds = ColumnDataSource(cntrl_whhou_dem_df)

    # Create House control (WH + HouseRep) split control elements
    cntrl_whhou_split_df = \
        main_df[((main_df['president_party'] == 'Republican') &
                (main_df['dem_house_maj'] == 1)) |
                ((main_df['president_party'] == 'Democrat') &
                (main_df['dem_house_maj'] == 0))]
    cntrl_whhou_split_cds = ColumnDataSource(cntrl_whhou_split_df)

    cntrl_cds_list = \
        [[cntrl_all_rep_cds, cntrl_all_dem_cds, cntrl_all_split_cds],
         [cntrl_whsen_rep_cds, cntrl_whsen_dem_cds, cntrl_whsen_split_cds],
         [cntrl_whhou_rep_cds, cntrl_whhou_dem_cds, cntrl_whhou_split_cds]]

    # Output to HTML file
    fig_title = fig_title_str
    fig_path = fig_path
    output_file(fig_path, title=fig_title)
    '''
    ---------------------------------------------------------------------------
    Create figure for each of the three party control definitions
    ---------------------------------------------------------------------------
    '''
    cntrl_str_list = ['all', 'whsen', 'whhou']
    panel_title_list = \
        ['Full control: (White House + Senate + House of Reps.)',
         'Senate control: (White House + Senate)',
         'House control: (White House + House of Reps.)']
    panel_list = []

    for k, v in enumerate(cntrl_str_list):
        # Create a figure with '% of GDP' as y-axis and year as x-axis
        fig = figure(title=fig_title,
                     plot_height=650,
                     plot_width=1100,
                     x_axis_label='Year',
                     x_range=(min_year - 1, max_year + 1),
                     y_axis_label='Percent of Gross Domestic Product',
                     y_range=(min_yvar - 3, max_yvar + 3),
                     toolbar_location=None)

        # Set title font size and axes font sizes
        fig.title.text_font_size = '15.5pt'
        fig.xaxis.axis_label_text_font_size = '12pt'
        fig.xaxis.major_label_text_font_size = '12pt'
        fig.yaxis.axis_label_text_font_size = '12pt'
        fig.yaxis.major_label_text_font_size = '12pt'

        # Modify tick intervals for X-axis and Y-axis
        fig.xaxis.ticker = SingleIntervalTicker(interval=10, num_minor_ticks=2)
        fig.xgrid.ticker = SingleIntervalTicker(interval=10)
        fig.yaxis.ticker = SingleIntervalTicker(interval=5, num_minor_ticks=5)
        fig.ygrid.ticker = SingleIntervalTicker(interval=5)

        # Create recession bars
        for x in range(0, recession_data_length):
            peak_year = recession_df['Peak'][x].year
            trough_year = recession_df['Trough'][x].year
            if (peak_year >= min_year and trough_year >= min_year):
                fig.patch(x=[peak_year, trough_year, trough_year, peak_year],
                          y=[-100, -100, max_yvar + 10, max_yvar + 10],
                          fill_color='gray',
                          fill_alpha=0.4,
                          line_width=0,
                          legend_label='Recession')
            if (peak_year == trough_year and peak_year >= min_year
                    and trough_year >= min_year):
                fig.patch(
                    x=[peak_year, trough_year + 1, trough_year + 1, peak_year],
                    y=[-100, -100, max_yvar + 10, max_yvar + 10],
                    fill_color='gray',
                    fill_alpha=0.4,
                    line_width=0,
                    legend_label='Recession')

        # Plotting the line and scatter point circles
        fig.line(x='year',
                 y=yvar_str,
                 source=main_cds,
                 color='#423D3C',
                 line_width=2)

        fig.circle(x='year',
                   y=yvar_str,
                   source=cntrl_cds_list[k][0],
                   size=10,
                   line_width=1,
                   line_color='black',
                   fill_color='red',
                   alpha=0.7,
                   muted_alpha=0.2,
                   legend_label='Republican control')

        fig.circle(x='year',
                   y=yvar_str,
                   source=cntrl_cds_list[k][1],
                   size=10,
                   line_width=1,
                   line_color='black',
                   fill_color='blue',
                   alpha=0.7,
                   muted_alpha=0.2,
                   legend_label='Democrat control')

        fig.circle(x='year',
                   y=yvar_str,
                   source=cntrl_cds_list[k][2],
                   size=10,
                   line_width=1,
                   line_color='black',
                   fill_color='green',
                   alpha=0.7,
                   muted_alpha=0.2,
                   legend_label='Split control')

        # Add information on hover
        if yvar_str == 'deficit_gdp':
            tool_str = 'Deficit / GDP'
        elif yvar_str == 'receipts_gdp':
            tool_str = 'Receipts / GDP'
        elif yvar_str == 'spend_nonint_gdp':
            tool_str = 'NonInt Spend / GDP'
        tooltips = [('Year', '@year'),
                    (tool_str, '@' + yvar_str + '{0.0}' + '%'),
                    ('President', '@president'),
                    ('White House', '@president_party'),
                    ('Rep. House Seats', '@rep_houseseats'),
                    ('Dem. House Seats', '@dem_houseseats'),
                    ('Rep. Senate Seats', '@rep_senateseats'),
                    ('Dem. Senate Seats', '@dem_senateseats')]
        hover_glyph = fig.circle(x='year',
                                 y=yvar_str,
                                 source=main_cds,
                                 size=10,
                                 alpha=0,
                                 hover_fill_color='gray',
                                 hover_alpha=0.5)
        fig.add_tools(HoverTool(tooltips=tooltips))

        # Turn off scrolling
        fig.toolbar.active_drag = None

        # Add legend
        fig.legend.location = 'bottom_center'
        fig.legend.border_line_width = 2
        fig.legend.border_line_color = 'black'
        fig.legend.border_line_alpha = 1
        fig.legend.label_text_font_size = '4mm'

        # Set legend muting click policy
        fig.legend.click_policy = 'mute'

        # Add notes below image
        for note_text in note_text_list[k]:
            caption = Title(text=note_text,
                            align='left',
                            text_font_size='4mm',
                            text_font_style='italic')
            fig.add_layout(caption, 'below')

        panel = Panel(child=fig, title=panel_title_list[k])
        panel_list.append(panel)

    # Assign the panels to Tabs
    tabs = Tabs(tabs=panel_list)

    # Display the generated figure
    # show(tabs)

    return tabs
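# Hedged usage sketch (titles and output paths below are illustrative only): the
# docstring lists three supported y-variables, so the function would typically be
# called once per variable, e.g.:
#
#     for yvar, ttl in [('deficit_gdp', 'Deficits as a percent of GDP'),
#                       ('receipts_gdp', 'Receipts as a percent of GDP'),
#                       ('spend_nonint_gdp', 'Non-interest spending as a percent of GDP')]:
#         gen_tseries(yvar_str=yvar, fig_title_str=ttl,
#                     fig_path='images/tseries_{}.html'.format(yvar),
#                     note_text_list=[[], [], []])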
Code example #22
0
File: wf_explore.py Project: CONDUITlab/wf_view
save_seg_button = Button(label='Save Segment',
                         button_type='success',
                         disabled=True)

seg_slider.on_change('value', seg_callback)
save_seg_button.on_click(save_button_cb)
show_peaks = CheckboxGroup(labels=['R Peaks'], active=[0])
rbg = RadioButtonGroup(labels=wf_classes, active=0)
plus = Button(label='+')
minus = Button(label='-')
plus.on_click(slider_plus)
minus.on_click(slider_minus)

wf_layout = layout(children=[
    show_peaks, cur_file_box,
    row(minus, seg_slider, plus), rbg, save_seg_button
])
wf_layout.children.append(p_seg)
wf_layout.children.append(p_wf_II)
wf_tab = Panel(child=wf_layout, title='Waveforms')

##################################  Bokeh Output ##################################
eng = matlab.engine.start_matlab()
eng.addpath(r'./WFDB')
# location of ecgpuwave matlab functions
eng.addpath(r'./mcode')
# combine the panels and plot
layout = Tabs(tabs=[file_tab, vs_tab, wf_tab])

curdoc().add_root(layout)
Code example #23
0
def plot(filename='longterm_dl1_check.h5'):

    # First read in the camera geometry:
    cam_description_table = \
        Table.read(filename, path='instrument/telescope/camera/LSTCam')
    camgeom = CameraGeometry.from_table(cam_description_table)
    engineering_geom = camgeom.transform_to(EngineeringCameraFrame())

    file = tables.open_file(filename)

    bokeh_output_file(Path(filename).with_suffix('.html'),
                      title='LST1 long-term DL1 data check')

    run_titles = []
    for i, run in enumerate(file.root.pixwise_runsummary.col('runnumber')):
        date = pd.to_datetime(file.root.pixwise_runsummary.col('time')[i],
                              origin='unix', unit='s')
        run_titles.append('Run {0:05d}, {date}'.\
                          format(run,
                                 date = date.strftime("%b %d %Y %H:%M:%S")))

    runsummary = pd.read_hdf(filename, 'runsummary')
    page0 = Panel()
    fig_ped_rates = show_graph(x=pd.to_datetime(runsummary['time'],
                                                origin='unix', unit='s'),
                               y=runsummary['num_pedestals'] /
                                 runsummary['elapsed_time'],
                               xlabel='date',
                               ylabel='Interleaved pedestals rate',
                               ey=np.sqrt(runsummary['num_pedestals']) /
                                  runsummary['elapsed_time'],
                               xtype='datetime', ytype='linear',
                               point_labels=run_titles)
    fig_ff_rates = show_graph(x=pd.to_datetime(runsummary['time'],
                                               origin='unix', unit='s'),
                               y=runsummary['num_flatfield'] /
                                 runsummary['elapsed_time'],
                               xlabel='date',
                               ylabel='Interleaved flat field rate',
                               ey=np.sqrt(runsummary['num_flatfield']) /
                                  runsummary['elapsed_time'],
                               xtype='datetime', ytype='linear',
                               point_labels=run_titles)
    fig_cosmic_rates = show_graph(x=pd.to_datetime(runsummary['time'],
                                                   origin='unix', unit='s'),
                                  y=runsummary['num_cosmics'] /
                                  runsummary['elapsed_time'],
                                  xlabel='date',
                                  ylabel='Cosmics rate',
                                  ey=np.sqrt(runsummary['num_cosmics']) /
                                     runsummary['elapsed_time'],
                                  xtype='datetime', ytype='linear',
                                  point_labels=run_titles)
    fig_muring_rates = show_graph(x=pd.to_datetime(runsummary['time'],
                                                   origin='unix', unit='s'),
                                  y=runsummary['num_contained_mu_rings'] /
                                  runsummary['elapsed_time'],
                                  xlabel='date',
                                  ylabel='Contained mu-rings rate',
                                  ey=np.sqrt(runsummary[
                                                 'num_contained_mu_rings']) /
                                                 runsummary['elapsed_time'],
                                  xtype='datetime', ytype='linear',
                                  point_labels=run_titles)

    pad_width = 550
    pad_height = 350
    row1 = [fig_ped_rates, fig_ff_rates]
    row2 = [fig_cosmic_rates, fig_muring_rates]
    grid0 = gridplot([row1, row2], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page0.child = grid0
    page0.title = 'Event rates'

    page0b = Panel()
    altmin = np.rad2deg(runsummary['min_altitude'])
    altmean = np.rad2deg(runsummary['mean_altitude'])
    altmax = np.rad2deg(runsummary['max_altitude'])
    fig_altitude = show_graph(x=pd.to_datetime(runsummary['time'],
                                               origin='unix', unit='s'),
                              y=altmean,
                              xlabel='date',
                              ylabel='Telescope altitude (mean, min, max)',
                              eylow=altmean-altmin, eyhigh=altmax-altmean,
                              xtype='datetime', ytype='linear',
                              point_labels=run_titles)
    fig_altitude.y_range = Range1d(altmin.min()*0.95, altmax.max()*1.05)
    row1 = [fig_altitude]
    grid0b = gridplot([row1], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page0b.child = grid0b
    page0b.title = 'Pointing'

    page1 = Panel()
    pad_width = 350
    pad_height = 370
    mean = []
    stddev = []
    for item in file.root.pixwise_runsummary.col('ped_pix_charge_mean'):
        mean.append(item)
    for item in file.root.pixwise_runsummary.col('ped_pix_charge_stddev'):
        stddev.append(item)
    row1 = show_camera(np.array(mean), engineering_geom, pad_width,
                       pad_height, 'Pedestals mean charge',
                       run_titles)
    row2 = show_camera(np.array(stddev), engineering_geom, pad_width,
                       pad_height, 'Pedestals charge std dev',
                       run_titles)
    grid1 = gridplot([row1, row2], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page1.child = grid1
    page1.title = 'Interleaved pedestals'

    page2 = Panel()
    mean = []
    stddev = []
    for item in file.root.pixwise_runsummary.col('ff_pix_charge_mean'):
        mean.append(item)
    for item in file.root.pixwise_runsummary.col('ff_pix_charge_stddev'):
        stddev.append(item)
    row1 = show_camera(np.array(mean), engineering_geom, pad_width,
                       pad_height, 'Flat-Field mean charge (pe)', run_titles)
    row2 = show_camera(np.array(stddev), engineering_geom, pad_width,
                       pad_height, 'Flat-Field charge std dev (pe)', run_titles)
    grid2 = gridplot([row1, row2], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page2.child = grid2
    page2.title = 'Interleaved flat field, charge'

    page3 = Panel()
    mean = []
    stddev = []
    for item in file.root.pixwise_runsummary.col('ff_pix_rel_time_mean'):
        mean.append(item)
    for item in file.root.pixwise_runsummary.col('ff_pix_rel_time_stddev'):
        stddev.append(item)
    row1 = show_camera(np.array(mean), engineering_geom, pad_width,
                       pad_height, 'Flat-Field mean relative time (ns)',
                       run_titles, showlog=False)
    row2 = show_camera(np.array(stddev), engineering_geom, pad_width,
                       pad_height, 'Flat-Field rel. time std dev (ns)',
                       run_titles, showlog=False)
    grid3 = gridplot([row1, row2], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page3.child = grid3
    page3.title = 'Interleaved flat field, time'

    page4 = Panel()
    pulse_fraction_above_10 = []
    pulse_fraction_above_30 = []
    for item in file.root.pixwise_runsummary.col(
            'cosmics_pix_fraction_pulses_above10'):
        pulse_fraction_above_10.append(item)
    for item in file.root.pixwise_runsummary.col(
            'cosmics_pix_fraction_pulses_above30'):
        pulse_fraction_above_30.append(item)

    row1 = show_camera(np.array(pulse_fraction_above_10), engineering_geom,
                       pad_width, pad_height,
                       'Cosmics, fraction of >10pe pulses', run_titles)
    row2 = show_camera(np.array(pulse_fraction_above_30), engineering_geom,
                       pad_width, pad_height,
                       'Cosmics, fraction of >30pe pulses', run_titles)

    grid4 = gridplot([row1, row2], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page4.child = grid4
    page4.title = 'Cosmics'

    file.close()

    page5 = Panel()
    pad_width = 550
    pad_height = 280
    fig_mu_effi = show_graph(x=pd.to_datetime(runsummary['time'], origin='unix',
                                              unit='s'),
                             y=runsummary['mu_effi_mean'],
                             xlabel='date',
                             ylabel='telescope efficiency from mu-rings',
                             ey=runsummary['mu_effi_stddev'] / np.sqrt(
                                     runsummary['num_contained_mu_rings']),
                             xtype='datetime', ytype='linear',
                             point_labels=run_titles)
    fig_mu_effi.y_range = Range1d(0.,1.1*np.max(runsummary['mu_effi_mean']))

    fig_mu_width = show_graph(x=pd.to_datetime(runsummary['time'],
                                               origin='unix', unit='s'),
                              y=runsummary['mu_width_mean'],
                              xlabel='date',
                              ylabel='muon ring width (deg)',
                              ey=runsummary['mu_width_stddev'] / np.sqrt(
                                      runsummary['num_contained_mu_rings']),
                              xtype='datetime', ytype='linear',
                              point_labels=run_titles)
    fig_mu_width.y_range = Range1d(0.,1.1*np.max(runsummary['mu_width_mean']))

    fig_mu_intensity = show_graph(
        x=pd.to_datetime(runsummary['time'], origin='unix', unit='s'),
        y=runsummary['mu_intensity_mean'], xlabel='date',
        ylabel='mean muon ring intensity (p.e.)',
        xtype='datetime', ytype='linear', point_labels=run_titles)
    fig_mu_intensity.y_range = \
        Range1d(0., 1.1 * np.max(runsummary['mu_intensity_mean']))

    fig_mu_hg_peak = show_graph(
        x=pd.to_datetime(runsummary['time'], origin='unix', unit='s'),
        y=runsummary['mu_hg_peak_sample_mean'], xlabel='date',
        ey=runsummary['mu_hg_peak_sample_stddev'],
        ylabel='HG global peak sample id (mean&RMS)',
        xtype='datetime', ytype='linear', point_labels=run_titles)
    fig_mu_hg_peak.y_range = Range1d(0., 38.)
    fig_mu_lg_peak = show_graph(
        x=pd.to_datetime(runsummary['time'], origin='unix', unit='s'),
        y=runsummary['mu_lg_peak_sample_mean'], xlabel='date',
        ey=runsummary['mu_lg_peak_sample_stddev'],
        ylabel='LG global peak sample id (mean&RMS)',
        xtype='datetime', ytype='linear', point_labels=run_titles)
    fig_mu_lg_peak.y_range = Range1d(0., 38.)
    row1 = [fig_mu_effi, fig_mu_width]
    row2 = [fig_mu_intensity]
    row3 = [fig_mu_hg_peak, fig_mu_lg_peak]

    grid5 = gridplot([row1, row2, row3], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page5.child = grid5
    page5.title = "Muons"

    page6 = Panel()
    pad_width = 550
    pad_height = 350
    fig_ped = show_graph(x=pd.to_datetime(runsummary['time'],
                                          origin='unix',
                                          unit='s'),
                         y=runsummary['ped_charge_mean'],
                         xlabel='date',
                         ylabel='Camera-averaged pedestal charge (pe/pixel)',
                         ey=runsummary['ped_charge_mean_err'],
                         xtype='datetime', ytype='linear',
                         point_labels=run_titles)
    fig_ped.y_range = Range1d(0.,1.1*np.max(runsummary['ped_charge_mean']))

    fig_ped_stddev = show_graph(x=pd.to_datetime(runsummary['time'],
                                                 origin='unix',
                                                 unit='s'),
                                y=runsummary['ped_charge_stddev'],
                                xlabel='date',
                                ylabel='Camera-averaged pedestal charge std '
                                       'dev (pe/pixel)',
                                xtype='datetime', ytype='linear',
                                point_labels=run_titles)
    fig_ped_stddev.y_range = \
        Range1d(0.,1.1*np.max(runsummary['ped_charge_stddev']))

    row1 = [fig_ped, fig_ped_stddev]
    grid6 = gridplot([row1], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page6.child = grid6
    page6.title = "Interleaved pedestals, averages"

    page7 = Panel()
    pad_width = 550
    pad_height = 280
    fig_flatfield = show_graph(x=pd.to_datetime(runsummary['time'],
                                                origin='unix',
                                                unit='s'),
                               y=runsummary['ff_charge_mean'],
                               xlabel='date',
                               ylabel='Cam-averaged FF Q (pe/pixel)',
                               ey=runsummary['ff_charge_mean_err'],
                               xtype='datetime', ytype='linear',
                               point_labels=run_titles)
    fig_flatfield.y_range = Range1d(0.,1.1*np.max(runsummary['ff_charge_mean']))

    fig_ff_stddev = show_graph(x=pd.to_datetime(runsummary['time'],
                                                origin='unix',
                                                unit='s'),
                               y=runsummary['ff_charge_stddev'],
                               xlabel='date',
                               ylabel='Cam-averaged FF Q std '
                                      'dev (pe/pixel)',
                               xtype='datetime', ytype='linear',
                               point_labels=run_titles)
    fig_ff_stddev.y_range = \
        Range1d(0.,1.1*np.max(runsummary['ff_charge_stddev']))

    fig_ff_time = show_graph(x=pd.to_datetime(runsummary['time'],
                                              origin='unix',
                                              unit='s'),
                             y=runsummary['ff_time_mean'],
                             xlabel='date',
                             ylabel='Cam-averaged FF time (ns)',
                             ey=runsummary['ff_time_mean_err'],
                             xtype='datetime', ytype='linear',
                             point_labels=run_titles)

    fig_ff_time_std = show_graph(x=pd.to_datetime(runsummary['time'],
                                                  origin='unix',
                                                  unit='s'),
                                 y=runsummary['ff_time_stddev'],
                                 xlabel='date',
                                 ylabel='Cam-averaged FF t std '
                                        'dev (ns)',
                                 xtype='datetime', ytype='linear',
                                 point_labels=run_titles)
    fig_ff_rel_time_std = show_graph(x=pd.to_datetime(runsummary['time'],
                                                      origin='unix',
                                                      unit='s'),
                                     y=runsummary['ff_rel_time_stddev'],
                                     xlabel='date',
                                     ylabel='Cam-averaged FF '
                                            'rel. pix t std dev (ns)',
                                     xtype='datetime', ytype='linear',
                                     point_labels=run_titles)
    fig_ff_rel_time_std.y_range = \
        Range1d(0., np.max([1., runsummary['ff_rel_time_stddev'].max()]))

    row1 = [fig_flatfield, fig_ff_stddev]
    row2 = [fig_ff_time, fig_ff_time_std]
    row3 = [fig_ff_rel_time_std]

    grid7 = gridplot([row1, row2, row3], sizing_mode=None, plot_width=pad_width,
                     plot_height=pad_height)
    page7.child = grid7
    page7.title = "Interleaved FF, averages"

    tabs = Tabs(tabs=[page0, page0b, page1, page2, page3, page4, page5, page6,
                      page7])
    show(column(Div(text='<h1> Long-term DL1 data check </h1>'), tabs))
Code example #24
0
def plot_2T(date_time, stations, models):
    """

    :param date_time: 必须是'YYMMDDHH'形式
    :param stations:
    :param models:
    :return:
    """
    tabs = []
    output_file('D:/MICAPSData/TempForecast/2T_Forecast.html',
                title=u'2米温度预报',
                mode='inline')

    names = [name for name in stations] + ['date_time_X', 'date_time_str']
    tools_to_show = 'hover,box_zoom,pan,save,resize,reset,wheel_zoom'
    colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple', 'pink']

    for model in models:

        # Handle the 'YYYYMMDDHH' form used by u'河南WRF_RUC'
        if model == u'河南WRF_RUC':
            date_time_condition = (
                datetime.datetime.strptime('20' + date_time, '%Y%m%d%H') -
                datetime.timedelta(hours=8)).strftime('%Y%m%d%H')
        else:
            date_time_condition = date_time

        data = []
        files_list = searchProductFiles(date_time_condition, models[model])

        for each in files_list:
            d = Diamond4(each)
            lon_lat_s = [stations[name][1] for name in stations]
            extracted_values = d.IDW(lon_lat_s)

            # Build the time index
            date_time_index = d.valid_time
            if model in [
                    u'河南WRF_RUC', u'GRAPES_GFS', u'GRAPES_MESO', u'T639粗'
            ]:
                date_time_index += datetime.timedelta(hours=8)

            # Note: bokeh converts local datetimes on the X axis to UTC; to avoid that shift, add 8 h to the local time (Beijing time is UTC+8)
            extracted_values.extend([
                date_time_index + datetime.timedelta(hours=8),
                date_time_index.strftime("%m/%d %Hh")
            ])
            data.append(pd.DataFrame(extracted_values, index=names).T)

        # If there is no data, skip this model to avoid errors
        if not data:
            continue

        df = pd.concat(data).sort_values('date_time_X', ascending=False)
        del data

        n_series = len(df)

        p = figure(plot_width=1920 - 140,
                   plot_height=1200 - 250,
                   x_axis_type="datetime",
                   tools=tools_to_show,
                   active_scroll="wheel_zoom")

        # Draw a time-series curve for each station
        for name, color in zip(stations, colors):
            source = ColumnDataSource(
                data={
                    'dateX': df['date_time_X'],
                    'v': df[name],
                    'dateX_str': df['date_time_str'],
                    'name': [name for n in xrange(n_series)]
                })

            p.line('dateX', 'v', color=color, legend=name, source=source)
            circle = p.circle('dateX',
                              'v',
                              fill_color="white",
                              size=8,
                              color=color,
                              legend=name,
                              source=source)
            p.tools[0].renderers.append(circle)

        # Legend display policy: click to hide a series
        p.legend.click_policy = "hide"
        # Hover tooltip labels
        hover = p.select(dict(type=HoverTool))
        hover.tooltips = [(u"温度", "@v{0.0}"), (u"站点", "@name"),
                          (u"时间", "@dateX_str")]
        hover.mode = 'mouse'

        # Title settings
        if model == u'EC细 2TMax_3h':
            title = ' '.join([date_time, u'EC细', u'过去3小时2米最高温度预报'])
        elif model == u'EC细 2TMin_3h':
            title = ' '.join([date_time, u'EC细', u'过去3小时2米最低温度预报'])
        else:
            title = ' '.join([date_time, model, u'2米温度预报'])
        p.title.text = title

        p.title.align = "center"
        p.title.text_font_size = "25px"
        # p.title.background_fill_color = "#aaaaee"
        # p.title.text_color = "orange"
        p.xaxis.axis_label = u'日期/时间'
        p.yaxis.axis_label = u'温度(℃)'

        p.xaxis[0].formatter = DatetimeTickFormatter(
            hours=['%m/%d %Hh', '%m/%d %H:%M'], days=['%m/%d %Hh'])
        p.xaxis[0].ticker = DatetimeTicker(desired_num_ticks=20,
                                           num_minor_ticks=4)

        # todo: use a different day boundary depending on morning vs. afternoon runs
        # Span.location must be a number, so convert the datetime to a timestamp;
        # the factor of 1000 converts seconds to the milliseconds used by bokeh's datetime axis
        dateX = df['date_time_X'].tolist()
        n_days = (dateX[-1] - dateX[0]).days + 1
        forecast_span = [
            Span(location=time.mktime(
                (dateX[0] + datetime.timedelta(days=i) +
                 datetime.timedelta(hours=12)).timetuple()) * 1000,
                 dimension='height',
                 line_color='red',
                 line_dash='dashed',
                 line_width=2) for i in xrange(n_days)
        ]
        for span in forecast_span:
            p.add_layout(span)

        tab = Panel(child=p, title=model)
        tabs.append(tab)
    tabs = Tabs(tabs=tabs)
    save(tabs)  # saving directly is enough
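# Hedged usage sketch (the station id, coordinates and model key below are
# illustrative only): from the way the arguments are used above, `stations`
# maps a station name to a tuple whose second element is a (lon, lat) pair,
# and `models` maps a display name to whatever searchProductFiles expects, e.g.:
#
#     stations = {u'Zhengzhou': ('57083', (113.65, 34.72))}
#     models = {u'EC细': 'ECMWF_HR/2T'}
#     plot_2T('21071508', stations, models)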
Code example #25
0
File: plot.py Project: wyhzhbit/holoviews
    def initialize_plot(self, plots=None, ranges=None):
        ranges = self.compute_ranges(self.layout, self.keys[-1], None)

        plot_grid = self._compute_grid()
        passed_plots = [] if plots is None else plots
        r_offset = 0
        col_offsets = defaultdict(int)
        tab_plots = []

        for r in range(self.rows):
            # Compute row offset
            row = [(k, sp) for k, sp in self.subplots.items() if k[0] == r]
            row_padded = any(len(sp.layout) > 2 for k, sp in row)
            if row_padded:
                r_offset += 1

            for c in range(self.cols):
                subplot = self.subplots.get((r, c), None)

                # Compute column offset
                col = [(k, sp) for k, sp in self.subplots.items() if k[1] == c]
                col_padded = any(len(sp.layout) > 1 for k, sp in col)
                if col_padded:
                    col_offsets[r] += 1
                c_offset = col_offsets.get(r, 0)

                if subplot is None:
                    continue

                shared_plots = list(passed_plots) if self.shared_axes else None
                subplots = subplot.initialize_plot(ranges=ranges,
                                                   plots=shared_plots)
                nsubplots = len(subplots)

                # If tabs enabled lay out AdjointLayout on grid
                if self.tabs:
                    title = subplot.subplots['main']._format_title(
                        self.keys[-1], dimensions=False)
                    if not title:
                        title = ' '.join(self.paths[r, c])
                    if nsubplots == 1:
                        grid = subplots[0]
                    elif nsubplots == 2:
                        grid = gridplot([subplots],
                                        merge_tools=self.merge_tools,
                                        toolbar_location=self.toolbar)
                    else:
                        grid = [[subplots[2], None], subplots[:2]]
                        grid = gridplot(children=grid,
                                        merge_tools=self.merge_tools,
                                        toolbar_location=self.toolbar)
                    tab_plots.append((title, grid))
                    continue

                # Situate plot in overall grid
                if nsubplots > 2:
                    plot_grid[r + r_offset - 1][c + c_offset - 1] = subplots[2]
                plot_column = plot_grid[r + r_offset]
                if nsubplots > 1:
                    plot_column[c + c_offset - 1] = subplots[0]
                    plot_column[c + c_offset] = subplots[1]
                else:
                    plot_column[c + c_offset - int(col_padded)] = subplots[0]
                passed_plots.append(subplots[0])

        # Wrap in appropriate layout model
        kwargs = dict(sizing_mode=self.sizing_mode)
        if self.tabs:
            plots = filter_toolboxes([p for t, p in tab_plots])
            panels = [Panel(child=child, title=t) for t, child in tab_plots]
            layout_plot = Tabs(tabs=panels)
        else:
            plot_grid = layout_padding(plot_grid, self.renderer)
            plot_grid = filter_toolboxes(plot_grid)
            plot_grid, width = pad_plots(plot_grid)
            layout_plot = gridplot(children=plot_grid,
                                   width=width,
                                   toolbar_location=self.toolbar,
                                   merge_tools=self.merge_tools,
                                   **kwargs)

        title = self._get_title(self.keys[-1])
        if title:
            self.handles['title'] = title
            layout_plot = Column(title, layout_plot, **kwargs)

        self.handles['plot'] = layout_plot
        self.handles['plots'] = plots

        self._update_callbacks(layout_plot)
        if self.shared_datasource:
            self.sync_sources()

        self.drawn = True

        return self.handles['plot']
Code example #26
0
matplotlib.use('Agg')
from bokeh.models.widgets import Panel, Tabs
from bokeh.io import show, curdoc
from bokeh.layouts import layout
import rubric
import interactive_mcdm
import features_checklist
import instructions
import tool_descriptions

doc = curdoc()

doc.title = "QDAS Assessment Toolkit"

rubric = rubric.p
mcdm = interactive_mcdm.app_layout
features = features_checklist.p
instr = instructions.app_layout
desc = tool_descriptions.ToolDesc().app_layout

instr_tab = Panel(child=instr, title="Introduction")
tab3 = Panel(child=rubric, title="Rubric")
tab2 = Panel(child=features, title="Features Checklist")
tab4 = Panel(child=mcdm, title="MCDM")
tab1 = Panel(child=desc, title="Briefs")

tabs = Tabs(tabs=[instr_tab, tab1, tab2, tab3, tab4], width=475)

app_layout = layout([tabs])
doc.add_root(app_layout)
Code example #27
0
def modify_doc(doc):
    
    # function to make a dataset for histogram based on a list of set filters

    valid_bin_widths = ['day', 'week', 'month']
    default_bin_width='week'
    slider_date_end = datetime.date.today()
    slider_date_start = slider_date_end - relativedelta(months=6, day=1)  # at most 6 months ago
    
    # return delta and align for a range according to bin_width
    # bin_width is one of 'week', 'month', 'day'
    # delta can be used to move a date to the next bin, align to
    # snap a date back to the start of the current bin
    def align_range(bin_width):        
        if bin_width == 'week':
            delta = relativedelta(weeks=1)
            align = relativedelta(weekday=Monday(-1))

        elif bin_width == 'month':
            delta = relativedelta(months=1)
            align = relativedelta(day=1)
        else:
            #nothing special to do for 'day'
            delta = relativedelta(days=1)
            align = relativedelta()

        return delta, align
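    # Illustration (comment only): for bin_width == 'week',
    #   datetime.date(2021, 3, 10) + align -> datetime.date(2021, 3, 8)   (snapped back to Monday)
    #   datetime.date(2021, 3, 8)  + delta -> datetime.date(2021, 3, 15)  (start of the next bin)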


    def make_dataset(endpoint, borough_list, date_start, date_end, bin_width):
        delta, align = align_range(bin_width)
        date_start += align
        date_end += align + delta
        df = query_dates(endpoint, date_start, date_end)

        def histograms():
            prev_buckets = None
            for i, borough_name in enumerate(borough_list): 
                subset = df[df['borough'] == borough_name]
 
                edges = list(time_range(date_start, date_end, delta))
                buckets = subset['estimated_job_costs'].groupby(lambda x: x - align)\
                                                       .agg(sum=np.sum, 
                                                            mean=np.mean, 
                                                            amax=np.max, 
                                                            len=len)

                max_subset = subset.groupby(lambda x: x-align)\
                                   .apply(lambda rows: rows.iloc[np.argmax(rows['estimated_job_costs'].values)])
                
                # it is possible that buckets do not cover the full range, so we create 
                # another data frame for the full range and fill it with 0 
                tmp=pd.DataFrame(index=edges, columns=buckets.columns)
                tmp.fillna(0, inplace=True)

                # then we copy the subset shared with the other dataframe
                tmp.loc[buckets.index & tmp.index ] = buckets.loc[buckets.index & tmp.index]
                buckets = tmp
            
                # extend edges with an extra 'after-the-end' element
                edges = edges + [edges[-1] + delta]                    
                buckets = buckets.sort_index()
                # groupby.agg creates one column per aggregate
                buckets['sum'] /= 10**6
                buckets['mean'] /= 1000
                buckets['amax'] /= 1000
                # nothing to do with buckets['len']
                buckets['left'] = edges[:-1]
                buckets['right'] = edges[1:]
                buckets['color'] = Category20_16[i]
                buckets['name'] = borough_name

                for c, format in col_meta.items():
                    if prev_buckets is not None:
                        buckets[c + '_top'] =  buckets[c] + prev_buckets[c + '_top']
                        buckets[c + '_bottom'] =  prev_buckets[c + '_top']
                    else:
                        buckets[c + '_top'] = buckets[c]
                        buckets[c + '_bottom'] = 0
                    buckets['f_' + c] = buckets[c].apply(lambda x: format%(x))
                buckets['f_period'] = buckets.index.map(lambda x: '{} - {}'.format(x.date(), (x+delta).date()))
                def f_address(rows):
                    addr = '{street_name} {house_no} {work_on_floor}'.format(**rows.to_dict())
                    return addr
                buckets['f_address'] = max_subset.apply(f_address, axis=1)
                buckets['f_job_description'] = max_subset['job_description']
                prev_buckets = buckets

                yield buckets.reset_index()

        #Dataframe to hold information
        by_borough = pd.DataFrame()
        # Overall dataframe
        all_buckets = list(histograms())
        by_borough = by_borough.append(all_buckets, sort=False)
        by_borough.sort_values(['name', 'left'], inplace=True)
        return ColumnDataSource(by_borough)

    def make_plot(src, title, y_label, tooltip, column):
        # Blank plot with correct labels
        p = figure(plot_width = 500, plot_height = 500, 
                   title = title,
                   x_axis_type='datetime',
                   sizing_mode='stretch_both',
                   x_axis_label = 'Date', y_axis_label = y_label)            
        # Quad glyphs to create a histogram
        p.quad(source = src, bottom = column +'_bottom', top = column + '_top', left = 'left', right = 'right',
               color = 'color', fill_alpha = 0.7, hover_fill_color = 'color', legend_label = 'name',
               hover_fill_alpha = 1.0, line_color = 'black')
        
                          
        if column == 'amax':
            tooltips = [('Period:','@f_period'),
                        ('Borough', '@name'), 
                        ('Address', '@f_address'),
                        ('Description', '@f_job_description'),
                        ('cost', '@f_amax')
                    ]
        else:
            tooltips = [('Period:','@f_period'),
                        ('Borough', '@name'), 
                        (tooltip, '@f_'+column)
                    ]
        
        # Hover tool with vline mode
        hover = HoverTool(tooltips=tooltips)

        p.add_tools(hover)

        # Styling
        p = style(p, col_meta[column])

        return p

    def style(p, y_format):
        # Title 
        p.title.align = 'center'
        p.title.text_font_size = '20pt'
        p.title.text_font = 'serif'

        # Axis titles
        p.xaxis.axis_label_text_font_size = '14pt'
        p.xaxis.axis_label_text_font_style = 'bold'
        p.yaxis.axis_label_text_font_size = '14pt'
        p.yaxis.axis_label_text_font_style = 'bold'

        p.yaxis.formatter = PrintfTickFormatter(format=y_format)

        # Tick labels
        p.xaxis.major_label_text_font_size = '12pt'
        p.yaxis.major_label_text_font_size = '12pt'

        return p

    
    src = ColumnDataSource()
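    # old_params is a one-element list so the nested callbacks can mutate it
    # without `nonlocal`; it caches the last parameter tuple so the dataset is
    # only rebuilt when a control actually changed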
    old_params = [None]
    def do_update():
        try:
            new_params = (approval_res, 
                          [borough_selection.labels[i] for i in borough_selection.active],
                          fixup_date(date_select.value[0]),
                          fixup_date(date_select.value[1]),
                          valid_bin_widths[binwidth_select.active])
            if new_params != old_params[0]:
                show_spinner()
                new_data = make_dataset(*new_params)
                old_params[0] = new_params

                src.data.update(new_data.data)
        except Exception:
            traceback.print_exc()

    def update(attr, old, new):
        do_update()
    
    # DateRangeSlider mouseup is broken, do nothing on change and use a timer
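    # the handler below only shows the spinner and throttles itself to one real
    # update per 0.5 s; the periodic time_update() callback registered at the
    # bottom (doc.add_periodic_callback) applies the final value and hides the spinner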
    slow_update=[time.time()]
    def update_no_op(attr, old, new):
        show_spinner()
        if time.time()-slow_update[0] < .5:
            return
        slow_update[0] = time.time()
        update(attr, old, new)
    def time_update():
        #return
        slow_update[0] = time.time()
        do_update()
        hide_spinner()
    
    spinner_text = """
    <!-- https://www.w3schools.com/howto/howto_css_loader.asp -->
    <div class="loader" >
    <style scoped>
    .loader {
        border: 16px solid #f3f3f3; /* Light grey */
        border-top: 16px solid #3498db; /* Blue */
        border-radius: 50%;
        margin: auto;
        width: 100px;
        height: 100px;
        animation: spin 2s linear infinite;
    }

    @keyframes spin {
        0% { transform: rotate(0deg); }
        100% { transform: rotate(360deg); }
    } 
    </style>
    </div>
    """
    div_spinner = Div(text="",width=120,height=120)
    def show_spinner():
        div_spinner.text = spinner_text
    def hide_spinner():
        div_spinner.text = ""

    binwidth_select = RadioButtonGroup(labels=valid_bin_widths,
                                       active=valid_bin_widths.index(default_bin_width), #index of 'week', i.e. 0
                                       sizing_mode='stretch_both')
    binwidth_select.on_change('active', update)
    
    date_default_end = slider_date_end
    date_default_start = date_default_end - relativedelta(months=1)
    date_select = DateRangeSlider(start=slider_date_start, 
                                  end=slider_date_end, 
                                  value=(date_default_start,date_default_end), 
                                  callback_policy='mouseup', # do not update until the mouse is released
                                  step=1,
                                  callback_throttle=1000,
                                  sizing_mode='stretch_both') # rebuilding is slow, so the callback fires at most every 1000 ms

    date_select.on_change('value', update_no_op)

    available_boroughs = ['QUEENS', 'MANHATTAN', 'STATEN ISLAND', 'BROOKLYN', 'BRONX']

    borough_selection = CheckboxGroup(labels=available_boroughs, active = list(range(0, len(available_boroughs))),
                                     sizing_mode='stretch_both')
    borough_selection.on_change('active', update)
    
    initial_borough = [borough_selection.labels[i] for i in borough_selection.active]
    
    # Put controls in a single element
    controls = layout([[borough_selection, binwidth_select, date_select, div_spinner]] , width=500)
    
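    # printf-style formats for each aggregate column, used for the y axis and
    # tooltips; the values were already rescaled in make_dataset
    # (sum to millions, mean and amax to thousands)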
    col_meta = { 
        'len': '%d', 
        'mean': '%dk',
        'sum': '%dM',
        'amax': '%dk'
    }
    
    data = [ ('Number of Projects', 'Total projects', 'counts', 'len'),
             ('Most Expensive Project', 'Max cost', 'cost', 'amax'),
             ('Total Project Cost', 'Total project cost', 'cost', 'sum'),
             ('Mean Project Cost', 'Mean project cost', 'cost', 'mean') ]
    do_update()
    plots = [ make_plot(src, *args) for args in data ]

    # Create a row layout
    lyt = layout([controls, plots[3]], 
                 plots[0:3])
    
    # Make a tab with the layout 
    tab = Panel(child=lyt, title = 'Histogram')
    tabs = Tabs(tabs=[tab])
    
    doc.add_periodic_callback(time_update, 1000)
    doc.add_root(tabs)
コード例 #28
0
    def dashboard_files_per_day(self):
        """Scatter of number of files per day added to ``JWQLDB``

        Parameters
        ----------
        None

        Returns
        -------
        tabs : bokeh.models.widgets.Tabs
            A widget with one tab for files added per day and one for storage usage.
        """

        source = build_table('filesystem_general')
        if not pd.isnull(self.delta_t):
            source = source[(source['date'] >= self.date - self.delta_t)
                            & (source['date'] <= self.date)]

        date_times = [
            pd.to_datetime(datetime).date()
            for datetime in source['date'].values
        ]
        source['datestr'] = [
            date_time.strftime("%Y-%m-%d") for date_time in date_times
        ]

        p1 = figure(title="Number of Files Added by Day",
                    tools="reset,hover,box_zoom,wheel_zoom",
                    tooltips="@datestr: @total_file_count",
                    plot_width=1700,
                    x_axis_label='Date',
                    y_axis_label='Number of Files Added')
        p1.line(x='date',
                y='total_file_count',
                source=source,
                color='#6C5B7B',
                line_dash='dashed',
                line_width=3)
        disable_scientific_notation(p1)
        tab1 = Panel(child=p1, title='Files Per Day')

        p2 = figure(title="Available & Used Storage",
                    tools="reset,hover,box_zoom,wheel_zoom",
                    tooltips="@datestr: used @used / available @available",
                    plot_width=1700,
                    x_axis_label='Date',
                    y_axis_label='Storage Space [Terabytes?]')
        p2.line(x='date',
                y='available',
                source=source,
                color='#F8B195',
                line_dash='dashed',
                line_width=3,
                legend_label='Available Storage')
        p2.line(x='date',
                y='used',
                source=source,
                color='#355C7D',
                line_dash='dashed',
                line_width=3,
                legend_label='Used Storage')
        disable_scientific_notation(p2)
        tab2 = Panel(child=p2, title='Storage')

        p1.xaxis.formatter = DatetimeTickFormatter(
            hours=["%d %B %Y"],
            days=["%d %B %Y"],
            months=["%d %B %Y"],
            years=["%d %B %Y"],
        )
        p1.xaxis.major_label_orientation = pi / 4

        p2.xaxis.formatter = DatetimeTickFormatter(
            hours=["%d %B %Y"],
            days=["%d %B %Y"],
            months=["%d %B %Y"],
            years=["%d %B %Y"],
        )
        p2.xaxis.major_label_orientation = pi / 4

        tabs = Tabs(tabs=[tab1, tab2])

        return tabs
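The Tabs widget returned above can be embedded in an HTML template in the usual Bokeh way. A minimal sketch, assuming `monitor` is an instance of the class that defines dashboard_files_per_day (the variable name is an assumption, not part of the original code):

from bokeh.embed import components

# build the two-tab figure and get a script/div pair for an HTML template
tabs = monitor.dashboard_files_per_day()
script, div = components(tabs)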
コード例 #29
0
"""
Displaying tabbed layouts
Tabbed layouts are collections of Panel objects. Using the figures and Panels from the previous two exercises, you'll create a tabbed layout to change the region in the fertility vs female literacy plots.

Your job is to create the layout using Tabs() and assign the tabs keyword argument to your list of Panels. The Panels have been created for you as tab1, tab2, tab3 and tab4.

After you've displayed the figure, explore the tabs you just added! The "Pan", "Box Zoom" and "Wheel Zoom" tools are also all available as before.

INSTRUCTION
-----------

Import Tabs from bokeh.models.widgets.
Create a Tabs layout called layout with tab1, tab2, tab3, and tab4.
Click 'Submit Answer' to output the file and show the figure.

"""
# Import Tabs from bokeh.models.widgets
from bokeh.models.widgets import Tabs

# Create a Tabs layout: layout
layout = Tabs(tabs=[tab1, tab2, tab3, tab4])

# Specify the name of the output_file and show the result
output_file('tabs.html')
show(layout)
コード例 #30
0
          line_width=3,
          line_alpha=0.6,
          legend_label='fourier series'
          )

plot.patch('x_patch', 'y_patch', source=source_interval_patch, alpha=.2)
plot.line('x_min', 'y_minmax', source=source_interval_bound)
plot.line('x_max', 'y_minmax', source=source_interval_bound)

sample_controls = widgetbox(sample_function_type)

default_controls = column(default_function_input,default_function_period_start,default_function_period_end)

# Panels for sample functions or default functions
sample_funs = Panel(child=sample_controls, title='sample function')
default_funs = Panel(child=default_controls, title='default function')
# Add panels to tabs
fun_tabs = Tabs(tabs=[sample_funs, default_funs])
fun_tabs.on_change('active', type_input_change)  # add callback for panel tabs

# lists all the controls in our app
controls = column(degree,fun_tabs)

# initialize data
function_change()

# regularly update user view
curdoc().add_periodic_callback(automatic_update, 100)
# make layout
curdoc().add_root(row(plot, controls, height=600, width=800))
コード例 #31
0
def backtest(start_date, end_date, hold_days, strategy, data, weight='average', benchmark=None, stop_loss=None, stop_profit=None):

    # portfolio check
    if weight not in ('average', 'price'):
        print('Backtest stop, weight should be "average" or "price", got',
              weight, 'instead')
        return None

    # get price data in order backtest
    data.date = end_date
    price = data.get('收盤價', (end_date - start_date).days)
    # start from 1 TWD at start_date,
    end = 1
    date = start_date

    # record some history
    equality = pd.Series()
    nstock = {}
    transections = pd.DataFrame()
    maxreturn = -10000
    minreturn = 10000

    def trading_day(date):
        if date not in price.index:
            temp = price.loc[date:]
            if temp.empty:
                return price.index[-1]
            else:
                return temp.index[0]
        else:
            return date

    def date_iter_periodicity(start_date, end_date, hold_days):
        date = start_date
        while date < end_date:
            yield (date), (date + datetime.timedelta(hold_days))
            date += datetime.timedelta(hold_days)

    def date_iter_specify_dates(start_date, end_date, hold_days):
        dlist = [start_date] + hold_days + [end_date]
        if dlist[0] == dlist[1]:
            dlist = dlist[1:]
        if dlist[-1] == dlist[-2]:
            dlist = dlist[:-1]
        for sdate, edate in zip(dlist, dlist[1:]):
            yield (sdate), (edate)

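    # an int hold_days means fixed-length rebalancing periods; a list means
    # explicit rebalance dates between start_date and end_date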
    if isinstance(hold_days, int):
        dates = date_iter_periodicity(start_date, end_date, hold_days)
    elif isinstance(hold_days, list):
        dates = date_iter_specify_dates(start_date, end_date, hold_days)
    else:
        print('the type of hold_dates should be list or int.')
        return None

    sdate_list = []
    edate_list = []
    returns_word = []
    return_rates = []
    rows = []
    col = {}
    sumup = {  # sumup: bundle all of the lists above into one dict
        "sdate": None,
        "edate": None,
        "returns_word": None,
        "return_rates": None
    }
    for sdate, edate in dates:

        # select stocks at date
        data.date = sdate
        stocks = strategy(data)

        # hold the stocks for hold_days day
        s = price[stocks.index & price.columns][sdate:edate].iloc[1:]

        if s.empty:
            s = pd.Series(1, index=pd.date_range(
                sdate + datetime.timedelta(days=1), edate))
        else:

            if stop_loss is not None:
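                # (s / first price - 1) * 100 is the running return since entry;
                # cumsum() > 0 stays True from the first day the loss exceeds
                # stop_loss, and shift(2) delays the exit by two rows before the
                # position is blanked out with NaN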
                below_stop = (
                    (s / s.bfill().iloc[0]) - 1)*100 < -np.abs(stop_loss)
                below_stop = (below_stop.cumsum() > 0).shift(2).fillna(False)
                s[below_stop] = np.nan

            if stop_profit is not None:
                above_stop = (
                    (s / s.bfill().iloc[0]) - 1)*100 > np.abs(stop_profit)
                above_stop = (above_stop.cumsum() > 0).shift(2).fillna(False)
                s[above_stop] = np.nan

            s.dropna(axis=1, how='all', inplace=True)

            # record transections
            bprice = s.bfill().iloc[0]
            sprice = s.apply(lambda s: s.dropna().iloc[-1])
            transections = transections.append(pd.DataFrame({
                'buy_price': bprice,
                'sell_price': sprice,
                'lowest_price': s.min(),
                'highest_price': s.max(),
                'buy_date': pd.Series(s.index[0], index=s.columns),
                'sell_date': s.apply(lambda s: s.dropna().index[-1]),
                'profit(%)': (sprice/bprice - 1) * 100
            }))

            s.ffill(inplace=True)

            # calculate equality
            # normalize and average the price of each stocks
            if weight == 'average':
                s = s/s.bfill().iloc[0]
            s = s.mean(axis=1)
            s = s / s.bfill()[0]

        col['sdate'] = sdate
        col['edate'] = edate
        col['return_word'] = returns_word
        col['return_rates'] = return_rates
        # print(sdate)
        sdate_list.append(sdate)
        edate_list.append(edate)
        # print(sdate_list)
        returns_word.append('報酬率:')
        return_rates.append(s.iloc[-1]/s.iloc[0] * 100 - 100)
        sumup = {                                       # sumup: bundle the lists above into one dict (entries currently commented out)
            # "sdate": pd.to_datetime(sdate_list),
            # "edate": pd.to_datetime(edate_list),
            # "returns_word": returns_word,
            # "return_rates": return_rates
        }

        # print('----------row新增col---------')
        # print(rows)

        df_sumup = pd.DataFrame(
            rows, columns=["start_date", "end_date", "報酬率:", "return_rates"])
        # print('----------df_sumup---------')
        # print(df_sumup)

        # print some log

        print(sdate, '-', edate,
              '報酬率: %.2f' % (s.iloc[-1]/s.iloc[0] * 100 - 100),
              '%', 'nstock', len(stocks))

        maxreturn = max(maxreturn, s.iloc[-1]/s.iloc[0] * 100 - 100)
        minreturn = min(minreturn, s.iloc[-1]/s.iloc[0] * 100 - 100)

        # plot backtest result
        ((s*end-1)*100).plot()
        equality = equality.append(s*end)
        end = (s/s[0]*end).iloc[-1]
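        # `end` carries the compounded equity into the next holding period, so
        # each period starts from the previous period's ending value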

        if math.isnan(end):
            end = 1

        # add nstock history
        nstock[sdate] = len(stocks)


########################## Added 2021-03-06 ##########################################
    # one row per holding period: [start date, end date, label, period return]
    aaa = []
    for i in range(len(return_rates)):  # "return_rates" is a list of per-period returns
        aa = [sdate_list[i], edate_list[i], "報酬率:", return_rates[i]]
        aaa.append(aa)  # aaa collects every aa row and becomes the summary table
    df_aaa = pd.DataFrame(
        aaa, columns=["sdate", "edate", "", "報酬率"], dtype="float64")
    # print(df_aaa)
    # print(pd.DataFrame(rows))

    rows.append(col)
    # print('----------row新增col---------')
    # print(rows)
    print('每次換手最大報酬 : %.2f %%' % maxreturn)
    print('每次換手最少報酬 : %.2f %%' % minreturn)
    print(stocks.index)
    dff = pd.DataFrame(stocks)
    dff.reset_index(inplace=True)
    # print(dff)


############## Added 2021-03-08 #####################################################

    import bokeh.io
    import copy
    from bokeh.plotting import figure, output_file, show, output_notebook, save
    from bokeh.resources import INLINE
    from bokeh.models import ColumnDataSource, Rect, HoverTool, Range1d, RangeTool, DateFormatter
    from bokeh.layouts import column, gridplot, row, widgetbox
    from bokeh.embed import file_html
    from bokeh.resources import CDN
    from bokeh.models.widgets import TableColumn, DataTable, Slider
    from bokeh.models.widgets import Panel, Tabs

    def colorful(a):
        a.background_fill_color = "black"
        a.title.text_color = 'white'
        a.border_fill_color = 'black'
        a.outline_line_color = 'white'
        a.xaxis.axis_line_color = 'white'
        a.xaxis.axis_label_text_color = "yellow"
        a.yaxis.axis_label_text_color = "yellow"
        a.xaxis.major_label_text_color = 'white'
        a.yaxis.major_label_text_color = 'white'
        a.xgrid.grid_line_color = 'white'
        a.ygrid.grid_line_color = 'white'
        a.xgrid.grid_line_alpha = 0.5
        a.ygrid.grid_line_alpha = 0.5

    # build the cumulative-return column 累加報酬率: keep the per-period values
    # in return_rates_copy, then turn return_rates into a running total in place
    return_rates_copy = copy.deepcopy(return_rates)

    for i in range(len(return_rates) - 1):
        return_rates[i + 1] = return_rates[i] + return_rates[i + 1]
    print(return_rates)
    df_aaa['累加報酬率'] = return_rates

    df_aaa.sdate = pd.to_datetime(df_aaa.sdate)
    df_aaa.edate = pd.to_datetime(df_aaa.edate)
    # print('--------------df_aaa  DataFrame---------------')
    # print(df_aaa)

    p = figure(
        plot_width=800, plot_height=250, x_axis_type="datetime",
        tools=["pan,wheel_zoom,box_zoom,reset,save"])
    colorful(p)
    p.yaxis.axis_label = "報酬率(%)"
    p.xaxis.axis_label = "日期"
    p.line(df_aaa["sdate"], df_aaa["累加報酬率"],
           legend_label="累加報酬率", line_color="orange", line_width=2)
    p.circle(df_aaa["sdate"], df_aaa["累加報酬率"], color="#FF69B4", size=10)
    # show(p)

    sordata = ColumnDataSource(df_aaa)

    columns = [
        TableColumn(field="sdate", title="sdate", formatter=DateFormatter()),
        TableColumn(field="edate", title="edate", formatter=DateFormatter()),
        TableColumn(field="", title=""),
        TableColumn(field="報酬率", title="報酬率"),
        TableColumn(field="累加報酬率", title="累加報酬率"),
    ]

    data_table = DataTable(source=sordata, columns=columns,
                           fit_columns=True, width=800, height=800)
    t1 = Panel(child=data_table, title="報酬率")  # t1 table1
    # print(dff['stock_id'])
    df_stocks = pd.DataFrame(columns=['符合條件的股票代碼'])
    df_stocks["符合條件的股票代碼"] = dff['stock_id']
    print(df_stocks)
    sordata1 = ColumnDataSource(df_stocks)
    columns1 = [TableColumn(field="符合條件的股票代碼", title="符合條件的股票代碼"),
                ]
    data_table1 = DataTable(source=sordata1, columns=columns1,
                            fit_columns=True, width=800, height=800)
    t2 = Panel(child=data_table1, title="符合條件股票")

    tabs = Tabs(tabs=[t1, t2])
    # show(tabs)

    # show(widgetbox([data_table1], sizing_mode='scale_both'))
# save() and .show() cannot be used together; to use the two lines below, comment out the show() calls above (ln253, ln267)
    output_file("報酬率.html")
    save(obj=column(p, tabs))
    # output_file("報酬率.html")
    # save(obj=column(p, widgetbox(
    # [data_table, data_table1], sizing_mode='scale_both')))

############## #####################################################

    if benchmark is None:
        benchmark = price['0050'][start_date:end_date].iloc[1:]

    # benchmark (thanks to Markk1227)
    ((benchmark/benchmark[0]-1)*100).plot(color=(0.8, 0.8, 0.8))
    plt.ylabel('Return On Investment (%)')
    plt.grid(linestyle='-.')
    plt.show()

    # ((benchmark/benchmark.cummax()-1)*100).plot(legend=True, color=(0.8, 0.8, 0.8))
    # ((equality/equality.cummax()-1)*100).plot(legend=True)
    # plt.ylabel('Dropdown (%)')
    # plt.grid(linestyle='-.')
    # plt.show()

    pd.Series(nstock).plot.bar()
    plt.ylabel('Number of stocks held')

    fig, ax = plt.subplots(figsize=(10, 10))
    # ax.axis('off')
    # ax.axis('tight')

    tb = ax.table(cellText=df_aaa.values,
                  colLabels=df_aaa.columns, bbox=[0, 0, 1, 1])
    tb[0, 0].set_facecolor('#363636')
    tb[0, 1].set_facecolor('#363636')
    tb[0, 2].set_facecolor('#363636')
    tb[0, 3].set_facecolor('#363636')
    tb[0, 0].set_text_props(color='w')
    tb[0, 1].set_text_props(color='w')
    tb.set_fontsize(30)
    plt.show()
    tmpfile = BytesIO()

    fig.savefig(tmpfile, format='png')
    encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')

    html = 'Some html head' + \
        '<img src=\'data:image/png;base64,{}\'>'.format(
            encoded) + 'Some more html'

    with open('test.html', 'w') as f:
        f.write(html)

    # html_graph = mpld3.fig_to_html(fig)
    # print(html_graph)
    # Html_file = open("index.html", "w", encoding='utf-8-sig')
    # Html_file.write(html_graph)
    # Html_file.close()

    return equality, transections
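A minimal usage sketch for backtest. The `data` handle and the strategy below are assumptions, not part of the original code; any object exposing a settable .date attribute and a .get('收盤價', n) price table in the style used above should work:

import datetime

def momentum_strategy(data):
    # hypothetical selection rule: hold the 10 stocks with the highest close
    # on the selection date; backtest() only uses the returned object's .index
    close = data.get('收盤價', 60)
    return close.iloc[-1].nlargest(10)

# `data` is assumed to be a pre-built price database handle (not defined here)
equality, transections = backtest(
    datetime.date(2020, 1, 1), datetime.date(2020, 6, 30),
    hold_days=20, strategy=momentum_strategy, data=data,
    weight='average', stop_loss=10, stop_profit=30)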