Example #1
    def runTest(self):
        """ tests depiction """
        inputfile = os.path.join("..", "testdata", "monitoring", "m1000.json")
        with open(inputfile, 'rt') as f:
            res = json.load(f)

        logfile = os.path.join("..", "testdata", "monitoring",
                               "logfile_big.log")

        dss = DepictServerStatus(logfile=logfile)
        dss.read_json(res, data_tag='all')
        dss.subset_data(from_data_tag='all',
                        to_data_tag='pre_insert',
                        column_name="context|info|message",
                        cell_values=["About to insert"])
        tab_server = dss.depict(
            data_tag='pre_insert',
            tab_title="In RAM sequence",
            metrics='server|scstat',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_memory = dss.depict(
            data_tag='pre_insert',
            tab_title="RAM usage",
            metrics='server|mstat',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_g2n = dss.depict(
            data_tag='pre_insert',
            tab_title="Db: guid->neighbours",
            metrics='dstats|guid2neighbour',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_g2m = dss.depict(
            data_tag='pre_insert',
            tab_title="Db: guid->metadata",
            metrics='dstats|guid2meta',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_sm = dss.depict(
            data_tag='pre_insert',
            tab_title="Db: server monitor",
            metrics='dstats|server_monitoring',
            x_axis_label='Order sequences added (Oldest --> Most recent)')

        # get details of the most recent 5 guids
        n = 5
        recent_guids = dss.most_recent_guids('all', n=n)
        dss.subset_data(from_data_tag='all',
                        to_data_tag='recent_guids',
                        column_name='content|activity|guid',
                        cell_values=[x for x in recent_guids])
        tab_rserver = dss.depict(data_tag='recent_guids',
                                 tab_title="Last {0} RAM sequences".format(n),
                                 metrics='server|scstat')
        tab_rmemory = dss.depict(data_tag='recent_guids',
                                 tab_title="Last {0} RAM usage".format(n),
                                 metrics='server|mstat')

        # get tail of logfile
        n_latest_lines = 100
        res = dss.logfile_tail(n_latest_lines)

        # render
        div = Div(text="[last {0} lines of log file are shown]<br/>".format(
            n_latest_lines) + res.replace('\n', '<br />'),
                  render_as_text=False,
                  width=1000,
                  height=800)
        tab_log = Panel(child=div, title='Log tail')

        doc = Tabs(tabs=[
            tab_server, tab_memory, tab_g2n, tab_g2m, tab_sm, tab_rserver,
            tab_rmemory, tab_log
        ])

        show(doc)
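show(doc) above renders to whatever output target is currently active. A minimal sketch, assuming the test should write the tabbed dashboard to a named HTML report first (the file name is hypothetical):

from bokeh.io import output_file, show

output_file("server_monitoring_report.html", title="Server monitoring")  # hypothetical report path
show(doc)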
Example #2
from bokeh.models.widgets import Panel, Tabs
from bokeh.io import output_file, show
from bokeh.plotting import figure
import GraphViz, MultiLevelPerfViz, TimelineViz
import networkx as nx
import numpy as np
from bokeh.embed import file_html
from bokeh.resources import INLINE


G = nx.complete_graph(8)

# Tab 1
plot = GraphViz.create(G, 'Complete Graph(8)', layout_prog='dot', tools="wheel_zoom,box_zoom,save,reset")
tab1 = Panel(child=plot, title="Graph")

# Tab 2
networks = ['VGG', 'Inception', 'Resnet']
confs = ['conf1', 'conf2', 'conf3', 'conf4', 'conf5', 'conf6', 'conf7']
data = np.random.randint(low=1, high=1000, size=(len(networks), len(confs)))
plot = MultiLevelPerfViz.create(networks, confs, data, "Perf. for multiple runs", "Cycles(in Millions)",
                                tools="wheel_zoom,box_zoom,save,reset")
tab2 = Panel(child=plot, title="MultiLevel")

# Tab3
listEvntsName = ['EvntType1', 'EvntType2', 'EvnType3']
x = np.linspace(1,9,9,dtype='int')
evntStartTime = np.array([x**2, x**2 + 2, x**2 + 5])
evntEndTime = evntStartTime + np.random.randint(1,5,(3,9))

plot = TimelineViz.create(listEvntsName, evntStartTime, evntEndTime, 'TimelineViz')
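Example #2 stops right after the timeline plot and never uses the Panel/Tabs and file_html imports it declares. A minimal completion sketch, assuming the same pattern as the first two tabs (the output file name is hypothetical):

tab3 = Panel(child=plot, title="Timeline")

tabs = Tabs(tabs=[tab1, tab2, tab3])
output_file("viz_tabs.html")  # hypothetical output path
show(tabs)
# or render to a self-contained HTML string instead:
# html = file_html(tabs, INLINE, "Visualization tabs")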
Example #3
p2.plot_width = 600
p2.plot_height = 600
p3.plot_width = 600
p3.plot_height = 600

# inputs = column(*controls, sizing_mode='fixed', height=300, width=500)
l1 = layout([[p1]], sizing_mode='fixed', height=600, width=150)
l2 = layout([[p2, picker]], sizing_mode='fixed', height=600, width=150)
l3 = layout([[p3]], sizing_mode='fixed', height=600, width=150)
l4 = layout([[p4]], sizing_mode='fixed', height=600, width=150)
l5 = layout([[p5]], sizing_mode='fixed', height=600, width=150)
l6 = layout([[p6]], sizing_mode='fixed', height=600, width=150)
l7 = layout([[p7]], sizing_mode='fixed', height=600, width=150)

# Tab setup
tab1 = Panel(child=l1, title="Division per hospital ward")
tab2 = Panel(child=l2, title="Age covid-19 patients")
tab3 = Panel(child=l3, title="Virus correlation")
tab4 = Panel(child=l4, title="Blood values")
tab5 = Panel(child=l5, title="Blood gas analysis")
tab6 = Panel(child=l6, title="Enzyme & Minerals & Protein")
tab7 = Panel(child=l7, title="Remaining numerical tests")
tab8 = Panel(child=p, title="Grid-overview of visualizations")

tabs = Tabs(tabs=[tab8, tab1, tab2, tab3, tab4, tab5, tab6, tab7])

# avoid shadowing the bokeh layout() helper with the resulting model
page_layout = layout([[text], [tabs]])
show(page_layout)

# curdoc().add_root(page_layout)
# bokeh serve --show JBI100_GUI.py
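The two commented-out lines point at the alternative way to run Example #3. A minimal sketch of the server variant, assuming the script is meant to be launched with the Bokeh server instead of opening a static page:

# replace show(page_layout) with a document root and launch via:
#     bokeh serve --show JBI100_GUI.py
from bokeh.io import curdoc
curdoc().add_root(page_layout)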
Example #4
def get_plots(target_df, target_variable, **kwargs):

    if target_variable in ('p_in_ab', 'inning'):
        df = get_df_p_in_ab(target_variable, **kwargs)
    else:
        df = get_df(target_df, target_variable, **kwargs)
    
    p_tab_list = []
    for year in df.year.unique():
        p = figure(x_range=[str(i) for i in list(df[df.year == year]['count'].unique())])
        p.vbar_stack(['number_of_FB', 
                      'number_of_BB', 
                      'number_of_OS', 
                      'number_of_OT'],
                      x='count',
                      width=0.8,
                      color=('red', 'blue', 'green', 'yellow'),
                      source=df[df.year == year],
                      legend_label = ['Fastballs', 'Breaking balls', 'Offspeed', 'Other'])
        yaxis = p.select(dict(type=Axis, layout="left"))[0]
        yaxis.formatter.use_scientific = False
        hover = HoverTool(tooltips=[(target_variable, '@count'),
                                    ('Number of Fastballs', '@number_of_FB'), 
                                    ('Number of Breaking Balls', '@number_of_BB'),
                                    ('Number of Offspeed', '@number_of_OS'),
                                    ('Number of Other', '@number_of_OT')])
        p.add_tools(hover)
        p_tab_list.append(p)
        p.xaxis.axis_label = target_variable
        p.yaxis.axis_label = 'Number of pitches'
        if len(kwargs) > 1:
            kwarg_list = list(kwargs.items())
            p.title.text = '{} for {} {}'.format(target_variable, kwarg_list[0][1], kwarg_list[1][1])
        else:
            p.title.text = '{} for all pitchers'.format(target_variable)

    tab1 = Panel(child=p_tab_list[0], title='2015')
    tab2 = Panel(child=p_tab_list[1], title='2016')
    tab3 = Panel(child=p_tab_list[2], title='2017')
    tab4 = Panel(child=p_tab_list[3], title='2018')

    layout1 = Tabs(tabs=[tab1, tab2, tab3, tab4])
    output_notebook()
    
    pp_tab_list = []
    for year in df.year.unique():
        pp = figure(x_range=[str(i) for i in list(df[df.year == year]['count'].unique())])
        pp.vbar_stack(['percent_FB', 
                       'percent_BB', 
                       'percent_OS', 
                       'percent_OT'],
                      x='count',
                      width=0.8,
                      color=('red', 'blue', 'green', 'yellow'),
                      source=df[df.year == year],
                      legend_label = ['Fastballs', 'Breaking Balls', 'Offspeed', 'Other'])
        yaxis = pp.select(dict(type=Axis, layout="left"))[0]
        yaxis.formatter.use_scientific = False
        hover = HoverTool(tooltips=[(target_variable, '@count'),
                                    ('Percent Fastballs', '@percent_FB'), 
                                    ('Percent Breaking Balls', '@percent_BB'),
                                    ('Percent Offspeed', '@percent_OS'),
                                    ('Percent Other', '@percent_OT')])
        pp.add_tools(hover)
        pp_tab_list.append(pp)
        pp.xaxis.axis_label = target_variable
        pp.yaxis.axis_label = 'Percent of pitches'
        
        if len(kwargs) > 1:
            kwarg_list = list(kwargs.items())
            pp.title.text = '{} for {} {}'.format(target_variable, kwarg_list[0][1], kwarg_list[1][1])
        else:
            pp.title.text = '{} for all pitchers'.format(target_variable)

    tab1 = Panel(child=pp_tab_list[0], title='2015')
    tab2 = Panel(child=pp_tab_list[1], title='2016')
    tab3 = Panel(child=pp_tab_list[2], title='2017')
    tab4 = Panel(child=pp_tab_list[3], title='2018')

    layout2 = Tabs(tabs=[tab1, tab2, tab3, tab4])
    output_notebook()
    show(row(layout1, layout2))
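The four hard-coded Panel/Tabs blocks above assume the data always covers exactly the 2015-2018 seasons. A hedged sketch (inside get_plots, reusing only names the function already defines) that builds one tab per year actually present:

    # one Panel per year, in the same order the figures were created above
    count_panels = [Panel(child=fig, title=str(year))
                    for year, fig in zip(df.year.unique(), p_tab_list)]
    layout1 = Tabs(tabs=count_panels)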
Example #5
def bias_plots(conn, start, end):
    '''Combine the detector bias plots into a single dashboard tab.
    Parameters
    ----------
    conn : DBobject
        Connection object that represents the database
    start : time
        Start limit for the x-axis and query (typically datetime.now() - 4 months)
    end : time
        End limit for the x-axis and query (typically datetime.now())
    Returns
    -------
    tab : Panel
        Tab object used by dashboard.py to set up the dashboard
    '''

    descr = Div(text="""
    <style>
    table, th, td {
      border: 1px solid black;
      background-color: #efefef;
      border-collapse: collapse;
      padding: 5px
    }
    table {
      border-spacing: 15px;
    }
    </style>

    <body>
    <table style="width:100%">
      <tr>
        <th><h6>Plotname</h6></th>
        <th><h6>Mnemonic</h6></th>
        <th><h6>Description</h6></th>
      </tr>
      <tr>
        <td>VSSOUT</td>
        <td>IGDP_MIR_IC_V_VSSOUT<br>
            IGDP_MIR_SW_V_VSSOUT<br>
            IGDP_MIR_LW_V_VSSOUT<br> </td>
        <td>Detector Bias VSSOUT (IC,SW, & LW)</td>
      </tr>
      <tr>
        <td>VDETCOM</td>
        <td>IGDP_MIR_IC_V_VDETCOM<br>
            IGDP_MIR_SW_V_VDETCOM<br>
            IGDP_MIR_LW_V_VDETCOM<br> </td>
        <td>Detector Bias VDETCOM (IC,SW, & LW)</td>
      </tr>
      <tr>
        <td>VRSTOFF</td>
        <td>IGDP_MIR_IC_V_VRSTOFF<br>
            IGDP_MIR_SW_V_VRSTOFF<br>
            IGDP_MIR_LW_V_VRSTOFF<br> </td>
        <td>Detector Bias VRSTOFF (IC,SW, & LW)</td>
      </tr>
      <tr>
        <td>VP</td>
        <td>IGDP_MIR_IC_V_VP<br>
            IGDP_MIR_SW_V_VP<br>
            IGDP_MIR_LW_V_VP<br> </td>
        <td>Detector Bias VP (IC,SW, & LW)</td>
      </tr>
      <tr>
        <td>VDDUC</td>
        <td>IGDP_MIR_IC_V_VDDUC<br>
            IGDP_MIR_SW_V_VDDUC<br>
            IGDP_MIR_LW_V_VDDUC<br> </td>
        <td>Detector Bias VDDUC (IC,SW, & LW)</td>
      </tr>

    </table>
    </body>
    """,
                width=1100)

    plot1 = vdetcom(conn, start, end)
    plot2 = vssout(conn, start, end)
    plot3 = vrstoff(conn, start, end)
    plot4 = vp(conn, start, end)
    plot5 = vdduc(conn, start, end)

    lay = gridplot([[plot2, plot1], [plot3, plot4], [plot5, None]],
                   merge_tools=False)

    layout = Column(descr, lay)

    tab = Panel(child=layout, title="BIAS")

    return tab
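dashboard.py itself is not shown; a minimal sketch of how such a caller might assemble several of these tab-building functions (fpe_plots and temperature_plots appear in later examples), assuming each returns a Panel as documented:

from bokeh.io import show
from bokeh.models.widgets import Tabs

# conn, start and end come from the monitoring-database setup (not shown here)
dashboard = Tabs(tabs=[
    bias_plots(conn, start, end),
    fpe_plots(conn, start, end),
    temperature_plots(conn, start, end),
])
show(dashboard)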
    if (select1.value == "2014"):
        plot_year(plot2014_tier)
    if (select1.value == "2015"):
        plot_year(plot2015_tier)
    if (select1.value == "2016"):
        plot_year(plot2016_tier)


select1 = Select(title="Please Select Year:",
                 value="fig4",
                 options=[
                     ("2008", "2008"),
                     ("2009", "2009"),
                     ("2010", "2010"),
                     ("2011", "2011"),
                     ("2012", "2012"),
                     ("2013", "2013"),
                     ("2014", "2014"),
                     ("2015", "2015"),
                     ("2016", "2016"),
                 ])
select1.on_change("value", update_plot1)
curdoc().clear()
myLayout2 = layout([[select1]])
tab1 = Panel(child=myLayout1, title="Trafficking countries information")
tab2 = Panel(child=myLayout2, title="Tier information")
tabs = Tabs(tabs=[tab1, tab2])
curdoc().add_root(tabs)
#curdoc().add_root(plot2008_tier)
curdoc().add_root(plot2008)
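Only the tail of the year-selection callback survives in this snippet. A hedged sketch of the full handler, assuming Bokeh's (attr, old, new) on_change signature and only the pre-built figures that appear in the surviving branches:

def update_plot1(attr, old, new):
    # map the selected year onto its pre-built tier figure; only the names
    # visible in the snippet are listed here
    year_plots = {
        "2014": plot2014_tier,
        "2015": plot2015_tier,
        "2016": plot2016_tier,
    }
    if new in year_plots:
        plot_year(year_plots[new])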
Example #7
def _create_ui_components() -> (Figure, ColumnDataSource):  # pylint: disable=too-many-statements
    global asp_table_source, asp_filter_src, op_table_source, op_filter_src
    global stats, aspects, tabs, lexicons_dropdown
    stats = pd.DataFrame(columns=["Quantity", "Score"])
    aspects = pd.Series([])

    def new_col_data_src():
        return ColumnDataSource({"file_contents": [], "file_name": []})

    large_text = HTMLTemplateFormatter(template="""<div><%= value %></div>""")

    def data_column(title):
        return TableColumn(
            field=title, title='<span class="header">' + title + "</span>", formatter=large_text
        )

    asp_table_columns = [
        data_column("Term"),
        data_column("Alias1"),
        data_column("Alias2"),
        data_column("Alias3"),
    ]
    op_table_columns = [data_column("Term"), data_column("Score"), data_column("Polarity")]

    asp_table_source = empty_table("Term", "Alias1", "Alias2", "Alias3")
    asp_filter_src = empty_table("Term", "Alias1", "Alias2", "Alias3")
    asp_src = new_col_data_src()

    op_table_source = empty_table("Term", "Score", "Polarity", "Polarity")
    op_filter_src = empty_table("Term", "Score", "Polarity", "Polarity")
    op_src = new_col_data_src()

    asp_table = DataTable(
        source=asp_table_source,
        selectable="checkbox",
        columns=asp_table_columns,
        editable=True,
        width=600,
        height=500,
    )
    op_table = DataTable(
        source=op_table_source,
        selectable="checkbox",
        columns=op_table_columns,
        editable=True,
        width=600,
        height=500,
    )

    asp_examples_box = _create_examples_table()
    op_examples_box = _create_examples_table()
    asp_layout = layout([[asp_table, asp_examples_box]])
    op_layout = layout([[op_table, op_examples_box]])
    asp_tab = Panel(child=asp_layout, title="Aspect Lexicon")
    op_tab = Panel(child=op_layout, title="Opinion Lexicon")
    tabs = Tabs(tabs=[asp_tab, op_tab], width=700, css_classes=["mytab"])

    lexicons_menu = [("Open", "open"), ("Save", "save")]
    lexicons_dropdown = Dropdown(
        label="Edit Lexicons",
        button_type="success",
        menu=lexicons_menu,
        width=140,
        height=31,
        css_classes=["mybutton"],
    )

    train_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
    train_dropdown = Dropdown(
        label="Extract Lexicons",
        button_type="success",
        menu=train_menu,
        width=162,
        height=31,
        css_classes=["mybutton"],
    )

    inference_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
    inference_dropdown = Dropdown(
        label="Classify",
        button_type="success",
        menu=inference_menu,
        width=140,
        height=31,
        css_classes=["mybutton"],
    )

    text_status = TextInput(
        value="Select training data", title="Train Run Status:", css_classes=["statusText"]
    )
    text_status.visible = False

    train_src = new_col_data_src()
    infer_src = new_col_data_src()

    with open(join(SOLUTION_DIR, "dropdown.js")) as f:
        args = dict(
            clicked=lexicons_dropdown,
            asp_filter=asp_filter_src,
            op_filter=op_filter_src,
            asp_src=asp_src,
            op_src=op_src,
            tabs=tabs,
            text_status=text_status,
            train_src=train_src,
            infer_src=infer_src,
            train_clicked=train_dropdown,
            infer_clicked=inference_dropdown,
            opinion_lex_generic="",
        )
        code = f.read()

    args["train_clicked"] = train_dropdown
    train_dropdown.js_on_change("value", CustomJS(args=args, code=code))

    args["train_clicked"] = inference_dropdown
    inference_dropdown.js_on_change("value", CustomJS(args=args, code=code))

    args["clicked"] = lexicons_dropdown
    lexicons_dropdown.js_on_change("value", CustomJS(args=args, code=code))

    def update_filter_source(table_source, filter_source):
        df = table_source.to_df()
        sel_inx = sorted(table_source.selected.indices)
        df = df.iloc[sel_inx, 1:]
        new_source = ColumnDataSource(df)
        filter_source.data = new_source.data

    def update_examples_box(data, examples_box, old, new):
        examples_box.source.data = {"Examples": []}
        unselected = list(set(old) - set(new))
        selected = list(set(new) - set(old))
        if len(selected) <= 1 and len(unselected) <= 1:
            examples_box.source.data.update(
                {
                    "Examples": [str(data.iloc[unselected[0], i]) for i in range(4, 24)]
                    if len(unselected) != 0
                    else [str(data.iloc[selected[0], i]) for i in range(4, 24)]
                }
            )

    def asp_selected_change(_, old, new):
        global asp_filter_src, asp_table_source, aspects_data
        update_filter_source(asp_table_source, asp_filter_src)
        update_examples_box(aspects_data, asp_examples_box, old, new)

    def op_selected_change(_, old, new):
        global op_filter_src, op_table_source, opinions_data
        update_filter_source(op_table_source, op_filter_src)
        update_examples_box(opinions_data, op_examples_box, old, new)

    def read_csv(file_src, headers=False, index_cols=False, readCSV=True):
        if readCSV:
            raw_contents = file_src.data["file_contents"][0]

            if len(raw_contents.split(",")) == 1:
                b64_contents = raw_contents
            else:
                # remove the prefix that JS adds
                b64_contents = raw_contents.split(",", 1)[1]
            file_contents = base64.b64decode(b64_contents)
            return pd.read_csv(
                io.BytesIO(file_contents),
                encoding="ISO-8859-1",
                keep_default_na=False,
                na_values={None},
                engine="python",
                index_col=index_cols,
                header=0 if headers else None,
            )
        return file_src

    def read_parsed_files(file_content, file_name):
        try:
            # remove the prefix that JS adds
            b64_contents = file_content.split(",", 1)[1]
            file_content = base64.b64decode(b64_contents)
            with open(SENTIMENT_OUT / file_name, "w") as json_file:
                data_dict = json.loads(file_content.decode("utf-8"))
                json.dump(data_dict, json_file)
        except Exception as e:
            print(str(e))

    # pylint: disable=unused-argument
    def train_file_callback(attr, old, new):
        global train_data
        SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)
        train = TrainSentiment(parse=True, rerank_model=None)
        if len(train_src.data["file_contents"]) == 1:
            train_data = read_csv(train_src, index_cols=0)
            file_name = train_src.data["file_name"][0]
            raw_data_path = SENTIMENT_OUT / file_name
            train_data.to_csv(raw_data_path, header=False)
            print("Running_SentimentTraining on data...")
            train.run(data=raw_data_path)
        else:
            f_contents = train_src.data["file_contents"]
            f_names = train_src.data["file_name"]
            raw_data_path = SENTIMENT_OUT / train_src.data["file_name"][0].split("/")[0]
            if not os.path.exists(raw_data_path):
                os.makedirs(raw_data_path)
            for f_content, f_name in zip(f_contents, f_names):
                read_parsed_files(f_content, f_name)
            print("Running_SentimentTraining on data...")
            train.run(parsed_data=raw_data_path)

        text_status.value = "Lexicon extraction completed"

        with io.open(AcquireTerms.acquired_aspect_terms_path, "r") as fp:
            aspect_data_csv = fp.read()
        file_data = base64.b64encode(str.encode(aspect_data_csv))
        file_data = file_data.decode("utf-8")
        asp_src.data = {"file_contents": [file_data], "file_name": ["nameFile.csv"]}

        out_path = LEXICONS_OUT / "generated_opinion_lex_reranked.csv"
        with io.open(out_path, "r") as fp:
            opinion_data_csv = fp.read()
        file_data = base64.b64encode(str.encode(opinion_data_csv))
        file_data = file_data.decode("utf-8")
        op_src.data = {"file_contents": [file_data], "file_name": ["nameFile.csv"]}

    def show_analysis() -> None:
        global stats, aspects, plot, source, tabs
        plot, source = _create_plot()
        events_table = _create_events_table()

        # pylint: disable=unused-argument
        def _events_handler(attr, old, new):
            _update_events(events_table, events_type.active)

        # Toggle display of in-domain / All aspect mentions
        events_type = RadioButtonGroup(labels=["All Events", "In-Domain Events"], active=0)

        analysis_layout = layout([[plot], [events_table]])

        # events_type display toggle disabled
        # analysis_layout = layout([[plot],[events_type],[events_table]])

        analysis_tab = Panel(child=analysis_layout, title="Analysis")
        tabs.tabs.insert(2, analysis_tab)
        tabs.active = 2
        events_type.on_change("active", _events_handler)
        source.selected.on_change("indices", _events_handler)  # pylint: disable=no-member

    # pylint: disable=unused-argument
    def infer_file_callback(attr, old, new):

        # run inference on input data and current aspect/opinion lexicons in view
        global infer_data, stats, aspects

        SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)

        df_aspect = pd.DataFrame.from_dict(asp_filter_src.data)
        aspect_col_list = ["Term", "Alias1", "Alias2", "Alias3"]
        df_aspect = df_aspect[aspect_col_list]
        df_aspect.to_csv(SENTIMENT_OUT / "aspects.csv", index=False, na_rep="NaN")

        df_opinion = pd.DataFrame.from_dict(op_filter_src.data)
        opinion_col_list = ["Term", "Score", "Polarity", "isAcquired"]
        df_opinion = df_opinion[opinion_col_list]
        df_opinion.to_csv(SENTIMENT_OUT / "opinions.csv", index=False, na_rep="NaN")

        solution = SentimentSolution()

        if len(infer_src.data["file_contents"]) == 1:
            infer_data = read_csv(infer_src, index_cols=0)
            file_name = infer_src.data["file_name"][0]
            raw_data_path = SENTIMENT_OUT / file_name
            infer_data.to_csv(raw_data_path, header=False)
            print("Running_SentimentInference on data...")
            text_status.value = "Running classification on data..."
            stats = solution.run(
                data=raw_data_path,
                aspect_lex=SENTIMENT_OUT / "aspects.csv",
                opinion_lex=SENTIMENT_OUT / "opinions.csv",
            )
        else:
            f_contents = infer_src.data["file_contents"]
            f_names = infer_src.data["file_name"]
            raw_data_path = SENTIMENT_OUT / infer_src.data["file_name"][0].split("/")[0]
            if not os.path.exists(raw_data_path):
                os.makedirs(raw_data_path)
            for f_content, f_name in zip(f_contents, f_names):
                read_parsed_files(f_content, f_name)
            print("Running_SentimentInference on data...")
            text_status.value = "Running classification on data..."
            stats = solution.run(
                parsed_data=raw_data_path,
                aspect_lex=SENTIMENT_OUT / "aspects.csv",
                opinion_lex=SENTIMENT_OUT / "opinions.csv",
            )

        aspects = pd.read_csv(SENTIMENT_OUT / "aspects.csv", encoding="utf-8")["Term"]
        text_status.value = "Classification completed"
        show_analysis()

    # pylint: disable=unused-argument
    def asp_file_callback(attr, old, new):
        global aspects_data, asp_table_source
        aspects_data = read_csv(asp_src, headers=True)
        # Replaces None values by empty string
        aspects_data = aspects_data.fillna("")
        new_source = ColumnDataSource(aspects_data)
        asp_table_source.data = new_source.data
        asp_table_source.selected.indices = list(range(len(aspects_data)))

    # pylint: disable=unused-argument
    def op_file_callback(attr, old, new):
        global opinions_data, op_table_source, lexicons_dropdown, df_opinion_generic
        df = read_csv(op_src, headers=True)
        # Replaces None values by empty string
        df = df.fillna("")
        # Placeholder for generic opinion lexicons from the given csv file
        df_opinion_generic = df[df["isAcquired"] == "N"]
        # Update the argument value for the callback customJS
        lexicons_dropdown.js_property_callbacks.get("change:value")[0].args[
            "opinion_lex_generic"
        ] = df_opinion_generic.to_dict(orient="list")
        opinions_data = df[df["isAcquired"] == "Y"]
        new_source = ColumnDataSource(opinions_data)
        op_table_source.data = new_source.data
        op_table_source.selected.indices = list(range(len(opinions_data)))

    # pylint: disable=unused-argument
    def txt_status_callback(attr, old, new):
        print("Status: " + new)

    text_status.on_change("value", txt_status_callback)

    asp_src.on_change("data", asp_file_callback)
    # pylint: disable=no-member
    asp_table_source.selected.on_change("indices", asp_selected_change)

    op_src.on_change("data", op_file_callback)
    op_table_source.selected.on_change("indices", op_selected_change)  # pylint: disable=no-member

    train_src.on_change("data", train_file_callback)
    infer_src.on_change("data", infer_file_callback)

    return layout([[_create_header(train_dropdown, inference_dropdown, text_status)], [tabs]])
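Several helpers used above (empty_table, _create_examples_table, _create_plot, _create_header) are defined elsewhere in the module. A minimal sketch of empty_table, assuming it only needs to return a ColumnDataSource with one empty column per requested name:

from bokeh.models import ColumnDataSource

def empty_table(*columns):
    # hypothetical reconstruction: an empty list for every column name
    return ColumnDataSource({col: [] for col in columns})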
Example #8
    def initialize_plot(self, plots=None, ranges=None):
        ranges = self.compute_ranges(self.layout, self.keys[-1], None)

        plot_grid = self._compute_grid()
        passed_plots = [] if plots is None else plots
        r_offset = 0
        col_offsets = defaultdict(int)
        tab_plots = []

        for r in range(self.rows):
            # Compute row offset
            row = [(k, sp) for k, sp in self.subplots.items() if k[0] == r]
            row_padded = any(len(sp.layout) > 2 for k, sp in row)
            if row_padded:
                r_offset += 1

            for c in range(self.cols):
                subplot = self.subplots.get((r, c), None)

                # Compute column offset
                col = [(k, sp) for k, sp in self.subplots.items() if k[1] == c]
                col_padded = any(len(sp.layout) > 1 for k, sp in col)
                if col_padded:
                    col_offsets[r] += 1
                c_offset = col_offsets.get(r, 0)

                if subplot is None:
                    continue

                shared_plots = list(passed_plots) if self.shared_axes else None
                subplots = subplot.initialize_plot(ranges=ranges,
                                                   plots=shared_plots)
                nsubplots = len(subplots)

                # If tabs enabled lay out AdjointLayout on grid
                if self.tabs:
                    title = subplot.subplots['main']._format_title(
                        self.keys[-1], dimensions=False)
                    if not title:
                        title = ' '.join(self.paths[r, c])
                    if nsubplots == 1:
                        grid = subplots[0]
                    elif nsubplots == 2:
                        grid = gridplot([subplots],
                                        merge_tools=self.merge_tools,
                                        toolbar_location=self.toolbar)
                    else:
                        grid = [[subplots[2], None], subplots[:2]]
                        grid = gridplot(children=grid,
                                        merge_tools=self.merge_tools,
                                        toolbar_location=self.toolbar)
                    tab_plots.append((title, grid))
                    continue

                # Situate plot in overall grid
                if nsubplots > 2:
                    plot_grid[r + r_offset - 1][c + c_offset - 1] = subplots[2]
                plot_column = plot_grid[r + r_offset]
                if nsubplots > 1:
                    plot_column[c + c_offset - 1] = subplots[0]
                    plot_column[c + c_offset] = subplots[1]
                else:
                    plot_column[c + c_offset - int(col_padded)] = subplots[0]
                passed_plots.append(subplots[0])

        # Wrap in appropriate layout model
        kwargs = dict(sizing_mode=self.sizing_mode)
        if self.tabs:
            plots = filter_toolboxes([p for t, p in tab_plots])
            panels = [Panel(child=child, title=t) for t, child in tab_plots]
            layout_plot = Tabs(tabs=panels)
        else:
            plot_grid = layout_padding(plot_grid, self.renderer)
            plot_grid = filter_toolboxes(plot_grid)
            plot_grid = pad_plots(plot_grid)
            layout_plot = gridplot(children=plot_grid,
                                   toolbar_location=self.toolbar,
                                   merge_tools=self.merge_tools,
                                   **kwargs)

        title = self._get_title(self.keys[-1])
        if title:
            self.handles['title'] = title
            layout_plot = Column(title, layout_plot, **kwargs)

        self.handles['plot'] = layout_plot
        self.handles['plots'] = plots

        self._update_callbacks(layout_plot)
        if self.shared_datasource:
            self.sync_sources()

        if self.top_level:
            self.init_links()

        self.drawn = True

        return self.handles['plot']
Example #9
def create_layout(data, start_year, end_year):
    """
    Function for creating a bokeh layout with all of the data tables
    """

    agg_data = data["aggr_outputs"]
    # create aggregate table
    clt_title = f"<h3>{agg_data['current']['title']}</h3>"
    current_law_table = Div(text=clt_title + agg_data["current"]["renderable"],
                            width=1000)
    rt_title = f"<h3>{agg_data['reform']['title']}</h3>"
    reform_table = Div(text=rt_title + agg_data["reform"]["renderable"],
                       width=1000)
    ct_title = f"<h3>{agg_data['change']['title']}</h3>"
    change_table = Div(text=ct_title + agg_data["change"]["renderable"],
                       width=1000)

    current_tab = Panel(child=current_law_table, title="Current Law")
    reform_tab = Panel(child=reform_table, title="Reform")
    change_tab = Panel(child=change_table, title="Change")
    agg_tabs = Tabs(tabs=[current_tab, reform_tab, change_tab])

    key_map = {
        "current": "Current",
        "reform": "Reform",
        "ind_income": "Income Tax",
        "payroll": "Payroll Tax",
        "combined": "Combined Tax",
        "dist": "Distribution Table",
        "diff": "Differences Table"
    }

    tbl_data = data["tbl_outputs"]
    yr_panels = []
    # loop through each year (start - end year)
    for yr in range(start_year, end_year + 1):
        # loop through each table type: dist, diff
        tbl_panels = []
        for tbl_type, content in tbl_data.items():
            # loop through sub tables: current, reform for dist
            # ind_income, payroll, combined for diff
            content_panels = []
            for key, value in content.items():
                # loop through each grouping: bins, deciles
                grp_panels = []
                for grp, grp_data in value.items():
                    _data = grp_data[yr]
                    # create a data table for this tab
                    title = f"<h3>{_data['title']}</h3>"
                    note = ("<p><i>All monetary totals are in billions. "
                            "All counts are in millions. "
                            "Averages and shares are as shown.</i></p>")
                    tbl = Div(text=title + note + _data["renderable"],
                              width=1000)
                    grp_panel = Panel(child=tbl, title=grp.title())
                    grp_panels.append(grp_panel)
                grp_tab = Tabs(tabs=grp_panels)
                # panel for the sub tables
                content_panel = Panel(child=grp_tab, title=key_map[key])
                content_panels.append(content_panel)
            content_tab = Tabs(tabs=content_panels)
            # panel for the table types
            tbl_panel = Panel(child=content_tab, title=key_map[tbl_type])
            tbl_panels.append(tbl_panel)
        type_tab = Tabs(tabs=tbl_panels)
        # panel for the year
        yr_panel = Panel(child=type_tab, title=str(yr))
        yr_panels.append(yr_panel)

    yr_tabs = Tabs(tabs=yr_panels)

    agg_layout = layout(children=[agg_tabs])
    table_layout = layout(children=[yr_tabs])
    agg_data = json_item(agg_layout)
    table_data = json_item(table_layout)

    # return a dictionary of outputs ready for COMP
    agg_outputs = {
        "media_type": "bokeh",
        "title": "Aggregate Results",
        "data": agg_data,
    }
    table_outputs = {
        "media_type": "bokeh",
        "title": "Tables",
        "data": table_data,
    }

    return agg_outputs, table_outputs
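json_item already returns a JSON-serializable dict, so the two payloads can be persisted or handed straight to a front end. A small usage sketch with hypothetical caller variables and file names:

import json

agg_outputs, table_outputs = create_layout(data, start_year, end_year)

# hypothetical file names; the "data" entries are plain JSON-compatible dicts
with open("aggregate_results.json", "w") as f:
    json.dump(agg_outputs["data"], f)
with open("tables.json", "w") as f:
    json.dump(table_outputs["data"], f)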
Example #10
def fpe_plots(conn, start, end):
    '''Combine the FPE voltage and current plots into a single dashboard tab.
    Parameters
    ----------
    conn : DBobject
        Connection object that represents the database
    start : time
        Start limit for the x-axis and query (typically datetime.now() - 4 months)
    end : time
        End limit for the x-axis and query (typically datetime.now())
    Returns
    -------
    tab : Panel
        Tab object used by dashboard.py to set up the dashboard
    '''
    descr = Div(text="""
    <style>
    table, th, td {
      border: 1px solid black;
      background-color: #efefef;
      border-collapse: collapse;
      padding: 5px
    }
    table {
      border-spacing: 15px;
    }
    </style>

    <body>
    <table style="width:100%">
      <tr>
        <th><h6>Plotname</h6></th>
        <th><h6>Mnemonic</h6></th>
        <th><h6>Description</h6></th>
      </tr>
      <tr>
        <td>2.5V Ref and FPE Digg</td>
        <td>IMIR_SPW_V_DIG_2R5V<br>
            IMIR_PDU_V_REF_2R5V<br> </td>
        <td>FPE 2.5V Digital and FPE 2.5V PDU Reference Voltage</td>
      </tr>
      <tr>
        <td>FPE Dig. 5V</td>
        <td>IMIR_PDU_V_DIG_5V<br>
            IMIR_PDU_I_DIG_5V</td>
        <td>FPE 5V Digital Voltage and Current</td>
      </tr>
      <tr>
        <td>FPE Ana. 5V</td>
        <td>IMIR_PDU_V_ANA_5V<br>
            IMIR_PDU_I_ANA_5V</td>
        <td>FPE +5V Analog Voltage and Current</td>
      </tr>
      <tr>
        <td>FPE Ana. N5V</td>
        <td>IMIR_PDU_V_ANA_N5V<br>
            IMIR_PDU_I_ANA_N5V</td>
        <td>FPE -5V Analog Voltage and Current</td>
      </tr>
      <tr>
        <td>FPE Ana. 7V</td>
        <td>IMIR_PDU_V_ANA_7V<br>
            IMIR_PDU_I_ANA_7V</td>
        <td>FPE +7V Analog Voltage and Current</td>
      </tr>
       <tr>
         <td>FPE Ana. N7V</td>
         <td>IMIR_PDU_V_ANA_N7V<br>
             IMIR_PDU_I_ANA_N7V</td>
         <td>FPE -7V Analog Voltage and Current</td>
       </tr>
    </table>
    </body>
    """,
                width=1100)

    plot1 = dig5(conn, start, end)
    plot2 = refdig(conn, start, end)
    plot3 = ana5(conn, start, end)
    plot4 = ana5n(conn, start, end)
    plot5 = ana7(conn, start, end)
    plot6 = ana7n(conn, start, end)

    lay = gridplot([[plot2, plot1], [plot3, plot4], [plot5, plot6]],
                   merge_tools=False)

    layout = Column(descr, lay)

    tab = Panel(child=layout, title="FPE VOLTAGE/CURRENT")

    return tab
Example #11
def temperature_plots(conn, start, end):
    '''Combine the temperature plots into a single dashboard tab.
    Parameters
    ----------
    conn : DBobject
        Connection object that represents the database
    start : time
        Start limit for the x-axis and query (typically datetime.now() - 4 months)
    end : time
        End limit for the x-axis and query (typically datetime.now())
    Returns
    -------
    tab : Panel
        Tab object used by dashboard.py to set up the dashboard
    '''

    descr = Div(text="""
    <style>
    table, th, td {
      border: 1px solid black;
      background-color: #efefef;
      border-collapse: collapse;
      padding: 5px
    }
    </style>

    <body>
    <table style="width:100%">
      <tr>
        <th><h6>Plotname</h6></th>
        <th><h6>Mnemonic</h6></th>
        <th><h6>Description</h6></th>
      </tr>
      <tr>
        <td>CRYO Temperatures</td>
        <td>IGDP_MIR_ICE_T1P_CRYO<br>
            IGDP_MIR_ICE_T2R_CRYO<br>
            IGDP_MIR_ICE_T3LW_CRYO<br>
            IGDP_MIR_ICE_T4SW_CRYO<br>
            IGDP_MIR_ICE_T5IMG_CRYO<br>
            IGDP_MIR_ICE_T6DECKCRYO<br>
            IGDP_MIR_ICE_T7IOC_CRYO<br>
            IGDP_MIR_ICE_FW_CRYO<br>
            IGDP_MIR_ICE_CCC_CRYO<br>
            IGDP_MIR_ICE_GW14_CRYO<br>
            IGDP_MIR_ICE_GW23_CRYO<br>
            IGDP_MIR_ICE_POMP_CRYO<br>
            IGDP_MIR_ICE_POMR_CRYO<br>
            IGDP_MIR_ICE_IFU_CRYO<br>
            IGDP_MIR_ICE_IMG_CRYO<br></td>
        <td>Deck Nominal Temperature (T1)<br>
            Deck Redundant Temperature (T2)<br>
            LW FPM I/F Temperature (T3)<br>
            SW FPM I/F Temperature (T4)<br>
            IM FPM I/F Temperature (T5)<br>
            Deck Opp. Nom. Temperature (T6)<br>
            Deck Opp. Red. Temperature (T7)<br>
            FWA Temperature<br>
            CCC Temperature<br>
            DGA-A (GW14) Temperature<br>
            DGA-B (GW23) Temperature<br>
            POMH Nominal Temperature<br>
            POMH Redundant Temperature<br>
            MRS (CF) Cal. Source Temperature<br>
            Imager (CI) Cal. Source Temperature<br></td>
      </tr>
      <tr>
        <td>IEC Temperatures</td>
        <td>ST_ZTC1MIRIA<br>
            ST_ZTC2MIRIA<br>
            ST_ZTC1MIRIB<br>
            ST_ZTC2MIRIB<br>
            IMIR_PDU_TEMP<br>
            IMIR_IC_SCE_ANA_TEMP1<br>
            IMIR_SW_SCE_ANA_TEMP1<br>
            IMIR_LW_SCE_ANA_TEMP1<br>
            IMIR_IC_SCE_DIG_TEMP<br>
            IMIR_SW_SCE_DIG_TEMP<br>
            IMIR_LW_SCE_DIG_TEMP<br></td>
        <td>ICE A IEC panel Temp<br>
            FPE A IEC panel Temp<br>
            ICE B IEC panel (Redundant) Temp<br>
            FPE B IEC panel (Redundant) Temp<br>
            FPE PDU Temperature<br>
            FPE SCE Analogue board Temperature (IC, SW & LW) <br>
            FPE SCE Digital board Temperature (IC, SW & LW)<br></td>
      </tr>
       <tr>
         <td>Detector Temperatures</td>
         <td>IGDP_MIR_IC_DET_TEMP<br>
            IGDP_MIR_lW_DET_TEMP<br>
            IGDP_MIR_SW_DET_TEMP<br></td>
         <td>Detector Temperature (IC,SW&LW)<br></td>
       </tr>
    </table>
    </body>
    """,
                width=1100)

    plot1 = cryo(conn, start, end)
    plot2 = temp(conn, start, end)
    plot3 = det(conn, start, end)

    layout = column(descr, plot1, plot2, plot3)
    tab = Panel(child=layout, title="TEMPERATURE")

    return tab
Example #12
def main():
    #path variables
    #in_path = '/mnt/data04/Conduit/afib/new_files/1hour/'
    #af_outpath = '/mnt/data04/Conduit/afib/AF_annotations/af_ann4/'
    in_path = join(dirname(__file__), "data_in/")
    af_outpath = join(dirname(__file__), "data_out/")
    #Miscellaneous variables
    colours = {
        '': 'black',
        'O': 'blue',
        'N': 'green',
        '~': 'purple',
        'A': 'red'
    }
    #wf_classes = ['AF','Normal','Other','Noise','No Signal']
    wf_classes = ['AF', 'Not AF']
    #global variables remove if you can
    newECG = pd.DataFrame()
    df_ann = pd.DataFrame(columns=[0])

    #widgets to be used as necessary
    rdo_btn_wave_lbls = RadioButtonGroup(labels=wf_classes, active=0)  # active=2 indexed the old five-class list; only two classes remain
    date_range_slider = Slider(title="Date Range", step=10)
    table_source, table_columns = load_file_source(in_path, af_outpath)
    file_table = DataTable(source=table_source,
                           columns=table_columns,
                           width=800,
                           height=600)
    pgph_file_loaded = Div(text="Select file from table.",
                           width=1000,
                           height=50)
    pgph_file_loaded.style = {
        "font-size": '1.2em',
        'font-weight': 'bold',
        'color': 'SteelBlue'
    }
    txt_processing = Div(
        text=
        "Use slider to select segments, label by selecting the wave type and pressing 'Label'. Use the save to save when finished all annotations.",
        width=1000,
        height=30)
    txt_processing.style = {
        "font-size": '1.2em',
        'font-weight': 'bold',
        'color': 'SteelBlue',
        'white-space': 'pre'
    }

    btn_lbl = Button(label='Label', button_type='success')
    btn_save = Button(label='Save', button_type='danger')
    btn_load_annotated_graph = Button(label='Load Annotated Graph',
                                      button_type='warning')

    #----------------------------- Functions ---------------------------------#
    ''' Callback function for the data table ColumnDataSource (table_source).
        Load the hd5 file based on the selected row in the data_table.
        Generate the output file path.
        Update the date range slider according to the new file (date_range_slider).
        Generate a plot of the hd5 ECG lead II data (wave_graph).
        Then combine all into a layout (graph_layout)
        Update the Tab Pane with this new layout.
        Returns: Nothing'''
    def callback_select_file_table(attr, old, new):
        global newECG, out_file
        #clear tabs graphs to reduce overhead
        output_tab.child.children = []
        sel_id = new
        txt_processing.text = "Use slider to select segments, label by selecting the wave type and pressing 'Label'. Use the save button to when done."
        txt_processing.style = {"font-size": '1.2em', 'color': 'SteelBlue'}
        wave_file_path = load_selected_file(table_source, sel_id)
        out_file = load_annotation_file(
            table_source, sel_id, af_outpath)  #to be used for saving the file
        pgph_file_loaded.text = "Processing..."
        wave_hdfs, hdfs_keys = load_hd5_file(wave_file_path)
        print(hdfs_keys)
        newECG = (wave_hdfs.select(
            key='Waveforms').II).to_frame()  # get lead II from waveforms
        newECG.reset_index(
            inplace=True)  #move datetime index to column and reset the index
        newECG.columns = ['date', 'II']
        wave_hdfs.close()
        #enable the buttons if disabled
        btn_save.disabled = False
        btn_lbl.disabled = False
        btn_load_annotated_graph.disabled = True
        #check if the file has already been annotated
        annotated = check_if_annotated(table_source, sel_id)
        if annotated == 'Yes':
            txt_processing.text = 'This file has already been annotated. If you save this file again you will overwrite previous data.'
            txt_processing.style = {"font-size": '1.2em', 'color': 'Red'}
            btn_load_annotated_graph.disabled = False
        #create figure
        print("*********************Creating Figure (in Callback File Select)")
        get_next_graph(0, newECG)
        pgph_file_loaded.text = "File loaded, navigate Label Data Tab to annotate lead II."

    ''' Using the AF annotation file, create a Bokeh figure with annotated data.
        Update the output_tab to show this figure.
        Disable some buttons to keep users on track. '''

    def load_output_graph():
        global newECG
        btn_save.disabled = True
        btn_lbl.disabled = True
        btn_load_annotated_graph.disabled = True
        txt_processing.text = 'Loading Plot...'
        df_AF = pd.read_hdf(out_file)
        noise, normal, other, af, nosig, notAF = load_annotations(
            0, df_AF.shape[0] - 1, newECG, df_AF)
        output_graph = get_graph_annotated(noise, normal, other, af, nosig,
                                           notAF)
        output_tab.child.children = [output_graph]
        txt_processing.text = 'Plot loaded, navigate to "Final Annotated Graph" tab to view. &#10 If you save this file again you will overwrite previous data.'
        btn_save.disabled = False
        btn_lbl.disabled = False

    def get_next_graph(sind, df):
        length = 20000  #7200
        eind = sind + length
        if eind <= list(df.index)[-1] and sind < list(
                df.index
        )[-1]:  #as long as the end and beginning are less than the end of the dataframe
            sub_df = df.iloc[sind:eind]
            sub_df = sub_df.set_index('date')
            del sub_df.index.name
            source = ColumnDataSource(sub_df)
            wave_graph = get_graph(source)
            start_span, end_span = add_span(source)
            wave_graph.add_layout(start_span)
            wave_graph.add_layout(end_span)
            start_rng = start_span.location
            end_rng = end_span.location
        elif eind < list(df.index)[-1] and sind < list(
                df.index
        )[-1]:  #if the start is before but the end is after then just use the end of the dataframe
            sub_df = df.iloc[sind:list(df.index)[-1]]
            sub_df = sub_df.set_index('date')
            del sub_df.index.name
            source = ColumnDataSource(sub_df)
            wave_graph = get_graph(source)
            start_span, end_span = add_span(source)
            wave_graph.add_layout(start_span)
            wave_graph.add_layout(end_span)
            start_rng = start_span.location
            end_rng = end_span.location
        else:
            txt_processing.text = 'You have finished annotating this file.'
            return
        end_span.location = start_span.location
        #slider to change date range
        date_range_slider.start = start_rng
        date_range_slider.end = end_rng
        date_range_slider.value = start_rng
        date_range_slider.on_change('value', callback_date_time_slider)
        graph_layout = column(
            widgetbox([txt_processing, btn_load_annotated_graph, btn_save],
                      width=250),
            widgetbox(Div(text="""<hr/>""",
                          style={
                              'display': 'block',
                              'height': '1px',
                              'border': '0',
                              'border-top': '1px solid #ccc',
                              'margin': '1em 0',
                              'padding': '0'
                          }),
                      width=1400),
            widgetbox([rdo_btn_wave_lbls, btn_lbl], width=300),
            widgetbox(date_range_slider, width=1350),
            widgetbox(Div(text="""<hr/>""",
                          style={
                              'display': 'block',
                              'height': '1px',
                              'border': '0',
                              'border-top': '1px solid #ccc',
                              'margin': '1em 0',
                              'padding': '0'
                          }),
                      width=1400), wave_graph)
        wf_tab.child.children = [graph_layout]

    ''' Use a ColumnDataSource to plot the ECG lead II waveform data in a line plot.
        Parameters: source a ColumnDataSource
        Returns: p a Bokeh figure '''

    def get_graph(source):
        p = figure(plot_width=1400,
                   plot_height=500,
                   x_axis_type='datetime',
                   tools=['zoom_in', 'zoom_out', 'xpan', 'ypan'])
        date_range = source.data['II'][0:20000]
        p.y_range = Range1d(start=min(date_range) - 1, end=max(date_range) + 1)
        dt_axis_format = ["%d-%m-%Y %H:%M"]
        wf_x_axis = DatetimeTickFormatter(
            hours=dt_axis_format,
            days=dt_axis_format,
            months=dt_axis_format,
            years=dt_axis_format,
        )
        p.xaxis.formatter = wf_x_axis
        p.line(x='index',
               y='II',
               source=source,
               line_color='black',
               line_width=1)
        return p

    ''' Get the first and last time points from the ColumnDataSource (source).
        Utilizes tzlocal to add an offset which modifies the data.
        Returns integer timestamp values (in ms) for the dates found: start, end '''

    def get_time(source):
        start = pd.to_datetime(min(source.data['index'])).timestamp() * 1000
        end = pd.to_datetime(max(source.data['index'])).timestamp() * 1000
        return start, end

    ''' Generate two Bokeh Spans based on the ColumnDataSource given (source).
        Spans are at the first and last datetimes in the source data.
        Parameters: source a ColumnDataSource
        Returns: Span, Span '''

    def add_span(source):
        start, end = get_time(source)
        # Start span represents the start of the area of interest
        start_span = Span(location=start,
                          dimension='height',
                          line_color='green',
                          line_dash='dashed',
                          line_width=3)
        # End span represents the end of the area of interest
        end_span = Span(location=end,
                        dimension='height',
                        line_color='red',
                        line_dash='dashed',
                        line_width=3)
        return start_span, end_span

    ''' Callback function for the Bokeh Slider: move the end Span on the wave graph (created in callback_select_file_table) to the location selected on the slider.
        Returns: Nothing '''

    def callback_date_time_slider(attr, old, new):
        inds = get_spans()
        wf_tab.child.children[0].children[5].renderers[inds[1]].location = new

    ''' Navigate through the widgets on the wave_graph to find the spans
        Returns the widget indexes of the spans'''

    def get_spans():
        inds = []
        for x in range(len(wf_tab.child.children[0].children[5].renderers)):
            if isinstance(wf_tab.child.children[0].children[5].renderers[x],
                          Span):
                inds.append(x)
        return inds

    ''' Callback function for btn_lbl.
        Get the location of the spans from the wave_graph, then call segment_and_label
        Return: Nothing '''

    def callback_btn_lbl():
        inds = get_spans()
        active = rdo_btn_wave_lbls.labels[rdo_btn_wave_lbls.active]
        start_span = wf_tab.child.children[0].children[5].renderers[inds[0]]
        end_span = wf_tab.child.children[0].children[5].renderers[inds[1]]
        segment_and_label(active, start_span.location, end_span.location)

    ''' Function to get ECG data between two Spans (after modifying the timetuple to timestamp).
        Call apply_annotations using start and end indexes found.
        Modify the global df_ann variable.
        Update slider position (start to end), (end to start).
        Parameters: label a string (AF, Normal, Noise, Other), start a timestamp integer, end a timestamp integer
        Return: nothing '''

    def segment_and_label(label, start, end):
        global newECG
        print("*********************Segmenting and Labelling")
        try:
            txt_processing.text = "Use slider to select segments, label by selecting the wave type and pressing 'Label'. Use the save button to when done."
            start_dt = pd.Timestamp(start / 1000, unit='s')
            end_dt = pd.Timestamp(end / 1000, unit='s')
            mask = (newECG['date'] > start_dt) & (newECG['date'] <= end_dt)
            df_sub = newECG.loc[mask]  #apply mask
            indexes = list(df_sub.index)  #get indexes
            s_ind = indexes[0]  #get first index
            e_ind = indexes[-1]  #get last index
            apply_annotations(label, s_ind, e_ind,
                              df_ann)  # record the annotation label in df_ann
            get_next_graph(e_ind, newECG)
        except IndexError:
            txt_processing.text = 'Indexing error. Advance slider.'

    ''' Function to apply annotations to a dataframe structured like that of Computing in Cardiology AF algorithm.
        Parameters: label a string (AF, Normal, Noise, Other), s_ind the index of the start datetime,
                    e_ind the index of the end datetime, df a pandas DataFrame to be written to, columns = 'AF'
        Return: Nothing '''

    def apply_annotations(label, s_ind, e_ind, df):
        #data frame structure like this
        if label == 'AF':
            df.loc[s_ind, 0] = 'A'
        elif label == 'Not AF':
            df.loc[s_ind, 0] = 'nAF'
        elif label == 'Noise':
            df.loc[s_ind, 0] = '~'
        elif label == 'Normal':
            df.loc[s_ind, 0] = 'N'
        elif label == 'Other':
            df.loc[s_ind, 0] = 'O'
        elif label == 'No Signal':
            df.loc[s_ind, 0] = '-'

    ''' Stream update to ColumnDataSource that the file has been annotated'''

    def mark_as_done(file_path):
        print("*********************Marking as Done")
        table_source = file_table.source
        name = os.path.splitext(os.path.basename(file_path))[0]  #get file name
        ind = list(table_source.data['name']).index(
            name)  #get index within the source
        patches = {
            'annotated': [(ind, 'Yes')]
        }  #new data to update ColumnDataSource with
        table_source.patch(patches)  #update - ***** THROWING ERROR - CHECK!!

    ''' Callback function for btn_save.
        Utilizes the out_file global variable for the path.
        Appends dataframe to output file. '''

    def callback_save_annotations():
        print("*********************Saving Annotations")
        txt_processing.style = {"font-size": '1.2em', 'color': 'Red'}
        print("Writting: ", out_file)
        txt_processing.text = 'Saving Annotations...'
        df_ann.to_hdf(out_file, key='AF', format='t')
        print("success!")
        btn_save.disabled = True
        btn_lbl.disabled = True
        mark_as_done(out_file)
        nrows = df_ann.shape[0]
        df_ann.drop(df_ann.index[:nrows],
                    inplace=True)  #clear dataframe for a new file to be loaded
        btn_load_annotated_graph.disabled = False
        txt_processing.style = {"font-size": '1.2em', 'color': 'SteelBlue'}
        txt_processing.text = '''Done. Click 'Load Annotated Graph' to view annotations or go to "File Management" to select a new file to annotate. You will need to reload this file to make changes.'''

    ''' Load annotations from AF formatted hdf file. '''

    def load_annotations(start, end, df, df_AF):
        # Initialize empty dataframes
        noise = pd.DataFrame()
        normal = pd.DataFrame()
        other = pd.DataFrame()
        af = pd.DataFrame()
        nosig = pd.DataFrame()
        notAF = pd.DataFrame()
        # Read annotations
        for n in range(start, end):
            df_temp = df.iloc[df_AF.index[n]:df_AF.index[n + 1]]
            df_temp.index = list(df_temp['date'])
            df_temp.drop(columns='date')
            value = df_AF.iloc[n, 0]
            if value == '~':
                noise = noise.append(df_temp)
            elif value == 'N':
                normal = normal.append(df_temp)
            elif value == 'O':
                other = other.append(df_temp)
            elif value == 'A':
                af = af.append(df_temp)
            elif value == '-':
                nosig = nosig.append(df_temp)
            elif value == 'nAF':
                notAF = notAF.append(df_temp)
        #add the last labelled section (end+1) to end of df
        df_temp = df.iloc[df_AF.index[end]:df.index[-1]]
        df_temp.index = list(df_temp['date'])
        df_temp.drop(columns='date')
        value = df_AF.iloc[end, 0]
        if value == '~':
            noise = noise.append(df_temp)
        elif value == 'N':
            normal = normal.append(df_temp)
        elif value == 'O':
            other = other.append(df_temp)
        elif value == 'A':
            af = af.append(df_temp)
        elif value == '-':
            nosig = nosig.append(df_temp)
        elif value == 'nAF':
            notAF = notAF.append(df_temp)
        return noise, normal, other, af, nosig, notAF

    ''' Create Bokeh figure from dataframes af, normal, other and noise. '''

    def get_graph_annotated(noise, normal, other, af, nosig, notAF):
        p = figure(
            plot_width=1400,
            plot_height=500,
            x_axis_type='datetime',
            tools=['box_zoom', 'wheel_zoom', 'pan', 'reset', 'crosshair'])
        # plot color coded waves (if they exist)
        if noise.empty is False:
            p.line(x='index',
                   y='II',
                   source=noise,
                   color='blue',
                   legend='Noise')
        if normal.empty is False:
            p.line(x='index',
                   y='II',
                   source=normal,
                   color='green',
                   legend='Normal')
        if other.empty is False:
            p.line(x='index',
                   y='II',
                   source=other,
                   color='purple',
                   legend='Other')
        if af.empty is False:
            p.line(x='index', y='II', source=af, color='red', legend='AF')
        if nosig.empty is False:
            p.line(x='index',
                   y='II',
                   source=nosig,
                   color='black',
                   legend='No Signal')
        if notAF.empty is False:
            p.line(x='index',
                   y='II',
                   source=notAF,
                   color='grey',
                   legend='Not AF')
        dt_axis_format = ["%d-%m-%Y %H:%M"]
        wf_x_axis = DatetimeTickFormatter(
            hours=dt_axis_format,
            days=dt_axis_format,
            months=dt_axis_format,
            years=dt_axis_format,
        )
        p.xaxis.formatter = wf_x_axis
        return p

    ############################## Assign Callbacks ##########################################
    #table_source.on_change('selected', callback_select_file_table) #assign callback
    table_source.selected.on_change('indices', callback_select_file_table)
    btn_lbl.on_click(callback_btn_lbl)
    btn_save.on_click(callback_save_annotations)
    btn_load_annotated_graph.on_click(load_output_graph)

    ################################## Load Document ##########################################
    ####layouts####
    file_layout = column(widgetbox(pgph_file_loaded, file_table, width=1000))
    col_waveforms = column(name="figures", sizing_mode='scale_width')
    col_output = column(name="output", sizing_mode='scale_width')

    ###tabs###
    wf_tab = Panel(child=col_waveforms, title='Label Data')
    file_tab = Panel(child=file_layout, title='File Management')
    output_tab = Panel(child=col_output, title='Final Annotated Graph')
    tab_pane = Tabs(tabs=[file_tab, wf_tab, output_tab], width=1000)

    ###combine into document####
    curdoc().add_root(column(tab_pane))
    curdoc().title = "AF Annotator 4"
Ejemplo n.º 13
0
def bubble_plot_tabs(dataframes):
    dataframes = dataframes.copy()

    # convert asset dicts to pandas dataframes
    base_df = pd.DataFrame.from_dict(dataframes['base_output_by_asset'])
    reform_df = pd.DataFrame.from_dict(dataframes['reform_output_by_asset'])
    change_df = pd.DataFrame.from_dict(dataframes['changed_output_by_asset'])

    list_df = [base_df, change_df, reform_df]
    list_string = ['base', 'change', 'reform']

    data_sources = {}
    for i, df in enumerate(list_df):
        # remove data from Intellectual Property, Land, and Inventories Categories
        df = df[~df['asset_category'].
                isin(['Intellectual Property', 'Land', 'Inventories'])].copy()
        df = df.dropna()

        # define the bubble-size columns; for the non-base (change/reform) frames, reuse the sizes computed for base
        if list_string[i] == 'base':
            SIZES = list(range(20, 80, 15))
            size = pd.qcut(df['assets_c'].values, len(SIZES), labels=SIZES)
            size_c = pd.qcut(df['assets_c'].values, len(SIZES), labels=SIZES)
            size_nc = pd.qcut(df['assets_nc'].values, len(SIZES), labels=SIZES)
            df['size'] = size
            df['size_c'] = size_c
            df['size_nc'] = size_nc
        else:
            df['size'] = size
            df['size_c'] = size_c
            df['size_nc'] = size_nc

        # form the two Categories: Equipment and Structures
        equipment_df = df[(~df.asset_category.str.contains('Structures'))
                          & (~df.asset_category.str.contains('Buildings'))].copy()
        structure_df = df[(df.asset_category.str.contains('Structures')) |
                          (df.asset_category.str.contains('Buildings'))].copy()

        format_fields = [
            'metr_c', 'metr_nc', 'metr_c_d', 'metr_nc_d', 'metr_c_e',
            'metr_nc_e', 'mettr_c', 'mettr_nc', 'mettr_c_d', 'mettr_nc_d',
            'mettr_c_e', 'mettr_nc_e', 'rho_c', 'rho_nc', 'rho_c_d',
            'rho_nc_d', 'rho_c_e', 'rho_nc_e', 'z_c', 'z_nc', 'z_c_d',
            'z_nc_d', 'z_c_e', 'z_nc_e'
        ]

        # Make short category
        make_short = {
            'Instruments and Communications Equipment':
            'Instruments and Communications',
            'Office and Residential Equipment': 'Office and Residential',
            'Other Equipment': 'Other',
            'Transportation Equipment': 'Transportation',
            'Other Industrial Equipment': 'Other Industrial',
            'Nonresidential Buildings': 'Nonresidential Bldgs',
            'Residential Buildings': 'Residential Bldgs',
            'Mining and Drilling Structures': 'Mining and Drilling',
            'Other Structures': 'Other',
            'Computers and Software': 'Computers and Software',
            'Industrial Machinery': 'Industrial Machinery'
        }
        equipment_df['short_category'] = equipment_df['asset_category'].map(
            make_short)
        structure_df['short_category'] = structure_df['asset_category'].map(
            make_short)

        # Add the Reform and the Baseline to Equipment Asset
        for f in format_fields:
            equipment_copy = equipment_df.copy()
            equipment_copy['rate'] = equipment_copy[f]
            equipment_copy['hover'] = equipment_copy.apply(
                lambda x: "{0:.1f}%".format(x[f] * 100), axis=1)
            simple_equipment_copy = equipment_copy.filter(items=[
                'size', 'size_c', 'size_nc', 'rate', 'hover', 'short_category',
                'Asset'
            ])
            data_sources[list_string[i] + '_equipment_' +
                         f] = ColumnDataSource(simple_equipment_copy)

        # Add the Reform and the Baseline to Structures Asset
        for f in format_fields:
            structure_copy = structure_df.copy()
            structure_copy['rate'] = structure_copy[f]
            structure_copy['hover'] = structure_copy.apply(
                lambda x: "{0:.1f}%".format(x[f] * 100), axis=1)
            simple_structure_copy = structure_copy.filter(items=[
                'size', 'size_c', 'size_nc', 'rate', 'hover', 'short_category',
                'Asset'
            ])
            data_sources[list_string[i] + '_structure_' +
                         f] = ColumnDataSource(simple_structure_copy)

        # Create initial data sources to plot on load
        if list_string[i] == 'base':
            equipment_copy = equipment_df.copy()
            equipment_copy['rate'] = equipment_copy['mettr_c']
            equipment_copy['hover'] = equipment_copy.apply(
                lambda x: "{0:.1f}%".format(x['mettr_c'] * 100), axis=1)
            simple_equipment_copy = equipment_copy.filter(items=[
                'size', 'size_c', 'size_nc', 'rate', 'hover', 'short_category',
                'Asset'
            ])
            data_sources['equip_source'] = ColumnDataSource(
                simple_equipment_copy)

            structure_copy = structure_df.copy()
            structure_copy['rate'] = structure_copy['mettr_c']
            structure_copy['hover'] = structure_copy.apply(
                lambda x: "{0:.1f}%".format(x['mettr_c'] * 100), axis=1)
            simple_structure_copy = structure_copy.filter(items=[
                'size', 'size_c', 'size_nc', 'rate', 'hover', 'short_category',
                'Asset'
            ])
            data_sources['struc_source'] = ColumnDataSource(
                simple_structure_copy)

    # Define categories for Equipments assets
    equipment_assets = [
        'Computers and Software', 'Instruments and Communications',
        'Office and Residential', 'Transportation', 'Industrial Machinery',
        'Other Industrial', 'Other'
    ]

    # Define categories for Structures assets
    structure_assets = [
        'Residential Bldgs', 'Nonresidential Bldgs', 'Mining and Drilling',
        'Other'
    ]

    # Equipment plot
    p = figure(
        plot_height=540,
        plot_width=990,
        y_range=list(reversed(equipment_assets)),
        tools='hover',
        background_fill_alpha=0,
        title=
        'Marginal Effective Total Tax Rates on Corporate Investments in Equipment'
    )
    p.title.align = 'center'
    p.title.text_color = '#6B6B73'

    hover = p.select(dict(type=HoverTool))
    hover.tooltips = [('Asset', ' @Asset (@hover)')]

    p.xaxis.axis_label = "Marginal effective total tax rate"
    p.xaxis[0].formatter = NumeralTickFormatter(format="0.1%")

    p.toolbar_location = None
    p.min_border_right = 5

    p.outline_line_width = 5
    p.border_fill_alpha = 0
    p.xaxis.major_tick_line_color = "firebrick"
    p.xaxis.major_tick_line_width = 3
    p.xaxis.minor_tick_line_color = "orange"

    p.outline_line_width = 1
    p.outline_line_alpha = 1
    p.outline_line_color = "black"

    p.circle(x='rate',
             y='short_category',
             color=BLUE,
             size='size',
             line_color="#333333",
             fill_alpha=.4,
             source=data_sources['equip_source'],
             alpha=.4)

    # Style the tools
    p.add_tools(WheelZoomTool(), ResetTool(), SaveTool())
    p.toolbar_location = "right"
    p.toolbar.logo = None

    # Define and add a legend
    legend_cds = ColumnDataSource({
        'size': SIZES,
        'label': ['<$20B', '', '', '<$1T'],
        'x': [0, .15, .35, .6]
    })
    p_legend = figure(height=150,
                      width=480,
                      x_range=(-0.075, .75),
                      title='Asset Amount')
    p_legend.circle(y=None,
                    x='x',
                    size='size',
                    source=legend_cds,
                    color=BLUE,
                    fill_alpha=.4,
                    alpha=.4,
                    line_color="#333333")
    l = LabelSet(y=None,
                 x='x',
                 text='label',
                 x_offset=-20,
                 y_offset=-50,
                 source=legend_cds)
    p_legend.add_layout(l)
    p_legend.axis.visible = False
    p_legend.grid.grid_line_color = None
    p_legend.toolbar.active_drag = None

    data_sources['equip_plot'] = p

    # Structures plot
    p2 = figure(
        plot_height=540,
        plot_width=990,
        y_range=list(reversed(structure_assets)),
        tools='hover',
        background_fill_alpha=0,
        title=
        'Marginal Effective Total Tax Rates on Corporate Investments in Structures'
    )
    p2.title.align = 'center'
    p2.title.text_color = '#6B6B73'

    hover = p2.select(dict(type=HoverTool))
    hover.tooltips = [('Asset', ' @Asset (@hover)')]
    p2.xaxis.axis_label = "Marginal effective total tax rate"
    p2.xaxis[0].formatter = NumeralTickFormatter(format="0.1%")
    p2.toolbar_location = None
    p2.min_border_right = 5
    p2.outline_line_width = 0
    p2.border_fill_alpha = 0

    p2.xaxis.major_tick_line_color = "firebrick"
    p2.xaxis.major_tick_line_width = 3
    p2.xaxis.minor_tick_line_color = "orange"

    p2.circle(x='rate',
              y='short_category',
              color=RED,
              size='size',
              line_color="#333333",
              fill_alpha=.4,
              source=data_sources['struc_source'],
              alpha=.4)

    p2.outline_line_width = 1
    p2.outline_line_alpha = 1
    p2.outline_line_color = "black"

    # Style the tools
    p2.add_tools(WheelZoomTool(), ResetTool(), SaveTool())
    p2.toolbar_location = "right"
    p2.toolbar.logo = None

    # Define and add a legend
    p2_legend = figure(height=150,
                       width=380,
                       x_range=(-0.075, .75),
                       title='Asset Amount')
    p2_legend.circle(y=None,
                     x='x',
                     size='size',
                     source=legend_cds,
                     color=RED,
                     fill_alpha=.4,
                     alpha=.4,
                     line_color="#333333")
    l2 = LabelSet(y=None,
                  x='x',
                  text='label',
                  x_offset=-20,
                  y_offset=-50,
                  source=legend_cds)
    p2_legend.add_layout(l2)
    p2_legend.axis.visible = False
    p2_legend.grid.grid_line_color = None
    p2_legend.toolbar.active_drag = None

    data_sources['struc_plot'] = p2

    # add buttons
    controls_callback = CustomJS(args=data_sources,
                                 code=CONTROLS_CALLBACK_SCRIPT)

    c_nc_buttons = RadioButtonGroup(labels=['Corporate', 'Noncorporate'],
                                    active=0,
                                    callback=controls_callback)
    controls_callback.args['c_nc_buttons'] = c_nc_buttons

    format_buttons = RadioButtonGroup(labels=['Baseline', 'Reform', 'Change'],
                                      active=0,
                                      callback=controls_callback)
    controls_callback.args['format_buttons'] = format_buttons

    interest_buttons = RadioButtonGroup(
        labels=['METTR', 'METR', 'Cost of Capital', 'Depreciation'],
        active=0,
        width=700,
        callback=controls_callback)
    controls_callback.args['interest_buttons'] = interest_buttons

    type_buttons = RadioButtonGroup(
        labels=['Typically Financed', 'Equity Financed', 'Debt Financed'],
        active=0,
        width=700,
        callback=controls_callback)
    controls_callback.args['type_buttons'] = type_buttons

    # Create Tabs
    tab = Panel(child=column([p, p_legend]), title='Equipment')
    tab2 = Panel(child=column([p2, p2_legend]), title='Structures')
    tabs = Tabs(tabs=[tab, tab2])
    layout = gridplot(children=[[tabs], [c_nc_buttons, interest_buttons],
                                [format_buttons, type_buttons]])

    # Create components
    js, div = components(layout)
    cdn_js = CDN.js_files[0]
    cdn_css = CDN.css_files[0]

    return js, div, cdn_js, cdn_css
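# --- Hedged embedding sketch (not from the original source) ---
# bubble_plot_tabs() returns the output of bokeh.embed.components() plus the CDN URLs,
# so the caller is expected to splice these into an HTML page. The snippet below is one
# plausible way to do that; `dataframes` and the output file name are assumptions.
js, div, cdn_js, cdn_css = bubble_plot_tabs(dataframes)
page = """<html>
  <head>
    <link rel="stylesheet" href="{css}">
    <script src="{js_url}"></script>
  </head>
  <body>
    {div}
    {script}
  </body>
</html>""".format(css=cdn_css, js_url=cdn_js, div=div, script=js)
with open("bubble_plots.html", "w") as fh:
    fh.write(page)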
Ejemplo n.º 14
0
def plot_wiki_revisions_JINJA():
    try:
        page_title = request.args.get('page_title')
        page_title = page_title.title()
        title = f'Revisions to the "<a href="https://en.wikipedia.org/wiki/{page_title}">{page_title}</a>" Wikipedia Page Over Time'

        try:
            users_and_timestamps = urllib.parse.unquote(request.args.get('chart_data'))
            users_and_timestamps = ast.literal_eval(users_and_timestamps)

        except:
            rv_data = Wiki_Query(page_title).revisions_data()
            users_and_timestamps = [(a, b) for a, b in zip(rv_data.rv_users, rv_data.rv_timestamps)]


        timestamps = [item[1] for item in users_and_timestamps]
        timestamps.reverse()
        num_revisions = len(timestamps)
        orig_author = users_and_timestamps[-1][0]

        editors = set(item[0] for item in users_and_timestamps)
        num_editors = len(editors)

        dates = [datetime.strptime(d, '%Y-%m-%dT%H:%M:%SZ').date() for d in timestamps]
        days = sorted(list(set(dates)))

        date_freqs = {}
        for date in dates:
            if not str(date) in date_freqs:
                date_freqs[str(date)] = 1
            else:
                date_freqs[str(date)] += 1

        date_freqs = [item[1] for item in sorted(date_freqs.items(), key=lambda x: x[0])]
        cum_edits = np.cumsum(date_freqs)

        days_str = days
        days = [datetime.combine(day, datetime.min.time()) for day in days]

        source = ColumnDataSource(data=dict(x_values=days,
                                            y_values=date_freqs,
                                            desc=days_str,
                                            yy_values=[d - 1 for d in date_freqs],
                                            color=['#0072B2' for d in days],
                                            color2=['red' for d in days],
                                            cum_edits=cum_edits,
                                            ))

        def get_width():
            mindate = min(source.data['x_values'])
            maxdate = max(source.data['x_values'])
            return ((maxdate - mindate).total_seconds() * 1000 / len(source.data['x_values']))

        p1 = figure(title=page_title,
                    x_axis_label='Time In Days',
                    y_axis_label='Revisions Per Day',
                    x_axis_type='datetime',
                    tools="pan,wheel_zoom,box_zoom,undo,redo,reset,save")

        # add a line renderer for Hovertool tracking.
        p1line = p1.line('x_values', 'y_values', name=page_title, source=source, alpha=0)
        # Add vbar for data display
        p1.vbar(x='x_values', width=get_width(), color='color', top='y_values', bottom=0, source=source)

        # add an interactive tools to the visual
        p1.add_tools(HoverTool(tooltips=[("Date", "@desc"), ("# Revisions", "@y_values")],
                               mode='vline',
                               renderers=[p1line]))

        p1.toolbar.logo = None

        p1.background_fill_color = "#eeeeee"
        p1.xaxis.axis_line_color = "#bcbcbc"
        p1.yaxis.axis_line_color = "#bcbcbc"
        tab1 = Panel(child=p1, title='Frequency')

        p2 = figure(title=page_title,
                    x_axis_label='Time in Days',
                    y_axis_label='Count of Revisions',
                    x_axis_type='datetime',
                    tools="pan,wheel_zoom,box_zoom,reset,save")

        # add a line renderer for Hovertool tracking.
        p2line = p2.line('x_values', 'cum_edits', alpha=0, source=source)
        # Add vbar for data display
        p2.vbar(x='x_values', width=get_width(), color='color', top='cum_edits', bottom=0, source=source)

        # add some interactive tools to the visual
        p2.add_tools(HoverTool(tooltips=[("Date", "@desc"), ("# Revisions", "@cum_edits")],
                               mode='vline',
                               renderers=[p2line]))

        p2.toolbar.logo = None

        p2.background_fill_color = "#eeeeee"
        p2.xaxis.axis_line_color = "#bcbcbc"
        p2.yaxis.axis_line_color = "#bcbcbc"

        tab2 = Panel(child=p2, title='Cumulative')

        tabs = Tabs(tabs=[tab1, tab2])

        html = file_html(tabs, CDN, page_title)

        chart_data = urllib.parse.urlencode({'chart_data':users_and_timestamps})
        return render_template('PlotWikiRevisions_JINJA.html',
                               html=html,
                               page_title=page_title,
                               title=title,
                               num_revisions=num_revisions,
                               num_editors=num_editors,
                               orig_author=orig_author,
                               chart_data=chart_data)

    except:
        if not page_title:
            num_revisions = ''
            num_editors = ''
            title = "Search Revisions on Wikipedia Over Time"
            html = '''<div id="chart"><img class="about" src="{{image}}" onerror="this.onerror=null; this.src='static/W_mark.png'" alt="Click below"/></div>'''
            return render_template('PlotWikiRevisions_JINJA.html',
                                   html=html,
                                   page_title=page_title,
                                   image='static/W_mark.png',
                                   title=title,
                                   num_revisions=num_revisions,
                                   num_editors=num_editors)
        else:
            num_revisions = 0
            num_editors = 0
            title = "Search Revisions on Wikipedia Over Time"
            html = '''<div id="chart"><img class="about" src="{{image}}" onerror="this.onerror=null; this.src='https://upload.wikimedia.org/wikipedia/commons/a/a0/Font_Awesome_5_regular_frown.svg'" alt="Click below"/></div>'''
            return render_template('PlotWikiRevisions_JINJA.html',
                                   html=html,
                                   page_title=page_title,
                                   image='https://upload.wikimedia.org/wikipedia/commons/a/a0/Font_Awesome_5_regular_frown.svg',
                                   title=title,
                                   num_revisions=num_revisions,
                                   num_editors=num_editors)
Ejemplo n.º 15
0
                  width=300,
                  height=100)

        # Plots layout of all components.
        genome_plot = layout(
            row([
                genome_plot,
                column([
                    Div(text="""""", width=300, height=220), cov_graph, ose,
                    slider, slider_af, syngroup,
                    widgetbox(div), reset_button
                ])
            ]))

        # Creates both tabs and different plots for each sample.
        tab = Panel(child=genome_plot, title=name.split('/')[-1])
        list_tabs.append(tab)
        list_plots.append(genome_plot)

    tabs_genomes = Tabs(tabs=list_tabs)
    plots_genomes = (column(list_plots))

    total_num_mutations = 0

    # Sorts data by protein location.
    merged.Protein = merged.Protein.astype("category")
    merged.Protein.cat.set_categories(protein_names, inplace=True)
    merged.sort_values(["Protein"])

    # Creates protein plots.
    # These graphs plot sample # (based on metadata) on x-axis, and alelle frequency on the y-axis. Tabs allow
Ejemplo n.º 16
0
        ]
    category_filter = BooleanFilter(category_filter)

    view = CDSView(source=expenses_source,
                   filters=[month_filter, category_filter])

    return DataTable(source=expenses_source,
                     columns=table_columns,
                     view=view,
                     sizing_mode='stretch_both')


expenses_table = get_expenses_table()
overview_row = row([monthly_overview, expenses_table],
                   sizing_mode='stretch_both')
overview_panel = Panel(child=overview_row, title='Overview')


def bar_tap_callback(event):
    """
    update the expenses table to show only entries of the selected month
    """
    # TODO when no bar is selected list index out of range
    try:
        selected = monthly_category_source.selected.indices[-1]
        print(selected)
        selected_month = monthly_category_df.index[selected].month
    except IndexError:
        selected_month = None
    expenses_table = get_expenses_table(selected_month)
    overview_row.children[1] = expenses_table
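# Not in the original fragment: one plausible way to wire bar_tap_callback to the bar
# chart, assuming `monthly_overview` is the figure whose bars drive the selection.
from bokeh.events import Tap

monthly_overview.on_event(Tap, bar_tap_callback)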
Ejemplo n.º 17
0
# Create a list containing plots p3 and p4: row2
row2 = [p3, p4]

# Create a gridplot using row1 and row2: layout
layout = gridplot([row1, row2])

# Specify the name of the output_file and show the result
output_file('grid.html')
show(layout)

# Import Panel from bokeh.models.widgets
from bokeh.models.widgets import Panel

# Create tab1 from plot p1: tab1
tab1 = Panel(child=p1, title='Latin America')

# Create tab2 from plot p2: tab2
tab2 = Panel(child=p2, title='Africa')

# Create tab3 from plot p3: tab3
tab3 = Panel(child=p3, title='Asia')

# Create tab4 from plot p4: tab4
tab4 = Panel(child=p4, title='Europe')

# Import Tabs from bokeh.models.widgets
from bokeh.models.widgets import Tabs

# Create a Tabs layout: layout
layout = Tabs(tabs=[tab1, tab2, tab3, tab4])
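# Not part of the original exercise: one way to render the tabbed layout, reusing the
# output_file()/show() calls already used for the grid above (the file name is illustrative).
output_file('tabs.html')
show(layout)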
Ejemplo n.º 18
0
def showGraph():
    Product_ID = request.args.get('pid')
    sdate = request.args.get('sdate')
    edate = request.args.get('edate')
    cur = con.cursor()
    cur.execute("select * from product")
    product = cur.fetchall()

    if sdate is None and edate is None:
        cur.execute("select TOP 7 * from v_vendor_closing where prod_id=?",
                    product[0][0])
    else:
        cur.execute(
            "select * from v_vendor_closing where prod_id=? and date>=? and date<=?",
            Product_ID, sdate, edate)

    data = cur.fetchall()

    X = []
    y1 = []
    y2 = []
    for i in data:
        X.append(i[1])
        y1.append(i[2])
        y2.append(i[3])

    hover1 = HoverTool(tooltips=[("Quantity", "@top")])
    barchart = figure(x_axis_type='datetime',
                      plot_height=450,
                      title="Stock Counts",
                      toolbar_location=None,
                      tools=[hover1],
                      sizing_mode='stretch_both')
    barchart.vbar(x=X, top=y1, width=70400000)

    barchart.y_range.start = 0
    barchart.xaxis.ticker = DaysTicker(days=np.arange(1, 32))
    barchart.xaxis.major_label_orientation = 1.5
    barchart.xaxis.formatter = DatetimeTickFormatter(days=["%d %B"],
                                                     months=["%d %B"])

    hover2 = HoverTool(tooltips=[("Quantity", "@top")])
    p = figure(x_axis_type='datetime',
               plot_height=450,
               title="Stock Counts",
               toolbar_location=None,
               tools=[hover2],
               sizing_mode='stretch_both')
    p.vbar(x=X, top=y2, width=70400000)
    p.xaxis.ticker = DaysTicker(days=np.arange(1, 32))
    p.xaxis.major_label_orientation = 1.5
    p.y_range.start = 0
    p.xaxis.formatter = DatetimeTickFormatter(days=["%d %B %Y"],
                                              months=["%d %B %Y"])

    tab1 = Panel(child=barchart, title="Sales")
    tab2 = Panel(child=p, title="Dispose")
    tabs = Tabs(tabs=[tab1, tab2])
    script, div = components(tabs)

    #script2, div2 = components(p)
    return render_template("vendor/showGraph.html",
                           bars_count=1,
                           the_div=div,
                           the_script=script,
                           product=product)
Ejemplo n.º 19
0
                alpha=0.6,
                size=10,
                selection_color="red",
                hover_color="red")
left_lin.yaxis.axis_label = "Frequency (per 10k words)"
left_lin.xaxis.axis_label = "Rank"
hoverL = left_lin.select(dict(type=HoverTool))
hoverL.tooltips = {
    "word": "@word",
    "rank": "@rank_x_new",
    "freq": "@freq_x",
    "per_10k": "@rel_x_new",
    "LL": "@LL",
    "pval": "@pval"
}
panel_llin = Panel(child=left_lin, title='linear')

left_log = figure(tools=TOOLS,
                  x_axis_type='log',
                  y_axis_type='log',
                  plot_width=500,
                  plot_height=500,
                  sizing_mode='fixed',
                  output_backend="webgl")
left_log.circle('rank_x_new',
                'rel_x_new',
                source=source,
                alpha=0.6,
                size=10,
                selection_color="red",
                hover_color="red")
Ejemplo n.º 20
0
def draw_heatmaps(df, outfile, title, taxonomic_rank, colour):
    """
    Draw heatmaps for the given input dataframe, to
    the specified file with the given title.
    """
    # If the sample contains only superkingdom information, use that:
    if taxonomic_rank == "superkingdom":
        # create source info
        # and set hovertool tooltip parameters
        samples = df["Sample_name"].astype(str)
        assigned = df["superkingdom"].astype(str)
        reads = df["reads"].astype(float)
        percent_of_total = df["Percentage"].astype(float)

        colors = len(reads) * colour  # multiply to make an equally long list

        max_load = max(percent_of_total)
        alphas = [
            min(x / float(max_load), 0.9) + 0.1 for x in percent_of_total
        ]

        source = ColumnDataSource(data=dict(
            samples=samples,
            assigned=assigned,
            reads=reads,
            percent_of_total=percent_of_total,
            colors=colors,
            alphas=alphas,
        ))

        y_value = (assigned, "assigned")

    # Otherwise, create the usual heatmap input info for each
    # (relevant) taxonomic rank down to species.
    else:
        # Remove 'unclassified' taxa: NaN in dataframe
        df = df[df[taxonomic_rank].notnull()]

        # Check if the dataframe is empty
        if df.empty:
            # If so, signal the caller that there is nothing to plot at this rank
            return (None, False)

        else:
            # If it is not empty, continue normally
            if (max(
                    pd.DataFrame(
                        df.groupby(["Sample_name", taxonomic_rank]).size())[0])
                    > 3):
                # if there are taxa with more than 3 contigs *in one sample*
                # the hover info boxes will be too many, so
                # aggregate statistics per taxon

                aggregated = True

                new_df = pd.DataFrame(
                    df.groupby(["Sample_name",
                                taxonomic_rank]).size()).reset_index()
                new_df = new_df.rename(columns={0: "Number_of_contigs"})

                min_df = pd.DataFrame(
                    df.groupby(["Sample_name",
                                taxonomic_rank]).min()).reset_index()
                max_df = pd.DataFrame(
                    df.groupby(["Sample_name",
                                taxonomic_rank]).max()).reset_index()
                sum_df = pd.DataFrame(
                    df.groupby(["Sample_name",
                                taxonomic_rank]).sum()).reset_index()
                avg_df = pd.DataFrame(
                    df.groupby(["Sample_name",
                                taxonomic_rank]).mean()).reset_index()

                for column in [
                        "Plus_reads",
                        "Minus_reads",
                        "Avg_fold",
                        "Length",
                        "Percentage",
                        "Nr_ORFs",
                ]:
                    min_df = min_df.rename(columns={column: "MIN_%s" % column})
                    max_df = max_df.rename(columns={column: "MAX_%s" % column})
                    sum_df = sum_df.rename(columns={column: "SUM_%s" % column})
                    avg_df = avg_df.rename(columns={column: "AVG_%s" % column})

                    new_df["MIN_%s" % column] = min_df["MIN_%s" % column]
                    new_df["MAX_%s" % column] = max_df["MAX_%s" % column]
                    new_df["SUM_%s" % column] = sum_df["SUM_%s" % column]
                    new_df["AVG_%s" % column] = avg_df["AVG_%s" % column]

                for stat in ["MIN", "MAX", "SUM", "AVG"]:
                    new_df["%s_reads" %
                           stat] = (new_df["%s_Minus_reads" % stat] +
                                    new_df["%s_Plus_reads" % stat])

                new_df["tax_name"] = min_df["tax_name"]
                new_df["taxon"] = min_df[taxonomic_rank]
                new_df["total_reads"] = df["read_pairs"]

                new_df = new_df.fillna(0)

                samples = new_df["Sample_name"].astype(str)
                nr_contigs = new_df["Number_of_contigs"].astype(int)
                assigned = new_df["tax_name"].astype(str)
                taxonomy = new_df["taxon"].astype(str)
                min_reads = new_df["MIN_reads"].astype(int)
                max_reads = new_df["MAX_reads"].astype(int)
                sum_reads = new_df["SUM_reads"].astype(int)
                avg_reads = new_df["AVG_reads"].astype(int)
                total_reads = new_df["total_reads"].astype(int)
                min_percentage = new_df["MIN_Percentage"].astype(float)
                max_percentage = new_df["MAX_Percentage"].astype(float)
                sum_percentage = new_df["SUM_Percentage"].astype(float)
                avg_percentage = new_df["AVG_Percentage"].astype(float)
                min_coverage = new_df["MIN_Avg_fold"].astype(int)
                max_coverage = new_df["MAX_Avg_fold"].astype(int)
                sum_coverage = new_df["SUM_Avg_fold"].astype(int)
                avg_coverage = new_df["AVG_Avg_fold"].astype(int)
                min_length = new_df["MIN_Length"].astype(int)
                max_length = new_df["MAX_Length"].astype(int)
                sum_length = new_df["SUM_Length"].astype(int)
                avg_length = new_df["AVG_Length"].astype(int)
                min_nr_orfs = new_df["MIN_Nr_ORFs"].astype(int)
                max_nr_orfs = new_df["MAX_Nr_ORFs"].astype(int)
                sum_nr_orfs = new_df["SUM_Nr_ORFs"].astype(int)
                avg_nr_orfs = new_df["AVG_Nr_ORFs"].astype(int)

                colors = len(samples) * colour

                max_load = max(avg_percentage)
                alphas = [
                    min(x / float(max_load), 0.9) + 0.1 for x in avg_percentage
                ]
                # scale darkness to the average percentage of reads

                source = ColumnDataSource(data=dict(
                    samples=samples,
                    nr_contigs=nr_contigs,
                    assigned=assigned,
                    taxonomy=taxonomy,
                    min_reads=min_reads,
                    max_reads=max_reads,
                    sum_reads=sum_reads,
                    avg_reads=avg_reads,
                    total_reads=total_reads,
                    min_percentage=min_percentage,
                    max_percentage=max_percentage,
                    sum_percentage=sum_percentage,
                    avg_percentage=avg_percentage,
                    min_coverage=min_coverage,
                    max_coverage=max_coverage,
                    sum_coverage=sum_coverage,
                    avg_coverage=avg_coverage,
                    min_length=min_length,
                    max_length=max_length,
                    sum_length=sum_length,
                    avg_length=avg_length,
                    min_nr_orfs=min_nr_orfs,
                    max_nr_orfs=max_nr_orfs,
                    sum_nr_orfs=sum_nr_orfs,
                    avg_nr_orfs=avg_nr_orfs,
                    colors=colors,
                    alphas=alphas,
                ))

            else:
                # no taxon has too many contigs assigned per sample,
                # so create a plot for everything

                aggregated = False

                df.fillna(0, inplace=True)

                samples = df["Sample_name"].astype(str)
                scaffolds = df["scaffold_name"].astype(str)
                assigned = df["tax_name"].astype(str)
                taxonomy = df[taxonomic_rank].astype(str)
                reads = df["reads"].astype(int)
                total_reads = df["read_pairs"].astype(int)
                percent_of_total = df["Percentage"].astype(float)
                coverage = df["Avg_fold"].astype(int)
                contig_length = df["Length"].astype(int)
                nr_orfs = df["Nr_ORFs"].astype(int)

                colors = len(
                    reads) * colour  # multiply to make an equally long list

                max_load = max(percent_of_total)
                alphas = [
                    min(x / float(max_load), 0.9) + 0.1
                    for x in percent_of_total
                ]

                source = ColumnDataSource(data=dict(
                    samples=samples,
                    scaffolds=scaffolds,
                    assigned=assigned,
                    taxonomy=taxonomy,
                    reads=reads,
                    total_reads=total_reads,
                    percent_of_total=percent_of_total,
                    coverage=coverage,
                    contig_length=contig_length,
                    nr_orfs=nr_orfs,
                    colors=colors,
                    alphas=alphas,
                ))

            y_value = (taxonomy, "taxonomy")

    TOOLS = "hover, save, pan, box_zoom, wheel_zoom, reset"

    p = figure(
        title=title,
        # If desired, the sample can be displayed as "Run x, sample y"
        # -> uncomment the next line
        # x_range = [ "Run %s, sample %s" % (x.split('_')[0], x.split('_')[1]) for x in list(sorted(set(samples))) ],
        x_range=list(sorted(set(df["Sample_name"]))),
        y_range=list(reversed(sorted(set(
            y_value[0])))),  # reverse to order 'from top to bottom'
        x_axis_location="above",
        toolbar_location="right",
        tools=TOOLS,
    )

    # Edit the size of the heatmap when there are many samples and/or taxa
    if len(set(samples)) > 20:
        p.plot_width = int(len(set(samples)) * 25)
    else:
        pass
    # Adjust heatmap sizes depending on the number of
    # taxa observed (not applicable for superkingdom heatmap)
    if taxonomic_rank != "superkingdom":
        if len(set(taxonomy)) > 100:
            p.plot_height = int(p.plot_height * 3)
            p.plot_width = int(p.plot_width * 1.5)
        elif len(set(taxonomy)) > 50:
            p.plot_height = int(p.plot_height * 2)
            p.plot_width = int(p.plot_width * 1.2)
        elif len(set(taxonomy)) > 25:
            p.plot_height = int(p.plot_height * 1.2)
        else:
            pass

        # And set tooltip depending on superkingdoms

        if aggregated:
            # An aggregated format requires a different hover tooltip
            p.select_one(HoverTool).tooltips = [
                ("Sample", "@samples"),
                ("Taxon", "@assigned"),
                ("Number of scaffolds", "@nr_contigs"),
                # ('-----', ""), # If you like a separator in the tooltip
                (
                    "Number of reads total (min, avg, max)",
                    "@sum_reads (@min_reads, @avg_reads, @max_reads)",
                ),
                (
                    "Scaffold length total (min, avg, max)",
                    "@sum_length (@min_length, @avg_length, @max_length)",
                ),
                (
                    "Number of ORFs total (min, avg, max)",
                    "@sum_nr_orfs (@min_nr_orfs, @avg_nr_orfs, @max_nr_orfs)",
                ),
                (
                    "Depth of coverage total (min, avg, max)",
                    "@sum_coverage (@min_coverage, @avg_coverage*, @max_coverage)",
                ),
                ("*", "darkness scaled to this number"),
            ]
        else:
            p.select_one(HoverTool).tooltips = [
                ("Sample", "@samples"),
                ("Scaffold", "@scaffolds"),
                ("Taxon", "@assigned"),
                ("Number of reads",
                 "@reads (@percent_of_total % of sample total)"),
                ("Scaffold length", "@contig_length"),
                ("Number of ORFs", "@nr_orfs"),
                ("Average Depth of Coverage", "@coverage"),
            ]
    else:
        p.select_one(HoverTool).tooltips = [
            ("Sample", "@samples"),
            ("Taxon", "@assigned"),
            ("Number of reads", "@reads"),
            ("Percentage of total", "@percent_of_total %"),
        ]

    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    if len(set(assigned)) > 15:
        p.axis.major_label_text_font_size = "10pt"
    else:
        p.axis.major_label_text_font_size = "12pt"
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = np.pi / 4
    p.title.text_color = colour[0]
    p.title.text_font_size = "16pt"
    p.title.align = "right"

    p.rect(
        "samples",
        y_value[1],
        1,
        1,
        source=source,
        color="colors",
        alpha="alphas",
        line_color=None,
    )

    panel = Panel(child=p, title=title.split()[1].title())
    # the .title() methods capitalises a string

    if taxonomic_rank == "superkingdom":
        # The superkingdom heatmap still requires a single output file
        output_file(outfile, title=title)
        save(p)
        print("The heatmap %s has been created and written to: %s" %
              (title, outfile))
        return None
    else:
        return (panel, True)
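# --- Hedged usage sketch (not from the source) ---
# For ranks other than "superkingdom" the function returns (panel, True) or (None, False),
# so a caller might collect the panels into a single tabbed report. The dataframe `df`,
# the rank list, the colour and the file names below are assumptions for illustration.
from bokeh.io import output_file, save
from bokeh.models.widgets import Tabs

panels = []
for rank in ["phylum", "family", "genus", "species"]:
    panel, drawn = draw_heatmaps(df, "heatmap_%s.html" % rank,
                                 "Taxonomic %s heatmap" % rank, rank, ["#000080"])
    if drawn:
        panels.append(panel)

if panels:
    output_file("taxonomy_heatmaps.html", title="Taxonomy heatmaps")
    save(Tabs(tabs=panels))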
p1.circle(x="Year", y="Global_Sales", source=source, color="red")
p2 = figure()
p2.circle(x="Year", y="EU_Sales", source=source, color="black")
p3 = figure()
p3.circle(x="Year", y="NA_Sales", source=source, color="blue")
p4 = figure()
p4.circle(x="Year", y="JP_Sales", source=source, color="orange")
layout1 = row(p1, p2)
layout2 = row(p3, p4)
layout3 = column(layout1, layout2)
show(layout3)
#nested
# I use p1, p2 and p3 that are created at above
layout = row(column(p1, p2), p3)
show(layout)
# Grid plot
layout = gridplot([[p1, p2], [p3, None]], toolbar_location="above")
show(layout)

#Tabbed layout
#I use p1 and p2 that are created at above
tab1 = Panel(child=p1, title="Global_Sales")
tab2 = Panel(child=p2, title="EU_Sales")
tabs = Tabs(tabs=[tab1, tab2])
show(tabs)
# linking axis
# We will use p1 and p2 that are created at above
p2.x_range = p1.x_range
p2.y_range = p1.y_range
layout4 = column(p1, p2)
show(layout4)
Ejemplo n.º 22
0
p2.ygrid.band_fill_color = "olive"
p2.ygrid.band_fill_alpha = 0.1

p2.circle(temp_date,
          tempmaxavg,
          size=10,
          legend='close',
          color='green',
          alpha=0.5)

p2.circle(temp_date, temp, size=4, legend='close', color='darkgrey', alpha=0.2)

p2.line(temp_date, temp_avg, legend='avg', color='navy')

p2.legend.location = "top_left"
tab2 = Panel(child=p2, title="Maximum temperatures")

tempmin[np.argmax(tempmin)] = 33.7
temp = np.array(tempmin)
tempminavg = np.array(tempmin_avg)
temp_date = np.array(dates, dtype=np.datetime64)
temp_avg = np.convolve(np.convolve(temp, window, 'same'), window, 'same')

p1 = figure(x_axis_type="datetime",
            title="Min Temperature through the years",
            plot_width=900,
            plot_height=600)
p1.grid.grid_line_alpha = 0
p1.xaxis.axis_label = 'Date'
p1.yaxis.axis_label = 'Temperature'
p1.ygrid.band_fill_color = "olive"
Ejemplo n.º 23
0
                         alpha=.85,
                         color='green',
                         legend="B1")
    ampB2 = ampPlot.line(x=x,
                         y=y,
                         line_width=2,
                         alpha=.85,
                         color='blue',
                         legend="B2")
    ampB3 = ampPlot.line(x=x,
                         y=y,
                         line_width=2,
                         alpha=.85,
                         color='orange',
                         legend="B3")
    tabAmp = Panel(child=ampPlot, title="Amplitude")

    corrPlot = figure(plot_width=600,
                      plot_height=800,
                      tools=TOOLS,
                      x_range=Range1d(0, 100))
    corrPlot.legend.location = "top_left"
    corrPlot.legend.click_policy = "hide"
    corrPlot.xaxis[0].axis_label = "% (percent)"
    corrPlot.yaxis[0].axis_label = "Bin"
    corrB0 = corrPlot.line(x=x,
                           y=y,
                           line_width=2,
                           alpha=.85,
                           color='red',
                           legend="B0")
Ejemplo n.º 24
0
def temperature_plots(conn, start, end):
    '''Combines plots to a tab
    Parameters
    ----------
    conn : DBobject
        Connection object that represents database
    start : time
        Start limit for the x-axis and query (typically datetime.now() - 4 months)
    end : time
        End limit for the x-axis and query (typically datetime.now())
    Returns
    -------
    tab : Panel object
        used by dashboard.py to set up the dashboard
    '''
    descr = Div(text="""
    <style>
    table, th, td {
      border: 1px solid black;
      background-color: #efefef;
      border-collapse: collapse;
      padding: 5px
    }
    table {
      border-spacing: 15px;
    }
    </style>

    <body>
    <table style="width:100%">
      <tr>
        <th><h6>Plotname</h6></th>
        <th><h6>Mnemonic</h6></th>
        <th><h6>Description</h6></th>
      </tr>
      <tr>
        <td>IRSU monitored Temperatures</td>
        <td>SI_GZCTS75A<br>
            SI_GZCTS68A<br>
            SI_GZCTS81A<br>
            SI_GZCTS80A<br>
            SI_GZCTS76A<br>
            SI_GZCTS79A<br>
            SI_GZCTS77A<br>
            SI_GZCTS78A<br>
            SI_GZCTS69A</td>
        <td>CAA IRSU Temperature<br>
            CAM IRSU Temperature<br>
            COM1 Nominal IRSU Temperature<br>
            COM1 Redundant IRSU Temperature<br>
            FWA IRSU Temperature<br>
            GWA IRSU Temperature<br>
            Thermal Strap Nominal IRSU Temperature<br>
            Thermal Strap Redundant IRSU Temperature<br>
            MSA Nominal IRSU Temperature<br>
            MSA Redundant IRSU Temperature</td>
      </tr>

      <tr>
        <td>FPE Temperatures</td>
        <td>IGDP_NRSI_C_CAM_TEMP<br>
            IGDP_NRSI_C_COL_TEMP<br>
            IGDP_NRSI_C_COM1_TEMP<br>
            IGDP_NRSI_C_FOR_TEMP<br>
            IGDP_NRSI_C_IFU_TEMP<br>
            IGDP_NRSI_C_BP1_TEMP<br>
            IGDP_NRSI_C_BP2_TEMP<br>
            IGDP_NRSI_C_BP3_TEMP<br>
            IGDP_NRSI_C_BP4_TEMP<br>
            IGDP_NRSI_C_RMA_TEMP</td>
        <td>OA CAM Temperature<br>
            OA COL Temperature<br>
            OA COM1 Temperature<br>
            OA FOR Temperature<br>
            OA IFU Temperature<br>
            OA BP1 Temperature<br>
            OA BP2 Temperature<br>
            OA BP3 Temperature<br>
            OA BP4 Temperature<br>
            OA RMA Temperature</td>
      </tr>

      <tr>
        <td>Box Temperatures</td>
        <td>IGDP_NRSD_ALG_TEMP<br>
            INRSH_HK_TEMP1<br>
            INRSH_HK_TEMP2</td>
        <td>ICE Internal Temperature 1<br>
            ICE Internal Temperature 2</td>
      </tr>

      <tr>
        <td>MCE internal Temperatures</td>
        <td>INRSM_MCE_PCA_TMP1<br>
            INRSM_MCE_PCA_TMP2<br>
            INRSM_MCE_AIC_TMP_FPGA<br>
            INRSM_MCE_AIC_TMP_ADC<br>
            INRSM_MCE_AIC_TMP_VREG<br>
            INRSM_MCE_MDAC_TMP_FPGA<br>
            INRSM_MCE_MDAC_TMP_OSC<br>
            INRSM_MCE_MDAC_TMP_BRD<br>
            INRSM_MCE_MDAC_TMP_PHA<br>
            INRSM_MCE_MDAC_TMP_PHB</td>
        <td>MCE PCA Board Temperature 1<br>
            MCE PCA Board Temperature 2<br>
            MCE AIC Board FPGA Temperature<br>
            MCE AIC Board Analog/Digital Converter Temperature<br>
            MCE AIC Board Voltage Regulator Temperature<br>
            MCE MDAC Board FPGA Temperature<br>
            MCE MDAC Board Oscillator Temperature<br>
            MCE MDAC Board Temperature<br>
            MCE MDAC Board Phase A PA10 Temperature<br>
            MCE MDAC Board Phase B PA10 Temperature</td>
      </tr>

      <tr>
        <td>MSA Temperatures</td>
        <td>INRSM_Q1_TMP_A<br>
            INRSM_Q2_TMP_A<br>
            INRSM_Q3_TMP_A<br>
            INRSM_Q4_TMP_A<br>
            INRSM_MECH_MTR_TMP_A<br>
            INRSM_LL_MTR_TMP_A<br>
            INRSM_MSA_TMP_A</td>
        <td>MSA Quad 1 Temperature<br>
            MSA Quad 2 Temperature<br>
            MSA Quad 3 Temperature<br>
            MSA Quad 4 Temperature<br>
            MSA Magnetic Arm Motor Temperature<br>
            MSA Launch Lock Motor Temperature<br>
            MSA Frame Temperature</td>
      </tr>

      <tr>
        <td>FPA Temperatures</td>
        <td>IGDP_NRSD_ALG_FPA_TEMP<br>
            IGDP_NRSD_ALG_A1_TEMP<br>
            IGDP_NRSD_ALG_A2_TEMP</td>
        <td>FPE Temperature<br>
            FPA Temperature<br>
            ASIC 1 Temperature<br>
            ASIC 2 Temperature</td>
      </tr>

      <tr>
        <td>Heat Strap Temperatures (Trim Heaters)</td>
        <td>SI_GZCTS74A<br>
            SI_GZCTS67A</td>
        <td>FPA TH-Strap A Temperature from IRSU A<br>
            FPA TH-Strap B Temperature from IRSU A</td>
      </tr>

      <tr>
        <td>CAA Lamps / FWA,GWA</td>
        <td>IGDP_NRSI_C_CAAL1_TEMP<br>
            IGDP_NRSI_C_CAAL2_TEMP<br>
            IGDP_NRSI_C_CAAL3_TEMP<br>
            IGDP_NRSI_C_CAAL4_TEMP<br>
            IGDP_NRSI_C_FWA_TEMP<br>
            IGDP_NRSI_C_GWA_TEMP</td>
        <td>CAA Temperature LINE1<br>
            CAA Temperature LINE2<br>
            CAA Temperature LINE3<br>
            CAA Temperature LINE4<br>
            FWA Temperature Sensor Value<br>
            GWA Temperature Sensor Value</td>
      </tr>

    </table>
    </body>
    """,
                width=1100)

    plot1 = irsu_temp(conn, start, end)
    plot2 = fpe_temp(conn, start, end)
    plot3 = box_temp(conn, start, end)
    plot4 = mce_internal_temp(conn, start, end)
    plot5 = msa_temp(conn, start, end)
    plot6 = fpa_temp(conn, start, end)
    plot7 = heat_strap_temp(conn, start, end)
    plot8 = caal_temp(conn, start, end)

    layout = Column(descr, plot1, plot2, plot3, plot4, plot5, plot6, plot7,
                    plot8)

    tab = Panel(child=layout, title="TEMPERATURE")

    return tab
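# Illustrative only (the connection object and time range are assumptions): the Panel
# returned by temperature_plots() is meant to be combined with other instrument tabs,
# for example in a dashboard document.
from bokeh.models.widgets import Tabs
from bokeh.plotting import curdoc

temperature_tab = temperature_plots(conn, start, end)
curdoc().add_root(Tabs(tabs=[temperature_tab]))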
Ejemplo n.º 25
0
def nonOccupiers():
    #bokeh_doc = curdoc()
    dfn = pd.read_csv('BokehApp/Data/TT_nonOccupier.csv', delimiter='\t', index_col='Years')
    dfnt = dfn[['Total Transactions', 'Total Non-Occupiers']]
    rowX = '2010', '2011','2012','2013','2014','2015','2016', '2017', '2018'
    sourcent = ColumnDataSource(data=dict( x = rowX, y=dfnt['Total Transactions'], y1=dfnt['Total Non-Occupiers']))
    pn = figure(x_range=rowX, plot_height=350, plot_width=550, title='Properties Transactions in Ireland', y_axis_label=None, x_axis_label=None, tools = 'pan, wheel_zoom, box_zoom, reset')
    pn.vbar(x=dodge('x', 0.0, range=pn.x_range), top='y', width=0.3, source=sourcent, color='#440154', legend=value('Total Transactions'))
    pn.vbar(x=dodge('x', -0.35, range=pn.x_range), top='y1', width=0.3, source=sourcent, color='#FDE724', legend=value('Total Non-Occupiers'))
    
    pn.x_range.range_padding = 0.05
    pn.legend.location = 'top_left'
    hoverpn = HoverTool()
    hoverpn.tooltips=[('Transactions', 'total @y / non-occupiers @y1')]
    pn.add_tools(hoverpn)
    tick_labelspn = {'10000':'10K','20000':'20K','30000':'30K','40000':'40K','50000':'50K', '60000':'60K'}
    pn.yaxis.major_label_overrides = tick_labelspn
    pn.legend.background_fill_alpha=None
    pn.legend.border_line_alpha=0
    pn.legend.label_text_font_size = "11px"
    pn.y_range.end = dfnt.values.max()*1.1+1
    pn.legend.click_policy="hide"
    pn.title.text_font_size = '15px'
    pn.xaxis.major_label_text_font_style = 'bold'
    pn.grid.grid_line_color=None
    pn.toolbar.autohide = True
    #return pn
    #show(pn)
    dfn1 = pd.read_csv('BokehApp/Data/TT_nonOccupier.csv', delimiter='\t', index_col='Years')
    dfn3 = dfn1[['Former Owner-Occupier', 'Non-Occupier', 'Non-Household Buyer']]
    rX = '2010', '2011','2012','2013','2014','2015','2016', '2017', '2018'

    srcn3 = ColumnDataSource(data=dict( x = rX,
                                    y=dfn3['Former Owner-Occupier'],
                                    y1=dfn3['Non-Occupier'],
                                    y2=dfn3['Non-Household Buyer']))
    pn3 = figure(x_range=rX, plot_height=350, plot_width=550, title='Properties Transactions in Ireland', y_axis_label=None, x_axis_label=None, tools = 'pan, wheel_zoom, box_zoom, reset')

    pn3.line(x='x', y='y', line_width=2.5, line_color='#440154', source=srcn3, legend=value('Former Owner-Occupier'))
    pn3.line(x='x', y='y1', line_width=2.5, line_color='#FDE724', source=srcn3, legend=value('Non-Occupier'))
    pn3.circle(x='x', y='y', size=5, color='#B2DD2C', source=srcn3, legend=value('Former Owner-Occupier'))
    pn3.circle(x='x', y='y1', size=5, color='#440154', source=srcn3, legend=value('Non-Occupier'))
    pn3.line(x='x', y='y2', line_width=2.5, line_color='#9DD93A', source=srcn3, legend=value('Non-Household Buyer'))
    pn3.circle(x='x', y='y2', size=5, color='#365A8C', source=srcn3, legend=value('Non-Household Buyer'))

            #pne.vbar(x='x', top='y', width=0.4, source=srcne, color='#440154', legend=value('Existing'))
            #pne.vbar(x='x', top='y1', width=0.4, source=srcne, color='#FDE724', legend=value('New'))

    pn3.legend.location = 'top_left'
    hoverpn3 = HoverTool()
    hoverpn3.tooltips=[('Former Owner', '@y'),('Non-Occupier', '@y1'), ('Non-Household', '@y2')]
    pn3.add_tools(hoverpn3)
    tick_labelspn3 = {'5000':'5K','10000':'10K','15000':'15K','20000':'20K','25000':'25K'}
    pn3.yaxis.major_label_overrides = tick_labelspn3
            #pn.xaxis.major_label_overrides = {'2010':'2010', '2011':'2011'}
    #pn3.legend.background_fill_alpha=None
    #pn3.legend.border_line_alpha=0
    pn3.legend.label_text_font_size = "11px"
            #pne.y_range.end = dfnt.values.max()*1.1+1
            #pn.x_range.start = rowX*1.1+1
    pn3.legend.click_policy="hide"
    pn3.title.text_font_size = '15px'
    pn3.xaxis.major_label_text_font_style = 'bold'
    #pn3.xgrid.grid_line_color=None
    pn3.grid.grid_line_alpha = 0.6
    pn3.grid.grid_line_dash = 'dotted'
    pn3.grid.grid_line_dash_offset = 5
    pn3.grid.grid_line_width = 2
    pn3.toolbar.autohide = True
    pn3.outline_line_color=None
    pn3.legend.background_fill_alpha=None
    pn3.legend.border_line_alpha=0
    #return pn3
    #show(pn3)

    dfne = pd.read_csv('BokehApp/Data/HT_NewExisiting.csv', delimiter='\t', index_col='Years')
    rX = '2010', '2011','2012','2013','2014','2015','2016', '2017', '2018'

    srcne = ColumnDataSource(data=dict( x = rX,
                                    y=dfne['Existing'],
                                    y1=dfne['New']))
    pne = figure(x_range=rX, plot_height=350, plot_width=550, title='Properties Transactions in Ireland', y_axis_label=None, x_axis_label=None, tools = 'pan, wheel_zoom, box_zoom, reset')

    pne.line(x='x', y='y', line_width=2.5, line_color='#440154', source=srcne, legend=value('Existing'))
    pne.line(x='x', y='y1', line_width=2.5, line_color='#FDE724', source=srcne, legend=value('New'))
    pne.circle(x='x', y='y', size=5, color='#B2DD2C', source=srcne, legend=value('Existing'))
    pne.circle(x='x', y='y1', size=5, color='#35B778', source=srcne, legend=value('New'))

            #pne.vbar(x='x', top='y', width=0.4, source=srcne, color='#440154', legend=value('Existing'))
            #pne.vbar(x='x', top='y1', width=0.4, source=srcne, color='#FDE724', legend=value('New'))

    pne.legend.location = 'top_left'
    hoverpne = HoverTool()
    hoverpne.tooltips=[('Transactions', 'Existing @y / New @y1')]
    pne.add_tools(hoverpne)
    tick_labelspne = {'10000':'10K','20000':'20K','30000':'30K','40000':'40K'}
    pne.yaxis.major_label_overrides = tick_labelspne
            #pn.xaxis.major_label_overrides = {'2010':'2010', '2011':'2011'}
    pne.legend.background_fill_alpha=None
    pne.legend.border_line_alpha=0
    pne.legend.label_text_font_size = "11px"
            #pne.y_range.end = dfnt.values.max()*1.1+1
            #pn.x_range.start = rowX*1.1+1
    pne.legend.click_policy="hide"
    pne.title.text_font_size = '15px'
    pne.xaxis.major_label_text_font_style = 'bold'
    #pne.xgrid.grid_line_color=None
    pne.grid.grid_line_alpha = 0.6
    pne.grid.grid_line_dash = 'dotted'
    pne.grid.grid_line_dash_offset = 5
    pne.grid.grid_line_width = 2
    pne.toolbar.autohide = True
    pne.outline_line_color=None
    #show(pne)

    dfn = pd.read_csv('BokehApp/Data/TT_nonOccupier.csv', delimiter='\t', index_col='Years')
    dfnt = dfn[['Total Transactions', 'Total Non-Occupiers']]

    rowX = '2010', '2011','2012','2013','2014','2015','2016', '2017', '2018'

    sourcent = ColumnDataSource(data=dict( x = rowX,
                                        y=dfnt['Total Transactions'],
                                        y1=dfnt['Total Non-Occupiers']))
    pn = figure(x_range=rowX, plot_height=350, plot_width=550, title='Properties Transactions in Ireland', y_axis_label=None, x_axis_label=None, tools = 'pan, wheel_zoom, box_zoom, reset')
        #pn.x_range=rowX
    pn.vbar(x=dodge('x', 0.0, range=pn.x_range), top='y', width=0.3, source=sourcent, color='#440154', legend=value('Total Transactions'))
    pn.vbar(x=dodge('x', -0.35, range=pn.x_range), top='y1', width=0.3, source=sourcent, color='#FDE724', legend=value('Total Non-Occupiers'))

    #pn.x_range.factors = xstr
    #x_range = FactorRange(factors=['2010', '2011', '2012','2013','2014','2015','2016','2017','2018'])
    pn.x_range.range_padding = 0.05
    pn.legend.location = 'top_left'
    hoverpn = HoverTool()
    hoverpn.tooltips=[('Transactions', 'total @y / non-occupiers @y1')]
    pn.add_tools(hoverpn)
    tick_labelspn = {'10000':'10K','20000':'20K','30000':'30K','40000':'40K','50000':'50K', '60000':'60K'}
    pn.yaxis.major_label_overrides = tick_labelspn
    #pn.xaxis.major_label_overrides = {'2010':'2010', '2011':'2011'}
    pn.legend.background_fill_alpha=None
    pn.legend.border_line_alpha=0
    pn.legend.label_text_font_size = "11px"
    pn.y_range.end = dfnt.values.max()*1.1+1
    #pn.x_range.start = rowX*1.1+1
    pn.legend.click_policy="hide"
    pn.title.text_font_size = '15px'
    pn.xaxis.major_label_text_font_style = 'bold'
    #pn.grid.grid_line_color=None
    pn.grid.grid_line_alpha = 0.6
    pn.grid.grid_line_dash = 'dotted'
    pn.grid.grid_line_dash_offset = 5
    pn.grid.grid_line_width = 2
    pn.toolbar.autohide = True
    pn.outline_line_color=None
    #show(pn)
    t1 = Panel(child=pn, title='Overview')
    t2 = Panel(child=pne, title='Type of sale')
    t3 = Panel(child=pn3, title='Type of buyer')
    tabs = Tabs(tabs=[t1,t2,t3])
    return tabs
Ejemplo n.º 26
0
def plot_defects(render, stack, out_html_dir, args):
    tspecs = args[0]
    matches = args[1]
    dis_tiles = args[2]
    gap_tiles = args[3]
    seam_centroids = np.array(args[4])
    stats = args[5]
    z = args[6]

    # Tile residual mean
    tile_residual_mean = cr.compute_mean_tile_residuals(stats['tile_residuals'])

    tile_positions = []
    tile_ids = []
    residual = []
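    # trace each tile's bounding box as a closed polygon (first corner repeated)
    # and record its mean residual, defaulting to a high value when it is missing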
    for ts in tspecs:
        tile_ids.append(ts.tileId)
        pts = []
        pts.append([ts.minX, ts.minY])
        pts.append([ts.maxX, ts.minY])
        pts.append([ts.maxX, ts.maxY])
        pts.append([ts.minX, ts.maxY])
        pts.append([ts.minX, ts.minY])
        tile_positions.append(pts)

        try:
            residual.append(tile_residual_mean[ts.tileId])
        except KeyError:
            residual.append(50) # a high value for residual for that tile

    out_html = os.path.join(
            out_html_dir,
            "%s_%d_%s.html" % (
                stack,
                z,
                datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')))

    output_file(out_html)
    xs = []
    ys = []
    alphas = []
    for tp in tile_positions:
        sp = np.array(tp)
        x = list(sp[:,0])
        y = list(sp[:,1])
        xs.append(x)
        ys.append(y)
        alphas.append(0.5)

    fill_color = []
    label = []
    for t in tile_ids:
        if t in gap_tiles:
            label.append("Gap tiles")
            fill_color.append("red")
        elif t in dis_tiles:
            label.append("Disconnected tiles")
            fill_color.append("yellow")
        else:
            label.append("Stitched tiles")
            fill_color.append("blue")

    color_mapper = CategoricalColorMapper(factors=['Gap tiles', 'Disconnected tiles', 'Stitched tiles'], palette=["red", "yellow", "blue"])
    source = ColumnDataSource(data=dict(x=xs, y=ys, alpha=alphas, names=tile_ids, fill_color=fill_color, labels=label))

    seam_source = ColumnDataSource(data=dict(
        x=(seam_centroids[:, 0] if len(seam_centroids) else []),
        y=(seam_centroids[:, 1] if len(seam_centroids) else []),
        lbl=["Seam Centroids" for s in xrange(len(seam_centroids))]))

    TOOLS = "pan,box_zoom,reset,hover,tap,save"

    p = figure(title=str(z), width=1000, height=1000, tools=TOOLS, match_aspect=True)
    pp = p.patches('x', 'y', source=source, alpha='alpha', line_width=2, color={'field':'labels', 'transform': color_mapper}, legend='labels')
    cp = p.circle('x', 'y', source=seam_source, legend='lbl', size=11)

    jscode = """
        var inds = cb_obj.selected['1d'].indices;
        var d = cb_obj.data;
        var line = "<span style='float:left;clear:left;font_size=0.5pt'><br>" + d['%s'][inds[0]] + "</b></span>\\n";
        var text = div.text.concat(line);
        var lines = text.split("\\n")
        if ( lines.length > 35 ) { lines.shift(); }
        div.text = lines.join("\\n");
    """
    div = Div(width=1000)
    layout = row(p, div)

    urls = "%s:%d/render-ws/v1/owner/%s/project/%s/stack/%s/tile/@names/png-image?scale=0.1"%(render.DEFAULT_HOST, render.DEFAULT_PORT, render.DEFAULT_OWNER, render.DEFAULT_PROJECT, stack)
    urls = "%s:%d/render-ws/v1/owner/%s/project/%s/stack/%s/tile/@names/withNeighbors/jpeg-image?scale=0.1"%(render.DEFAULT_HOST, render.DEFAULT_PORT, render.DEFAULT_OWNER, render.DEFAULT_PROJECT, stack)

    taptool = p.select(type=TapTool)
    taptool.renderers = [pp]
    taptool.callback = OpenURL(url=urls)
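    # OpenURL substitutes @names from the tapped patch's data source, so tapping
    # a tile opens the corresponding render-ws image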

    hover = p.select(dict(type=HoverTool))
    hover.renderers = [pp]
    hover.point_policy = "follow_mouse"
    hover.tooltips = [("tileId", "@names"), ("x", "$x{int}"), ("y", "$y{int}")]

    source.callback = CustomJS(args=dict(div=div), code=jscode%('names'))

    # add point match plot in another tab
    plot = point_match_plot(tspecs, matches)

    # montage statistics plots in other tabs

    stat_layout = plot_residual(xs, ys, residual)

    tabs = []
    tabs.append(Panel(child=layout, title="Defects"))
    tabs.append(Panel(child=plot, title="Point match plot"))
    tabs.append(Panel(child=stat_layout, title="Mean tile residual"))

    plot_tabs = Tabs(tabs=tabs)

    save(plot_tabs)

    return out_html
Ejemplo n.º 27
0
def gains():
    if request.method == "POST":
        f = request.files["file"]
        unit = request.form["unit"]
        exercise_quantity = request.form["exercise_quantity"]
        try:
            cutoff = float(request.form["cutoff"]) / 100
        except Exception as e:
            print(e)
            cutoff = -1

        if cutoff < 0 or cutoff > 1:
            flash(f"Invalid cutoff percentage", "danger")
            return redirect(url_for("viz"))

        if not f:
            flash("No file selected", "danger")
            return redirect(url_for("viz"))
        if (mimetype := f.mimetype) not in MIME_TYPES:
            flash(f"Invalid file format {mimetype}", "danger")
            return redirect(url_for("viz"))

        df = pd.read_csv(f, sep=";")
        df.loc[:, "Date"] = pd.to_datetime(df["Date"]).dt.date

        df.loc[df["Weight Unit"] != unit, "Weight"] \
                = df.loc[df["Weight Unit"] != unit, :].apply(
                        lambda x: models.weight_conversion[unit](x["Weight"]),
                        axis=1)
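        # treat missing or zero weights as 1 (e.g. bodyweight movements) so those
        # sets still contribute to the volume calculation below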
        df["Weight"].fillna(1, inplace=True)
        df["Weight"].replace(0, 1, inplace=True)

        unique_exercises = list(df["Exercise Name"].value_counts().index)
        exercise_slice = models.quantity_conversion[exercise_quantity](
            len(unique_exercises))

        tabs = []
        exercises = unique_exercises[:exercise_slice]
        for ex in exercises:
            df_ex = df.loc[df["Exercise Name"] == ex].copy()

            # Calculate estimated 1 RM of set
            df_ex.loc[:, "Est. 1 RM"] = df_ex.apply(
                lambda x: models.get_1rm(x["Weight"], x["Reps"]), axis=1)
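            # (models.get_1rm is project-specific; a common estimate it may
            #  implement is the Epley formula, roughly weight * (1 + reps / 30))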

            df_ex["Workout 1 RM"] = df_ex.groupby(
                "Date")["Est. 1 RM"].transform("max")

            # Calculate total volume of set
            df_ex.loc[:, "Set Volume"] = df_ex.apply(
                lambda x: models.get_volume(
                    x["Weight"], x["Reps"], x["Workout 1 RM"], cutoff=cutoff),
                axis=1)

            # Group all exercises by date performed on
            d = df_ex.groupby("Date")[["Set Volume", "Est. 1 RM"]].agg({
                "Set Volume":
                "sum",
                "Est. 1 RM": ["max", "idxmax"]
            })
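            # the agg above yields MultiIndex columns; dropping the top level
            # leaves plain 'sum', 'max' and 'idxmax' columns for the renames below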
            d.columns = d.columns.droplevel()
            d.dropna(inplace=True)

            # Get Reps/Weight of the maximal 1 RM value of each date
            d.loc[:, "Reps"] = df_ex.loc[d["idxmax"], "Reps"].values
            d.loc[:, "Weight"] = df_ex.loc[d["idxmax"], "Weight"].values

            d.index = pd.to_datetime(d.index)
            d.rename(columns={
                "max": "Est. 1 RM",
                "sum": "Total Daily Volume"
            },
                     inplace=True)
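            # upsample to a daily index (missing days filled with 0 by asfreq(0))
            # so the weekly resample below sees every calendar day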
            d2 = d.resample("D").asfreq(0)
            d2.drop(columns="idxmax", inplace=True)

            # Group all workouts by week (starting on Monday)
            d3 = d2.resample("W-MON").agg({
                "Total Daily Volume": "sum",
                "Est. 1 RM": ["max", "idxmax"]
            })
            d3.columns = d3.columns.droplevel()
            d3.fillna(0, inplace=True)

            # Get Reps/Weight/Workout Date of the maximal 1 RM value of week
            d3.loc[:, "Reps"] = d2.loc[d3["idxmax"], "Reps"].values
            d3.loc[:, "Weight"] = d2.loc[d3["idxmax"], "Weight"].values
            d3.loc[:, "Workout Date"] = d3["idxmax"].dt.strftime("%d %B %Y")

            d3.drop(columns="idxmax", inplace=True)
            d3.rename(columns={
                "max": "Est. 1 RM",
                "sum": "Total Weekly Volume"
            },
                      inplace=True)
            d3.loc[d3["Total Weekly Volume"] == 0,
                   "Total Weekly Volume"] = None
            d3.loc[d3["Est. 1 RM"] == 0, "Est. 1 RM"] = None

            source = ColumnDataSource(d3)

            hover = HoverTool()
            hover.tooltips = [
                ("Reps", "@Reps"),
                ("Weight", "@Weight"),
                ("Est. 1 RM", "@{Est. 1 RM}"),
                ("Date", "@{Workout Date}"),
            ]

            fig1 = figure(
                title="Total Weekly Volume",
                y_axis_label=f"Weight [{unit}]",
                # width=PLT_WIDTH,
                # height=PLT_HEIGHT,
                aspect_ratio=PLT_WIDTH / PLT_HEIGHT,
                x_axis_type="datetime")
            fig1.circle(x="Date",
                        y="Total Weekly Volume",
                        source=source,
                        color=SECONDARY_COLOR,
                        size=5)

            fig1 = models.style_fig(fig1)

            fig2 = figure(
                title="Est. 1 RM",
                y_axis_label=f"Weight [{unit}]",
                # width=PLT_WIDTH,
                # height=PLT_HEIGHT,
                aspect_ratio=PLT_WIDTH / PLT_HEIGHT,
                x_axis_type="datetime")
            fig2.circle(x="Date",
                        y="Est. 1 RM",
                        source=source,
                        color=PRIMARY_COLOR_LIGHT,
                        size=5)
            fig2 = models.style_fig(fig2)
            fig2.add_tools(hover)

            tab = Panel(child=column(fig1, fig2, sizing_mode="scale_both"),
                        title=ex)
            tabs.append(tab)

        # NB: bokeh.embed.components() returns (script, div); the names are swapped
        # here, so gains.html must embed 'div' and 'script' accordingly
        div, script = components(Tabs(tabs=tabs, max_width=PLT_WIDTH))

        return render_template("gains.html",
                               title="gains::viz",
                               div=div,
                               script=script)
Ejemplo n.º 28
0
)

layout4 = layout(
    [widgetbox(btnGroupType4), widgetbox(btnGroupPosition2)],
    [widgetbox(slider, patch, plot, height=400, width=1000)],
)

tab1 = Panel(child=layout1, title='Change Chart')
tab2 = Panel(child=layout2, title='Change Plot')
tab3 = Panel(child=layout3, title='Data Chart')
tab4 = Panel(child=layout4, title='Data Plot')
tabs = Tabs(tabs=[tab1, tab2, tab3, tab4])

# layout

page = layout(
Ejemplo n.º 29
0
def affichage_proba(proba_textmining, proba_clf_inception,
                    proba_clf_svm_inception, label_proba_textmining,
                    label_proba_clf_inception, label_proba_clf_svm_inception):

    proba_et_label_text = pd.DataFrame({
        'proba_Text': proba_textmining[0],
        'label_Text': label_proba_textmining
    })
    proba_et_label_inception = pd.DataFrame({
        'proba_inception':
        proba_clf_inception[0],
        'label_inception':
        label_proba_clf_inception
    })
    proba_et_label_svm_inception = pd.DataFrame({
        'proba_SVM_inception':
        proba_clf_svm_inception[0],
        'label_SVM_inception':
        label_proba_clf_svm_inception
    })

    text = ColumnDataSource(proba_et_label_text)
    inception = ColumnDataSource(proba_et_label_inception)
    svm_inception = ColumnDataSource(proba_et_label_svm_inception)

    hover_text = HoverTool(tooltips=[("probability", "@proba_Text")])
    hover_inception = HoverTool(tooltips=[("probability",
                                           "@proba_inception")])
    hover_SVM_inception = HoverTool(tooltips=[("probability",
                                               "@proba_SVM_inception")])

    fig1 = figure(plot_width=1000,
                  plot_height=400,
                  x_range=label_proba_textmining)
    fig1.vbar(x='label_Text',
              top='proba_Text',
              source=text,
              width=0.5,
              fill_color='#45A7E2',
              line_color='#45A7E2')
    fig1.xaxis.major_label_orientation = 0.7
    fig1.add_tools(hover_text)
    tab1 = Panel(child=fig1, title='Text_mining_proba')

    fig2 = figure(plot_width=1000,
                  plot_height=400,
                  x_range=label_proba_clf_inception)
    fig2.vbar(x='label_inception',
              top='proba_inception',
              source=inception,
              width=0.5,
              fill_color='#E74C3C',
              line_color='#E74C3C')
    fig2.xaxis.major_label_orientation = 0.7
    fig2.add_tools(hover_inception)
    tab2 = Panel(child=fig2, title='Inception_proba')

    fig3 = figure(plot_width=1000,
                  plot_height=400,
                  x_range=label_proba_clf_svm_inception)
    fig3.vbar(x='label_SVM_inception',
              top='proba_SVM_inception',
              source=svm_inception,
              width=0.5,
              fill_color='#2ECC71',
              line_color='#2ECC71')
    fig3.xaxis.major_label_orientation = 0.7
    fig3.add_tools(hover_SVM_inception)
    tab3 = Panel(child=fig3, title='SVM_inception_proba')

    onglet = Tabs(tabs=[tab1, tab2, tab3])
    show(onglet)
Ejemplo n.º 30
0
    def make_report(self, all_result, recent_result):
        """ makes a Bokeh report on the server's health and activity
        
        input:
        dbquery_result: result of calling PERSIST.server_monitoring
        """
        self.read_json(all_result, data_tag='pre_insert')
        self.read_json(recent_result, data_tag='recent_all')
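        # the full history ('pre_insert') feeds the trend tabs below; the recent
        # subset ('recent_all') is filtered further down to the latest guids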
        tab_server = self.depict(
            data_tag='pre_insert',
            tab_title="In RAM sequence",
            metrics='server|scstat',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_memory = self.depict(
            data_tag='pre_insert',
            tab_title="RAM usage",
            metrics='server|mstat',
            x_axis_label='Order sequences added (Oldest --> Most recent)')

        tab_g2n = self.depict(
            data_tag='pre_insert',
            tab_title="Db: guid->neighbours",
            metrics='dstats|guid2neighbour',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_g2m = self.depict(
            data_tag='pre_insert',
            tab_title="Db: guid->metadata",
            metrics='dstats|guid2meta',
            x_axis_label='Order sequences added (Oldest --> Most recent)')
        tab_sm = self.depict(
            data_tag='pre_insert',
            tab_title="Db: server monitor",
            metrics='dstats|server_monitoring',
            x_axis_label='Order sequences added (Oldest --> Most recent)')

        # get details of the most recent guids
        n = 10
        recent_guids = self.most_recent_guids('recent_all', n=n)
        self.subset_data(from_data_tag='recent_all',
                         to_data_tag='recent_server',
                         column_name='content|activity|guid',
                         cell_values=[x for x in recent_guids])

        tab_rserver = self.depict(
            data_tag='recent_server',
            tab_title="Last {0} inserts: Sequences".format(n),
            metrics='server|scstat')
        tab_rmemory = self.depict(data_tag='recent_server',
                                  tab_title="Last {0} inserts: RAM".format(n),
                                  metrics='server|mstat')
        self._set_server_info()

        # get tail of logfile
        n_latest_lines = 100
        res = self.logfile_tail(n_latest_lines)

        # render
        div = Div(text="[last {0} lines of log file are shown]<br/>".format(
            n_latest_lines) + res.replace('\n', '<br />'),
                  render_as_text=False,
                  width=1000,
                  height=800)
        tab_log = Panel(child=div, title='Log tail')

        doc = {}
        s1 = self.server_info_tab()
        doc['Report'] = Tabs(tabs=[
            s1, tab_rserver, tab_rmemory, tab_server, tab_memory, tab_g2n,
            tab_g2m, tab_sm, tab_log
        ])
        return doc