示例#1
0
文件: main.py 项目: surferran/TAILOR
#from scripts.draw_map import map_tab
from scripts.routes import route_tab

# Using included state data from Bokeh for map
from bokeh.sampledata.us_states import data as states

# Read data into dataframes
# NOTE(review): relies on names imported earlier in the file (pd, join,
# dirname, density_tab, Tabs, curdoc) -- confirm against the full source.
flights = pd.read_csv(join(dirname(__file__), 'data', 'flights.csv'),
                      index_col=0).dropna()

# Formatted Flight Delay Data for map
map_data = pd.read_csv(join(dirname(__file__), 'data', 'flights_map.csv'),
                       header=[0, 1],
                       index_col=0)

# Create each of the tabs (histogram/table/map tabs are currently disabled)
#tab1 = histogram_tab(flights)
tab2 = density_tab(flights)
#tab3 = table_tab(flights)
#tab4 = map_tab(map_data, states)
tab5 = route_tab(flights)

# Put all the tabs into one application
tabs = Tabs(tabs=[tab2, tab5])

# Put the tabs in the current document for display
curdoc().add_root(tabs)

# Script entry guard. Fix: compare the module attribute __name__ to the
# string '__main__' -- the original compared the literal string
# '__name__' to '__main__', a condition that is always False.
if __name__ == '__main__':
    pass
示例#2
0
    def initialize_plot(self, plots=None, ranges=None):
        """Initialize the bokeh figure for every subplot in the layout grid,
        inserting padding rows/columns for adjoined (marginal) plots, and
        wrap the result into either a Tabs model or a gridplot (optionally
        preceded by a title model).

        ``plots`` may carry figures from other layouts for axis sharing;
        ``ranges`` is recomputed here from the most recent key, so the
        passed-in value is ignored.
        """
        ranges = self.compute_ranges(self.layout, self.keys[-1], None)
        passed_plots = [] if plots is None else plots
        # One list of figures per declared layout row; marginal rows are
        # inserted on demand below.
        plots = [[] for _ in range(self.rows)]
        tab_titles = {}
        # Track which rows/columns have had marginal padding inserted.
        insert_rows, insert_cols = [], []
        for r, c in self.coords:
            subplot = self.subplots.get((r, c), None)
            if subplot is not None:
                shared_plots = passed_plots if self.shared_axes else None
                subplots = subplot.initialize_plot(ranges=ranges,
                                                   plots=shared_plots)

                # Computes plotting offsets depending on
                # number of adjoined plots
                offset = sum(r >= ir for ir in insert_rows)
                if len(subplots) > 2:
                    # Add pad column in this position
                    insert_cols.append(c)
                    if r not in insert_rows:
                        # Insert and pad marginal row if none exists
                        plots.insert(r + offset,
                                     [None for _ in range(len(plots[r]))])
                        # Pad previous rows
                        for ir in range(r):
                            plots[ir].insert(c + 1, None)
                        # Add to row offset
                        insert_rows.append(r)
                        offset += 1
                    # Add top marginal
                    plots[r + offset - 1] += [subplots.pop(-1), None]
                elif len(subplots) > 1:
                    # Add pad column in this position
                    insert_cols.append(c)
                    # Pad previous rows
                    # NOTE(review): this pads row ``r`` once per earlier row
                    # instead of padding each earlier row ``ir`` -- possibly
                    # meant plots[ir].insert(...); confirm against upstream.
                    for ir in range(r):
                        plots[r].insert(c + 1, None)
                    # Pad top marginal if one exists
                    if r in insert_rows:
                        plots[r + offset - 1] += 2 * [None]
                else:
                    # Pad top marginal if one exists
                    if r in insert_rows:
                        plots[r + offset - 1] += [None] * (1 +
                                                           (c in insert_cols))
                plots[r + offset] += subplots
                if len(subplots) == 1 and c in insert_cols:
                    plots[r + offset].append(None)
                passed_plots.append(subplots[0])
                if self.tabs:
                    # Derive a tab title from the main subplot; fall back to
                    # the element path when no formatted title exists.
                    title = subplot.subplots['main']._format_title(
                        self.keys[-1], dimensions=False)
                    if not title:
                        title = ' '.join(self.paths[r, c])
                    tab_titles[r, c] = title
            else:
                # NOTE(review): ``offset`` here is left over from a previous
                # loop iteration; if the very first coordinate has no
                # subplot this raises NameError -- confirm intended.
                plots[r + offset] += [empty_plot(0, 0)]

        # Replace None types with empty plots
        # to avoid bokeh bug
        plots = layout_padding(plots, self.renderer)

        # Wrap in appropriate layout model
        kwargs = dict(sizing_mode=self.sizing_mode)
        if self.tabs:
            # One Panel per non-empty child, titled from tab_titles.
            panels = [
                Panel(child=child, title=str(tab_titles.get((r, c))))
                for r, row in enumerate(plots) for c, child in enumerate(row)
                if child is not None
            ]
            layout_plot = Tabs(tabs=panels)
        else:
            plots = filter_toolboxes(plots)
            plots, width = pad_plots(plots)
            layout_plot = gridplot(children=plots,
                                   width=width,
                                   toolbar_position=self.toolbar,
                                   merge_tools=self.merge_tools,
                                   **kwargs)

        # Prepend the layout title (as its own model) when one is set.
        title = self._get_title(self.keys[-1])
        if title:
            self.handles['title'] = title
            layout_plot = Column(title, layout_plot, **kwargs)

        self._update_callbacks(layout_plot)
        self.handles['plot'] = layout_plot
        self.handles['plots'] = plots
        if self.shared_datasource:
            self.sync_sources()

        self.drawn = True

        return self.handles['plot']
示例#3
0
# Ethnicity tab: count/percent plots for 2019 and 2020.
source9 = ColumnDataSource(data9)
p9 = createCountP(source9, rankNames1, colSpec3[:2], title9, ethnicNames)

data10 = createPercentSource(df2019, rankNames1, 'ethnicity', ethnicNames)
source10 = ColumnDataSource(data10)
p10 = createPercentP(source10, rankNames1, colSpec3[:2], title10, ethnicNames)

data11 = createCountSource(df2020, rankNames1, 'ethnicity', ethnicNames)
source11 = ColumnDataSource(data11)
p11 = createCountP(source11, rankNames1, colSpec3[:2], title11, ethnicNames)

data12 = createPercentSource(df2020, rankNames1, 'ethnicity', ethnicNames)
source12 = ColumnDataSource(data12)
p12 = createPercentP(source12, rankNames1, colSpec3[:2], title12, ethnicNames, True)

# Uniform title size on all four ethnicity plots.
for _fig in (p9, p10, p11, p12):
    _fig.title.text_font_size = '8pt'
# Pair the y-ranges so the 2019/2020 plots stay vertically aligned.
p9.y_range = p11.y_range
p10.y_range = p12.y_range

# One shared hover tool for the four ethnicity plots.
hoverEthnicity = HoverTool(tooltips=[
    ('Rank', '@rank'), ('Hispanic', '@0'), ('Not Hispanic', '@1')
])
for _fig in (p9, p10, p11, p12):
    _fig.add_tools(hoverEthnicity)

# One tab per demographic dimension, each a 2x3 grid with its selector.
first = Panel(child=gridplot([[p1, p3, widgetbox(select1, width=180)], [p2, p4]]), title='Race')
second = Panel(child=gridplot([[p5, p7, widgetbox(select2, width=180)], [p6, p8]]), title='Gender')
third = Panel(child=gridplot([[p9, p11, widgetbox(select3, width=180)], [p10, p12]]), title='Ethnicity')
tabs = Tabs(tabs=[first, second, third])

curdoc().add_root(tabs)
#output_file('test.html')
#show(tabs)

  
示例#4
0
# Fix: `logging` was used below without being imported anywhere in this
# module, raising NameError at startup.
import logging

from bokeh.io import curdoc
from bokeh.models.widgets import Panel, Tabs

from bokeh_widgets.formatter_widget import FormatterWidget
from bokeh_widgets.player_widget import PlayerWidget
from bokeh_widgets.test_widget import TestWidget
from bokeh_widgets.trainer_widget import TrainerWidget
from bokeh_widgets.warmup_widget import WarmUpWidget

document = curdoc()

# Setup logging
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)

# Widgets and tabs
formatter = FormatterWidget().create_widget()
trainer = TrainerWidget().create_widget()
tester = TestWidget().create_widget()
# Warmup/Player widgets receive the document so they can schedule callbacks.
warmup = WarmUpWidget(parent=document).create_widget()
player = PlayerWidget(parent=document).create_widget()

# Layout: one Panel per widget, gathered into a single Tabs container.
tab_format = Panel(child=formatter, title='Format')
tab_train = Panel(child=trainer, title='Train')
tab_test = Panel(child=tester, title='Test')
tab_warmup = Panel(child=warmup, title='Warmup')
tab_play = Panel(child=player, title='Play')
tabs = Tabs(tabs=[tab_format, tab_train, tab_test, tab_warmup, tab_play])
document.add_root(tabs)
示例#5
0
# Wire the "unselect all" button.
unselect_all.on_click(deactivate_all_update)

# Initialize source: gather the labels currently active in each of the
# three state checkbox groups (group 1 first, then 2, then 3).
initial_states = [
    group.labels[i]
    for group in (states_select1, states_select2, states_select3)
    for i in group.active
]

src = make_dataset(
    initial_states,
    start=range_select.value[0],
    end=range_select.value[1],
    thresh=thresh_select.value,
)

# Linear- and log-scale versions of the same plot.
plot_lin_s = make_plot(src, y_scale='linear')
plot_log_s = make_plot(src, y_scale='log')

# Put controls in a single element
controls_row = row(
    WidgetBox(states_select1, width=140),
    WidgetBox(states_select2, width=120),
    WidgetBox(states_select3, width=120),
)
controls_col = WidgetBox(range_select, thresh_select, select_all, unselect_all)

# One row layout per scale, each wrapped in its own tab.
layout_lin_s = row(column(controls_col, controls_row), plot_lin_s)
layout_log_s = row(column(controls_col, controls_row), plot_log_s)
tab_lin_s = Panel(child=layout_lin_s, title='by State, Linear Scale')
tab_log_s = Panel(child=layout_log_s, title='by State, Log Scale')
tabs = Tabs(tabs=[tab_lin_s, tab_log_s])

curdoc().add_root(tabs)
# Build each dashboard view from its helper and wrap it in a Panel.
layout1 = get_bar()
layout2 = top_countries(country_rank_df)
p3, p2 = gdp_income(result_df, scatter_countries, source_gdp)
layout3 = row(p3, p2)
p4, p5, p6, p7, p8 = get_plot()

tab1 = Panel(child=layout1, title="Country Comparison")
tab2 = Panel(child=layout2, title="Top Climbers/Top Fallers")
tab3 = Panel(child=layout3, title="GDP Incoming Rank vs. Rank Relation")
tab4 = Panel(child=p4, title="Score Comparison")
tab5 = Panel(child=p5, title="GDP Per Capita with Score")
tab6 = Panel(child=p6, title="Incoming vs Outgoing Rates")
tab7 = Panel(child=p7, title="Where Can I travel to?")
tab8 = Panel(child=p8, title="Highlighting Differences")

# tab3 is deliberately placed last (index 7): the panelActive handler in
# this file keys its periodic callback on tabs.active == 7.
tabs = Tabs(tabs=[tab1, tab2, tab4, tab5, tab6, tab7, tab8, tab3])

# Handle of the periodic callback while the GDP tab is active; False when
# no callback is currently scheduled.
call_back_obj = False


def panelActive(attr, old, new):
    """Start the periodic GDP refresh while tab index 7 is shown, stop it
    otherwise.

    Bokeh change handler for ``tabs.active``; the scheduled-callback handle
    lives in the module-level ``call_back_obj`` (False when idle).
    """
    global call_back_obj
    if tabs.active == 7:
        # Entering the GDP tab: refresh every 2000 ms.
        call_back_obj = curdoc().add_periodic_callback(update_gdp, 2000)
    elif call_back_obj:
        # Left the GDP tab (tabs.active != 7 is implied by the failed
        # branch above): cancel the refresh and clear the handle.
        curdoc().remove_periodic_callback(call_back_obj)
        call_back_obj = False


#curdoc().add_root(tabs)
# NOTE(review): presumably bokeh's `save` writing a static HTML export --
# the periodic callback above would only run under a live Bokeh server;
# confirm which output mode is intended.
save(tabs)
示例#7
0
    f.vbar(x='DATE', top='value', source = plot_CDS_be, fill_alpha = 0.5,\
       width=dt.timedelta(1), \
       line_color='black', color='color', legend_label="Be")

    f.vbar(x='DATE', top='value', source = plot_CDS_fr, fill_alpha = 0.5,\
       width=dt.timedelta(1), \
       line_color='black', color='color', y_range_name='France', legend_label="Fr")

    f.legend.location = "top_left"
    f.grid.grid_line_alpha = 0
    f.xaxis.axis_label = 'Date'
    f.ygrid.band_fill_color = "olive"
    f.ygrid.band_fill_alpha = 0.1

    return f


# Build the two comparison tabs; each argument pair names the columns to
# compare (presumably FR/BE dataset column names -- TODO confirm).
total = make_plot_compare(['hosp', 'TOTAL_IN'])
tab1 = Panel(child=total, title="Total")

icu = make_plot_compare(['rea', 'TOTAL_IN_ICU'])
tab2 = Panel(child=icu, title="ICU")

#deaths = make_plot_compare(['dc', 'DEATHS'])

tabs = Tabs(tabs=[tab1, tab2])

# Category selector on top, the main plot, then the comparison tabs.
layout = grid([[cat_selection], [p], [tabs]])

curdoc().add_root(layout)
示例#8
0
# (Exercise snippets; p1-p4 and tab1 are created in earlier, unseen steps.
# The dashed lines are scrape separators, not Python.)
tab2 = Panel(child=p2, title="Africa")

# Create tab3 from plot p3: tab3
tab3 = Panel(child=p3, title="Asia")

# Create tab4 from plot p4: tab4
tab4 = Panel(child=p4, title="Europe")


--------------------------------------------------
# Exercise_6 
# Import Tabs from bokeh.models.widgets
from bokeh.models.widgets import Tabs

# Create a Tabs layout: layout
layout = Tabs(tabs=[tab1, tab2, tab3, tab4])

# Specify the name of the output_file and show the result
output_file('tabs.html')
show(layout)


--------------------------------------------------
# Exercise_7 
# Link the x_range of p2 to p1: p2.x_range
p2.x_range = p1.x_range

# Link the y_range of p2 to p1: p2.y_range
p2.y_range = p1.y_range

# Link the x_range of p3 to p1: p3.x_range
示例#9
0
    for column_inference in df_submit.columns:
        data_inference[column_inference] = df_submit.loc[:, column_inference]
    source_inference.data = data_inference


# Wire the inference-tab buttons to their callbacks.
button_read_csv_inference.on_click(read_csv_inference)
button_fill_missing_value_inference.on_click(fill_missing_value_inference)
button_convert_categorical_inference.on_click(convert_categorical_value_inference)
button_inference.on_click(inference)
# Browser-side download handler implemented in download.js (loaded once).
# Fix: read the script through a context manager so the file handle is
# closed deterministically instead of being left to garbage collection.
with open(join(dirname(__file__), "download.js")) as _download_js_file:
    _download_js_code = _download_js_file.read()
button_download.callback = CustomJS(args=dict(source=source_inference),
                                    code=_download_js_code)

# Assemble the inference tab: loading/cleaning controls on the left, the
# loaded table on the right, and the inference output row below.
input_load_data_inference = column(button_read_csv_inference,
                                   button_convert_categorical_inference,
                                   column_fill_missing_value_inference,
                                   input_fill_missing_value_inference,
                                   button_fill_missing_value_inference)
output_load_data_inference = column(title_table_titanic_inference, data_table_titanic_inference)
load_titanic_inference = row(input_load_data_inference, output_load_data_inference)
show_inference = row(column(button_inference, button_download), data_table_inference)
tab_load_titanic_inference = Panel(child=column(load_titanic_inference, show_inference), title="Inference")


# consolidate all tabs

tabs = Tabs(tabs=[tab_load_titanic, tab_data_visualization, tab_train, tab_load_titanic_inference])

curdoc().add_root(tabs)


示例#10
0
def Adjacent(doc):
    args = doc.session_context.request.arguments
    file = args.get('file')[0]
    file = str(file.decode('UTF-8'))

    try:
        df = pd.read_csv("media/" + file, sep=';')
        print('Loaded data succesfully')
    except:
        raise Exception("File does not exist")
    nArr = df.index.values
    dfArr = df.values

    nodes = dfArr
    names = nArr

    N = len(names)
    counts = np.zeros((N, N))
    for i in range(0, len(nodes)):
        for j in range(0, len(nodes)):
            counts[i, j] = nodes[j][i]
            counts[j, i] = nodes[j][i]

    # If data too large
    #########################################################
    N = len(names)
    if len(names) > 110:
        counts = np.delete(counts, np.s_[110:N], axis=0)
        counts = np.delete(counts, np.s_[110:N], axis=1)
    if len(names) > 110:
        names = np.delete(names, np.s_[110:N])

    # Deleting duplicates
    #########################################################
    arrayi = []
    arrayj = []
    for i in names:
        for j in names:
            indexi = np.where(names == i)
            indexj = np.where(names == j)
            for q in indexi[0]:
                for l in indexj[0]:
                    if i == j and q != l:
                        if q not in arrayj or l not in arrayi:
                            names = np.delete(names, l)
                            arrayi.append(q)
                            arrayj.append(l)
    for j in arrayj:
        counts = np.delete(counts, (j), axis=0)
        counts = np.delete(counts, (j), axis=1)

    deleted = 0
    index_K = 0
    for k in counts:
        index = np.where(k == 0.0)
        if len(index[0]) == (len(counts) + deleted):
            counts = np.delete(counts, index_K - deleted, axis=0)
            counts = np.delete(counts, index_K - deleted, axis=1)
            names = np.delete(names, index_K - deleted)
            deleted = deleted + 1
        index_K = index_K + 1
    #########################################################

    # Make a distance matrix
    #######################################################
    N = len(counts)
    distancematrix = np.zeros((N, N))
    count = 0
    for node_1 in counts:
        distancematrix[count] = node_1
        count = count + 1

    for m in range(N):
        for n in range(N):
            if distancematrix[m][n] == 0:
                distancematrix[m][n] = float("inf")
    for l in range(N):
        distancematrix[l][l] = 0

    for k in range(N):
        for i in range(N):
            for j in range(N):
                if distancematrix[i][
                        j] > distancematrix[i][k] + distancematrix[k][j]:
                    distancematrix[i][
                        j] = distancematrix[i][k] + distancematrix[k][j]

    # Reorder alphabetically
    ########################################################
    namesAlph = np.array(sorted(names))
    N = len(namesAlph)
    nodesAlph = np.zeros((N, N))
    index_x_2 = 0
    index_y_2 = 0
    for name_x in namesAlph:
        for name_y in namesAlph:
            index_y = np.where(names == name_y)
            index_x = np.where(names == name_x)
            nodesAlph[index_x_2][index_y_2] = counts[index_x[0][0]][index_y[0]
                                                                    [0]]
            index_y_2 = index_y_2 + 1
        index_y_2 = 0
        index_x_2 = index_x_2 + 1
    #########################################################

    # Reorder hierarchy for increasingly and decreasing
    ########################################################
    N = len(counts)
    distanceM = np.zeros((N, N))
    distanceM_2 = np.zeros((N, N))
    distanceM_3 = np.zeros((N, N))
    count = 0
    for node in distancematrix:
        distanceM[count] = node
        count = count + 1
    namesHeirRow = [""] * len(names)
    namesHeirColumn = [""] * len(names)

    # SORTING COLUMNS
    sumsOfRows = []
    sum = 0
    index = 0
    for rows in distanceM:
        for value in rows:
            sum = sum + value
        sumsOfRows.append([sum, index])
        sum = 0
        index = index + 1
    sumsOfRows = sorted(sumsOfRows)
    index = 0
    for sum in sumsOfRows:
        for rows in range(0, len(distanceM)):
            distanceM_2[rows][index] = distanceM[rows][sum[1]]
            namesHeirColumn[index] = names[sum[1]]
        index = index + 1

    # SORTING ROWS
    sumsOfRows = []
    sum = 0
    index = 0
    for rows in distanceM_2:
        for value in rows:
            sum = sum + value
        sumsOfRows.append([sum, index])
        sum = 0
        index = index + 1
    sumsOfRows = sorted(sumsOfRows)
    index = 0
    for sum in sumsOfRows:
        distanceM_3[index] = distanceM_2[sum[1]]
        namesHeirRow[index] = names[sum[1]]
        index = index + 1
    #######################################################

    # Establishing all the values
    #######################################################
    xname = []
    yname = []
    alpha = []
    for i, node1 in enumerate(counts):
        for j, node2 in enumerate(counts):
            xname.append(names[i])
            yname.append(names[j])
            alpha.append(min(counts[i][j], 0.6) + 0.3)
    xname_2 = []
    yname_2 = []
    alpha_2 = []
    for i, node1 in enumerate(nodesAlph):
        for j, node2 in enumerate(nodesAlph):
            xname_2.append(namesAlph[i])
            yname_2.append(namesAlph[j])
            alpha_2.append(min(nodesAlph[i][j], 0.6) + 0.3)
    xname_3 = []
    yname_3 = []
    alpha_3 = []
    for i, node1 in enumerate(distanceM_3):
        for j, node2 in enumerate(distanceM_3):
            xname_3.append(namesHeirColumn[i])
            yname_3.append(namesHeirRow[j])
            alpha_3.append(min(distanceM_3[i][j], 0.6) + 0.3)
    #######################################################

    # Creating a color map
    #######################################################
    map = cm.get_cmap("BuPu")
    bokehpalette = [mpl.colors.rgb2hex(m) for m in map(np.arange(map.N))]
    mapper = LinearColorMapper(palette=bokehpalette,
                               low=counts.min().min(),
                               high=counts.max().max())
    mapper_2 = LinearColorMapper(palette=bokehpalette,
                                 low=distanceM_3.min().min(),
                                 high=(distanceM_3.max().max()))
    ######################################################

    data = dict(xname=xname,
                yname=yname,
                alphas=alpha,
                count=counts.flatten(),
                xname_2=xname_2,
                yname_2=yname_2,
                alphas_2=alpha_2,
                count_2=nodesAlph.flatten(),
                xname_3=xname_3,
                yname_3=yname_3,
                alphas_3=alpha_3,
                count_3=distancematrix.flatten(),
                count_4=distanceM_3.flatten())

    # Plot -- default
    #######################################################
    p = figure(x_axis_location="above",
               tools="hover,save,wheel_zoom,box_zoom,reset",
               y_range=list(reversed(names)),
               x_range=names,
               tooltips=[('names', '@yname, @xname'), ('count', '@count')])
    p.plot_width = 1200
    p.plot_height = 1000
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_text_font_size = "8pt"
    p.axis.major_label_standoff = 1
    p.xaxis.major_label_orientation = np.pi / 3

    # Plot -- alphabetical
    #######################################################
    p2 = figure(x_axis_location="above",
                tools="hover,save,wheel_zoom,box_zoom,reset",
                y_range=list(reversed(namesAlph)),
                x_range=namesAlph,
                tooltips=[('names', '@yname_2, @xname_2'),
                          ('count_2', '@count_2')])
    p2.plot_width = 1200
    p2.plot_height = 1000
    p2.grid.grid_line_color = None
    p2.axis.axis_line_color = None
    p2.axis.major_tick_line_color = None
    p2.axis.major_label_text_font_size = "8pt"
    p2.axis.major_label_standoff = 1
    p2.xaxis.major_label_orientation = np.pi / 3

    # Plot -- distance matrix
    ######################################################
    p3 = figure(x_axis_location="above",
                tools="hover,save,wheel_zoom,box_zoom,reset",
                y_range=list(reversed(names)),
                x_range=names,
                tooltips=[('names', '@yname, @xname'),
                          ('count_3', '@count_3')])
    p3.plot_width = 1200
    p3.plot_height = 1000
    p3.grid.grid_line_color = None
    p3.axis.axis_line_color = None
    p3.axis.major_tick_line_color = None
    p3.axis.major_label_text_font_size = "8pt"
    p3.axis.major_label_standoff = 1
    p3.xaxis.major_label_orientation = np.pi / 3

    # Plot -- hierarchy -- increasing
    #######################################################
    p4 = figure(x_axis_location="above",
                tools="hover,save,wheel_zoom,box_zoom,reset",
                y_range=list(reversed(namesHeirRow)),
                x_range=namesHeirColumn,
                tooltips=[('names', '@yname_3, @xname_3'),
                          ('count_4', '@count_4')])
    p4.plot_width = 1200
    p4.plot_height = 1000
    p4.grid.grid_line_color = None
    p4.axis.axis_line_color = None
    p4.axis.major_tick_line_color = None
    p4.axis.major_label_text_font_size = "8pt"
    p4.axis.major_label_standoff = 1
    p4.xaxis.major_label_orientation = np.pi / 3
    #######################################################

    # Plot -- hierarchy -- decreasing
    #######################################################
    p5 = figure(x_axis_location="above",
                tools="hover,save,wheel_zoom,box_zoom,reset",
                y_range=namesHeirRow,
                x_range=list(reversed(namesHeirColumn)),
                tooltips=[('names', '@yname_3, @xname_3'),
                          ('count_4', '@count_4')])
    p5.plot_width = 1200
    p5.plot_height = 1000
    p5.grid.grid_line_color = None
    p5.axis.axis_line_color = None
    p5.axis.major_tick_line_color = None
    p5.axis.major_label_text_font_size = "8pt"
    p5.axis.major_label_standoff = 1
    p5.xaxis.major_label_orientation = np.pi / 3
    #######################################################

    tab1 = Panel(child=p, title="Adjacency Matrix")
    tab2 = Panel(child=p2, title="Alphabetical Adjacency")
    tab3 = Panel(child=p3, title="Distance Matrix")
    tab4 = Panel(child=p4, title="Increasing Distance")
    tab5 = Panel(child=p5, title="Decreasing Distance")

    tabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5])

    p.rect('xname',
           'yname',
           0.9,
           0.9,
           source=data,
           color=transform('count', mapper),
           alpha='alphas',
           line_color='#85929E',
           hover_line_color='black',
           hover_color='black')

    p2.rect('xname_2',
            'yname_2',
            0.9,
            0.9,
            source=data,
            fill_color=transform('count_2', mapper),
            alpha='alphas_2',
            line_color='#85929E',
            hover_line_color='black',
            hover_color='black')

    # NOTE(review): p3 renders xname_2/yname_2 (alphabetical order) while
    # its ranges and tooltips use the unsorted `names`/@xname fields --
    # looks like a copy-paste slip from p2; confirm intended.
    p3.rect('xname_2',
            'yname_2',
            0.9,
            0.9,
            source=data,
            fill_color=transform('count_3', mapper_2),
            alpha='alphas_3',
            line_color='#85929E',
            hover_line_color='black',
            hover_color='black')

    p4.rect('xname_3',
            'yname_3',
            0.9,
            0.9,
            source=data,
            fill_color=transform('count_4', mapper_2),
            alpha='alphas_3',
            line_color='#85929E',
            hover_line_color='black',
            hover_color='black')

    p5.rect('xname_3',
            'yname_3',
            0.9,
            0.9,
            source=data,
            fill_color=transform('count_4', mapper_2),
            alpha='alphas_3',
            line_color='#85929E',
            hover_line_color='black',
            hover_color='black')

    # NOTE(review): the same ColorBar instance is attached to all five
    # figures; recent Bokeh versions reject sharing one model between
    # plots -- confirm against the Bokeh version in use.
    color_bar = ColorBar(color_mapper=mapper,
                         major_label_text_font_size="10pt",
                         ticker=BasicTicker(desired_num_ticks=1),
                         formatter=PrintfTickFormatter(format="%d"),
                         label_standoff=6,
                         border_line_color=None,
                         location=(0, 0))
    p.add_layout(color_bar, 'right')
    p2.add_layout(color_bar, 'right')
    p3.add_layout(color_bar, 'right')
    p4.add_layout(color_bar, 'right')
    p5.add_layout(color_bar, 'right')

    doc.add_root(row(tabs))
示例#11
0
def show_camera(content,
                geom,
                pad_width,
                pad_height,
                label,
                titles=None,
                showlog=True,
                display_range=None,
                content_lowlim=None,
                content_upplim=None):
    """Build an interactive bokeh display of per-pixel camera quantities.

    Parameters
    ----------
    content: pixel-wise quantity to be plotted, ndarray with shape (N,
    number_of_pixels) where N is the number of different sets of pixel
    values, for example N different data runs or whatever. The shape can also
    be just (number_of_pixels), in case a single camera display is to be shown

    geom: camera geometry
    pad_width: width in pixels of each of the 3 pads in the plot
    pad_height: height in pixels of each of the 3 pads in the plot
    label: string to label the quantity which is displayed, the same for the N
    sets of pixels inside "content"
    titles: list of N strings, with the title specific to each of the sets
    of pixel values to be displayed: for example, indicating run numbers
    showlog: if True, also build a logarithmic-scale camera display tab

    content_lowlim: scalar or ndarray of shape(N, number_of_pixels),
    same as content: lowest value of "content" which is considered healthy,
    below which a message will be written out
    content_upplim: highest value considered healthy, same as above
    display_range: range of "content" to be displayed

    Returns
    -------
    [slider, p1, range_slider, p2, p3]: three bokeh figures, intended for
    showing them on the same row, and two sliders, one for the run numbers (
    or whatever "sets" of data we are displaying) and the other for the
    z-range of the plots.
    p1 is the camera display (with "content" in linear & logarithmic scale)
    p2: content vs. pixel
    p3: histogram of content (with one entry per pixel)

    """

    # patch to reduce gaps between bokeh's cam circular pixels:
    camgeom = copy.deepcopy(geom)

    numsets = 1
    if np.ndim(content) > 1:
        numsets = content.shape[0]
    # numsets is the number of different sets of pixel data to be displayed

    # Normalize "content" into a list of 1-d per-pixel arrays, one per set.
    allimages = []
    if np.ndim(content) == 1:
        allimages.append(content)
    else:
        for i in range(1, numsets + 1):
            allimages.append(content[i - 1])

    if titles is None:
        titles = [''] * numsets

    # By default we plot the range which contains 99.8% of all events, so that
    # outliers do not prevent us from seeing the bulk of the data:
    display_min = np.nanquantile(allimages, 0.001)
    display_max = np.nanquantile(allimages, 0.999)

    # An explicit display_range overrides the quantile-based default.
    if display_range is not None:
        display_min = display_range[0]
        display_max = display_range[1]

    # Linear-scale camera display, initialized with the first set.
    cam = CameraDisplay(camgeom,
                        display_min,
                        display_max,
                        label,
                        titles[0],
                        use_notebook=False,
                        autoshow=False)
    cam.image = allimages[0]
    cam.figure.title.text = titles[0]

    # Optional logarithmic-scale companion display (only if showlog).
    allimageslog = []
    camlog = None
    source1log = None
    color_mapper_log = None
    titlelog = None

    if showlog:
        for image in allimages:
            logcontent = np.copy(image)
            for i, x in enumerate(logcontent):
                # workaround as long as log z-scale is not implemented in bokeh camera:
                # non-positive values cannot be log-scaled, so blank them out
                if x <= 0:
                    logcontent[i] = np.nan
                else:
                    logcontent[i] = np.log10(image[i])
            allimageslog.append(logcontent)

        camlog = CameraDisplay(camgeom,
                               np.nanquantile(allimageslog, 0.001),
                               np.nanquantile(allimageslog, 0.999),
                               label,
                               titles[0],
                               use_notebook=False,
                               autoshow=False)
        camlog.image = allimageslog[0]
        camlog.figure.title.text = titles[0]
        source1log = camlog.datasource
        color_mapper_log = camlog._color_mapper
        titlelog = camlog.figure.title

    # Per-pixel cluster coordinates, used in the hover tooltips below.
    cluster_i = []
    cluster_j = []
    pix_id_in_cluster = []
    for i in camgeom.pix_id:
        data = get_pixel_location(i)
        cluster_i.append(data[0])
        cluster_j.append(data[1])
        pix_id_in_cluster.append(data[2])

    # Common cosmetics and hover tooltips for both camera displays.
    for c in [cam, camlog]:
        if c is None:
            continue
        c.datasource.add(list(c.geom.pix_id), 'pix_id')
        c.datasource.add(cluster_i, 'cluster_i')
        c.datasource.add(cluster_j, 'cluster_j')
        c.datasource.add(pix_id_in_cluster, 'pix_id_in_cluster')

        # c.add_colorbar()
        c.figure.plot_width = pad_width
        c.figure.plot_height = int(pad_height * 0.85)
        c.figure.grid.visible = False
        c.figure.axis.visible = True
        c.figure.xaxis.axis_label = 'X position (m)'
        c.figure.yaxis.axis_label = 'Y position (m)'
        c.figure.add_tools(
            HoverTool(tooltips=[('pix_id', '@pix_id'), ('value', '@image'),
                                ('cluster (i,j)', '(@cluster_i, @cluster_j)'),
                                ('pix # in cluster', '@pix_id_in_cluster')],
                      mode='mouse',
                      point_policy='snap_to_data'))

    # p1: tabbed camera display, linear scale plus optional log scale.
    tab1 = Panel(child=cam.figure, title='linear')
    if showlog:
        tab2 = Panel(child=camlog.figure, title='logarithmic')
        p1 = Tabs(tabs=[tab1, tab2])
    else:
        p1 = Tabs(tabs=[tab1])
    p1.margin = (0, 0, 0, 25)

    # p2: scatter of content value vs. pixel id.
    p2 = figure(background_fill_color='#ffffff',
                y_range=(display_min, display_max),
                x_axis_label='Pixel id',
                y_axis_label=label)
    p2.min_border_top = 60
    p2.min_border_bottom = 70

    source2 = ColumnDataSource(
        data=dict(pix_id=cam.geom.pix_id, value=cam.image))
    pixel_data = p2.circle(x='pix_id', y='value', size=2, source=source2)

    # Missing limits default to NaN so no limit lines are visible.
    if content_lowlim is None:
        content_lowlim = np.nan * np.ones_like(content)
    if content_upplim is None:
        content_upplim = np.nan * np.ones_like(content)

    # Scalar limits are broadcast to one value per pixel and per set.
    if np.isscalar(content_lowlim):
        content_lowlim = content_lowlim * np.ones_like(content)
    source2_lowlim = ColumnDataSource(
        data=dict(pix_id=cam.geom.pix_id, value=content_lowlim[0]))
    p2.line(x='pix_id',
            y='value',
            source=source2_lowlim,
            line_dash='dashed',
            color='orange',
            line_width=2)

    if np.isscalar(content_upplim):
        content_upplim = content_upplim * np.ones_like(content)
    source2_upplim = ColumnDataSource(
        data=dict(pix_id=cam.geom.pix_id, value=content_upplim[0]))
    p2.line(x='pix_id',
            y='value',
            source=source2_upplim,
            line_dash='dashed',
            color='red')

    p2.add_tools(
        HoverTool(tooltips=[('(pix_id, value)', '(@pix_id, @value)')],
                  mode='mouse',
                  point_policy='snap_to_data',
                  renderers=[pixel_data]))

    p2.y_range = Range1d(display_min, display_max)

    # p3: one histogram of content per set, precomputed here.
    allhists = []
    alledges = []

    # We define 100 bins between display_min and display_max
    # Note that values beyond that range won't be histogrammed and hence will
    # not appear on the "p3" figure below.
    nbins = 100
    for image in allimages:
        hist, edges = np.histogram(image[~np.isnan(image)],
                                   bins=nbins,
                                   range=(display_min, display_max))
        allhists.append(hist)
        alledges.append(edges)

    # bottom=0.7 matches the log-scale y_range start below (log axes
    # cannot start at 0).
    source3 = ColumnDataSource(data=dict(top=allhists[0],
                                         bottom=0.7 *
                                         np.ones_like(allhists[0]),
                                         left=alledges[0][:-1],
                                         right=alledges[0][1:]))

    p3 = figure(background_fill_color='#ffffff',
                y_range=(0.7, np.max(allhists) * 1.1),
                x_range=(display_min, display_max),
                x_axis_label=label,
                y_axis_label='Number of pixels',
                y_axis_type='log')
    p3.quad(top='top',
            bottom='bottom',
            left='left',
            right='right',
            source=source3)

    # NOTE(review): titles cannot be None here (it was defaulted above),
    # so this re-check looks like dead code — confirm before removing.
    if titles is None:
        titles = [None] * len(allimages)

    cdsdata = dict(z=allimages, hist=allhists, edges=alledges, titles=titles)
    # BEWARE!! these have to be lists of arrays. Not 2D numpy arrays!!
    cdsdata['lowlim'] = [x for x in content_lowlim]
    cdsdata['upplim'] = [x for x in content_upplim]

    if showlog:
        cdsdata['zlog'] = allimageslog

    cds_allimages = ColumnDataSource(data=cdsdata)
    # One has to add here everything that must change when moving the slider:
    # NOTE(review): in the JS below, `varzlow`/`varzupp` are missing the
    # space after `var` and become implicit globals — it works, but looks
    # like a typo for `var zlow` / `var zupp`.
    callback = CustomJS(args=dict(source1=cam.datasource,
                                  source1log=source1log,
                                  source2=source2,
                                  source2_lowlim=source2_lowlim,
                                  source2_upplim=source2_upplim,
                                  source3=source3,
                                  zz=cds_allimages,
                                  title=cam.figure.title,
                                  titlelog=titlelog,
                                  showlog=showlog),
                        code="""
        var slider_value = cb_obj.value
        var z = zz.data['z']
        varzlow = zz.data['lowlim']
        varzupp = zz.data['upplim']
        var edges = zz.data['edges']
        var hist = zz.data['hist']
        for (var i = 0; i < source1.data['image'].length; i++) {
             source1.data['image'][i] = z[slider_value-1][i]
             if (showlog) {
                 var zlog = zz.data['zlog']
                 source1log.data['image'][i] = zlog[slider_value-1][i]
             }
             source2.data['value'][i] = source1.data['image'][i]
             source2_lowlim.data['value'][i] = varzlow[slider_value-1][i]
             source2_upplim.data['value'][i] = varzupp[slider_value-1][i]
        }
        for (var j = 0; j < source3.data['top'].length; j++) {
            source3.data['top'][j] = hist[slider_value-1][j]
            source3.data['left'][j] = edges[slider_value-1][j]
            source3.data['right'][j] = edges[slider_value-1][j+1]
        }

        title.text = zz.data['titles'][slider_value-1]
        source1.change.emit()
        if (showlog) {
            titlelog.text = title.text
            source1log.change.emit()
        }
        source2.change.emit()
        source2_lowlim.change.emit()
        source2_upplim.change.emit()
        source3.change.emit()
    """)

    # The run slider only exists when there is more than one set to browse.
    slider = None
    if numsets > 1:
        slider_height = 300
        # WARNING: the html won't look nice for number of sets much larger
        # than 300! But in this way we avoid that the slider skips elements:
        if numsets > 299:
            slider_height = numsets + 1
        slider = Slider(start=1,
                        end=numsets,
                        value=1,
                        step=1,
                        title="run",
                        orientation='vertical',
                        show_value=False,
                        height=slider_height)

        slider.margin = (0, 0, 0, 35)
        slider.js_on_change('value', callback)

    # z-range slider: rescales both color mappers (log one in log10 units).
    callback2 = CustomJS(args=dict(color_mapper=cam._color_mapper,
                                   color_mapper_log=color_mapper_log,
                                   showlog=showlog),
                         code="""
        var range = cb_obj.value
        color_mapper.low = range[0]
        color_mapper.high = range[1]
        color_mapper.change.emit()
        if (showlog) {
            if (range[0] > 0.)
                color_mapper_log.low = Math.log(range[0])/Math.LN10    
            color_mapper_log.high = Math.log(range[1])/Math.LN10
            color_mapper_log.change.emit()
        }
    """)
    step = (display_max - display_min) / 100.
    range_slider = RangeSlider(start=display_min,
                               end=display_max,
                               value=(display_min, display_max),
                               step=step,
                               title="z_range",
                               orientation='vertical',
                               direction='rtl',
                               height=300,
                               show_value=False)
    range_slider.js_on_change('value', callback2)

    return [slider, p1, range_slider, p2, p3]
示例#12
0
# Layout and output
# Create one titled Panel per tab; each disease-state tab pairs the
# heatmap ("hm...") plot with the corresponding chart ("ch...") plot.


def _paired_panel(state, title):
    # One row holding the heatmap and chart plots for a given state.
    return Panel(child=row(plotref_dict["hm" + state],
                           plotref_dict["ch" + state]),
                 title=title)


tab1 = Panel(child=row(s2, s3), title='Summary')
tab2 = _paired_panel("susceptible", 'Susceptible')
tab3 = _paired_panel("presymptomatic", 'Presymptomatic')
tab4 = _paired_panel("symptomatic", 'Symptomatic')
tab5 = _paired_panel("recovered", 'Recovered')
tab6 = _paired_panel("dead", 'Dead')

# tab7 = Panel(child=row(plotref_dict["hmRetail"],plotref_dict["hmPrimarySchool"]), title='test')
tab7 = Panel(child=row(s5), title='Venue dangers')

# Put the Panels in a Tabs object and display the result
tabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5, tab6, tab7])
show(tabs)

# l = grid([
#     [s1,s3],
#     [s2,s4],
# ])

# show(l)
def dashboard(request):
    """Render the dashboard page.

    Gathers headline KPI values and the Bokeh plots, splits the plots
    into embeddable script/div components, and renders ``dashboard.html``
    with the resulting context.
    """

    conf.DATA_REFRESH_WARNING = False  # always reset to false at each refresh

    # Headline KPI numbers shown at the top of the page.
    total_las = kpis.totalLAs()
    las_declared = kpis.laDeclared()
    las_net_zero_2030 = kpis.laNetZero2030()
    max_daily_users = '{:,}'.format(kpis.maxDailyWebsiteUsers())

    # Stand-alone charts.
    la_decs_plot = charts.laDeclarationsPlot()
    la_hex_map_plot = charts.laHexMapPlot()
    party_net_zero_plot = charts.partyNetZeroPlot()
    la_net_zero_plot = charts.laNetZeroPlot()
    website_plot = charts.websitePlot()
    activists_plot = charts.actionNetworkActivistsPlot()
    #p_bookSales = charts.bookSalesPlot()

    # Per-platform social media charts, wrapped in a tabbed layout.
    # NOTE: the tab order below (Twitter before YouTube) deliberately
    # differs from the creation order.
    instagram_tab = Panel(
        child=charts.socialMediaPlot('Instagram',
                                     subtitle='@ExtinctionRebellion'),
        title='Instagram')
    facebook_tab = Panel(
        child=charts.socialMediaPlot('Facebook',
                                     subtitle='@ExtinctionRebellion'),
        title='Facebook')
    youtube_tab = Panel(
        child=charts.socialMediaPlot('YouTube', subtitle='Global Account'),
        title='YouTube')
    twitter_tab = Panel(
        child=charts.socialMediaPlot('Twitter',
                                     subtitle='@ExtinctionR & @XRebellionUK'),
        title='Twitter')
    social_media_tabs = Tabs(
        tabs=[instagram_tab, facebook_tab, twitter_tab, youtube_tab],
        css_classes=['chart_tabs'])

    plots = {
        'la_decs_plot': la_decs_plot,
        'la_hex_map_plot': la_hex_map_plot,
        'party_net_zero_plot': party_net_zero_plot,
        'la_net_zero_plot': la_net_zero_plot,
        'website_plot': website_plot,
        'social_media_tabs': social_media_tabs,
        'action_network_activists_plot': activists_plot,
        #'book_sales_plot': p_bookSales
    }

    # components() turns the Bokeh models into one shared <script> plus a
    # placeholder <div> per plot, keyed like the dict above.
    script, plot_divs = components(plots)

    context = {
        'script': script,
        'ga_tracking_id': conf.GA_TRACKING_ID,
        'data_refresh_warning': conf.DATA_REFRESH_WARNING,
        'kpi_total_las': total_las,
        'kpi_las_declared': las_declared,
        'kpi_las_with_2030_net_zero': las_net_zero_2030,
        'kpi_max_website_users': max_daily_users,
        'la_decs_title': 'UK LOCAL AUTHORITIES DECLARING A CLIMATE EMERGENCY',
        'la_decs_plot': plot_divs['la_decs_plot'],
        'la_hex_map_title':
        'MAP OF DECLARED LOCAL AUTHORITIES (ENGLAND & WALES)',
        'la_hex_map_plot': plot_divs['la_hex_map_plot'],
        'party_party_net_zero_title': 'NET ZERO TARGET BY UK POLITICAL PARTY',
        'party_party_net_zero_plot': plot_divs['party_net_zero_plot'],
        'la_net_zero_title': 'UK LOCAL AUTHORITY NET ZERO TARGETS',
        'la_net_zero_plot': plot_divs['la_net_zero_plot'],
        'website_title': 'EXTINCTIONREBELLION.UK',
        'website_plot': plot_divs['website_plot'],
        'social_media_title': 'SOCIAL MEDIA',
        'social_media_tabs': plot_divs['social_media_tabs'],
        'action_network_activists_title': 'UK MEMBERSHIP (ACTION NETWORK)',
        'action_network_activists_plot':
        plot_divs['action_network_activists_plot'],

        #'book_sales_title': 'SALES OF ‘THIS IS NOT A DRILL‘',
        #'book_sales_plot': plotDivs['book_sales_plot']
    }

    return render(request, 'dashboard.html', context)
示例#14
0
def photometry_plot(obj_id, user, width=600, height=300):
    """Create scatter plot of photometry for object.
    Parameters
    ----------
    obj_id : str
        ID of Obj to be plotted.
    user :
        User whose accessible groups restrict which photometry is shown.
    width : int
        Width in pixels of each plot.
    height : int
        Height in pixels of the flux plot (the mag plot is 100 px taller).
    Returns
    -------
    (str, str)
        Returns (docs_json, render_items) json for the desired plot.
    """

    # Pull all photometry for the object that belongs to groups the user
    # can access, joined with telescope/instrument names for labeling.
    data = pd.read_sql(
        DBSession().query(
            Photometry,
            Telescope.nickname.label("telescope"),
            Instrument.name.label("instrument"),
        ).join(Instrument, Instrument.id == Photometry.instrument_id).join(
            Telescope, Telescope.id == Instrument.telescope_id).filter(
                Photometry.obj_id == obj_id).filter(
                    Photometry.groups.any(
                        Group.id.in_([g.id for g in user.accessible_groups
                                      ]))).statement,
        DBSession().bind,
    )

    if data.empty:
        return None, None, None

    # One color per filter; one legend label per instrument/filter pair.
    data['color'] = [get_color(f) for f in data['filter']]
    data['label'] = [
        f'{i} {f}-band' for i, f in zip(data['instrument'], data['filter'])
    ]

    data['zp'] = PHOT_ZP
    data['magsys'] = 'ab'
    data['alpha'] = 1.0
    # Limiting magnitude corresponding to a DETECT_THRESH-sigma flux error.
    data['lim_mag'] = -2.5 * np.log10(
        data['fluxerr'] * DETECT_THRESH) + data['zp']

    # Passing a dictionary to a bokeh datasource causes the frontend to die,
    # deleting the dictionary column fixes that
    del data['original_user_data']

    # keep track of things that are only upper limits
    data['hasflux'] = ~data['flux'].isna()

    # calculate the magnitudes - a photometry point is considered "significant"
    # or "detected" (and thus can be represented by a magnitude) if its snr
    # is above DETECT_THRESH
    obsind = data['hasflux'] & (data['flux'].fillna(0.0) / data['fluxerr'] >=
                                DETECT_THRESH)
    data.loc[~obsind, 'mag'] = None
    data.loc[obsind, 'mag'] = -2.5 * np.log10(data[obsind]['flux']) + PHOT_ZP

    # calculate the magnitude errors using standard error propagation formulae
    # https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
    data.loc[~obsind, 'magerr'] = None
    coeff = 2.5 / np.log(10)
    magerrs = np.abs(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])
    data.loc[obsind, 'magerr'] = magerrs
    data['obs'] = obsind
    data['stacked'] = False

    # One group per instrument/filter label; each group gets its own
    # renderers so the legend checkboxes can toggle them independently.
    split = data.groupby('label', sort=False)

    finite = np.isfinite(data['flux'])
    fdata = data[finite]
    lower = np.min(fdata['flux']) * 0.95
    upper = np.max(fdata['flux']) * 1.05

    # ---- Flux light curve ("Flux" tab) ----
    plot = figure(
        plot_width=width,
        plot_height=height,
        active_drag='box_zoom',
        tools='box_zoom,wheel_zoom,pan,reset,save',
        y_range=(lower, upper),
    )

    imhover = HoverTool(tooltips=tooltip_format)
    plot.add_tools(imhover)

    # model_dict maps 'obs{i}'/'bin{i}'/'obserr{i}'/'binerr{i}' keys to
    # renderers; the JS callbacks below look them up by these names.
    model_dict = {}

    for i, (label, sdf) in enumerate(split):

        # for the flux plot, we only show things that have a flux value
        df = sdf[sdf['hasflux']]

        key = f'obs{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='flux',
            color='color',
            marker='circle',
            fill_color='color',
            alpha='alpha',
            source=ColumnDataSource(df),
        )

        imhover.renderers.append(model_dict[key])

        # Empty source to be filled client-side when binning is enabled.
        key = f'bin{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='flux',
            color='color',
            marker='circle',
            fill_color='color',
            source=ColumnDataSource(data=dict(
                mjd=[],
                flux=[],
                fluxerr=[],
                filter=[],
                color=[],
                lim_mag=[],
                mag=[],
                magerr=[],
                stacked=[],
                instrument=[],
            )),
        )

        imhover.renderers.append(model_dict[key])

        # Vertical error bars, drawn as one short multi_line per point.
        key = 'obserr' + str(i)
        y_err_x = []
        y_err_y = []

        for d, ro in df.iterrows():
            px = ro['mjd']
            py = ro['flux']
            err = ro['fluxerr']

            y_err_x.append((px, px))
            y_err_y.append((py - err, py + err))

        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            alpha='alpha',
            source=ColumnDataSource(data=dict(xs=y_err_x,
                                              ys=y_err_y,
                                              color=df['color'],
                                              alpha=[1.0] * len(df))),
        )

        key = f'binerr{i}'
        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
        )

    plot.xaxis.axis_label = 'MJD'
    plot.yaxis.axis_label = 'Flux (μJy)'
    plot.toolbar.logo = None

    # Legend checkboxes: one entry per instrument/filter group.
    toggle = CheckboxWithLegendGroup(
        labels=list(data.label.unique()),
        active=list(range(len(data.label.unique()))),
        colors=list(data.color.unique()),
    )

    # TODO replace `eval` with Namespaces
    # https://github.com/bokeh/bokeh/pull/6340
    toggle.callback = CustomJS(
        args={
            'toggle': toggle,
            **model_dict
        },
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs',
                         'togglef.js')).read(),
    )

    slider = Slider(start=0.0,
                    end=15.0,
                    value=0.0,
                    step=1.0,
                    title='Binsize (days)')

    # Client-side binning/stacking of points within the chosen bin size.
    callback = CustomJS(
        args={
            'slider': slider,
            'toggle': toggle,
            **model_dict
        },
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs',
                         'stackf.js')).read().replace('default_zp',
                                                      str(PHOT_ZP)).replace(
                                                          'detect_thresh',
                                                          str(DETECT_THRESH)),
    )

    slider.js_on_change('value', callback)

    # Mark the first and last detections
    detection_dates = data[data['hasflux']]['mjd']
    if len(detection_dates) > 0:
        first = round(detection_dates.min(), 6)
        last = round(detection_dates.max(), 6)
        first_color = "#34b4eb"
        last_color = "#8992f5"
        # Draw the marker lines well beyond the visible y-range so they
        # span the full height even after zooming.
        midpoint = (upper + lower) / 2
        line_top = 5 * upper - 4 * midpoint
        line_bottom = 5 * lower - 4 * midpoint
        y = np.linspace(line_bottom, line_top, num=5000)
        first_r = plot.line(
            x=np.full(5000, first),
            y=y,
            line_alpha=0.5,
            line_color=first_color,
            line_width=2,
        )
        plot.add_tools(
            HoverTool(
                tooltips=[("First detection", f'{first}')],
                renderers=[first_r],
            ))
        last_r = plot.line(
            x=np.full(5000, last),
            y=y,
            line_alpha=0.5,
            line_color=last_color,
            line_width=2,
        )
        plot.add_tools(
            HoverTool(
                tooltips=[("Last detection", f'{last}')],
                renderers=[last_r],
            ))

    layout = row(plot, toggle)
    layout = column(slider, layout)

    p1 = Panel(child=layout, title='Flux')

    # now make the mag light curve
    # y-range is inverted (ymax first) since brighter mags are smaller.
    ymax = (np.nanmax((
        np.nanmax(data.loc[obsind, 'mag']) if any(obsind) else np.nan,
        np.nanmax(data.loc[~obsind, 'lim_mag']) if any(~obsind) else np.nan,
    )) + 0.1)
    ymin = (np.nanmin((
        np.nanmin(data.loc[obsind, 'mag']) if any(obsind) else np.nan,
        np.nanmin(data.loc[~obsind, 'lim_mag']) if any(~obsind) else np.nan,
    )) - 0.1)

    xmin = data['mjd'].min() - 2
    xmax = data['mjd'].max() + 2

    plot = figure(
        plot_width=width,
        plot_height=height + 100,
        active_drag='box_zoom',
        tools='box_zoom,wheel_zoom,pan,reset,save',
        y_range=(ymax, ymin),
        x_range=(xmin, xmax),
        toolbar_location='above',
        toolbar_sticky=False,
        x_axis_location='above',
    )

    # Mark the first and last detections again
    # NOTE(review): first_color/last_color are defined only in the flux-plot
    # block above; this relies on detections implying hasflux rows — confirm.
    detection_dates = data[obsind]['mjd']
    if len(detection_dates) > 0:
        first = round(detection_dates.min(), 6)
        last = round(detection_dates.max(), 6)
        midpoint = (ymax + ymin) / 2
        line_top = 5 * ymax - 4 * midpoint
        line_bottom = 5 * ymin - 4 * midpoint
        y = np.linspace(line_bottom, line_top, num=5000)
        first_r = plot.line(
            x=np.full(5000, first),
            y=y,
            line_alpha=0.5,
            line_color=first_color,
            line_width=2,
        )
        plot.add_tools(
            HoverTool(
                tooltips=[("First detection", f'{first}')],
                renderers=[first_r],
            ))
        last_r = plot.line(
            x=np.full(5000, last),
            y=y,
            line_alpha=0.5,
            line_color=last_color,
            line_width=2,
        )
        plot.add_tools(
            HoverTool(
                tooltips=[("Last detection", f'{last}')],
                renderers=[last_r],
                point_policy='follow_mouse',
            ))

    imhover = HoverTool(tooltips=tooltip_format)
    plot.add_tools(imhover)

    model_dict = {}

    for i, (label, df) in enumerate(split):

        # Detections: plotted as magnitudes.
        key = f'obs{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='mag',
            color='color',
            marker='circle',
            fill_color='color',
            alpha='alpha',
            source=ColumnDataSource(df[df['obs']]),
        )

        imhover.renderers.append(model_dict[key])

        # Non-detections: plotted as limiting-mag upper-limit triangles.
        unobs_source = df[~df['obs']].copy()
        unobs_source.loc[:, 'alpha'] = 0.8

        key = f'unobs{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='lim_mag',
            color='color',
            marker='inverted_triangle',
            fill_color='white',
            line_color='color',
            alpha='alpha',
            source=ColumnDataSource(unobs_source),
        )

        imhover.renderers.append(model_dict[key])

        # Empty sources below are filled client-side by the binning JS.
        key = f'bin{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='mag',
            color='color',
            marker='circle',
            fill_color='color',
            source=ColumnDataSource(data=dict(
                mjd=[],
                flux=[],
                fluxerr=[],
                filter=[],
                color=[],
                lim_mag=[],
                mag=[],
                magerr=[],
                instrument=[],
                stacked=[],
            )),
        )

        imhover.renderers.append(model_dict[key])

        # Magnitude error bars for the detections.
        key = 'obserr' + str(i)
        y_err_x = []
        y_err_y = []

        for d, ro in df[df['obs']].iterrows():
            px = ro['mjd']
            py = ro['mag']
            err = ro['magerr']

            y_err_x.append((px, px))
            y_err_y.append((py - err, py + err))

        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            alpha='alpha',
            source=ColumnDataSource(data=dict(
                xs=y_err_x,
                ys=y_err_y,
                color=df[df['obs']]['color'],
                alpha=[1.0] * len(df[df['obs']]),
            )),
        )

        key = f'binerr{i}'
        model_dict[key] = plot.multi_line(
            xs='xs',
            ys='ys',
            color='color',
            source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
        )

        key = f'unobsbin{i}'
        model_dict[key] = plot.scatter(
            x='mjd',
            y='lim_mag',
            color='color',
            marker='inverted_triangle',
            fill_color='white',
            line_color='color',
            alpha=0.8,
            source=ColumnDataSource(data=dict(
                mjd=[],
                flux=[],
                fluxerr=[],
                filter=[],
                color=[],
                lim_mag=[],
                mag=[],
                magerr=[],
                instrument=[],
                stacked=[],
            )),
        )
        imhover.renderers.append(model_dict[key])

        # Full per-group data, used by the JS callbacks and CSV export.
        key = f'all{i}'
        model_dict[key] = ColumnDataSource(df)

        key = f'bold{i}'
        model_dict[key] = ColumnDataSource(df[[
            'mjd',
            'flux',
            'fluxerr',
            'mag',
            'magerr',
            'filter',
            'zp',
            'magsys',
            'lim_mag',
            'stacked',
        ]])

    plot.xaxis.axis_label = 'MJD'
    plot.yaxis.axis_label = 'AB mag'
    plot.toolbar.logo = None

    # Secondary y-axis with absolute magnitude, if a distance modulus exists.
    obj = DBSession().query(Obj).get(obj_id)
    if obj.dm is not None:
        plot.extra_y_ranges = {
            "Absolute Mag": Range1d(start=ymax - obj.dm, end=ymin - obj.dm)
        }
        plot.add_layout(
            LinearAxis(y_range_name="Absolute Mag", axis_label="m - DM"),
            'right')

    # Secondary x-axis counting days before now.
    now = Time.now().mjd
    plot.extra_x_ranges = {
        "Days Ago": Range1d(start=now - xmin, end=now - xmax)
    }
    plot.add_layout(LinearAxis(x_range_name="Days Ago", axis_label="Days Ago"),
                    'below')

    toggle = CheckboxWithLegendGroup(
        labels=list(data.label.unique()),
        active=list(range(len(data.label.unique()))),
        colors=list(data.color.unique()),
    )

    # TODO replace `eval` with Namespaces
    # https://github.com/bokeh/bokeh/pull/6340
    toggle.callback = CustomJS(
        args={
            'toggle': toggle,
            **model_dict
        },
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs',
                         'togglem.js')).read(),
    )

    slider = Slider(start=0.0,
                    end=15.0,
                    value=0.0,
                    step=1.0,
                    title='Binsize (days)')

    # CSV export of the currently displayed ("bold") light curve.
    button = Button(label="Export Bold Light Curve to CSV")
    button.callback = CustomJS(
        args={
            'slider': slider,
            'toggle': toggle,
            **model_dict
        },
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs',
                         "download.js")).read().replace('objname',
                                                        obj_id).replace(
                                                            'default_zp',
                                                            str(PHOT_ZP)),
    )

    toplay = row(slider, button)
    callback = CustomJS(
        args={
            'slider': slider,
            'toggle': toggle,
            **model_dict
        },
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs',
                         'stackm.js')).read().replace('default_zp',
                                                      str(PHOT_ZP)).replace(
                                                          'detect_thresh',
                                                          str(DETECT_THRESH)),
    )
    slider.js_on_change('value', callback)

    layout = row(plot, toggle)
    layout = column(toplay, layout)

    p2 = Panel(child=layout, title='Mag')

    # Mag tab first, then Flux.
    tabs = Tabs(tabs=[p2, p1])
    return _plot_to_json(tabs)
示例#15
0
    def initialize_plot(self, plots=None, ranges=None):
        """
        Build the bokeh model for the whole layout.

        Lays out every AdjointLayout subplot on a grid (or collects them into
        Tabs when ``self.tabs`` is set), wraps the result in the appropriate
        bokeh layout model, registers it in ``self.handles`` and returns it.

        Parameters
        ----------
        plots: list or None
            Plots created so far; used for axis sharing when
            ``self.shared_axes`` is enabled.
        ranges: dict or None
            Ignored; ranges are recomputed from the layout below.

        Returns
        -------
        The top-level bokeh model (Tabs, gridplot or Column).
        """
        # Recompute ranges for the last key; the ``ranges`` argument is unused
        ranges = self.compute_ranges(self.layout, self.keys[-1], None)

        plot_grid = self._compute_grid()
        passed_plots = [] if plots is None else plots
        # Row/column offsets account for marginal (adjoined) plots that pad
        # the grid with extra rows/columns
        r_offset = 0
        col_offsets = defaultdict(int)
        tab_plots = []

        for r in range(self.rows):
            # Compute row offset
            row = [(k, sp) for k, sp in self.subplots.items() if k[0] == r]
            row_padded = any(len(sp.layout) > 2 for k, sp in row)
            if row_padded:
                r_offset += 1

            for c in range(self.cols):
                subplot = self.subplots.get((r, c), None)

                # Compute column offset
                col = [(k, sp) for k, sp in self.subplots.items() if k[1] == c]
                col_padded = any(len(sp.layout) > 1 for k, sp in col)
                if col_padded:
                    col_offsets[r] += 1
                c_offset = col_offsets.get(r, 0)

                if subplot is None:
                    continue

                shared_plots = list(passed_plots) if self.shared_axes else None
                subplots = subplot.initialize_plot(ranges=ranges,
                                                   plots=shared_plots)
                nsubplots = len(subplots)

                # If tabs enabled lay out AdjointLayout on grid
                if self.tabs:
                    title = subplot.subplots['main']._format_title(
                        self.keys[-1], dimensions=False)
                    if not title:
                        title = ' '.join(self.paths[r, c])
                    if nsubplots == 1:
                        grid = subplots[0]
                    elif nsubplots == 2:
                        grid = gridplot([subplots],
                                        merge_tools=self.merge_tools,
                                        toolbar_location=self.toolbar)
                    else:
                        # Third subplot is a marginal; pad its row with None
                        grid = [[subplots[2], None], subplots[:2]]
                        grid = gridplot(children=grid,
                                        merge_tools=self.merge_tools,
                                        toolbar_location=self.toolbar)
                    tab_plots.append((title, grid))
                    continue

                # Situate plot in overall grid
                if nsubplots > 2:
                    plot_grid[r + r_offset - 1][c + c_offset - 1] = subplots[2]
                plot_column = plot_grid[r + r_offset]
                if nsubplots > 1:
                    plot_column[c + c_offset - 1] = subplots[0]
                    plot_column[c + c_offset] = subplots[1]
                else:
                    plot_column[c + c_offset - int(col_padded)] = subplots[0]
                passed_plots.append(subplots[0])

        # Wrap in appropriate layout model
        kwargs = dict(sizing_mode=self.sizing_mode)
        if self.tabs:
            plots = filter_toolboxes([p for t, p in tab_plots])
            panels = [Panel(child=child, title=t) for t, child in tab_plots]
            layout_plot = Tabs(tabs=panels)
        else:
            plot_grid = layout_padding(plot_grid, self.renderer)
            plot_grid = filter_toolboxes(plot_grid)
            plot_grid = pad_plots(plot_grid)
            layout_plot = gridplot(children=plot_grid,
                                   toolbar_location=self.toolbar,
                                   merge_tools=self.merge_tools,
                                   **kwargs)

        # Prepend the title div, if any, above the layout
        title = self._get_title_div(self.keys[-1])
        if title:
            self.handles['title'] = title
            layout_plot = Column(title, layout_plot, **kwargs)

        self.handles['plot'] = layout_plot
        # NOTE(review): in the non-tabs branch ``plots`` still holds the
        # original argument (possibly None) -- confirm this is intended
        self.handles['plots'] = plots

        self._update_callbacks(layout_plot)
        if self.shared_datasource:
            self.sync_sources()

        if self.top_level:
            self.init_links()

        self.drawn = True

        return self.handles['plot']
示例#16
0
def plot_frames(data,
                idx=0,
                col=0,
                scale='linear',
                trace_coeffs=None,
                saturation=0.8,
                width=1000,
                title=None,
                wavecal=None):
    """
    Plot a cube of SOSS frames with interactive frame and column sliders

    Parameters
    ----------
    data: sequence
        The 3D data to plot, shape (nframes, nrows, ncols)
    idx: int
        Index of the frame to display initially
    col: int
        Index of the column to highlight initially
    scale: str
        Plot scale, ['linear', 'log']
    trace_coeffs: sequence
        Plot the traces for the given coefficients
    saturation: float
        The full-well fraction to be considered saturated, (0, 1)
    width: int
        Width of the image figures in pixels
    title: str
        A title for the plot
    wavecal: np.ndarray
        A wavelength calibration map for each pixel

    Returns
    -------
    bokeh.layouts.column
        The tabbed Counts/SNR/Saturation figures plus slider widgets
    """
    # Determine subarray
    nframes, nrows, ncols = data.shape

    # Remove infs
    # NOTE(review): this writes NaNs into the caller's array in place --
    # confirm that side effect is intended
    data[data == np.inf] = np.nan

    # Get data, snr, and saturation for plotting
    dat = data
    snr = np.sqrt(data)
    fullWell = 65536.0  # full-well depth in ADU used for the saturation map
    sat = dat > saturation * fullWell
    sat = sat.astype(int)

    # Fix log scale plot values (clip below 1 so the log mapping is defined)
    if scale == 'log':
        dat[dat < 1.] = 1.
        snr[snr < 1.] = 1.

    # Broadcast the data
    frames = np.arange(nframes)
    columns = np.arange(ncols)
    rows = np.arange(nrows)
    # verticals[n] is the constant x=n line used to mark column n on the image
    verticals = np.tile(np.arange(ncols), (nrows, 1)).T

    # Wrap the data in ColumnDataSources
    # *_available sources hold every frame/column; the JS callback below
    # copies the selected slice into the *_visible sources that are rendered
    source_available = ColumnDataSource(data=dict(
        **{'counts{}'.format(n): dat[n]
           for n in frames}, **{'snr{}'.format(n): snr[n]
                                for n in frames}, **
        {'saturation{}'.format(n): sat[n]
         for n in frames}))
    source_visible = ColumnDataSource(
        data=dict(counts=[dat[idx]], snr=[snr[idx]], saturation=[sat[idx]]))
    vertical_available = ColumnDataSource(data=dict(
        **{'vertical{}'.format(n): vert
           for n, vert in enumerate(verticals)}))
    vertical_visible = ColumnDataSource(
        data=dict(column=rows, vertical=verticals[col]))
    col_visible = ColumnDataSource(data=dict(columns=rows,
                                             counts=dat[0, :, col],
                                             snr=snr[0, :, col],
                                             saturation=sat[0, :, col]))
    # Precompute every (column, frame) cut so the callback can swap data
    # entirely client-side
    col_dict = {}
    for fnum in frames:
        for cnum in columns:
            for datacube, pname in zip([dat, snr, sat],
                                       ['counts', 'snr', 'saturation']):
                col_dict['{}{}_{}'.format(pname, cnum,
                                          fnum)] = datacube[fnum, :, cnum]
    col_available = ColumnDataSource(data=col_dict)

    # Set the tooltips
    tooltips = [("(x,y)", "($x{int}, $y{int})"), ("ADU/s", "@counts"),
                ("SNR", "@snr"), ('Saturation', '@saturation')]
    col_color = 'blue'

    # Add wavelength calibration if possible
    if isinstance(wavecal, np.ndarray):
        # Single 2D wavelength map matching the frame shape
        if wavecal.shape == dat[idx].shape:
            source_visible.data['wave1'] = [wavecal]
            tooltips.append(("Wavelength", "@wave1"))
        # One wavelength map per spectral order
        if wavecal.ndim == 3 and wavecal.shape[0] == 3:
            source_visible.data['wave1'] = [wavecal[0]]
            source_visible.data['wave2'] = [wavecal[1]]
            source_visible.data['wave3'] = [wavecal[2]]
            tooltips.append(("Wave 1", "@wave1"))
            tooltips.append(("Wave 2", "@wave2"))
            tooltips.append(("Wave 3", "@wave3"))

    # Set shared plot params
    x_range = (0, ncols)
    y_range = (0, nrows)
    height = int(nrows / 2.) + 160
    toolbar = 'right'

    # Draw the figures
    tabs = []
    for pdata, pname, ptype, ylabel in zip(
        [dat, snr, sat], [
            'Counts', 'SNR', 'Saturation ({}% Full Well)'.format(
                saturation * 100)
        ], ['counts', 'snr', 'saturation'],
        ['Count Rate [ADU/s]', 'SNR', 'Saturated']):

        # Make the figure
        fig_title = '{} - {}'.format(title, pname)

        # Get min and max
        vmin = np.nanmin(pdata)
        vmax = np.nanmax(pdata)

        # Saturation plot is different: a binary map with named ticks
        if ptype == 'saturation':
            formatter = FuncTickFormatter(
                code="""return {0: 'Unsaturated', 1: 'Saturated'}[tick]""")
            color_map = ['#404387', '#FDE724']
            ticker = FixedTicker(ticks=[vmin, vmax])

        # Counts and SNR are similar plots
        else:
            formatter = BasicTickFormatter()
            color_map = 'Viridis256'
            ticker = BasicTicker()

        # Set the plot scale
        if scale == 'log':
            mapper = LogColorMapper(palette=color_map, low=vmin, high=vmax)
        else:
            mapper = LinearColorMapper(palette=color_map, low=vmin, high=vmax)

        # Plot the image data
        fig = figure(x_range=x_range,
                     y_range=y_range,
                     tooltips=tooltips,
                     width=width,
                     height=height,
                     title=fig_title,
                     toolbar_location=toolbar,
                     toolbar_sticky=True)
        fig.image(source=source_visible,
                  image=ptype,
                  x=0,
                  y=0,
                  dw=ncols,
                  dh=nrows,
                  color_mapper=mapper)

        # Plot the line indicating the column
        fig.line('vertical',
                 'column',
                 source=vertical_visible,
                 color=col_color,
                 line_width=3)

        # Add the colorbar
        color_bar = ColorBar(color_mapper=mapper,
                             ticker=ticker,
                             formatter=formatter,
                             orientation="horizontal",
                             location=(0, 0))
        fig.add_layout(color_bar, 'below')

        # Plot the column data
        col_fig = figure(width=width,
                         height=300,
                         toolbar_location=toolbar,
                         toolbar_sticky=True)
        col_fig.step('columns', ptype, source=col_visible, color=col_color)
        col_fig.xaxis.axis_label = 'Row'
        col_fig.yaxis.axis_label = ylabel
        col_fig.y_range = Range1d(vmin * 0.9, vmax * 1.1)
        col_fig.x_range = Range1d(*y_range)

        # Plot the trace polynomials
        if trace_coeffs is not None:
            for coeffs in trace_coeffs:
                Y = np.polyval(coeffs, columns)
                fig.line(columns, Y, color='red')

        # Add the figure to the tab list
        tabs.append(Panel(child=column(fig, col_fig), title=pname))

    # Make the final tabbed figure
    final = Tabs(tabs=tabs)

    # Write JS code
    code = """
        var vis = visible.data;
        var avail = available.data;
        var frame = fr_slide.value.toString(10);

        var viscol = col_vis.data;
        var availcol = col_avail.data;
        var col = col_slide.value.toString(10);

        var visvert = vert_vis.data;
        var availvert = vert_avail.data;

        vis['counts'] = [avail['counts'.concat(frame)]];
        vis['snr'] = [avail['snr'.concat(frame)]];
        vis['saturation'] = [avail['saturation'.concat(frame)]];

        viscol['counts'] = availcol['counts'.concat(col, '_', frame)];
        viscol['snr'] = availcol['snr'.concat(col, '_', frame)];
        viscol['saturation'] = availcol['saturation'.concat(col, '_', frame)];

        visvert['vertical'] = availvert['vertical'.concat(col)];

        visible.change.emit();
        col_vis.change.emit();
        vert_vis.change.emit();
    """

    # Make the column slider
    column_slider = Slider(title='Column',
                           value=col,
                           start=0,
                           end=ncols - 1,
                           step=1)

    # Make the frame slider
    if nframes - 1 > 0:
        frame_slider = Slider(title='Frame',
                              value=idx,
                              start=0,
                              end=nframes - 1,
                              step=1)
    else:
        # Single frame: no slider; hardwire frame index 0 into the JS
        frame_slider = None
        code = code.replace('fr_slide.value.toString(10);', '0')

    # CustomJS callback to update the three plots on slider changes
    callback = CustomJS(args=dict(visible=source_visible,
                                  available=source_available,
                                  col_vis=col_visible,
                                  col_avail=col_available,
                                  vert_vis=vertical_visible,
                                  vert_avail=vertical_available,
                                  fr_slide=frame_slider,
                                  col_slide=column_slider),
                        code=code)

    # Add callback to column slider
    column_slider.js_on_change('value', callback)

    # Add callback to frame slider
    if frame_slider is not None:
        frame_slider.js_on_change('value', callback)
        return column(final, frame_slider, column_slider)

    else:
        return column(final, column_slider)
示例#17
0
                   toolbar_location="left",
                   tools=[TOOLS])
sp_legend.circle([-30, -30, -30, -30], [25, 8, -9, -26],
                 color=["#0000ff", "#ff0000", "#00ff00", "#00ffff"],
                 radius=5)
sp_legend.text([-10, -10, -10, -10], [24, 7, -10, -27],
               text=["Accepted", "Rejected", "Middle Kappa", "Ignored"],
               text_font_size="12pt",
               text_align="left",
               text_baseline="middle",
               text_font_style="normal")

sp_kappa.yaxis.axis_label_text_font_size = "12pt"
sp_tab_kappa = Panel(child=sp_kappa, title='Sorted by Kappa')
sp_tab_legend = Panel(child=sp_legend, title='Legend')
sp_tabs_kappa = Tabs(tabs=[sp_tab_kappa, sp_tab_legend])

sp_rho = figure(tools=[TOOLS, HoverRho],
                width=325,
                height=250,
                y_axis_label='Rho',
                toolbar_location=None,
                x_range=sp_kappa.x_range)
sp_rho.circle('loc_by_rho', 'rho', size=5, color='comp_color', source=Source)
sp_rho.yaxis.axis_label_text_font_size = "12pt"
sp_tab_rho = Panel(child=sp_rho, title='Sorted by Rho')
sp_tabs_rho = Tabs(tabs=[sp_tab_rho])

sp_var = figure(tools=[TOOLS, HoverVar],
                width=325,
                height=250,
示例#18
0
def plot_frame(frame,
               cols=None,
               scale='linear',
               trace_coeffs=None,
               saturation=0.8,
               title=None,
               wavecal=None):
    """
    Plot a single SOSS frame as tabbed Counts/SNR/Saturation images

    Parameters
    ----------
    frame: sequence
        The 2D frame to plot, shape (nrows, ncols)
    cols: int, list, tuple
        The 1D column(s) to plot
    scale: str
        Plot scale, ['linear', 'log']
    trace_coeffs: sequence
        Plot the traces for the given coefficients
    saturation: float
        The full-well fraction to be considered saturated, (0, 1)
    title: str
        A title for the plot
    wavecal: np.ndarray
        A wavelength calibration map for each pixel

    Returns
    -------
    bokeh.models.widgets.Tabs
        The tabbed figure
    """
    # Determine subarray dimensions
    nrows, ncols = frame.shape

    # Get data, snr, and saturation for plotting
    dat = frame
    snr = np.sqrt(frame)
    fullWell = 65536.0  # full-well depth in ADU used for the saturation map
    sat = dat > saturation * fullWell
    sat = sat.astype(int)
    dh, dw = dat.shape

    # Clip sub-unity values for log scale. np.where returns a new array so
    # the caller's input frame is no longer mutated (the previous in-place
    # assignment modified the input)
    if scale == 'log':
        dat = np.where(dat < 1., 1., dat)

    # Set the source data
    source = dict(data=[dat], snr=[snr], saturation=[sat])

    # Set the tooltips
    tooltips = [("(x,y)", "($x{int}, $y{int})"), ("ADU/s", "@data"),
                ("SNR", "@snr"), ('Saturation', '@saturation')]

    # Add wavelength calibration if possible
    if isinstance(wavecal, np.ndarray):
        # Single 2D wavelength map matching the frame shape
        if wavecal.shape == frame.shape:
            source['wave1'] = [wavecal]
            tooltips.append(("Wavelength", "@wave1"))
        # One wavelength map per spectral order
        if wavecal.ndim == 3 and wavecal.shape[0] == 3:
            source['wave1'] = [wavecal[0]]
            source['wave2'] = [wavecal[1]]
            source['wave3'] = [wavecal[2]]
            tooltips.append(("Wave 1", "@wave1"))
            tooltips.append(("Wave 2", "@wave2"))
            tooltips.append(("Wave 3", "@wave3"))

    # Set shared plot params
    x_range = (0, ncols)
    y_range = (0, nrows)
    height = int(nrows / 2.) + 160
    toolbar = 'above'

    # Draw the figures
    tabs = []
    for pname, ptype in zip([
            'Counts', 'SNR', 'Saturation ({}% Full Well)'.format(
                saturation * 100)
    ], ['data', 'snr', 'saturation']):

        # Make the figure
        fig_title = '{} - {}'.format(title, pname)
        fig = figure(x_range=x_range,
                     y_range=y_range,
                     tooltips=tooltips,
                     width=1024,
                     height=height,
                     title=fig_title,
                     toolbar_location=toolbar,
                     toolbar_sticky=True)

        # Get the data
        vals = source[ptype][0]

        # Saturation plot is different: a binary map with named ticks
        if ptype == 'saturation':
            vmin = 0
            vmax = 1
            formatter = FuncTickFormatter(
                code="""return {0: 'Unsaturated', 1: 'Saturated'}[tick]""")
            color_map = ['#404387', '#FDE724']
            ticker = FixedTicker(ticks=[vmin, vmax])

        # Counts and SNR are similar plots
        else:
            vmin = int(np.nanmin(vals[vals >= 0]))
            vmax = int(np.nanmax(vals[vals < np.inf]))
            formatter = BasicTickFormatter()
            color_map = 'Viridis256'
            ticker = BasicTicker()

        # Set the plot scale
        if scale == 'log':
            mapper = LogColorMapper(palette=color_map, low=vmin, high=vmax)
        else:
            mapper = LinearColorMapper(palette=color_map, low=vmin, high=vmax)

        # Plot the frame
        fig.image(source=source,
                  image=ptype,
                  x=0,
                  y=0,
                  dw=dw,
                  dh=dh,
                  color_mapper=mapper)
        color_bar = ColorBar(color_mapper=mapper,
                             ticker=ticker,
                             formatter=formatter,
                             orientation="horizontal",
                             location=(0, 0))

        # Plot the trace polynomials across the full frame width
        # (was hard-coded to 2048; identical for standard SOSS frames)
        if trace_coeffs is not None:
            X = np.linspace(0, ncols, ncols)

            # Check number of traces
            if np.array(trace_coeffs).ndim == 1:
                trace_coeffs = [trace_coeffs]

            for coeffs in trace_coeffs:
                Y = np.polyval(coeffs, X)
                fig.line(X, Y, color='red', line_dash='dashed')

        # Add the colorbar
        fig.add_layout(color_bar, 'below')

        # Plot the column data
        col_fig = None
        col_colors = ['blue', 'green', 'purple', 'cyan', 'orange']
        if cols is not None:

            # Make sure it's iterable
            if isinstance(cols, int):
                cols = [cols]

            # Initialize column figure
            col_fig = figure(width=1024,
                             height=300,
                             toolbar_location=toolbar,
                             toolbar_sticky=True)

            for n, col in enumerate(cols):
                # Cycle colors so more than five columns don't raise
                col_color = col_colors[n % len(col_colors)]
                # Use the actual row count (was hard-coded to 256, which
                # breaks on frames with a different number of rows)
                col_fig.step(np.arange(nrows),
                             vals[:, col],
                             color=col_color,
                             legend='Col {}'.format(col))
                col_fig.y_range = Range1d(vmin * 0.9, vmax * 1.1)
                col_fig.x_range = Range1d(*y_range)

                # Add line to image spanning all rows (was hard-coded to 256)
                fig.line([col, col], [0, nrows], color=col_color, line_width=3)

            # Update click policy
            col_fig.legend.click_policy = 'hide'

        if col_fig is not None:

            # Add the image and column figures to the tab list
            tabs.append(Panel(child=column([fig, col_fig]), title=pname))

        else:

            # No column plot requested
            tabs.append(Panel(child=fig, title=pname))

    # Make the final tabbed figure
    final = Tabs(tabs=tabs)

    return final
def new_create_app(doc, folder, dataref, data_name, data_name2, netmode, activities, manip_mode, extension="2D/dipsi2phpr_20_bucketlist.csv", correction=True, B=0.5, A=0.1, sym=True, net=True, loadwith="PP2D", nfeatures=100):
    """
    Create the complete bokeh application for Plasmodesma results analysis.

    Parameters
    ----------
    doc: the bokeh Document in which the app is built.
    folder: the folder in which the results of plasmodesma are stored.
    dataref: file name of the reference dataset, always loaded with loadStd2D.
    data_name: name of the first dataset to display & analyse.
    data_name2: name of the second dataset to display & analyse.
    netmode: the cleaning/denoising mode desired for plasmodesma (BucketUtilities).
    activities: the activities corresponding to the fractions present in the folder.
    manip_mode: display mode forwarded to the plotting utilities.
    extension: relative path of the bucket-list CSV used to prepare the analysis.
    correction: when False, A and B are zeroed so the t(y) correction does nothing.
    B, A: correction parameters forwarded to the analysis plots.
    sym, net: symmetrization and denoising flags forwarded to the loaders.
    loadwith: loader for the two datasets, either "Std2D" or "PP2D".
    nfeatures: number of features retained by the RFE/regression analyses.
    """
    BU.NETMODE = netmode
    name = os.path.join(folder, data_name)
    name2 = os.path.join(folder, data_name2)
    refname = os.path.join(folder, dataref)

    def load(loadwith, name, net=net, sym=sym):
        """Dispatch to the BucketUtilities loader selected by loadwith."""
        if loadwith == "Std2D":
            return BU.loadStd2D(name, net=net, sym=sym)
        if loadwith == "PP2D":
            return BU.loadPP2D(name, net=net, sym=sym)

    # Load the two datasets, the reference spectrum, and the analysis matrix
    Im1 = load(loadwith=loadwith, name=name, net=net, sym=sym)
    Im2 = load(loadwith=loadwith, name=name2, net=net, sym=sym)
    Imref = load(loadwith="Std2D", name=refname, net=net, sym=sym)
    X = prepare_analysis(folder, dataref, extension=extension, sym=sym, net=net)

    # Visualization plots: both datasets plus their ratio and difference,
    # each overlaid with the reference as contours
    Graph1 = BokehApp_Slider_Plot(*Im1, manip_mode=manip_mode, title=data_name)
    Graph2 = BokehApp_Slider_Plot(*Im2, dbk=Graph1.dbk, manip_mode=manip_mode, title=data_name2)
    GraphRatio = BokehApp_Slider_Plot(Im1[0], Im1[1], Im1[2]/(Im2[2]+1e-5), dbk=Graph1.dbk, manip_mode=manip_mode,
                                      title="Ratio")
    GraphRatio.add_multiline(*Imref, manip_mode=manip_mode, title="Reference", cmap=cm.autumn, levels=[1])
    GraphSubstract = BokehApp_Slider_Plot(Im1[0], Im1[1], Im1[2]-Im2[2], dbk=Graph1.dbk, manip_mode=manip_mode,
                                          title="Subtraction")
    GraphSubstract.add_multiline(*Imref, manip_mode=manip_mode, title="Reference", cmap=cm.autumn, levels=[1])

    # Disabling the correction zeroes both parameters so t(y) does nothing
    if not correction:
        A = 0
        B = 0

    # Global analysis plots: feature selection (RFE) and linear regression
    GraphRFE = AnalysisPlots(X=X, Y=activities, A=A, B=B, D1=Im1, D2=Im2, nfeatures=nfeatures, manip_mode=manip_mode, mode="RFE", dbk=Graph1.dbk, title="RFE")
    GraphRFE.add_multiline(*Imref, manip_mode=manip_mode, title="Reference", cmap=cm.autumn, levels=[1])

    GraphLinReg = AnalysisPlots(X=X, Y=activities, A=A, B=B, D1=Im1, D2=Im2, nfeatures=nfeatures, manip_mode=manip_mode, mode="LinReg", dbk=Graph1.dbk, title="Linear Regression")
    GraphLinReg.add_multiline(*Imref, manip_mode=manip_mode, title="Reference", cmap=cm.autumn, levels=[1])

    # Set up layouts and add to document
    tab1 = Panel(child=column(row(Graph1.widget, Graph2.widget), row(GraphRatio.widget, GraphSubstract.widget)),
                 title="Visualization")
    tab2 = Panel(child=column(row(GraphRFE.widget, GraphLinReg.widget)), title="Global Analysis")

    doc.add_root(Tabs(tabs=[tab1, tab2]))

    doc.title = "SMARTE"

    # safe_load: yaml.load without an explicit Loader is unsafe and raises
    # TypeError on PyYAML >= 6
    doc.theme = Theme(json=yaml.safe_load("""
        attrs:
            Figure:
                background_fill_color: "#DDDDDD"
                outline_line_color: white
                toolbar_location: right
                height: 450
                width: 450
            Grid:
                grid_line_dash: [6, 4]
                grid_line_color: white
    """))
示例#20
0
# Countries and forecast horizons (in days) to fit and predict
country = ["India", "Italy", "New Zealand"]
n_days = [75, 150, 225, 300]

t = None
initial_conditions = None
# Fit SIR parameters and run predictions for every (country, horizon) pair.
# Results are keyed by "<country>,<n_days>" in the module-level dicts
# S/I/R/params and S_p/I_p/R_p.
for c in country:
    for n in n_days:
        key = c + "," + str(n)
        # min(n, 314) caps the fitting window -- presumably the number of
        # days of data available; TODO confirm
        S[key], I[key], R[key], params[
            key], initial_conditions, t = estimate_params(
                confirmed_csv, recovered_csv, death_csv, population_dict, c,
                start_date, min(n, 314), n)
        S_p[key], I_p[key], R_p[key] = predict_SIR(deriv_SIR,
                                                   initial_conditions, t,
                                                   params[key])
#*******************************************************************************
#***********************Visualization*******************************************
output_file("SIR_dashboard.html")

# Build one tab per visualization and assemble the dashboard.
# NOTE(review): initial_conditions and t hold only the values from the last
# loop iteration above -- confirm the visualizations expect that
display_tabs = []
display_tabs.append(visualize(country, n_days, S, I, R, S_p, I_p, R_p))
display_tabs.append(visualize_600(country, n_days, initial_conditions))
display_tabs.append(visualize_parameter_sweep(params, initial_conditions, t))

dashboard = Tabs(tabs=display_tabs)
show(dashboard)
#*******************************************************************************

################################################################################
#############################End################################################
################################################################################
示例#21
0
def _create_ui_components() -> (Figure, ColumnDataSource):  # pylint: disable=too-many-statements
    """Build the aspect/opinion lexicon-editing UI and wire its callbacks.

    Creates the editable aspect/opinion DataTables with example panes, the
    Edit/Extract/Classify dropdowns (all driven by a shared CustomJS snippet
    loaded from dropdown.js), and the Python callbacks that run lexicon
    extraction (TrainSentiment) and sentiment classification
    (SentimentSolution).

    NOTE(review): the annotation says (Figure, ColumnDataSource) but the
    function actually returns the top-level bokeh layout — confirm which
    is intended.
    """
    # Shared with other module-level handlers via globals.
    global asp_table_source, asp_filter_src, op_table_source, op_filter_src
    global stats, aspects, tabs, lexicons_dropdown
    stats = pd.DataFrame(columns=['Quantity', 'Score'])
    aspects = pd.Series([])

    def new_col_data_src():
        # Source used by the JS file-upload widgets: base64 contents + name.
        return ColumnDataSource({'file_contents': [], 'file_name': []})

    large_text = HTMLTemplateFormatter(template='''<div><%= value %></div>''')

    def data_column(title):
        # Table column whose header is styled via the "header" CSS class.
        return TableColumn(field=title,
                           title='<span class="header">' + title + '</span>',
                           formatter=large_text)

    asp_table_columns = [
        data_column('Term'),
        data_column('Alias1'),
        data_column('Alias2'),
        data_column('Alias3')
    ]
    op_table_columns = [
        data_column('Term'),
        data_column('Score'),
        data_column('Polarity')
    ]

    asp_table_source = empty_table('Term', 'Alias1', 'Alias2', 'Alias3')
    asp_filter_src = empty_table('Term', 'Alias1', 'Alias2', 'Alias3')
    asp_src = new_col_data_src()

    # NOTE(review): 'Polarity' appears twice here, yet infer_file_callback
    # below reads an 'isAcquired' column — the second 'Polarity' looks like
    # it should be 'isAcquired'; confirm.
    op_table_source = empty_table('Term', 'Score', 'Polarity', 'Polarity')
    op_filter_src = empty_table('Term', 'Score', 'Polarity', 'Polarity')
    op_src = new_col_data_src()

    asp_table = DataTable(source=asp_table_source,
                          selectable='checkbox',
                          columns=asp_table_columns,
                          editable=True,
                          width=600,
                          height=500)
    op_table = DataTable(source=op_table_source,
                         selectable='checkbox',
                         columns=op_table_columns,
                         editable=True,
                         width=600,
                         height=500)

    asp_examples_box = _create_examples_table()
    op_examples_box = _create_examples_table()
    asp_layout = layout([[asp_table, asp_examples_box]])
    op_layout = layout([[op_table, op_examples_box]])
    asp_tab = Panel(child=asp_layout, title="Aspect Lexicon")
    op_tab = Panel(child=op_layout, title="Opinion Lexicon")
    tabs = Tabs(tabs=[asp_tab, op_tab], width=700, css_classes=['mytab'])

    lexicons_menu = [("Open", "open"), ("Save", "save")]
    lexicons_dropdown = Dropdown(label="Edit Lexicons",
                                 button_type="success",
                                 menu=lexicons_menu,
                                 width=140,
                                 height=31,
                                 css_classes=['mybutton'])

    train_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
    train_dropdown = Dropdown(label="Extract Lexicons",
                              button_type="success",
                              menu=train_menu,
                              width=162,
                              height=31,
                              css_classes=['mybutton'])

    inference_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
    inference_dropdown = Dropdown(label="Classify",
                                  button_type="success",
                                  menu=inference_menu,
                                  width=140,
                                  height=31,
                                  css_classes=['mybutton'])

    text_status = TextInput(value="Select training data",
                            title="Train Run Status:",
                            css_classes=['statusText'])
    text_status.visible = False

    train_src = new_col_data_src()
    infer_src = new_col_data_src()

    # All three dropdowns share one JS snippet; the args dict is built once
    # and then tweaked before each js_on_change registration.
    with open(join(dirname(__file__), "dropdown.js")) as f:
        args = dict(clicked=lexicons_dropdown,
                    asp_filter=asp_filter_src,
                    op_filter=op_filter_src,
                    asp_src=asp_src,
                    op_src=op_src,
                    tabs=tabs,
                    text_status=text_status,
                    train_src=train_src,
                    infer_src=infer_src,
                    train_clicked=train_dropdown,
                    infer_clicked=inference_dropdown,
                    opinion_lex_generic="")
        code = f.read()

    args['train_clicked'] = train_dropdown
    train_dropdown.js_on_change('value', CustomJS(args=args, code=code))

    # NOTE(review): the key here is 'train_clicked' but the value is the
    # inference dropdown — 'infer_clicked' looks intended; verify against
    # dropdown.js.
    args['train_clicked'] = inference_dropdown
    inference_dropdown.js_on_change('value', CustomJS(args=args, code=code))

    args['clicked'] = lexicons_dropdown
    lexicons_dropdown.js_on_change('value', CustomJS(args=args, code=code))

    def update_filter_source(table_source, filter_source):
        # Mirror only the checked rows into the filter source, dropping the
        # first column (the index column produced by to_df()).
        df = table_source.to_df()
        sel_inx = sorted(table_source.selected.indices)
        df = df.iloc[sel_inx, 1:]
        new_source = ColumnDataSource(df)
        filter_source.data = new_source.data

    def update_examples_box(data, examples_box, old, new):
        # Show examples for the single row whose selection state just flipped.
        # Columns 4..23 presumably hold example sentences — TODO confirm.
        examples_box.source.data = {'Examples': []}
        unselected = list(set(old) - set(new))
        selected = list(set(new) - set(old))
        if len(selected) <= 1 and len(unselected) <= 1:
            examples_box.source.data.update({
                'Examples':
                [str(data.iloc[unselected[0], i])
                 for i in range(4, 24)] if len(unselected) != 0 else
                [str(data.iloc[selected[0], i]) for i in range(4, 24)]
            })

    def asp_selected_change(_, old, new):
        # Aspect-table checkbox toggled: refresh filter source and examples.
        global asp_filter_src, asp_table_source, aspects_data
        update_filter_source(asp_table_source, asp_filter_src)
        update_examples_box(aspects_data, asp_examples_box, old, new)

    def op_selected_change(_, old, new):
        # Opinion-table checkbox toggled: refresh filter source and examples.
        global op_filter_src, op_table_source, opinions_data
        update_filter_source(op_table_source, op_filter_src)
        update_examples_box(opinions_data, op_examples_box, old, new)

    def read_csv(file_src, headers=False, index_cols=False, readCSV=True):
        # Decode a base64-uploaded CSV (optionally with a JS data-URL prefix)
        # into a DataFrame; returns file_src unchanged when readCSV is False.
        if readCSV:
            raw_contents = file_src.data['file_contents'][0]

            if len(raw_contents.split(",")) == 1:
                b64_contents = raw_contents
            else:
                # remove the prefix that JS adds
                b64_contents = raw_contents.split(",", 1)[1]
            file_contents = base64.b64decode(b64_contents)
            return pd.read_csv(io.BytesIO(file_contents),
                               encoding="ISO-8859-1",
                               keep_default_na=False,
                               na_values={None},
                               engine='python',
                               index_col=index_cols,
                               header=0 if headers else None)
        return file_src

    def read_parsed_files(file_content, file_name):
        # Persist one uploaded parsed-JSON document under SENTIMENT_OUT.
        try:
            # remove the prefix that JS adds
            b64_contents = file_content.split(",", 1)[1]
            file_content = base64.b64decode(b64_contents)
            with open(SENTIMENT_OUT / file_name, 'w') as json_file:
                data_dict = json.loads(file_content.decode("utf-8"))
                json.dump(data_dict, json_file)
        except Exception as e:
            print(str(e))

    # Runs lexicon extraction on the uploaded training data (raw CSV when a
    # single file was uploaded, pre-parsed files otherwise), then pushes the
    # resulting aspect/opinion lexicons back into asp_src / op_src.
    # pylint: disable=unused-argument
    def train_file_callback(attr, old, new):
        global train_data
        SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)
        train = TrainSentiment(parse=True, rerank_model=None)
        if len(train_src.data['file_contents']) == 1:
            train_data = read_csv(train_src, index_cols=0)
            file_name = train_src.data['file_name'][0]
            raw_data_path = SENTIMENT_OUT / file_name
            train_data.to_csv(raw_data_path, header=False)
            print(f'Running_SentimentTraining on data...')
            train.run(data=raw_data_path)
        else:
            f_contents = train_src.data['file_contents']
            f_names = train_src.data['file_name']
            raw_data_path = SENTIMENT_OUT / train_src.data['file_name'][
                0].split('/')[0]
            if not os.path.exists(raw_data_path):
                os.makedirs(raw_data_path)
            for f_content, f_name in zip(f_contents, f_names):
                read_parsed_files(f_content, f_name)
            print(f'Running_SentimentTraining on data...')
            train.run(parsed_data=raw_data_path)

        text_status.value = "Lexicon extraction completed"

        # Feed the acquired aspect lexicon back through asp_src (base64, as
        # if it had been uploaded) so asp_file_callback refreshes the table.
        with io.open(AcquireTerms.acquired_aspect_terms_path, "r") as fp:
            aspect_data_csv = fp.read()
        file_data = base64.b64encode(str.encode(aspect_data_csv))
        file_data = file_data.decode("utf-8")
        asp_src.data = {
            'file_contents': [file_data],
            'file_name': ['nameFile.csv']
        }

        out_path = RerankTerms.out_dir / 'generated_opinion_lex_reranked.csv'
        with io.open(out_path, "r") as fp:
            opinion_data_csv = fp.read()
        file_data = base64.b64encode(str.encode(opinion_data_csv))
        file_data = file_data.decode("utf-8")
        op_src.data = {
            'file_contents': [file_data],
            'file_name': ['nameFile.csv']
        }

    def show_analysis() -> None:
        # Build the Analysis tab (plot + events table) and make it active.
        global stats, aspects, plot, source, tabs
        plot, source = _create_plot()
        events_table = _create_events_table()

        # pylint: disable=unused-argument
        def _events_handler(attr, old, new):
            _update_events(events_table, events_type.active)

        # Toggle display of in-domain / All aspect mentions
        events_type = RadioButtonGroup(
            labels=['All Events', 'In-Domain Events'], active=0)

        analysis_layout = layout([[plot], [events_table]])

        # events_type display toggle disabled
        # analysis_layout = layout([[plot],[events_type],[events_table]])

        analysis_tab = Panel(child=analysis_layout, title="Analysis")
        tabs.tabs.insert(2, analysis_tab)
        tabs.active = 2
        events_type.on_change('active', _events_handler)
        source.selected.on_change('indices', _events_handler)  # pylint: disable=no-member

    # Runs classification on the uploaded data against the aspect/opinion
    # lexicons currently checked in the tables, then opens the Analysis tab.
    # pylint: disable=unused-argument
    def infer_file_callback(attr, old, new):

        # run inference on input data and current aspect/opinion lexicons in view
        global infer_data, stats, aspects

        SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)

        df_aspect = pd.DataFrame.from_dict(asp_filter_src.data)
        aspect_col_list = ['Term', 'Alias1', 'Alias2', 'Alias3']
        df_aspect = df_aspect[aspect_col_list]
        df_aspect.to_csv(SENTIMENT_OUT / 'aspects.csv',
                         index=False,
                         na_rep="NaN")

        df_opinion = pd.DataFrame.from_dict(op_filter_src.data)
        opinion_col_list = ['Term', 'Score', 'Polarity', 'isAcquired']
        df_opinion = df_opinion[opinion_col_list]
        df_opinion.to_csv(SENTIMENT_OUT / 'opinions.csv',
                          index=False,
                          na_rep="NaN")

        solution = SentimentSolution()

        if len(infer_src.data['file_contents']) == 1:
            infer_data = read_csv(infer_src, index_cols=0)
            file_name = infer_src.data['file_name'][0]
            raw_data_path = SENTIMENT_OUT / file_name
            infer_data.to_csv(raw_data_path, header=False)
            print(f'Running_SentimentInference on data...')
            text_status.value = "Running classification on data..."
            stats = solution.run(data=raw_data_path,
                                 aspect_lex=SENTIMENT_OUT / 'aspects.csv',
                                 opinion_lex=SENTIMENT_OUT / 'opinions.csv')
        else:
            f_contents = infer_src.data['file_contents']
            f_names = infer_src.data['file_name']
            raw_data_path = SENTIMENT_OUT / infer_src.data['file_name'][
                0].split('/')[0]
            if not os.path.exists(raw_data_path):
                os.makedirs(raw_data_path)
            for f_content, f_name in zip(f_contents, f_names):
                read_parsed_files(f_content, f_name)
            print(f'Running_SentimentInference on data...')
            text_status.value = "Running classification on data..."
            stats = solution.run(parsed_data=raw_data_path,
                                 aspect_lex=SENTIMENT_OUT / 'aspects.csv',
                                 opinion_lex=SENTIMENT_OUT / 'opinions.csv')

        aspects = pd.read_csv(SENTIMENT_OUT / 'aspects.csv',
                              encoding='utf-8')['Term']
        text_status.value = "Classification completed"
        show_analysis()

    # Aspect lexicon uploaded: load it into the table with all rows checked.
    # pylint: disable=unused-argument
    def asp_file_callback(attr, old, new):
        global aspects_data, asp_table_source
        aspects_data = read_csv(asp_src, headers=True)
        # Replaces None values by empty string
        aspects_data = aspects_data.fillna('')
        new_source = ColumnDataSource(aspects_data)
        asp_table_source.data = new_source.data
        asp_table_source.selected.indices = list(range(len(aspects_data)))

    # Opinion lexicon uploaded: split generic (isAcquired == 'N') rows out
    # for the JS callback and show the acquired rows, all checked.
    # pylint: disable=unused-argument
    def op_file_callback(attr, old, new):
        global opinions_data, op_table_source, lexicons_dropdown, df_opinion_generic
        df = read_csv(op_src, headers=True)
        # Replaces None values by empty string
        df = df.fillna('')
        # Placeholder for generic opinion lexicons from the given csv file
        df_opinion_generic = df[df['isAcquired'] == 'N']
        # Update the argument value for the callback customJS
        lexicons_dropdown.js_property_callbacks.get(
            'change:value')[0].args['opinion_lex_generic'] \
            = df_opinion_generic.to_dict(orient='list')
        opinions_data = df[df['isAcquired'] == 'Y']
        new_source = ColumnDataSource(opinions_data)
        op_table_source.data = new_source.data
        op_table_source.selected.indices = list(range(len(opinions_data)))

    # Debug logging for status-text changes.
    # pylint: disable=unused-argument
    def txt_status_callback(attr, old, new):
        print("Previous label: " + old)
        print("Updated label: " + new)

    text_status.on_change("value", txt_status_callback)

    asp_src.on_change('data', asp_file_callback)
    # pylint: disable=no-member
    asp_table_source.selected.on_change('indices', asp_selected_change)

    op_src.on_change('data', op_file_callback)
    op_table_source.selected.on_change('indices', op_selected_change)  # pylint: disable=no-member

    train_src.on_change('data', train_file_callback)
    infer_src.on_change('data', infer_file_callback)

    return layout(
        [[_create_header(train_dropdown, inference_dropdown, text_status)],
         [tabs]])
示例#22
0
                         inline=True)
# Re-run prediction whenever the materials selection changes.
n_materials.on_change('active', repredict)
n_materials_title = Div(text="""# Materials """)

# Reset button clears the current spectra selection.
btn_rst = Button(label='reset')
btn_rst.on_click(selectSpectra_reset)
# =============================================================================
# ~ Layout setup
# =============================================================================
div1 = Div(text="""<h2>dw</h2>""", )
div2 = Div(text="""<h2>I</h2>""", )
#blank = Div(text="""  """, width=400)
# row_feature = row([n_features_input_title,  Column(n_features_input)])
row_pca = row([n_pca_input_title, Column(n_pca_input)])
row_materials = row(n_materials_title, Column(n_materials))

# Raw-data grid: r/p and s/q are figures defined earlier — TODO confirm.
grid_raw = gridplot([[div1, r, p], [div2, s, q]])

grid_cluster = row(column([varBar, row_pca]),
                   column([row_materials, labeled_materials_image, btn_rst]))

a = Panel(child=mean, title='Average Spectrum')
b = Panel(child=multi_plot, title='Spectra per pixel')

# Final page: clustering controls + spectra tabs on the left, raw grid on
# the right.
right_panel = column([Column(slider), grid_raw])
left_panel = column([grid_cluster, Tabs(tabs=[a, b])])
layout = row(left_panel, right_panel)

# Title shows only the basename of the analyzed file (Windows-style path).
curdoc().title = 'Analysis of file ' + rm.file.split('\\\\')[-1]
curdoc().add_root(layout)
示例#23
0
    def initialize_plot(self, plots=None, ranges=None):
        """Initialize the bokeh models for every subplot in this layout.

        Walks the (row, col) coordinates, initializes each subplot, and
        splices adjoined marginal plots into extra padded rows/columns.
        The grid is then wrapped in Tabs, a gridplot, or the legacy
        VBox/HBox/GridPlot models depending on self.tabs and bokeh_version,
        stored in self.handles['plot'] and returned.
        """
        ranges = self.compute_ranges(self.layout, self.keys[-1], None)
        passed_plots = [] if plots is None else plots
        # One list of bokeh models per layout row; marginals add extra rows.
        plots = [[] for _ in range(self.rows)]
        tab_titles = {}
        insert_rows, insert_cols = [], []
        adjoined = False
        for r, c in self.coords:
            subplot = self.subplots.get((r, c), None)
            if subplot is not None:
                shared_plots = passed_plots if self.shared_axes else None
                subplots = subplot.initialize_plot(ranges=ranges,
                                                   plots=shared_plots)

                # Computes plotting offsets depending on
                # number of adjoined plots
                offset = sum(r >= ir for ir in insert_rows)
                if len(subplots) > 2:
                    adjoined = True
                    # Add pad column in this position
                    insert_cols.append(c)
                    if r not in insert_rows:
                        # Insert and pad marginal row if none exists
                        plots.insert(r + offset,
                                     [None for _ in range(len(plots[r]))])
                        # Pad previous rows
                        for ir in range(r):
                            plots[ir].insert(c + 1, None)
                        # Add to row offset
                        insert_rows.append(r)
                        offset += 1
                    # Add top marginal
                    plots[r + offset - 1] += [subplots.pop(-1), None]
                elif len(subplots) > 1:
                    adjoined = True
                    # Add pad column in this position
                    insert_cols.append(c)
                    # Pad previous rows
                    # NOTE(review): loop variable ir is unused here, while the
                    # branch above pads plots[ir] — confirm plots[r] (not
                    # plots[ir]) is intended.
                    for ir in range(r):
                        plots[r].insert(c + 1, None)
                    # Pad top marginal if one exists
                    if r in insert_rows:
                        plots[r + offset - 1] += 2 * [None]
                else:
                    # Pad top marginal if one exists
                    if r in insert_rows:
                        plots[r + offset - 1] += [None] * (1 +
                                                           (c in insert_cols))
                plots[r + offset] += subplots
                if len(subplots) == 1 and c in insert_cols:
                    plots[r + offset].append(None)
                passed_plots.append(subplots[0])

            if self.tabs:
                # Tab title: the path for plain Layouts, otherwise the
                # pretty-printed key-dimension values.
                if isinstance(self.layout, Layout):
                    tab_titles[r, c] = ' '.join(self.paths[r, c])
                else:
                    dim_vals = zip(self.layout.kdims, self.paths[r, c])
                    tab_titles[r, c] = ', '.join(
                        [d.pprint_value_string(k) for d, k in dim_vals])

        # Replace None types with empty plots
        # to avoid bokeh bug
        if adjoined:
            plots = layout_padding(plots)

        # Wrap in appropriate layout model
        if self.tabs:
            panels = [
                Panel(child=child, title=str(tab_titles.get((r, c))))
                for r, row in enumerate(plots) for c, child in enumerate(row)
                if child is not None
            ]
            layout_plot = Tabs(tabs=panels)
        elif bokeh_version >= '0.12':
            plots = filter_toolboxes(plots)
            plots, width = pad_plots(plots)
            layout_plot = gridplot(children=plots, width=width)
        elif len(plots) == 1 and not adjoined:
            # Legacy (pre-0.12) bokeh layout models below.
            layout_plot = VBox(children=[HBox(children=plots[0])])
        elif len(plots[0]) == 1:
            layout_plot = VBox(children=[p[0] for p in plots])
        else:
            layout_plot = BokehGridPlot(children=plots)

        title = self._get_title(self.keys[-1])
        if title:
            self.handles['title'] = title
            layout_plot = Column(title, layout_plot)
        self.handles['plot'] = layout_plot
        self.handles['plots'] = plots
        if self.shared_datasource:
            self.sync_sources()

        self.drawn = True

        return self.handles['plot']
示例#24
0
def plot_year():
    """Build one tab per lake comparing this year's level to past years.

    For each lake, overlays the current year's daily levels (opaque) on the
    previous seven years' levels (faded), all shifted onto a common axis,
    plus a dashed line at the state summer maximum.  Returns the bokeh
    ``components`` (script, div) pair for embedding.
    """
    df = lldb.to_df()
    limits = mll.required_levels.required_levels
    fig_height = 450
    panels = []

    for lake, colour in zip(df.columns, palette):
        fig = figure(title=lake.title(),
                     x_axis_label=None,
                     x_axis_type='datetime',
                     y_axis_label='Lake Height (feet above sea level)',
                     tools=[],
                     toolbar_location=None,
                     height=fig_height,
                     sizing_mode='stretch_width',
                     css_classes=['no-interaction'])

        latest_year = df.index.year.max()
        # Keep only the last nine calendar years (current + previous eight).
        df = df[df.index.year >= latest_year - 8]
        by_year = df.copy().groupby(df.index.year)
        latest_df = by_year.get_group(latest_year)
        cutoff_day = latest_df.index.dayofyear.max()
        cutoff_day = min(cutoff_day, 365)  # leap years don't exist
        anchor_date = latest_df.index[0]
        summer_max = limits.loc[lake, 'summer_maximum']
        # Dashed reference line at the state-mandated summer maximum.
        max_line = fig.line([latest_df.index.min(),
                             latest_df.index.max()],
                            [summer_max, summer_max],
                            color='#000000',
                            line_width=2,
                            line_dash=[5, 5],
                            line_alpha=0.5)
        for year, idx in by_year.groups.items():
            season = df.loc[idx].copy()
            # Truncate each year to the current day-of-year and shift it onto
            # the current year's axis so the traces overlay.
            season = season[season.index.dayofyear <= cutoff_day]
            season.index = season.index.shift(
                1, anchor_date - season.index[0])
            if year == latest_year:
                alpha = 1
            else:
                alpha = 0.3
            trace = fig.line(season.index,
                             season[lake],
                             color=colour,
                             line_width=2,
                             line_alpha=alpha)
            if year == latest_year:
                this_year_line = trace
            else:
                other_year_line = trace

        fig.xaxis.formatter = DatetimeTickFormatter(
            days=['%b %d'],
            months=['%b'],
            years=['%b'],
        )
        legend = Legend(items=[('This year', [this_year_line]),
                               ('Past 7 years', [other_year_line]),
                               ('State max', [max_line])],
                        location=(0, 20))
        fig.add_layout(legend, 'below')
        fig.legend.orientation = "vertical"
        panels.append(Panel(child=fig, title=lake.title()))

    script, div = components(Tabs(tabs=panels))
    return script, div
示例#25
0
    src = make_dataset(initial_products, 'India', priceDf)

    p = make_plot(src)

    # Put controls in a single element
    controls = WidgetBox(products_selection)

    # Create a row layout
    layout = row(controls, p)

    # Make a tab with the layout
    tab = Panel(child=layout, title = 'Product price over time')

    return tab



priceDf = pd.read_csv("data/only_complete_years_data.csv")
#
#output_file('js_on_change_test.html')

#plot = plotProductPrice(priceDf, 'Wheat', 'India')
#show(plot)

tab1 = priceOverTimeTab(priceDf)

# Put all the tabs into one application
tabs = Tabs(tabs = [tab1])

show(tabs)
示例#26
0
def plot_timeline():
    """Render the lake-level timeline page with two tabbed figures.

    Tab 1 plots each lake's absolute level plus a dashed line at its state
    summer maximum; tab 2 plots each lake's level relative to that maximum.
    Both legends hide lines on click.  Returns the rendered plot.html
    template with the embedded bokeh script/div.
    """
    frame = lldb.to_df()
    required = mll.required_levels.required_levels

    fig_height = 700

    # --- Tab 1: absolute levels -------------------------------------------
    hover_abs = HoverTool(names=[name.title() for name in frame],
                          tooltips=[('lake', '$name'), ('date', '$x{%F}'),
                                    ('height above sea level', '$y{0.00} ft')],
                          formatters={'$x': 'datetime'})
    fig = figure(title="Madison Lake Levels",
                 x_axis_label='date',
                 x_axis_type='datetime',
                 y_axis_label='Lake Height (feet above sea level)',
                 tools=["pan,wheel_zoom,box_zoom,reset,previewsave", hover_abs],
                 height=fig_height,
                 sizing_mode='stretch_width')
    fig.toolbar.logo = None

    level_lines = []
    max_lines = []
    for name, colour in zip(frame.columns, palette):
        level_lines.append(
            fig.line(frame.index,
                     frame[name],
                     color=colour,
                     line_width=2,
                     name=name.title()))
        summer_max = required.loc[name, 'summer_maximum']
        max_lines.append(
            fig.line([frame.index.min(), frame.index.max()],
                     [summer_max, summer_max],
                     color=colour,
                     line_width=2,
                     line_dash=[5, 5],
                     line_alpha=0.8))
    # Invisible glyph so the legend can carry a usage hint as its first entry.
    spacer = fig.circle([], [], color='#ffffff')
    entries = [('Click to hide', [spacer])]
    for name, level_line, max_line in zip(frame.columns, level_lines,
                                          max_lines):
        label = name.title()
        entries.append((label, [level_line]))
        entries.append((label + ' max', [max_line]))
    abs_legend = Legend(items=entries, location=(0, 0))
    abs_legend.click_policy = 'hide'
    fig.add_layout(abs_legend, 'left')
    tab1 = Panel(child=fig, title="Absolute levels")

    # --- Tab 2: levels relative to the state maximum ----------------------
    hover_rel = HoverTool(names=[name.title() for name in frame],
                          tooltips=[('lake', '$name'), ('date', '$x{%F}'),
                                    ('height vs. max', '$y{+0.00} ft')],
                          formatters={'$x': 'datetime'})
    fig = figure(title="Madison Lake Levels - difference from state max",
                 x_axis_label='date',
                 x_axis_type='datetime',
                 y_axis_label='Lake Height (feet above State Max)',
                 tools=["pan,wheel_zoom,box_zoom,reset,previewsave", hover_rel],
                 height=fig_height,
                 sizing_mode='stretch_width')
    fig.toolbar.logo = None

    level_lines = []
    for name, colour in zip(frame.columns, palette):
        level_lines.append(
            fig.line(frame.index,
                     frame[name] - required.loc[name, 'summer_maximum'],
                     name=name.title(),
                     color=colour,
                     line_width=2))
    spacer = fig.circle([], [], color='#ffffff')
    entries = [('Click to hide', [spacer])]
    baseline = fig.line([frame.index.min(), frame.index.max()], [0, 0],
                        color='black',
                        line_dash=[5, 5])
    entries.append(('State Max', [baseline]))
    for name, level_line in zip(frame.columns, level_lines):
        entries.append((name.title(), [level_line]))
    rel_legend = Legend(items=entries, location=(0, 0))
    rel_legend.click_policy = 'hide'
    fig.add_layout(rel_legend, 'left')

    tab2 = Panel(child=fig, title='Levels compared to state maximum')

    script, div = components(Tabs(tabs=[tab1, tab2]))
    return flask.render_template('plot.html',
                                 bokeh_script=script,
                                 plot_div=div)
示例#27
0
def timing_plot(genfn):
    """Draw a timing plot for a prime generator function.

    Plots primes-generated vs. seconds on linked linear and log tabs,
    re-drawing lines saved from previous runs (dashed) for comparison.
    If ``genfn`` was plotted before, its palette slot is reused so the
    colour stays stable across runs.  Timing data is collected either in
    one go (generator takes no argument) or point by point (generator
    takes a size), and saved in the module-level ``_lines`` cache.

    Returns None; the plot is shown (incrementally when ``_incremental``
    is set).
    """
    if not check_fn(genfn):
        return

    global _lines

    def plot(fig, name, vals, num, dash='solid'):
        "Add a line with points to a plot"
        col = _palette[num % len(_palette)]
        fig.line('x', 'y', legend_label=name, source=vals, line_dash=dash, color=col)
        fig.scatter('x', 'y', legend_label=name, source=vals, marker='o', color=col)

    name = genfn.__name__
    exist = None
    args = dict(plot_width=800, plot_height=400, toolbar_location='above', title="Timing")
    linfig = figure(y_range=[0, 1], x_range=DataRange1d(start=0), **args)
    logfig = figure(y_range=[1e-6, 1], x_range=DataRange1d(start=1),
                    x_axis_type='log', y_axis_type='log', **args)
    num = 0
    # Re-draw lines from previous runs (dashed), remembering the palette
    # slot if this generator was plotted before.
    for k, v in _lines.items():
        plot(linfig, k, v, num, 'dashed')
        plot(logfig, k, v, num, 'dashed')
        if k == name:
            exist = num
        num += 1
    source = ColumnDataSource(data=dict(x=[], y=[]))
    # BUG FIX: the previous `exist or num` treated an existing line at
    # palette slot 0 as "not found" (0 is falsy) and assigned a new slot;
    # test explicitly against None instead.
    colour_slot = num if exist is None else exist
    for fig in (linfig, logfig):
        plot(fig, name, source, colour_slot)
        fig.xaxis.axis_label = "Primes"
        fig.xaxis.formatter = NumeralTickFormatter(format='0[.]0 a')
        fig.xgrid.minor_grid_line_color = 'lightgrey'
        fig.xgrid.minor_grid_line_alpha = 0.2
        fig.yaxis.axis_label = "Seconds"
        fig.legend.location = 'bottom_right'
        fig.legend.click_policy = 'hide'
        fig.legend.background_fill_alpha = 0.5
    linfig.yaxis.formatter = BasicTickFormatter()
    logfig.yaxis.formatter = BasicTickFormatter(use_scientific=True, precision=0)
    lintab = Panel(child=linfig, title="Linear")
    logtab = Panel(child=logfig, title="Log")
    tabs = Tabs(tabs=[lintab, logtab])
    handle = None
    if _incremental:
        # Incremental: show plot now, then incrementally add points
        handle = show(tabs, notebook_handle=True)

    # Calling with no argument distinguishes all-at-once generators from
    # ones that take a target size (which raise TypeError here).
    try:
        genfn()
        combined = True
    except TypeError:
        combined = False
    if combined:
        # Generate line in one go
        plot_line_combined(genfn, source, handle)
    else:
        # Generator takes size, need to generate points separately
        plot_line_separate(genfn, source, handle)

    if not _incremental:
        # Plot not shown yet, show it now
        show(tabs)
    # save line data to show on next plot
    _lines[name] = source.data
示例#28
0
def modify_doc(doc):
    """Bokeh application entry point: build the QA-plot dashboard on *doc*.

    Lays out a repo ``TextInput`` plus a tabbed panel containing the Object
    Catalog and Color plots (Source Catalog plots are currently disabled,
    see commented code below), and wires the text input so that entering a
    new repo path rebuilds every plot from a fresh Butler.

    Parameters
    ----------
    doc : bokeh.document.Document
        The document to populate; returned for convenience.
    """
    repo_box = TextInput(value='/project/tmorton/DM-12873/w44',
                         title='rerun',
                         css_classes=['customTextInput'])

    # Object plots: one holoviews widget per DynamicMap, each wrapped in a
    # fixed-size layout so its child can be swapped in place on update.
    object_hvplots = [
        renderer.get_widget(dmap, None, doc) for dmap in object_dmaps
    ]

    object_plots = [
        layout([hvplot.state], sizing_mode='fixed')
        for hvplot in object_hvplots
    ]
    object_tabs = Tabs(tabs=[
        Panel(child=plot, title=name)
        for plot, name in zip(object_plots, config['sections']['object'])
    ])
    object_panel = Panel(child=object_tabs, title='Object Catalogs')

    # Source plots -- disabled pending a fix (see note in update_repo below).
    source_categories = config['sections']['source']
    # source_hvplots = {c : renderer.get_widget(source_dmaps[c], None, doc)
    #                     for c in source_categories}

    # # source_plots = {c : layout([source_hvplots[c].state], sizing_mode='fixed')
    # #                 for c in source_categories}
    # source_plots = {c : gridplot([[None]]) for c in source_categories}
    # source_tract_select = {c : RadioButtonGroup(labels=[str(t) for t in get_tracts(butler)], active=0)
    #                             for c in source_categories}
    # source_filt_select = {c : RadioButtonGroup(labels=wide_filters, active=2)
    #                             for c in source_categories}

    # def update_source(category):
    #     def update(attr, old, new):
    #         t_sel = source_tract_select[category]
    #         f_sel = source_filt_select[category]
    #         new_tract = int(t_sel.labels[t_sel.active])
    #         new_filt = f_sel.labels[f_sel.active]
    #         logging.info('updating {} to tract={}, filt={}...'.format(category, new_tract, new_filt))
    #         dmap = get_source_dmap(butler, category, tract=new_tract, filt=new_filt)
    #         new_hvplot = renderer.get_widget(dmap, None, doc)
    #         source_plots[category].children[0] = new_hvplot.state
    #         logging.info('update complete.')
    #     return update

    # source_tab_panels = []
    # for category in source_categories:
    #     tract_select = source_tract_select[category]
    #     filt_select = source_filt_select[category]
    #     plot = source_plots[category]

    #     tract_select.on_change('active', update_source(category))
    #     filt_select.on_change('active', update_source(category))

    #     l = layout([[tract_select, filt_select], [plot]], sizing_mode='fixed')
    #     source_tab_panels.append(Panel(child=l, title=category))

    # source_tabs = Tabs(tabs=source_tab_panels)
    # source_panel = Panel(child=source_tabs, title='Source Catalogs')

    # Color plots: keyed by category so updates can replace each layout's child.
    color_categories = config['sections']['color']
    color_hvplots = {
        c: renderer.get_widget(color_dmaps[c], None, doc)
        for c in color_categories
    }
    color_plots = {
        c: layout([color_hvplots[c].state], sizing_mode='fixed')
        for c in color_categories
    }

    color_tabs = Tabs(
        tabs=[Panel(child=color_plots[c], title=c) for c in color_categories])
    color_panel = Panel(child=color_tabs, title='Color')

    def update_repo(attr, old, new):
        """on_change callback: point the global butler at *new* and redraw."""
        global butler
        butler = Butler(new)
        logging.info('Changing butler to {}'.format(new))

        # Update Object plots
        logging.info('Updating object plots...')
        object_dmaps = get_object_dmaps(butler=butler)

        new_object_hvplots = [
            renderer.get_widget(dmap, None, doc) for dmap in object_dmaps
        ]

        # Swap each layout's child in place so the existing tabs stay valid.
        for plot, new_plot in zip(object_plots, new_object_hvplots):
            plot.children[0] = new_plot.state

        # Update Source plots
        # for c in source_categories:
        #     source_tract_select[c].labels = [str(t) for t in get_tracts(butler)]

        # # # THIS MUST BE FIXED.  PERHAPS SOURCE PLOTS SHOULD BE EMPTY UNTIL ACTIVATED
        # logging.info('Updating source plots...')
        # source_plots = {c : gridplot([[None]]) for c in source_categories}

        # source_dmaps = get_source_dmaps(butler=butler)
        # new_source_hvplots = {c : renderer.get_widget(source_dmaps[c], None, doc)
        #                       for c in source_categories}
        # for plot,new_plot in zip(source_plots, new_source_hvplots):
        #     plot.children[0] = new_plot.state

        # Update Color plots
        logging.info('Updating color plots...')
        color_dmaps = get_color_dmaps(butler=butler)
        new_color_hvplots = {
            c: renderer.get_widget(color_dmaps[c], None, doc)
            for c in color_categories
        }
        # BUG FIX: the old ``zip(color_plots, new_color_hvplots)`` zipped two
        # dicts, which iterates their *keys* (strings), so ``plot.children``
        # raised AttributeError on first update.  Look layouts up by category.
        for c in color_categories:
            color_plots[c].children[0] = new_color_hvplots[c].state

    repo_box.on_change('value', update_repo)

    # uber_tabs = Tabs(tabs=[object_panel, source_panel, color_panel])
    uber_tabs = Tabs(tabs=[object_panel, color_panel])

    doc.add_root(repo_box)
    doc.add_root(uber_tabs)
    return doc
示例#29
0
# Register the format buttons with the shared JS callback so the client-side
# code can read their state.
# NOTE(review): ``callback=`` on widgets is the pre-1.0 Bokeh CustomJS API;
# newer Bokeh uses ``js_on_change`` -- confirm the pinned bokeh version.
controls_callback.args['format_buttons'] = format_buttons

# Radio buttons selecting which metric to display; the shared JS callback
# re-renders the plots whenever the active button changes.
interest_buttons = RadioButtonGroup(
    labels=['METTR', 'METR', 'Cost of Capital', 'Depreciation'],
    active=0,
    width=700,
    callback=controls_callback)
controls_callback.args['interest_buttons'] = interest_buttons

# Radio buttons selecting the financing assumption.
type_buttons = RadioButtonGroup(
    labels=['Typically Financed', 'Equity Financed', 'Debt Financed'],
    active=0,
    width=700,
    callback=controls_callback)
controls_callback.args['type_buttons'] = type_buttons

# Create Tabs
tab = Panel(child=p, title='Equipment')
tab2 = Panel(child=p2, title='Structures')
tabs = Tabs(tabs=[tab, tab2])
# Grid: tabbed plots on top, two rows of control widgets underneath.
# NOTE: rebinding ``layout`` shadows bokeh.layouts.layout if it was imported.
layout = gridplot(children=[[tabs], [c_nc_buttons, interest_buttons],
                            [format_buttons, type_buttons]])

# Only open a browser window when invoked with a 'show' argument.
if 'show' in sys.argv:
    show(layout)

# Create components
# Standalone <script>/<div> snippets plus CDN resource URLs for embedding
# the dashboard in an external web page.
js, div = components(layout)
cdn_js = CDN.js_files[0]
cdn_css = CDN.css_files[0]
示例#30
0
def make_plot(game):
    """Assemble betting-odds history plots for *game* as a three-tab widget.

    Builds one datetime-axis scatter figure per market (point spread,
    moneyline, over/under), colors the spread and total markers on a
    dynamically binned price scale with matching color bars, and returns
    the figures wrapped in a bokeh ``Tabs``.

    Parameters
    ----------
    game : project game record, passed through to ``convert_odds_to_source``
        (schema not visible here).

    Returns
    -------
    bokeh.models.Tabs with panels "spread", "moneylines", "over/under".
    """
    # TODO: dynamic plot sizing once v 1.0.5 bokeh is released and scaled_width bug is fixed
    colors = Config.COLORS
    tools = "pan"

    def _price_colors(prices, bins):
        # Map each price onto its color bin, clamping the bin index into
        # the valid palette range.
        return [
            colors[min(max(bisect_left(bins, price) - 1, 0),
                       len(colors) - 1)] for price in prices
        ]

    def _palette(lo, hi):
        # Full palette when the price range straddles zero, otherwise only
        # the negative (first 7) or positive half.  NOTE(review): index 7
        # (the palette midpoint) is excluded from both halves -- presumably
        # the neutral color; confirm against Config.COLORS.
        if lo < 0 < hi:
            return colors
        if lo < 0 and hi < 0:
            return colors[:7]
        return colors[8:]

    def _market_figure():
        # All three market figures share size, datetime x-axis, no toolbar.
        return figure(tools=tools,
                      plot_width=700,
                      plot_height=300,
                      x_axis_type="datetime",
                      toolbar_location=None)

    # Hover tools: spread/total figures show odds and price, moneyline only price.
    hover_tool = HoverTool(tooltips=[('time', '@date{%m-%d %H:%M}'),
                                     ('odds', '@odds{0.0 a}'),
                                     ('price', '@prices{0.0 a}')],
                           formatters={'date': 'datetime'},
                           mode='vline')
    hover_tool_ml = HoverTool(tooltips=[('time', '@date{%m-%d %H:%M}'),
                                        ('price', '@prices{0.0 a}')],
                              formatters={'date': 'datetime'},
                              mode='vline')

    # fetch data from db as column sources
    home_spread_source = convert_odds_to_source(game, 'home_spread')
    away_spread_source = convert_odds_to_source(game, 'away_spread')
    home_ml_source = convert_odds_to_source(game, 'home_ml')
    away_ml_source = convert_odds_to_source(game, 'away_ml')
    over_source = convert_odds_to_source(game, 'over')
    under_source = convert_odds_to_source(game, 'under')

    # Dynamic color scale over the combined home/away (resp. over/under)
    # prices; moneyline markers are left uncolored.
    spread_bins, sp_min, sp_max = generate_color_bins(
        np.concatenate((home_spread_source.data['prices'],
                        away_spread_source.data['prices'])))
    over_bins, op_min, op_max = generate_color_bins(
        np.concatenate(
            (over_source.data['prices'], under_source.data['prices'])))
    for src in (home_spread_source, away_spread_source):
        src.data['color'] = _price_colors(src.data['prices'], spread_bins)
    for src in (over_source, under_source):
        src.data['color'] = _price_colors(src.data['prices'], over_bins)

    spread_cmap = LinearColorMapper(palette=_palette(sp_min, sp_max),
                                    low=sp_min,
                                    high=sp_max)
    ou_cmap = LinearColorMapper(palette=_palette(op_min, op_max),
                                low=op_min,
                                high=op_max)

    # gen figures and add tools, color bars
    spread = _market_figure()
    ml = _market_figure()
    over = _market_figure()
    spread.add_layout(
        ColorBar(color_mapper=spread_cmap,
                 location=(0, 0),
                 ticker=FixedTicker(ticks=[sp_min, sp_max])), 'right')
    over.add_layout(
        ColorBar(color_mapper=ou_cmap,
                 location=(0, 0),
                 ticker=FixedTicker(ticks=[op_min, op_max])), 'right')
    spread.add_tools(hover_tool)
    ml.add_tools(hover_tool_ml)
    over.add_tools(hover_tool)

    # Scatter each market: moneyline plots price on y, the others plot odds.
    for src in (home_spread_source, away_spread_source):
        spread.scatter(x='date',
                       y='odds',
                       source=src,
                       color='color',
                       size=Config.MARKER_SIZE)
    for src in (home_ml_source, away_ml_source):
        ml.scatter(x='date', y='prices', source=src, size=Config.MARKER_SIZE)
    for src in (over_source, under_source):
        over.scatter(x='date',
                     y='odds',
                     source=src,
                     color='color',
                     size=Config.MARKER_SIZE)

    # set up each figure as a tab for easy viz
    return Tabs(tabs=[
        Panel(child=spread, title="spread"),
        Panel(child=ml, title="moneylines"),
        Panel(child=over, title="over/under"),
    ])