Exemple #1
0
def draw_graph(ticker, df, lines):
    """Draw a candlestick-style chart for *ticker* with trend lines, saved as PNG.

    :param ticker: symbol used for the plot title and the <ticker>.png file name
    :param df: frame indexed by timestamps with 'high', 'low' and 'trend'
        (+1 rising / -1 falling) columns; mutated in place ('date', 'open'
        and 'close' columns are added)
    :param lines: sequence of (time, price) points; consecutive pairs are
        drawn as colored line segments
    """
    df["date"] = pd.to_datetime(df.index)

    inc = df.trend == 1
    dec = df.trend == -1

    # Candle bar width in milliseconds (the previous comment wrongly said
    # "half day"; this is 30 minutes).
    width_30_min = 30*60*1000

    # Candle bodies span the full high/low range directly.
    df['open'] = df['high']
    df['close'] = df['low']

    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"

    p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1600, title=ticker)
    p.xaxis.major_label_orientation = pi/4
    p.grid.grid_line_alpha = 0.3

    # High-low wick, then rising (light) and falling (red) candle bodies.
    p.segment(df.date, df.high, df.date, df.low, color="black")
    p.vbar(df.date[inc], width_30_min, df.open[inc], df.close[inc], fill_color="#D5E1DD", line_color="black")
    p.vbar(df.date[dec], width_30_min, df.open[dec], df.close[dec], fill_color="#F2583E", line_color="red")

    # Build one line segment per consecutive pair of (time, price) points.
    x = []
    y = []
    for start, end in pairwise(lines):
        start_time, start_price = start
        end_time, end_price = end
        x.append([start_time, end_time])
        y.append([start_price, end_price])

    colors = generate_colors_from_price(y)
    p.multi_line(x, y, line_color=colors, line_width=3)

    export_png(p, filename=ticker + '.png')
Exemple #2
0
def generate_all(gerrit):
    """Aggregate Gerrit review activity by month and export three PNG graphs.

    Writes graphs/review_count.png (reviews created per month),
    graphs/review_duration.png (mean creation-to-submit time in seconds) and
    graphs/project_count.png (distinct projects touched per month).

    :param gerrit: client whose reviews() yields dicts with at least
        'created' and 'project' keys, and optionally 'submitted'
    """
    counter = defaultdict(int)        # month -> number of reviews created
    duration = defaultdict(list)      # month -> creation-to-submit durations (s)
    project_count = defaultdict(set)  # month -> distinct project names

    for review in gerrit.reviews():
        try:
            dt = pendulum.parse(review['created'])
            # Bucket everything by the first day of the creation month.
            month = datetime.datetime(dt.year, dt.month, 1)
            counter[month] += 1
            if 'submitted' in review:
                dts = pendulum.parse(review['submitted'])
                # BUG FIX: .seconds only yields the sub-day remainder of a
                # timedelta, silently dropping whole days from long reviews;
                # total_seconds() counts the full duration.
                duration[month].append((dts - dt).total_seconds())
            project_count[month].add(review['project'])
        except KeyError:
            # Unexpected review shape: dump a truncated view for debugging
            # and abort rather than producing misleading graphs.
            for key, value in review.items():
                print(key, str(value)[:20])
            print(review.keys())
            return

    width = 1200
    height = 600

    # Per-month mean duration and unique-project count.
    summed = {k: sum(v) / len(v) for k, v in duration.items()}
    lened = {k: len(v) for k, v in project_count.items()}

    p = plotting.figure(plot_width=width, plot_height=height, x_axis_type="datetime")
    data = sorted(counter.items())
    df = pandas.DataFrame(data, columns=['Date', 'Review Count'])
    p.line(df['Date'], df['Review Count'], color='navy')
    export_png(p, filename="graphs/review_count.png")

    p = plotting.figure(plot_width=width, plot_height=height, x_axis_type="datetime")
    data = sorted(summed.items())
    df = pandas.DataFrame(data, columns=['Date', 'Average Duration'])
    p.line(df['Date'], df['Average Duration'], color='navy')
    export_png(p, filename="graphs/review_duration.png")

    p = plotting.figure(plot_width=width, plot_height=height, x_axis_type="datetime")
    data = sorted(lened.items())
    df = pandas.DataFrame(data, columns=['Date', 'Unique Projects'])
    p.line(df['Date'], df['Unique Projects'], color='navy')
    export_png(p, filename="graphs/project_count.png")
def generate_schedule_graph(final_orders):
    """Render the bi-weekly polishing schedule as a Gantt-style bar chart.

    Orders whose end time falls inside the two-week window that starts on
    Monday of the current week are drawn as colored bars (Orange=Pending,
    Blue=Progress, Green=Completed).  The chart is written to
    static/schedule.png and static/schedule.html.

    :param final_orders: iterable of dicts with 'order_id', 'product_name',
        'start_datetime', 'end_datetime' (format '%Y-%m-%d %H:%M') and
        'status' (0/1/other) keys
    """
    DF = ps.DataFrame(columns=['Item', 'Start', 'End', 'Status', 'Color'])
    items = []
    dt = datetime.datetime.today()
    # Window: Monday of the current week through two weeks later.
    start = dt - datetime.timedelta(days=dt.weekday())
    end = start + datetime.timedelta(days=14)
    status_styles = {0: ('Pending', 'Orange'), 1: ('Progress', 'Blue')}
    for order in final_orders:
        end_dt = datetime.datetime.strptime(order['end_datetime'],
                                            '%Y-%m-%d %H:%M')
        if not (start <= end_dt <= end):
            continue
        row = [
            str(order['order_id']) + '-' + order['product_name'],
            order['start_datetime'], order['end_datetime']
        ]
        # Any status other than 0/1 is treated as completed.
        status, color = status_styles.get(int(order['status']),
                                          ('Completed', 'Green'))
        row.append(status)
        row.append(color)
        items.append(row)
    # Newest orders first on the y-axis.
    for i, Dat in enumerate(items[::-1]):
        DF.loc[i] = Dat
    DF['Start_dt'] = ps.to_datetime(DF.Start)
    DF['End_dt'] = ps.to_datetime(DF.End)

    G = figure(title='Polishing Schedule (Bi-Weekly)',
               x_axis_type='datetime',
               width=1200,
               height=400,
               y_range=DF.Item.tolist(),
               x_range=Range1d(DF.Start_dt.min(),
                               DF.End_dt.max(),
                               min_interval=datetime.timedelta(minutes=30)))
    # BUG FIX: '%M' is minutes; the original '%m' rendered the month number
    # where the minute was intended (e.g. "14:06" for any time in June).
    G.xaxis.formatter = DatetimeTickFormatter(
        hours=["%d %b %y, %H:%M"],
        days=["%d %b %y, %H:%M"],
        months=["%d %b %y, %H:%M"],
        years=["%d %b %y, %H:%M"],
    )
    G.xaxis.major_label_orientation = pi / 3
    # Fixed ticks every 4 hours across the plotted range (ms since epoch).
    tick_vals = pd.to_datetime(
        date_range(DF.Start_dt.min(), DF.End_dt.max(), 4,
                   'hours')).astype(int) / 10**6
    G.xaxis.ticker = FixedTicker(ticks=list(tick_vals))
    hover = HoverTool(tooltips="Product: @Item<br>\
    Start: @Start<br>\
    End: @End<br>\
    Status: @Status")
    G.add_tools(hover)

    # Bars are 0.4 units tall, centered on each item's row index.
    DF['ID'] = DF.index + 0.8
    DF['ID1'] = DF.index + 1.2
    CDS = ColumnDataSource(DF)
    G.quad(left='Start_dt',
           right='End_dt',
           bottom='ID',
           top='ID1',
           source=CDS,
           color="Color",
           legend='Status',
           alpha=0.8)
    G.legend.click_policy = "hide"
    export_png(G, filename="static/schedule.png")
    output_file('static/schedule.html', mode='inline')
    save(G)
Exemple #4
0
def stats_plot(learner: RefaelLearner, database):
    """Plot six per-timestep statistics of the valid graphs in *database*.

    For every timestep, computes: number of valid graphs, total nodes, total
    edges, average node degree, and black/white label counts.  Each statistic
    becomes one Bokeh line figure exported as a PNG under
    <base_dir>/fig/stats_graphs.

    :param learner: provides base_dir() and _params['min_nodes'] (used in the
        output file name suffix)
    :param database: exposes multi_graphs_by_time (indexable by timestep) and
        _database._labels (graph id -> label)
    """
    # Ensure the output directory exists (parents are assumed to exist).
    if not os.path.exists(
            os.path.join(learner.base_dir(), 'fig', 'stats_graphs')):
        os.mkdir(os.path.join(learner.base_dir(), 'fig', 'stats_graphs'))

    figures = []
    titles = [
        "Number of graphs over time", "Total number of nodes over time",
        "Total number of edges over time", "Average node degree over time",
        "Number of blacks over time", "Number of whites over time"
    ]
    y_labels = [
        "number of graphs", "number of nodes", "number of edges",
        "average node degree", "number of blacks", "number of whites"
    ]
    # y_vals[i] collects the i-th statistic, one value per timestep.
    y_vals = []

    # One figure per statistic, sharing the time x-axis.
    for i in range(6):
        figures.append(
            figure(plot_width=600,
                   plot_height=250,
                   title=titles[i],
                   x_axis_label='time',
                   y_axis_label=y_labels[i]))
        y_vals.append([])

    for t in range(len(database.multi_graphs_by_time)):
        mg = database.multi_graphs_by_time[t]
        ids = mg._list_id
        # Only graphs flagged valid contribute to the statistics.
        valids = [gid for gid in ids if mg._graph_valid[gid]]

        y_vals[0].append(len(valids))
        y_vals[1].append(sum([mg.node_count(k) for k in valids]))
        y_vals[2].append(sum([mg.edge_count(k) for k in valids]))

        # Collect every node degree across all valid graphs at this timestep.
        # (The comprehension's `t` shadows the loop variable only inside the
        # comprehension scope, so the outer timestep index is unaffected.)
        degs = []
        gnxs = [mg.get_gnx(gid) for gid in valids]
        for gnx in gnxs:
            degs = degs + [
                t[1] for t in list(gnx.degree([x for x in gnx.nodes()]))
            ]
        # NOTE(review): divides by the total node count — raises
        # ZeroDivisionError if a timestep has no valid nodes; confirm the
        # caller guarantees at least one.
        avg_deg = sum(degs) / sum([mg.node_count(k) for k in valids])
        y_vals[3].append(avg_deg)

        # Labels appear to be 0/1 per graph, so sum() counts one class and
        # len()-sum() the other; 'white_label' flips which class is which.
        labels = database._database._labels
        valid_labels = [labels[gr] for gr in valids]
        if REFAEL_PARAM['white_label']:
            y_vals[5].append(sum(valid_labels))
            y_vals[4].append(len(valid_labels) - sum(valid_labels))
        else:
            y_vals[4].append(sum(valid_labels))
            y_vals[5].append(len(valid_labels) - sum(valid_labels))
    # Draw each statistic against the timestep index and export it.
    for i in range(len(figures)):
        figures[i].line(list(range(len(database.multi_graphs_by_time))),
                        y_vals[i],
                        line_color='blue')
        export_png(
            figures[i],
            os.path.join(
                learner.base_dir(), "fig", "stats_graphs", titles[i] + "_mn_" +
                str(learner._params["min_nodes"]) + ".png"))
Exemple #5
0
 def show(p):
     """Export plot *p* to a temporary PNG, display it inline, then delete it."""
     tmp_name = 'temp_img.png'
     # export_png returns the written file name, which ipd.Image loads.
     ipd.display(ipd.Image(export_png(p, filename=tmp_name)))
     os.remove(tmp_name)
Exemple #6
0
 def save_img(self, fname='out.png'):
     """Write the current plot layout to *fname* as a PNG image."""
     # Imported lazily so bokeh is only required when actually exporting.
     from bokeh.io import export_png
     export_png(self.plts, filename=fname)
             p_ack.yaxis.axis_label = "Acknowledgement Numbers"
             p_ack.yaxis[0].formatter = BasicTickFormatter(use_scientific=False)
             p_ack.scatter(x='x', y='w', color='colorsack', legend="ack values", alpha=0.5, source=ack_sourceplot)
             output_name = "color_scatter_{}_{}_{}-{}_syn+ack".format(source,date,start_hour,end_hour)
             output_dir = "{}{}/{}/{}/{}".format(outputdir,date.split('-')[0],date.split('-')[1],date.split('-')[2],h)
             if not os.path.exists(output_dir):
                 os.makedirs(output_dir)
             output_file("{}/{}.html".format(output_dir,output_name),
                         title="TCP ISN values in Honeypot", mode='inline')
             # Write the html file and save it
             p = column(p_seq,p_ack)
             save(p)
             print("{} - {}".format(start_hour,end_hour))
             # Export the plot into a png file
             if export:
                 export_png(p, filename = "{}/{}.png".format(output_dir,output_name))
 else:
     ports = args.port_filter
     minutes = 0
     # For each period of time corresponding to the timeline
     for nb in range(1,int(occurrence_num_hour+1)):
         start_min = format(minutes, '02d')
         it_minutes = minutes
         start_hour, end_hour = string_timeline(h, start_min, format((minutes+timeline),'02d'))
         title = " {} collected on {} between {} and {}".format(source, date, start_hour, end_hour)
         # Definition of the sequence numbers plot
         p_seq = figure(width=1500,height=700,tools=TOOLS, x_axis_type="datetime", title="TCP sequence values in Honeypot {}".format(title))
         hoverseq = p_seq.select(dict(type=HoverTool))
         hoverseq.tooltips = [
                 ("index", "$index"),
                 ("timestamp", "@x{%F %H:%M:%S}"),
Exemple #8
0
#image = Image.open('lena.png').convert('LA').resize((width, height))
#data = {"x": [], "y": [], "value": []}
#for x in range(0, width):
#    for y in range(0, height):
#        data["x"    ].append(x)
#        data["y"    ].append(y)
#        data["value"].append(image.getpixel((image.size[0] - 1 - x, image.size[1] - 1 - y))[0])

# 3x3 grids of values: coordinates enumerate every (x, y) cell with x varying
# slowest, matching the original nested loops.
initial_data = {
    "x": [col for col in range(3) for _ in range(3)],
    "y": [cell for _ in range(3) for cell in range(3)],
    "value": [0, 225, 0, 225, 225, 225, 0, 225, 0]
}

data = {
    "x": [col for col in range(3) for _ in range(3)],
    "y": [cell for _ in range(3) for cell in range(3)],
    "value": [0, 1, 0, 2, 4, 3, 0, 10, 0]
}

# Two copies of the initial-data plot, side by side.
plot = row(generate_initial(initial_data), generate_initial(initial_data))

output_file("figure_1.html")
export_png(plot, filename="figure_1.png")
show(plot)
Exemple #9
0
def process_isn(src_dir,source,hour,outputdir,seq,ack):
    """Extract TCP ISN values from pcap files and plot them over time.

    Shells out to ``ls | parallel tshark`` to dump (timestamp, seq, ack,
    dstport) tuples, then renders scatter plots of sequence and/or
    acknowledgement numbers colored by destination port.  Output is written
    to *outputdir* both as HTML and PNG.

    :param src_dir: directory of capture files; path must end .../YYYY/MM/DD/
    :param source: capture source name used in titles and output file names
    :param hour: two-digit hour to restrict to, or None for the whole day
    :param outputdir: output directory, created if missing
    :param seq: when truthy, plot sequence numbers
    :param ack: when truthy, plot acknowledgement numbers
    :raises ValueError: if neither *seq* nor *ack* is requested
    """
    # BUG FIX: with both flags false, `p` was never bound and show(p) raised
    # a NameError; fail early with a clear message instead.
    if not (seq or ack):
        raise ValueError("at least one of seq or ack must be requested")

    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    w_input = []
    x_input = []
    y_input = []
    z_input = []
    braces = "{}"  # literal '{}' placeholder consumed by GNU parallel
    date = src_dir.split('/')[-4:-1]  # [YYYY, MM, DD] taken from the path
    TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,"
    # Command used to display an entire day of ISN values
    if hour is None:
        src_file = "{}{}".format(src_dir,braces)
        cmd = "ls {} | parallel --line-buffer --gnu tshark -n -q -Tfields -e frame.time_epoch -e tcp.seq -e tcp.ack \
        -e tcp.dstport -E separator=/s -o tcp.relative_sequence_numbers:FALSE -r {}".format(src_dir,src_file)
        width = 3600
        outputname = "color_scatter_{}_{}{}{}".format(source,date[0],date[1],date[2])
        title = " {} collected on {}/{}/{}".format(source, date[0],date[1],date[2])
    # Command used to display ISN values for the defined timeline
    else:
        filename = "{}-{}{}{}{}*".format(source,date[0],date[1],date[2],hour)
        cmd = "ls {}{} | parallel --line-buffer --gnu tshark -n -q -Tfields -e frame.time_epoch -e tcp.seq -e tcp.ack \
        -e tcp.dstport -E separator=/s -o tcp.relative_sequence_numbers:FALSE -r {}".format(src_dir,filename,braces)
        width = 1500
        outputname = "color_scatter_{}_{}{}{}_h{}".format(source,date[0],date[1],date[2],hour)
        title = " {} collected on {}/{}/{} {}".format(source,date[0],date[1],date[2],time_space(hour))

    # NOTE(security): cmd is assembled by string interpolation and executed
    # with shell=True; src_dir and source must come from trusted input only.
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    lines = proc.stdout.readlines()
    if not lines:
        print("There is no data available at the time you specified, please check the files and run the command again with a valid time value.")
        sys.exit(1)

    # Each tshark line is "epoch seq ack dport", space-separated.
    for line in lines:
        line = line[:-1].decode()
        timestamp, iseq, iack, dport = line.split(' ')
        a,_ = timestamp.split('.')
        dobj = datetime.datetime.fromtimestamp(float(a))
        stime = dobj.strftime("%Y-%m-%d %H:%M:%S")
        x_input.append(stime)
        y_input.append(iseq)
        w_input.append(iack)
        if dport == '':
            dport = 0
        z_input.append(int(dport))
    proc.wait()
    x = np.array(x_input,dtype=np.datetime64)
    z = np.array(z_input)
    # One color per point derived from the destination port.
    # NOTE(review): ports above ~100 push r/g past 0xFF, yielding malformed
    # color strings — confirm the expected port range.
    colors = [
        "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+z*2, 30+z*2)
    ]
    type_string = ""

    # Definition of the sequence numbers plot
    if seq:
        type_string+="_seq"
        y = np.array(y_input)
        p_seq = figure(width=width,height=700,tools=TOOLS, x_axis_type="datetime", title="TCP sequence values in Honeypot {}".format(title))
        hoverseq = p_seq.select(dict(type=HoverTool))
        hoverseq.tooltips = [
                ("index", "$index"),
                ("timestamp", "@x"),
                ("number", "@y{0,0}")
                ]
        p_seq.xaxis.axis_label = "Time"
        p_seq.yaxis[0].formatter = BasicTickFormatter(use_scientific=False)
        p_seq.scatter(x, y, color=colors, legend="seq values", alpha=0.5, )
        p = p_seq

    # Definition of the acknowledgement numbers plot
    if ack:
        type_string+="_ack"
        w = np.array(w_input)
        p_ack = figure(width=width,height=700,tools=TOOLS, x_axis_type="datetime", title="TCP acknowledgement values in Honeypot {}".format(title))
        hoverack = p_ack.select(dict(type=HoverTool))
        hoverack.tooltips = [
                ("index", "$index"),
                ("timestamp", "@x"),
                ("number", "@y{0,0}")
                ]
        p_ack.xaxis.axis_label = "Time"
        p_ack.yaxis[0].formatter = BasicTickFormatter(use_scientific=False)
        p_ack.scatter(x, w, color=colors, legend="ack values", alpha=0.5, )
        p = p_ack
    output_file_name = "{}{}{}".format(outputdir,outputname,type_string)
    output_file("{}.html".format(output_file_name),
            title="TCP ISN values in Honeypot", mode='inline')
    # When both kinds are requested, stack them vertically in one document.
    if seq and ack:
        p = column(p_seq,p_ack)
    show(p)
    export_png(p, filename="{}.png".format(output_file_name))
    s.xaxis.axis_label_text_font_size = "14pt"
    s.xaxis.major_label_text_font_size = "11pt"
    s.yaxis.axis_label_text_font_size = "14pt"
    s.yaxis.major_label_text_font_size = "12pt"
    s.xaxis.major_label_text_font = "times"
    s.yaxis.major_label_text_font = "times"
    s.title.text_font_size = '16pt'
    s.title.text_font = "times"
    s.title.text_font_style = 'normal'

    return s


def plot_temporal_clusters(dataframe, case, save_png=False):
    """
    Plot combination of spatial clusters for different periods as cos/sin plot.
    :param dataframe: (pd.DataFrame) a data frame that stores location data for visualization (must have "longitude"
    and "latitude" columns)
    :param case: case identifier forwarded to plot_time for both sub-plots
    :param save_png: (bool) save or not the picture to png-format
    :return: None
    """
    # Daily and weekly views side by side; only the weekly plot carries the
    # color bar so it is not duplicated.
    s1 = plot_time(dataframe, 'Day', case)
    s2 = plot_time(dataframe, 'Week', case, color_bar=True)

    p = gridplot([[s1, s2]])

    if save_png:
        # Plain string: the original f-string had no placeholders.
        export_png(p, filename="../output/time_clusters.png")
    else:
        show(p)
Exemple #11
0
def average_day(indoor, outdoor, site_number, time_period, shift):
    """Plot average indoor vs outdoor PM2.5 for each hour of the day.

    :param indoor: frame with 'PM2_5_corrected' and 'Location' columns and a
        datetime index
    :param outdoor: frame with a 'PM2_5_corrected' column and a datetime index
    :param site_number: plot title text
    :param time_period: sampling period, '1'..'5'; selects the output
        directory and y-axis range
    :param shift: only 'unshifted' data is averaged and exported to PNG
    :return: (bokeh figure, DataFrame of rounded hourly averages)
    :raises ValueError: if *time_period* is not one of '1'..'5'
    """
    # Output directory and y-axis range for each measurement period.
    period_settings = {
        '1': ('/Users/matthew/Desktop/thesis/Final_Figures/In_out_compare_1/', (-0.5, 10)),
        '2': ('/Users/matthew/Desktop/thesis/Final_Figures/In_out_compare_2/', (-0.5, 7)),
        '3': ('/Users/matthew/Desktop/thesis/Final_Figures/In_out_compare_3/', (0, 250)),
        '4': ('/Users/matthew/Desktop/thesis/Final_Figures/In_out_compare_4/', (-0.5, 10)),
        '5': ('/Users/matthew/Desktop/thesis/Final_Figures/In_out_compare_5/', (0, 90)),
    }
    try:
        filepath, y_scale_option = period_settings[time_period]
    except KeyError:
        # BUG FIX: an unknown period previously caused a NameError much later.
        raise ValueError('unknown time_period: {!r}'.format(time_period))

    PlotType = 'HTMLfile'

    # Dummy timestamps (one per hour) used as the shared x-axis.
    dates = pd.DataFrame({})
    files = glob('/Users/matthew/Desktop/daily_test/dummy_date*.csv')
    files.sort()
    for file in files:
        dates = pd.concat([dates, pd.read_csv(file)], sort=False)

    indoor_average_day = pd.DataFrame({})
    outdoor_average_day = pd.DataFrame({})
    # Only un-shifted corrected data is averaged; for any other *shift* value
    # the PM2_5_hourly_avg column stays unset (NOTE(review): downstream use of
    # that column would then fail — confirm callers only pass 'unshifted').
    if shift == 'unshifted':
        indoor_average_day['PM2_5_hourly_avg'] = indoor['PM2_5_corrected'].groupby(indoor.index.hour).mean()

    indoor_average_day['times'] = pd.to_datetime(dates['times'])
    indoor_average_day = indoor_average_day.sort_values('times')
    indoor_average_day.index = indoor_average_day.times

    outdoor_average_day['PM2_5_hourly_avg'] = outdoor['PM2_5_corrected'].groupby(outdoor.index.hour).mean()
    outdoor_average_day['times'] = pd.to_datetime(dates['times'])
    outdoor_average_day = outdoor_average_day.sort_values('times')
    outdoor_average_day.index = outdoor_average_day.times

    # Hourly averages, rounded, labeled by monitor location.
    averages = pd.DataFrame({})
    averages[indoor.iloc[0]['Location'] + '_IAQU'] = indoor_average_day.PM2_5_hourly_avg.round(2)
    averages[indoor.iloc[0]['Location'] + '_CN'] = outdoor_average_day.PM2_5_hourly_avg.round(2)
    print(averages)

    if PlotType == 'notebook':
        output_notebook()
    else:
        output_file('/Users/matthew/Desktop/clarity_PM2.5_time_series_legend_mute.html')

    p1 = figure(plot_width=900,
                        plot_height=450,
                        x_axis_type='datetime',
                        x_axis_label='Time (hrs)',
                        y_axis_label='PM2.5 (ug/m³)',
                        y_range = y_scale_option)
    p1.title.text = site_number
    p1.title.text_font_size = '14pt'
    p1.title.text_font = 'times'

    # Indoor series: triangles + line; outdoor series: circles + line.
    p1.triangle(indoor_average_day.index,     indoor_average_day.PM2_5_hourly_avg, size = 8,             color='black',             line_width=2, muted_color='black', muted_alpha=0.2)
    p1.line(indoor_average_day.index,     indoor_average_day.PM2_5_hourly_avg,             color='black',             line_width=2, muted_color='black', muted_alpha=0.2)
    p1.circle(outdoor_average_day.index,       outdoor_average_day.PM2_5_hourly_avg,    size = 8,          color='black',              line_width=2, muted_color='blue', muted_alpha=0.2)
    p1.line(outdoor_average_day.index,       outdoor_average_day.PM2_5_hourly_avg,              color='black',              line_width=2, muted_color='blue', muted_alpha=0.2)

    p1.legend.click_policy="mute"
    figure_format(p1)
    p1.legend.location='top_center'
    # Only hour labels on the x-axis: day/second formats are blanked out.
    p1.xaxis.formatter = DatetimeTickFormatter(days="", hours="%H", seconds="" )

    p1.yaxis.major_label_text_font = "times"
    p1.xaxis.major_label_text_font = "times"

    if shift == 'unshifted':
        export_png(p1, filename=filepath + 'hourly_averages_' + indoor.iloc[0]['Location'] + '.png')

    # Tabs wrapper kept for the (commented-out) interactive show() path.
    tab1 = Panel(child=p1, title="Average Hour Values")
    tabs = Tabs(tabs=[ tab1])
    # show(tabs)

    return p1, averages
    def plotMatrix(self, npy, fileName, scale=None, residueMapName=None):
        """Render matrix *npy* as an interactive red/blue heat map.

        The plot is saved as <fileName>.html and <fileName>.png.

        :param npy: 2-D numpy array of covariance / distance-difference values
        :param fileName: output path without extension
        :param scale: optional target size passed to self.rescaleMatrix
        :param residueMapName: optional .npy file mapping covariance indices to
            residue pairs, shown in the hover tooltip when given
        """
        # Rescales matrix if a scale/length is specified
        if scale is not None:
            npy = self.rescaleMatrix(npy, scale)

        # Interactive Plot Tools
        TOOLS = 'hover,save,pan,box_zoom,reset,wheel_zoom'

        # Defining color values (fixed range; values outside are clamped by
        # the color mapper)
        vmin = -5
        vmax = 5

        # New Color Map Bokeh: red (negative) through white (zero) to blue
        # (positive)
        blueRedColors = [
            '#FF0000', '#FF1111', '#FF2222', '#FF3333', '#FF4444', '#FF5555',
            '#FF6666', '#FF7777', '#FF8888', '#FF9999', '#FFAAAA', '#FFBBBB',
            '#FFCCCC', '#FFDDDD', '#FFEEEE', '#FFFFFF', '#EEEEFF', '#DDDDFF',
            '#CCCCFF', '#BBBBFF', '#AAAAFF', '#9999FF', '#8888FF', '#7777FF',
            '#6666FF', '#5555FF', '#4444FF', '#3333FF', '#2222FF', '#1111FF',
            '#0000FF'
        ]

        #vmin = np.amin(npy)
        #vmax = np.amax(npy)

        # Reformatting data for plotting

        xyPairList = [None] * npy.shape[0] * npy.shape[1]

        # Creating list of (i, j) coordinates, stored column-major
        # (index = i + j*shape[0]).
        # NOTE(review): 'covValues' below uses npy.flatten(), which is
        # row-major — the pairing of coordinates to values is transposed
        # relative to npy's row/column order (and would misalign for
        # non-square matrices); confirm this is intended.
        for i in range(0, npy.shape[0]):
            for j in range(0, npy.shape[1]):
                xyPairList[i + j * npy.shape[0]] = (i, j)

        if residueMapName is not None:
            # Loading map from covariance index to residue pairs
            distMap = list(
                np.load(residueMapName, allow_pickle=True).item().values())

            # Cartesian product of residue pairs, flattened to one row per cell
            covMap = np.array([[(ti, tj) for ti in distMap] for tj in distMap])
            covMap = covMap.reshape(covMap.shape[0] * covMap.shape[1], -1)

            # Defining fields to be displayed in hover tooltips
            source = ColumnDataSource(
                data={
                    'x': np.transpose(xyPairList)[0],
                    'y': np.transpose(xyPairList)[1],
                    'covValues': npy.flatten(),
                    'covMap': covMap
                })
            tooltipList = [('xCoord', '@x'), ('yCoord', '@y'),
                           ('Covariance Value', '@covValues'),
                           ('residuePair', '@covMap')]
        else:
            # Defining fields to be displayed in hover tooltips
            source = ColumnDataSource(
                data={
                    'x': np.transpose(xyPairList)[0],
                    'y': np.transpose(xyPairList)[1],
                    'covValues': npy.flatten()
                })
            tooltipList = [('xCoord', '@x'), ('yCoord', '@y'),
                           ('Distance Difference Value', '@covValues')]

        # Plotting: one unit rect per matrix cell, colored by value
        color_mapper = LinearColorMapper(palette=blueRedColors,
                                         low=vmin,
                                         high=vmax)

        plot = figure(x_range=(-0.5, len(npy) - 0.5),
                      y_range=(-0.5, len(npy) - 0.5),
                      tools=TOOLS,
                      toolbar_location='below',
                      tooltips=tooltipList)
        plot.rect(x='x',
                  y='y',
                  width=1,
                  height=1,
                  source=source,
                  fill_color={
                      'field': 'covValues',
                      'transform': color_mapper
                  },
                  line_color=None)

        # Color scale legend on the right edge
        color_bar = ColorBar(color_mapper=color_mapper,
                             label_standoff=12,
                             border_line_color=None, location=(0,0),
                             ticker=BasicTicker(\
                                desired_num_ticks=len(blueRedColors)))

        plot.add_layout(color_bar, 'right')

        output_file(fileName + '.html')
        save(plot)
        self.logger.info('Computation complete, plot outputted to: '\
                         + fileName + '.html')

        export_png(plot, filename=fileName + '.png')
        self.logger.info('Computation complete, plot outputted to: '\
                         + fileName + '.png')
Exemple #13
0
# Importing the packages
from cat_analysis.figure_plotter import *
from cat_analysis.modeling import *
from cat_analysis.data_cleanup import *

from bokeh.io import export_png
import pandas as pd


# Load the tidy catastrophe dataset.
df_tidy = tidy_reader("data/tidy_mt_catastrophe.xlsx")

# Catastrophe times observed at 12 uM tubulin concentration.
data_12 = df_tidy.loc[df_tidy["Concentration (uM)"] == 12,
                      "Time to Catastrophe (s)"].values

# Maximum-likelihood estimates for the two rate parameters of the model.
parameters = mle_model(data_12)
beta1_mle, beta2_mle = parameters[0], parameters[1]

# Build the story-model plot and save it as figure 3.
story_plot = single_data_story_plotter(data_12, beta1_mle, beta2_mle)
export_png(story_plot, filename="figure_3.png")
Exemple #14
0
 def save_picture(self):
     """Export the assembled graph layout to graph.png."""
     export_png(self.graphs_layout, filename="graph.png")
from cat_analysis.figure_plotter import *
from cat_analysis.modeling import *
from cat_analysis.data_cleanup import *

from bokeh.io import export_png
import pandas as pd

# Load the tidy catastrophe dataset.
df_tidy = tidy_reader("data/tidy_mt_catastrophe.xlsx")

# Catastrophe times observed at 12 uM tubulin concentration.
data_12 = df_tidy.loc[df_tidy["Concentration (uM)"] == 12,
                      "Time to Catastrophe (s)"].values

# Bootstrap AIC samples for each candidate model (10k replicates each).
df_gamma = bootstrap_aic(mle_iid_gamma, data_12, size=10000)
df_model = bootstrap_aic(mle_model, data_12, size=10000)

# Stack both sets of AIC values into a single frame for plotting.
df_aic = pd.concat([df_gamma, df_model], ignore_index=True)

# ECDF comparison of the AIC distributions, saved as figure 5.
aic_ecdf = aic_ecdf_plotter(df_aic,
                            "AIC Values for Gamma and Story Distribution")
export_png(aic_ecdf, filename="figure_5.png")
def _scatter_plot(umap_mtx,
                  pid_subc_list,
                  colors,
                  fig_height,
                  fig_width,
                  label,
                  title='',
                  save_fig=None):
    """
    Interactive Bokeh scatterplot of UMAP projections colored by subcluster,
    for inspection in Jupyter notebooks.

    :param umap_mtx: Array with UMAP projections
    :type umap_mtx: numpy array
    :param pid_subc_list: list of pids ordered as in umap_mtx and subcluster labels
    :type pid_subc_list: list of tuples
    :param colors: Color list
    :type colors: list
    :param fig_height: figure height
    :type fig_height: int
    :param fig_width: figure width
    :type fig_width: int
    :param label: dictionary of class numbers and subtype labels
    :type label: dict
    :param title: figure title
    :type title: str
    :param save_fig: flag to enable figure saving, defaults to None
    :type save_fig: str
    """
    # Split the (pid, subcluster) tuples and order all points by subcluster
    # so the legend entries come out grouped.
    pids = [pair[0] for pair in pid_subc_list]
    subclusters = [pair[1] for pair in pid_subc_list]
    df = pd.DataFrame({
        'x': umap_mtx[:, 0].tolist(),
        'y': umap_mtx[:, 1].tolist(),
        'pid_list': pids,
        'subc_list': subclusters
    }).sort_values('subc_list')

    subc_values = df['subc_list'].tolist()
    source = ColumnDataSource(
        dict(x=df['x'].tolist(),
             y=df['y'].tolist(),
             pid=df['pid_list'].tolist(),
             subc=[label[str(s)] for s in subc_values],
             col_class=[str(s) for s in subc_values]))

    # Color mapping is keyed on the stringified class number.
    class_labels = [str(s) for s in df['subc_list']]
    cmap = CategoricalColorMapper(factors=sorted(pd.unique(class_labels)),
                                  palette=colors)
    TOOLTIPS = [('pid', '@pid'), ('subc', '@subc')]

    plotTools = 'box_zoom, wheel_zoom, pan,  crosshair, reset, save'

    output_notebook()
    p = figure(plot_width=fig_width * 80,
               plot_height=fig_height * 80,
               tools=plotTools,
               title=title)
    p.add_tools(HoverTool(tooltips=TOOLTIPS))
    p.circle('x',
             'y',
             legend_group='subc',
             source=source,
             color={'field': 'col_class', "transform": cmap},
             size=12)
    # Strip ticks and tick labels: UMAP coordinates carry no physical meaning.
    for axis in (p.xaxis, p.yaxis):
        axis.major_tick_line_color = None
        axis.minor_tick_line_color = None
        axis.major_label_text_color = None
    p.grid.grid_line_color = None
    p.title.text_font_size = '13pt'
    p.legend.label_text_font_size = '18pt'
    p.legend.location = 'top_left'
    if save_fig is not None:
        export_png(p, filename=f'./plot/{save_fig}.png')
    else:
        show(p)
Exemple #17
0
def plot_bokeh(self, filename=None, show=True, savepng=False):
    """Make an interactive Bokeh summary of the data and model results.

    Builds two linked figures sharing one ColumnDataSource:
      * fig0 -- radiance vs. UTC with error bars, the data points, and per-model
        points split into "used" / "not used" via each model's mask column;
      * fig1 -- tangent-point latitude vs. local time, colored by tangent
        altitude with a colorbar.

    :param filename: output HTML file name ('.html' appended if missing);
        if None nothing is written to disk.
    :param show: if True, open the resulting plot in a browser.
    :param savepng: if True (and filename given), also export a PNG.
    :return: tuple (fig0, fig1) of the two Bokeh figures.
    """
    curdoc().theme = Theme(BOKEH_THEME_FILE)

    if filename is not None:
        # Normalize the extension before registering the output file.
        if not filename.endswith('.html'):
            filename += '.html'
        bkp.output_file(filename)

    # ISO-format timestamps (seconds precision) for the hover tool.
    self.data['utcstr'] = self.data['utc'].apply(lambda x: x.isoformat()[0:19])

    # Put the dataframe in a usable form
    self.data['lower'] = self.data.radiance - self.data.sigma
    self.data['upper'] = self.data.radiance + self.data.sigma
    self.data['lattandeg'] = self.data.lattan * 180 / np.pi

    # Color-scale limit: largest tangent altitude strictly below the maximum
    # (the max itself appears to be treated as a sentinel value).
    alt_mask = self.data.alttan != self.data.alttan.max()
    if np.any(alt_mask):
        m = self.data[alt_mask].alttan.max()
    else:
        m = 1e10
    col = np.interp(self.data.alttan, np.linspace(0, m, 256),
                    np.arange(256)).astype(int)
    self.data['color'] = [Turbo256[c] for c in col]
    source = bkp.ColumnDataSource(self.data)

    # Tools
    tools = [
        'pan', 'box_zoom', 'wheel_zoom', 'xbox_select', 'hover', 'reset',
        'save'
    ]

    # tool tips
    tips = [('index', '$index'), ('UTC', '@utcstr'),
            ('Radiance', '@radiance{0.2f} kR'),
            ('LTtan', '@loctimetan{2.1f} hr'),
            ('Lattan', '@lattandeg{3.1f} deg'), ('Alttan', '@alttan{0.f} km')]

    # Make the figure
    width, height = 1200, 600
    fig0 = bkp.figure(plot_width=width,
                      plot_height=height,
                      x_axis_type='datetime',
                      title=f'{self.species}, {self.query}',
                      x_axis_label='UTC',
                      y_axis_label='Radiance (kR)',
                      y_range=[0, self.data.radiance.max() * 1.5],
                      tools=tools,
                      active_drag="xbox_select")

    # plot the data
    dplot = fig0.circle(x='utc',
                        y='radiance',
                        size=7,
                        color='black',
                        legend_label='Data',
                        hover_color='yellow',
                        source=source,
                        selection_color='orange')
    fig0.line(x='utc',
              y='radiance',
              color='black',
              legend_label='Data',
              source=source)
    fig0.xaxis.ticker = DatetimeTicker(num_minor_ticks=5)

    # Add error bars
    fig0.add_layout(
        Whisker(source=source, base='utc', upper='upper', lower='lower'))
    renderers = [dplot]

    # Plot each model: filled circles for data points used in the fit,
    # yellow-filled circles for points excluded by the model's mask column.
    col = color_generator()
    modplots, maskedplots = [], []
    for modkey, result in self.model_result.items():
        c = next(col)

        maskkey = modkey.replace('model', 'mask')
        used = (self.data[maskkey]).to_list()
        view = CDSView(source=source, filters=[BooleanFilter(used)])
        modplots.append(
            fig0.circle(x='utc',
                        y=modkey,
                        size=9,
                        color=c,
                        source=source,
                        legend_label=result.label,
                        view=view))
        unused = np.logical_not(self.data[maskkey]).to_list()
        view = CDSView(source=source, filters=[BooleanFilter(unused)])
        maskedplots.append(
            fig0.circle(x='utc',
                        y=modkey,
                        size=9,
                        source=source,
                        line_color=c,
                        fill_color='yellow',
                        view=view,
                        legend_label=result.label + ' (Data Point Not Used)'))
    # BUG FIX: extend once after the loop. The original extended inside the
    # loop, registering duplicate renderers with the hover tool on every
    # iteration.
    renderers.extend(modplots)
    renderers.extend(maskedplots)

    datahover = HoverTool(tooltips=tips, renderers=renderers)
    fig0.add_tools(datahover)

    ##############
    # Plot tangent point, colored by tangent altitude.
    color_mapper = LinearColorMapper(palette="Turbo256", low=0, high=m)

    tools = [
        'pan', 'box_zoom', 'wheel_zoom', 'box_select', 'hover', 'reset', 'save'
    ]
    fig1 = bkp.figure(plot_width=width,
                      plot_height=height,
                      title='Tangent Point Location',
                      x_axis_label='Local Time (hr)',
                      y_axis_label='Latitude (deg)',
                      x_range=[0, 24],
                      y_range=[-90, 90],
                      tools=tools,
                      active_drag="box_select")
    tanplot = fig1.circle(x='loctimetan',
                          y='lattandeg',
                          size=5,
                          selection_color='orange',
                          hover_color='purple',
                          source=source,
                          color='color')
    fig1.xaxis.ticker = SingleIntervalTicker(interval=6, num_minor_ticks=6)
    fig1.yaxis.ticker = SingleIntervalTicker(interval=45, num_minor_ticks=3)
    color_bar = ColorBar(color_mapper=color_mapper,
                         title='Altitude (km)',
                         label_standoff=12,
                         border_line_color=None,
                         location=(0, 0))
    fig1.add_layout(color_bar, 'right')
    datahover = HoverTool(tooltips=tips, renderers=[tanplot])
    fig1.add_tools(datahover)

    grid = column(fig0, fig1)

    if filename is not None:
        # output_file was already configured above.
        if savepng:
            export_png(grid, filename=filename.replace('.html', '.png'))
        bkp.save(grid)

    if show:
        bkp.show(grid)

    return fig0, fig1
Exemple #18
0
# Render a simple line plot using the WebGL backend (faster for large arrays).
plot = figure(output_backend = 'webgl')

# NOTE(review): array_x and array_y are assumed to be numpy arrays defined
# earlier in the original example — confirm before running in isolation.
plot.line(array_x, array_y)

#Output the plot to a standalone HTML file and open it in a browser.

output_file('numpy_line.html')

show(plot)

---------------------------------------------------------------------------------------------

#Exporting plots as PNG images

#Package installations on terminal/PowerShell

conda install selenium phantomjs pillow

pip3 install selenium

#Import the required packages

from bokeh.io import export_png

#Export the png image

export_png(plot, filename="safe_states.png")

-----------------------------------------------------------------------------------------------
Exemple #19
0
def make_fitted_plot(self,
                     result,
                     filestart='fitted',
                     show=True,
                     ut=None,
                     smooth=False,
                     savepng=False):
    """Create and save a 2x2 summary grid for a fitted model.

    Layout: [[radiance time series, speed distribution],
             [available-packets map, packets-used map]].

    :param result: fit result providing 'abundance', 'p_available',
        'longitude' (radians; object with ``.value``), 'velocity',
        'v_available' and 'vdist' arrays.
    :param filestart: stem for the output '.html' (and optional '.png') file.
    :param show: if True, open the saved HTML in a browser.
    :param ut: optional datetime; appended to the title and marked with a
        vertical line on the time series.
    :param smooth: if True, smooth the two maps with a Gaussian kernel.
    :param savepng: if True, also export the grid as a PNG.
    :return: the Bokeh grid layout.
    """
    curdoc().theme = Theme(BOKEH_THEME_FILE)

    if smooth:
        # 'wrap' boundary because longitude is periodic.
        kernel = Gaussian2DKernel(x_stddev=1)
        source = convolve(result['abundance'], kernel, boundary='wrap')
        packets = convolve(result['p_available'], kernel, boundary='wrap')
    else:
        source = result['abundance']
        packets = result['p_available']

    # The map panels are static; only allow saving.
    tools = ['save']

    def _set_fonts(fig):
        # Common title/axis-label font styling shared by all four panels.
        fig.title.text_font_size = FONTSIZE
        fig.xaxis.axis_label_text_font_size = FONTSIZE
        fig.yaxis.axis_label_text_font_size = FONTSIZE
        fig.xaxis.major_label_text_font_size = NUMFONTSIZE
        fig.yaxis.major_label_text_font_size = NUMFONTSIZE

    # Convert longitude (rad) to local time (hr) and order map rows by it.
    local_time = (result['longitude'].value * 12 / np.pi + 12) % 24
    arg = np.argsort(local_time[:-1])
    source, packets = source[arg, :], packets[arg, :]

    # Distribution of available packets
    fig0 = bkp.figure(plot_width=WIDTH,
                      plot_height=HEIGHT,
                      title=f'{self.species}, {self.query}, Available Packets',
                      x_axis_label='Local Time (hr)',
                      y_axis_label='Latitude (deg)',
                      x_range=[0, 24],
                      y_range=[-90, 90],
                      tools=tools)
    _set_fonts(fig0)
    fig0.xaxis.ticker = FixedTicker(ticks=[0, 6, 12, 18, 24])
    # BUG FIX: latitude ticks were [-90, 45, 0, 45, 90] (45 duplicated,
    # -45 missing).
    fig0.yaxis.ticker = FixedTicker(ticks=[-90, -45, 0, 45, 90])

    fig0.image(image=[packets.transpose()],
               x=0,
               y=-90,
               dw=24,
               dh=180,
               palette='Spectral11')

    # Distribution of packets used in the final model
    fig1 = bkp.figure(plot_width=WIDTH,
                      plot_height=HEIGHT,
                      title=f'{self.species}, {self.query}, Packets Used',
                      x_axis_label='Local Time (hr)',
                      y_axis_label='Latitude (deg)',
                      x_range=[0, 24],
                      y_range=[-90, 90],
                      tools=tools)
    _set_fonts(fig1)
    fig1.xaxis.ticker = FixedTicker(ticks=[0, 6, 12, 18, 24])
    # BUG FIX: same tick correction as fig0.
    fig1.yaxis.ticker = FixedTicker(ticks=[-90, -45, 0, 45, 90])

    fig1.image(image=[source.transpose()],
               x=0,
               y=-90,
               dw=24,
               dh=180,
               palette='Spectral11')

    # Speed distribution of available vs. used packets.
    fig2 = bkp.figure(
        plot_width=WIDTH,
        plot_height=HEIGHT,
        title=f'{self.species}, {self.query}, Speed Distribution',
        x_axis_label='Speed (km/s)',
        y_axis_label='Relative Number',
        y_range=[0, 1.2],
        tools=tools)
    _set_fonts(fig2)

    fig2.line(x=result['velocity'][:-1],
              y=result['v_available'],
              legend_label='Packets Available',
              color='red')
    fig2.line(x=result['velocity'][:-1],
              y=result['vdist'],
              legend_label='Packets Used',
              color='blue')

    # Full orbit time series
    # ISO-format timestamps (seconds precision) for the hover tool.
    self.data['utcstr'] = self.data['utc'].apply(lambda x: x.isoformat()[0:19])

    # Put the dataframe in a usable form
    self.data['lower'] = self.data.radiance - self.data.sigma
    self.data['upper'] = self.data.radiance + self.data.sigma
    self.data['lattandeg'] = self.data.lattan * 180 / np.pi

    # Color-scale limit: largest alttan below the (sentinel) maximum.
    # Guarded against a constant column, matching plot_bokeh().
    alt_mask = self.data.alttan != self.data.alttan.max()
    if np.any(alt_mask):
        m = self.data[alt_mask].alttan.max()
    else:
        m = 1e10
    col = np.interp(self.data.alttan, np.linspace(0, m, 256),
                    np.arange(256)).astype(int)
    self.data['color'] = [Turbo256[c] for c in col]
    source = bkp.ColumnDataSource(self.data)

    # Tools
    tools = [
        'pan', 'box_zoom', 'wheel_zoom', 'xbox_select', 'hover', 'reset',
        'save'
    ]

    # tool tips
    tips = [('index', '$index'), ('UTC', '@utcstr'),
            ('Radiance', '@radiance{0.2f} kR'),
            ('LTtan', '@loctimetan{2.1f} hr'),
            ('Lattan', '@lattandeg{3.1f} deg'), ('Alttan', '@alttan{0.f} km')]

    # Make the radiance figure
    title_ = f'{self.species}, {self.query}'
    if ut is not None:
        title_ += f', UTC = {ut.isoformat()}'

    fig3 = bkp.figure(plot_width=WIDTH,
                      plot_height=HEIGHT,
                      x_axis_type='datetime',
                      title=title_,
                      x_axis_label='UTC',
                      y_axis_label='Radiance (kR)',
                      y_range=[0, self.data.radiance.max() * 1.5],
                      tools=tools,
                      active_drag="xbox_select")
    _set_fonts(fig3)

    # plot the data
    dplot = fig3.circle(x='utc',
                        y='radiance',
                        size=7,
                        color='black',
                        legend_label='Data',
                        hover_color='yellow',
                        source=source,
                        selection_color='orange')
    fig3.line(x='utc',
              y='radiance',
              color='black',
              legend_label='Data',
              source=source)
    fig3.xaxis.ticker = DatetimeTicker(num_minor_ticks=5)

    # Add error bars
    fig3.add_layout(
        Whisker(source=source, base='utc', upper='upper', lower='lower'))
    renderers = [dplot]

    # Overplot each fitted model: filled circles for data points used in the
    # fit, yellow-filled circles for points excluded by the model's mask.
    col = color_generator()
    modplots, maskedplots = [], []
    # Loop variable renamed so it no longer shadows the `result` parameter.
    for modkey, fit in self.model_result.items():
        if fit.fitted:
            c = next(col)
            fig3.line(x='utc',
                      y=modkey,
                      source=source,
                      legend_label=fit.label,
                      color=c)

            maskkey = modkey.replace('model', 'mask')
            used = self.data[maskkey].to_list()
            view = CDSView(source=source, filters=[BooleanFilter(used)])
            modplots.append(
                fig3.circle(x='utc',
                            y=modkey,
                            size=7,
                            color=c,
                            source=source,
                            legend_label=fit.label,
                            view=view))

            unused = np.logical_not(self.data[maskkey]).to_list()
            view = CDSView(source=source, filters=[BooleanFilter(unused)])
            # Legend text made consistent with plot_bokeh (leading space).
            maskedplots.append(
                fig3.circle(x='utc',
                            y=modkey,
                            size=7,
                            source=source,
                            line_color=c,
                            fill_color='yellow',
                            view=view,
                            legend_label=fit.label +
                            ' (Data Point Not Used)'))
    # BUG FIX: extend once after the loop; the original extended inside the
    # loop, registering duplicate renderers with the hover tool.
    renderers.extend(modplots)
    renderers.extend(maskedplots)

    # BUG FIX: draw the UT marker once, not once per model iteration.
    if ut is not None:
        # Preserve the y-range around the intentionally tall marker line.
        yr = fig3.y_range
        fig3.line(x=[ut, ut], y=[0, 1e5], color='purple')
        fig3.y_range = yr

    datahover = HoverTool(tooltips=tips, renderers=renderers)
    fig3.add_tools(datahover)

    grid = gridplot([[fig3, fig2], [fig0, fig1]])

    # Save png version
    if savepng:
        export_png(grid, filename=filestart + '.png')

    bkp.output_file(filestart + '.html')
    bkp.save(grid)  # html files not needed

    if show:
        bkp.show(grid)

    return grid
def main():
    """Render the 1570, 1630 and 1700 population maps, export each as a PNG,
    and write a combined HTML page with all three stacked vertically."""
    os.chdir(r'../data')

    # Bar geometry, labeling and legend settings shared by all three maps.
    style = dict(
        width=0.00025,
        height=0.0025,
        kind='bar',
        label_font_size="11pt",
        add_legend=False,
        legend_font_size="24pt",
        legend_spacing=20,
        legend_padding=20,
    )
    # Base layers and canvas settings shared by all three maps.
    base = dict(
        water_file='water_1698.shp',
        islands_file='islands_1698.shp',
        plot_height=2400,
        plot_width=2400,
        x_axis_location=None,
        y_axis_location=None,
    )

    fig1 = draw_population_map(
        population_file='population_1570.csv',
        districts_file='districts_1637.shp',
        y_range=(60.705, 60.717),
        x_range=(28.722, 28.743),
        title='1570',
        locations={
            'i': Coordinates(28.73, 60.7125),
            'ii': Coordinates(28.732, 60.7105),
            'iii': Coordinates(28.7315, 60.7137),
            'iv': Coordinates(28.734, 60.713),
            'Valli': Coordinates(28.737, 60.7105),
        },
        **style,
        **base)
    fig2 = draw_population_map(
        population_file='population_1630.csv',
        districts_file='districts_1703.shp',
        y_range=(60.705, 60.718),
        x_range=(28.72, 28.743),
        title='1630',
        locations={
            'Linnoitus': Coordinates(28.732, 60.712),
            'Siikaniemi': Coordinates(28.724, 60.712),
            'Valli': Coordinates(28.737, 60.710),
            'Pantsarlahti': Coordinates(28.738, 60.7057),
        },
        **style,
        **base)
    fig3 = draw_population_map(
        population_file='population_1700.csv',
        districts_file='districts_1703.shp',
        y_range=(60.705, 60.718),
        x_range=(28.72, 28.743),
        title='1700',
        locations={
            'Linnoitus': Coordinates(28.732, 60.712),
            'Siikaniemi': Coordinates(28.724, 60.712),
            'Valli': Coordinates(28.737, 60.710),
            'Pantsarlahti': Coordinates(28.738, 60.7059),
        },
        **style,
        **base)

    # Write all rendered figures into the sibling figures directory.
    os.chdir(r'../figures')
    export_png(fig1, filename="1570.png")
    export_png(fig2, filename="1630.png")
    export_png(fig3, filename="1700.png")
    output_file(r'karonen.html')
    show(gridplot([fig1, fig2, fig3], ncols=1))
Exemple #21
0
def plot(products):
    """Render a stacked bar chart of product prices over the next six months,
    display it, and export it as a randomly named PNG.

    The export directory may be supplied as the second command-line argument;
    the generated file name (without directory) is printed to stdout.

    :param products: iterable of objects with a ``name`` attribute and a
        ``nextSixMonths`` sequence of per-month prices.
    """
    months = ["November", "December", "January", "February", "March", "April"]

    # Renamed from `colors` so the slice below reads clearly; also avoids
    # rebinding the full palette.
    palette = [
        "#BAFD00", "#95CB00", "#719A00", "#537200", "#1B7200", "#27A600",
        "#32D800", "#40F50A", "#66FF37", "#B0FF98"
    ]

    data = {'months': months}
    purchases = []
    for product in products:
        purchases.append(product.name)
        data[product.name] = product.nextSixMonths

    # One color per product (supports up to len(palette) products).
    colors = palette[:len(purchases)]

    # Maximum y-range for the bar chart. Only the first and last months are
    # checked (as in the original); pad the larger stacked total by 50%.
    # Note: the original shadowed the builtin `sum`; fixed here.
    first_total = sum(data[name][0] for name in purchases)
    last_total = sum(data[name][-1] for name in purchases)
    y_range_end = max(first_total, last_total) * 1.5

    output_file("stacked.html")

    p = figure(x_range=months,
               plot_height=250,
               title="Price breakdown for the next six months",
               toolbar_location=None,
               tools="")

    p.vbar_stack(purchases,
                 x='months',
                 width=0.45,
                 color=colors,
                 source=data,
                 legend_label=purchases)

    p.y_range.start = 0
    p.y_range.end = y_range_end
    # Don't touch: setting plot_width breaks formatting with more than ~3 items.
    # p.plot_width = 400
    p.x_range.range_padding = 0.1
    p.xgrid.grid_line_color = None
    p.axis.minor_tick_line_color = None
    p.outline_line_color = None
    p.legend.location = "top_left"
    p.legend.orientation = "horizontal"

    show(p)

    # Output directory: second CLI argument if given, else the current dir.
    path = sys.argv[2] if len(sys.argv) > 2 else "."
    # Random suffix avoids clobbering previously exported plots.
    rand_suffix = random.randrange(100000, 99999999)
    export_png(p, filename="{0}/plot{1}.png".format(path, rand_suffix))
    print("/plot{0}.png".format(rand_suffix))
    sys.stdout.flush()
Exemple #22
0
def main(cfgfile, starttime=None, endtime=None, trajfile="", trajtype='plane',
         flashnr=0, infostr="", MULTIPROCESSING_DSET=False,
         MULTIPROCESSING_PROD=False, PROFILE_MULTIPROCESSING=False):
    """
    Main flow control. Processes radar data off-line over a period of time
    given either by the user, a trajectory file, or determined by the last
    volume processed and the current time. Multiple radars can be processed
    simultaneously

    Parameters
    ----------
    cfgfile : str
        path of the main config file
    starttime, endtime : datetime object
        start and end time of the data to be processed
    trajfile : str
        path to file describing the trajectory
    trajtype : str
        type of trajectory file. Can be either 'plane' or 'lightning'
    flashnr : int
        If larger than 0 will select a flash in a lightning trajectory file.
        If 0 the data corresponding to the trajectory of all flashes will be
        plotted
    infostr : str
        Information string about the actual data processing
        (e.g. 'RUN57'). This string is added to product files.
    MULTIPROCESSING_DSET : Bool
        If true the generation of datasets at the same processing level will
        be parallelized
    MULTIPROCESSING_PROD : Bool
        If true the generation of products from each dataset will be
        parallelized
    PROFILE_MULTIPROCESSING : Bool
        If true and code parallelized the multiprocessing is profiled

    """
    print("- PYRAD version: %s (compiled %s by %s)" %
          (pyrad_version.version, pyrad_version.compile_date_time,
           pyrad_version.username))
    print("- PYART version: " + pyart_version.version)

    # Define behaviour of warnings
    warnings.simplefilter('always')  # always print matching warnings
    # warnings.simplefilter('error')  # turn matching warnings into exceptions
    warnings.formatwarning = _warning_format  # define format

    if ALLOW_USER_BREAK:
        # Background listener posts to this queue when the user requests a
        # clean stop between volumes.
        input_queue = _initialize_listener()

    # Without dask there can be no parallel processing (nor profiling of it).
    if not _DASK_AVAILABLE:
        MULTIPROCESSING_DSET = False
        MULTIPROCESSING_PROD = False
        PROFILE_MULTIPROCESSING = False

    # check if multiprocessing profiling is necessary
    # (profiling is only supported when exactly one of the two levels is
    # parallelized)
    if not MULTIPROCESSING_DSET and not MULTIPROCESSING_PROD:
        PROFILE_MULTIPROCESSING = False
    elif MULTIPROCESSING_DSET and MULTIPROCESSING_PROD:
        PROFILE_MULTIPROCESSING = False

    if MULTIPROCESSING_DSET and MULTIPROCESSING_PROD:
        # necessary to launch tasks from tasks
        Client()

    if PROFILE_MULTIPROCESSING:
        # Register dask profilers for CPU, resource and cache statistics.
        prof = Profiler()
        rprof = ResourceProfiler()
        cprof = CacheProfiler()

        prof.register()
        rprof.register()
        cprof.register()

    # Build the configuration dictionaries from the main config file.
    cfg = _create_cfg_dict(cfgfile)
    datacfg = _create_datacfg_dict(cfg)

    # Resolve the processing period (and optional trajectory) from the
    # user-supplied times, the trajectory file, or the last processed state.
    starttime, endtime, traj = _get_times_and_traj(
        trajfile, starttime, endtime, cfg['ScanPeriod'],
        last_state_file=cfg['lastStateFile'], trajtype=trajtype,
        flashnr=flashnr)

    if infostr:
        print('- Info string : ' + infostr)

    # get data types and levels (one descriptor list per radar)
    datatypesdescr_list = list()
    for i in range(1, cfg['NumRadars']+1):
        datatypesdescr_list.append(
            _get_datatype_list(cfg, radarnr='RADAR'+'{:03d}'.format(i)))

    dataset_levels = _get_datasets_list(cfg)

    # The master file list of the first radar drives the processing loop.
    masterfilelist, masterdatatypedescr, masterscan = _get_masterfile_list(
        datatypesdescr_list[0], starttime, endtime, datacfg,
        scan_list=datacfg['ScanList'])

    nvolumes = len(masterfilelist)
    if nvolumes == 0:
        raise ValueError(
            "ERROR: Could not find any valid volumes between " +
            starttime.strftime('%Y-%m-%d %H:%M:%S') + " and " +
            endtime.strftime('%Y-%m-%d %H:%M:%S') + " for " +
            "master scan '" + str(masterscan) +
            "' and master data type '" + masterdatatypedescr +
            "'")
    print('- Number of volumes to process: ' + str(nvolumes))
    print('- Start time: ' + starttime.strftime("%Y-%m-%d %H:%M:%S"))
    print('- end time: ' + endtime.strftime("%Y-%m-%d %H:%M:%S"))

    # initial processing of the datasets
    print('\n\n- Initializing datasets:')
    dscfg, traj = _initialize_datasets(
        dataset_levels, cfg, traj=traj, infostr=infostr)

    # process all data files in file list or until user interrupts processing
    for masterfile in masterfilelist:
        if ALLOW_USER_BREAK:
            # check if user has requested exit
            try:
                input_queue.get_nowait()
                warn('Program terminated by user')
                break
            except queue.Empty:
                # no exit requested; continue with the next volume
                pass

        print('\n- master file: ' + os.path.basename(masterfile))

        master_voltime = get_datetime(masterfile, masterdatatypedescr)

        # Gather the volumes of all radars matching the master volume time.
        radar_list = _get_radars_data(
            master_voltime, datatypesdescr_list, datacfg,
            num_radars=datacfg['NumRadars'])

        # process all data sets
        dscfg, traj = _process_datasets(
            dataset_levels, cfg, dscfg, radar_list, master_voltime, traj=traj,
            infostr=infostr, MULTIPROCESSING_DSET=MULTIPROCESSING_DSET,
            MULTIPROCESSING_PROD=MULTIPROCESSING_PROD)

        # delete variables (radar volumes are large; free them before the
        # next iteration)
        del radar_list

        gc.collect()

    # post-processing of the datasets
    print('\n\n- Post-processing datasets:')
    dscfg, traj = _postprocess_datasets(
        dataset_levels, cfg, dscfg, traj=traj, infostr=infostr)

    if PROFILE_MULTIPROCESSING:
        # Stop profiling and save the combined profile visualization as PNG.
        prof.unregister()
        rprof.unregister()
        cprof.unregister()

        bokeh_plot = visualize([prof, rprof, cprof], show=False, save=False)

        profile_path = os.path.expanduser('~')+'/profiling/'
        if not os.path.isdir(profile_path):
            os.makedirs(profile_path)

        export_png(bokeh_plot, filename=(
            profile_path+datetime.utcnow().strftime('%Y%m%d%H%M%S') +
            '_profile.png'))

    print('- This is the end my friend! See you soon!')
Exemple #23
0
# Dashed vertical reference lines marking 2020-03-27 and 2020-03-30.
# NOTE(review): Span.location is given a datetime.date; this assumes the
# figure `p` (defined earlier in the original example) has a datetime x-axis —
# confirm against the figure setup.
vline1 = Span(location= dt.date(2020, 3, 27), 
             dimension='height', 
             line_color='black',
             line_dash='dashed')
vline2 = Span(location= dt.date(2020, 3, 30), 
             dimension='height', 
             line_color='black',
             line_dash='dashed')

p.renderers.extend([vline1, vline2])

# Additional formatting: plain (non-scientific) y labels, no toolbar/logo,
# axis titles and font sizes.
p.left[0].formatter.use_scientific = False
p.toolbar.logo = None
p.toolbar_location = None
p.xaxis.axis_label = "Date"
p.yaxis.axis_label = "Movement Day"
p.title.text_font_size = '15pt'
p.xaxis.axis_label_text_font_size = "12pt"
p.yaxis.axis_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "10pt"
p.xaxis.major_label_text_font_size = "10pt"

# Display plot
show(p)

# Export a static PNG alongside the interactive output.
export_png(p, 
           filename= OUT_path + "all_movement.png")
Exemple #24
0
             fill_color=color,
             legend_label=names)

#Styling: enlarge axis-label and tick fonts for publication readability.
p.xaxis.axis_label_text_font_size = "16pt"
p.yaxis.axis_label_text_font_size = "16pt"
p.xaxis.major_label_text_font_size = "14pt"
p.yaxis.major_label_text_font_size = "14pt"

# Eliminate grid
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
#Set outline
p.outline_line_width = 1
p.outline_line_color = "black"
#Eliminate minor ticks
p.yaxis.minor_tick_line_color = None
#Set font in axes to normal (non-italic)
p.xaxis.axis_label_text_font_style = "normal"
p.yaxis.axis_label_text_font_style = "normal"

bokeh.io.show(p)
#Export figure
# NOTE(review): the f-string below contains no placeholders; it is a plain
# literal path.
export_png(
    p,
    filename=
    f'hotexamples_com/figures/genetics/metatranscriptomics/stab8_curson2018_bact_depths.png'
)

# %%
Exemple #25
0
# Keep only countries with more than 8 total medals, most-decorated first.
df = df[df['medals.total'] > 8]
# BUG FIX: DataFrame.sort() was removed from pandas (0.20+); sort_values is
# the equivalent modern API.
df = df.sort_values("medals.total", ascending=False)

# get the countries and we group the data by medal type
countries = df.abbr.values.tolist()
gold = df['medals.gold'].astype(float).values
silver = df['medals.silver'].astype(float).values
bronze = df['medals.bronze'].astype(float).values

# build a dict containing the grouped data (insertion order: bronze,
# silver, gold — this sets the ring order of the donut)
medals = OrderedDict()
medals['bronze'] = bronze
medals['silver'] = silver
medals['gold'] = gold

# any of the following commented are also valid Donut inputs
#medals = list(medals.values())
#medals = np.array(list(medals.values()))
#medals = pd.DataFrame(medals)

output_file("./out/test2.html")

donut = Donut(medals, countries)

show(donut)

# Export the figure
# BUG FIX: the original exported `p`, which is undefined in this example;
# the chart built above is `donut`.
export_png(donut, filename="./out/test2.png")

# EOF
Exemple #26
0
def num_graphs_vs_min_nodes(learner_mn_list):
    """Plot counts of graphs, black labels and white labels over time for
    several minimum-node filters, then export each figure as a PNG.

    :param learner_mn_list: list of (RefaelLearner, min_nodes) tuples; each
        learner's database is filtered to graphs with at least min_nodes
        nodes (no filtering when min_nodes is falsy).
    """
    num_graphs_fig = figure(
        plot_width=800,
        plot_height=350,
        title="Number of graphs (time) for several min. nodes values",
        x_axis_label='time',
        y_axis_label="Number of graphs")
    num_blacks_fig = figure(
        plot_width=800,
        plot_height=350,
        title="Number of blacks (time) for several min. nodes values",
        x_axis_label='time',
        y_axis_label="Number of blacks")
    num_whites_fig = figure(
        plot_width=800,
        plot_height=350,
        title="Number of whites (time) for several min. nodes values",
        x_axis_label='time',
        y_axis_label="Number of whites")
    figs = [num_graphs_fig, num_blacks_fig, num_whites_fig]
    # Sparse palette indexed directly by the min_nodes value; only the
    # non-empty positions are expected to be used as `mn`.
    colors = [
        'blue', '', '', 'orange', '', 'red', '', '', 'purple', 'yellow',
        'green'
    ]
    titles = ['graphs_per_time', 'blacks_per_time', 'whites_per_time']
    for learner, mn in learner_mn_list:
        stats_dir = os.path.join(learner.base_dir(), 'fig', 'stats_graphs')
        if not os.path.exists(stats_dir):
            os.mkdir(stats_dir)
        # Apply the node-count filter only when a threshold is given.
        learner.data_loader().filter_by_nodes(
            min_nodes=mn) if mn else learner.data_loader().features_by_time
        db = learner.data_loader()
        db._load_database()

        # y_vals[0]: graph counts, y_vals[1]: blacks, y_vals[2]: whites.
        y_vals = [[], [], []]

        for t in range(len(db.multi_graphs_by_time)):
            mg = db.multi_graphs_by_time[t]
            valids = [gid for gid in mg._list_id if mg._graph_valid[gid]]

            y_vals[0].append(len(valids))

            labels = db._database._labels
            valid_labels = [labels[gr] for gr in valids]
            # BUG FIX(review): the original branched on
            # REFAEL_PARAM['white_label'] with byte-identical branches, so the
            # conditional is dropped; confirm the intended black/white split.
            n_white = sum(valid_labels)
            y_vals[2].append(n_white)
            y_vals[1].append(len(valid_labels) - n_white)
        for i in range(len(figs)):
            figs[i].line(list(range(len(db.multi_graphs_by_time))),
                         y_vals[i],
                         line_color=colors[mn],
                         legend="min. nodes: " + str(mn))
            figs[i].legend.location = "top_left"
            figs[i].toolbar.logo = None
            figs[i].toolbar_location = None
    # NOTE(review): figures are exported relative to the current working
    # directory, not to the stats_dir created above — confirm this is wanted.
    for i in range(len(figs)):
        export_png(
            figs[i],
            filename=os.path.join(os.getcwd(), "..", "fig", "stats_graphs",
                                  str(titles[i]) + ".png"))
from bokeh.io import export_png
import pandas as pd

# Load the tidied microtubule-catastrophe dataset.
df_tidy = tidy_reader("data/tidy_mt_catastrophe.xlsx")

# Keep only the catastrophe times recorded at the 12 uM concentration.
data_12 = (df_tidy.loc[df_tidy["Concentration (uM)"] == 12,
                       "Time to Catastrophe (s)"]).values

# Maximum-likelihood estimates under the Gamma-distribution model.
gamma_params = mle_iid_gamma(data_12)
alpha_mle = gamma_params[0]
beta_mle = gamma_params[1]

# Maximum-likelihood estimates under the two-rate "story" model.
story_params = mle_model(data_12)
beta1_mle = story_params[0]
beta2_mle = story_params[1]

# Overlay both fitted models on the observed data and save the figure.
comparison_plot = single_data_gs_plotter(data_12, alpha_mle, beta_mle,
                                         beta1_mle, beta2_mle)

export_png(comparison_plot, filename="figure_4.png")
Exemple #28
0
def makeHeatmap(df, what, on_what, colors, range_x, range_y, width, height):
    """Make a bokeh heatmap from a pandas dataframe and export it as
    'output_<what>.png'.

    Args:
        - df (pd df) : an already stacked pandas dataframe
        - what (str) : indicates the column name being plotted : 'counts' or 'frequencies'
        - on_what (str) : indicates the type of data : 'codons', 'amino-acids', 'amino-acids-types'
        - colors (list of str) : list with hexadecimals colors
        - range_x (list of str) : list of 'on_what' for an ordered x_axis
        - range_y (list of str) : list of the species name for an ordered y_axis. Obtained by makeLineOrderFromTree()
        - width (int) : the figure width
        - height (int) : the figure height
    """
    title = "{} of {}".format(what, on_what)

    # Color range
    # df.what did not work (what was not recognized as a variable)
    mapper = LinearColorMapper(palette=colors,
                               low=df.loc[:, [what]].min()[-1],
                               high=df.loc[:, [what]].max()[-1])

    # Heatmap. no tool bar for png
    p = figure(title=title,
               x_range=range_x,
               y_range=range_y,
               x_axis_location="above",
               plot_width=width,
               plot_height=height,
               toolbar_location=None,
               tools="")

    # Misc. styling
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_text_font_size = "10pt"
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = pi / 3

    # Circle size encodes 'pvalues'; fill color encodes the plotted column.
    p.circle(x=on_what,
             y="Species",
             size='pvalues',
             source=df,
             fill_color={
                 'field': what,
                 'transform': mapper
             },
             line_color=None)

    # Legend number format. BUG FIX: previously `frmt` was unbound for any
    # `what` other than 'counts'/'frequencies', raising NameError below;
    # fall back to a generic format instead.
    if what == "counts":
        frmt = "%d"
    elif what == "frequencies":
        frmt = "%.3f"
    else:
        frmt = "%s"

    # Add color bar
    color_bar = ColorBar(color_mapper=mapper,
                         major_label_text_font_size="10pt",
                         ticker=BasicTicker(desired_num_ticks=len(colors)),
                         formatter=PrintfTickFormatter(format=frmt),
                         label_standoff=15,
                         border_line_color=None,
                         location=(0, 0))
    p.add_layout(color_bar, 'right')

    # output
    export_png(p, filename='output_{}.png'.format(what))
Exemple #29
0
import pandas as pd
import numpy as np
from collections import OrderedDict
from bokeh.plotting import figure
from bokeh.core.properties import value
from bokeh.io import export_png

# Benchmark run to visualise: per-tick latency/request traces for CPU and GPU.
fname = "BICG_16to1"
df_cpu = pd.read_csv("data/"+fname+"_LatReq_CPU.csv", names=["tick", "val"])
df_gpu = pd.read_csv("data/"+fname+"_LatReq_GPU.csv", names=["tick", "val"])
# Put both series in one frame so they share the same tick axis below.
df_cpu['gpu'] = df_gpu['val']

length = len(df_cpu.index)
maxTick = max(df_cpu['tick'].max(), df_gpu['tick'].max())
gpuMax = df_gpu['val'].max()
cpuMax = df_cpu['val'].max()
maxVal = max(gpuMax, cpuMax)
x_range = (0, length)
y_range = (0, maxVal)

# BUG FIX: `fname_"Lat/Req"` was a SyntaxError (missing '+'); concatenate
# the run name into the title explicitly.
title = fname + " Lat/Req"
plot = figure(plot_width=2000, plot_height=600, title=title)
plot.title.align = 'center'
plot.title.text_font_size = '30pt'
# CPU trace in blue, GPU trace in red.
plot.line(df_cpu['tick'], df_cpu['val'], line_width=1, line_color='blue')
plot.line(df_cpu['tick'], df_cpu['gpu'], line_width=1, line_color='red')
output_fname = "out/"+fname+"_LatReq_200_line.png"
export_png(plot, filename=output_fname)
Exemple #30
0
                        4 * sizebase * sviw_[m] / sviw_[m].max(),
                        line_color=None)
        else:
            fig.scatter(svipm_[:, 0],
                        svipm_[:, 1],
                        fill_color='blue',
                        alpha=0.6,
                        size=sizebase * (sviw_[svimsz, :] > 0) +
                        4 * sizebase * sviw_[m, :] / sviw_[m, :].max(),
                        line_color=None)
        if size_x_axis:
            plot_gaussian(fig, svimuw_[m, :], (4. / 9.) * sviSigw_[m, :],
                          (4. / 9.) * Sig_, pal[0], 17, 9, 1, alphagauss,
                          'solid', nms[1][1])
        else:
            plot_gaussian(fig, svimuw_[m, :], (4. / 9.) * sviSigw_[m, :],
                          (4. / 9.) * Sig_, pal[0], 17, 9, 1, alphagauss,
                          'solid', nms[1][1])
        postprocess_plot(fig, '36pt', orientation='horizontal', glyph_width=80)
        fig.legend.background_fill_alpha = 0.
        fig.legend.border_line_alpha = 0.
        fig.legend.visible = (m == Ms[-1])
        fig.xaxis.visible = False
        fig.yaxis.visible = False
        figs.append(fig)
        export_png(fig,
                   filename=os.path.join(
                       fldr_figs, "d" + str(d) + "_pts" + str(m) + ".png"),
                   height=1500,
                   width=1500)
Exemple #31
0
# Omit team ID segment from the activity path column  NOTE
# Dropping '/accountor', '/elisa', '/vainu' collapses per-team paths onto
# the same activity categories.
df['ActivityPath'] = df['ActivityPath'].replace(r'/(accountor|elisa|vainu)',
                                                r'',
                                                regex=True)
# Map a bare 'asc' path to 'asc/coach'.
# NOTE(review): assumes a lone 'asc' entry always means coaching — confirm.
df['ActivityPath'] = df['ActivityPath'].replace(r'^asc$',
                                                r'asc/coach',
                                                regex=True)

# Quick sanity check of the cleaned frame before plotting.
print(df.index)
print(df.head())

### Vertical bar plots of time use by month

# Each report is written both as interactive HTML and as a static PNG;
# reset_output() clears bokeh's implicit output state between documents.
boio.output_file('./out/asc_time_use_by_month.html')
p = plot_time_use_by_time_period(df, by='M')
boio.export_png(p, filename="./out/asc_time_use_by_month.png")
boio.save(p)
boplot.reset_output()

# Same report, aggregated by week instead of month.
boio.output_file('./out/asc_time_use_by_week.html')
p = plot_time_use_by_time_period(df, by='W')
boio.export_png(p, filename="./out/asc_time_use_by_week.png")
boio.save(p)
boplot.reset_output()

### Plots of time use by activity

boio.output_file('./out/asc_time_use_by_activity_category.html')
p = plot_time_use_by_activity(df, by='C', zlbl=9, zheight=240)
boio.export_png(p, filename="./out/asc_time_use_by_activity_category.png")
boio.save(p)
Exemple #32
0
                              color=color,
                              legend="seq values - port {}".format(port),
                              alpha=0.5)

            if ack:
                w = np.array(w_input)
                p_ack.scatter(x,
                              w,
                              color=color,
                              legend="ack values - port {}".format(port),
                              alpha=0.5)
            type_string += "_{}".format(port)
        if seq:
            p_seq.legend.click_policy = "hide"
            p = p_seq
        if ack:
            p_ack.legend.click_policy = "hide"
            p = p_ack
    output_file_name = "{}{}{}".format(outputdir, outputname, type_string)
    output_file("{}.html".format(output_file_name),
                title="TCP ISN values in Honeypot",
                mode='inline')
    # In case of two plots
    if seq and ack:
        p = column(p_seq, p_ack)
    # Draw the plot(s)
    save(p)
    # Export the plot as .png
    export_png(p, filename="{}.png".format(output_file_name))
#    os.system("/usr/bin/phantomjs /usr/share/doc/phantomjs/examples/rasterize.js {0}.html {0}.png".format(output_file_name))
                p.line(DF.index, DF[ticker]*100, line_width=2, legend = ticker, color = next(colors))
        else: # add additional notation
            p.line(DF.index, DF[ticker]*100, line_width=2, legend = str(ticker + " (" + note_dict[ticker] + ")"), color = next(colors))


    p.legend.location= legend_position
    
    return (p)

# Build the per-ticker return plot, annotating tickers found in `dictionary`.
p = plot_return (DF1, note_dict = dictionary)
show (p)

# export to png
# NOTE(review): export_png needs a headless-browser driver; the bare
# except silently skips the export when that dependency is unavailable.
try:
    export_png(p, filename="Fig 2.1.PNG")
except:
    pass

#work around because Bokeh will not load when uploaded as a Jupyter Notebook on Github

%matplotlib inline
from IPython.display import Image
# Display the exported PNG inline; a no-op if the export above failed.
try:
    display(Image(filename = "Fig 2.1.PNG"))
except:
    pass

#%% [markdown]

## 4. Summary
Exemple #34
0
                    w_input.append(red.hget(line,'tcpack').decode())
                t,date,h,m = update_time(t,date,h,m)
            x = np.array(x_input, dtype=np.datetime64)
            color = palette[ports.index(port)%20]
            if seq:
                y = np.array(y_input)
                p_seq.scatter(x, y, color=color, legend="seq values - port {}".format(port), alpha=0.5)

            if ack:
                w = np.array(w_input)
                p_ack.scatter(x, w, color=color, legend="ack values - port {}".format(port), alpha=0.5)
            type_string+="_{}".format(port)
        if seq:
            p_seq.legend.click_policy = "hide"
            p = p_seq
        if ack:
            p_ack.legend.click_policy = "hide"
            p = p_ack
    output_file_name = "{}{}{}".format(outputdir,outputname,type_string)
    output_file("{}.html".format(output_file_name),
            title="TCP ISN values in Honeypot", mode='inline')
    # In case of two plots
    if seq and ack:
        p = column(p_seq,p_ack)
    # Draw the plot(s)
    save(p)
    # Export the plot as .png
    if export:
        export_png(p, filename="{}.png".format(output_file_name))
#    os.system("/usr/bin/phantomjs /usr/share/doc/phantomjs/examples/rasterize.js {0}.html {0}.png".format(output_file_name))
Exemple #35
0
def make_ridge_plot_w_examples(aoi_name,
                               file_path,
                               output_filename,
                               add_observations=True,
                               bandwidth=.05,
                               block_list=None):
    """Build a ridge plot of block building density grouped by block
    complexity, with a per-complexity observation histogram and example-block
    grids, then save the layout as HTML and export it as PNG.

    Args:
        aoi_name (str): Area-of-interest name used in the title. "Freetown"
            and "Monrovia" get AOI-specific extra sub-layouts.
        file_path: Path handed to load_aoi() to read the block dataframe.
        output_filename: Target .html path; the .png path is derived from it.
        add_observations (bool): If True, overlay each raw observation as a
            dot on its complexity baseline.
        bandwidth (float): KDE bandwidth for the density ridges.
        block_list (list or None): Block ids to annotate with 1..n markers
            on the main plot and render in the example grids. Defaults to
            no blocks.

    Returns:
        The assembled bokeh layout (also written to disk).
    """
    # BUG FIX: the signature used the mutable default `block_list=[]`;
    # use None as the sentinel instead.
    if block_list is None:
        block_list = []

    output_file(output_filename)

    max_density = 1
    probly_df = load_aoi(file_path)

    # Drop rows with no complexity value; they cannot be grouped.
    missing_compl = probly_df['complexity'].isna()
    probly_df = probly_df.loc[~missing_compl]
    probly_df['count'] = 1
    probly_df_gb = probly_df.groupby('complexity')

    # Categorical y-axis labels, one per complexity level, with maps between
    # the integer level and its label string.
    cats_int = np.arange(probly_df['complexity'].max() + 1).astype('uint8')
    cats_str = ["Complexity {}".format(i) for i in cats_int]
    int_to_str = {i: s for i, s in zip(cats_int, cats_str)}
    str_to_int = {s: i for i, s in zip(cats_int, cats_str)}

    # KDE evaluation grid; pad with the support endpoints so each ridge
    # patch closes back down to the baseline.
    x = np.linspace(0, max_density, 500)
    x_prime = np.concatenate([np.array([0]), x, np.array([1])])
    SCALE = .35 * max_density

    source = ColumnDataSource(data=dict(x=x_prime))

    title = "Block building density and\nblock complexity: {}".format(aoi_name)
    size = 900

    # Make the main figure
    p = figure(toolbar_location='above',
               border_fill_color='blue',
               border_fill_alpha=0.25,
               y_range=cats_str,
               plot_height=size,
               plot_width=size,
               x_range=(0, 1.0))
    add_title(p,
              main_title=title,
              sub_title='Distribution of block density by complexity level')

    # Now make the histogram count figure (observation count per level).
    obs_count = probly_df_gb.sum()[['count']].reset_index()
    obs_count['complexity_str'] = obs_count['complexity'].apply(
        lambda x: int_to_str[x])
    print(obs_count)
    hist = figure(toolbar_location=None,
                  border_fill_color='blue',
                  border_fill_alpha=0.25,
                  plot_width=100,
                  plot_height=p.plot_height,
                  y_range=p.y_range,
                  x_range=(0, obs_count['count'].max()))
    hist.hbar(y='complexity_str',
              right='count',
              source=obs_count,
              height=1,
              line_color=None,
              fill_color='black',
              fill_alpha=.5)
    add_title(hist, sub_title='Complexity hist.')
    hist.ygrid.grid_line_color = None
    hist.yaxis.visible = False
    hist.xaxis[0].ticker.desired_num_ticks = 5
    hist.xaxis.major_label_orientation = np.pi / 4
    hist.xaxis.minor_tick_line_color = None
    hist.xaxis.major_label_text_color = 'black'

    for i, cat_s in enumerate(reversed(cats_str)):

        cat_i = str_to_int[cat_s]
        # Levels with no observations get a flat baseline instead of a ridge.
        if cat_i not in probly_df_gb.groups.keys():
            p.line([0, 1], [cat_s, cat_s], line_color='black')
            continue

        cat_data = probly_df_gb.get_group(cat_i)['bldg_density'].values
        cat_x = [cat_s] * len(cat_data)
        # Add circles for observations
        if add_observations:
            p.circle(cat_data,
                     cat_x,
                     fill_alpha=0.5,
                     size=5,
                     fill_color='black')

        print("Processing cat = {}".format(cat_i))
        print("shape = {}".format(cat_data.shape))
        # A KDE needs more than one point; draw a baseline otherwise.
        if cat_data.shape[0] == 1:
            p.line([0, 1], [cat_s, cat_s], line_color='black')
            continue

        kernel_density = sm.nonparametric.KDEMultivariate(data=cat_data,
                                                          var_type='c',
                                                          bw=[bandwidth])
        y = ridge(cat_s, kernel_density.pdf(x), SCALE)
        source.add(y, cat_s)
        p.patch('x',
                cat_s,
                color=get_color(cat_i),
                alpha=0.6,
                line_color="black",
                source=source)

    # Axis / styling tweaks for the main figure.
    p.outline_line_color = None

    p.xaxis.ticker = FixedTicker(ticks=list(np.linspace(0, max_density, 11)))

    p.ygrid.grid_line_color = None
    p.xgrid.grid_line_color = "#dddddd"

    p.axis.minor_tick_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.axis_line_color = None
    p.xaxis[0].formatter = NumeralTickFormatter(format="0%")

    p.yaxis.major_label_text_font_size = '12pt'
    p.yaxis.major_label_text_font_style = 'bold'
    p.yaxis.major_label_text_color = 'black'

    p.xaxis.axis_label = 'Block density'
    p.xaxis.axis_label_text_font_style = 'bold'
    p.xaxis.axis_label_text_font_size = '14pt'
    p.xaxis.axis_label_text_color = 'black'
    p.xaxis.major_label_text_font_size = '12pt'
    p.xaxis.major_label_text_font_style = 'bold'
    p.xaxis.major_label_text_color = 'black'

    # Plot a 1,2,3,4 int on the main plot signalling where the example block is
    text_x = []
    text_y = []
    text = []
    for i, block_id in enumerate(block_list):
        block_obs = probly_df[probly_df['block_id'] == block_id].iloc[0]
        text_x.append(block_obs['bldg_density'])
        text_y.append("Complexity {}".format(block_obs['complexity']))
        text.append(str(i + 1))
    p.text(x=text_x, y=text_y, text=text, angle=0)

    # Add subplots (not placed in the final layout, but built for parity
    # with the reblocked grid; kept to preserve original behavior).
    sub_layout_list = make_block_example_grid(probly_df, HEIGHT, WIDTH,
                                              block_list)
    columns = 1
    toolbar_location = None
    grid = gridplot(children=sub_layout_list,
                    ncols=columns,
                    toolbar_location=toolbar_location)

    # Add subplots -- reblocked
    sub_layout_list_reblocked = make_block_example_grid(probly_df,
                                                        HEIGHT,
                                                        WIDTH,
                                                        block_list,
                                                        add_reblock=True,
                                                        region='Africa')
    grid_reblocked = gridplot(children=sub_layout_list_reblocked,
                              ncols=columns,
                              toolbar_location=toolbar_location)

    upper_left = row([p, hist])

    # AOI-specific lower-left content.
    if aoi_name == "Freetown":
        left_plot = column(
            [upper_left,
             row([make_freetown_summary(),
                  make_filler_fig()])])
    elif aoi_name == "Monrovia":
        probly_df.sort_values('bldg_density', inplace=True)
        blocks15 = list(
            probly_df[probly_df['complexity'] == 13]['block_id'].values)
        blocks8 = list(
            probly_df[probly_df['complexity'] == 8]['block_id'].values)
        # Lowest- and highest-density examples at complexity 8 and 13.
        block_list = [blocks8[0], blocks8[-1], blocks15[0], blocks15[-1]]

        plot_list = make_block_example_grid(probly_df,
                                            HEIGHT,
                                            WIDTH,
                                            block_list,
                                            add_reblock=False)
        mon_grid = gridplot(children=plot_list, ncols=2, toolbar_location=None)
        left_plot = column([upper_left, row([mon_grid, make_filler_fig()])])
    else:
        left_plot = column([upper_left])

    final_layout = row([left_plot, grid_reblocked])

    # Save as HTML, then export the same layout as a PNG.
    save(final_layout, output_filename)
    final_layout.background = 'white'
    export_png(final_layout, str(output_filename).replace("html", "png"))

    return final_layout