def test_shade(attr):
    """Shade a single aggregate with a two-color colormap and verify the
    resulting RGBA image (packed uint32 pixels) for every `how` mode.

    `attr` names an aggregate stored on the module-level `agg` dataset.
    """
    x = getattr(agg, attr)
    cmap = ['pink', 'red']

    # Logarithmic scaling
    img = tf.shade(x, cmap=cmap, how='log')
    sol = np.array([[0, 4291543295, 4286741503],
                    [4283978751, 0, 4280492543],
                    [4279242751, 4278190335, 0]], dtype='u4')
    sol = xr.DataArray(sol, coords=coords, dims=dims)
    assert img.equals(sol)

    # Cube-root scaling
    img = tf.shade(x, cmap=cmap, how='cbrt')
    sol = np.array([[0, 4291543295, 4284176127],
                    [4282268415, 0, 4279834879],
                    [4278914047, 4278190335, 0]], dtype='u4')
    sol = xr.DataArray(sol, coords=coords, dims=dims)
    assert img.equals(sol)

    # Linear scaling.  dtype='u4' added for consistency with the other
    # expected arrays in this test (shade output pixels are uint32).
    img = tf.shade(x, cmap=cmap, how='linear')
    sol = np.array([[0, 4291543295, 4289306879],
                    [4287070463, 0, 4282597631],
                    [4280361215, 4278190335, 0]], dtype='u4')
    sol = xr.DataArray(sol, coords=coords, dims=dims)
    assert img.equals(sol)

    # Histogram equalization; expected image is precomputed per attribute
    img = tf.shade(x, cmap=cmap, how='eq_hist')
    sol = xr.DataArray(eq_hist_sol[attr], coords=coords, dims=dims)
    assert img.equals(sol)

    # Custom callable: square the data, masking out missing values
    img = tf.shade(x, cmap=cmap,
                   how=lambda x, mask: np.where(mask, np.nan, x ** 2))
    sol = np.array([[0, 4291543295, 4291148543],
                    [4290030335, 0, 4285557503],
                    [4282268415, 4278190335, 0]], dtype='u4')
    sol = xr.DataArray(sol, coords=coords, dims=dims)
    assert img.equals(sol)
def test_shade(attr, span):
    """Verify tf.shade with an explicit value `span` for each `how` mode."""
    data = getattr(agg, attr)
    cmap = ['pink', 'red']

    # log/cbrt/linear all honor the span argument; expected images live in
    # the module-level `solutions` mapping keyed by mode name.
    for method in ('log', 'cbrt', 'linear'):
        img = tf.shade(data, cmap=cmap, how=method, span=span)
        assert img.equals(solutions[method])

    # span option not supported with how='eq_hist'
    img = tf.shade(data, cmap=cmap, how='eq_hist')
    expected = xr.DataArray(eq_hist_sol[attr], coords=coords, dims=dims)
    assert img.equals(expected)

    # Custom callable transfer function (span not passed)
    img = tf.shade(data, cmap=cmap,
                   how=lambda x, mask: np.where(mask, np.nan, x ** 2))
    expected = np.array([[0, 4291543295, 4291148543],
                         [4290030335, 0, 4285557503],
                         [4282268415, 4278190335, 0]], dtype='u4')
    expected = xr.DataArray(expected, coords=coords, dims=dims)
    assert img.equals(expected)
# ---- Example 3 ----
    def test_pipeline(self):
        """End-to-end check that ds.Pipeline matches manual canvas + shade."""
        df = pd.DataFrame({
            'x': np.array(([0.] * 10 + [1] * 10)),
            'y': np.array(([0.] * 5 + [1] * 5 + [0] * 5 + [1] * 5)),
            'f64': np.arange(20, dtype='f8')
        })
        # Inject a NaN so aggregation has a missing value to cope with
        df.f64.iloc[2] = np.nan

        cvs = ds.Canvas(plot_width=2, plot_height=2, x_range=(0, 1), y_range=(0, 1))

        # Default pipeline: count aggregation, default shading
        pipeline = ds.Pipeline(df, ds.Point('x', 'y'))
        agg = cvs.points(df, 'x', 'y', ds.count())
        self.assertTrue(pipeline((0, 1), (0, 1), 2, 2).equals(tf.shade(agg)))

        # Override the coloring step
        def color_fn(a):
            return tf.shade(a, 'pink', 'red')
        pipeline.color_fn = color_fn
        self.assertTrue(pipeline((0, 1), (0, 1), 2, 2).equals(color_fn(agg)))

        # Add a transform applied before coloring
        def transform_fn(a):
            return a + 1
        pipeline.transform_fn = transform_fn
        self.assertTrue(
            pipeline((0, 1), (0, 1), 2, 2).equals(color_fn(transform_fn(agg))))

        # Pipeline with an explicit sum reducer
        pipeline = ds.Pipeline(df, ds.Point('x', 'y'), ds.sum('f64'))
        agg = cvs.points(df, 'x', 'y', ds.sum('f64'))
        self.assertTrue(pipeline((0, 1), (0, 1), 2, 2).equals(tf.shade(agg)))
def test_shade_bool():
    """Boolean aggregates shade to the full color where True and fully
    transparent where False, for every `how` mode."""
    data = ~np.eye(3, dtype='bool')
    arr = xr.DataArray(data, coords=coords, dims=dims)
    expected = xr.DataArray(np.where(data, 4278190335, 0).astype('uint32'),
                            coords=coords, dims=dims)
    # All scaling modes must collapse to the same two-valued image
    for method in ('log', 'cbrt', 'linear', 'eq_hist'):
        img = tf.shade(arr, cmap=['pink', 'red'], how=method)
        assert img.equals(expected)
# ---- Example 5 ----
    def render_image(self):
        """Shade the current aggregate and optionally spread it for visibility."""
        image = tf.shade(self.agg, cmap=self.color_ramp,
                         color_key=self.colormap, how=self.transfer_function)
        # Spreading is skipped entirely when the configured size is zero
        return tf.spread(image, px=self.spread_size) if self.spread_size > 0 else image
# ---- Example 6 ----
def update_image(dataframe):
    """Re-aggregate `dataframe` for the current viewport and stream the
    shaded image into the Bokeh `image_source`."""
    dims_data = dims.data

    # Nothing to draw until the client has reported a plot size
    if not dims_data['width'] or not dims_data['height']:
        return

    width = int(math.ceil(dims_data['width'][0]))
    height = int(math.ceil(dims_data['height'][0]))
    x_range = (dims_data['xmin'][0], dims_data['xmax'][0])
    y_range = (dims_data['ymin'][0], dims_data['ymax'][0])

    canvas = ds.Canvas(plot_width=width,
                       plot_height=height,
                       x_range=x_range,
                       y_range=y_range)
    agg = canvas.points(dataframe, 'dropoff_x', 'dropoff_y',
                        ds.count('trip_distance'))
    img = tf.shade(agg, cmap=BuGn9, how='log')

    # Stream a single new frame; x/y/dw/dh anchor the image in data space
    image_source.stream({
        'image': [img.data],
        'x': [x_range[0]],
        'y': [y_range[0]],
        'dh': [y_range[1] - y_range[0]],
        'dw': [x_range[1] - x_range[0]],
    }, 1)
# ---- Example 7 ----
 def update_image():
     """Clip the viewport to the raster bounds, re-aggregate, and stream the
     shaded image into the Bokeh `image_source`."""
     dims_data = dims.data

     # Wait until the client has reported its plot dimensions
     if not dims_data['width'] or not dims_data['height']:
         return

     # Clamp the requested viewport to the raster's extent
     left = max(dims_data['xmin'][0], raster_data.bounds.left)
     bottom = max(dims_data['ymin'][0], raster_data.bounds.bottom)
     right = min(dims_data['xmax'][0], raster_data.bounds.right)
     top = min(dims_data['ymax'][0], raster_data.bounds.top)

     canvas = ds.Canvas(plot_width=dims_data['width'][0],
                        plot_height=dims_data['height'][0],
                        x_range=(left, right),
                        y_range=(bottom, top))

     img = tf.shade(canvas.raster(raster_data), cmap=Hot, how='linear')

     # Stream a single new frame anchored at the clipped lower-left corner
     image_source.stream({
         'image': [img.data],
         'x': [left],
         'y': [bottom],
         'dh': [top - bottom],
         'dw': [right - left],
     }, 1)
def test_shade_cmap_non_categorical_alpha(cmap):
    """Shading a non-categorical aggregate with a single-color cmap varies
    only the alpha channel of the output image."""
    img = tf.shade(agg.a, how='log', cmap=cmap)
    # dtype='u4' added for consistency with the other expected arrays in
    # this file (shade output pixels are uint32)
    sol = np.array([[         0,  671088640, 1946157056],
                    [2701131776,          0, 3640655872],
                    [3976200192, 4278190080,          0]], dtype='u4')
    sol = xr.DataArray(sol, coords=coords, dims=dims)
    assert img.equals(sol)
def test_shade_cmap():
    """Mixed color specs (named color, RGB tuple, hex string) are accepted."""
    palette = ['red', (0, 255, 0), '#0000FF']
    result = tf.shade(agg.a, how='log', cmap=palette)
    expected = xr.DataArray(
        np.array([[0, 4278190335, 4278236489],
                  [4280344064, 0, 4289091584],
                  [4292225024, 4294901760, 0]]),
        coords=coords, dims=dims)
    assert result.equals(expected)
def test_shade_mpl_cmap():
    """A matplotlib colormap object may be passed directly as `cmap`."""
    cm = pytest.importorskip('matplotlib.cm')
    result = tf.shade(agg.a, how='log', cmap=cm.viridis)
    expected = xr.DataArray(
        np.array([[5505348, 4283695428, 4287524142],
                  [4287143710, 5505348, 4282832267],
                  [4280213706, 4280608765, 5505348]]),
        coords=coords, dims=dims)
    assert result.equals(expected)
def test_shade_should_handle_zeros_array():
    """An all-zero aggregate must shade without raising."""
    # np.zeros is equivalent to the former 5x5 literal of zeros
    data = np.zeros((5, 5), dtype='uint32')
    arr = xr.DataArray(data, dims=['x', 'y'])
    img = tf.shade(arr, cmap=['white', 'black'], how='linear')
    assert img is not None
# ---- Example 12 ----
def timed_agg(df, filepath, plot_width=900, plot_height=int(900 * 7.0 / 12), cache_ranges=True):
    """Aggregate points from `df`, export a shaded image, and time the
    canvas + aggregation step.

    Returns (image, elapsed_seconds).  When `cache_ranges` is true the
    canvas ranges are stored back into the module-level CACHED_RANGES for
    reuse on the next call.
    """
    global CACHED_RANGES
    start = time.time()
    cvs = ds.Canvas(plot_width, plot_height,
                    x_range=CACHED_RANGES[0], y_range=CACHED_RANGES[1])
    agg = cvs.points(df, p.x, p.y)
    # Timing stops before caching/export so only aggregation is measured
    elapsed = time.time() - start
    if cache_ranges:
        CACHED_RANGES = (cvs.x_range, cvs.y_range)
    img = export_image(tf.shade(agg), filepath, export_path=".")
    return img, elapsed
# ---- Example 13 ----
def test_pipeline():
    """ds.Pipeline output must match manually composed canvas + shade calls."""
    pipeline = ds.Pipeline(df, ds.Point('x', 'y'))
    agg = cvs.points(df, 'x', 'y', ds.count())
    assert pipeline((0, 1), (0, 1), 2, 2).equals(tf.shade(agg))

    # Override the coloring step
    def color_fn(a):
        return tf.shade(a, 'pink', 'red')
    pipeline.color_fn = color_fn
    assert pipeline((0, 1), (0, 1), 2, 2).equals(color_fn(agg))

    # Add a transform applied before coloring
    def transform_fn(a):
        return a + 1
    pipeline.transform_fn = transform_fn
    assert pipeline((0, 1), (0, 1), 2, 2).equals(color_fn(transform_fn(agg)))

    # Pipeline with an explicit sum reducer
    pipeline = ds.Pipeline(df, ds.Point('x', 'y'), ds.sum('f64'))
    agg = cvs.points(df, 'x', 'y', ds.sum('f64'))
    assert pipeline((0, 1), (0, 1), 2, 2).equals(tf.shade(agg))
# ---- Example 14 ----
def create_image(x_range=x_range, y_range=y_range, w=plot_width, h=plot_height, 
                 aggregator=ds.count(), categorical=None, black=False, cmap=None):
    """Render a shaded line plot of the module-level longitude/latitude data.

    When both `categorical` and `cmap` are given, a categorical color key is
    derived from the number of distinct values in the aggregated column.
    NOTE(review): `cmap` only feeds the categorical color key; the shade call
    itself always uses the module-level `inferno` palette.
    """
    opts = {}
    if categorical and cmap:
        n_categories = len(df[aggregator.column].unique())
        opts['color_key'] = categorical_color_key(n_categories, cmap)

    canvas = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    agg = canvas.line(df, 'longitude', 'latitude',  aggregator)
    img = tf.shade(agg, cmap=inferno, **opts)

    if black:
        img = tf.set_background(img, 'black')
    return img
def waveforms_datashader(waveforms, x_values, dir_name = "datashader_temp"):
	"""Rasterize a dense stack of spike waveforms with datashader and return
	a matplotlib (fig, ax) displaying the resulting image.

	Parameters
	----------
	waveforms : 2-D array, one (10x upsampled) waveform per row
	x_values : 1-D array of x coordinates shared by all waveforms
	dir_name : directory for the temporary exported PNG; removed before return
	"""

	# Make a pandas dataframe with two columns, x and y, holding all the data. The individual waveforms are separated by a row of NaNs

	# First downsample the waveforms 10 times (to remove the effects of 10 times upsampling during de-jittering)
	waveforms = waveforms[:, ::10]
	# Then make a new array of waveforms - the last element of each waveform is a NaN
	new_waveforms = np.zeros((waveforms.shape[0], waveforms.shape[1] + 1))
	new_waveforms[:, -1] = np.nan
	new_waveforms[:, :-1] = waveforms

	# Now make an array of x's - the last element is a NaN
	x = np.zeros(x_values.shape[0] + 1)
	x[-1] = np.nan
	x[:-1] = x_values

	# Now make the dataframe
	df = pd.DataFrame({'x': np.tile(x, new_waveforms.shape[0]), 'y': new_waveforms.flatten()})	

	# Datashader function for exporting the temporary image with the waveforms
	export = partial(export_image, background = "white", export_path=dir_name)

	# Produce a datashader canvas
	canvas = ds.Canvas(x_range = (np.min(x_values), np.max(x_values)), 
			   y_range = (df['y'].min() - 10, df['y'].max() + 10),
			   plot_height=1200, plot_width=1600)
	# Aggregate the data
	agg = canvas.line(df, 'x', 'y', ds.count())   
	# Transfer the aggregated data to image using log transform and export the temporary image file
	export(tf.shade(agg, how='eq_hist'),'tempfile')

	# Read in the temporary image file
	img = imread(dir_name + "/tempfile.png")
	
	# Figure sizes chosen so that the resolution is 100 dpi
	fig,ax = plt.subplots(1, 1, figsize = (8,6), dpi = 200)
	# Start plotting
	ax.imshow(img)
	# Set ticks/labels - 10 on each axis
	ax.set_xticks(np.linspace(0, 1600, 10))
	ax.set_xticklabels(np.floor(np.linspace(np.min(x_values), np.max(x_values), 10)))
	# y labels run top-to-bottom because imshow flips the vertical axis
	ax.set_yticks(np.linspace(0, 1200, 10))
	ax.set_yticklabels(np.floor(np.linspace(df['y'].max() + 10, df['y'].min() - 10, 10)))

	# Delete the dataframe
	del df, waveforms, new_waveforms

	# Also remove the directory with the temporary image files
	shutil.rmtree(dir_name, ignore_errors = True)

	# Return and figure and axis for adding axis labels, title and saving the file
	return fig, ax
def test_shade_category():
    """Categorical aggregates accept color keys as either a list or a dict."""
    coords = [np.array([0, 1]), np.array([2, 5])]
    cat_agg = xr.DataArray(np.array([[(0, 12, 0), (3, 0, 3)],
                                    [(12, 12, 12), (24, 0, 0)]]),
                           coords=(coords + [['a', 'b', 'c']]),
                           dims=(dims + ['cats']))

    colors = [(255, 0, 0), '#0000FF', 'orange']

    # Color key given as a list, log scaling
    result = tf.shade(cat_agg, color_key=colors, how='log', min_alpha=20)
    expected = tf.Image(np.array([[2583625728, 335565567],
                                  [4283774890, 3707764991]], dtype='u4'),
                        coords=coords, dims=dims)
    assert result.equals(expected)

    # Color key given as a dict keyed by category, cbrt scaling
    colors = dict(zip('abc', colors))
    result = tf.shade(cat_agg, color_key=colors, how='cbrt', min_alpha=20)
    expected = tf.Image(np.array([[2650734592, 335565567],
                                  [4283774890, 3657433343]], dtype='u4'),
                        coords=coords, dims=dims)
    assert result.equals(expected)

    # Linear scaling
    result = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)
    expected = tf.Image(np.array([[1140785152, 335565567],
                                  [4283774890, 2701132031]], dtype='u4'),
                        coords=coords, dims=dims)
    assert result.equals(expected)

    # Custom callable transfer function
    result = tf.shade(cat_agg, color_key=colors,
                      how=lambda x, m: np.where(m, np.nan, x) ** 2,
                      min_alpha=20)
    expected = tf.Image(np.array([[503250944, 335565567],
                                  [4283774890, 1744830719]], dtype='u4'),
                        coords=coords, dims=dims)
    assert result.equals(expected)
def check_span(x, cmap, how, sol):
    """Verify that tf.shade's `span` argument clips values without mutating
    the caller's data.

    `x` is the aggregate to shade, `sol` the expected image for the
    unmodified data; the module-level `float_span` is the span under test.
    The checks below mutate `x` step by step, so statement order matters.
    """
    # Copy inputs that will be modified
    sol = sol.copy()
    x = x.copy()

    # All data no span
    img = tf.shade(x, cmap=cmap, how=how, span=None)
    assert img.equals(sol)

    # All data with span
    img = tf.shade(x, cmap=cmap, how=how, span=float_span)
    assert img.equals(sol)

    # Decrease smallest. This value should be clipped to span[0] and the
    # resulting image should be identical
    x[0, 1] = 10
    x_input = x.copy()
    img = tf.shade(x, cmap=cmap, how=how, span=float_span)
    assert img.equals(sol)

    # Check that clipping doesn't alter input array
    assert x.equals(x_input)

    # Increase largest. This value should be clipped to span[1] and the
    # resulting image should be identical
    x[2, 1] = 18
    x_input = x.copy()
    img = tf.shade(x, cmap=cmap, how=how, span=float_span)
    assert img.equals(sol)

    # Check that clipping doesn't alter input array
    assert x.equals(x_input)

    # zero out smallest. If span is working properly the zeroed out pixel
    # will be masked out and all other pixels will remain unchanged
    x[0, 1] = 0 if x.dtype.kind == 'i' else np.nan
    img = tf.shade(x, cmap=cmap, how=how, span=float_span)
    sol[0, 1] = sol[0, 0]
    assert img.equals(sol)

    # zero out the largest value
    x[2, 1] = 0 if x.dtype.kind == 'i' else np.nan
    img = tf.shade(x, cmap=cmap, how=how, span=float_span)
    sol[2, 1] = sol[0, 0]
    assert img.equals(sol)
# ---- Example 18 ----
# Flatten the radar grid's gate coordinates and moments into a dataframe,
# dropping gates where every moment (ref/rho/vel) is missing.
lat = grid.point_latitude['data']
lon = grid.point_longitude['data']

df = pd.DataFrame({'lat':lat.flatten(), 'lon':lon.flatten(), 'ref':ref.flatten(), 'rho':rho.flatten(), 'vel':vel.flatten()})
df = df.dropna(axis=0, how='all', subset=['ref', 'rho', 'vel'])

# Datashade the points, then record the aggregate's corner coordinates so
# the rendered image can be georeferenced as a mapbox image layer.
cvs = ds.Canvas(plot_width=1000, plot_height=1000)
agg = cvs.points(df, x='lon', y='lat')
coords_lat, coords_lon = agg.coords['lat'].values, agg.coords['lon'].values
coordinates = [[coords_lon[0], coords_lat[0]],
               [coords_lon[-1], coords_lat[0]],
               [coords_lon[-1], coords_lat[-1]],
               [coords_lon[0], coords_lat[-1]]]

# [::-1] flips the image vertically so it lines up with the map's y axis
img = tf.shade(agg, cmap=mpl.cm.get_cmap('magma_r'))[::-1].to_pil()


# Scatter overlay colored by reflectivity, with the shaded image underneath
fig = px.scatter_mapbox(df, lat='lat', lon='lon', color='ref', color_continuous_scale=px.colors.sequential.Magma_r, zoom=3, range_color=[-5,50])
fig.update_layout(mapbox_style = 'open-street-map',
                  mapbox_layers = [
                          {
                                  "sourcetype": "image",
                                  "source": img,
                                  "coordinates": coordinates}])
fig.show()




# ---- Example 19 ----
def update_plots(
    relayout_data,
    selected_radio,
    selected_range,
    selected_created,
):
    """Master Dash callback: recompute the selected-count indicator, the map
    figure, and the three cross-filter histograms from the current viewport
    and selections.

    Parameters (all straight from Dash callback inputs):
      relayout_data -- mapbox relayout dict; its "mapbox._derived"
          coordinates define the current viewport when present.
      selected_radio / selected_range / selected_created -- plotly selection
          payloads used to cross-filter the other figures.

    Returns a 5-tuple: (indicator figure, map figure, radio histogram,
    range histogram, created histogram).
    """
    cell_towers_ddf = get_dataset(client, "cell_towers_ddf")
    data_4326 = get_dataset(client, "data_4326")
    data_center_4326 = get_dataset(client, "data_center_4326")
    data_3857 = get_dataset(client, "data_3857")

    t0 = time.time()
    coordinates_4326 = relayout_data and relayout_data.get(
        "mapbox._derived", {}).get("coordinates", None)

    if coordinates_4326:
        # Clamp the reported viewport to the full data extent
        lons, lats = zip(*coordinates_4326)
        lon0, lon1 = max(min(lons),
                         data_4326[0][0]), min(max(lons), data_4326[1][0])
        lat0, lat1 = max(min(lats),
                         data_4326[0][1]), min(max(lats), data_4326[1][1])
        coordinates_4326 = [
            [lon0, lat0],
            [lon1, lat1],
        ]
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        # position = {}
        position = {
            "zoom": relayout_data.get("mapbox.zoom", None),
            "center": relayout_data.get("mapbox.center", None),
        }
    else:
        # No viewport reported yet: show the whole dataset
        position = {
            "zoom": 0.5,
            "pitch": 0,
            "bearing": 0,
            "center": {
                "lon": data_center_4326[0][0],
                "lat": data_center_4326[0][1]
            },
        }
        coordinates_3857 = data_3857
        coordinates_4326 = data_4326

    # Corner order expected by the mapbox image layer (counter-clockwise
    # from the top-left)
    new_coordinates = [
        [coordinates_4326[0][0], coordinates_4326[1][1]],
        [coordinates_4326[1][0], coordinates_4326[1][1]],
        [coordinates_4326[1][0], coordinates_4326[0][1]],
        [coordinates_4326[0][0], coordinates_4326[0][1]],
    ]

    x_range, y_range = zip(*coordinates_3857)
    x0, x1 = x_range
    y0, y1 = y_range

    # Build query expressions
    query_expr_xy = (
        f"(x_3857 >= {x0}) & (x_3857 <= {x1}) & (y_3857 >= {y0}) & (y_3857 <= {y1})"
    )
    query_expr_range_created_parts = []

    # Handle range selection
    range_slice = slice(None, None)
    if selected_range:
        log10_r0, log10_r1 = selected_range["range"]["x"]
        if log10_r1 < log10_r0:
            log10_r0, log10_r1 = log10_r1, log10_r0
        range_slice = slice(log10_r0, log10_r1)

        query_expr_range_created_parts.append(
            f"(log10_range >= {log10_r0}) & (log10_range <= {log10_r1})")

    # Handle created selection
    created_slice = slice(None, None)
    if selected_created:
        created_dt0, created_dt1 = pd.to_datetime(
            selected_created["range"]["x"])
        if created_dt1 < created_dt0:
            created_dt0, created_dt1 = created_dt1, created_dt0
        created_slice = slice(created_dt0, created_dt1)

        # Query strings compare against integer nanosecond timestamps
        created0, created1 = pd.Series([created_dt0,
                                        created_dt1]).astype("int")
        query_expr_range_created_parts.append(
            f"(created >= {created0}) & (created <= {created1})")

    # Get selected radio categories
    selected_radio_categories = radio_categories
    if selected_radio:
        selected_radio_categories = list(
            set(point["y"] for point in selected_radio["points"]))

    # Build dataframe containing rows that satisfy the range and created selections
    if query_expr_range_created_parts:
        query_expr_range_created = " & ".join(query_expr_range_created_parts)
        ddf_selected_range_created = cell_towers_ddf.query(
            query_expr_range_created)
    else:
        ddf_selected_range_created = cell_towers_ddf

    # Build dataframe containing rows of towers within the map viewport
    # NOTE(review): ddf_xy is never used below (the <5000 branch re-queries
    # from scratch) — candidate for removal.
    ddf_xy = cell_towers_ddf.query(
        query_expr_xy) if query_expr_xy else cell_towers_ddf

    # Build map figure
    # Create datashader aggregation of x/y data that satisfies the range and created
    # histogram selections
    cvs = ds.Canvas(plot_width=700,
                    plot_height=400,
                    x_range=x_range,
                    y_range=y_range)
    agg = cvs.points(ddf_selected_range_created,
                     x="x_3857",
                     y="y_3857",
                     agg=ds.count_cat("radio"))

    # Downselect aggregation to include only the select radio categories
    if selected_radio_categories:
        agg = agg.sel(radio=selected_radio_categories)

    # Count the number of selected towers
    n_selected = int(agg.sum())

    # Build indicator figure
    n_selected_indicator = {
        "data": [{
            "type": "indicator",
            "value": n_selected,
            "number": {
                "font": {
                    "color": "#263238"
                }
            },
        }],
        "layout": {
            "template": template,
            "height": 150,
            "margin": {
                "l": 10,
                "r": 10,
                "t": 10,
                "b": 10
            },
        },
    }

    if n_selected == 0:
        # Nothing to display
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}
        layers = []
    elif n_selected < 5000:
        # Display each individual point using a scattermapbox trace. This way we can
        # give each individual point a tooltip
        ddf_small_expr = " & ".join(
            [query_expr_xy] + [f"(radio in {selected_radio_categories})"] +
            query_expr_range_created_parts)
        ddf_small = cell_towers_ddf.query(ddf_small_expr)
        (
            lat,
            lon,
            radio,
            log10_range,
            description,
            mcc,
            net,
            created,
            status,
        ) = dask.compute(
            ddf_small.lat,
            ddf_small.lon,
            ddf_small.radio,
            ddf_small.log10_range,
            ddf_small.Description,
            ddf_small.mcc,
            ddf_small.net,
            ddf_small.created,
            ddf_small.Status,
        )

        # Format creation date column for tooltip
        created = pd.to_datetime(created.tolist()).strftime("%x")

        # Build colorscale to give scattermapbox points the appropriate color
        radio_colorscale = [[v, radio_colors[cat]] for v, cat in zip(
            np.linspace(0, 1, len(radio.cat.categories)), radio.cat.categories)
                            ]

        # Build array of the integer category codes to use as the numeric color array
        # for the scattermapbox trace
        radio_codes = radio.cat.codes

        # Build marker properties dict
        marker = {
            "color": radio_codes,
            "colorscale": radio_colorscale,
            "cmin": 0,
            "cmax": 3,
            "size": 5,
            "opacity": 0.6,
        }

        # Build customdata array for use in hovertemplate
        def to_str_unknown(cat_series):
            result = cat_series.astype(str)
            result[pd.isnull(cat_series)] = "Unknown"
            return result

        customdata = list(
            zip(
                radio.astype(str),
                ((10**log10_range)).astype(int),
                [s[:25] for s in to_str_unknown(description)],
                mcc,
                net,
                created,
                to_str_unknown(status),
            ))
        layers = []
    else:
        # Shade aggregation into an image that we can add to the map as a mapbox
        # image layer
        img = tf.shade(agg, color_key=radio_colors, min_alpha=100).to_pil()

        # Resize image to map size to reduce image blurring on zoom.
        img = img.resize((1400, 800))

        # Add image as mapbox image layer. Note that as of version 4.4, plotly will
        # automatically convert the PIL image object into a base64 encoded png string
        layers = [{
            "sourcetype": "image",
            "source": img,
            "coordinates": new_coordinates
        }]

        # Do not display any mapbox markers
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}

    # Build map figure
    map_graph = {
        "data": [{
            "type":
            "scattermapbox",
            "lat":
            lat,
            "lon":
            lon,
            "customdata":
            customdata,
            "marker":
            marker,
            "hovertemplate": ("<b>%{customdata[2]}</b><br>"
                              "MCC: %{customdata[3]}<br>"
                              "MNC: %{customdata[4]}<br>"
                              "radio: %{customdata[0]}<br>"
                              "range: %{customdata[1]:,} m<br>"
                              "created: %{customdata[5]}<br>"
                              "status: %{customdata[6]}<br>"
                              "longitude: %{lon:.3f}&deg;<br>"
                              "latitude: %{lat:.3f}&deg;<br>"
                              "<extra></extra>"),
        }],
        "layout": {
            "template":
            template,
            "uirevision":
            True,
            "mapbox": {
                "style": "light",
                "accesstoken": token,
                "layers": layers,
            },
            "margin": {
                "r": 0,
                "t": 0,
                "l": 0,
                "b": 0
            },
            "height":
            500,
            "shapes": [{
                "type": "rect",
                "xref": "paper",
                "yref": "paper",
                "x0": 0,
                "y0": 0,
                "x1": 1,
                "y1": 1,
                "line": {
                    "width": 2,
                    "color": "#B0BEC5",
                },
            }],
        },
    }

    map_graph["layout"]["mapbox"].update(position)

    # Use datashader to histogram range, created, and radio simultaneously
    agg_range_created_radio = compute_range_created_radio_hist(client)

    # Build radio histogram
    selected_radio_counts = (agg_range_created_radio.sel(
        log10_range=range_slice,
        created=created_slice).sum(["log10_range", "created"]).to_series())
    radio_histogram = build_radio_histogram(selected_radio_counts,
                                            selected_radio is None)

    # Build range histogram
    selected_range_counts = (agg_range_created_radio.sel(
        radio=selected_radio_categories,
        created=created_slice).sum(["radio", "created"]).to_series())
    range_histogram = build_range_histogram(selected_range_counts,
                                            selected_range is None)

    # Build created histogram
    selected_created_counts = (agg_range_created_radio.sel(
        radio=selected_radio_categories,
        log10_range=range_slice).sum(["radio", "log10_range"]).to_series())
    created_histogram = build_created_histogram(selected_created_counts,
                                                selected_created is None)

    print(f"Update time: {time.time() - t0}")
    return (
        n_selected_indicator,
        map_graph,
        radio_histogram,
        range_histogram,
        created_histogram,
    )
# ---- Example 20 ----
def shade_line(data, colors=None, **kwargs):
    """Shade one or more lines onto a datashader canvas and stack the images.

    Parameters
    ----------
    data : dict, DataFrame, or list/tuple of those; each must expose
        "x" and "y" columns/keys.
    colors : str or sequence of colors, one per line (optional).
    **kwargs : forwarded to ds.Canvas; plot_width and plot_height are
        required, x_range/y_range are inferred from the data when omitted.

    Returns
    -------
    A stacked shaded image, or an all-zero DataArray when no line falls
    within the requested ranges.
    """

    if "plot_width" not in kwargs or "plot_height" not in kwargs:
        raise ValueError(
            "Please provide plot_width and plot_height for the canvas.")

    if isinstance(data, (list, tuple)) and isinstance(colors, (list, tuple)):
        if len(data) != len(colors):
            raise ValueError("colors should have the same length as data.")

    # Normalize to a list of line specs; broadcast a single color string
    if isinstance(data, (dict, pd.DataFrame)):
        data = [data]
    if colors and isinstance(colors, str):
        colors = [colors] * len(data)

    if "x_range" not in kwargs or "y_range" not in kwargs:
        x_range, y_range = get_ranges(data)
        if "x_range" not in kwargs:
            kwargs["x_range"] = x_range
        if "y_range" not in kwargs:
            kwargs["y_range"] = y_range

    kwargs["x_range"], kwargs["y_range"] = _normalize_ranges(
        kwargs["x_range"], kwargs["y_range"])

    cvs = ds.Canvas(**kwargs)
    aggs = []
    cs = []

    for i, line in enumerate(data):
        df = line
        if not isinstance(line, pd.DataFrame):
            df = pd.DataFrame(line).astype(float)

        # Skip lines entirely outside the canvas ranges or with no data
        plot = True
        if "x_range" in kwargs and "y_range" in kwargs:
            plot = _is_data_in_range(df, "x", "y", kwargs["x_range"],
                                     kwargs["y_range"])
        elif "x_range" in kwargs:
            plot = _is_data_in_range(df, "x", "y", kwargs["x_range"])
        elif "y_range" in kwargs:
            plot = _is_data_in_range(df, "x", "y", y_range=kwargs["y_range"])

        if len(df["x"]) == 0 or len(df["y"]) == 0:
            plot = False

        if plot:
            aggs.append(cvs.line(df, "x", "y"))
            if colors:
                cs.append(colors[i])

    if not aggs:
        return xr.DataArray(
            np.zeros((kwargs["plot_height"], kwargs["plot_width"]), dtype=int))
    if colors:
        # cs is kept aligned with aggs (both appended only when plotted)
        imgs = [tf.shade(a, cmap=[c]) for a, c in zip(aggs, cs)]
    else:
        # BUG FIX: iterate the aggregates actually produced. The previous
        # code indexed aggs by range(len(data)), raising IndexError whenever
        # any input line was skipped as out-of-range or empty.
        imgs = [tf.shade(a) for a in aggs]
    return tf.stack(*imgs)
# ---- Example 21 — file: customZoom.py, project: dpinney/omf ----
def nodesplot(nodes, name=None, canvas=None, cat=None):
    """Render `nodes` as spread points; aggregate by category when `cat` given."""
    if canvas is None:
        canvas = ds.Canvas(**cvsopts)
    aggregator = ds.count_cat(cat) if cat is not None else None
    agg = canvas.points(nodes, 'x', 'y', aggregator)
    return tf.spread(tf.shade(agg, cmap=["#FF3333"]), px=3, name=name)
from datashader import utils

# Work out of the shared data directory (UNC path on the local network)
os.chdir("//sbs2003/Daten-CME/")

# Wall-clock reference; reset again inside __main__ before the real run
t1 = time.time()

def data_pool(file):
    """Load one parquet file as a dask dataframe, logging which file loaded."""
    frame = dd.read_parquet(file)
    print(file + " loaded")
    return frame


# Populated by the __main__ block below
data = None

if __name__ == '__main__':
    print(datetime.datetime.now())
    t1 = time.time()
    # Load every matching parquet shard in parallel, then concatenate
    files = glob.iglob('*.csv_2_.parquet')
    p = Pool(os.cpu_count())
    data = dd.concat(p.map(data_pool, files))  # reset_index(drop=True))
    # NYC bounding box, high-resolution canvas
    canvas = ds.Canvas(x_range=(-74.25, -73.7),
                       y_range=(40.5, 41),
                       plot_width=8000,
                       plot_height=8000)
    agg = canvas.points(data, 'End_Lon', 'End_Lat')
    pic = tf.set_background(tf.shade(agg, cmap=reversed(blues)),
                            color="#364564")  #364564
    utils.export_image(pic, "NYCPlot fn1", fmt=".png")
    print("time needed", time.time() - t1)
# ---- Example 23 ----
def mock_shader_func(agg, span=None):
    """Shade `agg` with viridis on a log scale over a black background."""
    shaded = tf.shade(agg, cmap=viridis, span=span, how='log')
    return tf.set_background(shaded, 'black')
# ---- Example 24 ----
        
    if black: img = tf.set_background(img, 'black')
    return img

def tests_datashader():
    """Aggregate SST observations with several reducers via datashader.

    NOTE(review): `cvs` is never defined in this function or visible module
    scope — as written every aggregation line raises NameError; a Canvas
    (like the one built at module level below) was presumably intended.
    The function also returns nothing; it only builds aggregates.
    """
    import datashader as ds
    import datashader.transfer_functions as tf
    import pandas as pd

    # Hard-coded local path: columns 6/7/14 are longitude/latitude/value.
    df = pd.read_csv('/Users/iregon/C3S/dessaps/test_data/imma_converter/observations-sst-2014-6.psv',usecols=[6,7,14],sep="|",skiprows=0) 

    # One aggregate per reducer over the same point cloud.
    agg_mean = cvs.points(df, 'longitude', 'latitude', ds.mean('observation_value'))
    agg_max = cvs.points(df, 'longitude', 'latitude', ds.max('observation_value'))
    agg_min = cvs.points(df, 'longitude', 'latitude', ds.min('observation_value'))
    agg_count = cvs.points(df, 'longitude', 'latitude', ds.count('observation_value'))
    #tf.shade(agg.where(agg > 0), cmap=["lightblue", "darkblue"])
    #img = tf.shade(agg.where(agg > 0), cmap=['green', 'yellow', 'red'], how='linear', span=[275,305])
    
 
    
# Load SST observations (columns 6/7/14 = longitude/latitude/value).
df = pd.read_csv('/Users/iregon/C3S/dessaps/test_data/imma_converter/observations-sst-2014-6.psv',usecols=[6,7,14],sep="|",skiprows=0) 
# Whole-globe canvas at 10 px per degree.
bounds = dict(x_range = (-180, 180), y_range = (-90, 90))
plot_width = 360*10
plot_height = 180*10
canvas = ds.Canvas(plot_width=plot_width, plot_height=plot_height,**bounds)
# NOTE(review): variable is named agg_mean but the reducer is ds.max —
# one of the two is presumably wrong; confirm intent.
agg_mean = canvas.points(df, 'longitude', 'latitude', ds.max('observation_value'))
# Linear shading clamped to a plausible SST range in Kelvin (275–305).
img = tf.shade(agg_mean, cmap=['green', 'yellow', 'red'], how='linear', span=[275,305])
utils.export_image(img=img,filename='Oct2431doshade.png', fmt=".png", background=None)

# Holoviews histogram of the raw values (separate from the map image).
points = hv.Points(df['observation_value'].values)
img = points.hist()
示例#25
0
    def update_plots(
        relayout_data,
        selected_radio,
        selected_range,
        selected_created,
    ):
        """Dash callback: rebuild the indicator, map and three linked
        histograms from the current viewport and cross-filter selections.

        :param relayout_data: plotly mapbox relayout event (viewport)
        :param selected_radio: selection on the radio-category histogram
        :param selected_range: selection on the log10-range histogram
        :param selected_created: selection on the created-date histogram
        :return: (indicator figure, map figure, radio histogram,
            range histogram, created histogram)

        NOTE(review): relies on many names from the enclosing closure
        (ddf, data_3857/data_4326, radio_colors, template, token, helper
        functions) — not visible in this chunk.
        """
        t0 = time.time()
        coordinates_4326 = relayout_data and relayout_data.get(
            'mapbox._derived', {}).get('coordinates', None)

        # Clamp the viewport to the data's bounding box, or fall back to
        # the full data extent when no relayout has happened yet.
        if coordinates_4326:
            lons, lats = zip(*coordinates_4326)
            lon0, lon1 = max(min(lons),
                             data_4326[0][0]), min(max(lons), data_4326[1][0])
            lat0, lat1 = max(min(lats),
                             data_4326[0][1]), min(max(lats), data_4326[1][1])
            coordinates_4326 = [
                [lon0, lat0],
                [lon1, lat1],
            ]
            coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
            # position = {}
            position = {
                'zoom': relayout_data.get('mapbox.zoom', None),
                'center': relayout_data.get('mapbox.center', None)
            }
        else:
            position = {
                'zoom': 0.5,
                'pitch': 0,
                'bearing': 0,
                'center': {
                    'lon': data_center_4326[0][0],
                    'lat': data_center_4326[0][1]
                }
            }
            coordinates_3857 = data_3857
            coordinates_4326 = data_4326

        # Corner order expected by the mapbox image-layer "coordinates".
        new_coordinates = [
            [coordinates_4326[0][0], coordinates_4326[1][1]],
            [coordinates_4326[1][0], coordinates_4326[1][1]],
            [coordinates_4326[1][0], coordinates_4326[0][1]],
            [coordinates_4326[0][0], coordinates_4326[0][1]],
        ]

        x_range, y_range = zip(*coordinates_3857)
        x0, x1 = x_range
        y0, y1 = y_range

        # Build query expressions
        query_expr_xy = f"(x_3857 >= {x0}) & (x_3857 <= {x1}) & (y_3857 >= {y0}) & (y_3857 <= {y1})"
        query_expr_range_created_parts = []

        # Handle range selection
        range_slice = slice(None, None)
        if selected_range:
            log10_r0, log10_r1 = selected_range['range']['x']
            if log10_r1 < log10_r0:
                log10_r0, log10_r1 = log10_r1, log10_r0
            range_slice = slice(log10_r0, log10_r1)

            query_expr_range_created_parts.append(
                f"(log10_range >= {log10_r0}) & (log10_range <= {log10_r1})")

        # Handle created selection
        created_slice = slice(None, None)
        if selected_created:
            created_dt0, created_dt1 = pd.to_datetime(
                selected_created['range']['x'])
            if created_dt1 < created_dt0:
                created_dt0, created_dt1 = created_dt1, created_dt0
            created_slice = slice(created_dt0, created_dt1)

            # Query compares against the integer (ns) representation.
            created0, created1 = pd.Series([created_dt0,
                                            created_dt1]).astype('int')
            query_expr_range_created_parts.append(
                f"(created >= {created0}) & (created <= {created1})")

        # Get selected radio categories
        selected_radio_categories = radio_categories
        if selected_radio:
            selected_radio_categories = list(
                set(point['y'] for point in selected_radio['points']))

        # Build dataframe containing rows that satisfy the range and created selections
        if query_expr_range_created_parts:
            query_expr_range_created = ' & '.join(
                query_expr_range_created_parts)
            ddf_selected_range_created = ddf.query(query_expr_range_created)
        else:
            ddf_selected_range_created = ddf

        # Build dataframe containing rows of towers within the map viewport
        ddf_xy = ddf.query(query_expr_xy) if query_expr_xy else ddf

        # Build map figure
        # Create datashader aggregation of x/y data that satisfies the range and created
        # histogram selections
        cvs = ds.Canvas(plot_width=700,
                        plot_height=400,
                        x_range=x_range,
                        y_range=y_range)
        agg = cvs.points(ddf_selected_range_created,
                         x='x_3857',
                         y='y_3857',
                         agg=ds.count_cat('radio'))

        # Downselect aggregation to include only the select radio categories
        if selected_radio_categories:
            agg = agg.sel(radio=selected_radio_categories)

        # Count the number of selected towers
        n_selected = int(agg.sum())

        # Build indicator figure
        n_selected_indicator = {
            'data': [{
                'type': 'indicator',
                'value': n_selected,
                'number': {
                    'font': {
                        'color': '#263238'
                    }
                }
            }],
            'layout': {
                'template': template,
                'height': 150,
                'margin': {
                    'l': 10,
                    'r': 10,
                    't': 10,
                    'b': 10
                }
            }
        }

        # Three rendering strategies depending on how many points survived
        # the filters: nothing, individual markers, or a datashader image.
        if n_selected == 0:
            # Nothing to display
            lat = [None]
            lon = [None]
            customdata = [None]
            marker = {}
            layers = []
        elif n_selected < 5000:
            # Display each individual point using a scattermapbox trace. This way we can
            # give each individual point a tooltip
            ddf_small_expr = ' & '.join(
                [query_expr_xy] + [f'(radio in {selected_radio_categories})'] +
                query_expr_range_created_parts)
            ddf_small = ddf.query(ddf_small_expr)
            lat, lon, radio, log10_range, description, mcc, net, created, status = dask.compute(
                ddf_small.lat, ddf_small.lon, ddf_small.radio,
                ddf_small.log10_range, ddf_small.Description, ddf_small.mcc,
                ddf_small.net, ddf_small.created, ddf_small.Status)

            # Format creation date column for tooltip
            created = pd.to_datetime(created.tolist()).strftime('%x')

            # Build colorscale to give scattermapbox points the appropriate color
            radio_colorscale = [[
                v, radio_colors[cat]
            ] for v, cat in zip(np.linspace(0, 1, len(radio.cat.categories)),
                                radio.cat.categories)]

            # Build array of the integer category codes to use as the numeric color array
            # for the scattermapbox trace
            radio_codes = radio.cat.codes

            # Build marker properties dict
            marker = {
                'color': radio_codes,
                'colorscale': radio_colorscale,
                'cmin': 0,
                'cmax': 3,
                'size': 5,
                'opacity': 0.6,
            }

            # Build customdata array for use in hovertemplate
            def to_str_unknown(cat_series):
                # Render NaN categories as "Unknown" for the tooltip.
                result = cat_series.astype(str)
                result[pd.isnull(cat_series)] = "Unknown"
                return result

            customdata = list(
                zip(
                    radio.astype(str),
                    ((10**log10_range)).astype(int),
                    [s[:25] for s in to_str_unknown(description)],
                    mcc,
                    net,
                    created,
                    to_str_unknown(status),
                ))
            layers = []
        else:
            # Shade aggregation into an image that we can add to the map as a mapbox
            # image layer
            img = tf.shade(agg, color_key=radio_colors, min_alpha=100).to_pil()

            # Resize image to map size to reduce image blurring on zoom.
            img = img.resize((1400, 800))

            # Add image as mapbox image layer. Note that as of version 4.4, plotly will
            # automatically convert the PIL image object into a base64 encoded png string
            layers = [{
                "sourcetype": "image",
                "source": img,
                "coordinates": new_coordinates
            }]

            # Do not display any mapbox markers
            lat = [None]
            lon = [None]
            customdata = [None]
            marker = {}

        # Build map figure
        map_graph = {
            'data': [{
                'type':
                'scattermapbox',
                'lat':
                lat,
                'lon':
                lon,
                'customdata':
                customdata,
                'marker':
                marker,
                'hovertemplate': ("<b>%{customdata[2]}</b><br>"
                                  "MCC: %{customdata[3]}<br>"
                                  "MNC: %{customdata[4]}<br>"
                                  "radio: %{customdata[0]}<br>"
                                  "range: %{customdata[1]:,} m<br>"
                                  "created: %{customdata[5]}<br>"
                                  "status: %{customdata[6]}<br>"
                                  "longitude: %{lon:.3f}&deg;<br>"
                                  "latitude: %{lat:.3f}&deg;<br>"
                                  "<extra></extra>")
            }],
            'layout': {
                'template':
                template,
                'uirevision':
                True,
                'mapbox': {
                    'style': "light",
                    'accesstoken': token,
                    'layers': layers,
                },
                'margin': {
                    "r": 0,
                    "t": 0,
                    "l": 0,
                    "b": 0
                },
                'height':
                500,
                'shapes': [{
                    'type': 'rect',
                    'xref': 'paper',
                    'yref': 'paper',
                    'x0': 0,
                    'y0': 0,
                    'x1': 1,
                    'y1': 1,
                    'line': {
                        'width': 2,
                        'color': '#B0BEC5',
                    }
                }]
            },
        }

        map_graph['layout']['mapbox'].update(position)

        # Use datashader to histogram range, created, and radio simultaneously
        agg_range_created_radio = compute_range_created_radio_hist(ddf_xy)

        # Build radio histogram
        selected_radio_counts = agg_range_created_radio.sel(
            log10_range=range_slice,
            created=created_slice).sum(['log10_range', 'created']).to_series()
        radio_histogram = build_radio_histogram(selected_radio_counts,
                                                selected_radio is None)

        # Build range histogram
        selected_range_counts = agg_range_created_radio.sel(
            radio=selected_radio_categories,
            created=created_slice).sum(['radio', 'created']).to_series()
        range_histogram = build_range_histogram(selected_range_counts,
                                                selected_range is None)

        # Build created histogram
        selected_created_counts = agg_range_created_radio.sel(
            radio=selected_radio_categories,
            log10_range=range_slice).sum(['radio', 'log10_range']).to_series()
        created_histogram = build_created_histogram(selected_created_counts,
                                                    selected_created is None)

        print(f"Update time: {time.time() - t0}")
        return (n_selected_indicator, map_graph, radio_histogram,
                range_histogram, created_histogram)
示例#26
0
## Save output (in case of large file
# Create grid to plot on (time is in hours)
x, y = np.meshgrid(tao, depths)

# Persist the temperature field; variable is named 'temp' in the dataset.
da = xr.DataArray(Temps, coords=[('depth', depths),
                                 ('tau', tao)]).to_dataset(name='temp')
da.to_netcdf(f'data/dt_{dt}_dz_{dz}_data.nc')

## Sample output plot
# NOTE: does not work for large (e.g. 1 billion) points, need to use
# a different plotting package like datashade
fig, ax = plt.subplots(**{'figsize': (10, 5)})

# Plot temperatures
try:
    temp_plt = ax.pcolormesh(x, y, Temps)
    # temp_plt = ax.contourf(x, y, Temps) # Contour plot

    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Depth [m]')

    fig.colorbar(temp_plt)

    plt.savefig(f"figures/dt_{dt}_{dz}_output.png", dpi=300)
    plt.show()

except Exception as e:
    print(e)
    # Fallback to datashader rasterization for huge grids.
    # NOTE(review): the dataset variable was created as 'temp' above, so
    # da['Temps'] looks like a KeyError — confirm intended key.
    tf.shade(ds.Canvas(plot_height=400, plot_width=1200).raster(da['Temps']))
示例#27
0
def spectrogram_shaded(S,
                       time,
                       fs: int,
                       start_time=0,
                       end_time=None,
                       onsets=None):
    """Build a plotly heatmap figure of a log-scaled spectrogram,
    downsampled to a fixed width via datashader.

    :param S: spectrogram 2d array; rows are time steps, last axis is
        frequency bins (assumed — TODO confirm against producer)
    :param time: 1d array of timestamps, one per row of S
    :param fs: sample rate in Hz; frequency axis spans 0..fs/2
    :param start_time: optional crop start in seconds; 0 disables cropping
    :param end_time: optional crop end in seconds
    :param onsets: unused — NOTE(review): confirm whether onset markers
        were meant to be drawn
    :return: plotly figure dict (heatmap trace + layout)
    """
    # Crop only when both bounds are truthy (start_time=0 skips cropping).
    if start_time and end_time:
        condition = (time > start_time) & (time < end_time)
        S = S[condition]
        time = time[condition]

    freq = np.linspace(0, fs // 2, num=S.shape[-1])

    # highres_threshold = 4000
    # if len(time) < highres_threshold:
    #     x = time
    #     y = freq
    #     z = np.log(S).tolist()
    # else:
    S = np.log(S)
    xrdata = xr.DataArray(S,
                          coords={
                              'time': time,
                              'freq': freq
                          },
                          dims=('time', 'freq'))
    x_range = [time[0], time[-1]]
    y_range = [0, freq[-1]]
    # Fixed 1500-px width; height keeps one pixel per frequency bin.
    cvs = ds.Canvas(plot_width=1500,
                    plot_height=S.shape[-1],
                    x_range=x_range,
                    y_range=y_range)

    # Transpose so freq becomes the vertical axis of the raster.
    raster = cvs.raster(xrdata.T, interpolate='nearest')
    img = tf.shade(raster)
    arr = np.array(img)

    # Rebuild axis coordinates matching the rasterized image dimensions.
    z = arr.tolist()
    x = np.linspace(x_range[0], x_range[1], len(z[0]))
    y = np.linspace(y_range[0], y_range[1], len(z))

    fig = {
        'data': [{
            'x':
            x,
            'y':
            y,
            'z':
            z,
            'type':
            'heatmap',
            'showscale':
            False,
            'colorscale': [[0, '#75baf2'], [1, 'rgba(255, 255, 255,0)']],
            'hovertemplate':
            "Frequency: %{y:.0f} Hz<br>" + "Time: %{x:.2f} s<br>" +
            "<extra></extra>",
        }],
        'layout': {
            'height': 400,
            'xaxis': {
                'title': 'Time [s]',
                'showline': True,
                'zeroline': False,
                'showgrid': False,
                'showticklabels': True
            },
            'yaxis': {
                'title': 'Frequency [Hz]',
                'showline': False,
                'zeroline': False,
                'showgrid': False,
                'showticklabels': True,
            },
            'title': 'Spectrogram'
        }
    }
    return fig
示例#28
0
    return pd.DataFrame(dict(x=x, y=y))

@jit
def clifford(a, b, c, d, x, y):
    """One step of the Clifford attractor map: (x, y) -> (x', y')."""
    x_next = np.sin(a * y) + c * np.cos(a * x)
    y_next = np.sin(b * x) + d * np.cos(b * y)
    return x_next, y_next

#---------------------------------------------------------------------------------

# Reversed colorcet palettes plus inferno/viridis for attractor rendering.
cmaps =  [palette[p][::-1] for p in ['bgy', 'bmw', 'bgyw', 'bmy', 'fire', 'gray', 'kbc', 'kgy']]
cmaps += [inferno[::-1], viridis[::-1]]
cvs = ds.Canvas(plot_width = 500, plot_height = 500)
ds.transfer_functions.Image.border=0

#---------------------------------------------------------------------------------

# Parameter  :              a=xxx,  b=xxx,  c=xxx,  d=xxx, 
df = trajectory(clifford,   -1.8,   -2.0,   -0.5,   -0.9,   0,   0)
#df = trajectory(clifford,   -1.4,    1.6,    1.0,    0.7,   0,   0)
#df = trajectory(clifford,    1.7,    1.7,    0.6,    1.2,   0,   0)
#df = trajectory(clifford,   -1.7,    1.3,   -0.1,   -1.2,   0,   0)

# Try to put a value in xxx.
#df = trajectory(clifford,    xxx,    xxx,    xxx,    xxx,   0,   0)

agg = cvs.points(df, 'x', 'y')
# NOTE(review): `n` is not defined in this chunk — presumably the number of
# trajectory points; the span caps shading at n/60000 counts. Confirm.
img = tf.shade(agg, cmap = cmaps[1], how='linear', span = [0, n/60000])
img_map(img,"attractor")

示例#29
0
文件: myShader.py 项目: cudmore/SanPy
# # Default plot ranges:
x_range = (start, end)
y_range = (1.2 * signal.min(), 1.2 * signal.max())

# Create a dataframe
data['Time'] = np.linspace(start, end, n)
df = pd.DataFrame(data)

time_start = df['Time'].values[0]
time_end = df['Time'].values[-1]

cvs = ds.Canvas(x_range=x_range, y_range=y_range)

# Rasterize every column as a line, then shade only the 'Signal' column.
aggs = OrderedDict((c, cvs.line(df, 'Time', c)) for c in cols)
img = tf.shade(aggs['Signal'])

arr = np.array(img)
z = arr.tolist()

# axes: dims = (image width, image height)
dims = len(z[0]), len(z)

x = np.linspace(x_range[0], x_range[1], dims[0])
# BUG FIX: the y axis must have one coordinate per image *row* (height,
# dims[1]); the original used dims[0] (width), which misaligns y for any
# non-square canvas.
y = np.linspace(y_range[0], y_range[1], dims[1])

#
# make a second df2 to hold spike times
'''
					'x': [spike['thresholdSec'] for spike in ba.spikeDict if (spike['thresholdSec'] > x0 and spike['thresholdSec'] < x1)],
					'y': [spike['thresholdVal'] for spike in ba.spikeDict if (spike['thresholdSec'] > x0 and spike['thresholdSec'] < x1)],
示例#30
0
def create_map2():
    """Yield a sequence of progressively composited terrain images as PIL
    objects: grayscale terrain, colored elevation, + hillshade, + water,
    and finally + trees.

    NOTE(review): `global cvs` is declared but cvs is never used here, and
    `tree_colorize` is read from module scope without a global declaration
    (only needed for assignment, so this works) — confirm both are intended.
    """

    global cvs
    global terrain
    global water
    global trees

    # 1) plain grayscale terrain
    img = stack(shade(terrain, cmap=['black', 'white'], how='linear'))

    yield img.to_pil()

    # 2) elevation-colored terrain
    img = stack(shade(terrain, cmap=Elevation, how='linear'))

    yield img.to_pil()

    # 3) + semi-transparent hillshade lit from azimuth 210
    img = stack(
        shade(terrain, cmap=Elevation, how='linear'),
        shade(hillshade(terrain, azimuth=210),
              cmap=['black', 'white'],
              how='linear',
              alpha=128),
    )

    yield img.to_pil()

    # 4) + water layer between terrain and hillshade
    img = stack(
        shade(terrain, cmap=Elevation, how='linear'),
        shade(water, cmap=['aqua', 'white']),
        shade(hillshade(terrain, azimuth=210),
              cmap=['black', 'white'],
              how='linear',
              alpha=128),
    )

    yield img.to_pil()

    # 5) full composite: trees raise the hillshaded surface and are drawn
    # on top in green
    img = stack(
        shade(terrain, cmap=Elevation, how='linear'),
        shade(water, cmap=['aqua', 'white']),
        shade(hillshade(terrain + trees, azimuth=210),
              cmap=['black', 'white'],
              how='linear',
              alpha=128), shade(tree_colorize, cmap='limegreen', how='linear'))

    yield img.to_pil()
    # NOTE(review): the final frame is yielded four times in total —
    # presumably to pad an animation; confirm this is deliberate.
    yield img.to_pil()
    yield img.to_pil()
    yield img.to_pil()
示例#31
0
def run_kk(params, run_kk=True):
    """Run KlustaKwik on one tetrode file, then render per-feature cluster
    scatter images into an HTML report next to the .clu file.

    :param params: (cfg, target_path) tuple — cfg is the config dict,
        target_path the tetrode file (pathlib.Path)
    :param run_kk: when False (or a .clu file already exists with
        cfg['skip']), skip the KlustaKwik invocation and only regenerate
        the report from existing outputs
    NOTE(review): mixes the module `logging` and a `logger` instance, and
    shadows its own name with the run_kk parameter — confirm intended.
    """
    cfg, target_path = params
    tt_fname = target_path.name
    tetrode_file_stem = tt_fname.split(".")[0]
    tetrode_file_elecno = tt_fname.split(".")[-1]
    working_dir = target_path.parent
    logging.debug(f'Tetrode name: {tt_fname}, stem: {tetrode_file_stem}, ElecNo: {tetrode_file_elecno}')
    clu_file = working_dir / (tetrode_file_stem + f'.clu.{tetrode_file_elecno}')
    if clu_file.exists() and cfg['skip']:
        logging.error(f'Clu file {clu_file} exists. Skipping.')
        run_kk = False

    # Read in feature validity
    # NOTE(review): when the .validity file is missing this still tries to
    # open it and will raise FileNotFoundError despite the warning.
    validity_path = target_path.with_suffix('.validity')
    if not validity_path.exists():
        logger.warning('No explicit feature validity given, falling back to default = all used.')
    with open(validity_path) as vfp:
        validity_string = vfp.readline()
    logger.debug(f'Channel validity: {validity_string}')

    # Combine executable and arguments
    kk_executable = cfg["kk_executable"]
    kk_cmd = f'{kk_executable} {tetrode_file_stem} -ElecNo {tetrode_file_elecno} -UseFeatures {validity_string}'
    if cfg['KKv3']:
        kk_cmd += ' -UseDistributional 0'

    logger.debug('kk_cmd:' )
    # additional command line options
    if (cfg['kk_additional_args']):
        kk_cmd += ' ' + cfg['kk_additional_args']

    kk_cmd_list = kk_cmd.split(' ')
    logger.debug(f'KK COMMAND: {kk_cmd}')
    logger.debug(f'KK COMMAND LIST: {kk_cmd_list}')

    # Call KlustaKwik and gather output
    # TODO: Use communicate to interact with KK, i.e. write to log and monitor progress
    #       see https://stackoverflow.com/questions/21953835/run-subprocess-and-print-output-to-logging
    logger.info('Starting KlustaKwik process')
    if cfg['PRINT_KK_OUTPUT']:
        stdout = None
    else:
        stdout = subprocess.PIPE

    if run_kk:
        kk_call = subprocess.run(kk_cmd_list, stderr=subprocess.STDOUT, stdout=stdout)
        kk_error = kk_call.returncode

        logger.debug('Writing KlustaKwik log file')
        logger.debug('Clu File: ' + str(clu_file))
        if kk_call.stdout is not None:
            with open(clu_file.with_suffix('.log'), 'w') as log_file:
                log_file.write(kk_call.stdout.decode('ascii'))
        else:
            logging.warning('Missing stdout, not writing log file!')

        # Check call return code and output
        if kk_error:
            logging.error(f'KlustaKwik error code: {kk_error}')
            exit(kk_error)
        else:
            logging.debug(f'KlustaKwik successful: {kk_error}')

    # Load clu file
    # First line of a .clu file is the cluster count, hence skiprows=1.
    logger.debug(f'Loading {clu_file}')
    clu_df = pd.read_csv(clu_file, dtype='category', names=['cluster_id'], skiprows=1)
    cluster_labels = clu_df['cluster_id'].cat.categories
    num_clusters = len(cluster_labels)
    logger.info(f'{len(clu_df)} spikes in {num_clusters} clusters')

    # Find all feature .fd files, ordered by modification time.
    feature_files = list(working_dir.glob(tetrode_file_stem + '_*.fd'))
    ff_sizes = [ff.stat().st_mtime for ff in feature_files]
    feature_files = [f for t, f in sorted(zip(ff_sizes, feature_files))]
    if not len(feature_files):
        raise FileNotFoundError(f'No Feature Files found in {working_dir}')

    # TODO: Stupid, the feature names are in the .fd file already
    feature_names = [str(ff.name).split(tetrode_file_stem + '_')[1].split('.')[0] for ff in feature_files]
    logger.info(f'Loading features: {feature_names}')

    # Render one HTML section per feature file with datashaded scatter
    # plots for every pairwise combination of feature dimensions.
    color_keys = cfg['CLUSTER_COLORS']
    with open(clu_file.with_suffix('.html'), 'w') as crf:
        crf.write('<head></head><body><h1>{}</h1>'.format(clu_file.name))
        for fd_file, fet_name in zip(feature_files, feature_names):
            crf.write('<h3>Feature: {}</h3>\n'.format(fet_name))
            logger.info(f'Generating images for feature {fet_name}')
            if not fd_file.exists():
                continue

            logger.debug(f'Loading {fd_file}')
            mat_fet = h5s.loadmat(str(fd_file), appendmat=False)

            fd_df = pd.DataFrame(mat_fet['FeatureData'])
            fd_df.rename(columns={c: str(c) for c in fd_df.columns}, inplace=True)
            if not len(clu_df) == len(fd_df):
                raise ValueError(f'Number of cluster labels ({num_clusters}) does not match number of spikes'
                                 f'in {fd_file} ({len(fd_df)})')

            fd_df['clu_id'] = clu_df.cluster_id.astype('category')
            logger.debug(f'Feature {fet_name} loaded with {len(fd_df)} spikes, {fd_df.shape[1] - 1} dimensions ')

            images = []
            titles = []
            for cc in combinations(map(str, range(len(fd_df.columns) - 1)), r=2):
                fet_title = f'x: {fet_name}:{cc[0]} vs y: {fet_name}:{cc[1]}'
                # Clip axes to percentiles to keep outliers from blowing
                # up the plot range.
                x_range = (np.percentile(fd_df[cc[0]], 0.01), np.percentile(fd_df[cc[0]], 99.9))
                y_range = (np.percentile(fd_df[cc[1]], 0.01), np.percentile(fd_df[cc[1]], 99.9))

                logger.debug(f'shading {len(fd_df)} points in {fd_df.shape[1] - 1} dimensions')
                canvas = ds.Canvas(plot_width=300, plot_height=300, x_range=x_range, y_range=y_range)
                try:
                    agg = canvas.points(fd_df, x=cc[0], y=cc[1], agg=ds.count_cat('clu_id'))
                    with np.errstate(invalid='ignore'):
                        img = ds_tf.shade(agg, how='log', color_key=color_keys)
                        img = img if cfg['no_spread'] else ds_tf.spread(img, px=1)
                except ZeroDivisionError:
                    img = None
                images.append(img)
                titles.append(fet_title)

            logger.debug(f'Creating plot for {fet_name}')
            fet_fig = ds_plot_features(images, how='log', fet_titles=titles)
            crf.write(fig2html(fet_fig) + '</br>\n')
            plt.close(fet_fig)
示例#32
0
def mock_shader_func(agg, span=None):
    """Shade *agg* with a log-scaled viridis colormap on a black background.

    :param agg: datashader aggregate to shade
    :param span: optional (min, max) range forwarded to tf.shade
    :return: shaded image with black background applied
    """
    img = tf.shade(agg, cmap=viridis, span=span, how='log')
    img = tf.set_background(img, 'black')
    return img
# Export helpers pre-bound to a black background and an export directory.
background = "black"
export = partial(export_image, background=background, export_path="export")
#cm = partial(colormap_select, reverse=(background!="black"))
cm = partial(colormap_select, reverse=(background != "black"))

display(HTML("<style>.container {width:100%} !important; }</style>"))

# In[10]:

# Aggregate tree positions over the Frankfurt (FFM) extent.
cvs = ds.Canvas(plot_width, plot_height, *FFM)
agg = cvs.points(df, 'RECHTSWERT', 'HOCHWERT')

# In[23]:

# Export image on different styles or conditions
export(tf.shade(agg, cmap=cm(Greys9, 0.2), how='log'), "Frankfurt_Baumbestand")

# In[22]:

from colorcet import fire
export(tf.shade(agg, cmap=cm(fire, 0.4), how='log'),
       "Frankfurt_Baumbestand_Fire")

# In[21]:

from colorcet import glasbey
export(tf.shade(agg, cmap=cm(glasbey, 0.4), how='eq_hist'),
       "Frankfurt_Baumbestand_Glasbey")

# In[15]:
示例#34
0
def connectivity_base(
    x,
    y,
    edge_df,
    highlights=None,
    edge_bundling=None,
    edge_cmap="gray_r",
    show_points=True,
    labels=None,
    values=None,
    theme=None,
    cmap="Blues",
    color_key=None,
    color_key_cmap="Spectral",
    background="black",
    figsize=(7, 5),
    ax=None,
    sort="raw",
    save_show_or_return="return",
    save_kwargs=None,
):
    """Plot connectivity relationships of the underlying UMAP
    simplicial set data structure. Internally UMAP will make
    use of what can be viewed as a weighted graph. This graph
    can be plotted using the layout provided by UMAP as a
    potential diagnostic view of the embedding. Currently this only works
    for 2D embeddings. While there are many optional parameters
    to further control and tailor the plotting, you need only
    pass in the trained/fit umap model to get results. This plot
    utility will attempt to do the hard work of avoiding
    overplotting issues and provide options for plotting the
    points as well as using edge bundling for graph visualization.

    Parameters
    ----------
        x: `int`
            The first component of the embedding.
        y: `int`
            The second component of the embedding.
        edge_df: `pd.DataFrame`
            The dataframe denotes the graph edge pairs. The three columns
            include 'source', 'target' and 'weight'.
        highlights: `list`, `list of list` or None (default: `None`)
            The list that cells will be restricted to.
        edge_bundling: string or None (optional, default None)
            The edge bundling method to use. Currently supported
            are None or 'hammer'. See the datashader docs
            on graph visualization for more details.
        edge_cmap: string (default 'gray_r')
            The name of a matplotlib colormap to use for shading/
            coloring the edges of the connectivity graph. Note that
            the ``theme``, if specified, will override this.
        show_points: bool (optional True)
            Whether to display the points over top of the edge
            connectivity. Further options allow for coloring/
            shading the points accordingly.
        labels: array, shape (n_samples,) (optional, default None)
            An array of labels (assumed integer or categorical),
            one for each data sample.
            This will be used for coloring the points in
            the plot according to their label. Note that
            this option is mutually exclusive to the ``values``
            option.
        values: array, shape (n_samples,) (optional, default None)
            An array of values (assumed float or continuous),
            one for each sample.
            This will be used for coloring the points in
            the plot according to a colorscale associated
            to the total range of values. Note that this
            option is mutually exclusive to the ``labels``
            option.
        theme: string (optional, default None)
            A color theme to use for plotting. A small set of
            predefined themes are provided which have relatively
            good aesthetics. Available themes are:
               * 'blue'
               * 'red'
               * 'green'
               * 'inferno'
               * 'fire'
               * 'viridis'
               * 'darkblue'
               * 'darkred'
               * 'darkgreen'
        cmap: string (optional, default 'Blues')
            The name of a matplotlib colormap to use for coloring
            or shading points. If no labels or values are passed
            this will be used for shading points according to
            density (largely only of relevance for very large
            datasets). If values are passed this will be used for
            shading according the value. Note that if theme
            is passed then this value will be overridden by the
            corresponding option of the theme.
        color_key: dict or array, shape (n_categories) (optional, default None)
            A way to assign colors to categoricals. This can either be
            an explicit dict mapping labels to colors (as strings of form
            '#RRGGBB'), or an array like object providing one color for
            each distinct category being provided in ``labels``. Either
            way this mapping will be used to color points according to
            the label. Note that if theme
            is passed then this value will be overridden by the
            corresponding option of the theme.
        color_key_cmap: string (optional, default 'Spectral')
            The name of a matplotlib colormap to use for categorical coloring.
            If an explicit ``color_key`` is not given a color mapping for
            categories can be generated from the label list and selecting
            a matching list of colors from the given colormap. Note
            that if theme
            is passed then this value will be overridden by the
            corresponding option of the theme.
        background: string (optional, default 'black')
            The color of the background. Usually this will be either
            'white' or 'black', but any color name will work. Ideally
            one wants to match this appropriately to the colors being
            used for points etc. This is one of the things that themes
            handle for you. Note that if theme
            is passed then this value will be overridden by the
            corresponding option of the theme.
        figsize: tuple (optional, default (7, 5))
            The desired (width, height) of the plot in inches; converted
            to pixels using the matplotlib ``figure.dpi`` rcParam.
        ax: matplotlib axis or None (optional, default None)
            The axis to plot into. A new figure and axis are created
            when None.
        sort: `str` (optional, default `raw`)
            The method to reorder data so that high values points will be on top of background points. Can be one of
            {'raw', 'abs'}, i.e. sorted by raw data or sort by absolute values.
        save_show_or_return: {'show', 'save', 'return'} (default: `return`)
            Whether to save, show or return the figure.
        save_kwargs: `dict` or None (default: `None`)
            A dictionary that will passed to the save_fig function; None is
            treated as an empty dictionary. By default the save_fig function
            will use the {"path": None, "prefix": 'connectivity_base', "dpi": None, "ext": 'pdf', "transparent": True, "close":
            True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys
            according to your needs.

    Returns
    -------
    result: matplotlib axis
        The result is a matplotlib axis with the relevant plot displayed.
        If you are using a notebooks and have ``%matplotlib inline`` set
        then this will simply display inline.
    """

    import matplotlib.pyplot as plt
    import datashader as ds
    import datashader.transfer_functions as tf
    import datashader.bundling as bd

    # Use a None sentinel rather than a mutable `{}` default: a shared dict
    # default could leak state between calls if it were ever mutated.
    save_kwargs = {} if save_kwargs is None else save_kwargs

    dpi = plt.rcParams["figure.dpi"]

    # A theme overrides the individually supplied color options.
    if theme is not None:
        cmap = _themes[theme]["cmap"]
        color_key_cmap = _themes[theme]["color_key_cmap"]
        edge_cmap = _themes[theme]["edge_cmap"]
        background = _themes[theme]["background"]

    points = np.array([x, y]).T
    point_df = pd.DataFrame(points, columns=("x", "y"))

    # Scale point size down as the number of points grows; at least 1 px.
    point_size = 500.0 / np.sqrt(points.shape[0])
    if point_size > 1:
        px_size = int(np.round(point_size))
    else:
        px_size = 1

    # When points are drawn on top, shade edges with 'log' so they recede;
    # otherwise use 'eq_hist' to maximize edge contrast.
    if show_points:
        edge_how = "log"
    else:
        edge_how = "eq_hist"

    extent = _get_extent(points)
    canvas = ds.Canvas(
        plot_width=int(figsize[0] * dpi),
        plot_height=int(figsize[1] * dpi),
        x_range=(extent[0], extent[1]),
        y_range=(extent[2], extent[3]),
    )

    if edge_bundling is None:
        edges = bd.directly_connect_edges(point_df, edge_df, weight="weight")
    elif edge_bundling == "hammer":
        warn("Hammer edge bundling is expensive for large graphs!\n" "This may take a long time to compute!")
        edges = bd.hammer_bundle(point_df, edge_df, weight="weight")
    else:
        raise ValueError("{} is not a recognised bundling method".format(edge_bundling))

    # Rasterize the (possibly bundled) edges, weighting by edge weight.
    edge_img = tf.shade(
        canvas.line(edges, "x", "y", agg=ds.sum("weight")),
        cmap=plt.get_cmap(edge_cmap),
        how=edge_how,
    )
    edge_img = tf.set_background(edge_img, background)

    if show_points:
        point_img = _datashade_points(
            points,
            None,
            labels,
            values,
            highlights,
            cmap,
            color_key,
            color_key_cmap,
            None,
            figsize[0] * dpi,
            figsize[1] * dpi,
            True,
            sort=sort,
        )
        if px_size > 1:
            point_img = tf.dynspread(point_img, threshold=0.5, max_px=px_size)
        # Composite points over the edge image.
        result = tf.stack(edge_img, point_img, how="over")
    else:
        result = edge_img

    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)

    _embed_datashader_in_an_axis(result, ax)

    ax.set(xticks=[], yticks=[])

    if save_show_or_return == "save":
        s_kwargs = {
            "path": None,
            "prefix": "connectivity_base",
            "dpi": None,
            "ext": "pdf",
            "transparent": True,
            "close": True,
            "verbose": True,
        }
        s_kwargs = update_dict(s_kwargs, save_kwargs)

        save_fig(**s_kwargs)
    elif save_show_or_return == "show":
        plt.tight_layout()
        plt.show()
    elif save_show_or_return == "return":
        return ax
示例#35
0
def connectivity(
    umap_object,
    edge_bundling=None,
    edge_cmap="gray_r",
    show_points=False,
    labels=None,
    values=None,
    theme=None,
    cmap="Blues",
    color_key=None,
    color_key_cmap="Spectral",
    background="white",
    width=800,
    height=800,
):
    """Plot connectivity relationships of the underlying UMAP
    simplicial set data structure. Internally UMAP will make
    use of what can be viewed as a weighted graph. This graph
    can be plotted using the layout provided by UMAP as a
    potential diagnostic view of the embedding. Currently this only works
    for 2D embeddings. While there are many optional parameters
    to further control and tailor the plotting, you need only
    pass in the trained/fit umap model to get results. This plot
    utility will attempt to do the hard work of avoiding
    overplotting issues and provide options for plotting the
    points as well as using edge bundling for graph visualization.

    Parameters
    ----------
    umap_object: trained UMAP object
        A trained UMAP object that has a 2D embedding.

    edge_bundling: string or None (optional, default None)
        The edge bundling method to use. Currently supported
        are None or 'hammer'. See the datashader docs
        on graph visualization for more details.

    edge_cmap: string (default 'gray_r')
        The name of a matplotlib colormap to use for shading/
        coloring the edges of the connectivity graph. Note that
        the ``theme``, if specified, will override this.

    show_points: bool (optional False)
        Whether to display the points over top of the edge
        connectivity. Further options allow for coloring/
        shading the points accordingly.

    labels: array, shape (n_samples,) (optional, default None)
        An array of labels (assumed integer or categorical),
        one for each data sample.
        This will be used for coloring the points in
        the plot according to their label. Note that
        this option is mutually exclusive to the ``values``
        option.

    values: array, shape (n_samples,) (optional, default None)
        An array of values (assumed float or continuous),
        one for each sample.
        This will be used for coloring the points in
        the plot according to a colorscale associated
        to the total range of values. Note that this
        option is mutually exclusive to the ``labels``
        option.

    theme: string (optional, default None)
        A color theme to use for plotting. A small set of
        predefined themes are provided which have relatively
        good aesthetics. Available themes are:
           * 'blue'
           * 'red'
           * 'green'
           * 'inferno'
           * 'fire'
           * 'viridis'
           * 'darkblue'
           * 'darkred'
           * 'darkgreen'

    cmap: string (optional, default 'Blues')
        The name of a matplotlib colormap to use for coloring
        or shading points. If no labels or values are passed
        this will be used for shading points according to
        density (largely only of relevance for very large
        datasets). If values are passed this will be used for
        shading according the value. Note that if theme
        is passed then this value will be overridden by the
        corresponding option of the theme.

    color_key: dict or array, shape (n_categories) (optional, default None)
        A way to assign colors to categoricals. This can either be
        an explicit dict mapping labels to colors (as strings of form
        '#RRGGBB'), or an array like object providing one color for
        each distinct category being provided in ``labels``. Either
        way this mapping will be used to color points according to
        the label. Note that if theme
        is passed then this value will be overridden by the
        corresponding option of the theme.

    color_key_cmap: string (optional, default 'Spectral')
        The name of a matplotlib colormap to use for categorical coloring.
        If an explicit ``color_key`` is not given a color mapping for
        categories can be generated from the label list and selecting
        a matching list of colors from the given colormap. Note
        that if theme
        is passed then this value will be overridden by the
        corresponding option of the theme.

    background: string (optional, default 'white')
        The color of the background. Usually this will be either
        'white' or 'black', but any color name will work. Ideally
        one wants to match this appropriately to the colors being
        used for points etc. This is one of the things that themes
        handle for you. Note that if theme
        is passed then this value will be overridden by the
        corresponding option of the theme.

    width: int (optional, default 800)
        The desired width of the plot in pixels.

    height: int (optional, default 800)
        The desired height of the plot in pixels

    Returns
    -------
    result: matplotlib axis
        The result is a matplotlib axis with the relevant plot displayed.
        If you are using a notebooks and have ``%matplotlib inline`` set
        then this will simply display inline.
    """
    # A theme overrides the individually supplied color options.
    if theme is not None:
        cmap = _themes[theme]["cmap"]
        color_key_cmap = _themes[theme]["color_key_cmap"]
        edge_cmap = _themes[theme]["edge_cmap"]
        background = _themes[theme]["background"]

    points = umap_object.embedding_
    point_df = pd.DataFrame(points, columns=("x", "y"))

    # Shrink point size as the number of points grows; never below 1 px.
    point_size = 100.0 / np.sqrt(points.shape[0])
    if point_size > 1:
        px_size = int(np.round(point_size))
    else:
        px_size = 1

    # With points on top, shade edges with 'log' so they recede;
    # otherwise 'eq_hist' maximizes edge contrast.
    if show_points:
        edge_how = "log"
    else:
        edge_how = "eq_hist"

    # Convert the UMAP fuzzy simplicial set (a sparse matrix) into an
    # edge list dataframe of (source, target, weight) rows.
    coo_graph = umap_object.graph_.tocoo()
    edge_df = pd.DataFrame(
        np.vstack([coo_graph.row, coo_graph.col, coo_graph.data]).T,
        columns=("source", "target", "weight"),
    )
    edge_df["source"] = edge_df.source.astype(np.int32)
    edge_df["target"] = edge_df.target.astype(np.int32)

    extent = _get_extent(points)
    canvas = ds.Canvas(
        plot_width=width,
        plot_height=height,
        x_range=(extent[0], extent[1]),
        y_range=(extent[2], extent[3]),
    )

    if edge_bundling is None:
        edges = bd.directly_connect_edges(point_df, edge_df, weight="weight")
    elif edge_bundling == "hammer":
        warn("Hammer edge bundling is expensive for large graphs!\n"
             "This may take a long time to compute!")
        edges = bd.hammer_bundle(point_df, edge_df, weight="weight")
    else:
        raise ValueError(
            "{} is not a recognised bundling method".format(edge_bundling))

    # Rasterize the (possibly bundled) edges, weighting by edge weight.
    edge_img = tf.shade(
        canvas.line(edges, "x", "y", agg=ds.sum("weight")),
        cmap=plt.get_cmap(edge_cmap),
        how=edge_how,
    )
    edge_img = tf.set_background(edge_img, background)

    if show_points:
        point_img = _datashade_points(
            points,
            None,
            labels,
            values,
            cmap,
            color_key,
            color_key_cmap,
            None,
            width,
            height,
            False,
        )
        if px_size > 1:
            point_img = tf.dynspread(point_img, threshold=0.5, max_px=px_size)
        # Composite points over the edge image.
        result = tf.stack(edge_img, point_img, how="over")
    else:
        result = edge_img

    font_color = _select_font_color(background)

    dpi = plt.rcParams["figure.dpi"]
    fig = plt.figure(figsize=(width / dpi, height / dpi))
    ax = fig.add_subplot(111)

    _embed_datashader_in_an_axis(result, ax)

    # Annotate the plot with the key UMAP hyperparameters.
    ax.set(xticks=[], yticks=[])
    ax.text(
        0.99,
        0.01,
        "UMAP: n_neighbors={}, min_dist={}".format(umap_object.n_neighbors,
                                                   umap_object.min_dist),
        transform=ax.transAxes,
        horizontalalignment="right",
        color=font_color,
    )

    return ax
示例#36
0
# # Default plot ranges:
x_range = (start, end)
# Pad the y range by 20% so the trace does not touch the plot edges.
y_range = (1.2 * signal.min(), 1.2 * signal.max())

# Create a dataframe indexed by a uniform time axis over [start, end].
data['Time'] = np.linspace(start, end, n)
df = pd.DataFrame(data)

time_start = df['Time'].values[0]
time_end = df['Time'].values[-1]

cvs = ds.Canvas(x_range=x_range, y_range=y_range)

# Rasterize every column as a line against Time; shade only 'Signal'.
aggs = OrderedDict((c, cvs.line(df, 'Time', c)) for c in cols)
img = tf.shade(aggs['Signal'])

arr = np.array(img)
z = arr.tolist()

# axes: z is row-major, so dims = (width, height) = (row length, row count)
dims = len(z[0]), len(z)

x = np.linspace(x_range[0], x_range[1], dims[0])
# BUGFIX: the y axis needs one sample per image row (dims[1], the height);
# dims[0] only gave the right length because the default canvas is square.
y = np.linspace(y_range[0], y_range[1], dims[1])
fig1 = {
    'data': [{
        'x': x,
        'y': y,
        'z': z,
def test_shade_cmap_errors():
    """tf.shade must reject invalid cmap arguments with ValueError."""
    for bad_cmap in ('foo', []):
        with pytest.raises(ValueError):
            tf.shade(agg.a, cmap=bad_cmap)
示例#38
0
# # Default plot ranges:
x_range = (start, end)
# Pad the y range by 20% so the trace does not touch the plot edges.
y_range = (1.2 * signal.min(), 1.2 * signal.max())

# Create a dataframe indexed by a uniform time axis over [start, end].
data["Time"] = np.linspace(start, end, n)
df = pd.DataFrame(data)

time_start = df["Time"].values[0]
time_end = df["Time"].values[-1]

cvs = ds.Canvas(x_range=x_range, y_range=y_range)

# Rasterize every column as a line against Time; shade only "Signal".
aggs = OrderedDict((c, cvs.line(df, "Time", c)) for c in cols)
img = tf.shade(aggs["Signal"])

arr = np.array(img)
z = arr.tolist()

# axes: z is row-major, so dims = (width, height) = (row length, row count)
dims = len(z[0]), len(z)

x = np.linspace(x_range[0], x_range[1], dims[0])
# BUGFIX: the y axis needs one sample per image row (dims[1], the height);
# dims[0] only gave the right length because the default canvas is square.
y = np.linspace(y_range[0], y_range[1], dims[1])

# Layout

external_stylesheets = [
    "https://codepen.io/chriddyp/pen/bWLwgP.css",
    "/assets/style.css",
def build_datashader_plot(df, aggregate_column, colorscale_name,
                          colorscale_transform, new_coordinates, position,
                          x_range, y_range):
    """
    Build choropleth figure

    Args:
        df: pandas or cudf DataFrame
        aggregate_column: Column to perform aggregate on. Ignored for 'count' aggregate
        colorscale_name: Name of plotly colorscale
        colorscale_transform: Colorscale transformation
        new_coordinates: Corner coordinates of the shaded image, used for the
            mapbox image layer
        position: Mapbox view settings (e.g. center/zoom) merged into the
            figure layout
        x_range: (x0, x1) extent of the current viewport
        y_range: (y0, y1) extent of the current viewport

    Returns:
        Choropleth figure dictionary
    """

    global data_3857, data_center_3857, data_4326, data_center_4326

    x0, x1 = x_range
    y0, y1 = y_range

    # Build query expressions
    # NOTE(review): query_expr_xy is built but never used in this function —
    # possibly consumed elsewhere or leftover; confirm before removing.
    query_expr_xy = f"(x >= {x0}) & (x <= {x1}) & (y >= {y0}) & (y <= {y1})"
    datashader_color_scale = {}

    aggregate = 'count'

    # 'Blugrn' acts as a sentinel selecting categorical shading with a fixed
    # color key; any other name yields a continuous cmap from the colorscale.
    if colorscale_name == 'Blugrn':
        datashader_color_scale['color_key'] = colors[aggregate_column]
        aggregate = 'count_cat'
    else:
        datashader_color_scale['cmap'] = [
            i[1]
            for i in build_colorscale(colorscale_name, colorscale_transform)
        ]
        # NOTE(review): mutates the caller's DataFrame in place for the
        # non-cudf path — verify this side effect is intended.
        if not isinstance(df, cudf.DataFrame):
            df[aggregate_column] = df[aggregate_column].astype('int8')

    cvs = ds.Canvas(plot_width=1400,
                    plot_height=1400,
                    x_range=x_range,
                    y_range=y_range)

    # Aggregate points onto the canvas with the chosen reduction.
    agg = cvs.points(df,
                     x='x',
                     y='y',
                     agg=getattr(ds, aggregate)(aggregate_column))
    # Pull aggregate min/max back to host memory for the colorbar limits.
    cmin = cupy.asnumpy(agg.min().data)
    cmax = cupy.asnumpy(agg.max().data)

    # Count the number of selected towers
    temp = agg.sum()
    temp.data = cupy.asnumpy(temp.data)
    n_selected = int(temp)

    if n_selected == 0:
        # Nothing to display
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}
        layers = []
    else:
        # Shade aggregation into an image that we can add to the map as a mapbox
        # image layer
        img = tf.shade(agg, how='log', **datashader_color_scale).to_pil()

        # Add image as mapbox image layer. Note that as of version 4.4, plotly will
        # automatically convert the PIL image object into a base64 encoded png string
        layers = [{
            "sourcetype": "image",
            "source": img,
            "coordinates": new_coordinates
        }]

        # Do not display any mapbox markers
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}

    # Build map figure
    map_graph = {
        'data': [],
        'layout': {
            'template':
            template,
            'uirevision':
            True,
            'mapbox': {
                'style': mapbox_style,
                'accesstoken': token,
                'layers': layers,
            },
            'margin': {
                "r": 140,
                "t": 0,
                "l": 0,
                "b": 0
            },
            'height':
            500,
            'shapes': [{
                'type': 'rect',
                'xref': 'paper',
                'yref': 'paper',
                'x0': 0,
                'y0': 0,
                'x1': 1,
                'y1': 1,
                'line': {
                    'width': 1,
                    'color': '#191a1a',
                }
            }]
        },
    }

    if aggregate == 'count_cat':
        # for `Age By PurBlue` category
        # Two-category (male/female) colorbar built from a stepped colorscale.
        colorscale = [0, 1]
        marker = dict(
            size=0,
            showscale=True,
            colorbar={
                "title": {
                    "text": 'Sex',
                    "side": "right",
                    "font": {
                        "size": 14
                    }
                },
                "tickvals": [0.25, 0.75],
                "ticktext": ['male', 'female'],
                "ypad": 30
            },
            colorscale=[(0.00, colors['sex'][0]), (0.50, colors['sex'][0]),
                        (0.50, colors['sex'][1]), (1.00, colors['sex'][1])],
            cmin=0,
            cmax=1,
        )

        # Invisible scattermapbox trace exists only to host the colorbar.
        map_graph['data'].append({
            'type': 'scattermapbox',
            'lat': lat,
            'lon': lon,
            'customdata': customdata,
            'marker': marker,
            'hoverinfo': 'none',
        })
        map_graph['layout']['annotations'] = []
    else:
        # Continuous colorbar spanning the aggregate's observed range.
        marker = dict(size=0,
                      showscale=True,
                      colorbar={
                          "title": {
                              "text": 'Population',
                              "side": "right",
                              "font": {
                                  "size": 14
                              }
                          },
                          "ypad": 30
                      },
                      colorscale=build_colorscale(
                          colorscale_name,
                          colorscale_transform,
                      ),
                      cmin=cmin,
                      cmax=cmax)
        map_graph['data'].append({
            'type': 'scattermapbox',
            'lat': lat,
            'lon': lon,
            'customdata': customdata,
            'marker': marker,
            'hoverinfo': 'none'
        })

    # Apply the requested map view (center/zoom) on top of the defaults.
    map_graph['layout']['mapbox'].update(position)

    return map_graph
示例#40
0
文件: customZoom.py 项目: dpinney/omf
def edgesplot(edges, name=None, canvas=None):
    """Rasterize edge line segments by count onto ``canvas`` (a default
    canvas built from ``cvsopts`` when None) and return the shaded image."""
    if canvas is None:
        canvas = ds.Canvas(**cvsopts)
    line_agg = canvas.line(edges, 'x', 'y', agg=ds.count())
    return tf.shade(line_agg, name=name)
示例#41
0
def plot_knn_f1scores(plot_label=''):
    """Plot per-class F1 scores derived from k-nearest-neighbour runs.

    Loads the pickled outputs of ``get_knn_accuracy()`` (1-D and 10-D
    variants) from disk, reshapes them into per-class dataframes, rasterizes
    the dense scatter data with datashader, and overlays a sparse sample of
    error bars with matplotlib. Saves ``knn_plot_1D<plot_label>.pdf`` and
    ``knn_plot_10D<plot_label>.pdf``.

    Parameters
    ----------
    plot_label : str
        Suffix appended to the output PDF file names.

    Side effects: reads/writes files in the working directory (pngs from
    ``export_image``, the two output PDFs) and calls ``exit()`` if the
    pickles are missing. Relies on module-level names ``load_obj``,
    ``export_image``, ``galaxy_c``, ``quasar_c``, ``star_c``.
    """
    # Plots F1-score for each source from the nearest neighbours found using knn_closest. Input is a list of indices.
    # If dim==1 knn found in 1-D. If dim==10, knn found in 10-D. (see later half of this function for details)
    # Choose to plot as function of 1D feature or r magnitude.
    # Load output from previous run:
    print('Loading knn indices from previous run saved on disk...')
    filename1d = 'knn_f1scores_1D'
    filename10d = 'knn_f1scores_10D'

    try:
        knn_f1scores_1d = load_obj(filename1d)
        knn_f1scores_10d = load_obj(filename10d)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
    # and hides the real error; consider narrowing to (OSError, pickle.UnpicklingError).
    except:
        print(
            'Failed to load knn_f1scores_*.pkl from disk - did you run "get_knn_accuracy()" yet?'
        )
        exit()

    # combine list of dicts into single dictionary
    knn_f1scores_1d = {
        k: [d.get(k) for d in knn_f1scores_1d]
        for k in {k
                  for d in knn_f1scores_1d for k in d}
    }
    knn_f1scores_10d = {
        k: [d.get(k) for d in knn_f1scores_10d]
        for k in {k
                  for d in knn_f1scores_10d for k in d}
    }
    df1d = pd.DataFrame(knn_f1scores_1d)
    df10d = pd.DataFrame(knn_f1scores_10d)

    # 1D
    df1d_g = df1d[[
        'galaxy_xvar_mean', 'galaxy_xvar_std', 'galaxy_probs_mean',
        'galaxy_probs_std', 'f1g', 'f1gerr', 'correct_source'
    ]].copy()
    df1d_q = df1d[[
        'quasar_xvar_mean', 'quasar_xvar_std', 'quasar_probs_mean',
        'quasar_probs_std', 'f1q', 'f1qerr', 'correct_source'
    ]].copy()
    df1d_s = df1d[[
        'star_xvar_mean', 'star_xvar_std', 'star_probs_mean', 'star_probs_std',
        'f1s', 'f1serr', 'correct_source'
    ]].copy()
    # Normalise the three per-class frames onto a shared column schema so
    # they can be stacked into one long-format frame below.
    df1d_g['class'] = 'GALAXY'
    df1d_g.columns = [
        'feature1d_mean', 'feature1d_std', 'probs_mean', 'probs_std', 'f1',
        'f1err', 'correct_source', 'class'
    ]
    df1d_q['class'] = 'QSO'
    df1d_q.columns = [
        'feature1d_mean', 'feature1d_std', 'probs_mean', 'probs_std', 'f1',
        'f1err', 'correct_source', 'class'
    ]
    df1d_s['class'] = 'STAR'
    df1d_s.columns = [
        'feature1d_mean', 'feature1d_std', 'probs_mean', 'probs_std', 'f1',
        'f1err', 'correct_source', 'class'
    ]
    df_all_1d = pd.concat([df1d_g, df1d_q, df1d_s], axis=0)
    df_all_1d['class'] = df_all_1d['class'].astype(
        'category')  # datashader wants categorical class

    df10d_g = df10d[[
        'galaxy_xvar_mean', 'galaxy_xvar_std', 'galaxy_probs_mean',
        'galaxy_probs_std', 'f1g', 'f1gerr', 'correct_source'
    ]].copy()
    df10d_q = df10d[[
        'quasar_xvar_mean', 'quasar_xvar_std', 'quasar_probs_mean',
        'quasar_probs_std', 'f1q', 'f1qerr', 'correct_source'
    ]].copy()
    df10d_s = df10d[[
        'star_xvar_mean', 'star_xvar_std', 'star_probs_mean', 'star_probs_std',
        'f1s', 'f1serr', 'correct_source'
    ]].copy()
    df10d_g['class'] = 'GALAXY'
    df10d_g.columns = [
        'feature10d_mean', 'feature10d_std', 'probs_mean', 'probs_std', 'f1',
        'f1err', 'correct_source', 'class'
    ]
    df10d_q['class'] = 'QSO'
    df10d_q.columns = [
        'feature10d_mean', 'feature10d_std', 'probs_mean', 'probs_std', 'f1',
        'f1err', 'correct_source', 'class'
    ]
    df10d_s['class'] = 'STAR'
    df10d_s.columns = [
        'feature10d_mean', 'feature10d_std', 'probs_mean', 'probs_std', 'f1',
        'f1err', 'correct_source', 'class'
    ]
    df_all_10d = pd.concat([df10d_g, df10d_q, df10d_s], axis=0)
    df_all_10d['class'] = df_all_10d['class'].astype(
        'category')  # datashader wants categorical class

    # Did we fit the knn in 1-D or in 10-D?
    # In 1-D a few thousand nearest neighbours will likely be a healthy mix of the 3 classes throughout most/all of the feature space. So you will get reliable numbers for F1 scores per class (perhaps with differring error bars). These are basically a round-about way of getting F1 scores shown in the histogram created by the function plot_histogram_matrix_f1. It is nice they agree (they most definately should). The mannor in which they agree is interesting - since knn effectively uses variable bin widths to get enough nearest neighbours, whilst plot_histogram_matrix_f1 uses fixed bin widths and averages within that bin.

    # select correct sources only?
    # Only plot f1-score for correct object type in question. e.g. If it's a galaxy, nearest 10000 objects will likely only be galaxies, so f1 for star and quasar will be very poor or zero because there are no True Positives in this area of 1-D feature space. In 1-D feature space the 10000 nearest neighbours were a healthy mix of all three classes so we didn't have this problem.

    print(df_all_1d.correct_source.value_counts())
    print(df_all_10d.correct_source.value_counts())
    df_all_1d = df_all_1d[df_all_1d.correct_source == 1]
    df_all_10d = df_all_10d[df_all_10d.correct_source == 1]

    # only 5000 sources are wrong, not so bad.
    # Create datashader pngs for each plot, since we have too much data for matplotlib to handle

    # 1D - 1dfeature vs f1
    xmin1d = df1d.star_xvar_mean.min() - 0.1  # padd for plotting later
    xmax1d = df1d.star_xvar_mean.max() + 0.1
    print(xmin1d, xmax1d)
    ymin = 0
    ymax = 1.05
    cvs = ds.Canvas(plot_width=1000,
                    plot_height=600,
                    x_range=(xmin1d, xmax1d),
                    y_range=(ymin, ymax),
                    x_axis_type='linear',
                    y_axis_type='linear')
    agg = cvs.points(df_all_1d, 'feature1d_mean', 'f1', ds.count_cat('class'))
    ckey = dict(GALAXY=(101, 236, 101), QSO='hotpink', STAR='dodgerblue')
    img = tf.shade(agg, color_key=ckey, how='log')
    export_image(img, 'knn1d_1d_vs_f1', fmt='.png', background='white')

    # 10D - 1dfeature vs f1
    xmin10d = df10d.star_xvar_mean.min() - 0.1  # padd for plotting later
    xmax10d = df10d.star_xvar_mean.max() + 0.1
    print(xmin10d, xmax10d)
    ymin = 0
    ymax = 1.05
    cvs = ds.Canvas(plot_width=200,
                    plot_height=120,
                    x_range=(xmin10d, xmax10d),
                    y_range=(ymin, ymax),
                    x_axis_type='linear',
                    y_axis_type='linear')
    agg = cvs.points(df_all_10d, 'feature10d_mean', 'f1',
                     ds.count_cat('class'))
    ckey = dict(GALAXY=(101, 236, 101), QSO='hotpink', STAR='dodgerblue')
    img = tf.shade(agg, color_key=ckey, how='log')
    export_image(img, 'knn10d_1d_vs_f1', fmt='.png', background='white')

    # 1D - prob vs f1
    xmin1d_probs = 0  # padd for plotting later
    xmax1d_probs = 1.05
    ymin = 0
    ymax = 1.05
    cvs = ds.Canvas(plot_width=300,
                    plot_height=300,
                    x_range=(xmin1d_probs, xmax1d_probs),
                    y_range=(ymin, ymax),
                    x_axis_type='linear',
                    y_axis_type='linear')
    agg = cvs.points(df_all_1d, 'probs_mean', 'f1', ds.count_cat('class'))
    ckey = dict(GALAXY=(101, 236, 101), QSO='hotpink', STAR='dodgerblue')
    img = tf.shade(agg, color_key=ckey, how='log')
    export_image(img, 'knn1d_probs_vs_f1', fmt='.png', background='white')

    # 10D - 1dfeature vs f1
    xmin10d_probs = 0  # padd for plotting later
    xmax10d_probs = 1.05
    ymin = 0
    ymax = 1.05
    cvs = ds.Canvas(plot_width=200,
                    plot_height=200,
                    x_range=(xmin10d_probs, xmax10d_probs),
                    y_range=(ymin, ymax),
                    x_axis_type='linear',
                    y_axis_type='linear')
    agg = cvs.points(df_all_10d, 'probs_mean', 'f1', ds.count_cat('class'))
    ckey = dict(GALAXY=(101, 236, 101), QSO='hotpink', STAR='dodgerblue')
    img = tf.shade(agg, color_key=ckey, how='log')
    export_image(img, 'knn10d_probs_vs_f1', fmt='.png', background='white')

    # ----------------- plotting -----------------
    # get datashader pngs, and plot a small sample of points over the top to guide eye with error bars.
    img_1d_1d = mpimg.imread('knn1d_1d_vs_f1.png')
    img_1d_probs = mpimg.imread('knn1d_probs_vs_f1.png')
    mpl.rcParams.update({'font.size': 10})
    # NOTE(review): `markeredgewidth` is never used below (only `mew` is) —
    # one of the two locals is redundant.
    markeredgewidth = 0.5
    mew = 0.5
    elinewidth = 0.5

    fig, axs = plt.subplots(1, 2, figsize=(14.5, 4))
    # --- 1D --- 1d ---
    plt.sca(axs[0])
    plt.imshow(img_1d_1d, extent=[xmin1d, xmax1d, ymin * 10,
                                  ymax * 10])  # make yaxis 10 times larger
    # fix ylabels after scaling the axis
    ylabels = axs[0].get_yticks()
    new_ylabels = [l / 10
                   for l in ylabels]  # account for factor of 10 increase
    # NOTE(review): set_yticklabels without a matching set_yticks relies on
    # the current tick positions staying fixed — confirm under newer matplotlib.
    axs[0].set_yticklabels(new_ylabels)
    axs[0].xaxis.set_major_formatter(FormatStrFormatter('%.1f'))

    # plot sample over the top to get a feel for error bars
    samp = 2500
    plt.errorbar(df1d_g[0::samp]['feature1d_mean'],
                 df1d_g[0::samp]['f1'] * 10,
                 xerr=df1d_g[0::samp]['feature1d_std'],
                 yerr=df1d_g[0::samp]['f1err'] * 10,
                 color=galaxy_c,
                 elinewidth=elinewidth,
                 markeredgewidth=mew,
                 ls='none',
                 label='Galaxies')
    plt.errorbar(df1d_q[0::samp]['feature1d_mean'],
                 df1d_q[0::samp]['f1'] * 10,
                 xerr=df1d_q[0::samp]['feature1d_std'],
                 yerr=df1d_q[0::samp]['f1err'] * 10,
                 color=quasar_c,
                 elinewidth=elinewidth,
                 markeredgewidth=mew,
                 ls='none',
                 label='Quasars')
    plt.errorbar(df1d_s[0::samp]['feature1d_mean'],
                 df1d_s[0::samp]['f1'] * 10,
                 xerr=df1d_s[0::samp]['feature1d_std'],
                 yerr=df1d_s[0::samp]['f1err'] * 10,
                 color=star_c,
                 elinewidth=elinewidth,
                 markeredgewidth=mew,
                 ls='none',
                 label='Stars')

    plt.tick_params(axis='y', which='both', right=True)
    plt.minorticks_on()
    plt.xlabel('1D feature')
    plt.ylabel('F1 score in 1 dimensions')
    #axs[1].text(0.95, 0.01, 'calculated from 10000 nearest neighbours in 10 dimensions', verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes, color='black', fontsize=8)
    plt.xlim(-7, 12.5)
    plt.legend(frameon=False, loc='lower right')
    plt.tight_layout()
    fig.tight_layout()

    # --- 1D --- probs ---
    plt.sca(axs[1])
    xf = 2
    plt.imshow(img_1d_probs,
               extent=[xmin1d_probs * xf, xmax1d_probs * xf, ymin,
                       ymax])  # make xaxis larger
    # fix ylabels after scaling the axis
    #xlabels = axs[0].get_xticks()
    #new_xlabels = [l/xf for l in xlabels] # account for scaling axis
    axs[1].set_xticks(np.arange(0, 2.1, step=0.2))
    axs[1].set_xticklabels(np.arange(0, 1.1, step=0.1))
    #axs[0].xaxis.set_major_formatter(FormatStrFormatter('%.1f')) # doesn't work
    # getting some labels with 8 F****** decimal places without these two lines:
    labels = [item.get_text() for item in axs[1].get_xticklabels()]
    axs[1].set_xticklabels([str(round(float(label), 2)) for label in labels])

    # plot sample over the top to get a feel for error bars
    df1d_g2 = df1d_g[(df1d_g.f1 < 0.85) & (df1d_g.probs_mean < 0.85)][0::3000]
    plt.errorbar(df1d_g2['probs_mean'] * xf,
                 df1d_g2['f1'],
                 xerr=df1d_g2['probs_std'] * xf,
                 yerr=df1d_g2['f1err'],
                 color=galaxy_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Galaxies')
    df1d_q2 = df1d_q[(df1d_q.f1 < 0.85) & (df1d_q.probs_mean < 0.85)][0::3000]
    plt.errorbar(df1d_q2['probs_mean'] * xf,
                 df1d_q2['f1'],
                 xerr=df1d_q2['probs_std'] * xf,
                 yerr=df1d_q2['f1err'],
                 color=quasar_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Quasars')
    df1d_q2 = df1d_q[(df1d_q.f1 < 0.85) & (df1d_q.probs_mean < 0.75)][
        0::800]  # plot more at lower values in undersampled region
    plt.errorbar(df1d_q2['probs_mean'] * xf,
                 df1d_q2['f1'],
                 xerr=df1d_q2['probs_std'] * xf,
                 yerr=df1d_q2['f1err'],
                 color=quasar_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew)
    df1d_s2 = df1d_s[(df1d_s.f1 < 0.85) & (df1d_s.probs_mean < 0.85)][0::3000]
    plt.errorbar(df1d_s2['probs_mean'] * xf,
                 df1d_s2['f1'],
                 xerr=df1d_s2['probs_std'] * xf,
                 yerr=df1d_s2['f1err'],
                 color=star_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Stars')

    plt.tick_params(axis='y', which='both', right=True)
    plt.minorticks_on()
    plt.xlabel('Classification probability')
    plt.ylabel('F1 score in 1 dimension')
    #axs[0].text(0.95, 0.01, 'calculated from 10000 nearest neighbours in 1 dimension', verticalalignment='bottom', horizontalalignment='right', transform=axs[0].transAxes, color='black', fontsize=8)
    #plt.xlim(0.66,2)
    plt.tight_layout()

    #fig.subplots_adjust(wspace=0.1, hspace=0.1) # Must come after tight_layout to work! ... doesn't seem to work when using imshow :(
    fig.savefig('knn_plot_1D' + plot_label + '.pdf')
    plt.clf()

    # ---------------- 10-d ----------------

    # ----------------- plotting -----------------
    elinewidth = 0.2
    mpl.rcParams.update({'font.size':
                         10})  # else its really small in the paper

    img_10d_1d = mpimg.imread('knn10d_1d_vs_f1.png')
    img_10d_probs = mpimg.imread('knn10d_probs_vs_f1.png')

    fig, axs = plt.subplots(1, 2, figsize=(14.5, 4))
    xf = 2  # make x-axis twice as long as y.

    # --- 10D ---
    plt.sca(axs[0])
    plt.imshow(img_10d_1d, extent=[xmin10d, xmax10d, ymin * 10,
                                   ymax * 10])  # make yaxis 10 times larger
    # fix ylabels after scaling the axis
    ylabels = axs[0].get_yticks()
    new_ylabels = [l / 10
                   for l in ylabels]  # account for factor of 10 increase
    axs[0].set_yticklabels(new_ylabels)
    axs[0].xaxis.set_major_formatter(FormatStrFormatter('%.1f'))

    # plot sample over the top to get a feel for error bars
    df10d_g2 = df10d_g[df10d_g.f1 < 0.95][
        0::
        500]  # only plot error bars below 0.95 because above this they are v small.
    plt.errorbar(df10d_g2['feature10d_mean'],
                 df10d_g2['f1'] * 10,
                 xerr=df10d_g2['feature10d_std'],
                 yerr=df10d_g2['f1err'] * 10,
                 color=galaxy_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Galaxies')
    df10d_q2 = df10d_q[df10d_q.f1 < 0.95][0::500]
    plt.errorbar(df10d_q2['feature10d_mean'],
                 df10d_q2['f1'] * 10,
                 xerr=df10d_q2['feature10d_std'],
                 yerr=df10d_q2['f1err'] * 10,
                 color=quasar_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Quasars')
    df10d_s2 = df10d_s[df10d_s.f1 < 0.95][0::500]
    plt.errorbar(df10d_s2['feature10d_mean'],
                 df10d_s2['f1'] * 10,
                 xerr=df10d_s2['feature10d_std'],
                 yerr=df10d_s2['f1err'] * 10,
                 color=star_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Stars')
    plt.tick_params(axis='y', which='both', right=True)
    plt.minorticks_on()
    plt.xlabel('1D feature')
    plt.ylabel('F1 score in 10 dimensions')
    #axs[1].text(0.95, 0.01, 'calculated from 10000 nearest neighbours in 10 dimensions', verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes, color='black', fontsize=8)
    plt.xlim(-7, 12.5)
    plt.tight_layout()

    # --- 10D --- probs ---
    plt.sca(axs[1])
    plt.imshow(img_10d_probs,
               extent=[xmin10d_probs * xf, xmax10d_probs * xf, ymin,
                       ymax])  # make xaxis larger
    # fix ylabels after scaling the axis
    #xlabels = axs[1].get_xticks()
    #new_xlabels = [l/xf for l in xlabels] # account for scaling axis
    #axs[1].set_xticklabels(new_xlabels)
    axs[1].set_xticks(np.arange(0, 2.1, step=0.2))
    axs[1].set_xticklabels(np.arange(0, 1.1, step=0.1))
    #axs[0].xaxis.set_major_formatter(FormatStrFormatter('%.1f')) # doesn't work
    labels = [item.get_text() for item in axs[1].get_xticklabels()]
    axs[1].set_xticklabels([str(round(float(label), 2)) for label in labels])

    # plot sample over the top to get a feel for error bars
    df10d_g2 = df10d_g[(df10d_g.f1 < 0.85) & (
        df10d_g.probs_mean < 0.85
    )][0::
       1000]  # only plot error bars below 0.95 because above this they are v small, and overcrowd the plot.
    plt.errorbar(df10d_g2['probs_mean'] * xf,
                 df10d_g2['f1'],
                 xerr=df10d_g2['probs_std'] * xf,
                 yerr=df10d_g2['f1err'],
                 color=galaxy_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Galaxy')
    df10d_q2 = df10d_q[(df10d_q.f1 < 0.85)
                       & (df10d_q.probs_mean < 0.85)][0::1000]
    plt.errorbar(df10d_q2['probs_mean'] * xf,
                 df10d_q2['f1'],
                 xerr=df10d_q2['probs_std'] * xf,
                 yerr=df10d_q2['f1err'],
                 color=quasar_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Quasar')
    df10d_s2 = df10d_s[(df10d_s.f1 < 0.85)
                       & (df10d_s.probs_mean < 0.85)][0::1000]
    plt.errorbar(df10d_s2['probs_mean'] * xf,
                 df10d_s2['f1'],
                 xerr=df10d_s2['probs_std'] * xf,
                 yerr=df10d_s2['f1err'],
                 color=star_c,
                 elinewidth=elinewidth,
                 ls='none',
                 markeredgewidth=mew,
                 label='Star')

    plt.tick_params(axis='y', which='both', right=True)
    plt.minorticks_on()
    plt.xlabel('Classification probability')
    plt.ylabel('F1 score in 10 dimensions')
    plt.legend(frameon=False, loc='upper left')
    #axs[1].text(0.95, 0.01, 'calculated from 10000 nearest neighbours in 10 dimensions', verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes, color='black', fontsize=8)
    plt.tight_layout()
    fig.tight_layout()
    #plt.xlim(0.66,2)
    fig.savefig('knn_plot_10D' + plot_label + '.pdf')
示例#42
0
 def _shader_func(self, agg, span=None):
     # Shade the aggregate with the colormap selected on this instance.
     # NOTE(review): `span` is accepted but never forwarded to tf.shade —
     # confirm whether it should be passed through (tf.shade supports span=).
     img = tf.shade(agg, cmap=cm[self.color_map])
     return img
    def _process(self, element, key=None):
        """Shade an aggregate element into an RGB element via ``tf.shade``.

        NdOverlays are concatenated into a single element; Overlays are
        processed layer-by-layer. Shading options (color_key/cmap, span,
        normalization, min_alpha) are assembled from this operation's
        parameters before shading.
        """
        if isinstance(element, NdOverlay):
            bounds = element.last.bounds
            element = self.concatenate(element)
        elif isinstance(element, Overlay):
            # Recurse into each layer of the overlay independently.
            return element.map(self._process, [Element])
        else:
            bounds = element.bounds

        vdim = element.vdims[0].name
        array = element.data[vdim]
        kdims = element.kdims

        # Compute shading options depending on whether
        # it is a categorical or regular aggregate
        shade_opts = dict(how=self.p.normalization, min_alpha=self.p.min_alpha)
        if element.ndims > 2:
            # Categorical aggregate: last axis holds the categories.
            kdims = element.kdims[1:]
            categories = array.shape[-1]
            if not self.p.color_key:
                pass
            elif isinstance(self.p.color_key, dict):
                shade_opts['color_key'] = self.p.color_key
            elif isinstance(self.p.color_key, Iterable):
                # Truncate the iterable color key to the number of categories.
                shade_opts['color_key'] = [
                    c for i, c in zip(range(categories), self.p.color_key)
                ]
            else:
                # Callable color key: sample it evenly over [0, 1].
                colors = [
                    self.p.color_key(s) for s in np.linspace(0, 1, categories)
                ]
                # NOTE(review): map() returns a lazy iterator on Python 3 —
                # confirm tf.shade accepts a non-list color_key/cmap here.
                shade_opts['color_key'] = map(self.rgb2hex, colors)
        elif not self.p.cmap:
            pass
        elif isinstance(self.p.cmap, Callable):
            colors = [self.p.cmap(s) for s in np.linspace(0, 1, 256)]
            shade_opts['cmap'] = map(self.rgb2hex, colors)
        else:
            shade_opts['cmap'] = self.p.cmap

        # Explicit color limits win; otherwise pass the data range as span
        # (span is unsupported for eq_hist and on datashader <= 0.5.0).
        if self.p.clims:
            shade_opts['span'] = self.p.clims
        elif ds_version > '0.5.0' and self.p.normalization != 'eq_hist':
            shade_opts['span'] = element.range(vdim)

        for d in kdims:
            # Datetime axes: convert ns-resolution ints before shading.
            # NOTE(review): the 10e-4 factor equals 1e-3 (ns -> us) — confirm
            # the intended unit.
            if array[d.name].dtype.kind == 'M':
                array[d.name] = array[d.name].astype('datetime64[ns]').astype(
                    'int64') * 10e-4

        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', r'invalid value encountered in true_divide')
            if np.isnan(array.data).all():
                # All-NaN input: emit a fully transparent image of equal shape.
                arr = np.zeros(array.data.shape, dtype=np.uint32)
                img = array.copy()
                img.data = arr
            else:
                img = tf.shade(array, **shade_opts)
        params = dict(get_param_values(element),
                      kdims=kdims,
                      bounds=bounds,
                      vdims=RGB.vdims[:])
        return RGB(self.uint32_to_uint8(img.data), **params)
示例#44
0
def _datashade_points(
    points,
    ax=None,
    labels=None,
    values=None,
    cmap="Blues",
    color_key=None,
    color_key_cmap="Spectral",
    background="white",
    width=800,
    height=800,
    show_legend=True,
):
    """Use datashader to plot points, colored by labels, values, or density.

    Exactly one of three coloring modes is used: categorical ``labels``,
    continuous ``values``, or (default) point density. Returns the shaded
    image, or the matplotlib ``ax`` with the image embedded when ``ax`` is
    given. ``labels``/``values`` must have one entry per row of ``points``.
    """
    extent = _get_extent(points)
    canvas = ds.Canvas(
        plot_width=width,
        plot_height=height,
        x_range=(extent[0], extent[1]),
        y_range=(extent[2], extent[3]),
    )
    data = pd.DataFrame(points, columns=("x", "y"))

    legend_elements = None

    # Color by labels
    if labels is not None:
        if labels.shape[0] != points.shape[0]:
            raise ValueError("Labels must have a label for "
                             "each sample (size mismatch: {} {})".format(
                                 labels.shape[0], points.shape[0]))

        data["label"] = pd.Categorical(labels)
        aggregation = canvas.points(data, "x", "y", agg=ds.count_cat("label"))
        if color_key is None and color_key_cmap is None:
            # No colors specified at all: let datashader pick defaults.
            result = tf.shade(aggregation, how="eq_hist")
        elif color_key is None:
            # Derive one color per unique label from the requested colormap.
            unique_labels = np.unique(labels)
            num_labels = unique_labels.shape[0]
            color_key = _to_hex(
                plt.get_cmap(color_key_cmap)(np.linspace(0, 1, num_labels)))
            legend_elements = [
                Patch(facecolor=color_key[i], label=k)
                for i, k in enumerate(unique_labels)
            ]
            result = tf.shade(aggregation, color_key=color_key, how="eq_hist")
        else:
            # Caller supplied an explicit label -> color mapping.
            legend_elements = [
                Patch(facecolor=color_key[k], label=k)
                for k in color_key.keys()
            ]
            result = tf.shade(aggregation, color_key=color_key, how="eq_hist")

    # Color by values
    elif values is not None:
        if values.shape[0] != points.shape[0]:
            raise ValueError("Values must have a value for "
                             "each sample (size mismatch: {} {})".format(
                                 values.shape[0], points.shape[0]))
        unique_values = np.unique(values)
        if unique_values.shape[0] >= 256:
            # Too many distinct values for one category each: bin into 256
            # levels. NOTE(review): bin_size is zero if all values are equal
            # — division below would then fail; confirm callers exclude this.
            min_val, max_val = np.min(values), np.max(values)
            bin_size = (max_val - min_val) / 255.0
            data["val_cat"] = pd.Categorical(
                np.round((values - min_val) / bin_size).astype(np.int16))
            aggregation = canvas.points(data,
                                        "x",
                                        "y",
                                        agg=ds.count_cat("val_cat"))
            color_key = _to_hex(plt.get_cmap(cmap)(np.linspace(0, 1, 256)))
            result = tf.shade(aggregation, color_key=color_key, how="eq_hist")
        else:
            # Few distinct values: treat each value as its own category.
            data["val_cat"] = pd.Categorical(values)
            aggregation = canvas.points(data,
                                        "x",
                                        "y",
                                        agg=ds.count_cat("val_cat"))
            color_key_cols = _to_hex(
                plt.get_cmap(cmap)(np.linspace(0, 1, unique_values.shape[0])))
            color_key = dict(zip(unique_values, color_key_cols))
            result = tf.shade(aggregation, color_key=color_key, how="eq_hist")

    # Color by density (default datashader option)
    else:
        aggregation = canvas.points(data, "x", "y", agg=ds.count())
        result = tf.shade(aggregation, cmap=plt.get_cmap(cmap))

    if background is not None:
        result = tf.set_background(result, background)

    if ax is not None:
        _embed_datashader_in_an_axis(result, ax)
        if show_legend and legend_elements is not None:
            ax.legend(handles=legend_elements)
        return ax
    else:
        return result
import numpy as np
import pandas as pd
import matplotlib.pylab as plt

import datashader as ds
import datashader.transfer_functions as tf
from datashader.utils import export_image

from functools import partial

# Demo script: rasterize random points with datashader and export the image.
background = "white"
export = partial(export_image, background=background, export_path="export")

# 100k random (x, y, z) samples in [0, 1).
N = 100000
df = pd.DataFrame(np.random.random((N, 3)), columns=['x', 'y', 'z'])

f, ax = plt.subplots(2, 2)
ax_r = ax.ravel()

# NOTE(review): the third positional argument of scatter() is the marker
# size `s`, so df['z'].mean() sets a (tiny) constant size, and `cmap` has
# no effect without `c=` — likely intended c=df['z'] instead; confirm.
ax_r[0].scatter(df['x'], df['y'], df['z'].mean(), cmap=plt.get_cmap('jet'))
# ax_r[1].hist(df['x'])
# ax_r[2].hist(df['y'])
# ax_r[3].plot(df['z'])

# Aggregate mean(z) per pixel on a 250x300 canvas, shade with eq_hist,
# and write the result to export/test.png.
cvs = ds.Canvas(plot_width=250, plot_height=300)
agg = cvs.points(df, 'x', 'y', ds.mean('z'))
# a = export(tf.shade(agg, cmap=['blue', 'red'], how='eq_hist'), 'test')
# cmap = plt.get_cmap('jet')
a = export(tf.shade(agg, cmap=plt.get_cmap('jet'), how='eq_hist'), 'test')
plt.show()
示例#46
0
def edgesplot(edges, name=None, canvas=None):
    """Draw the edge set as count-aggregated lines and shade the canvas."""
    target = ds.Canvas(**cvsopts) if canvas is None else canvas
    aggregated = target.line(edges, 'x', 'y', agg=ds.count())
    return tf.shade(aggregated, name=name)
示例#47
0
def nodesplot(nodes, name=None, canvas=None, cat=None):
    """Aggregate node positions (optionally per category) and render them
    as spread red points."""
    if canvas is None:
        canvas = ds.Canvas(**cvsopts)
    aggregator = ds.count_cat(cat) if cat is not None else None
    points_agg = canvas.points(nodes, 'x', 'y', aggregator)
    shaded = tf.shade(points_agg, cmap=["#FF3333"])
    return tf.spread(shaded, px=1, name=name)
示例#48
0
def test_shade_cmap_non_categorical_alpha(agg, cmap):
    """Shading a non-categorical aggregate with how='log' matches the
    precomputed alpha-channel solution."""
    expected = np.array([[0, 671088640, 1946157056],
                         [2701131776, 0, 3640655872],
                         [3976200192, 4278190080, 0]])
    expected = tf.Image(expected, coords=coords, dims=dims)
    result = tf.shade(agg.a, how='log', cmap=cmap)
    assert_eq_xr(result, expected)
示例#49
0
def test_shade_cmap_errors(agg):
    """tf.shade rejects invalid cmap arguments with ValueError."""
    for bad_cmap in ('foo', []):
        with pytest.raises(ValueError):
            tf.shade(agg.a, cmap=bad_cmap)
def test_shade_category(array):
    coords = [np.array([0, 1]), np.array([2, 5])]
    cat_agg = tf.Image(array([[(0, 12, 0), (3, 0, 3)],
                              [(12, 12, 12), (24, 0, 0)]], dtype='u4'),
                       coords=(coords + [['a', 'b', 'c']]),
                       dims=(dims + ['cats']))

    colors = [(255, 0, 0), '#0000FF', 'orange']

    img = tf.shade(cat_agg, color_key=colors, how='log', min_alpha=20)
    sol = np.array([[2583625728, 335565567],
                    [4283774890, 3707764991]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    # Check dims/coordinates order
    assert list(img.coords) == ['x_axis', 'y_axis']
    assert list(img.dims) == ['y_axis', 'x_axis']

    colors = dict(zip('abc', colors))

    img = tf.shade(cat_agg, color_key=colors, how='cbrt', min_alpha=20)
    sol = np.array([[2650734592, 335565567],
                    [4283774890, 3657433343]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)

    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)
    sol = np.array([[1140785152, 335565567],
                    [4283774890, 2701132031]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)

    img = tf.shade(cat_agg, color_key=colors,
                   how=lambda x, m: np.where(m, np.nan, x) ** 2,
                   min_alpha=20)
    sol = np.array([[503250944, 335565567],
                    [4283774890, 1744830719]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)

    # all pixels should be at min_alpha
    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(50, 100))
    sol = np.array([[16711680, 21247],
                    [5584810, 255]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    # redundant verification that alpha channel is all 0x00
    assert ((img.data[0,0] >> 24) & 0xFF) == 0
    assert ((img.data[0,1] >> 24) & 0xFF) == 0
    assert ((img.data[1,0] >> 24) & 0xFF) == 0
    assert ((img.data[1,1] >> 24) & 0xFF) == 0

    # all pixels should be at max_alpha
    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(0, 2))
    sol = np.array([[4294901760, 4278211327],
                    [4283774890, 4278190335]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    # redundant verification that alpha channel is all 0xFF
    assert ((img.data[0,0] >> 24) & 0xFF) == 255
    assert ((img.data[0,1] >> 24) & 0xFF) == 255
    assert ((img.data[1,0] >> 24) & 0xFF) == 255
    assert ((img.data[1,1] >> 24) & 0xFF) == 255

    # One pixel should be min-alpha, the other max-alpha
    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(6, 36))
    sol = np.array([[872349696, 21247],
                    [4283774890, 2566914303]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    # redundant verification that alpha channel is correct
    assert ((img.data[0,0] >> 24) & 0xFF) == 51 # (6 / 30) * 255
    assert ((img.data[0,1] >> 24) & 0xFF) == 0
    assert ((img.data[1,0] >> 24) & 0xFF) == 255
    assert ((img.data[1,1] >> 24) & 0xFF) == 153 # ( 18 /30) * 255

    # One pixel should be min-alpha, the other max-alpha
    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(0, 72))
    sol = np.array([[721354752, 352342783],
                    [2136291242, 1426063615]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    # redundant verification that alpha channel is correct
    assert ((img.data[0,0] >> 24) & 0xFF) == 42 # (12 / 72) * 255
    assert ((img.data[0,1] >> 24) & 0xFF) == 21 # (6 / 72) * 255
    assert ((img.data[1,0] >> 24) & 0xFF) == 127 # ( 36 / 72) * 255
    assert ((img.data[1,1] >> 24) & 0xFF) == 85 # ( 24 /72 ) * 255

    # test that empty coordinates are always fully transparent, even when
    # min_alpha is non-zero
    cat_agg = tf.Image(array([[(0, 0, 0), (3, 0, 3)],
                              [(12, 12, 12), (24, 0, 0)]], dtype='u4'),
                           coords=(coords + [['a', 'b', 'c']]),
                           dims=(dims + ['cats']))

    # First test auto-span
    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)
    sol = np.array([[5584810, 335565567],
                    [4283774890, 2701132031]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)

    # redundant verification that alpha channel is correct
    assert ((img.data[0,0] >> 24) & 0xFF) == 0 # fully transparent
    assert ((img.data[0,1] >> 24) & 0xFF) != 0 # not fully transparent
    assert ((img.data[1,0] >> 24) & 0xFF) != 0 # not fully transparent
    assert ((img.data[1,1] >> 24) & 0xFF) != 0 # not fully transparent

    # Next test manual-span
    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, span=(6, 36))
    sol = np.array([[5584810, 335565567],
                    [4283774890, 2701132031]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)

    # redundant verification that alpha channel is correct
    assert ((img.data[0,0] >> 24) & 0xFF) == 0 # fully transparent
    assert ((img.data[0,1] >> 24) & 0xFF) != 0 # not fully transparent
    assert ((img.data[1,0] >> 24) & 0xFF) != 0 # not fully transparent
    assert ((img.data[1,1] >> 24) & 0xFF) != 0 # not fully transparent


    # Categorical aggregations with some reductions (such as sum) can result in negative
    # values in the data here we test positive and negative values
    cat_agg = tf.Image(array([[(0, -30, 0), (18, 0, -18)],
                              [(-2, 2, -2), (-18, 9, 12)]], dtype='i4'),
                       coords=(coords + [['a', 'b', 'c']]),
                       dims=(dims + ['cats']))

    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)
    sol = np.array([[335565567, 3914667690],
                    [3680253090, 4285155988]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    assert ((img.data[0,0] >> 24) & 0xFF) == 20
    assert ((img.data[0,1] >> 24) & 0xFF) == 233
    assert ((img.data[1,0] >> 24) & 0xFF) == 219
    assert ((img.data[1,1] >> 24) & 0xFF) == 255

    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, span=(0, 3))
    sol = np.array([[335565567, 341120682],
                    [341587106, 4285155988]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    assert ((img.data[0,0] >> 24) & 0xFF) == 20 # min alpha
    assert ((img.data[0,1] >> 24) & 0xFF) == 20 # min alpha
    assert ((img.data[1,0] >> 24) & 0xFF) == 20 # min alpha
    assert ((img.data[1,1] >> 24) & 0xFF) == 255

    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, color_baseline=9)
    sol = np.array([[341129130, 3909091583],
                    [3679795114, 4278232575]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    assert ((img.data[0,0] >> 24) & 0xFF) == 20
    assert ((img.data[0,1] >> 24) & 0xFF) == 233
    assert ((img.data[1,0] >> 24) & 0xFF) == 219
    assert ((img.data[1,1] >> 24) & 0xFF) == 255

    # Categorical aggregations with some reductions (such as sum) can result in negative
    # values in the data, here we test all negative values
    cat_agg = tf.Image(array([[(0, -30, 0), (-18, 0, -18)],
                              [(-2, -2, -2), (-18, 0, 0)]], dtype='i4'),
                       coords=(coords + [['a', 'b', 'c']]),
                       dims=(dims + ['cats']))

    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)
    sol = np.array([[1124094719, 344794225],
                    [4283774890, 2708096148]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    assert ((img.data[0,0] >> 24) & 0xFF) == 67
    assert ((img.data[0,1] >> 24) & 0xFF) == 20
    assert ((img.data[1,0] >> 24) & 0xFF) == 255
    assert ((img.data[1,1] >> 24) & 0xFF) == 161

    img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, span=(6, 36))
    sol = np.array([[335565567, 344794225],
                    [341129130, 342508692]], dtype='u4')
    sol = tf.Image(sol, coords=coords, dims=dims)
    assert_eq_xr(img, sol)
    assert ((img.data[0,0] >> 24) & 0xFF) == 20 # min alpha
    assert ((img.data[0,1] >> 24) & 0xFF) == 20 # min alpha
    assert ((img.data[1,0] >> 24) & 0xFF) == 20 # min alpha
    assert ((img.data[1,1] >> 24) & 0xFF) == 20 # min alpha
示例#51
0
def build_datashader_plot(
        df, aggregate, aggregate_column, colorscale_name, colorscale_transform,
        new_coordinates, position, x_range, y_range
):
    """
    Build a map figure with the aggregated points rendered by datashader
    as a mapbox image layer.

    Args:
        df: pandas or cudf DataFrame
        aggregate: Aggregate operation (count, mean, etc.)
        aggregate_column: Column to perform aggregate on. Ignored for 'count' aggregate
        colorscale_name: Name of plotly colorscale
        colorscale_transform: Colorscale transformation
        new_coordinates: Corner coordinates for the mapbox image layer
        position: Mapbox position dict merged into the figure layout
            (e.g. center/zoom)
        x_range: (x0, x1) extent of the view, passed to the datashader Canvas
        y_range: (y0, y1) extent of the view, passed to the datashader Canvas

    Returns:
        Map figure dictionary
    """
    datashader_color_scale = {}

    if aggregate == 'count_cat':
        # Categorical aggregation: color each category via the global color key
        datashader_color_scale['color_key'] = colors[aggregate_column]
    else:
        datashader_color_scale['cmap'] = [
            i[1] for i in build_colorscale(
                colorscale_name, colorscale_transform, aggregate, aggregate_column
            )
        ]
        if not isinstance(df, cudf.DataFrame):
            df[aggregate_column] = df[aggregate_column].astype('int8')

    cvs = ds.Canvas(
        plot_width=1400,
        plot_height=1400,
        x_range=x_range, y_range=y_range
    )
    agg = cvs.points(
        df, x='x', y='y', agg=getattr(ds, aggregate)(aggregate_column)
    )

    # Count the number of selected towers. cupy.asnumpy moves GPU data to
    # host memory (it is a no-op for numpy arrays) so int() works either way.
    temp = agg.sum()
    temp.data = cupy.asnumpy(temp.data)
    n_selected = int(temp)

    if n_selected == 0:
        # Nothing to display
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}
        layers = []
    else:
        # Shade aggregation into an image that we can add to the map as a mapbox
        # image layer. Small selections get a larger spread so points stay visible.
        max_px = 10 if n_selected < 5000 else 1
        img = tf.shade(agg, **datashader_color_scale)
        img = tf.dynspread(
            img,
            threshold=0.5,
            max_px=max_px,
            shape='circle',
        ).to_pil()

        # Add image as mapbox image layer. Note that as of version 4.4, plotly will
        # automatically convert the PIL image object into a base64 encoded png string
        layers = [
            {
                "sourcetype": "image",
                "source": img,
                "coordinates": new_coordinates
            }
        ]

        # Do not display any mapbox markers
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}

    # Build map figure
    map_graph = {
        'data': [{
            'type': 'scattermapbox',
            'lat': lat, 'lon': lon,
            'customdata': customdata,
            'marker': marker,
            'hovertemplate': (
                "sex: %{customdata[0]}<br>"
                "<extra></extra>"
            )
        }],
        'layout': {
            'template': template,
            'uirevision': True,
            'mapbox': {
                'style': "dark",
                'accesstoken': token,
                'layers': layers,
            },
            'margin': {"r": 0, "t": 0, "l": 0, "b": 0},
            'height': 500,
            'shapes': [{
                'type': 'rect',
                'xref': 'paper',
                'yref': 'paper',
                'x0': 0,
                'y0': 0,
                'x1': 1,
                'y1': 1,
                'line': {
                    'width': 1,
                    'color': '#191a1a',
                }
            }]
        },
    }

    map_graph['layout']['mapbox'].update(position)

    return map_graph
示例#52
0
def my_nodesplot(nodes, name=None, canvas=None, cat=None):
    """Render node positions as a spread, shaded datashader image.

    Aggregates the ``x``/``y`` columns of *nodes* on *canvas* (a default
    canvas is built from ``cvsopts`` when none is given), counting by the
    *cat* category column when provided, then shades and spreads the result.
    """
    if canvas is None:
        canvas = ds.Canvas(**cvsopts)
    if cat is None:
        aggregator = None
    else:
        aggregator = ds.count_cat(cat)
    agg = canvas.points(nodes, 'x', 'y', aggregator)
    shaded = tf.shade(agg, cmap=["#333333"], color_key=colors, min_alpha=255)
    return tf.spread(shaded, px=3, name=name)
示例#53
0
#sparse.save_npz('1e6_factors_mat.npz', mat_csr)

# Reduce the sparse factor matrix to 2-D with UMAP (cosine distance).
reducer = umap.UMAP(metric='cosine', verbose=2, n_epochs=100)

# takes about 2 hours with an i7 7700 ;_;7
embedding = reducer.fit_transform(mat_csr)

# Persist the embedding, then reload it so the plotting section below can be
# re-run without repeating the expensive fit.
np.save('embedding_primes', embedding)

mat = np.load('embedding_primes.npy')

df = pd.DataFrame(mat, columns=['x', 'y'])

# Datashader rendering of the embedding.
cvs = ds.Canvas(plot_width=500, plot_height=500)
agg = cvs.points(df, 'x', 'y')
img = tf.shade(agg, how='eq_hist', cmap=mp.cm.viridis)
# tf.set_background does not mutate its input; capture the returned image.
img = tf.set_background(img, 'black')

# Matplotlib scatter of the same embedding, colored by row index.
fig = plt.figure(figsize=(10, 10))
fig.patch.set_facecolor('black')
plt.scatter(df.x,
            df.y,
            marker='o',
            s=1,
            edgecolor='none',  # '' is invalid in modern matplotlib; 'none' disables edges
            c=df.index,
            cmap="magma",
            alpha=0.5)

plt.axis("off")
plt.show()