def test_mean():
    out = df.i32.values.reshape((2, 2, 5)).mean(axis=2).T
    eq(c.points(df, 'x', 'y', ds.mean('i32')), out)
    eq(c.points(df, 'x', 'y', ds.mean('i64')), out)
    out = np.nanmean(df.f64.values.reshape((2, 2, 5)), axis=2).T
    eq(c.points(df, 'x', 'y', ds.mean('f32')), out)
    eq(c.points(df, 'x', 'y', ds.mean('f64')), out)
def test_mean():
    out = xr.DataArray(df.i32.values.reshape((2, 2, 5)).mean(axis=2, dtype='f8').T,
                       coords=coords, dims=dims)
    assert_eq(c.points(df, 'x', 'y', ds.mean('i32')), out)
    assert_eq(c.points(df, 'x', 'y', ds.mean('i64')), out)
    out = xr.DataArray(np.nanmean(df.f64.values.reshape((2, 2, 5)), axis=2).T,
                       coords=coords, dims=dims)
    assert_eq(c.points(df, 'x', 'y', ds.mean('f32')), out)
    assert_eq(c.points(df, 'x', 'y', ds.mean('f64')), out)
def test_mean():
    out = xr.DataArray(df.i32.values.reshape((2, 2, 5)).mean(axis=2, dtype='f8').T,
                       coords=coords, dims=dims)
    assert_eq(c.points(ddf, 'x', 'y', ds.mean('i32')), out)
    assert_eq(c.points(ddf, 'x', 'y', ds.mean('i64')), out)
    out = xr.DataArray(np.nanmean(df.f64.values.reshape((2, 2, 5)), axis=2).T,
                       coords=coords, dims=dims)
    assert_eq(c.points(ddf, 'x', 'y', ds.mean('f32')), out)
    assert_eq(c.points(ddf, 'x', 'y', ds.mean('f64')), out)
def __call__(self, dset, **params):
    self.p = ParamOverrides(self, params)
    if self.p.vdim is None:
        vdim = dset.vdims[0].name
    else:
        vdim = self.p.vdim

    pts = hv.util.Dynamic(dset, operation=skypoints,
                          streams=[self.p.filter_stream])

    if self.p.aggregator == 'mean':
        aggregator = ds.mean(vdim)
    elif self.p.aggregator == 'std':
        aggregator = ds.std(vdim)
    elif self.p.aggregator == 'count':
        aggregator = ds.count()

    decimate_opts = dict(plot={'tools': ['hover', 'box_select']},
                         style={'alpha': 0,
                                'size': self.p.decimate_size,
                                'nonselection_alpha': 0})

    decimated = decimate(pts).opts(**decimate_opts)
    raster_ = rasterize(pts, aggregator=aggregator)
    color_gadget = raster_.opts(cmap=Viridis[256], colorbar=True, alpha=0)
    sky_shaded = shade(raster_, cmap=viridis)
    plot = dynspread(sky_shaded) * decimated * color_gadget

    return plot.options(bgcolor="black", responsive=True, min_height=100)
def plot_gene_insitu_routine(ax, data, x, y, hue, scale_paras, cmap, title,
                             arrows=True, scalebar=True, vmaxp=99, vmin=0, vmax=0):
    """Render a mean-aggregated scatter of `hue` over in situ (x, y) coordinates."""
    # main
    agg = ds.mean(hue)
    rangex = data[x].max() - data[x].min()
    rangey = data[y].max() - data[y].min()
    ps = PlotScale(rangex, rangey, **scale_paras)
    massive_scatterplot(ax, data, x, y, ps.npxlx, ps.npxly,
                        agg=agg, cmap=cmap, vmaxp=vmaxp, vmin=vmin, vmax=vmax)
    ax.set_title(title)

    # arrows
    if arrows:
        add_arrows(ax, 'in situ')

    # scale bar
    if scalebar:
        bar_length = 1000  # (micron)
        add_scalebar(ax, ps.npxlx - ps.len2pixel(bar_length), ps.npxlx, '1 mm')
    return ax
def datashade(
    self,
    df: pd.DataFrame,
    x_dim: str = "x",
    y_dim: str = "y",
    z_dim: str = "h_range",
    plot_width: int = 1400,
) -> xr.DataArray:
    """
    Convenience function to quickly datashade a table of x, y, z points
    into a grid for visualization purposes, using a mean aggregate function.
    """
    # Datashade our height values (vector points) onto a grid (raster image).
    # Will maintain the correct aspect ratio according to the region bounds.
    canvas: datashader.core.Canvas = datashader.Canvas(
        plot_width=plot_width,
        plot_height=int(plot_width * ((self.ymax - self.ymin) / (self.xmax - self.xmin))),
        x_range=(self.xmin, self.xmax),
        y_range=(self.ymin, self.ymax),
    )
    return canvas.points(source=df, x=x_dim, y=y_dim, agg=datashader.mean(column=z_dim))
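# Usage sketch for the datashade method above. The FakeGrid class, its
# bounds, and the random data are illustrative assumptions, not part of
# the original code.
import numpy as np
import pandas as pd


class FakeGrid:
    """Hypothetical stand-in exposing the bounds attributes the method reads."""
    xmin, xmax, ymin, ymax = 0.0, 10.0, 0.0, 5.0
    datashade = datashade  # reuse the function defined above as a method


rng = np.random.default_rng(0)
sample = pd.DataFrame({
    "x": rng.uniform(0.0, 10.0, 10_000),
    "y": rng.uniform(0.0, 5.0, 10_000),
    "h_range": rng.normal(2.0, 0.5, 10_000),
})
# Returns an xr.DataArray holding the mean h_range per pixel.
raster = FakeGrid().datashade(sample, plot_width=700)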
def _process(self, element, key=None):
    vdim = self.p.vdim
    if self.p.aggregator == 'mean':
        aggregator = ds.mean(vdim)
    elif self.p.aggregator == 'std':
        aggregator = ds.std(vdim)
    elif self.p.aggregator == 'count':
        aggregator = ds.count()

    kwargs = dict(cmap=cc.palette[self.p.cmap], aggregator=aggregator)
    if self.p.width is not None:
        kwargs.update(width=self.p.width, height=self.p.height,
                      streams=[hv.streams.RangeXY])

    datashaded = dynspread(datashade(element, **kwargs))
    # decimate_opts = dict(plot={'tools': ['hover', 'box_select']},
    #                      style={'alpha': 0, 'size': self.p.decimate_size,
    #                             'nonselection_alpha': 0})
    # decimated = decimate(element, max_samples=self.p.max_samples).opts(**decimate_opts)
    return datashaded  # * decimated
def view_elevation(self):
    """Method to display the mesh as continuous color contours."""
    if self.elevation_toggle:
        return rasterize(self.tri_mesh, aggregator=ds.mean('z'), precompute=True)
    else:
        return hv.Curve([])
def test_rasterize_trimesh_ds_aggregator(self):
    simplices = [(0, 1, 2, 0.5), (3, 2, 1, 1.5)]
    vertices = [(0., 0.), (0., 1.), (1., 0), (1, 1)]
    trimesh = TriMesh((simplices, vertices), vdims=['z'])
    img = rasterize(trimesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))
    image = Image(np.array([[1.5, 1.5, np.nan],
                            [0.5, 1.5, np.nan],
                            [np.nan, np.nan, np.nan]]),
                  bounds=(0, 0, 1, 1))
    self.assertEqual(img, image)
def compute_image(self, w, h, map_scheme='bone', wavelength='770.7', bg='#777777'):
    # Use the bone color map to make it look more moon-like
    cmap = plt.get_cmap(map_scheme)
    canvas = ds.Canvas(plot_width=w, plot_height=h)
    agg = canvas.points(self.dataframe, 'long', 'lat', ds.mean(wavelength))
    img = tf.shade(agg, cmap=cmap)
    self.img = ds.transfer_functions.set_background(img, bg)
    return self.img
def test_rasterize_quadmesh(self):
    qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]])))
    img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))
    image = Image(np.array([[2, 3, 3], [2, 3, 3], [0, 1, 1]]),
                  bounds=(-.5, -.5, 1.5, 1.5))
    self.assertEqual(img, image)
def test_multiple_aggregates(df):
    agg = c.points(df, 'x', 'y',
                   ds.summary(f64_mean=ds.mean('f64'),
                              i32_sum=ds.sum('i32'),
                              i32_count=ds.count('i32')))

    f = lambda x: xr.DataArray(x, coords=coords, dims=dims)
    assert_eq_xr(agg.f64_mean, f(np.nanmean(values(df.f64).reshape((2, 2, 5)), axis=2).T))
    assert_eq_xr(agg.i32_sum, f(values(df.i32).reshape((2, 2, 5)).sum(axis=2, dtype='f8').T))
    assert_eq_xr(agg.i32_count, f(np.array([[5, 5], [5, 5]], dtype='i4')))
def test_multiple_aggregates():
    agg = c.points(ddf, 'x', 'y',
                   f64=dict(std=ds.std('f64'), mean=ds.mean('f64')),
                   i32_sum=ds.sum('i32'),
                   i32_count=ds.count('i32'))

    eq(agg.f64.std, df.f64.values.reshape((2, 2, 5)).std(axis=2).T)
    eq(agg.f64.mean, df.f64.values.reshape((2, 2, 5)).mean(axis=2).T)
    eq(agg.i32_sum, df.i32.values.reshape((2, 2, 5)).sum(axis=2).T)
    eq(agg.i32_count, np.array([[5, 5], [5, 5]], dtype='i4'))
def test_multiple_aggregates():
    agg = c.points(ddf, 'x', 'y',
                   ds.summary(f64_std=ds.std('f64'),
                              f64_mean=ds.mean('f64'),
                              i32_sum=ds.sum('i32'),
                              i32_count=ds.count('i32')))

    f = lambda x: xr.DataArray(x, coords=coords, dims=dims)
    assert_eq(agg.f64_std, f(np.nanstd(df.f64.values.reshape((2, 2, 5)), axis=2).T))
    assert_eq(agg.f64_mean, f(np.nanmean(df.f64.values.reshape((2, 2, 5)), axis=2).T))
    assert_eq(agg.i32_sum, f(df.i32.values.reshape((2, 2, 5)).sum(axis=2, dtype='f8').T))
    assert_eq(agg.i32_count, f(np.array([[5, 5], [5, 5]], dtype='i4')))
def make_sky(self, object_type, ra_range=None, dec_range=None,
             x_range=None, y_range=None, **kwargs):
    if object_type == 'all':
        dset = self.ds
    else:
        dset = self.ds.select(label=object_type)

    if x_range is not None and y_range is not None:
        dset = dset.select(x=x_range, y=y_range)

    self._selected = dset.data.id

    pts = dset.to(hv.Points, kdims=['ra', 'dec'], vdims=['y'], groupby=[])
    agg = aggregate(pts, width=100, height=100,
                    x_range=ra_range, y_range=dec_range,
                    aggregator=ds.mean('y'), dynamic=False)
    hover = hv.QuadMesh(agg).opts('[tools=["hover"]] (alpha=0 hover_alpha=0.2)')
    shaded = dynspread(datashade(pts, x_range=ra_range, y_range=dec_range,
                                 dynamic=False, cmap=cc.palette['coolwarm'],
                                 aggregator=ds.mean('y')))
    shaded = shaded.opts('RGB [width=400, height=400]')

    return (shaded * hover).relabel('{} ({})'.format(object_type, len(dset)))
def test_trimesh_agg_api():
    """Assert that the trimesh aggregation API properly handles weights on the simplices."""
    pts = pd.DataFrame({'x': [1, 3, 4, 3, 3],
                        'y': [2, 1, 2, 1, 4]},
                       columns=['x', 'y'])
    tris = pd.DataFrame({'n1': [4, 1], 'n2': [1, 4], 'n3': [2, 0],
                         'weight': [0.83231525, 1.3053126]},
                        columns=['n1', 'n2', 'n3', 'weight'])
    cvs = ds.Canvas(x_range=(0, 10), y_range=(0, 10))
    agg = cvs.trimesh(pts, tris, agg=ds.mean('weight'))
    assert agg.shape == (600, 600)
def create_image(x_range=x_range, y_range=y_range, w=w, h=h):
    """Datashade the mean of `col` over the given ranges and export it as a PNG."""
    cvs = ds.Canvas(x_range=x_range, y_range=y_range, plot_height=h, plot_width=w)
    if len(fdf[col].unique()) > 10:
        colormap = fire
    else:
        colormap = bkr
    agg = cvs.points(fdf, 'x_web', 'y_web', agg=ds.mean(col))
    image = dtf.shade(agg, cmap=colormap)
    ds.utils.export_image(image, filename=col + '.png')
    return dtf.dynspread(image, threshold=0.75, max_px=8)
def test_categorical_mean(ddf):
    sol = np.array([[[2, nan, nan, nan], [nan, nan, 12, nan]],
                    [[nan, 7, nan, nan], [nan, nan, nan, 17]]])
    out = xr.DataArray(sol,
                       coords=(coords + [['a', 'b', 'c', 'd']]),
                       dims=(dims + ['cat']))

    agg = c.points(ddf, 'x', 'y', ds.by('cat', ds.mean('f32')))
    assert_eq_xr(agg, out)

    agg = c.points(ddf, 'x', 'y', ds.by('cat', ds.mean('f64')))
    assert_eq_xr(agg, out)

    out = xr.DataArray(sol, coords=(coords + [range(4)]), dims=(dims + ['cat_int']))

    agg = c.points(ddf, 'x', 'y',
                   ds.by(ds.category_modulo('cat_int', modulo=4, offset=10), ds.mean('f32')))
    assert_eq_xr(agg, out)

    agg = c.points(ddf, 'x', 'y',
                   ds.by(ds.category_modulo('cat_int', modulo=4, offset=10), ds.mean('f64')))
    assert_eq_xr(agg, out)

    # add an extra category (this will count nans and out of bounds)
    sol = np.append(sol, [[[nan], [nan]], [[nan], [nan]]], axis=2)
    for col in 'f32', 'f64':
        out = xr.DataArray(sol, coords=(coords + [range(5)]), dims=(dims + [col]))
        agg = c.points(ddf, 'x', 'y',
                       ds.by(ds.category_binning(col, 0, 20, 4), ds.mean(col)))
        assert_eq_xr(agg, out)
def make_image(data, time_range, y_range, size, scale=None, width=0):
    "Flatten the given range of the data into a 2d image"
    time_range = (time_range[0].timestamp() * 1e6,
                  time_range[1].timestamp() * 1e6)
    cvs = datashader.Canvas(x_range=time_range, y_range=y_range,
                            plot_width=size[0], plot_height=size[1],
                            y_axis_type=scale or "linear")

    # aggregate some useful measures
    agg_line = cvs.line(source=data["data"], x="t", y="value_r")
    agg_points = cvs.points(source=data["data"], x="t", y="value_r",
                            agg=datashader.summary(
                                count=datashader.count("value_r"),
                                vmean=datashader.mean("value_r"),
                                vmin=datashader.min("value_r"),
                                vmax=datashader.max("value_r")))
    color = data["info"].get("color", "red")
    image = datashader.transfer_functions.shade(agg_line, cmap=[color])
    if width > 0:
        image = datashader.transfer_functions.spread(image, px=width)
        # image = datashader.transfer_functions.spread(
        #     image, mask=np.matrix([[False, False, False],
        #                            [False, True, True],
        #                            [False, True, True]]))

    with timer("Making hover info"):
        indices = np.where(np.nanmax(agg_points["count"].values, axis=0))[0]
        vmin = np.take(np.nanmin(agg_points["vmin"].values, axis=0), indices)
        vmax = np.take(np.nanmax(agg_points["vmax"].values, axis=0), indices)
        # vmean = np.take(np.nanmax(agg_points["vmean"].values, axis=0), indices)
        # TODO: aggregating the mean is not quite this simple...
        timestamps = np.take(agg_points["x_axis"].values, indices)
        count = np.take(np.sum(agg_points["count"].values, axis=0), indices)
        desc = {
            "total_points": data["points"],
            "indices": indices.tolist(),
            "min": np.where(np.isnan(vmin), None, vmin).tolist(),
            "max": np.where(np.isnan(vmax), None, vmax).tolist(),
            "timestamp": [float(t) for t in timestamps],
            # "mean": np.where(np.isnan(vmean), None, vmean).tolist(),
            "count": np.where(np.isnan(count), None, count).tolist(),
        }
    return image, desc
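# The TODO above flags a real pitfall: averaging per-pixel means weights a
# pixel with one sample the same as a pixel with a thousand. A sketch of a
# correct reduction, assuming a datashader.sum("value_r") reduction named
# `vsum` were added to the summary above (hypothetical name):
import numpy as np


def column_means(vsum, count):
    """Collapse per-pixel sums/counts along axis 0 into per-column means."""
    total = np.nansum(vsum, axis=0)   # total of raw values per column
    n = np.nansum(count, axis=0)      # number of raw samples per column
    # Divide only where samples exist; leave empty columns as NaN.
    return np.where(n > 0, total / np.where(n > 0, n, 1), np.nan)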
class contours_rasterize(aggregate):
    """
    Rasterizes the Contours element by weighting the aggregation by
    the iso-contour levels if a value dimension is defined, otherwise
    default to any aggregator.
    """

    aggregator = param.ClassSelector(default=ds.mean(),
                                     class_=(ds.reductions.Reduction, basestring))

    def _get_aggregator(self, element, add_field=True):
        agg = self.p.aggregator
        if not element.vdims and agg.column is None and not isinstance(agg, (rd.count, rd.any)):
            return ds.any()
        return super(contours_rasterize, self)._get_aggregator(element, add_field)
def test_multiple_aggregates(ddf):
    if dask_cudf and isinstance(ddf, dask_cudf.DataFrame):
        pytest.skip("std not supported with cudf")

    agg = c.points(ddf, 'x', 'y',
                   ds.summary(f64_std=ds.std('f64'),
                              f64_mean=ds.mean('f64'),
                              i32_sum=ds.sum('i32'),
                              i32_count=ds.count('i32')))

    f = lambda x: xr.DataArray(x, coords=coords, dims=dims)
    assert_eq_xr(agg.f64_std, f(np.nanstd(values(df_pd.f64).reshape((2, 2, 5)), axis=2).T))
    assert_eq_xr(agg.f64_mean, f(np.nanmean(values(df_pd.f64).reshape((2, 2, 5)), axis=2).T))
    assert_eq_xr(agg.i32_sum, f(values(df_pd.i32).reshape((2, 2, 5)).sum(axis=2, dtype='f8').T))
    assert_eq_xr(agg.i32_count, f(np.array([[5, 5], [5, 5]], dtype='i4')))
def test_categorical_mean_binning(ddf):
    if cudf and isinstance(ddf._meta, cudf.DataFrame):
        pytest.skip(
            "The categorical binning of 'mean' reduction is not yet supported on the GPU"
        )
    sol = np.array([[[2, nan, nan, nan], [nan, nan, 12, nan]],
                    [[nan, 7, nan, nan], [nan, nan, nan, 17]]])

    # add an extra category (this will count nans and out of bounds)
    sol = np.append(sol, [[[nan], [nan]], [[nan], [nan]]], axis=2)
    for col in 'f32', 'f64':
        out = xr.DataArray(sol, coords=(coords + [range(5)]), dims=(dims + [col]))
        agg = c.points(ddf, 'x', 'y',
                       ds.by(ds.category_binning(col, 0, 20, 4), ds.mean(col)))
        assert_eq_xr(agg, out)
def tests_datashader():
    import datashader as ds
    import datashader.transfer_functions as tf
    import pandas as pd

    df = pd.read_csv(
        '/Users/iregon/C3S/dessaps/test_data/imma_converter/observations-sst-2014-6.psv',
        usecols=[6, 7, 14], sep="|", skiprows=0)
    cvs = ds.Canvas()  # default-size canvas
    agg_mean = cvs.points(df, 'longitude', 'latitude', ds.mean('observation_value'))
    agg_max = cvs.points(df, 'longitude', 'latitude', ds.max('observation_value'))
    agg_min = cvs.points(df, 'longitude', 'latitude', ds.min('observation_value'))
    agg_count = cvs.points(df, 'longitude', 'latitude', ds.count('observation_value'))
def generate(self, bounds_polygon: sg.Polygon, raster_shape: Tuple[int, int],
             from_cache: bool = False, hour: int = 8, resolution: float = 20,
             **kwargs) -> Tuple[Geometry, np.ndarray, gpd.GeoDataFrame]:
    from holoviews.operation.datashader import rasterize
    import geoviews as gv
    import datashader as ds
    import colorcet

    relative_variation = self.relative_variations_flat[hour]
    roads_gdf = self._interpolate_traffic_counts(bounds_polygon)
    roads_gdf['population_per_hour'] = roads_gdf['population_per_hour'] * relative_variation
    roads_gdf['population'] = roads_gdf['population_per_hour'] / 3600
    roads_gdf['density'] = roads_gdf['population'] / (roads_gdf.geometry.area * 1e-6)  # km^2
    ln_mask = roads_gdf['density'] > 0
    roads_gdf.loc[ln_mask, 'ln_density'] = np.log(roads_gdf.loc[ln_mask, 'density'])
    roads_gdf['ln_density'].fillna(0, inplace=True)
    roads_gdf = roads_gdf.set_crs('EPSG:27700').to_crs('EPSG:4326')

    points = gv.Polygons(roads_gdf, kdims=['Longitude', 'Latitude'],
                         vdims=['population_per_hour', 'ln_density', 'density']).opts(
        # colorbar=True,
        cmap=colorcet.CET_L18,
        color='ln_density',
        line_color='ln_density')
    bounds = bounds_polygon.bounds
    raster = rasterize(points, aggregator=ds.mean('density'),
                       width=raster_shape[0], height=raster_shape[1],
                       x_range=(bounds[1], bounds[3]),
                       y_range=(bounds[0], bounds[2]),
                       dynamic=False)
    raster_grid = np.copy(list(raster.data.data_vars.items())[0][1].data.astype(float))
    return points, raster_grid, gpd.GeoDataFrame(roads_gdf)
def _process(self, element, key=None):
    vdim = self.p.vdim
    if self.p.aggregator == "mean":
        aggregator = ds.mean(vdim)
    elif self.p.aggregator == "std":
        aggregator = ds.std(vdim)
    elif self.p.aggregator == "count":
        aggregator = ds.count()

    kwargs = dict(cmap=list(cc.palette[self.p.cmap]), aggregator=aggregator)
    datashaded = dynspread(datashade(element, **kwargs))
    # decimate_opts = dict(plot={'tools': ['hover', 'box_select']},
    #                      style={'alpha': 0, 'size': self.p.decimate_size,
    #                             'nonselection_alpha': 0})
    # decimated = decimate(element, max_samples=self.p.max_samples).opts(**decimate_opts)
    return datashaded.options(responsive=True, height=300)  # * decimated
def view_map(self):
    # print('view_map method')
    if self.adh_mod.mesh.elevation_toggle:
        elevation = rasterize(self.adh_mod.mesh.tri_mesh,
                              aggregator=ds.mean('z'),
                              precompute=True).apply.opts(
            opts.Image(cmap=self.cmap_opts.colormap,
                       clim=self.display_range.color_range,
                       height=self.map_height,
                       width=self.map_width))
    else:
        elevation = Curve([]).opts(height=self.map_height, width=self.map_width)

    # return self.adh_mod.mesh.view_bathy() * self.adh_mod.mesh.view_elements(
    #     line_color='yellow') * base_map * self.view_scatter()
    return elevation * self.adh_mod.mesh.view_elements(line_color='yellow') * \
        hv.DynamicMap(self.adh_mod.wmts.view) * self.view_scatter()
def __call__(self, dset, **params):
    self.p = ParamOverrides(self, params)
    if self.p.vdim is None:
        vdim = dset.vdims[0].name
    else:
        vdim = self.p.vdim

    pts = hv.util.Dynamic(dset, operation=skypoints,
                          streams=[self.p.filter_stream])

    if self.p.aggregator == 'mean':
        aggregator = ds.mean(vdim)
    elif self.p.aggregator == 'std':
        aggregator = ds.std(vdim)
    elif self.p.aggregator == 'count':
        aggregator = ds.count()

    kwargs = dict(cmap=cc.palette[self.p.cmap], aggregator=aggregator)
    if self.p.width is not None:
        kwargs.update(width=self.p.width, height=self.p.height)
        # streams=[hv.streams.RangeXY])

    decimate_opts = dict(plot={'tools': ['hover', 'box_select']},
                         style={'alpha': 0,
                                'size': self.p.decimate_size,
                                'nonselection_alpha': 0})

    decimated = decimate(pts).opts(**decimate_opts)
    sky_shaded = datashade(pts, **kwargs)

    return dynspread(sky_shaded) * decimated
def plot_gene_umap_routine(ax, data, x, y, hue, scale_paras, cmap, title,
                           arrows=True, vmaxp=99):
    """Render a mean-aggregated scatter of `hue` over UMAP (x, y) coordinates."""
    # main
    agg = ds.mean(hue)
    rangex = data[x].max() - data[x].min()
    rangey = data[y].max() - data[y].min()
    ps = PlotScale(rangex, rangey, **scale_paras)
    massive_scatterplot(ax, data, x, y, ps.npxlx, ps.npxly,
                        agg=agg, cmap=cmap, vmaxp=vmaxp)
    ax.set_title(title)

    # arrows
    if arrows:
        add_arrows(ax, 'UMAP', px=-0.03, py=-0.03)
    return ax
## Plot
## ==========================================
print("Plotting: ", end='')
tic = time.time()
plt.clf()
if fastPlotting:
    df = pd.DataFrame(np.array([particles.x, particles.z, particles.phase]).T,
                      columns=('x', 'y', 'phase'))
    refineFac = 2
    cvs = ds.Canvas(plot_width=model.Nx * refineFac, plot_height=model.Nz * refineFac,
                    x_range=(model.xmin, model.xmax), y_range=(model.zmin, model.zmax),
                    x_axis_type='linear', y_axis_type='linear')
    agg = cvs.points(df, 'x', 'y', ds.mean('phase'))
    plt.imshow(agg.variable,
               extent=[model.xmin, model.xmax, model.zmin, model.zmax],
               origin='lower')
else:
    plt.scatter(particles.x, particles.z, c=particles.phase)
cb = plt.colorbar()
plt.fill(ocPlate.x, ocPlate.z, facecolor='none', edgecolor='r')
plt.fill(slab.x, slab.z, facecolor='none', edgecolor='r')
plt.axis("equal")
plt.xlim([model.xmin, model.xmax])
def shade_scatter(
    dfs,
    in_ax=None,
    figsize: float = 6,
    pixels: int = 1000,
    spread_px: int = 1,
    spread_threshold: float = 0.2,
    min_alpha: int = 10,
    color_map=None,
    color_key: dict = None,
    mask_values: list = None,
    mask_name: str = "NA",
    mask_color: str = "k",
    ax_label_size: float = 12,
    frame_offset: float = 0.05,
    spine_width: float = 0.5,
    spine_color: str = "k",
    displayed_sides: tuple = ("bottom", "left"),
    legend_ondata: bool = True,
    legend_onside: bool = True,
    legend_size: float = 12,
    legends_per_col: int = 20,
    titles: Union[str, List[str]] = None,
    title_size: int = 12,
    hide_title: bool = False,
    cbar_shrink: float = 0.6,
    marker_scale: float = 70,
    lspacing: float = 0.1,
    cspacing: float = 1,
    savename: str = None,
    dpi: int = 300,
    force_ints_as_cats: bool = True,
    n_columns: int = 4,
    w_pad: float = None,
    h_pad: float = None,
    show_fig: bool = True,
):
    """
    Shows shaded scatter plots. If more than one dataframe is provided,
    the scatterplots are placed in a grid.
    """
    import datashader as dsh
    from datashader.mpl_ext import dsshow
    import datashader.transfer_functions as tf
    from functools import partial

    titles = _handle_titles_type(titles, len(dfs))
    axs = _create_axes(dfs, in_ax, figsize, figsize, w_pad, h_pad, n_columns)
    for n, df in _iter_dataframes(dfs, mask_values, mask_name, force_ints_as_cats):
        dim1, dim2, vc = df.columns[:3]
        v = df[vc]
        col_map, col_key = _scatter_make_colors(v, color_map, color_key,
                                                mask_color, mask_name)
        # categorical columns are counted per category, constant columns are
        # simply counted, and numeric columns are mean-aggregated
        if v.dtype.name == "category":
            agg = dsh.count_cat(vc)
        elif v.nunique() == 1:
            agg = dsh.count(vc)
        else:
            agg = dsh.mean(vc)

        ax = axs[int(n / n_columns), n % n_columns]
        artist = dsshow(
            df,
            dsh.Point(dim1, dim2),
            aggregator=agg,
            norm="eq_hist",
            color_key=col_key,
            cmap=col_map,
            alpha_range=(min_alpha, 255),
            shade_hook=partial(tf.dynspread, threshold=spread_threshold,
                               max_px=spread_px),
            plot_height=pixels,
            plot_width=pixels,
            aspect="equal",
            width_scale=1,
            height_scale=1,
            ax=ax,
        )
        _scatter_label_axis(df, ax, ax_label_size, frame_offset)
        _scatter_cleanup(ax, spine_width, spine_color, displayed_sides)
        title = titles[n] if titles is not None else None
        _scatter_legends(df, ax, col_map, col_key, legend_ondata, legend_onside,
                         legend_size, title, title_size, hide_title,
                         legends_per_col, marker_scale, lspacing, cspacing,
                         cbar_shrink)

    if savename:
        plt.savefig(savename, dpi=dpi, bbox_inches="tight")
    if show_fig:
        plt.show()
    else:
        return axs
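# Minimal call sketch for shade_scatter. It assumes each dataframe's first
# three columns are x, y, and the value to aggregate; the column names and
# random data below are illustrative only.
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
panel = pd.DataFrame({
    "UMAP1": rng.normal(size=5_000),
    "UMAP2": rng.normal(size=5_000),
    "expression": rng.gamma(2.0, size=5_000),  # numeric, so dsh.mean is used
})
shade_scatter([panel], figsize=5, pixels=800)  # a longer list yields a grid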
i = 1
for yr in range(y_ini, y_end + 1):
    for mo in range(1, 13):
        logging.info('{0}-{1}'.format(yr, mo))
        mm = '{:02d}'.format(mo)
        # Read monthly data (header and observed vars) to df indexed by report_id
        df_mo = read_monthly_data()
        # For each observed variable in df, compute stats and aggregate them
        # into its monthly composite.
        for vari in vars_in:
            if mm in descriptors['counts'][vari].keys():
                descriptors['counts'][vari][mm] = descriptors['counts'][vari][mm] + cvs.points(
                    df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.count(vari))
                descriptors['max'][vari][mm] = np.fmax(
                    descriptors['max'][vari][mm],
                    cvs.points(df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.max(vari)))
                descriptors['min'][vari][mm] = np.fmin(
                    descriptors['min'][vari][mm],
                    cvs.points(df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.min(vari)))
                # All this mess, because addition propagates nan's in xarrays!
                mean_mm = cvs.points(df_mo[['latitude', 'longitude', vari]],
                                     'longitude', 'latitude', ds.mean(vari))
                max_frame = np.fmax(descriptors['ave'][vari][mm], mean_mm)
                min_frame = np.fmin(descriptors['ave'][vari][mm], mean_mm)
                descriptors['ave'][vari][mm] = 0.5 * max_frame + 0.5 * min_frame
            else:
                # <class 'xarray.core.dataarray.DataArray'>
                descriptors['counts'][vari][mm] = cvs.points(
                    df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.count(vari))
                descriptors['max'][vari][mm] = cvs.points(
                    df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.max(vari))
                descriptors['min'][vari][mm] = cvs.points(
                    df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.min(vari))
                mean_mm = cvs.points(df_mo[['latitude', 'longitude', vari]],
                                     'longitude', 'latitude', ds.mean(vari))
                descriptors['ave'][vari][mm] = mean_mm
            # Also add monthly stats to the global aggregate
            if 'global' in descriptors['counts'][vari].keys():
                descriptors['counts'][vari]['global'] = descriptors['counts'][vari]['global'] + cvs.points(
                    df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.count(vari))
                descriptors['max'][vari]['global'] = np.fmax(
                    descriptors['max'][vari]['global'],
                    cvs.points(df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.max(vari)))
                descriptors['min'][vari]['global'] = np.fmin(
                    descriptors['min'][vari]['global'],
                    cvs.points(df_mo[['latitude', 'longitude', vari]], 'longitude', 'latitude', ds.min(vari)))
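# The "mess" the comment above complains about comes from NaN propagation in
# xarray arithmetic, and the fmax/fmin averaging of two running means only
# approximates a true mean. A NaN-safe alternative, sketched with hypothetical
# accumulator names, keeps separate sum and count grids instead:
import xarray as xr


def accumulate(total, count, mean_mm, count_mm):
    """NaN-safe running sum/count; the composite mean is total / count."""
    sum_mm = mean_mm * count_mm                 # back out the per-cell sum
    total = total.fillna(0) + sum_mm.fillna(0)  # fillna stops NaN propagation
    count = count.fillna(0) + count_mm.fillna(0)
    return total, count

# Final composite, NaN wherever no observation ever landed:
# ave = (total / count).where(count > 0)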
def image_callback(x_range, y_range, w, h):
    cvs = ds.Canvas(plot_width=w, plot_height=h,
                    x_range=x_range, y_range=y_range)
    agg = cvs.points(df, 'meterswest', 'metersnorth', ds.mean('temp'))
    # shade the mean-temperature aggregate into an RGB image
    img = tf.shade(agg, cmap=cmap, how='cbrt', span=[0.0, 1.0])
    return tf.dynspread(img, threshold=0.5, max_px=4)
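# The (x_range, y_range, w, h) -> image signature above is the callback shape
# datashader's interactive helpers expect, but it can also be invoked
# directly. The extents below are made-up values for illustration:
img = image_callback(x_range=(-8_240_000, -8_220_000),
                     y_range=(4_960_000, 4_980_000),
                     w=800, h=600)
# img is a datashader Image (an xarray.DataArray of packed RGBA values).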
def test_rasterize_quadmesh(self):
    qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]])))
    img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))
    image = Image(np.array([[2., 3., np.nan],
                            [0, 1, np.nan],
                            [np.nan, np.nan, np.nan]]),
                  bounds=(-.5, -.5, 1.5, 1.5))
    self.assertEqual(img, image)
class Model(PolyAndPointAnnotator):
    """
    Allows drawing and annotating Points and Polygons using a bokeh
    DataTable.
    """

    wmts = param.ClassSelector(default=GVTS(), class_=GVTS)

    default_value = param.Number(
        default=-99999,
        doc="default value to set for new points and polys",
        precedence=-1)

    viewable_polys = param.Boolean(
        default=True,
        doc='Will the polygons be viewable in the map',
        label='Polygons',
        precedence=20)

    viewable_points = param.Boolean(
        default=True,
        doc='Will the points be viewable in the map',
        label='Points',
        precedence=21)

    # line cross section options
    resolution = param.Number(default=1000, doc="""
        Distance between samples in meters. Used for interpolation
        of the cross-section paths.""")

    aggregator = param.ClassSelector(class_=ds.reductions.Reduction,
                                     default=ds.mean(), precedence=-1)

    def __init__(self, *args, **params):
        super(Model, self).__init__(*args, **params)
        self.conceptual_model = None

    # line cross section
    def _gen_samples(self, geom):
        """
        Interpolates a LineString geometry to the defined
        resolution. Returning the x- and y-coordinates along with
        the distance along the path.
        """
        xs, ys, distance = [], [], []
        dist = geom.length
        for d in np.linspace(0, dist, int(dist / self.resolution)):
            point = geom.interpolate(d)
            xs.append(point.x)
            ys.append(point.y)
            distance.append(d)
        return xs, ys, distance

    # line cross section
    def _sample(self, obj, data):
        """
        Rasterizes the supplied object in the current region and
        samples it with the drawn paths returning an NdOverlay of
        Curves.

        Note: Because the function returns an NdOverlay containing
        a variable number of elements batching must be enabled and
        the legend_limit must be set to 0.
        """
        if self.poly_stream.data is None:
            path = self.polys
        else:
            path = self.poly_stream.element
        if isinstance(obj, TriMesh):
            vdim = obj.nodes.vdims[0]
        else:
            vdim = obj.vdims[0]
        if len(path) > 2:
            x_range = path.range(0)
            y_range = path.range(1)
        else:
            return hv.NdOverlay({0: hv.Curve([], 'Distance', vdim)})

        (x0, x1), (y0, y1) = x_range, y_range
        width, height = (max([min([(x1 - x0) / self.resolution, 500]), 10]),
                         max([min([(y1 - y0) / self.resolution, 500]), 10]))
        raster = rasterize(obj, x_range=x_range, y_range=y_range,
                           aggregator=self.aggregator, width=int(width),
                           height=int(height), dynamic=False)
        x, y = raster.kdims
        sections = []
        for g in path.geom():
            xs, ys, distance = self._gen_samples(g)
            indexes = {x.name: xs, y.name: ys}
            points = raster.data.sel_points(method='nearest', **indexes).to_dataframe()
            points['Distance'] = distance
            sections.append(hv.Curve(points, 'Distance', vdims=[vdim, x, y]))
        return hv.NdOverlay(dict(enumerate(sections)))

    # line cross section
    def _pos_indicator(self, obj, x):
        """
        Returns an NdOverlay of Points indicating the current
        mouse position along the cross-sections.

        Note: Because the function returns an NdOverlay containing
        a variable number of elements batching must be enabled and
        the legend_limit must be set to 0.
        """
        points = []
        elements = obj or []
        for el in elements:
            if len(el) < 1:
                continue
            p = Points(el[x], ['x', 'y'], crs=ccrs.GOOGLE_MERCATOR)
            points.append(p)
        if not points:
            return hv.NdOverlay({0: Points([], ['x', 'y'])})
        return hv.NdOverlay(enumerate(points))

    def map_view(self):
        if self.viewable_points:
            view_points = self.points.options(tools=['hover'], clone=False,
                                              height=self.height, width=self.width)
        else:
            view_points = hv.Curve([])  # todo use empty layouts when they become available
        if self.viewable_polys:
            view_polys = self.polys.options(clone=False, line_width=5,
                                            height=self.height, width=self.width)
        else:
            view_polys = hv.Curve([])  # todo use empty layouts when they become available
        return hv.DynamicMap(self.wmts.view) * view_polys * view_points

    def table_view(self):
        return pn.Tabs(('Polygons', self.poly_table),
                       ('Vertices', self.vertex_table),
                       ('Points', self.point_table),
                       name='View Data')

    def panel(self):
        return pn.Row(self.map_view(), self.table_view())

    def view(self):
        if self.viewable_points:
            view_points = self.points.opts(tools=['hover'], clone=False,
                                           active_tools=['pan', 'wheel_zoom'],
                                           height=self.height, width=self.width)
        else:
            view_points = hv.Curve([])
        if self.viewable_polys:
            view_polys = self.polys.opts(clone=False, line_width=5,
                                         active_tools=['pan', 'wheel_zoom'],
                                         height=self.height, width=self.width)
        else:
            view_polys = hv.Curve([])
        return (hv.DynamicMap(self.wmts.view) * view_polys * view_points +
                self.poly_table + self.point_table + self.vertex_table).cols(1)

    @param.output(path=hv.Path)
    def path_output(self):
        return self.poly_stream.element
def test_mean():
    out = df.i32.values.reshape((2, 2, 5)).mean(axis=2).T
    eq(c.points(ddf, 'x', 'y', agg=ds.mean('i32')).agg, out)
    eq(c.points(ddf, 'x', 'y', agg=ds.mean('i64')).agg, out)
    eq(c.points(ddf, 'x', 'y', agg=ds.mean('f32')).agg, out)
    eq(c.points(ddf, 'x', 'y', agg=ds.mean('f64')).agg, out)