class rasterize(ResamplingOperation): """ Rasterize is a high-level operation which will rasterize any Element or combination of Elements aggregating it with the supplied aggregator and interpolation method. The default aggregation method depends on the type of Element but usually defaults to the count of samples in each bin, other aggregators can be supplied implementing mean, max, min and other reduction operations. The bins of the aggregate are defined by the width and height and the x_range and y_range. If x_sampling or y_sampling are supplied the operation will ensure that a bin is no smaller than the minimum sampling distance by reducing the width and height when zoomed in beyond the minimum sampling distance. By default, the PlotSize and RangeXY streams are applied when this operation is used dynamically, which means that the width, height, x_range and y_range will automatically be set to match the inner dimensions of the linked plot and the ranges of the axes. """ aggregator = param.ClassSelector(class_=ds.reductions.Reduction, default=None) interpolation = param.ObjectSelector(default='bilinear', objects=['bilinear', None], doc=""" The interpolation method to apply during rasterization.""") def _process(self, element, key=None): # Get input Images to avoid multiple rasterization imgs = element.traverse(lambda x: x, [Image]) # Rasterize TriMeshes tri_params = dict( {k: v for k, v in self.p.items() if k in aggregate.params()}, dynamic=False) trirasterize = trimesh_rasterize.instance(**tri_params) trirasterize._precomputed = self._precomputed element = element.map(trirasterize, TriMesh) self._precomputed = trirasterize._precomputed # Rasterize QuadMesh quad_params = dict( {k: v for k, v in self.p.items() if k in aggregate.params()}, dynamic=False) quadrasterize = quadmesh_rasterize.instance(**quad_params) quadrasterize._precomputed = self._precomputed element = element.map(quadrasterize, QuadMesh) self._precomputed = quadrasterize._precomputed # Rasterize NdOverlay of objects agg_params = dict( {k: v for k, v in self.p.items() if k in aggregate.params()}, dynamic=False) dsrasterize = aggregate.instance(**agg_params) dsrasterize._precomputed = self._precomputed predicate = lambda x: (isinstance(x, NdOverlay) and issubclass( x.type, Dataset) and not issubclass(x.type, Image)) element = element.map(dsrasterize, predicate) # Rasterize other Dataset types predicate = lambda x: (isinstance(x, Dataset) and (not isinstance(x, Image) or x in imgs)) element = element.map(dsrasterize, predicate) self._precomputed = dsrasterize._precomputed return element
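
# Illustrative usage sketch of the rasterize operation above. It assumes the
# public HoloViews/datashader API is importable; the element size and
# width/height values are arbitrary example choices, not requirements.
def _example_rasterize_points():
    """Rasterize a large Points element into a fixed-size Image."""
    import numpy as np
    import holoviews as hv
    from holoviews.operation.datashader import rasterize

    points = hv.Points(np.random.randn(100000, 2))
    # dynamic=False returns a concrete Image instead of a DynamicMap
    return rasterize(points, width=400, height=400, dynamic=False)
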

class ResamplingOperation(Operation):
    """
    Abstract base class for resampling operations.
    """

    dynamic = param.Boolean(default=True, doc="""
        Enables dynamic processing by default.""")

    expand = param.Boolean(default=True, doc="""
        Whether the x_range and y_range should be allowed to expand
        beyond the extent of the data. Setting this value to True is
        useful for the case where you want to ensure a certain size of
        output grid, e.g. if you are doing masking or other arithmetic
        on the grids. A value of False ensures that the grid is only
        just as large as it needs to be to contain the data, which will
        be faster and use less memory if the resulting aggregate is
        being overlaid on a much larger background.""")

    height = param.Integer(default=400, doc="""
        The height of the output image in pixels.""")

    width = param.Integer(default=400, doc="""
        The width of the output image in pixels.""")

    x_range = param.NumericTuple(default=None, length=2, doc="""
        The x_range as a tuple of min and max x-value. Auto-ranges
        if set to None.""")

    y_range = param.NumericTuple(default=None, length=2, doc="""
        The y_range as a tuple of min and max y-value. Auto-ranges
        if set to None.""")

    x_sampling = param.Number(default=None, doc="""
        Specifies the smallest allowed sampling interval along the x-axis.""")

    y_sampling = param.Number(default=None, doc="""
        Specifies the smallest allowed sampling interval along the y-axis.""")

    target = param.ClassSelector(class_=Image, doc="""
        A target Image which defines the desired x_range, y_range,
        width and height.""")

    streams = param.List(default=[PlotSize, RangeXY], doc="""
        List of streams that are applied if dynamic=True, allowing
        for dynamic interaction with the plot.""")

    element_type = param.ClassSelector(class_=(Dataset,), instantiate=False,
                                       is_instance=False, default=Image, doc="""
        The type of the returned Elements, must be a 2D Dataset type.""")

    link_inputs = param.Boolean(default=True, doc="""
        By default, the link_inputs parameter is set to True so that
        when applying shade, backends that support linked streams
        update RangeXY streams on the inputs of the shade operation.
        Disable when you do not want the resulting plot to be
        interactive, e.g. when trying to display an interactive plot
        a second time.""")

    precompute = param.Boolean(default=False, doc="""
        Whether to apply precomputing operations. Precomputing can
        speed up resampling operations by avoiding unnecessary
        recomputation if the supplied element does not change between
        calls.
        The cost of enabling this option is that the memory used to
        represent this internal state is not freed between calls.""")

    @bothmethod
    def instance(self_or_cls, **params):
        inst = super(ResamplingOperation, self_or_cls).instance(**params)
        inst._precomputed = {}
        return inst

    def _get_sampling(self, element, x, y):
        target = self.p.target
        if target:
            x_range, y_range = target.range(x), target.range(y)
            height, width = target.dimension_values(2, flat=False).shape
        else:
            if x is None or y is None:
                x_range = self.p.x_range or (-0.5, 0.5)
                y_range = self.p.y_range or (-0.5, 0.5)
            else:
                if self.p.expand or not self.p.x_range:
                    x_range = self.p.x_range or element.range(x)
                else:
                    x0, x1 = self.p.x_range
                    ex0, ex1 = element.range(x)
                    x_range = np.max([x0, ex0]), np.min([x1, ex1])
                if x_range[0] == x_range[1]:
                    x_range = (x_range[0] - 0.5, x_range[0] + 0.5)
                if self.p.expand or not self.p.y_range:
                    y_range = self.p.y_range or element.range(y)
                else:
                    y0, y1 = self.p.y_range
                    ey0, ey1 = element.range(y)
                    y_range = np.max([y0, ey0]), np.min([y1, ey1])
            width, height = self.p.width, self.p.height
        (xstart, xend), (ystart, yend) = x_range, y_range

        xtype = 'numeric'
        if isinstance(xstart, datetime_types) or isinstance(xend, datetime_types):
            xstart, xend = dt_to_int(xstart), dt_to_int(xend)
            xtype = 'datetime'
        elif not np.isfinite(xstart) and not np.isfinite(xend):
            if element.get_dimension_type(x) in datetime_types:
                xstart, xend = 0, 10000
                xtype = 'datetime'
            else:
                xstart, xend = 0, 1
        elif xstart == xend:
            xstart, xend = (xstart - 0.5, xend + 0.5)
        x_range = (xstart, xend)

        ytype = 'numeric'
        if isinstance(ystart, datetime_types) or isinstance(yend, datetime_types):
            ystart, yend = dt_to_int(ystart), dt_to_int(yend)
            ytype = 'datetime'
        elif not np.isfinite(ystart) and not np.isfinite(yend):
            if element.get_dimension_type(y) in datetime_types:
                ystart, yend = 0, 10000
                ytype = 'datetime'
            else:
                ystart, yend = 0, 1
        elif ystart == yend:
            ystart, yend = (ystart - 0.5, yend + 0.5)
        y_range = (ystart, yend)

        # Compute highest allowed sampling density
        xspan = xend - xstart
        yspan = yend - ystart
        if self.p.x_sampling:
            width = int(min([(xspan / self.p.x_sampling), width]))
        if self.p.y_sampling:
            height = int(min([(yspan / self.p.y_sampling), height]))
        xunit, yunit = float(xspan) / width, float(yspan) / height
        xs, ys = (np.linspace(xstart + xunit / 2., xend - xunit / 2., width),
                  np.linspace(ystart + yunit / 2., yend - yunit / 2., height))

        return (x_range, y_range), (xs, ys), (width, height), (xtype, ytype)
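
# Illustrative sketch of the x_sampling/y_sampling parameters defined above.
# It assumes the concrete rasterize operation from this module; the data and
# the numeric values are example choices only.
def _example_minimum_sampling():
    """Limit the effective grid resolution via x_sampling/y_sampling."""
    import numpy as np
    import holoviews as hv
    from holoviews.operation.datashader import rasterize

    points = hv.Points(np.random.rand(10000, 2))
    # Although width/height request a 400x400 grid, a sampling interval of
    # 0.01 over a unit-length range caps the grid at roughly 100x100 bins.
    return rasterize(points, width=400, height=400,
                     x_sampling=0.01, y_sampling=0.01, dynamic=False)
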

class trimesh_rasterize(aggregate):
    """
    Rasterize the TriMesh element using the supplied aggregator. If
    the TriMesh nodes or edges define a value dimension, the mesh is
    rasterized as filled and shaded polygons; otherwise a wireframe
    of the data is returned.
    """

    aggregator = param.ClassSelector(class_=ds.reductions.Reduction,
                                     default=None)

    interpolation = param.ObjectSelector(default='bilinear',
                                         objects=['bilinear', None], doc="""
        The interpolation method to apply during rasterization.""")

    def _precompute(self, element):
        from datashader.utils import mesh
        if element.vdims:
            simplices = element.dframe([0, 1, 2, 3])
            verts = element.nodes.dframe([0, 1])
        elif element.nodes.vdims:
            simplices = element.dframe([0, 1, 2])
            verts = element.nodes.dframe([0, 1, 3])
        return {'mesh': mesh(verts, simplices), 'simplices': simplices,
                'vertices': verts}

    def _process(self, element, key=None):
        if isinstance(element, TriMesh):
            x, y = element.nodes.kdims[:2]
        else:
            x, y = element.kdims
        info = self._get_sampling(element, x, y)
        (x_range, y_range), _, (width, height), (xtype, ytype) = info
        cvs = ds.Canvas(plot_width=width, plot_height=height,
                        x_range=x_range, y_range=y_range)

        if not (element.vdims or element.nodes.vdims):
            return aggregate._process(self, element, key)
        elif element._plot_id in self._precomputed:
            precomputed = self._precomputed[element._plot_id]
        else:
            precomputed = self._precompute(element)
        simplices = precomputed['simplices']
        pts = precomputed['vertices']
        mesh = precomputed['mesh']
        if self.p.precompute:
            self._precomputed = {element._plot_id: precomputed}

        vdim = element.vdims[0] if element.vdims else element.nodes.vdims[0]
        interpolate = bool(self.p.interpolation)
        agg = cvs.trimesh(pts, simplices, agg=self.p.aggregator,
                          interp=interpolate, mesh=mesh)
        params = dict(get_param_values(element), kdims=[x, y],
                      datatype=['xarray'], vdims=[vdim])
        return Image(agg, **params)
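
# Illustrative usage sketch for trimesh_rasterize. It assumes hv.TriMesh and
# the datashader operation are available; the tiny two-triangle mesh below is
# a made-up example.
def _example_trimesh_rasterize():
    """Rasterize a small TriMesh whose nodes carry a value dimension."""
    import holoviews as hv
    from holoviews.operation.datashader import trimesh_rasterize

    nodes = hv.Points([(0, 0, 0.0), (1, 0, 0.5), (0, 1, 1.0), (1, 1, 0.25)],
                      vdims='z')
    simplices = [(0, 1, 2), (1, 3, 2)]
    trimesh = hv.TriMesh((simplices, nodes))
    # Nodes define a value dimension, so the output is interpolated/shaded
    return trimesh_rasterize(trimesh, width=200, height=200, dynamic=False)
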
class TestPattern(SheetPanel): sheet_type = GeneratorSheet dock = param.Boolean(False) edit_sheet = param.ObjectSelector(doc=""" Sheet for which to edit pattern properties.""") plastic = param.Boolean(default=False, doc=""" Whether to enable plasticity during presentation.""") duration = param.Number(default=1.0, softbounds=(0.0, 10.0), doc=""" How long to run the simulation for each presentation.""") Present = tk.Button(doc="""Present this pattern to the simulation.""") pattern_generator = param.ClassSelector(default=Constant(), class_=PatternGenerator, doc=""" Type of pattern to present. Each type has various parameters that can be changed.""" ) def __init__(self, master, plotgroup=None, **params): plotgroup = plotgroup or TestPatternPlotGroup() super(TestPattern, self).__init__(master, plotgroup, **params) self.auto_refresh = True self.plotcommand_frame.pack_forget() for name in ['pre_plot_hooks', 'plot_hooks', 'Fwd', 'Back']: self.hide_param(name) edit_sheet_param = self.get_parameter_object('edit_sheet') edit_sheet_param.objects = self.plotgroup.sheets() self.pg_control_pane = Frame(self) #,bd=1,relief="sunken") self.pg_control_pane.pack(side="top", expand='yes', fill='x') self.params_frame = tk.ParametersFrame( self.pg_control_pane, parameterized_object=self.pattern_generator, on_modify=self.conditional_refresh, msg_handler=master.status) self.params_frame.hide_param('Close') self.params_frame.hide_param('Refresh') # CEB: 'new_default=True' is temporary so that the current # behavior is the same as before; shoudl make None the # default and mean 'apply to all sheets'. self.pack_param('edit_sheet', parent=self.pg_control_pane, on_modify=self.switch_sheet, widget_options={ 'new_default': True, 'sort_fn_args': { 'cmp': lambda x, y: cmp(-x.precedence, -y.precedence) } }) self.pack_param('pattern_generator', parent=self.pg_control_pane, on_modify=self.change_pattern_generator, side="top") present_frame = Frame(self) present_frame.pack(side='bottom') self.pack_param('plastic', side='bottom', parent=present_frame) self.params_frame.pack(side='bottom', expand='yes', fill='x') self.pack_param('duration', parent=present_frame, side='left') self.pack_param('Present', parent=present_frame, on_set=self.present_pattern, side="right") def setup_plotgroup(self): super(TestPattern, self).setup_plotgroup() # CB: could copy the sheets instead (deleting connections etc) self.plotgroup._sheets = [ GeneratorSheet(name=gs.name, nominal_bounds=gs.nominal_bounds, nominal_density=gs.nominal_density) for gs in topo.sim.objects(GeneratorSheet).values() ] self.plotgroup._set_name("Test Pattern") def switch_sheet(self): if self.edit_sheet is not None: self.pattern_generator = self.edit_sheet.input_generator self.change_pattern_generator() def change_pattern_generator(self): """ Set the current PatternGenerator to the one selected and get the ParametersFrameWithApply to draw the relevant widgets """ # CEBALERT: if pattern generator is set to None, there will be # an error. Need to handle None in the appropriate place # (presumably tk.py). self.params_frame.set_PO(self.pattern_generator) for sheet in self.plotgroup.sheets(): if sheet == self.edit_sheet: sheet.set_input_generator(self.pattern_generator) self.conditional_refresh() def refresh(self, update=True): """ Simply update the plots: skip all handling of history. """ self.refresh_plots(update) def present_pattern(self): """ Move the user created patterns into the GeneratorSheets, run for the specified length of time, then restore the original patterns. 
""" input_dict = dict([(sheet.name,sheet.input_generator) \ for sheet in self.plotgroup.sheets()]) pattern_present(inputs=input_dict, durations=[self.duration], plastic=self.plastic, overwrite_previous=False, install_sheetview=True, restore_state=True) topo.guimain.auto_refresh(update=False)
class aggregate(ResamplingOperation): """ aggregate implements 2D binning for any valid HoloViews Element type using datashader. I.e., this operation turns a HoloViews Element or overlay of Elements into an Image or an overlay of Images by rasterizing it. This allows quickly aggregating large datasets computing a fixed-sized representation independent of the original dataset size. By default it will simply count the number of values in each bin but other aggregators can be supplied implementing mean, max, min and other reduction operations. The bins of the aggregate are defined by the width and height and the x_range and y_range. If x_sampling or y_sampling are supplied the operation will ensure that a bin is no smaller than the minimum sampling distance by reducing the width and height when zoomed in beyond the minimum sampling distance. By default, the PlotSize stream is applied when this operation is used dynamically, which means that the height and width will automatically be set to match the inner dimensions of the linked plot. """ aggregator = param.ClassSelector(class_=ds.reductions.Reduction, default=ds.count()) @classmethod def get_agg_data(cls, obj, category=None): """ Reduces any Overlay or NdOverlay of Elements into a single xarray Dataset that can be aggregated. """ paths = [] if isinstance(obj, Graph): obj = obj.edgepaths kdims = list(obj.kdims) vdims = list(obj.vdims) dims = obj.dimensions()[:2] if isinstance(obj, Path): glyph = 'line' for p in obj.split(datatype='dataframe'): paths.append(p) elif isinstance(obj, CompositeOverlay): element = None for key, el in obj.data.items(): x, y, element, glyph = cls.get_agg_data(el) dims = (x, y) df = PandasInterface.as_dframe(element) if isinstance(obj, NdOverlay): df = df.assign( **dict(zip(obj.dimensions('key', True), key))) paths.append(df) if element is None: dims = None else: kdims += element.kdims vdims = element.vdims elif isinstance(obj, Element): glyph = 'line' if isinstance(obj, Curve) else 'points' paths.append(PandasInterface.as_dframe(obj)) if dims is None or len(dims) != 2: return None, None, None, None else: x, y = dims if len(paths) > 1: if glyph == 'line': path = paths[0][:1] if isinstance(path, dd.DataFrame): path = path.compute() empty = path.copy() empty.iloc[0, :] = (np.NaN, ) * empty.shape[1] paths = [elem for p in paths for elem in (p, empty)][:-1] if all(isinstance(path, dd.DataFrame) for path in paths): df = dd.concat(paths) else: paths = [ p.compute() if isinstance(p, dd.DataFrame) else p for p in paths ] df = pd.concat(paths) else: df = paths[0] if category and df[category].dtype.name != 'category': df[category] = df[category].astype('category') if any(df[d.name].dtype.kind == 'M' for d in (x, y)): df = df.copy() for d in (x, y): if df[d.name].dtype.kind == 'M': df[d.name] = df[d.name].astype('datetime64[ns]').astype( 'int64') * 10e-4 return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph def _aggregate_ndoverlay(self, element, agg_fn): """ Optimized aggregation for NdOverlay objects by aggregating each Element in an NdOverlay individually avoiding having to concatenate items in the NdOverlay. Works by summing sum and count aggregates and applying appropriate masking for NaN values. Mean aggregation is also supported by dividing sum and count aggregates. count_cat aggregates are grouped by the categorical dimension and a separate aggregate for each category is generated. 
""" # Compute overall bounds x, y = element.last.dimensions()[0:2] info = self._get_sampling(element, x, y) (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info agg_params = dict( {k: v for k, v in self.p.items() if k in aggregate.params()}, x_range=x_range, y_range=y_range) # Optimize categorical counts by aggregating them individually if isinstance(agg_fn, ds.count_cat): agg_params.update(dict(dynamic=False, aggregator=ds.count())) agg_fn1 = aggregate.instance(**agg_params) if element.ndims == 1: grouped = element else: grouped = element.groupby([agg_fn.column], container_type=NdOverlay, group_type=NdOverlay) return grouped.clone({k: agg_fn1(v) for k, v in grouped.items()}) # Create aggregate instance for sum, count operations, breaking mean # into two aggregates column = agg_fn.column or 'Count' if isinstance(agg_fn, ds.mean): agg_fn1 = aggregate.instance( **dict(agg_params, aggregator=ds.sum(column))) agg_fn2 = aggregate.instance( **dict(agg_params, aggregator=ds.count())) else: agg_fn1 = aggregate.instance(**agg_params) agg_fn2 = None is_sum = isinstance(agg_fn1.aggregator, ds.sum) # Accumulate into two aggregates and mask agg, agg2, mask = None, None, None mask = None for v in element: # Compute aggregates and mask new_agg = agg_fn1.process_element(v, None) if is_sum: new_mask = np.isnan(new_agg.data[column].values) new_agg.data = new_agg.data.fillna(0) if agg_fn2: new_agg2 = agg_fn2.process_element(v, None) if agg is None: agg = new_agg if is_sum: mask = new_mask if agg_fn2: agg2 = new_agg2 else: agg.data += new_agg.data if is_sum: mask &= new_mask if agg_fn2: agg2.data += new_agg2.data # Divide sum by count to compute mean if agg2 is not None: agg2.data.rename({'Count': agg_fn.column}, inplace=True) with np.errstate(divide='ignore', invalid='ignore'): agg.data /= agg2.data # Fill masked with with NaNs if is_sum: agg.data[column].values[mask] = np.NaN return agg def _process(self, element, key=None): agg_fn = self.p.aggregator category = agg_fn.column if isinstance(agg_fn, ds.count_cat) else None if (isinstance(element, NdOverlay) and ((isinstance(agg_fn, (ds.count, ds.sum, ds.mean)) and agg_fn.column not in element.kdims) or (isinstance(agg_fn, ds.count_cat) and agg_fn.column in element.kdims))): return self._aggregate_ndoverlay(element, agg_fn) if element._plot_id in self._precomputed: x, y, data, glyph = self._precomputed[element._plot_id] else: x, y, data, glyph = self.get_agg_data(element, category) if self.p.precompute: self._precomputed[element._plot_id] = x, y, data, glyph (x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = self._get_sampling(element, x, y) if x is None or y is None: xarray = xr.DataArray(np.full((height, width), np.NaN, dtype=np.float32), dims=['y', 'x'], coords={ 'x': xs, 'y': ys }) return self.p.element_type(xarray) cvs = ds.Canvas(plot_width=width, plot_height=height, x_range=x_range, y_range=y_range) column = agg_fn.column if agg_fn else None if column: dims = [d for d in element.dimensions('ranges') if d == column] if not dims: raise ValueError( "Aggregation column %s not found on %s element. 
" "Ensure the aggregator references an existing " "dimension.") if isinstance(agg_fn, ds.count_cat): name = '%s Count' % agg_fn.column vdims = [dims[0](column)] else: vdims = Dimension('Count') params = dict(get_param_values(element), kdims=[x, y], datatype=['xarray'], vdims=vdims) dfdata = PandasInterface.as_dframe(data) agg = getattr(cvs, glyph)(dfdata, x.name, y.name, self.p.aggregator) if 'x_axis' in agg and 'y_axis' in agg: agg = agg.rename({'x_axis': x, 'y_axis': y}) if xtype == 'datetime': agg[x.name] = agg[x.name].astype('datetime64[us]') if ytype == 'datetime': agg[y.name] = agg[y.name].astype('datetime64[us]') if agg.ndim == 2: # Replacing x and y coordinates to avoid numerical precision issues eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data) return self.p.element_type(eldata, **params) else: layers = {} for c in agg.coords[column].data: cagg = agg.sel(**{column: c}) eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data) layers[c] = self.p.element_type(eldata, **params) return NdOverlay(layers, kdims=[data.get_dimension(column)])
class contours(Operation): """ Given a Image with a single channel, annotate it with contour lines for a given set of contour levels. The return is an NdOverlay with a Contours layer for each given level, overlaid on top of the input Image. """ output_type = Overlay levels = param.ClassSelector(default=10, class_=(list, int), doc=""" A list of scalar values used to specify the contour levels.""") group = param.String(default='Level', doc=""" The group assigned to the output contours.""") filled = param.Boolean(default=False, doc=""" Whether to generate filled contours""") overlaid = param.Boolean(default=False, doc=""" Whether to overlay the contour on the supplied Element.""") _per_element = True def _process(self, element, key=None): try: from matplotlib.contour import QuadContourSet from matplotlib.axes import Axes from matplotlib.figure import Figure from matplotlib.dates import num2date, date2num except ImportError: raise ImportError("contours operation requires matplotlib.") extent = element.range(0) + element.range(1)[::-1] xs = element.dimension_values(0, True, flat=False) ys = element.dimension_values(1, True, flat=False) zs = element.dimension_values(2, flat=False) # Ensure that coordinate arrays specify bin centers if xs.shape[0] != zs.shape[0]: xs = xs[:-1] + np.diff(xs, axis=0) / 2. if xs.shape[1] != zs.shape[1]: xs = xs[:, :-1] + (np.diff(xs, axis=1) / 2.) if ys.shape[0] != zs.shape[0]: ys = ys[:-1] + np.diff(ys, axis=0) / 2. if ys.shape[1] != zs.shape[1]: ys = ys[:, :-1] + (np.diff(ys, axis=1) / 2.) data = (xs, ys, zs) # if any data is a datetime, transform to matplotlib's numerical format data_is_datetime = tuple(isdatetime(arr) for k, arr in enumerate(data)) if any(data_is_datetime): data = tuple( date2num(d) if is_datetime else d for d, is_datetime in zip(data, data_is_datetime)) xdim, ydim = element.dimensions('key', label=True) if self.p.filled: contour_type = Polygons else: contour_type = Contours vdims = element.vdims[:1] kwargs = {} levels = self.p.levels zmin, zmax = element.range(2) if isinstance(self.p.levels, int): if zmin == zmax: contours = contour_type([], [xdim, ydim], vdims) return (element * contours) if self.p.overlaid else contours data += (levels, ) else: kwargs = {'levels': levels} fig = Figure() ax = Axes(fig, [0, 0, 1, 1]) contour_set = QuadContourSet(ax, *data, filled=self.p.filled, extent=extent, **kwargs) levels = np.array(contour_set.get_array()) crange = levels.min(), levels.max() if self.p.filled: levels = levels[:-1] + np.diff(levels) / 2. 
vdims = [vdims[0].clone(range=crange)] paths = [] empty = np.array([[np.nan, np.nan]]) for level, cset in zip(levels, contour_set.collections): exteriors = [] interiors = [] for geom in cset.get_paths(): interior = [] polys = geom.to_polygons(closed_only=False) for ncp, cp in enumerate(polys): if any(data_is_datetime[0:2]): # transform x/y coordinates back to datetimes xs, ys = np.split(cp, 2, axis=1) if data_is_datetime[0]: xs = np.array(num2date(xs)) if data_is_datetime[1]: ys = np.array(num2date(ys)) cp = np.concatenate((xs, ys), axis=1) if ncp == 0: exteriors.append(cp) exteriors.append(empty) else: interior.append(cp) if len(polys): interiors.append(interior) if not exteriors: continue geom = { element.vdims[0].name: num2date(level) if data_is_datetime[2] else level, (xdim, ydim): np.concatenate(exteriors[:-1]) } if self.p.filled and interiors: geom['holes'] = interiors paths.append(geom) contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims) if self.p.overlaid: contours = element * contours return contours
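
# Illustrative usage sketch for the contours operation. It assumes matplotlib
# is installed, since the operation relies on it; the Gaussian surface is an
# arbitrary example.
def _example_contours():
    """Compute ten contour levels for a simple 2D Gaussian Image."""
    import numpy as np
    import holoviews as hv
    from holoviews.operation import contours

    xs = ys = np.linspace(-3, 3, 100)
    zs = np.exp(-(xs[None, :] ** 2 + ys[:, None] ** 2))
    img = hv.Image((xs, ys, zs))
    # overlaid=True returns the Image with the contour lines on top
    return contours(img, levels=10, overlaid=True)
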
class histogram(Operation): """ Returns a Histogram of the input element data, binned into num_bins over the bin_range (if specified) along the specified dimension. """ bin_range = param.NumericTuple(default=None, length=2, doc=""" Specifies the range within which to compute the bins.""") bins = param.ClassSelector(default=None, class_=(np.ndarray, list, tuple, str), doc=""" An explicit set of bin edges or a method to find the optimal set of bin edges, e.g. 'auto', 'fd', 'scott' etc. For more documentation on these approaches see the np.histogram_bin_edges documentation.""") cumulative = param.Boolean(default=False, doc=""" Whether to compute the cumulative histogram""") dimension = param.String(default=None, doc=""" Along which dimension of the Element to compute the histogram.""") frequency_label = param.String(default=None, doc=""" Format string defining the label of the frequency dimension of the Histogram.""" ) groupby = param.ClassSelector(default=None, class_=(basestring, Dimension), doc=""" Defines a dimension to group the Histogram returning an NdOverlay of Histograms.""" ) log = param.Boolean(default=False, doc=""" Whether to use base 10 logarithmic samples for the bin edges.""") mean_weighted = param.Boolean(default=False, doc=""" Whether the weighted frequencies are averaged.""") normed = param.ObjectSelector(default=False, objects=[True, False, 'integral', 'height'], doc=""" Controls normalization behavior. If `True` or `'integral'`, then `density=True` is passed to np.histogram, and the distribution is normalized such that the integral is unity. If `False`, then the frequencies will be raw counts. If `'height'`, then the frequencies are normalized such that the max bin height is unity.""") nonzero = param.Boolean(default=False, doc=""" Whether to use only nonzero values when computing the histogram""") num_bins = param.Integer(default=20, doc=""" Number of bins in the histogram .""") weight_dimension = param.String(default=None, doc=""" Name of the dimension the weighting should be drawn from""") style_prefix = param.String(default=None, allow_None=None, doc=""" Used for setting a common style for histograms in a HoloMap or AdjointLayout.""" ) def _process(self, element, key=None): if self.p.groupby: if not isinstance(element, Dataset): raise ValueError( 'Cannot use histogram groupby on non-Dataset Element') grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay) self.p.groupby = None return grouped.map(self._process, Dataset) normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed if self.p.dimension: selected_dim = self.p.dimension else: selected_dim = [d.name for d in element.vdims + element.kdims][0] dim = element.get_dimension(selected_dim) if hasattr(element, 'interface'): data = element.interface.values(element, selected_dim, compute=False) else: data = element.dimension_values(selected_dim) is_datetime = isdatetime(data) if is_datetime: data = data.astype('datetime64[ns]').astype('int64') # Handle different datatypes is_finite = isfinite is_cupy = is_cupy_array(data) if is_cupy: import cupy full_cupy_support = LooseVersion(cupy.__version__) > '8.0' if not full_cupy_support and (normed or self.p.weight_dimension): data = cupy.asnumpy(data) is_cupy = False else: is_finite = cupy.isfinite # Mask data if is_ibis_expr(data): mask = data.notnull() if self.p.nonzero: mask = mask & (data != 0) data = data.to_projection() data = data[mask] no_data = not len(data.head(1).execute()) data = data[dim.name] else: mask = 
is_finite(data) if self.p.nonzero: mask = mask & (data != 0) data = data[mask] da = dask_array_module() no_data = False if da and isinstance(data, da.Array) else not len(data) # Compute weights if self.p.weight_dimension: if hasattr(element, 'interface'): weights = element.interface.values(element, self.p.weight_dimension, compute=False) else: weights = element.dimension_values(self.p.weight_dimension) weights = weights[mask] else: weights = None # Compute bins if isinstance(self.p.bins, str): bin_data = cupy.asnumpy(data) if is_cupy else data edges = np.histogram_bin_edges(bin_data, bins=self.p.bins) elif isinstance(self.p.bins, (list, np.ndarray)): edges = self.p.bins if isdatetime(edges): edges = edges.astype('datetime64[ns]').astype('int64') else: hist_range = self.p.bin_range or element.range(selected_dim) # Avoids range issues including zero bin range and empty bins if hist_range == (0, 0) or any(not isfinite(r) for r in hist_range): hist_range = (0, 1) steps = self.p.num_bins + 1 start, end = hist_range if is_datetime: start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns') if self.p.log: bin_min = max([abs(start), data[data > 0].min()]) edges = np.logspace(np.log10(bin_min), np.log10(end), steps) else: edges = np.linspace(start, end, steps) if is_cupy: edges = cupy.asarray(edges) if not is_dask_array(data) and no_data: nbins = self.p.num_bins if self.p.bins is None else len( self.p.bins) - 1 hist = np.zeros(nbins) elif hasattr(element, 'interface'): density = True if normed else False hist, edges = element.interface.histogram(data, edges, density=density, weights=weights) if normed == 'height': hist /= hist.max() if self.p.weight_dimension and self.p.mean_weighted: hist_mean, _ = element.interface.histogram(data, density=False, bins=edges) hist /= hist_mean elif normed: # This covers True, 'height', 'integral' hist, edges = np.histogram(data, density=True, weights=weights, bins=edges) if normed == 'height': hist /= hist.max() else: hist, edges = np.histogram(data, normed=normed, weights=weights, bins=edges) if self.p.weight_dimension and self.p.mean_weighted: hist_mean, _ = np.histogram(data, density=False, bins=self.p.num_bins) hist /= hist_mean hist[np.isnan(hist)] = 0 if is_datetime: edges = (edges / 1e3).astype('datetime64[us]') params = {} if self.p.weight_dimension: params['vdims'] = [element.get_dimension(self.p.weight_dimension)] elif self.p.frequency_label: label = self.p.frequency_label.format(dim=dim.pprint_label) params['vdims'] = [Dimension('Frequency', label=label)] else: label = 'Frequency' if normed else 'Count' params['vdims'] = [ Dimension('{0}_{1}'.format(dim.name, label.lower()), label=label) ] if element.group != element.__class__.__name__: params['group'] = element.group if self.p.cumulative: hist = np.cumsum(hist) if self.p.normed in (True, 'integral'): hist *= edges[1] - edges[0] # Save off the computed bin edges so that if this operation instance # is used to compute another histogram, it will default to the same # bin edges. self.bins = list(edges) return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)], label=element.label, **params)
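
# Illustrative usage sketch for the histogram operation. The dimension name
# and bin settings below are example values only.
def _example_histogram():
    """Bin the y-values of a scatter into a 30-bin normalized Histogram."""
    import numpy as np
    import holoviews as hv
    from holoviews.operation import histogram

    points = hv.Points(np.random.randn(10000, 2))
    return histogram(points, dimension='y', num_bins=30, normed=True)
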

class BokehRenderer(Renderer):

    theme = param.ClassSelector(default=default_theme, class_=(Theme, str),
                                allow_None=True, doc="""
        The applicable Bokeh Theme object (if any).""")

    backend = param.String(default='bokeh', doc="The backend name.")

    fig = param.ObjectSelector(default='auto',
                               objects=['html', 'json', 'auto', 'png'], doc="""
        Output render format for static figures. If None, no figure
        rendering will occur.""")

    holomap = param.ObjectSelector(
        default='auto',
        objects=['widgets', 'scrubber', 'server', None, 'auto'], doc="""
        Output render multi-frame (typically animated) format. If
        None, no multi-frame rendering will occur.""")

    mode = param.ObjectSelector(default='default',
                                objects=['default', 'server'], doc="""
        Whether to render the object in regular or server mode. In
        server mode a bokeh Document will be returned which can be
        served as a bokeh server app. By default all output is
        rendered to HTML.""")

    # Defines the valid output formats for each mode.
    mode_formats = {
        'fig': {
            'default': ['html', 'json', 'auto', 'png'],
            'server': ['html', 'json', 'auto']
        },
        'holomap': {
            'default': ['widgets', 'scrubber', 'auto', None],
            'server': ['server', 'auto', None]
        }
    }

    webgl = param.Boolean(default=False, doc="""
        Whether to render plots with WebGL if available""")

    widgets = {'scrubber': BokehScrubberWidget,
               'widgets': BokehSelectionWidget,
               'server': BokehServerWidgets}

    backend_dependencies = {
        'js': CDN.js_files if CDN.js_files else tuple(INLINE.js_raw),
        'css': CDN.css_files if CDN.css_files else tuple(INLINE.css_raw)
    }

    _loaded = False

    # Define the handler for updating bokeh plots
    comm_msg_handler = bokeh_msg_handler if bokeh_version > '0.12.14' else None

    def __call__(self, obj, fmt=None, doc=None):
        """
        Render the supplied HoloViews component using the appropriate
        backend. The output is not a file format but an in-memory byte
        stream, together with any suitable metadata.
        """
        plot, fmt = self._validate(obj, fmt, doc=doc)
        info = {'file-ext': fmt, 'mime_type': MIME_TYPES[fmt]}

        if self.mode == 'server':
            return self.server_doc(plot, doc), info
        elif isinstance(plot, tuple(self.widgets.values())):
            return plot(), info
        elif fmt == 'png':
            png = self._figure_data(plot, fmt=fmt, doc=doc)
            return png, info
        elif fmt == 'html':
            html = self._figure_data(plot, doc=doc)
            html = "<div style='display: table; margin: 0 auto;'>%s</div>" % html
            return self._apply_post_render_hooks(html, obj, fmt), info
        elif fmt == 'json':
            return self.diff(plot), info

    @bothmethod
    def _save_prefix(self_or_cls, ext):
        "Hook to prefix content for instance JS when saving HTML"
        if ext == 'html':
            return '\n'.join(self_or_cls.html_assets()).encode('utf8')
        return

    @bothmethod
    def get_plot(self_or_cls, obj, doc=None, renderer=None, **kwargs):
        """
        Given a HoloViews Viewable return a corresponding plot instance.
        Allows supplying a document to attach the plot to, useful when
        combining the bokeh model with another plot.
""" if doc is None: doc = Document() if self_or_cls.notebook_context else curdoc() if self_or_cls.notebook_context: curdoc().theme = self_or_cls.theme doc.theme = self_or_cls.theme plot = super(BokehRenderer, self_or_cls).get_plot(obj, renderer, **kwargs) plot.document = doc return plot @bothmethod def get_widget(self_or_cls, plot, widget_type, doc=None, **kwargs): if not isinstance(plot, Plot): plot = self_or_cls.get_plot(plot, doc) if self_or_cls.mode == 'server': return BokehServerWidgets(plot, renderer=self_or_cls.instance(), **kwargs) else: return super(BokehRenderer, self_or_cls).get_widget(plot, widget_type, **kwargs) @bothmethod def app(self_or_cls, plot, show=False, new_window=False, websocket_origin=None, port=0): """ Creates a bokeh app from a HoloViews object or plot. By default simply attaches the plot to bokeh's curdoc and returns the Document, if show option is supplied creates an Application instance and displays it either in a browser window or inline if notebook extension has been loaded. Using the new_window option the app may be displayed in a new browser tab once the notebook extension has been loaded. A websocket origin is required when launching from an existing tornado server (such as the notebook) and it is not on the default port ('localhost:8888'). """ if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server': renderer = self_or_cls.instance(mode='server') else: renderer = self_or_cls def modify_doc(doc): renderer(plot, doc=doc) handler = FunctionHandler(modify_doc) app = Application(handler) if not show: # If not showing and in notebook context return app return app elif self_or_cls.notebook_context and not new_window: # If in notebook, show=True and no new window requested # display app inline if isinstance(websocket_origin, list): if len(websocket_origin) > 1: raise ValueError( 'In the notebook only a single websocket origin ' 'may be defined, which must match the URL of the ' 'notebook server.') websocket_origin = websocket_origin[0] opts = dict( notebook_url=websocket_origin) if websocket_origin else {} return bkshow(app, **opts) # If app shown outside notebook or new_window requested # start server and open in new browser tab from tornado.ioloop import IOLoop loop = IOLoop.current() if websocket_origin and not isinstance(websocket_origin, list): websocket_origin = [websocket_origin] opts = dict(allow_websocket_origin=websocket_origin ) if websocket_origin else {} opts['io_loop'] = loop server = Server({'/': app}, port=port, **opts) def show_callback(): server.show('/') server.io_loop.add_callback(show_callback) server.start() def sig_exit(*args, **kwargs): loop.add_callback_from_signal(do_stop) def do_stop(*args, **kwargs): loop.stop() signal.signal(signal.SIGINT, sig_exit) try: loop.start() except RuntimeError: pass return server @bothmethod def server_doc(self_or_cls, obj, doc=None): """ Get a bokeh Document with the plot attached. May supply an existing doc, otherwise bokeh.io.curdoc() is used to attach the plot to the global document instance. 
""" if not isinstance(obj, (Plot, BokehServerWidgets)): if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server': renderer = self_or_cls.instance(mode='server') else: renderer = self_or_cls plot, _ = renderer._validate(obj, 'auto') else: plot = obj root = plot.state if isinstance(plot, BokehServerWidgets): plot = plot.plot if doc is None: doc = plot.document else: plot.document = doc plot.traverse(lambda x: attach_periodic(x), [GenericElementPlot]) doc.add_root(root) return doc def components(self, obj, fmt=None, comm=True, **kwargs): # Bokeh has to handle comms directly in <0.12.15 comm = False if bokeh_version < '0.12.15' else comm return super(BokehRenderer, self).components(obj, fmt, comm, **kwargs) def _figure_data(self, plot, fmt='html', doc=None, as_script=False, **kwargs): """ Given a plot instance, an output format and an optional bokeh document, return the corresponding data. If as_script is True, the content will be split in an HTML and a JS component. """ model = plot.state if doc is None: doc = plot.document else: plot.document = doc for m in model.references(): m._document = None doc.theme = self.theme doc.add_root(model) comm_id = plot.comm.id if plot.comm else None # Bokeh raises warnings about duplicate tools and empty subplots # but at the holoviews level these are not issues logger = logging.getLogger(bokeh.core.validation.check.__file__) logger.disabled = True if fmt == 'png': from bokeh.io.export import get_screenshot_as_png img = get_screenshot_as_png(plot.state, None) imgByteArr = BytesIO() img.save(imgByteArr, format='PNG') data = imgByteArr.getvalue() if as_script: b64 = base64.b64encode(data).decode("utf-8") (mime_type, tag) = MIME_TYPES[fmt], HTML_TAGS[fmt] src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64) div = tag.format(src=src, mime_type=mime_type, css='') js = '' else: try: js, div, _ = notebook_content(model, comm_id) html = NOTEBOOK_DIV.format(plot_script=js, plot_div=div) data = encode_utf8(html) doc.hold() except: logger.disabled = False raise logger.disabled = False plot.document = doc if as_script: return div, js return data def diff(self, plot, binary=True, individual=False): """ Returns a json diff required to update an existing plot with the latest plot data. """ events = list(plot.document._held_events) if not events: return None if individual: msgs = [] for event in events: msg = Protocol("1.0").create("PATCH-DOC", [event], use_buffers=binary) msgs.append(msg) else: msgs = Protocol("1.0").create("PATCH-DOC", events, use_buffers=binary) plot.document._held_events = [] return msgs @classmethod def plot_options(cls, obj, percent_size): """ Given a holoviews object and a percentage size, apply heuristics to compute a suitable figure size. For instance, scaling layouts and grids linearly can result in unwieldy figure sizes when there are a large number of elements. As ad hoc heuristics are used, this functionality is kept separate from the plotting classes themselves. Used by the IPython Notebook display hooks and the save utility. Note that this can be overridden explicitly per object using the fig_size and size plot options. 
""" obj = obj.last if isinstance(obj, HoloMap) else obj plot = Store.registry[cls.backend].get(type(obj), None) if not hasattr(plot, 'width') or not hasattr(plot, 'height'): from .plot import BokehPlot plot = BokehPlot options = plot.lookup_options(obj, 'plot').options width = options.get('width', plot.width) height = options.get('height', plot.height) return dict(options, **{'width': int(width), 'height': int(height)}) @bothmethod def get_size(self_or_cls, plot): """ Return the display size associated with a plot before rendering to any particular format. Used to generate appropriate HTML display. Returns a tuple of (width, height) in pixels. """ if isinstance(plot, Plot): plot = plot.state elif not isinstance(plot, Model): raise ValueError('Can only compute sizes for HoloViews ' 'and bokeh plot objects.') return compute_plot_size(plot) @classmethod def load_nb(cls, inline=True): """ Loads the bokeh notebook resources. """ LOAD_MIME_TYPE = bokeh.io.notebook.LOAD_MIME_TYPE bokeh.io.notebook.LOAD_MIME_TYPE = MIME_TYPES['jlab-hv-load'] load_notebook(hide_banner=True, resources=INLINE if inline else CDN) bokeh.io.notebook.LOAD_MIME_TYPE = LOAD_MIME_TYPE bokeh.io.notebook.curstate().output_notebook()
class Icon(param.Parameterized): # pylint: disable=line-too-long """The Icon can be used to add SVG based icons inline to buttons, menus etc. >>> Icon( ... name="Github", ... value='<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 19 20"><path d="M6.71154 17.0776C2.09615 18.4906 2.09615 14.7226 0.25 14.2517L6.71154 17.0776ZM13.1731 19.9036V16.2581C13.2077 15.8089 13.1482 15.3574 12.9986 14.9335C12.849 14.5096 12.6127 14.123 12.3054 13.7995C15.2038 13.4698 18.25 12.3489 18.25 7.20563C18.2498 5.89046 17.754 4.62572 16.8654 3.67319C17.2862 2.52257 17.2564 1.25074 16.7823 0.121918C16.7823 0.121918 15.6931 -0.207776 13.1731 1.51605C11.0574 0.930913 8.82722 0.930913 6.71154 1.51605C4.19154 -0.207776 3.10231 0.121918 3.10231 0.121918C2.62819 1.25074 2.59844 2.52257 3.01923 3.67319C2.12396 4.63279 1.62771 5.90895 1.63462 7.23389C1.63462 12.3394 4.68077 13.4604 7.57923 13.8278C7.27554 14.148 7.04132 14.5299 6.89182 14.9486C6.74233 15.3674 6.6809 15.8135 6.71154 16.2581V19.9036"></path></svg>', ... fill_color="#E1477E", ... spin_duration=2000, ... ) Icon(fill_color='#E1477E', name='Github', spin_duration=2000, value='<svg xmlns="http://www.w3...) """ # pylint: enable=line-too-long name = param.String( default=None, constant=False, doc="""The name of the icon. We will append 'icon' and 'icon-{name}' to enable custom styling""", ) value = param.String(doc=""" A html string defining the icon. """) size = param.Number( default=1.0, bounds=(0.0, None), step=0.1, doc="The size in em units, i.e. a multiplier of the current font-size.", ) fill_color = param.String( default="currentColor", doc= """The fill color of the Icon. Any valid css color like '#eb4034', 'rgb(235, 64, 52)' or 'currentColor'. Default is 'currentColor' which is the color of the surrounding text""", ) # For CSS Spin See https://codepen.io/eveness/pen/BjLaoa spin_duration = param.Integer( default=0, bounds=(0, None), doc="""The spin duration in miliseconds. If greater than 0 the Icon will do a spinning animation. Defaults to 0""", ) _bk_icon = param.ClassSelector(class_=_BkIcon) def __init__(self, **params): super().__init__(**params) self._bk_icon = _BkIcon( label=self.name, text=self.value, size=self.size, fill_color=self.fill_color, spin_duration=self.spin_duration, ) @param.depends("name", watch=True) def _update_label(self, *_): self._bk_icon.label = self.name @param.depends("value", watch=True) def _update_value(self, *_): self._bk_icon.text = self.value @param.depends("size", watch=True) def _update_size(self, *_): self._bk_icon.size = self.size @param.depends("fill_color", watch=True) def _update_fill_color(self, *_): self._bk_icon.fill_color = self.fill_color @param.depends("spin_duration", watch=True) def _update_spin_duration(self, *_): self._bk_icon.spin_duration = self.spin_duration
class HpcConnect(param.Parameterized): uit_client = param.ClassSelector(Client) system = param.ObjectSelector(default=HPC_SYSTEMS[0], objects=HPC_SYSTEMS) login_node = param.ObjectSelector(default=None, objects=[None], label='Login Node') exclude_nodes = param.ListSelector(default=list(), objects=[], label='Exclude Nodes') connected = param.Boolean(default=False, allow_None=True) connect_btn = param.Action(lambda self: self.connect(), label='Connect') disconnect_btn = param.Action(lambda self: self.disconnect(), label='Disconnect') connection_status = param.String(default='Not Connected', label='Status') ready = param.Boolean(default=False, precedence=-1) _next_stage = param.Selector() next_stage = param.Selector() def __init__(self, uit_client=None, **params): super().__init__(**params) self.uit_client = uit_client or Client() self.update_node_options() @param.depends('system', watch=True) def update_node_options(self): options = [f'{self.system}{i:02d}' for i in range(1, 8)] self.param.exclude_nodes.objects = options options = options.copy() options.insert(0, None) self.param.login_node.objects = options self.param.login_node.names = {'Random': None} @param.depends('login_node', watch=True) def update_exclude_nodes_visibility(self): self.param.exclude_nodes.precedence = 1 if self.login_node is None else -1 @param.depends('_next_stage', watch=True) def update_next_stage(self): self.next_stage = self._next_stage def connect(self): system = None if self.login_node is not None else self.system self.connection_status = self.uit_client.connect( system=system, login_node=self.login_node, exclude_login_nodes=self.exclude_nodes, ) try: self.connected = None self.uit_client.call(':') except Exception as e: log.exception(e) self.connected = self.uit_client.connected self.ready = self.connected def disconnect(self): self.param.connect_btn.label = 'Connect' self.connection_status = 'Not Connected' self.connected = False @param.depends('connected') def view(self): header = '# Connect to HPC System' spn = pn.widgets.indicators.LoadingSpinner(value=True, color='primary', aspect_ratio=1, width=0) connect_btn = pn.Param( self.param.connect_btn, widgets={ 'connect_btn': { 'button_type': 'success', 'width': 100, } }, )[0] connect_btn.js_on_click(args={ 'btn': connect_btn, 'spn': spn }, code='btn.visible=false; spn.width=50;') if self.connected is None: content = pn.pane.GIF( resource_filename('panel', 'assets/spinner.gif')) elif self.connected is False: system_pn = pn.Column( pn.panel(self, parameters=['system'], show_name=False), name='HPC System', ) advanced_pn = pn.Column( pn.panel( self, parameters=['login_node', 'exclude_nodes'], widgets={'exclude_nodes': pn.widgets.CrossSelector}, show_name=False, ), name='Advanced Options', ) content = pn.Column(pn.layout.Tabs(system_pn, advanced_pn), connect_btn, spn) else: self.param.connect_btn.label = 'Re-Connect' btns = pn.Param( self, parameters=['connect_btn', 'disconnect_btn'], widgets={ 'disconnect_btn': { 'button_type': 'danger', 'width': 100 }, 'connect_btn': { 'button_type': 'success', 'width': 100 } }, show_name=False, default_layout=pn.Row, ) return pn.Column( header, btns, pn.panel(self, parameters=['connection_status'], show_name=False, width=400), ) return pn.Column(header, content, width=500) def panel(self): return pn.panel(self.view)

class HSV(RGB):
    """
    HSV represents a regularly spaced 2D grid of an underlying
    continuous space of HSV (hue, saturation and value) color space
    values. The definition of the grid closely matches the semantics
    of an Image or RGB element and in the simplest case the grid may
    be specified as a NxMx3 or NxMx4 array of values along with a
    bounds, but it may also be defined through explicit and regularly
    spaced x/y-coordinate arrays. The two most basic supported
    constructors of an HSV element therefore include:

        HSV((X, Y, H, S, V))

    where X is a 1D array of shape M, Y is a 1D array of shape N and
    H/S/V are 2D arrays of shape NxM, or equivalently:

        HSV(Z, bounds=(x0, y0, x1, y1))

    where Z is a 3D array of stacked H/S/V arrays with shape NxMx3/4
    and the bounds define the (left, bottom, right, top) edges of the
    four corners of the grid. Other gridded formats which support
    declaring explicit x/y-coordinate arrays such as xarray are also
    supported.

    Note that the interpretation of the orientation changes depending
    on whether bounds or explicit coordinates are used.
    """

    group = param.String(default='HSV', constant=True)

    alpha_dimension = param.ClassSelector(default=Dimension('A', range=(0, 1)),
                                          class_=Dimension, instantiate=False, doc="""
        The alpha dimension definition to add the value dimensions if
        an alpha channel is supplied.""")

    vdims = param.List(
        default=[Dimension('H', range=(0, 1), cyclic=True),
                 Dimension('S', range=(0, 1)),
                 Dimension('V', range=(0, 1))], bounds=(3, 4), doc="""
        The dimension description of the data held in the array. If an
        alpha channel is supplied, the defined alpha_dimension is
        automatically appended to this list.""")

    hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)

    @property
    def rgb(self):
        """
        Conversion from HSV to RGB.
        """
        coords = tuple(self.dimension_values(d, expanded=False)
                       for d in self.kdims)
        data = [self.dimension_values(d, flat=False) for d in self.vdims]
        hsv = self.hsv_to_rgb(*data[:3])
        if len(self.vdims) == 4:
            hsv += (data[3],)

        params = util.get_param_values(self)
        del params['vdims']
        return RGB(coords+hsv, bounds=self.bounds,
                   xdensity=self.xdensity, ydensity=self.ydensity,
                   **params)
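
# Illustrative usage sketch for HSV. The 2x2 HSV grid below is made-up example
# data; the bounds values are arbitrary.
def _example_hsv_to_rgb():
    """Construct a small HSV element and convert it to RGB."""
    import numpy as np
    import holoviews as hv

    hsv_data = np.array([[(0.0, 1.0, 1.0), (0.33, 1.0, 1.0)],
                         [(0.66, 1.0, 1.0), (0.0, 0.0, 1.0)]])
    hsv = hv.HSV(hsv_data, bounds=(0, 0, 1, 1))
    return hsv.rgb  # an RGB element with the same bounds and density
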

class RGB(Image):
    """
    RGB represents a regularly spaced 2D grid of an underlying
    continuous space of RGB(A) (red, green, blue and alpha) color
    space values. The definition of the grid closely matches the
    semantics of an Image and in the simplest case the grid may be
    specified as a NxMx3 or NxMx4 array of values along with a
    bounds, but it may also be defined through explicit and regularly
    spaced x/y-coordinate arrays. The two most basic supported
    constructors of an RGB element therefore include:

        RGB((X, Y, R, G, B))

    where X is a 1D array of shape M, Y is a 1D array of shape N and
    R/G/B are 2D arrays of shape NxM, or equivalently:

        RGB(Z, bounds=(x0, y0, x1, y1))

    where Z is a 3D array of stacked R/G/B arrays with shape NxMx3/4
    and the bounds define the (left, bottom, right, top) edges of the
    four corners of the grid. Other gridded formats which support
    declaring explicit x/y-coordinate arrays such as xarray are also
    supported.

    Note that the interpretation of the orientation changes depending
    on whether bounds or explicit coordinates are used.
    """

    group = param.String(default='RGB', constant=True)

    alpha_dimension = param.ClassSelector(default=Dimension('A', range=(0, 1)),
                                          class_=Dimension, instantiate=False, doc="""
        The alpha dimension definition to add the value dimensions if
        an alpha channel is supplied.""")

    vdims = param.List(
        default=[Dimension('R', range=(0, 1)), Dimension('G', range=(0, 1)),
                 Dimension('B', range=(0, 1))], bounds=(3, 4), doc="""
        The dimension description of the data held in the matrix. If an
        alpha channel is supplied, the defined alpha_dimension is
        automatically appended to this list.""")

    _ndim = 3
    _vdim_reductions = {1: Image}

    @property
    def rgb(self):
        """
        Returns the corresponding RGB element. Other than the updated
        parameter definitions, this is the only change needed to
        implement an arbitrary colorspace as a subclass of RGB.
        """
        return self

    @classmethod
    def load_image(cls, filename, height=1, array=False, bounds=None,
                   bare=False, **kwargs):
        """
        Returns a raster element or raw numpy array from a PNG image
        file, using matplotlib.

        The specified height determines the bounds of the raster
        object in sheet coordinates: by default the height is 1 unit
        with the width scaled appropriately by the image aspect ratio.

        Note that as PNG images are encoded as RGBA, the red component
        maps to the first channel, the green component maps to the
        second component etc. For RGB elements, this mapping is
        trivial but may be important for subclasses e.g. for HSV
        elements.

        Setting bare=True will apply options disabling axis labels,
        displaying just the bare image. Any additional keyword
        arguments will be passed to the Image object.
""" try: from matplotlib import pyplot as plt except: raise ImportError("RGB.load_image requires matplotlib.") data = plt.imread(filename) if array: return data (h, w, _) = data.shape if bounds is None: f = float(height) / h xoffset, yoffset = w*f/2, h*f/2 bounds=(-xoffset, -yoffset, xoffset, yoffset) rgb = cls(data, bounds=bounds, **kwargs) if bare: rgb = rgb(plot=dict(xaxis=None, yaxis=None)) return rgb def __init__(self, data, kdims=None, vdims=None, **params): if isinstance(data, Overlay): images = data.values() if not all(isinstance(im, Image) for im in images): raise ValueError("Input overlay must only contain Image elements") shapes = [im.data.shape for im in images] if not all(shape==shapes[0] for shape in shapes): raise ValueError("Images in the input overlays must contain data of the consistent shape") ranges = [im.vdims[0].range for im in images] if any(None in r for r in ranges): raise ValueError("Ranges must be defined on all the value dimensions of all the Images") arrays = [(im.data - r[0]) / (r[1] - r[0]) for r,im in zip(ranges, images)] data = np.dstack(arrays) if vdims is None: vdims = list(self.vdims) else: vdims = list(vdims) if isinstance(vdims, list) else [vdims] if isinstance(data, np.ndarray): if data.shape[-1] == 4 and len(vdims) == 3: vdims.append(self.alpha_dimension) super(RGB, self).__init__(data, kdims=kdims, vdims=vdims, **params)