def test_Dict_wrap():
    """Dict.wrap passes non-dict values through untouched, wraps a plain
    dict in a PropertyValueDict, and is idempotent on already-wrapped values."""
    # Non-dict values must come back equal and with the same concrete type.
    passthrough_values = (0, 1, 2.3, "foo", None, (), [])
    for value in passthrough_values:
        wrapped = Dict.wrap(value)
        assert wrapped == value
        assert isinstance(wrapped, type(value))

    # A plain dict is converted into the property-value wrapper type.
    wrapped = Dict.wrap(dict(a=1, b=2))
    assert wrapped == dict(a=1, b=2)
    assert isinstance(wrapped, bcpw.PropertyValueDict)

    # Wrapping an already-wrapped dict must return the very same object.
    assert Dict.wrap(wrapped) is wrapped
class DictModel(Model):
    # A model with a single string-keyed mapping property; presumably a test
    # fixture for exercising Dict property behavior on a Model — confirm usage.
    values = Dict(String, Any)
class AttrSpec(HasProps):
    """A container for assigning attributes to values and retrieving them as needed.

    A special function this provides is automatically handling cases where the
    provided iterator is too short compared to the distinct values provided.

    Once created as attr_spec, you can do attr_spec[data_label], where data_label
    must be a one dimensional tuple of values, representing the unique group in the
    data.

    See the :meth:`AttrSpec.setup` method for the primary way to provide an
    existing AttrSpec with data and column values and update all derived property
    values.
    """

    # Source of the column values the spec maps from.
    data = Instance(ColumnDataSource)

    # Pool of attribute values to assign; cycled when there are more distinct
    # items than values (see _setup_iterable).
    iterable = List(Any, default=None)

    attrname = String(help='Name of the attribute the spec provides.')

    columns = Either(ColumnLabel, List(ColumnLabel), help="""
        The label or list of column labels that correspond to the columns that
        will be used to find all distinct values (single column) or combination
        of values ( multiple columns) to then assign a unique attribute to. If
        not enough unique attribute values are found, then the attribute values
        will be cycled.
        """)

    default = Any(default=None, help="""
        The default value for the attribute, which is used if no column is
        assigned to the attribute for plotting. If the default value is not
        provided, the first value in the `iterable` property is used.
        """)

    attr_map = Dict(Any, Any, help="""
        Created by the attribute specification when `iterable` and `data` are
        available. The `attr_map` will include a mapping between the distinct
        value(s) found in `columns` and the attribute value that has been
        assigned.
        """)

    items = Any(default=None, help="""
        The attribute specification calculates this list of distinct values that
        are found in `columns` of `data`.
        """)

    sort = Bool(default=True, help="""
        A boolean flag to tell the attribute specification to sort `items`, when
        it is calculated. This affects which value of `iterable` is assigned to
        each distinct value in `items`.
        """)

    ascending = Bool(default=True, help="""
        A boolean flag to tell the attribute specification how to sort `items`
        if the `sort` property is set to `True`. The default setting for
        `ascending` is `True`.
        """)

    bins = Instance(Bins, help="""
        If an attribute spec is binning data, so that we can map one value in
        the `iterable` to one value in `items`, then this attribute will contain
        an instance of the Bins stat. This is used to create unique labels for
        each bin, which is then used for `items` instead of the actual unique
        values in `columns`.
        """)

    def __init__(self, columns=None, df=None, iterable=None, default=None,
                 items=None, **properties):
        """Create a lazy evaluated attribute specification.

        Args:
            columns: a list of column labels
            df(:class:`~pandas.DataFrame`): the data source for the attribute
                spec.
            iterable: an iterable of distinct attribute values
            default: a value to use as the default attribute when no columns
                are passed
            items: the distinct values in columns. If items is provided as
                input, then the values provided are used instead of being
                calculated. This can be used to force a specific order for
                assignment.
            **properties: other properties to pass to parent :class:`HasProps`
        """
        properties['columns'] = self._ensure_list(columns)

        if df is not None:
            properties['data'] = ColumnDataSource(df)

        # Derive the default from the first value of the iterable when no
        # explicit default is given. The iterable is copied first so the
        # original is not consumed.
        if default is None and iterable is not None:
            default_iter = copy(iterable)
            properties['default'] = next(iter(default_iter))
        elif default is not None:
            properties['default'] = default

        if iterable is not None:
            properties['iterable'] = iterable

        if items is not None:
            properties['items'] = items

        super(AttrSpec, self).__init__(**properties)

        # Second chance at a default: the iterable may have arrived via
        # **properties rather than the explicit keyword.
        if self.default is None and self.iterable is not None:
            self.default = next(iter(copy(self.iterable)))

        # If both data and columns are available, eagerly compute the distinct
        # items (reusing the caller's DataFrame when one was provided).
        if self.data is not None and self.columns is not None:
            if df is None:
                df = self.data.to_df()

            self._generate_items(df, columns=self.columns)

        if self.items is not None and self.iterable is not None:
            self.attr_map = self._create_attr_map()

    @staticmethod
    def _ensure_list(attr):
        """Always returns a list with the provided value. Returns the value if a list."""
        if isinstance(attr, str):
            return [attr]
        elif isinstance(attr, tuple):
            return list(attr)
        else:
            return attr

    @staticmethod
    def _ensure_tuple(attr):
        """Return tuple with the provided value. Returns the value if a tuple."""
        if not isinstance(attr, tuple):
            return (attr, )
        else:
            return attr

    def _setup_default(self):
        """Stores the first value of iterable into `default` property."""
        self.default = next(self._setup_iterable())

    def _setup_iterable(self):
        """Default behavior is to copy and cycle the provided iterable."""
        return cycle(copy(self.iterable))

    def _generate_items(self, df, columns):
        """Produce list of unique tuples that identify each item."""
        if self.sort:
            # TODO (fpliger): this handles pandas API change so users do not
            #   experience the related annoying deprecation warning. This is
            #   probably worth removing when pandas deprecated version (0.16)
            #   is "old" enough
            try:
                df = df.sort_values(by=columns, ascending=self.ascending)
            except AttributeError:
                df = df.sort(columns=columns, ascending=self.ascending)

        items = df[columns].drop_duplicates()
        self.items = [tuple(x) for x in items.to_records(index=False)]

    def _create_attr_map(self, df=None, columns=None):
        """Creates map between unique values and available attributes."""
        if df is not None and columns is not None:
            self._generate_items(df, columns)

        iterable = self._setup_iterable()

        # The cycled iterable guarantees every item gets a value even when the
        # iterable is shorter than the number of distinct items.
        return {item: next(iterable) for item in self._item_tuples()}

    def _item_tuples(self):
        return [self._ensure_tuple(item) for item in self.items]

    def set_columns(self, columns):
        """Set columns property and update derived properties as needed."""
        columns = self._ensure_list(columns)

        if all([col in self.data.column_names for col in columns]):
            self.columns = columns
        else:
            # we have input values other than columns
            # assume this is now the iterable at this point
            self.iterable = columns
            self._setup_default()

    def setup(self, data=None, columns=None):
        """Set the data and update derived properties as needed."""
        if data is not None:
            self.data = data

        if columns is not None and self.data is not None:
            self.set_columns(columns)

        if self.columns is not None and self.data is not None:
            self.attr_map = self._create_attr_map(self.data.to_df(), self.columns)

    def update_data(self, data):
        # Re-run setup with the existing columns so derived values refresh.
        self.setup(data=data, columns=self.columns)

    def __getitem__(self, item):
        """Lookup the attribute to use for the given unique group label."""
        if not self.attr_map:
            return self.default
        elif self._ensure_tuple(item) not in self.attr_map.keys():
            # make sure we have attr map
            self.setup()

        return self.attr_map[self._ensure_tuple(item)]

    @property
    def series(self):
        # Materialize the assignment map as a pandas Series indexed by the
        # item tuples; empty Series when no map has been built yet.
        if not self.attr_map:
            return pd.Series()
        else:
            index = pd.MultiIndex.from_tuples(self._item_tuples(),
                                              names=self.columns)
            return pd.Series(list(self.attr_map.values()), index=index)
class _TestModel2(HasProps):
    # Fixture exercising a spread of property types with defaults.
    x = Int(12)
    y = String("hello")
    z = List(Int, [1, 2, 3])
    zz = Dict(String, Int)
    s = Nullable(String, default=None)
def test_Dict(self):
    # Validating a non-dict value must raise ValueError.
    p = Dict(String, Float)
    with pytest.raises(ValueError) as e:
        p.validate("junk")
    # The error text should be the detailed message, i.e. it should NOT be the
    # bare "ValueError" suffix produced when detail is suppressed.
    assert not str(e).endswith("ValueError")
class Perspective(HTMLBox):
    """Bokeh model wrapping the FINOS Perspective viewer.

    Nullable configuration is expressed as ``Either(..., Null())``; several
    properties previously passed the bare ``Null`` class instead of an
    instance — normalized here so all declarations are consistent.
    """

    aggregates = Either(Dict(String, Any), Null())

    column_pivots = Either(List(String), Null())

    columns = Either(List(String), Null())

    computed_columns = Either(List(String), Null())

    editable = Nullable(Bool())

    filters = Either(List(Any), Null())

    plugin = String()

    plugin_config = Either(Dict(String, Any), Null())

    row_pivots = Either(List(String), Null())

    selectable = Nullable(Bool())

    schema = Dict(String, String)

    sort = Either(List(List(String)), Null())

    source = Instance(ColumnDataSource)

    toggle_config = Bool(True)

    theme = String()

    # Resource bundles for the Perspective viewer and its plugins.
    # pylint: disable=line-too-long
    __javascript__ = [
        "https://unpkg.com/@finos/[email protected]/dist/umd/perspective.js",
        "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer.js",
        "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer-datagrid.js",
        "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer-hypergrid.js",
        "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer-d3fc.js",
    ]

    __js_skip__ = {
        "perspective": __javascript__,
    }

    __js_require__ = {
        "paths": {
            "perspective": "https://unpkg.com/@finos/[email protected]/dist/umd/perspective",
            "perspective-viewer": "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer",
            "perspective-viewer-datagrid": "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer-datagrid",
            "perspective-viewer-hypergrid": "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer-hypergrid",
            "perspective-viewer-d3fc": "https://unpkg.com/@finos/[email protected]/dist/umd/perspective-viewer-d3fc",
        },
        "exports": {
            "perspective": "perspective",
            "perspective-viewer": "PerspectiveViewer",
            "perspective-viewer-datagrid": "PerspectiveViewerDatagrid",
            "perspective-viewer-hypergrid": "PerspectiveViewerHypergrid",
            "perspective-viewer-d3fc": "PerspectiveViewerD3fc",
        },
    }

    __css__ = [
        "https://unpkg.com/@finos/[email protected]/dist/umd/all-themes.css"
    ]
class Foo(HasProps):
    # Fixture exercising a spread of property types with defaults.
    x = Int(12)
    y = String("hello")
    z = List(Int, [1, 2, 3])
    zz = Dict(String, Int)
    # NOTE(review): String(None) passes None as the default — presumably
    # deliberate to test a null-ish string default; confirm against the
    # property system's handling of None defaults.
    s = String(None)
class HasStringDictProp(Model):
    # Model with a string-keyed Dict property; __init__ is a plain
    # pass-through to the Model initializer.
    foo = Dict(String, Any)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
class HasIntDictProp(Model):
    # Model with an int-keyed Dict property; __init__ is a plain pass-through.
    foo = Dict(Int, Any)

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
class Builder(HasProps):
    """ A prototype class to inherit each new chart Builder type.

    It provides useful methods to be used by the inherited builder classes,
    in order to automate most of the charts creation tasks and leave the
    core customization to specialized builder classes. In that pattern
    inherited builders just need to provide the following methods:

    Required:

    * :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be
      rendered into the plot. Here you should call the
      :meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can
      setup the legend for you.
    * :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the
      glyphs. This is called after glyph creation, so you are able to inspect the
      comp_glyphs for their minimum and maximum values. See the
      :meth:`~bokeh.charts.builder.Builder.create` method for more information on
      when this is called and how the builder provides the ranges to the containing
      :class:`Chart` using the :meth:`Chart.add_ranges` method.

    Optional:

    * :meth:`~bokeh.charts.builder.Builder.setup`: provides an area
      where subclasses of builder can introspect properties, setup attributes, or
      change property values. This is called before
      :meth:`~bokeh.charts.builder.Builder.process_data`.
    * :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area
      where subclasses of builder can manipulate the source data before renderers are
      created.
    """

    # Optional Inputs
    x_range = Instance(Range)
    y_range = Instance(Range)

    xlabel = String()
    ylabel = String()

    xscale = String()
    yscale = String()

    palette = List(Color, help="""Optional input to override the default palette used
        by any color attribute.
        """)

    # Dimension Configuration

    # The dimension labels that drive the position of the glyphs. Subclasses
    # should implement this so that the Builder base class knows which
    # dimensions it needs to operate on. An example for a builder working with
    # cartesian x and y coordinates would be dimensions = ['x', 'y']. You
    # should then instantiate the x and y dimensions as attributes of the
    # subclass of builder using the
    # :class:`Dimension <bokeh.charts.properties.Dimension>` class. One for x,
    # as x = Dimension(...), and one as y = Dimension(...).
    dimensions = None  # None because it MUST be overridden

    # The dimension labels that must exist to produce the glyphs. This
    # specifies what are the valid configurations for the chart, with the
    # option of specifying the type of the columns. The
    # :class:`~bokeh.charts.data_source.ChartDataSource` will inspect this
    # property of your subclass of Builder and use this to fill in any
    # required dimensions if no keyword arguments are used.
    req_dimensions = []

    # Attribute Configuration
    attributes = Dict(String, Instance(AttrSpec), help="""
        The attribute specs used to group data. This is a mapping between the role of
        the attribute spec (e.g. 'color') and the
        :class:`~bokeh.charts.attributes.AttrSpec` class (e.g.,
        :class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this
        attributes property during runtime, which will consist of any attribute specs
        that are passed into the chart creation function (e.g.,
        :class:`~bokeh.charts.Bar`), ones that are created for the user from simple
        input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`),
        or lastly, the attribute spec found in the default_attributes configured for
        the subclass of :class:`~bokeh.charts.builder.Builder`.
        """)

    # The default attribute specs used to group data. This is where the
    # subclass of Builder should specify what the default attributes are that
    # will yield attribute values to each group of data, and any specific
    # configuration. For example, the :class:`ColorAttr` utilizes a default
    # palette for assigning color based on groups of data. If the user doesn't
    # assign a column of the data to the associated attribute spec, then the
    # default attrspec is used, which will yield a constant color value for
    # each group of data. This is by default the first color in the default
    # palette, but can be customized by setting the default color in the
    # ColorAttr.
    default_attributes = None  # None because it MUST be overridden

    # Derived properties (created by Builder at runtime)
    attribute_columns = List(ColumnLabel, help="""
        All columns used for specifying attributes for the Chart. The Builder will set
        this value on creation so that the subclasses can know the distinct set of
        columns that are being used to assign attributes.
        """)

    comp_glyphs = List(Instance(CompositeGlyph), help="""
        A list of composite glyphs, where each represents a unique subset of data. The
        composite glyph is a helper class that encapsulates all low level
        :class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of
        data. For example, the :class:`BoxGlyph` is a single class that yields each
        :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The single
        Box represents a full array of values that are aggregated, and is made up of
        multiple :class:`~bokeh.models.glyphs.Rect` and
        :class:`~bokeh.models.glyphs.Segment` glyphs.
        """)

    labels = List(String, help="""Represents the unique labels to be used for legends.""")

    # List of attributes to use for legends.
    label_attributes = []

    # Used to assign columns to dimensions when no selections have been
    # provided. The default behavior is provided by the
    # :class:`OrderedAssigner`, which assigns a single column to each dimension
    # available in the `Builder`'s `dims` property.
    column_selector = OrderedAssigner

    comp_glyph_types = List(Instance(CompositeGlyph))

    sort_dim = Dict(String, Bool, default={})

    legend_sort_field = String(help="""
        Attribute that should be used to sort the legend, for example: color,
        dash, maker, etc. Valid values for this property depend on the type
        of chart.
        """)

    legend_sort_direction = Enum(SortDirection, help="""
        Sort direction to apply to :attr:`~bokeh.charts.builder.Builder.sort_legend`.
        Valid values are: `ascending` or `descending`.
        """)

    source = Instance(ColumnDataSource)

    tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None,
                      help="""
        Tells the builder to add tooltips to the chart by either using the columns
        specified to the chart attributes (True), or by generating tooltips for each
        column specified (list(str)), or by explicit specification of the tooltips
        using the valid input for the `HoverTool` tooltips kwarg.
        """)

    __deprecated_attributes__ = ()

    def __init__(self, *args, **kws):
        """Common arguments to be used by all the inherited classes.

        Args:
            data (:ref:`userguide_charts_data_types`): source data for the chart
            legend (str, bool): the legend of your plot. The legend content is
                inferred from incoming input.It can be ``top_left``,
                ``top_right``, ``bottom_left``, ``bottom_right``. It is
                ``top_right`` is you set it as True.

        Attributes:
            source (obj): datasource object for your plot,
                initialized as a dummy None.
            x_range (obj): x-associated datarange object for you plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for you plot,
                initialized as a dummy None.
            groups (list): to be filled with the incoming groups of data.
                Useful for legend construction.
            data (dict): to be filled with the incoming data and be passed
                to the ChartDataSource for each Builder class.
            attr (list(AttrSpec)): to be filled with the new attributes created after
                loading the data dict.
        """
        data = None
        if len(args) != 0 or len(kws) != 0:

            # chart dimensions can be literal dimensions or attributes
            attrs = list(self.default_attributes.keys())
            dims = self.dimensions + attrs

            # pop the dimension inputs from kwargs
            data_args = {}
            for dim in dims:
                if dim in kws.keys():
                    data_args[dim] = kws[dim]

            # build chart data source from inputs, given the dimension configuration
            data_args['dims'] = tuple(dims)
            data_args['required_dims'] = tuple(self.req_dimensions)
            data_args['attrs'] = attrs
            data_args['column_assigner'] = self.column_selector
            data = ChartDataSource.from_data(*args, **data_args)

            # make sure that the builder dimensions have access to the chart data source
            for dim in self.dimensions:
                getattr(getattr(self, dim), 'set_data')(data)

            # handle input attrs and ensure attrs have access to data
            attributes = self._setup_attrs(data, kws)

            # remove inputs handled by dimensions and chart attributes
            for dim in dims:
                kws.pop(dim, None)
        else:
            attributes = dict()

        kws['attributes'] = attributes
        super(Builder, self).__init__(**kws)

        # collect unique columns used for attributes
        self.attribute_columns = collect_attribute_columns(**self.attributes)

        for k in self.__deprecated_attributes__:
            if k in kws:
                setattr(self, k, kws[k])

        self._data = data
        self._legends = []

    def _setup_attrs(self, data, kws):
        """Handle overridden attributes and initialize them with data.

        Makes sure that all attributes have access to the data
        source, which is used for mapping attributes to groups
        of data.

        Returns:
            None
        """
        source = ColumnDataSource(data.df)
        attr_names = self.default_attributes.keys()
        custom_palette = kws.get('palette')

        attributes = dict()

        for attr_name in attr_names:

            attr = kws.pop(attr_name, None)

            # if given an attribute use it
            if isinstance(attr, AttrSpec):
                attributes[attr_name] = attr

            # if we are given columns, use those
            elif isinstance(attr, (str, list)):
                attributes[attr_name] = self.default_attributes[attr_name]._clone()

                # override palette if available
                if isinstance(attributes[attr_name], ColorAttr):
                    if custom_palette is not None:
                        attributes[attr_name].iterable = custom_palette

                attributes[attr_name].setup(data=source, columns=attr)

            else:
                # override palette if available
                if (isinstance(self.default_attributes[attr_name], ColorAttr) and
                        custom_palette is not None):
                    attributes[attr_name] = self.default_attributes[attr_name]._clone()
                    attributes[attr_name].iterable = custom_palette
                else:
                    attributes[attr_name] = self.default_attributes[attr_name]._clone()

        # make sure all have access to data source
        for attr_name in attr_names:
            attributes[attr_name].update_data(data=source)

        return attributes

    def setup(self):
        """Perform any initial pre-processing, attribute config.

        Returns:
            None
        """
        pass

    def process_data(self):
        """Make any global data manipulations before grouping.

        It has to be implemented by any of the inherited class
        representing each different chart type. It is the place
        where we make specific calculations for each chart.

        Returns:
            None
        """
        pass

    def yield_renderers(self):
        """ Generator that yields the glyphs to be draw on the plot

        It has to be implemented by any of the inherited class
        representing each different chart type.

        Yields:
            :class:`GlyphRenderer`
        """
        raise NotImplementedError('Subclasses of %s must implement _yield_renderers.' %
                                  self.__class__.__name__)

    def set_ranges(self):
        """Calculate and set the x and y ranges.

        It has to be implemented by any of the subclasses of builder
        representing each different chart type, and is called after
        :meth:`yield_renderers`.

        Returns:
            None
        """
        raise NotImplementedError('Subclasses of %s must implement _set_ranges.' %
                                  self.__class__.__name__)

    def get_dim_extents(self):
        """Helper method to retrieve maximum extents of all the renderers.

        Returns:
            a dict mapping between dimension and value for x_max, y_max, x_min, y_min
        """
        return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]),
                'y_max': max([renderer.y_max for renderer in self.comp_glyphs]),
                'x_min': min([renderer.x_min for renderer in self.comp_glyphs]),
                'y_min': min([renderer.y_min for renderer in self.comp_glyphs])
                }

    def add_glyph(self, group, glyph):
        """Add a composite glyph.

        Manages the legend, since the builder might not want all attribute types
        used for the legend.

        Args:
            group (:class:`DataGroup`): the data the `glyph` is associated with
            glyph (:class:`CompositeGlyph`): the glyph associated with the `group`

        Returns:
            None
        """
        if isinstance(glyph, list):
            for sub_glyph in glyph:
                self.comp_glyphs.append(sub_glyph)
        else:
            self.comp_glyphs.append(glyph)

        # handle cases where builders have specified which attributes to use for labels
        label = None
        if len(self.label_attributes) > 0:
            for attr in self.label_attributes:
                # this will get the last attribute group label for now
                if self.attributes[attr].columns is not None:
                    label = self._get_group_label(group, attr=attr)

        # if no special case for labeling, just use the group label
        if label is None:
            label = self._get_group_label(group, attr='label')

        # add to legend if new and unique label
        if str(label) not in self.labels and label is not None:
            self._legends.append((label, glyph.renderers))
            self.labels.append(label)

    def _get_group_label(self, group, attr='label'):
        """Get the label of the group by the attribute name.

        Args:
            group (:attr:`DataGroup`: the group of data
            attr (str, optional): the attribute name containing the label,
                defaults to 'label'.

        Returns:
            str: the label for the group
        """
        # BUGFIX: the original compared with `attr is 'label'`, relying on
        # string interning for identity — use equality instead.
        if attr == 'label':
            label = group.label
        else:
            label = group[attr]
            if isinstance(label, dict):
                label = tuple(label.values())

        return self._get_label(label)

    @staticmethod
    def _get_label(raw_label):
        """Converts a label by string or tuple to a string representation.

        Args:
            raw_label (str or tuple(any, any)): a unique identifier for the
                data group

        Returns:
            str: a label that is usable in charts
        """
        # don't convert None type to string so we can test for it later
        if raw_label is None:
            return None

        if (isinstance(raw_label, tuple) or isinstance(raw_label, list)) and \
                len(raw_label) == 1:
            raw_label = raw_label[0]
        elif isinstance(raw_label, dict):
            raw_label = label_from_index_dict(raw_label)

        return str(raw_label)

    def collect_attr_kwargs(self):
        # Attributes introduced by this subclass, i.e. not present on the
        # immediate parent's default_attributes.
        if hasattr(super(self.__class__, self), 'default_attributes'):
            attrs = set(self.default_attributes.keys()) - set(
                (super(self.__class__, self).default_attributes or {}).keys())
        else:
            attrs = set()
        return attrs

    def get_group_kwargs(self, group, attrs):
        return {attr: group[attr] for attr in attrs}

    def create(self, chart=None):
        """Builds the renderers, adding them and other components to the chart.

        Args:
            chart (:class:`Chart`, optional): the chart that will contain the
                glyph renderers that the `Builder` produces.

        Returns:
            :class:`Chart`
        """
        # call methods that allow customized setup by subclasses
        self.setup()
        self.process_data()

        # create and add renderers to chart
        renderers = self.yield_renderers()
        if chart is None:
            chart = Chart()
        chart.add_renderers(self, renderers)

        # handle ranges after renders, since ranges depend on aggregations
        # ToDo: should reconsider where this occurs
        self.set_ranges()
        chart.add_ranges('x', self.x_range)
        chart.add_ranges('y', self.y_range)

        # sort the legend if we are told to
        self._legends = self._sort_legend(self.legend_sort_field,
                                          self.legend_sort_direction,
                                          self._legends, self.attributes)

        # always contribute legends, let Chart sort it out
        chart.add_legend(self._legends)

        chart.add_labels('x', self.xlabel)
        chart.add_labels('y', self.ylabel)

        chart.add_scales('x', self.xscale)
        chart.add_scales('y', self.yscale)

        if self.tooltips is not None:
            tooltips = build_hover_tooltips(hover_spec=self.tooltips,
                                            chart_cols=self.attribute_columns)
            chart.add_tooltips(tooltips)

        return chart

    @classmethod
    def generate_help(cls):
        help_str = ''
        for comp_glyph in cls.comp_glyph_types:
            help_str += str(comp_glyph.glyph_properties())

        return help_str

    @staticmethod
    def _sort_legend(legend_sort_field, legend_sort_direction, legends, attributes):
        """Sort legends sorted by looping though sort_legend items (
        see :attr:`Builder.sort_legend` for more details)
        """
        if legend_sort_field:
            if len(attributes[legend_sort_field].columns) > 0:
                # TODO(fpliger): attributes should be consistent and not
                #               need any type checking but for
                #               the moment it is not, specially when going
                #               though a process like binning or when data
                #               is built for HeatMap, Scatter, etc...
                item_order = [x[0] if isinstance(x, tuple) else x
                              for x in attributes[legend_sort_field].items]

                item_order = [str(x) if not isinstance(x, string_types) else x
                              for x in item_order]

                def sort_key(leg):
                    # rank each legend entry by the position of its label in
                    # the attribute's item order
                    return item_order.index(leg[0])

                reverse = legend_sort_direction == 'descending'
                return list(sorted(legends, key=sort_key, reverse=reverse))

        return legends
class CompositeGlyph(HasProps):
    """Represents a subset of data.

    A collection of hetero or homogeneous glyph
    renderers which represent a subset of data. The
    purpose of the composite glyph is to abstract
    away the details of constructing glyphs, based on
    the details of a subset of data, from the grouping
    operations that a generalized builders must implement.

    In general, the Builder operates at the full column oriented data
    source level, segmenting and assigning attributes from a large
    selection, while the composite glyphs will typically be passed
    an array-like structures with one or more singular attributes to apply.

    Another way to explain the concept is that the Builder
    operates as the groupby, as in pandas, while the
    CompositeGlyph operates as the function used in the apply.

    What is the responsibility of the Composite Glyph?
        - Produce GlyphRenderers
        - Apply any aggregations
        - Tag the GlyphRenderers with the group label
        - Apply transforms due to chart operations
            - Note: Operations require implementation of special methods
    """

    # composite glyph inputs
    data = Any()

    label = Either(String, Dict(String, Any), default='None',
                   help='Identifies the subset of data.')

    values = Either(Column(Float), Column(String), help="""Array-like values,
        which are used as the input to the composite glyph.""")

    # derived from inputs
    source = Instance(ColumnDataSource, help="""The data source used for the contained
        glyph renderers. Simple glyphs part of the composite glyph might not use the
        column data source.""")

    renderers = List(Instance(GlyphRenderer))

    glyphs = Dict(String, Any)  # where we expect a Glyph class as Value

    operations = List(Any, help="""A list of chart operations that can be applied to
        manipulate their visual depiction.""")

    color = Color(default='gray', help="""A high level color. Some glyphs will
        implement more specific color attributes for parts or specific glyphs.""")

    fill_color = Color(default="gray")

    line_color = Color(default='black', help="""A default outline color for contained
        glyphs.""")

    fill_alpha = Float(default=0.8)
    line_alpha = Float(default=1.0)

    left_buffer = Float(default=0.0)
    right_buffer = Float(default=0.0)
    top_buffer = Float(default=0.0)
    bottom_buffer = Float(default=0.0)

    def setup(self):
        """Build renderers and data source and set sources on renderers."""
        self.renderers = [renderer for renderer in self.build_renderers()]
        if self.renderers is not None:
            self.refresh()

    def refresh(self):
        """Update the GlyphRenderers.

        .. note: this method would be called after data is added.
        """
        if self.renderers is not None:
            data = self.build_source()

            if data is not None:

                # Normalize to a ColumnDataSource. BUGFIX: the original only
                # assigned ``source`` in the dict branch, so a
                # ColumnDataSource returned by build_source() raised
                # UnboundLocalError at the type check below.
                if isinstance(data, dict):
                    source = ColumnDataSource(data)
                else:
                    source = data

                if not isinstance(source, ColumnDataSource):
                    raise TypeError('build_source must return dict or ColumnDataSource.')

                self.source = self.add_chart_index(source)

                self._set_sources()

    def add_chart_index(self, data):
        """Add identifying columns for the group this glyph represents.

        Accepts either a ColumnDataSource or a plain columnar dict and
        returns the same kind of object it was given.
        """
        if isinstance(data, ColumnDataSource):
            source = data
            data = source.data
        else:
            source = None

        # add chart index to data
        if 'chart_index' not in data:
            n_rows = len(list(data.values())[0])

            # add composite chart index as column
            data['chart_index'] = [self.label] * n_rows

            # add constant value for each column in chart index
            if isinstance(self.label, dict):
                for col, val in iteritems(self.label):
                    data[col] = [val] * n_rows

        if source is not None:
            source.data = data
            return source
        else:
            return data

    def build_renderers(self):
        raise NotImplementedError('You must return list of renderers.')

    def build_source(self):
        raise NotImplementedError('You must return ColumnDataSource.')

    def _set_sources(self):
        """Store reference to source in each GlyphRenderer.

        .. note::
            if the glyphs that are part of the composite glyph differ, you may have to
            override this method and handle the sources manually.
        """
        for renderer in self.renderers:
            renderer.data_source = self.source

    def __stack__(self, glyphs):
        """A special method the `stack` function applies to composite glyphs."""
        pass

    def __jitter__(self, glyphs):
        """A special method the `jitter` function applies to composite glyphs."""
        pass

    def __dodge__(self, glyphs):
        """A special method the `dodge` function applies to composite glyphs."""
        pass

    def __overlay__(self, glyphs):
        """A special method the `overlay` function applies to composite glyphs."""
        pass

    def apply_operations(self):
        pass

    @classmethod
    def glyph_properties(cls):
        props = {}
        for name, glyph in iteritems(cls.glyphs):
            props[name] = glyph.class_properties(withbases=True)

        return props
def test_Dict(self, detail):
    # Validating a non-dict value must raise ValueError; `detail` toggles
    # whether the full message or just the exception name is produced.
    p = Dict(String, Float)
    with pytest.raises(ValueError) as e:
        p.validate("junk", detail)
    # With detail the text carries the full message (does not end in the bare
    # "ValueError"); without detail it ends in "ValueError".
    assert str(e).endswith("ValueError") == (not detail)
def test_wrap_dict(self) -> None:
    """Nullable(Dict) passes None through, wraps plain dicts in a
    PropertyValueDict, and is idempotent on already-wrapped values."""
    nullable_dict = bcpn.Nullable(Dict(String, Int))

    # None is a valid value and must not be wrapped.
    assert nullable_dict.wrap(None) is None

    # A plain dict becomes the property-value wrapper type, and wrapping
    # the result again returns the identical object.
    result = nullable_dict.wrap({"foo": 10})
    assert isinstance(result, PropertyValueDict)
    assert nullable_dict.wrap(result) is result
def test_Dict(self):
    # Validating a non-dict value must raise ValueError with the exact
    # descriptive message naming the property type and the offending value.
    p = Dict(String, Float)
    with pytest.raises(ValueError) as e:
        p.validate("junk")
    assert matches(str(e.value), r"expected an element of Dict\(String, Float\), got 'junk'")
class V(self.pObjectClass):
    # Fixture covering Instance-valued properties in every container shape:
    # bare, in a List, in a Tuple, and (nested) in Dicts. Note the base class
    # comes from the enclosing test's `self.pObjectClass`.
    u1 = Instance(U)
    u2 = List(Instance(U))
    u3 = Tuple(Int, Instance(U))
    u4 = Dict(String, Instance(U))
    u5 = Dict(String, List(Instance(U)))
class HasDictDefault(Model):
    # Dict property with a non-empty literal default; presumably used to test
    # that property defaults are copied per-instance rather than shared.
    value = Dict(String, Int, default=dict(hello=42))
class DataTabulator(HTMLBox):
    """A Bokeh Model that enables easy use of Tabulator tables
    See http://tabulator.info/
    """

    # Raw Tabulator configuration object passed through to the JS side.
    configuration = Dict(String, Any)

    columns = List(Instance(TableColumn), help="""
    The list of child column widgets.
    """)

    download = Bool(default=False)

    editable = Bool(default=True)

    filename = String(default="table.csv")

    follow = Bool()

    frozen_rows = List(Int)

    groupby = List(String)

    hidden_columns = List(String)

    layout = Enum('fit_data', 'fit_data_fill', 'fit_data_stretch',
                  'fit_data_table', 'fit_columns', default="fit_data")

    source = Instance(ColumnDataSource)

    # Mapping of row index -> (column index -> list of CSS style strings).
    styles = Dict(Int, Dict(Int, List(String)))

    # Paging state; presumably driven from the Python side — confirm in the
    # corresponding TS model.
    pagination = String()

    page = Int()

    page_size = Int()

    max_page = Int()

    theme = Enum(*TABULATOR_THEMES, default="simple")

    theme_url = String(default=THEME_URL)

    # Static resource bundles for the Tabulator library.
    __css__ = [THEME_URL + 'tabulator_simple.min.css']

    __javascript__ = [
        JS_SRC,
        MOMENT_SRC
    ]

    __js_require__ = {
        'paths': {
            'tabulator': JS_SRC[:-3]
        },
        'exports': {
            'tabulator': 'Tabulator'
        }
    }
class HasIntDictProp(Model):
    # Dict property keyed by ints rather than the usual strings.
    foo = Dict(Int, Any)

    def __init__(self, **kwargs):
        # Pure pass-through to the Model initializer; no extra state.
        super(HasIntDictProp, self).__init__(**kwargs)
class DataTabulator(HTMLBox):
    """A Bokeh Model that enables easy use of Tabulator tables
    See http://tabulator.info/
    """

    # Column name -> aggregator name applied by Tabulator.
    aggregators = Dict(String, String)

    buttons = Dict(String, String)

    # Raw Tabulator configuration object, forwarded to the JS constructor.
    configuration = Dict(String, Any)

    columns = List(Instance(TableColumn), help="""
    The list of child column widgets.
    """)

    download = Bool(default=False)

    # Row index -> layout model rendered inside an expanded row.
    children = Dict(Int, Instance(LayoutDOM))

    editable = Bool(default=True)

    expanded = List(Int)

    filename = String(default="table.csv")

    filters = List(Any)

    follow = Bool(True)

    frozen_rows = List(Int)

    groupby = List(String)

    hidden_columns = List(String)

    indexes = List(String)

    layout = Enum('fit_data', 'fit_data_fill', 'fit_data_stretch',
                  'fit_data_table', 'fit_columns', default="fit_data")

    source = Instance(ColumnDataSource)

    # Column name -> either a raw style string or a nested
    # row -> column -> styles mapping.
    styles = Dict(
        String,
        Either(
            String,
            Dict(Int, Dict(Int, List(Either(String, Tuple(String,
                                                          String)))))))

    pagination = Nullable(String)

    page = Nullable(Int)

    page_size = Int()

    max_page = Int()

    sorters = List(Dict(String, String))

    select_mode = Any()

    selectable_rows = Nullable(List(Int))

    theme = Enum(*TABULATOR_THEMES, default="simple")

    theme_url = String(default=THEME_URL)

    __css_raw__ = CSS_URLS

    @classproperty
    def __css__(cls):
        # Keep only the 'simple' theme stylesheet unless a single custom
        # stylesheet was supplied; mutates the raw list before bundling.
        cls.__css_raw__ = [
            url for url in cls.__css_raw__
            if 'simple' in url or len(cls.__css_raw__) == 1
        ]
        return bundled_files(cls, 'css')

    __javascript_raw__ = [JS_SRC, MOMENT_SRC]

    @classproperty
    def __javascript__(cls):
        return bundled_files(cls)

    @classproperty
    def __js_skip__(cls):
        # First entry is Tabulator, second is moment (see __javascript_raw__).
        return {
            'Tabulator': cls.__javascript__[:1],
            'moment': cls.__javascript__[1:]
        }

    __js_require__ = {
        'paths': {
            'tabulator': JS_SRC[:-3],
            'moment': MOMENT_SRC[:-3]
        },
        'exports': {
            'tabulator': 'Tabulator',
            'moment': 'moment'
        }
    }
class AcePlot(HTMLBox):
    """
    A Bokeh model that wraps around a Ace editor and renders it inside
    a Bokeh plot.
    """

    # CDN sources for the editor core plus language-tools and modelist
    # extensions.
    __javascript_raw__ = [
        'https://cdnjs.cloudflare.com/ajax/libs/ace/1.4.11/ace.js',
        'https://cdnjs.cloudflare.com/ajax/libs/ace/1.4.11/ext-language_tools.js',
        'https://cdnjs.cloudflare.com/ajax/libs/ace/1.4.11/ext-modelist.js'
    ]

    # npm tarball used when bundling resources locally.
    __tarball__ = {
        'tar': 'https://registry.npmjs.org/ace-builds/-/ace-builds-1.4.11.tgz',
        'src': 'package/src-min/',
        'dest': 'ajax/libs/1.4.11',
        'exclude': ['snippets']
    }

    @classproperty
    def __javascript__(cls):
        return bundled_files(cls)

    @classproperty
    def __js_skip__(cls):
        return {'ace': cls.__javascript__}

    # NOTE(review): the require path pins 1.4.7 while the bundled scripts are
    # 1.4.11 — confirm the version mismatch is intentional.
    __js_require__ = {
        'paths': {
            ('ace', ('ace/ace', 'ace/ext-language_tools')):
            '//cdnjs.cloudflare.com/ajax/libs/ace/1.4.7'
        },
        'exports': {
            'ace': 'ace'
        },
        'shim': {
            'ace/ext-language_tools': {
                'deps': ["ace/ace"]
            },
            'ace/ext-modelist': {
                'deps': ["ace/ace"]
            }
        }
    }

    # Editor state mirrored between Python and the browser.
    code = String()
    theme = Enum(ace_themes, default='chrome')
    filename = Nullable(String())
    language = String()
    annotations = List(Dict(String, Any), default=[])
    readonly = Bool(default=False)
    print_margin = Bool(default=False)

    height = Override(default=300)
    width = Override(default=300)
class DeckGLPlot(HTMLBox):
    """A Bokeh model that wraps around a DeckGL plot and renders it inside
    a HTMLBox"""

    __css_raw__ = ["https://api.mapbox.com/mapbox-gl-js/v2.6.1/mapbox-gl.css"]

    @classproperty
    def __css__(cls):
        return bundled_files(cls, 'css')

    __javascript_raw__ = [
        "https://unpkg.com/[email protected]/dist/h3-js.umd.js",
        "https://cdn.jsdelivr.net/npm/[email protected]/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@deck.gl/[email protected]/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min.js",
        "https://api.mapbox.com/mapbox-gl-js/v2.6.1/mapbox-gl.js",
    ]

    @classproperty
    def __javascript__(cls):
        return bundled_files(cls)

    @classproperty
    def __js_skip__(cls):
        # Last entry of __javascript_raw__ is mapbox-gl; everything before it
        # belongs to the deck.gl/loaders stack.
        return {
            'deck': cls.__javascript__[:-1],
            'mapboxgl': cls.__javascript__[-1:]
        }

    __js_require__ = {
        'paths': OrderedDict([
            ("h3", "https://unpkg.com/[email protected]/dist/h3-js.umd"),
            ("deck-gl",
             "https://cdn.jsdelivr.net/npm/[email protected]/dist.min"),
            ("deck-json",
             "https://cdn.jsdelivr.net/npm/@deck.gl/[email protected]/dist.min"),
            ("loader-csv",
             "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min"),
            ("loader-json",
             "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min"),
            ("loader-tiles",
             "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min"),
            ("mapbox-gl",
             'https://cdn.jsdelivr.net/npm/[email protected]/dist/mapbox-gl.min'),
        ]),
        'exports': {"deck-gl": "deck", "mapbox-gl": "mapboxgl", "h3": "h3"},
        'shim': {
            'deck-json': {'deps': ["deck-gl"]},
            'deck-gl': {'deps': ["h3"]}
        }
    }

    # Serialized deck.gl specification and supporting view/interaction state.
    data = Dict(String, Any)
    data_sources = List(Instance(ColumnDataSource))
    initialViewState = Dict(String, Any)
    layers = List(Dict(String, Any))
    mapbox_api_key = String()
    tooltip = Either(Bool, Dict(Any, Any), default=True)
    clickState = Dict(String, Any)
    hoverState = Dict(String, Any)
    viewState = Dict(String, Any)
    # NOTE(review): presumably event-name -> throttle interval in ms — confirm
    # against the TypeScript view.
    throttle = Dict(String, Int)
    height = Override(default=400)
    width = Override(default=600)
def test_Dict(self, detail) -> None:
    """With detail disabled the raised ValueError carries an empty message."""
    dict_prop = Dict(String, Float)
    with pytest.raises(ValueError) as excinfo:
        dict_prop.validate("junk", detail)
    message_is_empty = str(excinfo.value) == ""
    assert message_is_empty == (not detail)
class CustomSelect(InputWidget):
    ''' Custom select widget.

    A bootstrap-multiselect based drop-down that supports optional option
    groups, client-side filtering, and single or multiple selection.
    '''

    # TypeScript view implementation compiled by Bokeh.
    __implementation__ = os.path.join("implementation_files", "select.ts")

    __javascript__ = [
        "https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js",
        "https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.5.3/js/bootstrap.bundle.min.js",
        "https://cdnjs.cloudflare.com/ajax/libs/bootstrap-multiselect/0.9.16/js/bootstrap-multiselect.min.js",
        "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.2/js/all.min.js",
    ]

    __css__ = [
        "https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.5.3/css/bootstrap.min.css",
        "https://cdnjs.cloudflare.com/ajax/libs/bootstrap-multiselect/0.9.16/css/bootstrap-multiselect.min.css",
    ]

    options = Either(Dict(String, List(Either(String, Tuple(String, String)))),
                     List(Either(String, Tuple(String, String))), help="""
    Available selection options. Options may be provided either as a list of
    possible string values, or as a list of tuples, each of the form
    ``(value, label)``. In the latter case, the visible widget text for each value
    will be corresponding given label.
    In order to group options, provide a dictionary where each key is the name of a group and
    its corresponding value is the group options. For example:
    {"Even": ["2", "4", "6"], "Odd": ["1", "3", "5"]}.
    Note! the option's value should be unique across all other options and not only the
    option's group).
    """)

    value = Either(String, List(String), default="", help="""
    Initial or selected values.
    Note! when the options are grouped the value is a tuple that follows the pattern:
    (<group>, <value>).
    """)

    enable_filtering = Bool(default=False, help="""
    Enable filtering options using a search box.
    """)

    enabled = Bool(default=True, help="""
    Controls whether the widget is enabled (True) or disabled (False).
    Note! the "disabled" property is not supported in this widget. Use this property
    instead.
    """)

    non_selected_text = String(default="Select...", help="""
    The text to display on the toggle button when none of the options are selected.
    """)

    allow_non_selected = Bool(default=True, help="""
    Allows/Disallows none of the options to be selected. If set to False, the first
    option is selected by default.
    """)

    is_opt_grouped = Bool(readonly=True, help="""
    Indicates whether the widget contains grouped options or not.
    """)

    @classmethod
    def create(cls: Type[T], title: str) -> T:
        """This function creates a custom single select filter with the title
        given.
        """
        # Defaults chosen for the single-select filter use case: empty option
        # list (filled later), filtering enabled, nothing pre-selected.
        return cls(
            options=[],
            value="",
            title=title,
            enable_filtering=True,
            margin=[10, 10, 10, 5],
            allow_non_selected=True,
            sizing_mode='scale_width',
            css_classes=['custom_select', 'custom'],
        )
class CrossFilter(Model):
    """Interactive filtering and faceting application with multiple plot types"""

    # identify properties for the data
    columns = List(Dict(String, Any))
    data = Instance(ColumnDataSource)
    filtered_data = Instance(ColumnDataSource)
    head = Instance(ColumnDataSource)

    # list of datasources to use for filtering widgets
    filter_sources = Dict(String, Instance(ColumnDataSource))

    # list of columns we are filtering
    filtering_columns = List(String)

    # dict of column name to filtering widgets
    filter_widgets = Dict(String, Instance(Model))

    # dict which aggregates all the selections from the different filtering
    # widgets
    filtered_selections = Dict(String, Dict(String, Any))

    # list of facet vars
    facet_x = List(String, default=[])
    facet_y = List(String, default=[])
    facet_tab = List(String, default=[])

    # the displayed plot object
    plot = Instance(Model)
    x_range = Instance(Range)
    y_range = Instance(Range)

    # configuration properties for the plot
    plot_type = Enum("line", "scatter", "bar")
    # plot-type name -> plugin class used to build that plot
    plot_map = {
        'line': CrossLinePlugin,
        'scatter': CrossScatterPlugin,
        'bar': CrossBarPlugin
    }
    x = String
    y = String
    agg = String
    color = String
    title = String
    height = Int()
    width = Int()

    # identify the selector/drop-down properties
    plot_selector = Instance(Select)
    x_selector = Instance(Select)
    y_selector = Instance(Select)
    agg_selector = Instance(Select)

    def __init__(self, *args, **kwargs):
        """Creates original and filtered ColumnDataSource and handles defaults.

        The df and starting configuration are only provided the first time
        init is called, within the create method.

        Kwargs:
            df (DataFrame): the data to use in the crossfilter app
            plot_type (str, optional): starting plot type
            agg (str, optional): starting aggregation type
        """
        if 'df' in kwargs:
            self._df = kwargs.pop('df')

            print "what is understood from the df", self._df

            # initialize a "pure" and filtered data source based on df
            # NOTE(review): despite the comment above, `data` and
            # `filtered_data` are populated from a hard-coded internal HTTP
            # endpoint, not from the provided df — confirm this is intentional.
            data_source = self.query(
                "http://10.200.94.205/bench/v1/col/dict/all")
            print "query successful"
            kwargs['head'] = ColumnDataSource(data=self._df)
            kwargs['data'] = ColumnDataSource(data=data_source)
            kwargs['filtered_data'] = ColumnDataSource(data=data_source)

        # default plot type
        if 'plot_type' not in kwargs:
            kwargs['plot_type'] = "scatter"

        # default aggregation type
        if 'agg' not in kwargs:
            kwargs['agg'] = 'sum'

        if 'plot_map' in kwargs:
            self.plot_map = kwargs.pop('plot_map')

        super(CrossFilter, self).__init__(**kwargs)

    def query(self, endpoint):
        """GET `endpoint` from the hard-coded bench host and return the parsed
        JSON payload's 'content' field, or None on any failure (bad JSON,
        missing key, etc. — the bare except swallows everything).
        """
        import httplib
        import json
        import traceback
        # NOTE(review): `traceback` and `requests` are imported but unused here.
        import requests
        conn = httplib.HTTPConnection('10.200.94.205', 80)
        conn.request("GET", endpoint)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        try:
            data_json = json.loads(data)
            return data_json['content']
        except:
            return None

    @classmethod
    def create(cls, **kwargs):
        print "create"
        # NOTE(review): the print above precedes the string below, so this
        # string is an ordinary expression statement, not a real docstring.
        """Performs all one-time construction of bokeh objects.

        This classmethod is required due to the way that bokeh handles the
        python and javascript components. The initialize method will be
        called each additional time the app is updated (including once in
        the create method), but the Model infrastructure will find that the
        object already exists in any future calls, and will not create a new
        object.

        Kwargs:
            df (DataFrame): the data to use in the crossfilter app
            plot_type (str, optional): starting plot type
            agg (str, optional): starting aggregation type
        """
        obj = cls(**kwargs)
        print "executes"
        obj.set_metadata()
        choices = obj.make_plot_choices()
        print "choices set"
        obj.update_plot_choices(choices)
        print "setting plot"
        obj.set_plot()
        obj.set_input_selector()
        print "all set and return object"
        return obj

    def set_input_selector(self):
        """Creates and configures each selector (drop-down menu)."""
        col_names = [x['name'] for x in self.columns]
        # 'None' sentinel lets the user deselect an axis/aggregation column.
        col_names.append('None')
        self.plot_selector = Select.create(
            title="PlotType",
            name="plot_type",
            value=self.plot_type,
            options=["line", "scatter", "bar"],
        )
        self.x_selector = Select.create(
            name="x",
            value=self.x,
            options=col_names,
        )
        self.y_selector = Select.create(
            name="y",
            value=self.y,
            options=col_names,
        )
        self.agg_selector = Select.create(
            name='agg',
            value=self.agg,
            options=['sum', 'mean', 'last', 'count', 'percent'],
        )

    def update_plot_choices(self, input_dict):
        """Sets object attributes corresponding to input_dict's values.

        Only attributes that are currently None are overwritten, so explicit
        user configuration wins over computed defaults.

        Args:
            input_dict (dict): dict with x, y, and plot_type keys
        """
        for k, v in input_dict.items():
            if getattr(self, k) is None:
                setattr(self, k, v)

    def get_plot_class(self):
        """Return the class for the current plot selection."""
        return self.plot_map[self.plot_type]

    def column_descriptor_dict(self):
        """Creates column stats dict with keys of column names.

        Returns:
            dict: dict with key per column in data, where values are column
                stats
        """
        column_descriptors = {}
        for x in self.columns:
            column_descriptors[x['name']] = x
        return column_descriptors

    @property
    def continuous_columns(self):
        """Returns list of column descriptors for the non-Discrete columns.

        Returns:
            list(dict): list of dicts, containing metadata about columns
        """
        return [x for x in self.columns if x['type'] != 'DiscreteColumn']

    @property
    def discrete_columns(self):
        """Returns list of column descriptors for the Discrete columns.

        Returns:
            list(dict): list of dicts, containing metadata about columns
        """
        return [x for x in self.columns if x['type'] == 'DiscreteColumn']

    def make_plot_choices(self):
        """Selects first two continuous columns for x,y during initial setup

        Returns:
            dict: x, y, and plot_type keys and values for initial setup
        """
        # prefer continuous columns to initialize with, otherwise use what
        # we have
        if len(self.continuous_columns) > 1:
            x, y = [x['name'] for x in self.continuous_columns[:2]]
        else:
            x, y = [x['name'] for x in self.columns[:2]]
        return {'x': x, 'y': y, 'plot_type': 'scatter'}

    def set_plot(self):
        """Makes and sets the plot based on the current configuration of
        app."""
        self.update_xy_ranges(source=self.df)
        plot = self.make_plot()
        self.plot = plot
        # push all new models into the current document
        curdoc()._add_all()

    def make_plot(self):
        """Makes the correct plot layout type, based on app's current config.

        Returns:
            Model: one plot, grid of plots, or tabs of plots/grids of plots
        """
        if self.facet_tab:
            facets = self.make_facets(dimension='tab')
            # generate a list of panels, containing plot/plots for each facet
            tabs = [
                self.make_tab(content=self.create_plot_page(tab_facet=facet),
                              tab_label=self.facet_title(facet))
                for facet in facets
            ]
            return Tabs(tabs=tabs)
        else:
            return self.create_plot_page()

    def create_plot_page(self, tab_facet=None):
        """Generates a single visible page of a plot or plots.

        Args:
            tab_facet (DiscreteFacet or ContinuousFacet): a facet to filter on

        Returns:
            Model: a single or grid of plots
        """
        # no faceting
        if all([len(self.facet_x) == 0, len(self.facet_y) == 0]):
            plot_page = self.make_single_plot(facet=tab_facet)

        # x xor y faceting
        if all([(len(self.facet_x) != 0) ^ (len(self.facet_y) != 0)]):
            plot_page = self.make_1d_facet_plot(facet=tab_facet)

        # x and y faceting
        if all([len(self.facet_x) != 0, len(self.facet_y) != 0]):
            plot_page = self.make_2d_facet_plot(facet=tab_facet)

        if isinstance(plot_page, GridPlot):
            self.apply_grid_style(plot_page)

        return plot_page

    @staticmethod
    def make_tab(content, tab_label):
        """Creates a container for the contents of a tab.

        Args:
            content (Model): the primary content of the tab
            tab_label (str): the text to place in the tab

        Returns:
            Panel: represents a single tab in a group of tabs
        """
        return Panel(child=content, title=tab_label)

    def make_facets(self, dimension):
        """Creates combination of all facets for the provided dimension

        Args:
            dimension (str): name of the dimension to create facets for

        Returns:
            list(list(DiscreteFacet or ContinuousFacet)): list of list of
                unique facet combinations
        """
        if dimension == 'x':
            facets = self.facet_x
        elif dimension == 'y':
            facets = self.facet_y
        else:
            facets = self.facet_tab

        # create facets for each column
        column_descriptor_dict = self.column_descriptor_dict()
        all_facets = [[]]
        for field in facets:
            # create facets from discrete columns
            if column_descriptor_dict[field]['type'] == 'DiscreteColumn':
                field_facets = [
                    DiscreteFacet(field, val)
                    for val in np.unique(self.df[field].values)
                ]
                # combine any facets as required
                all_facets = cross(all_facets, field_facets)
            else:
                # create quantile based discrete data and pairs of bins
                categorical, bins = pd.qcut(self.df[field], 4, retbins=True)
                cats = categorical.cat.categories
                bins = [[bins[idx], bins[idx + 1]]
                        for idx in range(len(bins) - 1)]
                # open the lowest bin so the minimum value is included
                bins[0][0] = None

                # create list of facets
                field_facets = [
                    ContinuousFacet(field, value, bin)
                    for bin, value in zip(bins, cats)
                ]

                # combine any facets as required
                all_facets = cross(all_facets, field_facets)

        return all_facets

    @staticmethod
    def facet_title(facets):
        """Joins list of facets by commas.

        Args:
            facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
                which are a combination of column and unique value within it

        Returns:
            str: string representation of the combination of facets
        """
        title = ",".join([str(x) for x in facets])
        return title

    def facet_data(self, facets, df=None):
        """Filters data to the rows associated with the given facet.

        Args:
            facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
                which are a combination of column and unique value within it
            df (DataFrame, optional): data to be filtered on

        Returns:
            DataFrame: filtered DataFrame based on provided facets
        """
        if df is None:
            df = self.filtered_df
        for f in facets:
            df = f.filter(df)
        return df

    def make_1d_facet_plot(self, facet=None):
        """Creates the faceted plots when a facet is added to the x axis.

        Returns:
            GridPlot: a grid of plots, where each plot has subset of data
        """
        if self.facet_x:
            all_facets = self.make_facets('x')
        else:
            all_facets = self.make_facets('y')

        plots = []

        # loop over facets and create single plots for data subset
        for facets in all_facets:
            title = self.facet_title(facets)

            if facet:
                facets += facet

            filtered_df = self.df
            df = self.facet_data(facets, filtered_df)
            plot = self.make_single_plot(df=df,
                                         title=title,
                                         plot_height=200,
                                         plot_width=200,
                                         tools="pan,wheel_zoom,reset",
                                         facet=facets)

            # append single plot to list of plots
            plots.append(plot)

        # create squarish grid based on number of plots
        chunk_size = int(np.ceil(np.sqrt(len(plots))))

        # create list of lists of plots, where each list of plots is a row
        grid_plots = []
        for i in range(0, len(plots), chunk_size):
            chunk = plots[i:i + chunk_size]
            grid_plots.append(chunk)

        self.hide_internal_axes(grid_plots)

        # return the grid as the plot
        return GridPlot(children=grid_plots, plot_width=200 * chunk_size)

    def make_2d_facet_plot(self, facet=None):
        """Creates the grid of plots when there are both x and y facets.

        Returns:
            GridPlot: grid of x and y facet combinations
        """
        # ToDo: gracefully handle large combinations of facets
        all_facets_x = self.make_facets('x')
        all_facets_y = self.make_facets('y')

        grid_plots = []

        # y faceting down column
        for facets_y in all_facets_y:

            # x faceting across row
            row = []
            for facets_x in all_facets_x:

                # build the facets and title
                facets = facets_x + facets_y
                title = self.facet_title(facets)

                # must filter by any extra facets provided for facet tab
                if facet:
                    filter_facets = facets + facet
                else:
                    filter_facets = facets

                filtered_df = self.df

                df = self.facet_data(filter_facets, filtered_df)
                plot = self.make_single_plot(df=df,
                                             title=title,
                                             plot_height=200,
                                             plot_width=200,
                                             tools="pan,wheel_zoom,reset",
                                             facet=facets)
                row.append(plot)

            # append the row to the list of rows
            grid_plots.append(row)

        self.hide_internal_axes(grid_plots)

        # return the grid of plots as the plot
        return GridPlot(children=grid_plots,
                        plot_width=200 * len(all_facets_x))

    @staticmethod
    def apply_facet_style(plot):
        """Applies facet-specific style for a given plot.

        Override this method to modify the look of a customized CrossFilter
        for all plugins. Or, apply custom styles in the plugin, since the
        plugin will be told if it is currently being faceted.
        """
        plot.title_text_font_size = "9pt"
        plot.min_border = 0

    def apply_single_plot_style(self, plot):
        """Applies styles when we have only one plot.

        Override this method to modify the look of a customized CrossFilter
        for all plugins.
        """
        plot.min_border_left = 60

    def apply_grid_style(self, grid_plot):
        """Applies facet-specific style for the grid of faceted plots.

        Override this method to modify the look of a customized CrossFilter
        for all plugins. Or, apply custom styles in the plugin, since the
        plugin will be told if it is currently being faceted.
        """
        grid_plot.title_text_font_size = "12pt"
        grid_plot.title_text_font_style = "bold"
        grid_plot.title = self.title

    @staticmethod
    def hide_internal_axes(grid_plots):
        """Hides the internal axes for a grid of plots.

        Only the left column keeps y axes and only the bottom row keeps
        x axes.

        Args:
            grid_plots (list(list(Figure))): list of rows (list), containing
                plots
        """
        for i, row in enumerate(grid_plots):
            is_bottom = i + 1 == len(grid_plots)
            for j, plot in enumerate(row):
                if j != 0:
                    if is_bottom:
                        hide_axes(plot, axes='y')
                    else:
                        hide_axes(plot)
                elif j == 0 and not is_bottom:
                    hide_axes(plot, axes='x')

    def make_single_plot(self,
                         df=None,
                         title=None,
                         plot_width=700,
                         plot_height=680,
                         tools="pan,wheel_zoom,box_zoom,save,resize,"
                               "box_select,reset",
                         facet=None):
        """Creates a plot based on the current app configuration.

        Args:
            df (DataFrame, optional): data to use for the plot
            title (str, optional): plot title
            plot_width (float, optional): width of plot in pixels
            plot_height (float, optional): height of plot in pixels
            tools (str, optional): comma separated string of tool names

        Returns:
            Model: the generated plot
        """
        faceting = False

        # df is not provided when we are not faceting
        if df is None:
            source = self.filtered_data
        else:
            df = self.facet_data(facets=facet, df=df)
            # create column data source with filtered df
            source = ColumnDataSource(data=df)
            faceting = True

        # check for tab faceting and filter if provided
        # NOTE(review): when df was provided this re-applies the same facet
        # filter a second time — confirm that is intentional (facet filters
        # appear idempotent).
        if facet:
            df = self.facet_data(facets=facet, df=df)
            source = ColumnDataSource(data=df)

        # get the helper class for the plot type selected
        plot_class = self.get_plot_class()

        # initialize the plugin class
        plugin = plot_class(source=source,
                            title_text_font_size="12pt",
                            title_text_font_style="bold",
                            plot_height=plot_height,
                            plot_width=plot_width,
                            tools=tools,
                            title=title,
                            x_range=self.x_range,
                            y_range=self.y_range,
                            facet=faceting,
                            crossfilter=self)

        # generate plot
        plot = plugin.get_plot()

        # apply faceting-specific styling if required
        if facet:
            self.apply_facet_style(plot)
            self.title = plugin.title
        else:
            self.apply_single_plot_style(plot)
            self.title = plot.title

        return plot

    def update_xy_ranges(self, source):
        """Updates common x_range, y_range to use for creating figures.

        Args:
            source (ColumnDataSource): the source to return correct range for
        """
        plt_cls = self.get_plot_class()
        x_range, y_range = plt_cls.make_xy_ranges(cf=self)

        # store x and y range from the plot class
        self.x_range = x_range
        self.y_range = y_range

    def plot_attribute_change(self, obj, attrname, old, new):
        """Updates app's attribute and plot when view configuration changes.

        Args:
            obj (Widget): the object that has an attribute change
            attrname (str): name of the attribute
            old (type): the previous value of unknown type
            new (type): the new value of unknown type
        """
        setattr(self, obj.name, new)
        self.set_plot()

    def facet_change(self, obj, attrname, old, new):
        """Updates plot when any facet configuration changes.

        Args:
            obj (Widget): the object that has an attribute change
            attrname (str): name of the attribute
            old (type): the previous value of unknown type
            new (type): the new value of unknown type
        """
        self.set_plot()

    @property
    def df(self):
        """The core data that is used by the app for plotting.

        Returns:
            DataFrame: the original data structure
        """
        if hasattr(self, '_df'):
            return self._df
        else:
            # falls back to the data source; implicitly returns None when
            # neither is available
            if self.data:
                return self.data.to_df()

    @property
    def filtered_df(self):
        """The subset of the data to use for plotting.

        Returns:
            DataFrame: the original data structure
        """
        if hasattr(self, '_df'):
            return self._df
        else:
            if self.filtered_data:
                return self.filtered_data.to_df()

    def update(self, **kwargs):
        """Updates CrossFilter attributes each time the model changes.

        The events are setup each time so that we can add event handlers to
        the selection/filtering widgets as they are added.
        """
        super(CrossFilter, self).update(**kwargs)
        self.setup_events()

    def setup_events(self):
        """Registers events each time the app changes state."""
        # watch the app's filtering_columns attribute to setup filters
        self.on_change('filtering_columns', self, 'setup_filter_widgets')

        # register any available filter widget
        for obj in self.filter_widgets.values():
            if isinstance(obj, InputWidget):
                obj.on_change('value', self, 'handle_filter_selection')

        # watch app column data source attribute for changes
        for obj in self.filter_sources.values():
            obj.on_change('selected', self, 'handle_filter_selection')

        # selector event registration
        if self.plot_selector:
            self.plot_selector.on_change('value', self,
                                         'plot_attribute_change')
        if self.x_selector:
            self.x_selector.on_change('value', self, 'plot_attribute_change')
        if self.y_selector:
            self.y_selector.on_change('value', self, 'plot_attribute_change')
        if self.agg_selector:
            self.agg_selector.on_change('value', self,
                                        'plot_attribute_change')

        # register to watch the app's facet attributes
        self.on_change('facet_x', self, 'facet_change')
        self.on_change('facet_y', self, 'facet_change')
        self.on_change('facet_tab', self, 'facet_change')

    def handle_filter_selection(self, obj, attrname, old, new):
        """Filters the data source whenever a filter widget changes.

        Args:
            obj (Widget): the object that has an attribute change
            attrname (str): name of the attribute
            old (type): the previous value of unknown type
            new (type): the new value of unknown type
        """
        if len(self.filter_widgets) == 0:
            df = self.query('/bench/v1/col/dict/bare')
        else:
            df = self.df

        # loop over the column metadata
        for descriptor in self.columns:
            colname = descriptor['name']

            # handle discrete selections
            if descriptor['type'] == 'DiscreteColumn' and \
                    colname in self.filter_widgets:
                selected = self.filter_widgets[colname].value
                if not selected:
                    continue
                if isinstance(selected, six.string_types):
                    # NOTE(review): `colname == selected` compares two
                    # strings, not the column values — looks like it should be
                    # df[df[colname] == selected]; confirm before relying on
                    # single-value discrete filtering.
                    df = df[colname == selected]
                else:
                    df = df[np.in1d(df[colname], selected)]

            # handle time or continuous selections
            elif descriptor['type'] in ('TimeColumn', 'ContinuousColumn') and \
                    colname in self.filter_widgets:
                obj = self.filter_sources[colname]

                # hack because we don't have true range selection
                if not obj.selected:
                    continue

                # TODO: (bev) This works until CF selections are not made on
                # [multi]lines and [multi]patches
                min_idx = np.min(obj.selected['1d']['indices'])
                max_idx = np.max(obj.selected['1d']['indices'])

                min_val = obj.data['centers'][min_idx]
                max_val = obj.data['centers'][max_idx]
                df = df[(df[colname] >= min_val) & (df[colname] <= max_val)]

        # update filtered data and force plot update
        for colname in self.data.column_names:
            self.filtered_data.data[colname] = df[colname]
        self.filtered_data._dirty = True
        self.set_plot()

    def clear_selections(self, obj, attrname, old, new):
        """Updates filter widgets and sources as they are removed.

        Args:
            obj (Widget): the object that has an attribute change
            attrname (str): name of the attribute
            old (type): the previous value of unknown type
            new (type): the new value of unknown type
        """
        diff = set(old) - set(new)
        column_descriptor_dict = self.column_descriptor_dict()

        # delete any removed filter widgets
        if len(diff) > 0:
            for col in diff:
                metadata = column_descriptor_dict[col]
                # only continuous columns have a backing filter source
                if metadata['type'] != 'DiscreteColumn':
                    del self.filter_sources[col]
                del self.filter_widgets[col]

        # update the data based on latest changes
        if diff:
            self.handle_filter_selection(obj, attrname, old, new)

    def setup_filter_widgets(self, obj, attrname, old, new):
        """Creates new filter widget each time a new column is added to
        filters.

        Args:
            obj (Widget): the object that has an attribute change
            attrname (str): name of the attribute
            old (type): the previous value of unknown type
            new (type): the new value of unknown type
        """
        self.clear_selections(obj, attrname, old, new)

        # add new widget as required for each column set to filter on
        column_descriptor_dict = self.column_descriptor_dict()
        for col in self.filtering_columns:
            metadata = column_descriptor_dict[col]
            if not col in self.filter_widgets:
                # discrete
                if metadata['type'] == 'DiscreteColumn':
                    description = self.query(
                        '/bench/v1/desc/single/{0}'.format(col))
                    options = description['options']
                    select = MultiSelect.create(name=col, options=options)
                    self.filter_widgets[col] = select
                # continuous
                else:
                    col_query = self.query(
                        '/bench/v1/col/single/{0}'.format(col))
                    histogram = col_query['values']
                    source = make_histogram_source(histogram)
                    self.filter_sources[col] = source
                    hist_plot = make_histogram(self.filter_sources[col],
                                               plot_width=200,
                                               plot_height=100,
                                               title_text_font_size='8pt',
                                               tools='box_select')
                    hist_plot.title = col
                    self.filter_widgets[col] = hist_plot
        curdoc()._add_all()

    def set_metadata(self):
        """Creates a list of dicts, containing summary info for each column.

        The descriptions are stored in the ``columns`` property.
        """
        descriptors = []
        descriptions = self.query('/bench/v1/desc/all')
        if descriptions:
            for description in descriptions:
                # DiscreteColumn
                if 'object' in description['df']['dtype']:
                    descriptors.append({
                        'type': "DiscreteColumn",
                        'name': description['column'],
                        'size': description['df']['count'],
                        'unique': description['df']['unique'],
                    })
                # TimeColumn
                elif 'datetime64' in description['df']['dtype']:
                    descriptors.append({
                        'type': "TimeColumn",
                        'name': description['column'],
                        'size': description['df']['count'],
                        'unique': description['df']['unique'],
                    })
                # ContinuousColumn
                else:
                    descriptors.append({
                        'type': "ContinuousColumn",
                        'name': description['column'],
                        'size': description['df']['count'],
                    })
        self.columns = descriptors
class DeckGLPlot(HTMLBox):
    """A Bokeh model that wraps around a DeckGL plot and renders it inside
    a HTMLBox"""

    __css_raw__ = ["https://api.mapbox.com/mapbox-gl-js/v1.7.0/mapbox-gl.css"]

    @classproperty
    def __css__(cls):
        return bundled_files(cls, 'css')

    __javascript_raw__ = [
        "https://cdn.jsdelivr.net/npm/[email protected]/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@deck.gl/[email protected]/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min.js",
        "https://cdn.jsdelivr.net/npm/@loaders.gl/[email protected]/dist/dist.min.js",
        "https://api.mapbox.com/mapbox-gl-js/v1.7.0/mapbox-gl.js",
    ]

    @classproperty
    def __javascript__(cls):
        return bundled_files(cls)

    @classproperty
    def __js_skip__(cls):
        # Last entry of __javascript_raw__ is mapbox-gl; everything before it
        # belongs to the deck.gl/loaders stack.
        return {
            'deck': cls.__javascript__[:-1],
            'mapboxgl': cls.__javascript__[-1:]
        }

    __js_require__ = {
        'paths': OrderedDict([
            ("deck.gl",
             "https://cdn.jsdelivr.net/npm/@deck.gl/jupyter-widget@^8.1.2/dist/index"
             ),
            ("mapbox-gl",
             'https://cdn.jsdelivr.net/npm/[email protected]/dist/mapbox-gl.min'
             ),
        ]),
        'exports': {
            "deck.gl": "deck",
            "mapbox-gl": "mapboxgl"
        }
    }

    # Serialized deck.gl specification and supporting view/interaction state.
    data = Dict(String, Any)
    data_sources = List(Instance(ColumnDataSource))
    initialViewState = Dict(String, Any)
    layers = List(Dict(String, Any))
    mapbox_api_key = String()
    tooltip = Either(Bool, Dict(Any, Any))
    clickState = Dict(String, Any)
    hoverState = Dict(String, Any)
    viewState = Dict(String, Any)
    height = Override(default=400)
    width = Override(default=600)