class SettlingCFSheet(JointNormalizingCFSheet):
    """
    A JointNormalizingCFSheet implementing the idea of settling.

    Breaks continuous time up into discrete iterations, each consisting
    of a series of activations, up to a fixed number of settling steps.
    Settling is controlled by the tsettle parameter; once that number
    of settling steps has been reached, an external input is required
    before the sheet will activate again.

    See the LISSOM algorithm (Sirosh and Miikkulainen, Biological
    Cybernetics 71:66-78, 1994) for one example of its usage.
    """

    strict_tsettle = param.Parameter(default=None, doc="""
        If non-None, delay sending output until activation_count reaches this value.""")

    mask_init_time = param.Integer(default=5, bounds=(0, None), doc="""
        Determines when a new mask is initialized in each new iteration.
        The mask is reset whenever new input comes in. Once the
        activation_count (see tsettle) reaches mask_init_time, the mask
        is initialized to reflect the current activity profile.""")

    tsettle = param.Integer(default=8, bounds=(0, None), doc="""
        Number of times to activate the SettlingCFSheet sheet for each
        external input event.

        A counter is incremented each time an input is received from any
        source, and once the counter reaches tsettle, the last activation
        step is skipped so that there will not be any further recurrent
        activation. The next external (i.e., afferent or feedback) event
        will then start the counter over again.""")

    continuous_learning = param.Boolean(default=False, doc="""
        Whether to modify the weights after every settling step.
        If false, waits until settling is completed before doing learning.""")

    precedence = param.Number(0.6)

    post_initialization_weights_output_fns = param.HookList([], doc="""
        If not empty, weights output_fns that will replace the
        existing ones after an initial normalization step.""")

    beginning_of_iteration = param.HookList(default=[], instantiate=False, doc="""
        List of callables to be executed at the beginning of each iteration.""")

    end_of_iteration = param.HookList(default=[], instantiate=False, doc="""
        List of callables to be executed at the end of each iteration.""")

    def __init__(self, **params):
        super(SettlingCFSheet, self).__init__(**params)
        # Stack of saved (activation_count, new_iteration) pairs,
        # maintained by state_push()/state_pop().
        self.__counter_stack = []
        # Number of settling steps performed in the current iteration.
        self.activation_count = 0
        # True until the first input of each iteration arrives; consumed
        # by input_event() to clear activity at iteration start.
        self.new_iteration = True

    def start(self):
        # Normalize the initial weights once, then (if configured) swap
        # in the post-initialization weights output functions on every
        # incoming Projection.
        self._normalize_weights(active_units_mask=False)
        if len(self.post_initialization_weights_output_fns) > 0:
            for proj in self.in_connections:
                if not isinstance(proj, Projection):
                    self.debug("Skipping non-Projection ")
                else:
                    proj.weights_output_fns = self.post_initialization_weights_output_fns

    def input_event(self, conn, data):
        # On a new afferent input, clear the activity
        if self.new_iteration:
            for f in self.beginning_of_iteration:
                f()
            self.new_iteration = False
            # Zero this sheet's activity and every incoming projection's
            # activity in place, and reset the activity mask.
            self.activity *= 0.0
            for proj in self.in_connections:
                proj.activity *= 0.0
            self.mask.reset()
        super(SettlingCFSheet, self).input_event(conn, data)

    ### JABALERT! There should be some sort of warning when
    ### tsettle times the input delay is larger than the input period.
    ### Right now it seems to do strange things in that case (does it
    ### settle at all after the first iteration?), but of course that
    ### is arguably an error condition anyway (and should thus be
    ### flagged).

    # CEBALERT: there is at least one bug in here for tsettle==0: see
    # CB/JAB email "LISSOM tsettle question", 2010/03/22.
    def process_current_time(self):
        """
        Pass the accumulated stimulation through self.output_fns and
        send it out on the default output port.
        """
        if self.new_input:
            self.new_input = False

            # Initialize the mask at the configured settling step.
            if self.activation_count == self.mask_init_time:
                self.mask.calculate()

            if self.tsettle == 0:
                # Special case: behave just like a CFSheet
                self.activate()
                self.learn()
            elif self.activation_count == self.tsettle:
                # Once we have been activated the required number of times
                # (determined by tsettle), reset various counters, learn
                # if appropriate, and avoid further activation until an
                # external event arrives.
                for f in self.end_of_iteration:
                    f()
                self.activation_count = 0
                self.new_iteration = True  # used by input_event when it is called
                if (self.plastic and not self.continuous_learning):
                    self.learn()
            else:
                # Normal settling step: activate and count it; learn
                # each step only in continuous-learning mode.
                self.activate()
                self.activation_count += 1
                if (self.plastic and self.continuous_learning):
                    self.learn()

    # print the weights of a unit
    def printwts(self, x, y):
        for proj in self.in_connections:
            print proj.name, x, y
            print proj.cfs[x, y].weights

    def state_push(self, **args):
        # Save the settling counters alongside the superclass state.
        super(SettlingCFSheet, self).state_push(**args)
        self.__counter_stack.append((self.activation_count, self.new_iteration))

    def state_pop(self, **args):
        # Restore the settling counters saved by the matching state_push().
        super(SettlingCFSheet, self).state_pop(**args)
        self.activation_count, self.new_iteration = self.__counter_stack.pop()

    def send_output(self, src_port=None, data=None):
        """Send some data out to all connections on the given src_port."""
        out_conns_on_src_port = [conn for conn in self.out_connections
                                 if self._port_match(conn.src_port, [src_port])]

        for conn in out_conns_on_src_port:
            # When strict_tsettle is set, suppress Afferent output until
            # activation_count reaches the threshold.
            if self.strict_tsettle != None:
                if self.activation_count < self.strict_tsettle:
                    if len(conn.dest_port) > 2 and conn.dest_port[2] == 'Afferent':
                        continue
            self.verbose(
                "Sending output on src_port %s via connection %s to %s",
                src_port, conn.name, conn.dest.name)
            # Deliver after the connection's delay, in simulation time units.
            e = EPConnectionEvent(
                self.simulation.convert_to_time_type(conn.delay) +
                self.simulation.time(), conn, data)
            self.simulation.enqueue_event(e)
class Link(Callback):
    """
    A Link defines some connection between a source and target model.
    It allows defining callbacks in response to some change or event
    on the source object. Instead of a Link directly causing some
    action to occur on the target, for JS based backends this usually
    means that a corresponding JS callback will effect some change on
    the target in response to a change on the source.

    A Link must define a source object which is what triggers events,
    and (unless a subclass sets ``_requires_target`` to False) must
    also define a target. It is also possible to define bi-directional
    links between the source and target object.
    """

    bidirectional = param.Boolean(default=False, doc="""
        Whether to link source and target in both directions.""")

    properties = param.Dict(default={}, doc="""
        A dictionary mapping between source specification to target
        specification.""")

    # Whether the link requires a target
    _requires_target = True

    def __init__(self, source, target=None, **params):
        """
        Parameters
        ----------
        source: the object whose events/changes trigger this Link.
        target: the object acted upon; required unless the subclass
            sets _requires_target to False.

        Raises
        ------
        ValueError: if a target is required but not supplied.
        """
        if self._requires_target and target is None:
            raise ValueError('%s must define a target.' % type(self).__name__)
        # Source is stored as a weakref to allow it to be garbage collected
        self._target = None if target is None else weakref.ref(target)
        super().__init__(source, **params)

    @property
    def target(self):
        # Dereference the weakref; returns None once the target has
        # been garbage collected (or was never set).
        return self._target() if self._target else None

    def link(self):
        """
        Registers the Link, skipping registration if an equivalent
        Link (same type, source, target and parameters) is already
        registered for this source.
        """
        self.init()
        if self.source in self.registry:
            links = self.registry[self.source]
            params = {
                k: v
                for k, v in self.param.values().items() if k != 'name'
            }
            for link in links:
                link_params = {
                    k: v
                    for k, v in link.param.values().items() if k != 'name'
                }
                # Deduplicate: an identical link already exists.
                if (type(link) is type(self) and link.source is self.source
                        and link.target is self.target
                        and params == link_params):
                    return
            self.registry[self.source].append(self)
        else:
            self.registry[self.source] = [self]

    def unlink(self):
        """
        Unregisters the Link; a no-op if this Link was never
        registered for its source.
        """
        # Guard against a missing registry entry: registry.get may
        # return None, and `self in None` would raise TypeError.
        links = self.registry.get(self.source, [])
        if self in links:
            links.remove(self)
class GaussianIntervention(DistributionInterventions):
    # Fraction of top hatchers considered (hidden from the UI via
    # negative precedence).
    top_percent_hatchers = pm.Number(0.5, bounds=(0, 1), step=0.01,
                                     precedence=-1)
    # Toggles for the constant and Gaussian-shaped UBI interventions.
    apply_constant_ubi = pm.Boolean(False)
    apply_gaussian_ubi = pm.Boolean(False)
    # Hours added per hatcher by the constant UBI.
    ubi = pm.Number(5, bounds=(0, 100), step=1)
    # Peak height (in hours) of the Gaussian UBI curve.
    gubi = pm.Number(15, bounds=(0, 100), step=1)
    gubi_concentrate = pm.Number(0.03, bounds=(0, 0.05), step=0.01,
                                 doc="Standard Deviation")

    def gaussian_function(self, x):
        """Evaluate the Gaussian UBI curve at position(s) x."""
        # Center the curve on the number of rows whose Impact Hours
        # exceed the geometric mean of the filtered distribution.
        mean = len(self.data[self.data['Impact Hours'] > gmean(
            self.filtered_data()['Impact Hours'])])
        # NOTE(review): the exponent is -(x-mean)^2 * c^2 / 2, i.e.
        # gubi_concentrate acts as an inverse width (larger -> narrower
        # curve), not a standard deviation as its doc claims. With the
        # default c=0.03 this yields a sensible width (~1/c), so the
        # formula may well be intentional -- confirm whether
        # exp(-(x-mean)^2 / (2*c^2)) was meant instead.
        return self.gubi * np.exp(-(
            (x - mean)**2) / 2 * self.gubi_concentrate**2)

    def intervention(self):
        """Return the Gaussian curve sampled at one point per filtered
        row, as a DataFrame with columns 'x' and 'y'."""
        xs = np.linspace(0, len(self.filtered_data()),
                         len(self.filtered_data()))
        ys = self.gaussian_function(xs)
        return pd.DataFrame(zip(xs, ys), columns=['x', 'y'])

    def view_intervention(self):
        """Plot the intervention curve as a line chart."""
        intervention = self.intervention()
        return intervention.hvplot.line(x='x',
                                        y='y',
                                        title='Gaussian Intervention',
                                        height=320).opts(labelled=[])

    def augmented_data(self):
        """Return the filtered Impact Hours with the Gaussian UBI added
        (when enabled), plus each row's share of the total."""
        data = self.filtered_data()
        if self.apply_gaussian_ubi:
            data = pd.DataFrame(data['Impact Hours'] +
                                self.intervention()['y'],
                                columns=['Impact Hours'])
        else:
            data = pd.DataFrame(data['Impact Hours'],
                                columns=['Impact Hours'])
        data['% of distribution'] = data['Impact Hours'] / data[
            'Impact Hours'].sum()
        return data

    def ubi_info(self):
        """Summarize the total hours contributed by each UBI
        intervention in a one-row DataFrame."""
        if self.apply_gaussian_ubi:
            gubi = round(self.intervention()['y'].sum(), 0)
        else:
            gubi = 0
        if self.apply_constant_ubi:
            ubi = round(len(self.filtered_data()) * self.ubi, 0)
        else:
            ubi = 0
        return pd.DataFrame(
            {
                'Gaussian UBI Hours': gubi,
                'Constant UBI Hours': ubi,
                'Total': ubi + gubi,
            },
            index=['value'])
class param_formatter(param.ParameterizedFunction):
    """
    Formats a parameter specification as a compact string suitable for
    use in a directory name. Closely related to the param_formatter in
    topo/command/__init__.py, but instead of relying on parameter repr
    methods it uses the exact, succinct commandline representation
    returned by a Lancet Args object.

    Advantages over the default formatter:

    - Values appear exactly as given on the commandline (e.g. 6.00
      stays 6.00, never re-rendered with extra precision or float
      error).
    - Keys are ordered slowest- to fastest-varying, or alphanumerically
      when requested.
    - The separator is configurable and values may be truncated to a
      maximum length.
    - By default only the varying parameters are included (toggleable).
    """

    abbreviations = param.Dict(default={}, doc='''
       A dictionary of abbreviations to use of type {<key>:<abbrev>}.
       If a specifier key has an entry in the dictionary, the
       abbreviation is used.  Useful for shortening long parameter
       names in the directory structure.''')

    alphanumeric_sort = param.Boolean(default=False, doc='''
        Whether to sort the (potentially abbreviated) keys
        alphabetically or not. By default, keys are ordered from
        slowest varying to fastest varying using thr information
        provided by Lancet's Args object.''')

    format_constant_keys = param.Boolean(default=False, doc='''
        Whether to represent parameters that are known to be constant
        across batches.''')

    truncation_limit = param.Number(default=None, allow_None=True, doc='''
        If None, no truncation is performed, otherwise specifies the
        maximum length of any given specification value.''')

    separator = param.String(default=',', doc="""
        The separator to use between <key>=<value> pairs.""")

    def __call__(self, constant_keys, varying_keys, spec):
        """
        Build the '<key>=<value>' string for the given spec.

        Constant keys are prepended only when format_constant_keys is
        enabled; values are sliced to truncation_limit characters when
        a limit is set (a None limit leaves values untouched).
        """
        if self.format_constant_keys:
            keys = list(constant_keys) + list(varying_keys)
        else:
            keys = list(varying_keys)
        if self.alphanumeric_sort:
            keys = sorted(keys)

        limit = self.truncation_limit
        entries = []
        for key in keys:
            label = self.abbreviations.get(key, key)
            entries.append('%s=%s' % (label, spec[key][:limit]))
        return self.separator.join(entries)
class SheetView(param.Parameterized):
    """
    A SheetView is constructed from a matrix of values, a bounding box
    for that matrix, and a name.

    There are two major ways to create a SheetView: one is from a
    single matrix of data from a single sheet, the other is by
    combining the matrices from multiple matrices or SheetViews.
    """

    # Whether the matrix values represent a cyclic dimension.
    cyclic = param.Boolean(
        default=False,
        doc=
        """Whether or not the values in this View's matrix represent a cyclic dimension."""
    )

    # Range of the cyclic quantity; only meaningful when cyclic is True.
    cyclic_range = param.Parameter(
        None, doc="""If cyclic is True, this value is the cyclic range.""")

    ### JCALERT! term_1 and term_2 should be more explicit...
    ### the 3 cases described in the doc, are they really useful?
    ### shouldn't it be simplified?
    # NOTE: Python 2 only -- tuple-unpacking parameters in function
    # signatures were removed in Python 3 (PEP 3113), and
    # types.ListType no longer exists there.
    def __init__(self, (term_1, term_2), src_name=None, precedence=0.0,
                 timestamp=-1, row_precedence=0.5, **params):
        """
        For ``__init__(self, input_tuple, **params)``, there are three
        types of input_tuples::

          (matrix_data, matrix_bbox)

            This form locks the value of the sheetview to a single
            matrix.  Terminating case of a composite SheetView.

          (operation, [tuple_list])

            'operation' is performed on the matrices collected from
            tuple_list.  See the list of valid operations in
            operations.keys().  Each tuple in the tuple_list is one of
            the following::

              (SheetView, None)

                Another SheetView may be passed in to create nested
                plots.

              (matrix_data, bounding_box)

                Static matrix data complete with bounding box.

              (Sheet, sheet_view_name)

                This gets sheet_name.sheet_view(sheet_view_name) each
                time the current SheetView has its data requested by
                .view().

              (Sheet, sheet_view_name)

                Degenerate case that will pull data from another
                SheetView and not do any additional processing.  Don't
                yet know a use for this case, but documented for
                possible future use.
        """
        super(SheetView, self).__init__(**params)

        # Provenance / ordering metadata for plotting.
        self.src_name = src_name
        self.precedence = precedence
        self.row_precedence = row_precedence
        self.timestamp = timestamp

        # Assume there's no such thing as an operator that can be mistaken
        # for a matrix_data element. This is true as long as the real
        # values of the operator keys are strings.
        if term_1 not in operations.keys():
            # Terminating case: (matrix_data, matrix_bbox) wrapped as a
            # one-element view list under the default ADD operation.
            self.operation = ADD
            self._view_list = [(term_1, term_2)]
        else:
            # Composite case: (operation, tuple_list); normalize a
            # single tuple into a list of one.
            self.operation = operations[term_1]
            self._view_list = term_2
            if not isinstance(self._view_list, types.ListType):
                self._view_list = [self._view_list]
class output(param.ParameterizedFunction):
    """
    Utility function to set output either at the global level or on a
    specific object.

    To set output globally use:

    output(options)

    Where options may be an options specification string (as accepted by
    the %opts magic) or an options specifications dictionary.

    For instance:

    output("backend='bokeh'") # Or equivalently
    output(backend='bokeh')

    To set save output from a specific object do disk using the
    'filename' argument, you can supply the object as the first
    positional argument and supply the filename keyword:

    curve = hv.Curve([1,2,3])
    output(curve, filename='curve.png')

    For compatibility with the output magic, you can supply the object
    as the second argument after the string specification:

    curve = hv.Curve([1,2,3])
    output("filename='curve.png'", curve)

    These two modes are equivalent to the %output line magic and the
    %%output cell magic respectively. Note that only the filename
    argument is supported when supplying an object and all other options
    are ignored.
    """

    filename_warning = param.Boolean(default=True, doc="""
        Whether to warn if the output utility is called on an object
        and a filename is not given (in which case the utility has no
        effect)""")

    def __call__(self, *args, **options):
        """
        Dispatch between the global and per-object modes described in
        the class docstring.

        Returns the supplied object (if any) so the call can be used
        inline; returns None when only global settings are applied.

        Raises TypeError for more than two positional arguments and
        KeyError for keywords not accepted by the output settings.
        """
        warn = options.pop('filename_warning', self.filename_warning)
        help_prompt = 'For help with hv.util.output call help(hv.util.output)'
        line, obj = None, None
        if len(args) > 2:
            # Fixed: the message previously referred to the 'opts'
            # utility (copy-paste from hv.util.opts).
            raise TypeError(
                'The output utility accepts one or two positional arguments.')
        if len(args) == 1 and options:
            obj = args[0]
        elif len(args) == 1:
            line = args[0]
        elif len(args) == 2:
            (line, obj) = args

        if isinstance(obj, Dimensioned):
            if line:
                # A line spec overrides any keyword options.
                options = Store.output_settings.extract_keywords(line, {})
            for k in options:
                if k not in Store.output_settings.allowed:
                    raise KeyError('Invalid keyword: %s' % k)
            if 'filename' in options:

                def save_fn(obj, renderer):
                    renderer.save(obj, options['filename'])

                Store.output_settings.output(line=line,
                                             cell=obj,
                                             cell_runner=save_fn,
                                             help_prompt=help_prompt,
                                             **options)
            elif warn:
                self.warning("hv.output not supplied a filename to export the "
                             "given object. This call will have no effect.")
            return obj
        elif obj is not None:
            # Non-Dimensioned object: nothing to do, hand it back.
            return obj
        else:
            # Global mode: apply the settings for all subsequent output.
            Store.output_settings.output(line=line,
                                         help_prompt=help_prompt,
                                         **options)
class Dynamic(param.ParameterizedFunction):
    """
    Dynamically applies a callable to the Elements in any HoloViews
    object. Will return a DynamicMap wrapping the original map object,
    which will lazily evaluate when a key is requested. By default
    Dynamic applies a no-op, making it useful for converting HoloMaps
    to a DynamicMap.

    Any supplied kwargs will be passed to the callable and any streams
    will be instantiated on the returned DynamicMap. If the supplied
    operation is a method on a parameterized object which was
    decorated with parameter dependencies Dynamic will automatically
    create a stream to watch the parameter changes. This default
    behavior may be disabled by setting watch=False.
    """

    operation = param.Callable(default=lambda x: x, doc="""
        Operation or user-defined callable to apply dynamically""")

    kwargs = param.Dict(default={}, doc="""
        Keyword arguments passed to the function.""")

    link_inputs = param.Boolean(default=True, doc="""
        If Dynamic is applied to another DynamicMap, determines whether
        linked streams attached to its Callable inputs are transferred
        to the output of the utility.

        For example if the Dynamic utility is applied to a DynamicMap
        with an RangeXY, this switch determines whether the
        corresponding visualization should update this stream with
        range changes originating from the newly generated axes.""")

    shared_data = param.Boolean(default=False, doc="""
        Whether the cloned DynamicMap will share the same cache.""")

    streams = param.List(default=[], doc="""
        List of streams to attach to the returned DynamicMap""")

    def __call__(self, map_obj, **params):
        # 'watch' is popped before building ParamOverrides so it is not
        # treated as a parameter override; it controls automatic stream
        # creation for parameterized methods (see _get_streams).
        watch = params.pop('watch', True)
        self.p = param.ParamOverrides(self, params)
        callback = self._dynamic_operation(map_obj)
        streams = self._get_streams(map_obj, watch)
        if isinstance(map_obj, DynamicMap):
            # Already dynamic: clone with the new callback and streams.
            dmap = map_obj.clone(callback=callback,
                                 shared_data=self.p.shared_data,
                                 streams=streams)
        else:
            # HoloMap or element: wrap it in an equivalent DynamicMap.
            dmap = self._make_dynamic(map_obj, callback, streams)
        return dmap

    def _get_streams(self, map_obj, watch=True):
        """
        Generates a list of streams to attach to the returned DynamicMap.
        If the input is a DynamicMap any streams that are supplying values
        for the key dimension of the input are inherited. And the list
        of supplied stream classes and instances are processed and
        added to the list.
        """
        streams = []
        for stream in self.p.streams:
            # Accept both Stream classes (instantiated here) and
            # pre-built Stream instances.
            if inspect.isclass(stream) and issubclass(stream, Stream):
                stream = stream()
            elif not isinstance(stream, Stream):
                raise ValueError('Streams must be Stream classes or instances')
            if isinstance(self.p.operation, Operation):
                # Seed unset stream contents from the operation's
                # parameter values, honoring any stream renames.
                updates = {
                    k: self.p.operation.p.get(k)
                    for k, v in stream.contents.items()
                    if v is None and k in self.p.operation.p
                }
                if updates:
                    reverse = {v: k for k, v in stream._rename.items()}
                    stream.update(
                        **{reverse.get(k, k): v
                           for k, v in updates.items()})
            streams.append(stream)

        if isinstance(map_obj, DynamicMap):
            # Inherit streams that drive the input's key dimensions.
            dim_streams = util.dimensioned_streams(map_obj)
            streams = list(util.unique_iterator(streams + dim_streams))

        # If callback is a parameterized method and watch is disabled add as stream
        # NOTE(review): this is a lexicographic string comparison, so
        # e.g. '1.10.0' < '1.8.0' -- presumably fine for the param
        # versions in use at the time, but worth confirming.
        param_watch_support = util.param_version >= '1.8.0' and watch
        if util.is_param_method(self.p.operation) and param_watch_support:
            streams.append(self.p.operation)

        valid, invalid = Stream._process_streams(streams)
        if invalid:
            msg = ('The supplied streams list contains objects that '
                   'are not Stream instances: {objs}')
            raise TypeError(
                msg.format(objs=', '.join('%r' % el for el in invalid)))
        return valid

    def _process(self, element, key=None):
        # Apply the operation to a single element; Operation instances
        # only receive the kwargs they declare as parameters.
        if isinstance(self.p.operation, Operation):
            kwargs = {
                k: v
                for k, v in self.p.kwargs.items()
                if k in self.p.operation.params()
            }
            return self.p.operation.process_element(element, key, **kwargs)
        else:
            return self.p.operation(element, **self.p.kwargs)

    def _dynamic_operation(self, map_obj):
        """
        Generate function to dynamically apply the operation.
        Wraps an existing HoloMap or DynamicMap.
        """
        if not isinstance(map_obj, DynamicMap):

            def dynamic_operation(*key, **kwargs):
                self.p.kwargs.update(kwargs)
                # Plain elements are processed directly; HoloMaps are
                # indexed by the requested key first.
                obj = map_obj[key] if isinstance(map_obj, HoloMap) else map_obj
                return self._process(obj, key)
        else:

            def dynamic_operation(*key, **kwargs):
                self.p.kwargs.update(kwargs)
                return self._process(map_obj[key], key)

        # Wrap in an (Operation)Callable so stream/input linking works.
        if isinstance(self.p.operation, Operation):
            return OperationCallable(dynamic_operation,
                                     inputs=[map_obj],
                                     link_inputs=self.p.link_inputs,
                                     operation=self.p.operation)
        else:
            return Callable(dynamic_operation,
                            inputs=[map_obj],
                            link_inputs=self.p.link_inputs)

    def _make_dynamic(self, hmap, dynamic_fn, streams):
        """
        Accepts a HoloMap and a dynamic callback function creating
        an equivalent DynamicMap from the HoloMap.
        """
        if isinstance(hmap, ViewableElement):
            # A bare element needs no key dimensions.
            return DynamicMap(dynamic_fn, streams=streams)
        # Transpose the HoloMap keys into per-dimension value lists.
        dim_values = zip(*hmap.data.keys())
        params = util.get_param_values(hmap)
        kdims = [
            d(values=list(util.unique_iterator(values)))
            for d, values in zip(hmap.kdims, dim_values)
        ]
        return DynamicMap(dynamic_fn,
                          streams=streams,
                          **dict(params, kdims=kdims))
class BasicTemplate(BaseTemplate):
    """
    BasicTemplate provides a baseclass for templates with a basic
    organization including a header, sidebar and main area. Unlike the
    more generic Template class these default templates make it easy
    for a user to generate an application with a polished look and
    feel without having to write any Jinja2 template themselves.
    """

    config = param.ClassSelector(default=_base_config(),
                                 class_=_base_config,
                                 constant=True,
                                 doc="""
        Configuration object declaring custom CSS and JS files to load
        specifically for this template.""")

    busy_indicator = param.ClassSelector(default=LoadingSpinner(width=20,
                                                                height=20),
                                         class_=BooleanIndicator,
                                         constant=True,
                                         doc="""
        Visual indicator of application busy state.""")

    header = param.ClassSelector(class_=ListLike, constant=True, doc="""
        A list-like container which populates the header bar.""")

    main = param.ClassSelector(class_=ListLike, constant=True, doc="""
        A list-like container which populates the main area.""")

    main_max_width = param.String(default="", doc="""
        The maximum width of the main area. For example '800px' or
        '80%'. If the string is '' (default) no max width is set.""")

    sidebar = param.ClassSelector(class_=ListLike, constant=True, doc="""
        A list-like container which populates the sidebar.""")

    modal = param.ClassSelector(class_=ListLike, constant=True, doc="""
        A list-like container which populates the modal""")

    logo = param.String(constant=True, doc="""
        URI of logo to add to the header (if local file, logo is
        base64 encoded as URI).""")

    favicon = param.String(default=FAVICON_URL, constant=True, doc="""
        URI of favicon to add to the document head (if local file,
        favicon is base64 encoded as URI).""")

    title = param.String(default="Panel Application", doc="""
        A title to show in the header. Also added to the document head
        meta settings and as the browser tab title.""")

    site = param.String(default="", doc="""
        The name of the site. Will be shown in the header and link to
        the root of the site. Default is '', i.e. not shown.""")

    meta_description = param.String(doc="""
        A meta description to add to the document head for search
        engine optimization. For example 'P.A. Nelson'.""")

    meta_keywords = param.String(doc="""
        Meta keywords to add to the document head for search engine
        optimization.""")

    meta_author = param.String(doc="""
        A meta author to add to the the document head for search engine
        optimization. For example 'P.A. Nelson'.""")

    meta_refresh = param.String(doc="""
        A meta refresh rate to add to the document head. For example
        '30' will instruct the browser to refresh every 30 seconds.
        Default is '', i.e. no automatic refresh.""")

    meta_viewport = param.String(doc="""
        A meta viewport to add to the header.""")

    base_url = param.String(doc="""
        Specifies the base URL for all relative URLs in a page. Default
        is '', i.e. not the domain.""")

    base_target = param.ObjectSelector(
        default="_self",
        objects=["_blank", "_self", "_parent", "_top"],
        doc="""
        Specifies the base Target for all relative URLs in a page.""")

    header_background = param.String(doc="""
        Optional header background color override.""")

    header_color = param.String(doc="""
        Optional header text color override.""")

    theme = param.ClassSelector(class_=Theme,
                                default=DefaultTheme,
                                constant=True,
                                is_instance=False,
                                instantiate=False)

    location = param.Boolean(default=True, readonly=True)

    #############
    # Resources #
    #############

    # Resource locations for bundled resources
    _CDN = CDN_DIST
    _LOCAL = LOCAL_DIST

    # pathlib.Path pointing to local CSS file(s)
    _css = None

    # pathlib.Path pointing to local JS file(s)
    _js = None

    # pathlib.Path pointing to local Jinja2 template
    _template = None

    # External resources
    _resources = {'css': {}, 'js': {}, 'js_modules': {}, 'tarball': {}}

    _modifiers = {}

    __abstract = True

    def __init__(self, **params):
        template = self._template.read_text()
        # Coerce each of the four layout areas to its declared container
        # type, defaulting to an empty ListLike when not supplied.
        for area in ('header', 'main', 'sidebar', 'modal'):
            if area not in params:
                params[area] = ListLike()
            else:
                params[area] = self._get_params(params[area],
                                                getattr(self.param, area).class_)
        super().__init__(template=template, **params)
        if self.busy_indicator:
            state.sync_busy(self.busy_indicator)
        # Hidden HTML pane used to inject JS snippets (modal open/close).
        self._js_area = HTML(margin=0, width=0, height=0)
        self._render_items['js_area'] = (self._js_area, [])
        self._update_busy()
        self.main.param.watch(self._update_render_items, ['objects'])
        self.modal.param.watch(self._update_render_items, ['objects'])
        self.sidebar.param.watch(self._update_render_items, ['objects'])
        self.header.param.watch(self._update_render_items, ['objects'])
        # Trigger once so initial contents are registered as render items.
        self.main.param.trigger('objects')
        self.sidebar.param.trigger('objects')
        self.header.param.trigger('objects')
        self.modal.param.trigger('objects')

    def _init_doc(self, doc=None, comm=None, title=None, notebook=False,
                  location=True):
        # Populate the render variables, then apply the theme's bokeh
        # theme to the document (if any).
        title = title or self.title
        self._update_vars()
        doc = super()._init_doc(doc, comm, title, notebook, location)
        if self.theme:
            theme = self._get_theme()
            if theme and theme.bokeh_theme:
                doc.theme = theme.bokeh_theme
        return doc

    def _apply_hooks(self, viewable, root):
        BaseTemplate._apply_hooks(viewable, root)
        theme = self._get_theme()
        if theme and theme.bokeh_theme and root.document:
            root.document.theme = theme.bokeh_theme

    def _get_theme(self):
        # Resolve and instantiate the theme declared for this template type.
        return self.theme.find_theme(type(self))()

    def _template_resources(self):
        """
        Resolve all CSS/JS resources for this template, rewriting paths
        to bundled copies (local dist or CDN) when available and
        inlining CSS files that are not bundled.
        """
        name = type(self).__name__.lower()

        resources = _settings.resources(default="server")
        if resources == 'server':
            if state.rel_path:
                dist_path = f'{state.rel_path}/{self._LOCAL}'
            else:
                dist_path = self._LOCAL
        else:
            dist_path = self._CDN

        # External resources
        css_files = dict(self._resources.get('css', {}))
        for cssname, css in css_files.items():
            css_path = url_path(css)
            if (BUNDLE_DIR / 'css' /
                    css_path.replace('/', os.path.sep)).is_file():
                css_files[cssname] = dist_path + f'bundled/css/{css_path}'
        js_files = dict(self._resources.get('js', {}))
        for jsname, js in js_files.items():
            js_path = url_path(js)
            if (BUNDLE_DIR / 'js' /
                    js_path.replace('/', os.path.sep)).is_file():
                js_files[jsname] = dist_path + f'bundled/js/{js_path}'
        js_modules = dict(self._resources.get('js_modules', {}))
        for jsname, js in js_modules.items():
            js_path = url_path(js)
            if jsname in self._resources.get('tarball', {}):
                js_path += '/index.mjs'
            else:
                js_path += '.mjs'
            if os.path.isfile(BUNDLE_DIR / js_path.replace('/', os.path.sep)):
                js_modules[jsname] = dist_path + f'bundled/js/{js_path}'
        # Fixed: these loops previously used 'name' as the loop variable,
        # clobbering the template name computed above and corrupting the
        # tmpl_name fallbacks used in the CSS/JS bundling sections below.
        for res_name, js in self.config.js_files.items():
            if not '//' in js and state.rel_path:
                js = f'{state.rel_path}/{js}'
            js_files[res_name] = js
        for res_name, js in self.config.js_modules.items():
            if not '//' in js and state.rel_path:
                js = f'{state.rel_path}/{js}'
            js_modules[res_name] = js

        extra_css = []
        for css in list(self.config.css_files):
            if not '//' in css and state.rel_path:
                css = f'{state.rel_path}/{css}'
            extra_css.append(css)
        raw_css = list(self.config.raw_css)

        # CSS files
        base_css = self._css
        if not isinstance(base_css, list):
            base_css = [base_css] if base_css else []
        for css in base_css:
            # Attribute the file to the class in the MRO that declares it,
            # so the bundled path matches that template's directory.
            tmpl_name = name
            for cls in type(self).__mro__[1:-5]:
                tmpl_css = cls._css if isinstance(cls._css, list) else [cls._css]
                if css in tmpl_css:
                    tmpl_name = cls.__name__.lower()
            css_file = os.path.basename(css)
            if (BUNDLE_DIR / tmpl_name / css_file).is_file():
                css_files[
                    f'base_{css_file}'] = dist_path + f'bundled/{tmpl_name}/{css_file}'
            else:
                # Not bundled: inline the file's contents instead.
                with open(css, encoding='utf-8') as f:
                    raw_css.append(f.read())

        # JS files
        base_js = self._js
        if not isinstance(base_js, list):
            base_js = [base_js] if base_js else []
        for js in base_js:
            tmpl_name = name
            for cls in type(self).__mro__[1:-5]:
                tmpl_js = cls._js if isinstance(cls._js, list) else [cls._js]
                if js in tmpl_js:
                    tmpl_name = cls.__name__.lower()
            js = os.path.basename(js)
            if (BUNDLE_DIR / tmpl_name / js).is_file():
                js_files[
                    f'base_{js}'] = dist_path + f'bundled/{tmpl_name}/{js}'

        if self.theme:
            theme = self.theme.find_theme(type(self))
            if theme:
                if theme.base_css:
                    basename = os.path.basename(theme.base_css)
                    owner = theme.param.base_css.owner.__name__.lower()
                    if (BUNDLE_DIR / owner / basename).is_file():
                        css_files[
                            'theme_base'] = dist_path + f'bundled/{owner}/{basename}'
                    else:
                        with open(theme.base_css, encoding='utf-8') as f:
                            raw_css.append(f.read())
                if theme.css:
                    basename = os.path.basename(theme.css)
                    if (BUNDLE_DIR / name / basename).is_file():
                        css_files[
                            'theme'] = dist_path + f'bundled/{name}/{basename}'
                    else:
                        # Fixed: previously opened theme.base_css here,
                        # inlining the wrong stylesheet.
                        with open(theme.css, encoding='utf-8') as f:
                            raw_css.append(f.read())

        return {
            'css': css_files,
            'extra_css': extra_css,
            'raw_css': raw_css,
            'js': js_files,
            'js_modules': js_modules
        }

    def _update_vars(self, *args):
        """Refresh the Jinja2 render variables from current parameters,
        base64-encoding local logo/favicon files."""
        self._render_variables['app_title'] = self.title
        self._render_variables['meta_name'] = self.title
        self._render_variables['site_title'] = self.site
        self._render_variables['meta_description'] = self.meta_description
        self._render_variables['meta_keywords'] = self.meta_keywords
        self._render_variables['meta_author'] = self.meta_author
        self._render_variables['meta_refresh'] = self.meta_refresh
        self._render_variables['meta_viewport'] = self.meta_viewport
        self._render_variables['base_url'] = self.base_url
        self._render_variables['base_target'] = self.base_target
        if os.path.isfile(self.logo):
            img = _panel(self.logo)
            if not isinstance(img, ImageBase):
                # Fixed: message was missing the f prefix, so the
                # placeholder was emitted literally.
                raise ValueError(
                    f"Could not determine file type of logo: {self.logo}.")
            logo = img._b64()
        else:
            logo = self.logo
        if os.path.isfile(self.favicon):
            img = _panel(self.favicon)
            if not isinstance(img, ImageBase):
                # Fixed: message was missing the f prefix (see above).
                raise ValueError(
                    f"Could not determine file type of favicon: {self.favicon}.")
            favicon = img._b64()
        else:
            if _settings.resources(
                    default='server') == 'cdn' and self.favicon == FAVICON_URL:
                favicon = CDN_DIST + "icons/favicon.ico"
            else:
                favicon = self.favicon
        self._render_variables[
            'template_resources'] = self._template_resources()
        self._render_variables['app_logo'] = logo
        self._render_variables['app_favicon'] = favicon
        self._render_variables['app_favicon_type'] = self._get_favicon_type(
            self.favicon)
        self._render_variables['header_background'] = self.header_background
        self._render_variables['header_color'] = self.header_color
        self._render_variables['main_max_width'] = self.main_max_width

    def _update_busy(self):
        # Register or remove the busy indicator as a render item.
        if self.busy_indicator:
            self._render_items['busy_indicator'] = (self.busy_indicator, [])
        elif 'busy_indicator' in self._render_items:
            del self._render_items['busy_indicator']
        self._render_variables['busy'] = self.busy_indicator is not None

    def _update_render_items(self, event):
        """Sync the template render items with the contents of the area
        (header/main/sidebar/modal) that changed."""
        if event.obj is self and event.name == 'busy_indicator':
            return self._update_busy()
        if event.obj is self.main:
            tag = 'main'
        elif event.obj is self.sidebar:
            tag = 'nav'
        elif event.obj is self.header:
            tag = 'header'
        elif event.obj is self.modal:
            tag = 'modal'

        old = event.old if isinstance(event.old, list) else list(
            event.old.values())
        for obj in old:
            ref = str(id(obj))
            if ref in self._render_items:
                del self._render_items[ref]

        new = event.new if isinstance(event.new, list) else event.new.values()
        for o in new:
            if o not in old:
                # Newly added objects inherit the template's bokeh theme
                # on any embedded HoloViews panes.
                for hvpane in o.select(HoloViews):
                    if self.theme.bokeh_theme:
                        hvpane.theme = self.theme.bokeh_theme

        labels = {}
        for obj in new:
            ref = str(id(obj))
            # Auto-generated names (ClassName#####) get a generic label.
            if obj.name.startswith(type(obj).__name__):
                labels[ref] = 'Content'
            else:
                labels[ref] = obj.name
            self._render_items[ref] = (obj, [tag])
        tags = [tags for _, tags in self._render_items.values()]
        self._render_variables['nav'] = any('nav' in ts for ts in tags)
        self._render_variables['header'] = any('header' in ts for ts in tags)
        self._render_variables['root_labels'] = labels

    def open_modal(self):
        """
        Opens the modal area
        """
        self._js_area.object = """
        <script>
          var modal = document.getElementById("pn-Modal");
          modal.style.display = "block";
        </script>
        """
        self._js_area.object = ""

    def close_modal(self):
        """
        Closes the modal area
        """
        self._js_area.object = """
        <script>
          var modal = document.getElementById("pn-Modal");
          modal.style.display = "none";
        </script>
        """
        self._js_area.object = ""

    @staticmethod
    def _get_favicon_type(favicon):
        """Map a favicon filename to its MIME type; raises ValueError
        for unsupported extensions."""
        if not favicon:
            return ""
        elif favicon.endswith(".png"):
            return "image/png"
        elif favicon.endswith("jpg"):
            return "image/jpg"
        elif favicon.endswith("gif"):
            return "image/gif"
        elif favicon.endswith("svg"):
            return "image/svg"
        elif favicon.endswith("ico"):
            return "image/x-icon"
        else:
            raise ValueError("favicon type not supported.")

    @staticmethod
    def _get_params(value, class_):
        """Coerce a user-supplied value (single object, tuple or list)
        into the requested container class (ListLike or GridSpec)."""
        if isinstance(value, class_):
            return value
        if isinstance(value, tuple):
            value = [*value]
        elif not isinstance(value, list):
            value = [value]

        # Important to fx. convert @param.depends functions
        value = [_panel(item) for item in value]

        if class_ is ListLike:
            return ListLike(objects=value)
        if class_ is GridSpec:
            grid = GridSpec(ncols=12, mode='override')
            for index, item in enumerate(value):
                grid[index, :] = item
            return grid
        return value
class BaseTemplate(param.Parameterized, ServableMixin):
    """
    Abstract base class for Jinja2-backed templates that render a set
    of Panel components (``_render_items``) plus template variables
    (``_render_variables``) into a bokeh Document.
    """

    location = param.Boolean(default=False, doc="""
        Whether to add a Location component to this Template.
        Note if this is set to true, the Jinja2 template must
        either insert all available roots or explicitly embed
        the location root with : {{ embed(roots.location) }}.""")

    # Dictionary of property overrides by bokeh Model type
    _modifiers = {}

    __abstract = True

    def __init__(self, template=None, items=None, nb_template=None, **params):
        # template/nb_template may be given as strings, in which case they
        # are compiled into Jinja2 _Template objects; the raw source is kept
        # on self._code only when a string was supplied.
        super().__init__(**params)
        if isinstance(template, string_types):
            self._code = template
            template = _Template(template)
        else:
            self._code = None
        self.template = template
        if isinstance(nb_template, string_types):
            nb_template = _Template(nb_template)
        # Fall back to the server template when no notebook template is given.
        self.nb_template = nb_template or template
        self._render_items = OrderedDict()   # name -> (object, tags)
        self._render_variables = {}          # extra Jinja2 template variables
        self._server = None
        self._layout = self._build_layout()

    def _build_layout(self):
        """
        Build the repr/launch-button layout shown when the template itself
        is displayed (e.g. in a notebook) rather than served.
        """
        str_repr = Str(repr(self))
        server_info = HTML('')
        button = Button(name='Launch server')
        def launch(event):
            # Toggle: stop a running server or start a new one and show
            # its port in the server_info pane.
            if self._server:
                button.name = 'Launch server'
                server_info.object = ''
                self._server.stop()
                self._server = None
            else:
                button.name = 'Stop server'
                self._server = self._get_server(start=True, show=True)
                server_info.object = _server_info.format(
                    port=self._server.port)
        button.param.watch(launch, 'clicks')
        return Column(str_repr, server_info, button)

    def __repr__(self):
        # Lists the public render items (names starting with '_' are
        # internal, e.g. the busy indicator) one per indented line.
        cls = type(self).__name__
        spacer = '\n    '
        objs = [
            '[%s] %s' % (name, obj[0].__repr__(1))
            for name, obj in self._render_items.items()
            if not name.startswith('_')
        ]
        template = '{cls}{spacer}{objs}'
        return template.format(
            cls=cls, objs=('%s' % spacer).join(objs), spacer=spacer)

    @classmethod
    def _apply_hooks(cls, viewable, root):
        # Applied as a render hook on every root: walks the viewable tree
        # and applies the class-level property modifiers to each node.
        ref = root.ref['id']
        for o in viewable.select():
            cls._apply_modifiers(o, ref)

    @classmethod
    def _apply_modifiers(cls, viewable, mref):
        """
        Apply ``cls._modifiers`` overrides to the bokeh model of a single
        viewable (and, via the special 'children' key, to its children),
        but only for parameters still at their default value so explicit
        user settings are never clobbered.
        """
        if mref not in viewable._models:
            return
        model, _ = viewable._models[mref]
        modifiers = cls._modifiers.get(type(viewable), {})
        child_modifiers = modifiers.get('children', {})
        if child_modifiers:
            for child in viewable:
                child_params = {
                    k: v for k, v in child_modifiers.items()
                    if getattr(child, k) == child.param[k].default
                }
                child.param.set_param(**child_params)
                child_props = child._process_param_change(child_params)
                child._models[mref][0].update(**child_props)
        params = {
            k: v for k, v in modifiers.items()
            if k != 'children' and
            getattr(viewable, k) == viewable.param[k].default
        }
        viewable.param.set_param(**params)
        props = viewable._process_param_change(params)
        model.update(**props)

    def _apply_root(self, name, viewable, tags):
        # Hook for subclasses to post-process each root model; no-op here.
        pass

    def _init_doc(self, doc=None, comm=None, title=None, notebook=False,
                  location=True):
        """
        Attach every render item to a bokeh Document as a named, tagged
        root, run the preprocessors across all roots via a fake root, and
        install the Jinja2 template and template variables on the Document.
        """
        doc = doc or _curdoc()
        title = title or 'Panel Application'
        if location and self.location:
            loc = self._add_location(doc, location)
            doc.on_session_destroyed(loc._server_destroy)
        doc.title = title

        # Initialize fake root. This is needed to ensure preprocessors
        # which assume that all models are owned by a single root can
        # link objects across multiple roots in a template.
        col = Column()
        preprocess_root = col.get_root(doc, comm)
        col._hooks.append(self._apply_hooks)
        ref = preprocess_root.ref['id']

        objs = []
        for name, (obj, tags) in self._render_items.items():
            if self._apply_hooks not in obj._hooks:
                obj._hooks.append(self._apply_hooks)
            # We skip preprocessing on the individual roots
            model = obj.get_root(doc, comm, preprocess=False)
            mref = model.ref['id']
            doc.on_session_destroyed(obj._server_destroy)
            for sub in obj.select(Viewable):
                # Alias each sub-viewable's model under the fake root ref so
                # preprocessors see one unified root.
                submodel = sub._models.get(mref)
                if submodel is None:
                    continue
                sub._models[ref] = submodel
                if isinstance(sub, HoloViews) and mref in sub._plots:
                    sub._plots[ref] = sub._plots.get(mref)
            obj._documents[doc] = model
            model.name = name
            model.tags = tags
            self._apply_root(name, model, tags)
            add_to_doc(model, doc, hold=bool(comm))
            objs.append(obj)

        # Here we ensure that the preprocessor is run across all roots
        # and set up session cleanup hooks for the fake root.
        state._fake_roots.append(ref)
        state._views[ref] = (col, preprocess_root, doc, comm)
        col.objects = objs
        col._preprocess(preprocess_root)
        col._documents[doc] = preprocess_root
        doc.on_session_destroyed(col._server_destroy)

        if notebook:
            doc.template = self.nb_template
        else:
            doc.template = self.template
        doc._template_variables.update(self._render_variables)
        return doc

    def _repr_mimebundle_(self, include=None, exclude=None):
        # Notebook display hook: renders the template through a Jupyter
        # comm. Returns None (plain repr) when the panel extension (or
        # the holoviews one) has not been loaded.
        loaded = panel_extension._loaded
        if not loaded and 'holoviews' in sys.modules:
            import holoviews as hv
            loaded = hv.extension._loaded
        if not loaded:
            param.main.param.warning(
                'Displaying Panel objects in the notebook requires '
                'the panel extension to be loaded. Ensure you run '
                'pn.extension() before displaying objects in the '
                'notebook.')
            return None

        try:
            # Only switch to the Jupyter comm manager inside a live kernel.
            assert get_ipython().kernel is not None # noqa
            state._comm_manager = _JupyterCommManager
        except Exception:
            pass

        from IPython.display import display

        doc = _Document()
        comm = state._comm_manager.get_server_comm()
        self._init_doc(doc, comm, notebook=True)
        ref = doc.roots[0].ref['id']
        manager = CommManager(
            comm_id=comm.id, plot_id=ref, name='comm_manager')
        client_comm = state._comm_manager.get_client_comm(
            on_msg=partial(self._on_msg, ref, manager),
            on_error=partial(self._on_error, ref),
            on_stdout=partial(self._on_stdout, ref))
        manager.client_comm_id = client_comm.id
        doc.add_root(manager)

        if config.console_output != 'disable':
            handle = display(display_id=uuid.uuid4().hex)
            state._handles[ref] = (handle, [])

        return render_template(doc, comm, manager)

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    def save(self, filename, title=None, resources=None, embed=False,
             max_states=1000, max_opts=3, embed_json=False,
             json_prefix='', save_path='./', load_path=None):
        """
        Saves Panel objects to file.

        Arguments
        ---------
        filename: string or file-like object
           Filename to save the plot to
        title: string
           Optional title for the plot
        resources: bokeh resources
           One of the valid bokeh.resources (e.g. CDN or INLINE)
        embed: bool
           Whether the state space should be embedded in the saved file.
        max_states: int
           The maximum number of states to embed
        max_opts: int
           The maximum number of states for a single widget
        embed_json: boolean (default=True)
           Whether to export the data to json files
        json_prefix: str (default='')
           Prefix for the auto-generated json directory
        save_path: str (default='./')
           The path to save json files to
        load_path: str (default=None)
           The path or URL the json files will be loaded from.
        """
        if embed:
            raise ValueError("Embedding is not yet supported on Template.")

        return save(self, filename, title, resources, self.template,
                    self._render_variables, embed, max_states, max_opts,
                    embed_json, json_prefix, save_path, load_path)

    def server_doc(self, doc=None, title=None, location=True):
        """
        Returns a servable bokeh Document with the panel attached

        Arguments
        ---------
        doc : bokeh.Document (optional)
          The Bokeh Document to attach the panel to as a root,
          defaults to bokeh.io.curdoc()
        title : str
          A string title to give the Document
        location : boolean or panel.io.location.Location
          Whether to create a Location component to observe and
          set the URL location.

        Returns
        -------
        doc : bokeh.Document
          The Bokeh document the panel was attached to
        """
        return self._init_doc(doc, title=title, location=location)

    def select(self, selector=None):
        """
        Iterates over the Template and any potential children,
        applying the Selector.

        Arguments
        ---------
        selector: type or callable or None
          The selector allows selecting a subset of Viewables by
          declaring a type or callable function to filter by.

        Returns
        -------
        viewables: list(Viewable)
        """
        objects = []
        for obj, _ in self._render_items.values():
            objects += obj.select(selector)
        return objects
class InteractiveDatashaderGraph(InteractiveDatashaderBase):
    # Composite graph chart: a datashaded node scatter layer overlaid on a
    # datashaded edge layer, with optional map tiles underneath. The two
    # layers are delegated to InteractiveDatashaderPoints / -Line charts.

    nodes_df = param.ClassSelector(
        class_=(cudf.DataFrame, dask_cudf.DataFrame),
        doc="nodes cuDF/dask_cuDF dataframe",
    )
    edges_df = param.ClassSelector(
        class_=(cudf.DataFrame, dask_cudf.DataFrame),
        doc="edges cuDF/dask_cuDF dataframe",
    )
    node_x = param.String("x")
    node_y = param.String("y")
    node_pixel_shade_type = param.String("linear")
    node_spread_threshold = param.Number(
        0, doc="threshold parameter passed to dynspread function"
    )
    # None (the default) means no map tiles are rendered.
    tile_provider = param.String(None)
    node_aggregate_col = param.String(allow_None=True)
    node_aggregate_fn = param.String("count")
    legend = param.Boolean(True, doc="whether to display legends or not")
    legend_position = param.String("right", doc="position of legend")
    node_cmap = param.Dict(default={"cmap": CUXF_DEFAULT_COLOR_PALETTE})
    tools = param.List(
        default=["pan", "reset", "lasso_select", "wheel_zoom"],
        doc="interactive tools to add to the chart",
    )
    node_color_palette = param.List()
    node_point_shape = param.ObjectSelector(
        default="circle",
        objects=[
            "circle",
            "square",
            "rect_vertical",
            "rect_horizontal",
            "cross",
        ],
    )
    node_max_px = param.Integer(10)
    node_clims = param.Tuple(default=(None, None))
    edge_color = param.String()
    edge_source = param.String("src")
    edge_target = param.String("dst")
    edge_transparency = param.Number(0, bounds=(0, 1))
    inspect_neighbors = param.ClassSelector(
        class_=CustomInspectTool,
        doc="tool to assign selection mechanism(inspect neighbors or default)",
    )
    display_edges = param.ClassSelector(
        class_=CustomInspectTool,
        doc="tool to select whether to display edges or not",
    )

    @property
    def df_type(self):
        # Nodes and edges must share the same backend (cudf vs dask_cudf);
        # exact type comparison appears deliberate here (noqa: E721).
        if type(self.nodes_df) == type(self.edges_df):  # noqa: E721
            return type(self.nodes_df)
        raise TypeError("nodes and edges must be of the same type")

    def update_color_palette(self, value):
        # Keep the nodes sub-chart's palette in sync with this chart.
        self.node_color_palette = value
        self.nodes_chart.color_palette = value

    def __init__(self, **params):
        super(InteractiveDatashaderGraph, self).__init__(**params)
        # Resolve the tile source once up front; remains None when no
        # provider was configured.
        self.tiles = (
            tile_sources[self.tile_provider]()
            if (self.tile_provider is not None)
            else self.tile_provider
        )
        self.nodes_chart = InteractiveDatashaderPoints(
            source_df=self.nodes_df,
            x=self.node_x,
            y=self.node_y,
            aggregate_col=self.node_aggregate_col,
            aggregate_fn=self.node_aggregate_fn,
            color_palette=self.node_color_palette,
            pixel_shade_type=self.node_pixel_shade_type,
            tile_provider=self.tile_provider,
            legend=self.legend,
            legend_position=self.legend_position,
            spread_threshold=self.node_spread_threshold,
            point_shape=self.node_point_shape,
            max_px=self.node_max_px,
        )
        self.edges_chart = InteractiveDatashaderLine(
            source_df=self.edges_df,
            x=self.edge_source,
            y=self.edge_target,
            color=self.edge_color,
            transparency=self.edge_transparency,
        )

    def update_data(self, nodes=None, edges=None):
        # Update either or both layers; None leaves a layer untouched.
        if nodes is not None:
            self.nodes_chart.update_data(nodes)
        if edges is not None:
            self.edges_chart.update_data(edges)

    def view(self):
        def set_tools(plot, element):
            # Render hook: add the custom inspect tools exactly once (the
            # last-tool check prevents re-adding them on re-render).
            if plot.state.toolbar.tools[-1] != self.display_edges:
                # if self.df_type != dask_cudf.DataFrame:
                #     # no interactions(yet) with dask_cudf backed graph charts
                plot.state.add_tools(self.inspect_neighbors)
                plot.state.add_tools(self.display_edges)

        # Nodes layer: rasterized scatter with selection/reset streams and
        # the interactive toolbar attached.
        dmap_nodes = dynspread(
            self.nodes_chart.get_chart(
                streams=[
                    self.box_stream,
                    self.lasso_stream,
                    self.reset_stream,
                ]
            ),
            threshold=self.node_spread_threshold,
            shape=self.node_point_shape,
            max_px=self.node_max_px,
        ).opts(
            xaxis=None,
            yaxis=None,
            responsive=True,
            default_tools=[],
            active_tools=["wheel_zoom", "pan"],
            tools=self.tools,
            hooks=[set_tools],
        )
        # Edges layer carries no tools of its own.
        dmap_edges = dynspread(
            self.edges_chart.get_chart().opts(default_tools=[])
        )
        dmap_graph = dmap_edges * dmap_nodes
        return pn.pane.HoloViews(
            self.tiles * dmap_graph if self.tiles is not None else dmap_graph,
            sizing_mode="stretch_both",
            height=self.height,
        )
class forceatlas2_layout(LayoutAlgorithm):
    """
    Assign coordinates to the nodes using force-directed algorithm.

    This is a force-directed graph layout algorithm called
    `ForceAtlas2`. Timothee Poisot's `nxfa2` is the original
    implementation of this algorithm.

    .. _ForceAtlas2:
       http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0098679&type=printable
    .. _nxfa2:
       https://github.com/tpoisot/nxfa2
    """

    iterations = param.Integer(default=10, bounds=(1, None), doc="""
        Number of passes for the layout algorithm""")

    linlog = param.Boolean(False, doc="""
        Whether to use logarithmic attraction force""")

    nohubs = param.Boolean(False, doc="""
        Whether to grant authorities (nodes with a high indegree) a
        more central position than hubs (nodes with a high outdegree)""")

    k = param.Number(default=None, doc="""
        Compensates for the repulsion for nodes that are far away
        from the center. Defaults to the inverse of the number of
        nodes.""")

    dim = param.Integer(default=2, bounds=(1, None), doc="""
        Coordinate dimensions of each node""")

    seed = param.Integer(default=None, bounds=(0, 2**32 - 1), doc="""
        Random seed used to initialize the pseudo-random number
        generator.""")

    def __call__(self, nodes, edges, **params):
        """Run the layout and return the nodes with updated positions."""
        opts = param.ParamOverrides(self, params)
        np.random.seed(opts.seed)

        # Represent the graph as an array of point positions plus a
        # sparse adjacency matrix.
        positions = _extract_points_from_nodes(nodes)
        adjacency = _convert_graph_to_sparse_matrix(nodes, edges)

        # Default repulsion compensation is the inverse of the node count.
        if opts.k is None:
            opts.k = np.sqrt(1.0 / len(positions))

        # The initial "temperature" is about .1 of the (1x1) domain area;
        # this is the largest step allowed in the dynamics. The cooling
        # schedule steps it down linearly on each iteration, so the last
        # iteration takes the smallest step.
        initial_temperature = 0.1
        cooling(adjacency, positions, initial_temperature, opts)

        return _merge_points_with_nodes(nodes, positions)
class InteractiveDatashaderPoints(InteractiveDatashader):
    """
    Interactive, datashader-rasterized scatter chart backed by a
    cuDF/dask_cuDF dataframe (``source_df`` inherited from the base).
    """

    aggregate_col = param.String(allow_None=True)
    aggregate_fn = param.String("count")
    legend = param.Boolean(True, doc="whether to display legends or not")
    legend_position = param.String("right", doc="position of legend")
    cmap = param.Dict(default={"cmap": CUXF_DEFAULT_COLOR_PALETTE})
    tools = param.List(
        default=["pan", "reset", "lasso_select", "wheel_zoom"],
        doc="interactive tools to add to the chart",
    )
    color_palette = param.List()
    point_shape = param.ObjectSelector(
        default="circle",
        objects=[
            "circle",
            "square",
            "rect_vertical",
            "rect_horizontal",
            "cross",
        ],
    )
    max_px = param.Integer(10)
    clims = param.Tuple(default=(None, None))

    def __init__(self, **params):
        super(InteractiveDatashaderPoints, self).__init__(**params)
        self._compute_datashader_assets()

    def _compute_clims(self):
        # clims only apply to continuous aggregate columns; categorical
        # columns are colored via a color_key instead.
        if not isinstance(
            self.source_df[self.aggregate_col].dtype,
            cudf.core.dtypes.CategoricalDtype,
        ):
            self.clims = get_min_max(self.source_df, self.aggregate_col)

    def _compute_datashader_assets(self):
        """
        Derive the datashader aggregator, colormap and color limits from
        the current aggregate column/function and color palette.
        """
        self.aggregator = None
        self.cmap = {"cmap": self.color_palette}
        if isinstance(
            self.source_df[self.aggregate_col].dtype,
            cudf.core.dtypes.CategoricalDtype,
        ):
            # Categorical column: map each category to a palette color.
            self.cmap = {
                "color_key": {
                    k: v
                    for k, v in zip(
                        list(
                            self.source_df[
                                self.aggregate_col
                            ].cat.categories.to_pandas()
                        ),
                        self.color_palette,
                    )
                }
            }

        if self.aggregate_fn:
            self.aggregator = getattr(ds, self.aggregate_fn)(
                self.aggregate_col
            )
            self._compute_clims()

    def update_data(self, data):
        # Swap in a new dataframe and refresh the color limits.
        self.source_df = data
        self._compute_clims()

    @param.depends("source_df")
    def points(self, **kwargs):
        # DynamicMap callback: re-evaluated whenever source_df changes.
        return hv.Scatter(
            self.source_df,
            kdims=[self.x],
            vdims=self.vdims,
        ).opts(tools=[], default_tools=[])

    def get_chart(self, streams=None):
        """
        Return the rasterized DynamicMap for this chart.

        Arguments
        ---------
        streams: list or None
            HoloViews streams to attach to the DynamicMap. Defaults to
            no streams.
        """
        # BUG FIX: the default was the mutable `streams=[]`, which is
        # shared across calls; use None as the sentinel instead.
        streams = [] if streams is None else streams
        dmap = rasterize(
            hv.DynamicMap(self.points, streams=streams),
            aggregator=self.aggregator,
        ).opts(
            cnorm=self.pixel_shade_type,
            **self.cmap,
            colorbar=self.legend,
            nodata=0,
            colorbar_position=self.legend_position,
            tools=[],
            default_tools=[],
        )
        # "count" uses automatic limits; other aggregations use the
        # precomputed clims.
        if self.aggregate_fn != "count":
            dmap = dmap.opts(clim=self.clims)
        return dmap

    def view(self):
        dmap = dynspread(
            self.get_chart(
                streams=[
                    self.box_stream,
                    self.lasso_stream,
                    self.reset_stream,
                ]
            ),
            threshold=self.spread_threshold,
            shape=self.point_shape,
            max_px=self.max_px,
        ).opts(
            xaxis=None,
            yaxis=None,
            responsive=True,
            tools=self.tools,
            active_tools=["wheel_zoom", "pan"],
        )
        return pn.pane.HoloViews(
            self.tiles * dmap if self.tiles is not None else dmap,
            sizing_mode="stretch_both",
            height=self.height,
        )
class Tabs(ListPanel):
    """
    Panel of Viewables to be displayed in separate tabs.
    """

    active = param.Integer(default=0, bounds=(0, None), doc="""
        Number of the currently active tab.""")

    closable = param.Boolean(default=False, doc="""
        Whether it should be possible to close tabs.""")

    dynamic = param.Boolean(default=False, doc="""
        Dynamically populate only the active tab.""")

    objects = param.List(default=[], doc="""
        The list of child objects that make up the tabs.""")

    tabs_location = param.ObjectSelector(
        default='above', objects=['above', 'below', 'left', 'right'], doc="""
        The location of the tabs relative to the tab contents.""")

    height = param.Integer(default=None, bounds=(0, None))

    width = param.Integer(default=None, bounds=(0, None))

    _bokeh_model = BkTabs

    _source_transforms = {'dynamic': None, 'objects': None}

    _rename = {'name': None, 'objects': 'tabs', 'dynamic': None}

    _linked_props = ['active', 'tabs']

    _js_transforms = {'tabs': """
    var ids = [];
    for (t of value) {{ ids.push(t.id) }};
    value = ids;
    """}

    def __init__(self, *items, **params):
        if 'objects' in params:
            if items:
                raise ValueError('Tabs objects should be supplied either '
                                 'as positional arguments or as a keyword, '
                                 'not both.')
            items = params['objects']
        # Items may be (name, object) tuples; split into panes and names.
        objects, self._names = self._to_objects_and_names(items)
        super(Tabs, self).__init__(*objects, **params)
        self._panels = defaultdict(dict)
        self.param.watch(self._update_names, 'objects')
        self.param.watch(self._update_active, ['dynamic', 'active'])
        self.param.active.bounds = (0, len(self)-1)
        # ALERT: Ensure that name update happens first, should be
        #        replaced by watch precedence support in param
        self._param_watchers['objects']['value'].reverse()

    def _to_object_and_name(self, item):
        # Accepts either a (name, object) tuple or a bare object; returns
        # the wrapped pane and its display name.
        from .pane import panel
        if isinstance(item, tuple):
            name, item = item
        else:
            name = getattr(item, 'name', None)
        pane = panel(item, name=name)
        name = param_name(pane.name) if name is None else name
        return pane, name

    def _to_objects_and_names(self, items):
        objects, names = [], []
        for item in items:
            pane, name = self._to_object_and_name(item)
            objects.append(pane)
            names.append(name)
        return objects, names

    def _init_properties(self):
        # 'closable' is applied per-tab in _update_model, not on the
        # Tabs model itself.
        return {k: v for k, v in self.param.get_param_values()
                if v is not None and k != 'closable'}

    #----------------------------------------------------------------
    # Callback API
    #----------------------------------------------------------------

    def _process_close(self, ref, attr, old, new):
        """
        Handle closed tabs.
        """
        # NOTE(review): assumes `ref` is present in self._models; a missing
        # ref would fail the tuple unpack — confirm callers guarantee this.
        model, _ = self._models.get(ref)
        if model:
            # Keep only the objects whose bokeh tab survived the close.
            inds = [i for i, t in enumerate(model.tabs) if t in new]
            old = self.objects
            new = [old[i] for i in inds]
        return old, new

    def _comm_change(self, doc, ref, attr, old, new):
        if attr == 'tabs':
            old, new = self._process_close(ref, attr, old, new)
        super(Tabs, self)._comm_change(doc, ref, attr, old, new)

    def _server_change(self, doc, ref, attr, old, new):
        if attr == 'tabs':
            old, new = self._process_close(ref, attr, old, new)
        super(Tabs, self)._server_change(doc, ref, attr, old, new)

    def _update_names(self, event):
        # Keep self._names aligned with self.objects after any change,
        # reusing the old name for objects that were already present.
        self.param.active.bounds = (0, len(event.new)-1)
        if len(event.new) == len(self._names):
            return
        names = []
        for obj in event.new:
            if obj in event.old:
                index = event.old.index(obj)
                name = self._names[index]
            else:
                name = obj.name
            names.append(name)
        self._names = names

    def _update_active(self, *events):
        # In dynamic mode a change of the active tab (or of dynamic
        # itself) requires re-rendering the tab contents.
        for event in events:
            if event.name == 'dynamic' or (self.dynamic and event.name == 'active'):
                self.param.trigger('objects')
                return

    #----------------------------------------------------------------
    # Model API
    #----------------------------------------------------------------

    def _update_model(self, events, msg, root, model, doc, comm=None):
        msg = dict(msg)
        if 'closable' in msg:
            # 'closable' lives on each bokeh Panel child, not on Tabs.
            closable = msg.pop('closable')
            for child in model.tabs:
                child.closable = closable
        super(Tabs, self)._update_model(events, msg, root, model, doc, comm)

    def _get_objects(self, model, old_objects, doc, root, comm=None):
        """
        Returns new child models for the layout while reusing unchanged
        models and cleaning up any dropped objects.
        """
        from .pane.base import RerenderError, panel
        new_models = []
        if len(self._names) != len(self):
            raise ValueError('Tab names do not match objects, ensure '
                             'that the Tabs.objects are not modified '
                             'directly. Found %d names, expected %d.' %
                             (len(self._names), len(self)))
        for i, (name, pane) in enumerate(zip(self._names, self)):
            pane = panel(pane, name=name)
            self.objects[i] = pane

        for obj in old_objects:
            if obj not in self.objects:
                obj._cleanup(root)

        current_objects = list(self)
        panels = self._panels[root.ref['id']]
        for i, (name, pane) in enumerate(zip(self._names, self)):
            hidden = self.dynamic and i != self.active
            # Reuse the cached bokeh Panel only if its hidden/visible state
            # (spacer vs real child) still matches.
            if (pane in old_objects and id(pane) in panels and
                ((hidden and isinstance(panels[id(pane)].child, BkSpacer)) or
                 (not hidden and not isinstance(panels[id(pane)].child, BkSpacer)))):
                panel = panels[id(pane)]
                new_models.append(panel)
                continue
            elif self.dynamic and i != self.active:
                # Inactive dynamic tab: render a spacer with matching layout.
                child = BkSpacer(**{k: v for k, v in pane.param.get_param_values()
                                    if k in Layoutable.param})
            else:
                try:
                    child = pane._get_model(doc, root, model, comm)
                except RerenderError:
                    # Start over from the already-processed prefix.
                    return self._get_objects(model, current_objects[:i], doc, root, comm)
            panel = panels[id(pane)] = BkPanel(title=name, name=pane.name,
                                               child=child, closable=self.closable)
            new_models.append(panel)
        return new_models

    def _cleanup(self, root):
        super(Tabs, self)._cleanup(root)
        if root.ref['id'] in self._panels:
            del self._panels[root.ref['id']]

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    def __setitem__(self, index, panes):
        new_objects = list(self)
        if not isinstance(index, slice):
            if index > len(self.objects):
                raise IndexError('Index %d out of bounds on %s '
                                 'containing %d objects.' %
                                 (index, type(self).__name__, len(self.objects)))
            start, end = index, index+1
            panes = [panes]
        else:
            start = index.start or 0
            end = len(self.objects) if index.stop is None else index.stop
            if index.start is None and index.stop is None:
                # Full-slice assignment replaces all objects and names.
                if not isinstance(panes, list):
                    raise IndexError('Expected a list of objects to '
                                     'replace the objects in the %s, '
                                     'got a %s type.' %
                                     (type(self).__name__, type(panes).__name__))
                expected = len(panes)
                new_objects = [None]*expected
                self._names = [None]*len(panes)
                end = expected
            else:
                expected = end-start
                if end > len(self.objects):
                    raise IndexError('Index %d out of bounds on %s '
                                     'containing %d objects.' %
                                     (end, type(self).__name__, len(self.objects)))
            if not isinstance(panes, list) or len(panes) != expected:
                raise IndexError('Expected a list of %d objects to set '
                                 'on the %s to match the supplied slice.' %
                                 (expected, type(self).__name__))
        for i, pane in zip(range(start, end), panes):
            new_objects[i], self._names[i] = self._to_object_and_name(pane)
        self.objects = new_objects

    def clone(self, *objects, **params):
        """
        Makes a copy of the Tabs sharing the same parameters.

        Arguments
        ---------
        objects: Objects to add to the cloned Tabs object.
        params: Keyword arguments override the parameters on the clone.

        Returns
        -------
        Cloned Tabs object
        """
        if not objects:
            if 'objects' in params:
                objects = params.pop('objects')
            else:
                objects = zip(self._names, self.objects)
        elif 'objects' in params:
            raise ValueError('Tabs objects should be supplied either '
                             'as positional arguments or as a keyword, '
                             'not both.')
        p = dict(self.param.get_param_values(), **params)
        del p['objects']
        # BUG FIX: previously passed **params here, discarding the merged
        # parameter values in `p` so the clone lost the current settings.
        return type(self)(*objects, **p)

    def append(self, pane):
        """
        Appends an object to the tabs.

        Arguments
        ---------
        obj (object): Panel component to add as a tab.
        """
        new_object, new_name = self._to_object_and_name(pane)
        new_objects = list(self)
        new_objects.append(new_object)
        self._names.append(new_name)
        self.objects = new_objects

    def clear(self):
        """
        Clears the tabs.
        """
        self._names = []
        self.objects = []

    def extend(self, panes):
        """
        Extends the the tabs with a list.

        Arguments
        ---------
        objects (list): List of panel components to add as tabs.
        """
        new_objects, new_names = self._to_objects_and_names(panes)
        objects = list(self)
        objects.extend(new_objects)
        self._names.extend(new_names)
        self.objects = objects

    def insert(self, index, pane):
        """
        Inserts an object in the tabs at the specified index.

        Arguments
        ---------
        index (int): Index at which to insert the object.
        object (object): Panel components to insert as tabs.
        """
        new_object, new_name = self._to_object_and_name(pane)
        new_objects = list(self.objects)
        new_objects.insert(index, new_object)
        self._names.insert(index, new_name)
        self.objects = new_objects

    def pop(self, index):
        """
        Pops an item from the tabs by index.

        Arguments
        ---------
        index (int): The index of the item to pop from the tabs.
        """
        new_objects = list(self)
        # Also accepts the tab object itself in place of an index.
        if index in new_objects:
            index = new_objects.index(index)
        new_objects.pop(index)
        self._names.pop(index)
        self.objects = new_objects

    def remove(self, pane):
        """
        Removes an object from the tabs.

        Arguments
        ---------
        obj (object): The object to remove from the tabs.
        """
        new_objects = list(self)
        if pane in new_objects:
            index = new_objects.index(pane)
        new_objects.remove(pane)
        self._names.pop(index)
        self.objects = new_objects

    def reverse(self):
        """
        Reverses the tabs.
        """
        new_objects = list(self)
        new_objects.reverse()
        self._names.reverse()
        self.objects = new_objects
class ListPanel(Panel):
    """
    An abstract baseclass for Panel objects with list-like children.
    """

    margin = param.Parameter(default=0, doc="""
        Allows to create additional space around the component. May
        be specified as a two-tuple of the form (vertical, horizontal)
        or a four-tuple (top, right, bottom, left).""")

    objects = param.List(default=[], doc="""
        The list of child objects that make up the layout.""")

    scroll = param.Boolean(default=False, doc="""
        Whether to add scrollbars if the content overflows the size
        of the container.""")

    _source_transforms = {'scroll': None}

    __abstract = True

    def __init__(self, *objects, **params):
        from .pane import panel
        if objects:
            if 'objects' in params:
                raise ValueError("A %s's objects should be supplied either "
                                 "as positional arguments or as a keyword, "
                                 "not both." % type(self).__name__)
            params['objects'] = [panel(pane) for pane in objects]
        # NOTE(review): deliberately calls super(Panel, ...) to skip
        # Panel.__init__ — confirm this is intentional before changing.
        super(Panel, self).__init__(**params)

    def _process_param_change(self, params):
        # 'scroll' is not a bokeh property; translate it into the
        # 'scrollable' CSS class instead.
        scroll = params.pop('scroll', None)
        css_classes = self.css_classes or []
        if scroll:
            params['css_classes'] = css_classes + ['scrollable']
        elif scroll is False:
            # Identity check: only an explicit False (not the None
            # produced when 'scroll' was absent) resets the classes.
            params['css_classes'] = css_classes
        return super(ListPanel, self)._process_param_change(params)

    def _cleanup(self, root):
        super(ListPanel, self)._cleanup(root)
        for p in self.objects:
            p._cleanup(root)

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    def __getitem__(self, index):
        return self.objects[index]

    def __len__(self):
        return len(self.objects)

    def __iter__(self):
        for obj in self.objects:
            yield obj

    def __contains__(self, obj):
        return obj in self.objects

    def __setitem__(self, index, panes):
        from .pane import panel
        new_objects = list(self)
        if not isinstance(index, slice):
            start, end = index, index+1
            if start > len(self.objects):
                raise IndexError('Index %d out of bounds on %s '
                                 'containing %d objects.' %
                                 (end, type(self).__name__, len(self.objects)))
            panes = [panes]
        else:
            start = index.start or 0
            end = len(self) if index.stop is None else index.stop
            if index.start is None and index.stop is None:
                # Full-slice assignment replaces the whole contents.
                if not isinstance(panes, list):
                    raise IndexError('Expected a list of objects to '
                                     'replace the objects in the %s, '
                                     'got a %s type.' %
                                     (type(self).__name__, type(panes).__name__))
                expected = len(panes)
                new_objects = [None]*expected
                end = expected
            elif end > len(self.objects):
                raise IndexError('Index %d out of bounds on %s '
                                 'containing %d objects.' %
                                 (end, type(self).__name__, len(self.objects)))
            else:
                expected = end-start
            if not isinstance(panes, list) or len(panes) != expected:
                raise IndexError('Expected a list of %d objects to set '
                                 'on the %s to match the supplied slice.' %
                                 (expected, type(self).__name__))
        for i, pane in zip(range(start, end), panes):
            new_objects[i] = panel(pane)
        self.objects = new_objects

    def clone(self, *objects, **params):
        """
        Makes a copy of the layout sharing the same parameters.

        Arguments
        ---------
        objects: Objects to add to the cloned layout.
        params: Keyword arguments override the parameters on the clone.

        Returns
        -------
        Cloned layout object
        """
        if not objects:
            if 'objects' in params:
                objects = params.pop('objects')
            else:
                objects = self.objects
        elif 'objects' in params:
            raise ValueError("A %s's objects should be supplied either "
                             "as arguments or as a keyword, not both."
                             % type(self).__name__)
        p = dict(self.param.get_param_values(), **params)
        del p['objects']
        # BUG FIX: previously passed **params here, discarding the merged
        # parameter values in `p` so the clone lost the current settings.
        return type(self)(*objects, **p)

    def append(self, obj):
        """
        Appends an object to the layout.

        Arguments
        ---------
        obj (object): Panel component to add to the layout.
        """
        from .pane import panel
        new_objects = list(self)
        new_objects.append(panel(obj))
        self.objects = new_objects

    def clear(self):
        """
        Clears the objects on this layout.
        """
        self.objects = []

    def extend(self, objects):
        """
        Extends the objects on this layout with a list.

        Arguments
        ---------
        objects (list): List of panel components to add to the layout.
        """
        from .pane import panel
        new_objects = list(self)
        new_objects.extend(list(map(panel, objects)))
        self.objects = new_objects

    def insert(self, index, obj):
        """
        Inserts an object in the layout at the specified index.

        Arguments
        ---------
        index (int): Index at which to insert the object.
        object (object): Panel components to insert in the layout.
        """
        from .pane import panel
        new_objects = list(self)
        new_objects.insert(index, panel(obj))
        self.objects = new_objects

    def pop(self, index):
        """
        Pops an item from the layout by index.

        Arguments
        ---------
        index (int): The index of the item to pop from the layout.
        """
        new_objects = list(self)
        # Also accepts the object itself in place of an index.
        if index in new_objects:
            index = new_objects.index(index)
        obj = new_objects.pop(index)
        self.objects = new_objects
        return obj

    def remove(self, obj):
        """
        Removes an object from the layout.

        Arguments
        ---------
        obj (object): The object to remove from the layout.
        """
        new_objects = list(self)
        new_objects.remove(obj)
        self.objects = new_objects

    def reverse(self):
        """
        Reverses the objects in the layout.
        """
        new_objects = list(self)
        new_objects.reverse()
        self.objects = new_objects
class BarPlot(LegendPlot):
    # Renders a Bars element as matplotlib bars, optionally laid out
    # into groups, categories within groups, and stacked segments.

    group_index = param.Integer(default=0, doc="""
       Index of the dimension in the supplied Bars
       Element, which will be laid out into groups.""")

    category_index = param.Integer(default=1, doc="""
       Index of the dimension in the supplied Bars
       Element, which will be laid out into categories.""")

    stack_index = param.Integer(default=2, doc="""
       Index of the dimension in the supplied Bars
       Element, which will stacked.""")

    padding = param.Number(default=0.2, doc="""
       Defines the padding between groups.""")

    color_by = param.List(default=['category'], doc="""
       Defines how the Bar elements colored. Valid options include
       any permutation of 'group', 'category' and 'stack'.""")

    show_legend = param.Boolean(default=True, doc="""
        Whether to show legend for the plot.""")

    xticks = param.Integer(0, precedence=-1)

    style_opts = ['alpha', 'color', 'align', 'visible', 'edgecolor',
                  'log', 'facecolor', 'capsize', 'error_kw', 'hatch']

    legend_specs = dict(LegendPlot.legend_specs, **{
        'top': dict(bbox_to_anchor=(0., 1.02, 1., .102), ncol=3,
                    loc=3, mode="expand", borderaxespad=0.),
        'bottom': dict(ncol=3, mode="expand", loc=2,
                       bbox_to_anchor=(0., -0.4, 1., .102),
                       borderaxespad=0.1)})

    # Fixed mapping from semantic role to its default dimension index.
    _dimensions = OrderedDict([('group', 0), ('category', 1), ('stack', 2)])

    def __init__(self, element, **params):
        super(BarPlot, self).__init__(element, **params)
        # Cache the unique values for each role and the corresponding
        # dimensions, shared by the draw and update paths.
        self.values, self.bar_dimensions = self._get_values()

    def _get_values(self):
        """
        Get unique index value for each bar
        """
        gi, ci, si = self.group_index, self.category_index, self.stack_index
        ndims = self.hmap.last.ndims
        dims = self.hmap.last.kdims
        dimensions = []
        values = {}
        for vidx, vtype in zip([gi, ci, si], self._dimensions):
            if vidx < ndims:
                dim = dims[vidx]
                dimensions.append(dim)
                vals = self.hmap.dimension_values(dim.name)
            else:
                # Role not present on the element; a single None entry
                # keeps the nested iteration in _create_bars uniform.
                dimensions.append(None)
                vals = [None]
            values[vtype] = list(unique_iterator(vals))
        return values, dimensions

    def _compute_styles(self, element, style_groups):
        """
        Computes color and hatch combinations by
        any combination of the 'group', 'category'
        and 'stack'.
        """
        style = self.lookup_options(element, 'style')[0]
        sopts = []
        for sopt in ['color', 'hatch']:
            if sopt in style:
                sopts.append(sopt)
                # Remove from the shared style; assigned per-bar below.
                style.pop(sopt, None)
        color_groups = []
        for sg in style_groups:
            color_groups.append(self.values[sg])
        # One style entry for every combination of styling values.
        style_product = list(product(*color_groups))
        wrapped_style = self.lookup_options(element, 'style').max_cycles(
            len(style_product))
        color_groups = {k: tuple(wrapped_style[n][sopt] for sopt in sopts)
                        for n, k in enumerate(style_product)}
        return style, color_groups, sopts

    def get_extents(self, element, ranges):
        """
        Compute the (x0, y0, x1, y1) extents of the bar chart.
        """
        ngroups = len(self.values['group'])
        vdim = element.vdims[0].name
        if self.stack_index in range(element.ndims):
            # Stacked bars: the value range cannot be read off directly.
            return 0, 0, ngroups, np.NaN
        else:
            vrange = ranges[vdim]
            # Always include the baseline at zero.
            return 0, np.nanmin([vrange[0], 0]), ngroups, vrange[1]

    @mpl_rc_context
    def initialize_plot(self, ranges=None):
        """
        Draw the bars for the last key in the HoloMap and finalize the axis.
        """
        element = self.hmap.last
        vdim = element.vdims[0]
        axis = self.handles['axis']
        key = self.keys[-1]

        ranges = self.compute_ranges(self.hmap, key, ranges)
        ranges = match_spec(element, ranges)

        self.handles['artist'], self.handles['xticks'], xdims = \
            self._create_bars(axis, element)
        return self._finalize_axis(key, ranges=ranges,
                                   xticks=self.handles['xticks'],
                                   element=element, dimensions=[xdims, vdim])

    def _finalize_ticks(self, axis, element, xticks, yticks, zticks):
        """
        Apply ticks with appropriate offsets.
        """
        yalignments = None
        if xticks is not None:
            ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0]))
            xticks = (list(ticks), list(labels))
        super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks)
        if yalignments:
            # Group labels are nudged below the category labels.
            for t, y in zip(axis.get_xticklabels(), yalignments):
                t.set_y(y)

    def _create_bars(self, axis, element):
        """
        Draw one bar per (group, category, stack) combination, returning
        the bar artists, the computed xticks and the key dimensions used
        along the x-axis.
        """
        # Get style and dimension information
        values = self.values
        gi, ci, si = self.group_index, self.category_index, self.stack_index
        gdim, cdim, sdim = [element.kdims[i] if i < element.ndims else None
                            for i in (gi, ci, si)]
        indices = dict(zip(self._dimensions, (gi, ci, si)))
        style_groups = [sg for sg in self.color_by
                        if indices[sg] < element.ndims]
        style_opts, color_groups, sopts = self._compute_styles(element, style_groups)
        dims = element.dimensions('key', label=True)
        ndims = len(dims)
        xdims = [d for d in [cdim, gdim] if d is not None]

        # Compute widths
        width = (1 - (2. * self.padding)) / len(values['category'])

        # Initialize variables
        xticks = []
        val_key = [None] * ndims
        style_key = [None] * len(style_groups)
        label_key = [None] * len(style_groups)
        labels = []
        bars = {}

        # Iterate over group, category and stack dimension values
        # computing xticks and drawing bars and applying styles
        for gidx, grp_name in enumerate(values['group']):
            if grp_name is not None:
                grp = gdim.pprint_value(grp_name)
                if 'group' in style_groups:
                    idx = style_groups.index('group')
                    label_key[idx] = str(grp)
                    style_key[idx] = grp_name
                val_key[gi] = grp_name
                if ci < ndims:
                    yalign = -0.04
                else:
                    yalign = 0
                xticks.append((gidx + 0.5, grp, yalign))
            for cidx, cat_name in enumerate(values['category']):
                xpos = gidx + self.padding + (cidx * width)
                if cat_name is not None:
                    # BUG FIX: previously formatted with gdim (the group
                    # dimension); categories belong to cdim, which was
                    # computed above but never used for formatting.
                    cat = cdim.pprint_value(cat_name)
                    if 'category' in style_groups:
                        idx = style_groups.index('category')
                        label_key[idx] = str(cat)
                        style_key[idx] = cat_name
                    val_key[ci] = cat_name
                    xticks.append((xpos + width / 2., cat, 0))
                prev = 0
                for stk_name in values['stack']:
                    if stk_name is not None:
                        if 'stack' in style_groups:
                            idx = style_groups.index('stack')
                            # BUG FIX: previously formatted with gdim;
                            # stack values belong to sdim.
                            stk = sdim.pprint_value(stk_name)
                            label_key[idx] = str(stk)
                            style_key[idx] = stk_name
                        val_key[si] = stk_name
                    vals = element.sample([tuple(val_key)]).dimension_values(
                        element.vdims[0].name)
                    val = float(vals[0]) if len(vals) else np.NaN
                    label = ', '.join(label_key)
                    style = dict(style_opts,
                                 label='' if label in labels else label,
                                 **dict(zip(sopts, color_groups[tuple(style_key)])))
                    bar = axis.bar([xpos], [val], width=width, bottom=prev,
                                   **style)

                    # Update variables
                    bars[tuple(val_key)] = bar
                    # NaN segments do not advance the stacking baseline.
                    prev += val if np.isfinite(val) else 0
                    labels.append(label)
        title = [element.kdims[indices[cg]].pprint_label
                 for cg in self.color_by if indices[cg] < ndims]

        if self.show_legend and any(len(l) for l in labels):
            leg_spec = self.legend_specs[self.legend_position]
            if self.legend_cols: leg_spec['ncol'] = self.legend_cols
            axis.legend(title=', '.join(title), **leg_spec)
        return bars, xticks, xdims

    def update_handles(self, key, axis, element, ranges, style):
        """
        Update the height and baseline of each existing bar artist for
        the supplied element, reusing the xticks computed at draw time.
        """
        dims = element.dimensions('key', label=True)
        ndims = len(dims)
        ci, gi, si = self.category_index, self.group_index, self.stack_index
        val_key = [None] * ndims
        for g in self.values['group']:
            if g is not None: val_key[gi] = g
            for c in self.values['category']:
                if c is not None: val_key[ci] = c
                prev = 0
                for s in self.values['stack']:
                    if s is not None: val_key[si] = s
                    bar = self.handles['artist'].get(tuple(val_key))
                    if bar:
                        vals = element.sample([tuple(val_key)]).dimension_values(
                            element.vdims[0].name)
                        height = float(vals[0]) if len(vals) else np.NaN
                        bar[0].set_height(height)
                        bar[0].set_y(prev)
                        prev += height if np.isfinite(height) else 0
        return {'xticks': self.handles['xticks']}
class FileBase(DivPaneBase):
    # Abstract base pane for file-backed content (images etc.); resolves
    # the ``object`` from a local path, URL, raw bytes or file-like object.
    # Subclasses must define ``filetype`` (e.g. 'png', 'svg').

    embed = param.Boolean(default=True, doc="""
        Whether to embed the file as base64.""")

    _rerender_params = ['embed', 'object', 'style', 'width', 'height']

    __abstract = True

    def __init__(self, object=None, **params):
        # Normalize pathlib paths to plain strings before parameter
        # validation in the baseclass.
        if isinstance(object, PurePath):
            object = str(object)
        super().__init__(object=object, **params)

    def _type_error(self, object):
        # Strings that reached this point were rejected by applies();
        # give a more specific message than the generic baseclass error.
        if isinstance(object, str):
            raise ValueError("%s pane cannot parse string that is not a filename "
                             "or URL." % type(self).__name__)
        super()._type_error(object)

    @classmethod
    def applies(cls, obj):
        # Accepts, in priority order: objects with a _repr_<filetype>_
        # method, matching local files, matching URLs, parseable bytes,
        # and file-like objects.
        filetype = cls.filetype
        if hasattr(obj, '_repr_{}_'.format(filetype)):
            return True
        if isinstance(obj, PurePath):
            obj = str(obj.absolute())
        if isinstance(obj, str):
            if isfile(obj) and obj.endswith('.' + filetype):
                return True
            if isurl(obj, [cls.filetype]):
                return True
            elif isurl(obj, None):
                # NOTE(review): returns 0 (falsy but distinct from False)
                # for a generic URL — presumably a lowest-priority match
                # signal for pane resolution; confirm against the pane
                # dispatch logic before changing.
                return 0
        elif isinstance(obj, bytes):
            try:
                # If the subclass can extract a shape, the bytes are a
                # valid payload of this filetype.
                cls._imgshape(obj)
                return True
            except Exception:
                return False
        if hasattr(obj, 'read'):  # Check for file like object
            return True
        return False

    def _data(self):
        # Resolve self.object to raw file contents (bytes or str),
        # mirroring the dispatch order of applies().
        if hasattr(self.object, '_repr_{}_'.format(self.filetype)):
            return getattr(self.object, '_repr_' + self.filetype + '_')()
        if isinstance(self.object, str):
            if isfile(self.object):
                with open(self.object, 'rb') as f:
                    return f.read()
        elif isinstance(self.object, bytes):
            return self.object
        if hasattr(self.object, 'read'):
            # Rewind seekable file-like objects so repeated renders
            # re-read from the start.
            if hasattr(self.object, 'seek'):
                self.object.seek(0)
            return self.object.read()
        if isurl(self.object, None):
            # NOTE(review): network fetch without a timeout — a slow or
            # unreachable URL blocks rendering indefinitely; consider
            # adding a timeout.
            import requests
            r = requests.request(url=self.object, method='GET')
            return r.content
class opts(param.ParameterizedFunction):
    """
    Utility function to set options at the global level or to provide an
    Options object that can be used with the .options method of an
    element or container.

    Option objects can be generated and validated in a tab-completable
    way (in appropriate environments such as Jupyter notebooks) using
    completers such as opts.Curve, opts.Image, opts.Overlay, etc.

    To set opts globally you can pass these option objects into
    opts.defaults:

        opts.defaults(*options)

    For instance:

        opts.defaults(opts.Curve(color='red'))

    To set opts on a specific object, you can supply these option
    objects to the .options method.

    For instance:

        curve = hv.Curve([1,2,3])
        curve.options(opts.Curve(color='red'))

    The options method also accepts lists of Option objects.
    """

    # Preserved so _update_backend can regenerate __doc__ per backend.
    __original_docstring__ = None

    # Keywords not to be tab-completed (helps with deprecation)
    _no_completion = ['title_format', 'color_index', 'size_index',
                      'finalize_hooks', 'scaling_factor',
                      'scaling_method', 'size_fn', 'normalize_lengths',
                      'group_index', 'category_index', 'stack_index',
                      'color_by']

    strict = param.Boolean(default=False, doc="""
       Whether to be strict about the options specification. If not set
       to strict (default), any invalid keywords are simply skipped. If
       strict, invalid keywords prevent the options being applied.""")

    def __call__(self, *args, **params):
        # No arguments: return an empty (or keyword-populated) Options.
        # Positional usage is the deprecated %opts/%%opts-style API and
        # returns None after applying the options as a side effect.
        if not params and not args:
            return Options()
        elif params and not args:
            return Options(**params)

        if len(args) == 1:
            msg = ("Positional argument signature of opts is deprecated, "
                   "use opts.defaults instead.\nFor instance, instead of "
                   "opts('Points (size=5)') use opts.defaults(opts.Points(size=5))")
            if util.config.future_deprecations:
                self.param.warning(msg)
            self._linemagic(args[0])
        elif len(args) == 2:
            msg = ("Double positional argument signature of opts is deprecated, "
                   "use the .options method instead.\nFor instance, instead of "
                   "opts('Points (size=5)', points) use points.options(opts.Points(size=5))")
            if util.config.future_deprecations:
                self.param.warning(msg)
            self._cellmagic(args[0], args[1])

    @classmethod
    def _group_kwargs_to_options(cls, obj, kwargs):
        "Format option group kwargs into canonical options format"
        groups = Options._option_groups
        if set(kwargs.keys()) - set(groups):
            # NOTE(review): the message interpolates `groups` twice; the
            # first %s presumably should list the offending keywords.
            raise Exception("Keyword options %s must be one of %s" % (groups,
                            ','.join(repr(g) for g in groups)))
        elif not all(isinstance(v, dict) for v in kwargs.values()):
            raise Exception("The %s options must be specified using dictionary groups" %
                            ','.join(repr(k) for k in kwargs.keys()))

        # Check whether the user is specifying targets (such as 'Image.Foo')
        targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()]
        if any(targets) and not all(targets):
            raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.")
        elif not any(targets):
            # Not targets specified - add current object as target
            sanitized_group = util.group_sanitizer(obj.group)
            if obj.label:
                identifier = ('%s.%s.%s' % (
                    obj.__class__.__name__, sanitized_group,
                    util.label_sanitizer(obj.label)))
            elif sanitized_group != obj.__class__.__name__:
                identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group)
            else:
                identifier = obj.__class__.__name__
            options = {identifier: {grp: kws for (grp, kws) in kwargs.items()}}
        else:
            # Invert {group: {target: kws}} into {target: {group: kws}}.
            dfltdict = defaultdict(dict)
            for grp, entries in kwargs.items():
                for identifier, kws in entries.items():
                    dfltdict[identifier][grp] = kws
            options = dict(dfltdict)
        return options

    @classmethod
    def _apply_groups_to_backend(cls, obj, options, backend, clone):
        "Apply the groups to a single specified backend"
        obj_handle = obj
        if options is None:
            # No options: reset custom options, cloning if requested.
            if clone:
                obj_handle = obj.map(lambda x: x.clone(id=None))
            else:
                obj.map(lambda x: setattr(x, 'id', None))
        elif clone:
            obj_handle = obj.map(lambda x: x.clone(id=x.id))
        return StoreOptions.set_options(obj_handle, options, backend=backend)

    @classmethod
    def _grouped_backends(cls, options, backend):
        "Group options by backend and filter out output group appropriately"
        if options is None:
            return [(backend or Store.current_backend, options)]
        dfltdict = defaultdict(dict)
        for spec, groups in options.items():
            if 'output' not in groups.keys() or len(groups['output']) == 0:
                dfltdict[backend or Store.current_backend][spec.strip()] = groups
            elif set(groups['output'].keys()) - set(['backend']):
                dfltdict[groups['output']['backend']][spec.strip()] = groups
            elif ['backend'] == list(groups['output'].keys()):
                # Only a backend was specified in output: strip the
                # output group before forwarding the rest.
                filtered = {k: v for k, v in groups.items() if k != 'output'}
                dfltdict[groups['output']['backend']][spec.strip()] = filtered
            else:
                raise Exception('The output options group must have the backend keyword')
        return [(bk, bk_opts) for (bk, bk_opts) in dfltdict.items()]

    @classmethod
    def apply_groups(cls, obj, options=None, backend=None, clone=True, **kwargs):
        """Applies nested options definition grouped by type.

        Applies options on an object or nested group of objects,
        returning a new object with the options applied. This method
        accepts the separate option namespaces explicitly (i.e 'plot',
        'style' and 'norm').

        If the options are to be set directly on the object a simple
        format may be used, e.g.:

            opts.apply_groups(obj, style={'cmap': 'viridis'},
                                   plot={'show_title': False})

        If the object is nested the options must be qualified using a
        type[.group][.label] specification, e.g.:

            opts.apply_groups(obj, {'Image': {'plot': {'show_title': False},
                                              'style': {'cmap': 'viridis'}}})

        If no opts are supplied all options on the object will be reset.

        Args:
            options (dict): Options specification
                Options specification should be indexed by
                type[.group][.label] or option type ('plot', 'style',
                'norm').
            backend (optional): Backend to apply options to
                Defaults to current selected backend
            clone (bool, optional): Whether to clone object
                Options can be applied inplace with clone=False
            **kwargs: Keywords of options by type
                Applies options directly to the object by type
                (e.g. 'plot', 'style', 'norm') specified as
                dictionaries.

        Returns:
            Returns the object or a clone with the options applied
        """
        if isinstance(options, basestring):
            from ..util.parser import OptsSpec
            try:
                options = OptsSpec.parse(options)
            except SyntaxError:
                # Retry with the object's own type prepended so bare
                # option strings like "(size=5)" can be parsed.
                options = OptsSpec.parse(
                    '{clsname} {options}'.format(clsname=obj.__class__.__name__,
                                                 options=options))
        if kwargs:
            options = cls._group_kwargs_to_options(obj, kwargs)

        # NOTE(review): the loop variable shadows the `backend` argument.
        for backend, backend_opts in cls._grouped_backends(options, backend):
            obj = cls._apply_groups_to_backend(obj, backend_opts, backend, clone)
        return obj

    @classmethod
    def _process_magic(cls, options, strict, backends=None):
        # Parse magic-style option strings and validate them, returning
        # (options, failed) where failed is True only in strict mode.
        if isinstance(options, basestring):
            from .parser import OptsSpec
            try:
                ns = get_ipython().user_ns  # noqa
            except:
                ns = globals()
            options = OptsSpec.parse(options, ns=ns)

        errmsg = StoreOptions.validation_error_message(options, backends=backends)
        if errmsg:
            sys.stderr.write(errmsg)
            if strict:
                sys.stderr.write('Options specification will not be applied.')
                return options, True
        return options, False

    @classmethod
    def _cellmagic(cls, options, obj, strict=False):
        "Deprecated, not expected to be used by any current code"
        options, failure = cls._process_magic(options, strict)
        if failure: return obj
        if not isinstance(obj, Dimensioned):
            return obj
        else:
            return StoreOptions.set_options(obj, options)

    @classmethod
    def _linemagic(cls, options, strict=False, backend=None):
        "Deprecated, not expected to be used by any current code"
        backends = None if backend is None else [backend]
        options, failure = cls._process_magic(options, strict, backends=backends)
        if failure: return
        with options_policy(skip_invalid=True, warn_on_skip=False):
            StoreOptions.apply_customizations(options, Store.options(backend=backend))

    @classmethod
    def defaults(cls, *options, **kwargs):
        """Set default options for a session.

        Set default options for a session, whether in a Python script or
        a Jupyter notebook.

        Args:
            *options: Option objects used to specify the defaults.
            backend: The plotting extension the options apply to
        """
        # NOTE(review): with `and` this only raises when several keywords
        # are supplied AND the first is not 'backend'; a single invalid
        # keyword slips through. `or` looks intended — confirm upstream.
        if kwargs and len(kwargs) != 1 and list(kwargs.keys())[0] != 'backend':
            raise Exception('opts.defaults only accepts "backend" keyword argument')

        cls._linemagic(cls._expand_options(merge_options_to_dict(options)),
                       backend=kwargs.get('backend'))

    @classmethod
    def _expand_by_backend(cls, options, backend):
        """
        Given a list of flat Option objects which may or may not have
        'backend' in their kwargs, return a list of options grouped and
        expanded by backend.
        """
        groups = defaultdict(list)
        used_fallback = False
        for obj in options:
            if 'backend' in obj.kwargs:
                opts_backend = obj.kwargs['backend']
            elif backend is None:
                opts_backend = Store.current_backend
                obj.kwargs['backend'] = opts_backend
            else:
                opts_backend = backend
                obj.kwargs['backend'] = opts_backend
                used_fallback = True
            groups[opts_backend].append(obj)

        if backend and not used_fallback:
            cls.param.warning("All supplied Options objects already define a backend, "
                              "backend override %r will be ignored." % backend)
        return [(bk, cls._expand_options(o, bk)) for (bk, o) in groups.items()]

    @classmethod
    def _expand_options(cls, options, backend=None):
        """
        Validates and expands a dictionaries of options indexed by
        type[.group][.label] keys into separate style, plot, norm and
        output options.

            opts._expand_options({'Image': dict(cmap='viridis', show_title=False)})

        returns

            {'Image': {'plot': dict(show_title=False), 'style': dict(cmap='viridis')}}
        """
        current_backend = Store.current_backend
        try:
            backend_options = Store.options(backend=backend or current_backend)
        except KeyError as e:
            raise Exception('The %s backend is not loaded. Please load the backend using hv.extension.' % str(e))
        expanded = {}
        if isinstance(options, list):
            options = merge_options_to_dict(options)

        for objspec, options in options.items():
            objtype = objspec.split('.')[0]
            if objtype not in backend_options:
                raise ValueError('%s type not found, could not apply options.'
                                 % objtype)
            obj_options = backend_options[objtype]
            expanded[objspec] = {g: {} for g in obj_options.groups}
            for opt, value in options.items():
                found = False
                valid_options = []
                # Assign each keyword to the first group that allows it.
                for g, group_opts in sorted(obj_options.groups.items()):
                    if opt in group_opts.allowed_keywords:
                        expanded[objspec][g][opt] = value
                        found = True
                        break
                    valid_options += group_opts.allowed_keywords
                if found: continue
                cls._options_error(opt, objtype, backend, valid_options)
        return expanded

    @classmethod
    def _options_error(cls, opt, objtype, backend, valid_options):
        """
        Generates an error message for an invalid option suggesting
        similar options through fuzzy matching.
        """
        current_backend = Store.current_backend
        loaded_backends = Store.loaded_backends()
        kws = Keywords(values=valid_options)
        matches = sorted(kws.fuzzy_match(opt))
        if backend is not None:
            if matches:
                raise ValueError('Unexpected option %r for %s type '
                                 'when using the %r extension. Similar '
                                 'options are: %s.' % (opt, objtype, backend, matches))
            else:
                raise ValueError('Unexpected option %r for %s type '
                                 'when using the %r extension. No '
                                 'similar options founds.' % (opt, objtype, backend))

        # Check option is invalid for all backends
        found = []
        for lb in [b for b in loaded_backends if b != backend]:
            lb_options = Store.options(backend=lb).get(objtype)
            if lb_options is None: continue
            for g, group_opts in lb_options.groups.items():
                if opt in group_opts.allowed_keywords:
                    found.append(lb)
        if found:
            # Valid for some other loaded backend: warn instead of raising.
            param.main.param.warning(
                'Option %r for %s type not valid for selected '
                'backend (%r). Option only applies to following '
                'backends: %r' % (opt, objtype, current_backend, found))
            return

        if matches:
            raise ValueError('Unexpected option %r for %s type '
                             'across all extensions. Similar options '
                             'for current extension (%r) are: %s.' %
                             (opt, objtype, current_backend, matches))
        else:
            raise ValueError('Unexpected option %r for %s type '
                             'across all extensions. No similar options '
                             'found.' % (opt, objtype))

    @classmethod
    def _builder_reprs(cls, options, namespace=None, ns=None):
        """
        Given a list of Option objects (such as those returned from
        OptsSpec.parse_options) or an %opts or %%opts magic string,
        return a list of corresponding option builder reprs. The
        namespace is typically given as 'hv' if fully qualified
        namespaces are desired.
        """
        if isinstance(options, basestring):
            from .parser import OptsSpec
            if ns is None:
                try:
                    ns = get_ipython().user_ns  # noqa
                except:
                    ns = globals()
            options = options.replace('%%opts', '').replace('%opts', '')
            options = OptsSpec.parse_options(options, ns=ns)

        reprs = []
        # `ns` is reused here as the namespace prefix string.
        ns = '{namespace}.'.format(namespace=namespace) if namespace else ''
        for option in options:
            kws = ', '.join('%s=%r' % (k, option.kwargs[k]) for k in sorted(option.kwargs))
            if '.' in option.key:
                element = option.key.split('.')[0]
                spec = repr('.'.join(option.key.split('.')[1:])) + ', '
            else:
                element = option.key
                spec = ''
            opts_format = '{ns}opts.{element}({spec}{kws})'
            reprs.append(opts_format.format(ns=ns, spec=spec, kws=kws, element=element))
        return reprs

    @classmethod
    def _create_builder(cls, element, completions):
        # Build the tab-completable per-element classmethod (opts.Curve,
        # opts.Image, ...) that validates keywords and returns Options.
        def builder(cls, spec=None, **kws):
            spec = element if spec is None else '%s.%s' % (element, spec)
            prefix = 'In opts.{element}(...), '.format(element=element)
            backend = kws.get('backend', None)
            keys = set(kws.keys())
            if backend:
                allowed_kws = cls._element_keywords(backend,
                                                    elements=[element])[element]
                invalid = keys - set(allowed_kws)
            else:
                mismatched = {}
                all_valid_kws = set()
                for loaded_backend in Store.loaded_backends():
                    valid = set(cls._element_keywords(loaded_backend).get(element, []))
                    all_valid_kws |= set(valid)
                    if keys <= valid:
                        # Found a backend for which all keys are valid
                        return Options(spec, **kws)
                    mismatched[loaded_backend] = list(keys - valid)

                invalid = keys - all_valid_kws  # Keys not found for any backend
                if mismatched and not invalid:  # Keys found across multiple backends
                    msg = ('{prefix} keywords supplied are mixed across backends. '
                           'Keyword(s) {info}')
                    info = ', '.join('%s are invalid for %s'
                                     % (', '.join(repr(el) for el in v), k)
                                     for k, v in mismatched.items())
                    raise ValueError(msg.format(info=info, prefix=prefix))
                allowed_kws = completions

            reraise = False
            if invalid:
                try:
                    cls._options_error(list(invalid)[0], element, backend, allowed_kws)
                except ValueError as e:
                    # Re-raise outside the handler with the opts.<El> prefix.
                    msg = str(e)[0].lower() + str(e)[1:]
                    reraise = True

                if reraise: raise ValueError(prefix + msg)

            return Options(spec, **kws)

        filtered_keywords = [k for k in completions if k not in cls._no_completion]
        kws = ', '.join('{opt}=None'.format(opt=opt) for opt in sorted(filtered_keywords))
        builder.__doc__ = '{element}({kws})'.format(element=element, kws=kws)
        return classmethod(builder)

    @classmethod
    def _element_keywords(cls, backend, elements=None):
        "Returns a dictionary of element names to allowed keywords"
        if backend not in Store.loaded_backends():
            return {}

        mapping = {}
        backend_options = Store.options(backend)
        elements = elements if elements is not None else backend_options.keys()
        for element in elements:
            if '.' in element: continue
            element = element if isinstance(element, tuple) else (element,)
            element_keywords = []
            options = backend_options['.'.join(element)]
            for group in Options._option_groups:
                element_keywords.extend(options[group].allowed_keywords)
            mapping[element[0]] = element_keywords
        return mapping

    @classmethod
    def _update_backend(cls, backend):
        # Regenerate the per-element builders and the class docstring to
        # reflect the keywords of the given backend.
        if cls.__original_docstring__ is None:
            cls.__original_docstring__ = cls.__doc__

        all_keywords = set()
        element_keywords = cls._element_keywords(backend)
        for element, keywords in element_keywords.items():
            with param.logging_level('CRITICAL'):
                all_keywords |= set(keywords)
                setattr(cls, element, cls._create_builder(element, keywords))

        filtered_keywords = [k for k in all_keywords if k not in cls._no_completion]
        kws = ', '.join('{opt}=None'.format(opt=opt) for opt in sorted(filtered_keywords))
        old_doc = cls.__original_docstring__.replace('params(strict=Boolean, name=String)', '')
        cls.__doc__ = '\n opts({kws})'.format(kws=kws) + old_doc
class SVG(ImageBase):
    """
    The `SVG` pane embeds a .svg image file in a panel if provided a
    local path, or will link to a remote image if provided a URL.

    Reference: https://panel.holoviz.org/reference/panes/SVG.html

    :Example:

    >>> SVG(
    ...     'https://upload.wikimedia.org/wikipedia/commons/6/6b/Bitmap_VS_SVG.svg',
    ...     alt_text='A gif vs svg comparison',
    ...     link_url='https://en.wikipedia.org/wiki/SVG',
    ...     width=300, height=400
    ... )
    """

    encode = param.Boolean(default=False, doc="""
        Whether to enable base64 encoding of the SVG, base64 encoded
        SVGs do not support links.""")

    filetype = 'svg'

    _rerender_params = ImageBase._rerender_params + ['encode']

    @classmethod
    def applies(cls, obj):
        # Accept anything the image baseclass accepts, plus raw SVG markup.
        is_markup = isinstance(obj, str) and obj.lstrip().startswith('<svg')
        return super().applies(obj) or is_markup

    def _type_error(self, object):
        # Strings rejected by applies() get an SVG-specific message.
        if isinstance(object, str):
            raise ValueError("%s pane cannot parse string that is not a filename, "
                             "URL or a SVG XML contents." % type(self).__name__)
        super()._type_error(object)

    def _data(self):
        obj = self.object
        # Inline SVG markup is returned verbatim; paths, URLs and
        # file-like objects are resolved by the baseclass.
        if isinstance(obj, str) and obj.lstrip().startswith('<svg'):
            return obj
        return super()._data()

    def _b64(self):
        # Encode the SVG contents as a base64 data URI.
        raw = self._data()
        if not isinstance(raw, bytes):
            raw = raw.encode('utf-8')
        encoded = base64.b64encode(raw).decode("utf-8")
        return f"data:image/svg+xml;base64,{encoded}"

    def _imgshape(self, data):
        # SVGs scale freely, so report the configured pane size.
        return (self.width, self.height)

    def _get_properties(self):
        # Note: skips ImageBase._get_properties deliberately, since the
        # HTML is assembled here rather than by the image baseclass.
        props = super(ImageBase, self)._get_properties()
        if self.object is None:
            return dict(props, text='<img></img>')
        data = self._data()
        width, height = self._imgshape(data)
        if not isinstance(data, bytes):
            data = data.encode('utf-8')

        if self.encode:
            # Base64-embedded <img> tag (links inside the SVG won't work).
            b64 = base64.b64encode(data).decode("utf-8")
            src = "data:image/svg+xml;base64,{b64}".format(b64=b64)
            html = "<img src='{src}' width={width} height={height}></img>".format(
                src=src, width=width, height=height)
        else:
            # Inline the SVG markup directly.
            html = data.decode("utf-8")

        return dict(props, width=width, height=height, text=escape(html))
class opts(param.ParameterizedFunction):
    """
    Utility function to set options at the global level or to provide an
    Options object that can be used with the .options method of an
    element or container.

    Option objects can be generated and validated in a tab-completable
    way (in appropriate environments such as Jupyter notebooks) using
    completers such as opts.Curve, opts.Image, opts.Overlay, etc.

    To set opts globally you can pass these option objects into
    opts.defaults:

        opts.defaults(*options)

    For instance:

        opts.defaults(opts.Curve(color='red'))

    To set opts on a specific object, you can supply these option
    objects to the .options method.

    For instance:

        curve = hv.Curve([1,2,3])
        curve.options(opts.Curve(color='red'))

    The options method also accepts lists of Option objects.
    """

    # Preserved so _update_backend can regenerate __doc__ per backend.
    __original_docstring__ = None

    strict = param.Boolean(default=False, doc="""
       Whether to be strict about the options specification. If not set
       to strict (default), any invalid keywords are simply skipped. If
       strict, invalid keywords prevent the options being applied.""")

    def __call__(self, *args, **params):
        # Keyword-only usage builds an Options object; positional usage
        # is the deprecated %opts/%%opts-style API applied as a side
        # effect (returns None).
        if params and not args:
            return Options(**params)

        if len(args) == 1:
            msg = ("Positional argument signature of opts is deprecated, "
                   "use opts.defaults instead.\nFor instance, instead of "
                   "opts('Points (size=5)') use opts.defaults(opts.Points(size=5))")
            if util.config.future_deprecations:
                self.warning(msg)
            self._linemagic(args[0])
        elif len(args) == 2:
            msg = ("Double positional argument signature of opts is deprecated, "
                   "use the .options method instead.\nFor instance, instead of "
                   "opts('Points (size=5)', points) use points.options(opts.Points(size=5))")
            if util.config.future_deprecations:
                self.warning(msg)
            self._cellmagic(args[0], args[1])

    @classmethod
    def _process_magic(cls, options, strict):
        # Parse magic-style option strings and validate, returning
        # (options, failed); failed is only True in strict mode.
        if isinstance(options, basestring):
            from .parser import OptsSpec
            try:
                ns = get_ipython().user_ns  # noqa
            except:
                ns = globals()
            options = OptsSpec.parse(options, ns=ns)

        errmsg = StoreOptions.validation_error_message(options)
        if errmsg:
            sys.stderr.write(errmsg)
            if strict:
                sys.stderr.write(' Options specification will not be applied.')
                return options, True
        return options, False

    @classmethod
    def _cellmagic(cls, options, obj, strict=False):
        "Deprecated, not expected to be used by any current code"
        options, failure = cls._process_magic(options, strict)
        if failure: return obj
        if not isinstance(obj, Dimensioned):
            return obj
        else:
            return StoreOptions.set_options(obj, options)

    @classmethod
    def _linemagic(cls, options, strict=False):
        "Deprecated, not expected to be used by any current code"
        options, failure = cls._process_magic(options, strict)
        if failure: return
        with options_policy(skip_invalid=True, warn_on_skip=False):
            StoreOptions.apply_customizations(options, Store.options())

    @classmethod
    def defaults(cls, *options):
        """
        Set default options for a session, whether in a Python script or
        a Jupyter notebook.
        """
        # BUG FIX: previously called cls.linemagic(...), but the helper
        # is defined as _linemagic, so defaults() always raised
        # AttributeError.
        cls._linemagic(cls.expand_options(merge_options_to_dict(options)))

    @classmethod
    def expand_options(cls, options, backend=None):
        """
        Validates and expands a dictionaries of options indexed by
        type[.group][.label] keys into separate style, plot and norm
        options.

            opts.expand_options({'Image': dict(cmap='viridis', show_title=False)})

        returns

            {'Image': {'plot': dict(show_title=False), 'style': dict(cmap='viridis')}}
        """
        current_backend = Store.current_backend
        try:
            backend_options = Store.options(backend=backend or current_backend)
        except KeyError as e:
            raise Exception(
                'The %s backend is not loaded. Please load the backend using hv.extension.' % str(e))
        expanded = {}
        if isinstance(options, list):
            options = merge_options_to_dict(options)

        for objspec, options in options.items():
            objtype = objspec.split('.')[0]
            if objtype not in backend_options:
                raise ValueError(
                    '%s type not found, could not apply options.' % objtype)
            obj_options = backend_options[objtype]
            expanded[objspec] = {g: {} for g in obj_options.groups}
            for opt, value in options.items():
                found = False
                valid_options = []
                # Assign each keyword to the first group that allows it.
                for g, group_opts in sorted(obj_options.groups.items()):
                    if opt in group_opts.allowed_keywords:
                        expanded[objspec][g][opt] = value
                        found = True
                        break
                    valid_options += group_opts.allowed_keywords
                if found: continue
                cls._options_error(opt, objtype, backend, valid_options)
        return expanded

    @classmethod
    def _options_error(cls, opt, objtype, backend, valid_options):
        """
        Generates an error message for an invalid option suggesting
        similar options through fuzzy matching.
        """
        current_backend = Store.current_backend
        loaded_backends = Store.loaded_backends()
        kws = Keywords(values=valid_options)
        matches = sorted(kws.fuzzy_match(opt))
        if backend is not None:
            if matches:
                raise ValueError('Unexpected option %r for %s type '
                                 'when using the %r extension. Similar '
                                 'options are: %s.' % (opt, objtype, backend, matches))
            else:
                raise ValueError('Unexpected option %r for %s type '
                                 'when using the %r extension. No '
                                 'similar options founds.' % (opt, objtype, backend))

        # Check option is invalid for all backends
        found = []
        for lb in [b for b in loaded_backends if b != backend]:
            lb_options = Store.options(backend=lb).get(objtype)
            if lb_options is None: continue
            for g, group_opts in lb_options.groups.items():
                if opt in group_opts.allowed_keywords:
                    found.append(lb)
        if found:
            # Valid for some other loaded backend: warn instead of raising.
            param.main.warning('Option %r for %s type not valid '
                               'for selected backend (%r). Option '
                               'only applies to following backends: %r'
                               % (opt, objtype, current_backend, found))
            return

        if matches:
            raise ValueError('Unexpected option %r for %s type '
                             'across all extensions. Similar options '
                             'for current extension (%r) are: %s.' %
                             (opt, objtype, current_backend, matches))
        else:
            raise ValueError('Unexpected option %r for %s type '
                             'across all extensions. No similar options '
                             'found.' % (opt, objtype))

    @classmethod
    def _completer_reprs(cls, options, namespace=None, ns=None):
        """
        Given a list of Option objects (such as those returned from
        OptsSpec.parse_options) or an %opts or %%opts magic string,
        return a list of corresponding completer reprs. The namespace is
        typically given as 'hv' if fully qualified namespaces are
        desired.
        """
        if isinstance(options, basestring):
            from .parser import OptsSpec
            if ns is None:
                try:
                    ns = get_ipython().user_ns  # noqa
                except:
                    ns = globals()
            options = options.replace('%%opts', '').replace('%opts', '')
            options = OptsSpec.parse_options(options, ns=ns)

        reprs = []
        # `ns` is reused here as the namespace prefix string.
        ns = '{namespace}.'.format(namespace=namespace) if namespace else ''
        for option in options:
            kws = ', '.join('%s=%r' % (k, option.kwargs[k])
                            for k in sorted(option.kwargs))
            if '.' in option.key:
                element = option.key.split('.')[0]
                spec = repr('.'.join(option.key.split('.')[1:])) + ', '
            else:
                element = option.key
                spec = ''
            opts_format = '{ns}opts.{element}({spec}{kws})'
            reprs.append(
                opts_format.format(ns=ns, spec=spec, kws=kws, element=element))
        return reprs

    @classmethod
    def _build_completer(cls, element, allowed):
        # Build the tab-completable per-element classmethod (opts.Curve,
        # opts.Image, ...) validating keywords against `allowed`.
        def fn(cls, spec=None, **kws):
            spec = element if spec is None else '%s.%s' % (element, spec)
            invalid = set(kws.keys()) - set(allowed)
            if invalid:
                try:
                    cls._options_error(
                        list(invalid)[0], element, Store.current_backend, allowed)
                except ValueError as e:
                    # Prefix the error with the completer call site.
                    prefix = 'In opts.{element}(...), '.format(element=element)
                    msg = str(e)[0].lower() + str(e)[1:]
                    raise ValueError(prefix + msg)
            return Options(spec, **kws)

        kws = ', '.join('{opt}=None'.format(opt=opt) for opt in sorted(allowed))
        fn.__doc__ = '{element}({kws})'.format(element=element, kws=kws)
        return classmethod(fn)

    @classmethod
    def _update_backend(cls, backend):
        # Regenerate the per-element completers and the class docstring
        # to reflect the keywords of the given backend.
        if cls.__original_docstring__ is None:
            cls.__original_docstring__ = cls.__doc__
        if backend not in Store.loaded_backends():
            return
        backend_options = Store.options(backend)
        all_keywords = set()
        for element in backend_options.keys():
            # NOTE(review): assumes tuple keys ('.'.join(element),
            # element[0]); a plain-string key would be mishandled here.
            if '.' in element: continue
            element_keywords = []
            options = backend_options['.'.join(element)]
            for group in Options._option_groups:
                element_keywords.extend(options[group].allowed_keywords)
            all_keywords |= set(element_keywords)
            with param.logging_level('CRITICAL'):
                setattr(cls, element[0],
                        cls._build_completer(element[0], element_keywords))

        kws = ', '.join('{opt}=None'.format(opt=opt) for opt in sorted(all_keywords))
        old_doc = cls.__original_docstring__.replace(
            'params(strict=Boolean, name=String)', '')
        cls.__doc__ = '\n opts({kws})'.format(kws=kws) + old_doc
class ChartPlot(ElementPlot): show_legend = param.Boolean(default=True, doc=""" Whether to show legend for the plot.""")
class NdWidget(param.Parameterized):
    """
    NdWidget is an abstract base class implementing a method to find
    the dimensions and keys of any ViewableElement, GridSpace or
    UniformNdMapping type. In the process it creates a mock_obj to
    hold the dimensions and keys.
    """

    display_options = param.Dict(default={}, doc="""
        The display options used to generate individual frames""")

    embed = param.Boolean(default=True, doc="""
        Whether to embed all plots in the Javascript, generating
        a static widget not dependent on the IPython server.""")

    #######################
    # JSON export options #
    #######################

    export_json = param.Boolean(default=False, doc="""Whether to export
        plots as json files, which can be dynamically loaded through
        a callback from the slider.""")

    json_save_path = param.String(default='./json_figures', doc="""
        If export_json is enabled the widget will save the json data to
        this path. If None data will be accessible via the json_data
        attribute.""")

    json_load_path = param.String(default=None, doc="""
        If export_json is enabled the widget JS code will load the data
        from this path, if None defaults to json_save_path. For loading
        the data from within the notebook the path must be relative,
        when exporting the notebook the path can be set to another
        location like a webserver where the json files can be uploaded
        to.""")

    ##############################
    # Javascript include options #
    ##############################

    CDN = param.Dict(default={'underscore': 'https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.8.3/underscore-min.js',
                              'jQueryUI': 'https://code.jquery.com/ui/1.10.4/jquery-ui.min.js'})

    css = param.String(default=None, doc="""
        Defines the local CSS file to be loaded for this widget.""")

    basejs = param.String(default='widgets.js', doc="""
        JS file containing javascript baseclasses for the widget.""")

    extensionjs = param.String(default=None, doc="""
        Optional javascript extension file for a particular backend.""")

    # Class-level registry mapping widget id -> live NdWidget instance
    widgets = {}
    counter = 0

    def __init__(self, plot, renderer=None, **params):
        super(NdWidget, self).__init__(**params)
        # Reuse the plot's comm id when present so frontend messages route
        # to the same widget; otherwise mint a fresh uuid.
        self.id = plot.comm.id if plot.comm else uuid.uuid4().hex
        self.plot = plot

        # Keep only streams whose contents overlap the plot dimensions.
        streams = []
        for stream in plot.streams:
            if any(k in plot.dimensions for k in stream.contents):
                streams.append(stream)

        self.dimensions, self.keys = drop_streams(streams,
                                                  plot.dimensions,
                                                  plot.keys)

        self.json_data = {}
        if self.plot.dynamic:
            # Dynamic plots cannot be statically embedded
            self.embed = False

        if renderer is None:
            backend = Store.current_backend
            self.renderer = Store.renderers[backend]
        else:
            self.renderer = renderer

        # Create mock NdMapping to hold the common dimensions and keys
        self.mock_obj = NdMapping([(k, None) for k in self.keys],
                                  kdims=self.dimensions)

        NdWidget.widgets[self.id] = self

        # Set up jinja2 templating
        import jinja2
        templateLoader = jinja2.FileSystemLoader(subdirs)
        self.jinjaEnv = jinja2.Environment(loader=templateLoader)

    def __call__(self):
        # Render the widget to its HTML representation.
        return self.render_html(self._get_data())

    def _get_data(self):
        # Build the template context used to render the widget HTML.
        delay = int(1000./self.display_options.get('fps', 5))
        # Strip the '.js' suffix; the templates append it themselves.
        CDN = {k: v[:-3] for k, v in self.CDN.items()}
        template = self.jinjaEnv.get_template(self.base_template)
        name = type(self).__name__
        cached = str(self.embed).lower()
        load_json = str(self.export_json).lower()
        mode = str(self.renderer.mode)
        json_path = (self.json_save_path if self.json_load_path is None
                     else self.json_load_path)
        if json_path and json_path[-1] != '/':
            json_path = json_path + '/'
        dynamic = json.dumps(self.plot.dynamic) if self.plot.dynamic else 'false'
        return dict(CDN=CDN, frames=self.get_frames(), delay=delay,
                    cached=cached, load_json=load_json, mode=mode,
                    id=self.id, Nframes=len(self.plot), widget_name=name,
                    json_path=json_path, widget_template=template,
                    dynamic=dynamic)

    def render_html(self, data):
        # Render the widget template with the supplied context dict.
        template = self.jinjaEnv.get_template(self.template)
        return template.render(**data)

    def get_frames(self):
        # When embedding, pre-render every frame; otherwise frames are
        # fetched on demand via callbacks.
        if self.embed:
            frames = OrderedDict([(idx, self._plot_figure(idx))
                                  for idx in range(len(self.plot))])
        else:
            frames = {}
        return self.encode_frames(frames)

    def encode_frames(self, frames):
        # Serialize the frames mapping to a JSON string.
        if isinstance(frames, dict):
            frames = dict(frames)
        return json.dumps(frames)

    def save_json(self, frames):
        """
        Saves frames data into a json file at the specified json_path,
        named with the widget uuid.
        """
        if self.json_save_path is None:
            return
        path = os.path.join(self.json_save_path, '%s.json' % self.id)
        if not os.path.isdir(self.json_save_path):
            os.mkdir(self.json_save_path)
        with open(path, 'w') as f:
            json.dump(frames, f)
        self.json_data = frames

    def _plot_figure(self, idx):
        # Render a single frame (index idx) of the plot to HTML.
        with self.renderer.state():
            self.plot.update(idx)
            css = self.display_options.get('css', {})
            figure_format = self.display_options.get('figure_format',
                                                     self.renderer.fig)
            return self.renderer.html(self.plot, figure_format, css=css,
                                      comm=False)

    def update(self, key):
        # Update the plot to the frame identified by key and push the
        # change to the frontend.
        if not self.plot.dimensions:
            self.plot.refresh()
        else:
            self.plot.update(key)
            self.plot.push()
        return 'Complete'
class CurvePlot(ChartPlot):
    """
    CurvePlot can plot Curve and ViewMaps of Curve, which can be
    displayed as a single frame or animation. Axes, titles and legends
    are automatically generated from dim_info.

    If the dimension is set to cyclic in the dim_info it will rotate
    the curve so that minimum y values are at the minimum x value to
    make the plots easier to interpret.
    """

    autotick = param.Boolean(default=False, doc="""
        Whether to let matplotlib automatically compute tick marks
        or to allow the user to control tick marks.""")

    interpolation = param.ObjectSelector(objects=['linear', 'steps-mid',
                                                  'steps-pre', 'steps-post'],
                                         default='linear', doc="""
        Defines how the samples of the Curve are interpolated,
        default is 'linear', other options include 'steps-mid',
        'steps-pre' and 'steps-post'.""")

    relative_labels = param.Boolean(default=False, doc="""
        If plotted quantity is cyclic and center_cyclic is enabled,
        will compute tick labels relative to the center.""")

    show_grid = param.Boolean(default=False, doc="""
        Enable axis grid.""")

    show_legend = param.Boolean(default=True, doc="""
        Whether to show legend for the plot.""")

    style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle',
                  'marker', 'ms']

    _plot_methods = dict(single='plot')

    def get_data(self, element, ranges, style):
        # Pre-interpolate samples when a step interpolation mode is chosen.
        if 'steps' in self.interpolation:
            element = interpolate_curve(element,
                                        interpolation=self.interpolation)
        xs = element.dimension_values(0)
        ys = element.dimension_values(1)
        dims = element.dimensions()
        if xs.dtype.kind == 'M':
            # datetime64 x-axis: attach a date formatter to the x dimension
            dt_format = Dimension.type_formatters[np.datetime64]
            dims[0] = dims[0](value_format=DateFormatter(dt_format))
        coords = (ys, xs) if self.invert_axes else (xs, ys)
        return coords, style, {'dimensions': dims}

    def init_artists(self, ax, plot_args, plot_kwargs):
        xs, ys = plot_args
        if xs.dtype.kind == 'M':
            # plot_date handles datetime axes
            artist = ax.plot_date(xs, ys, '-', **plot_kwargs)[0]
        else:
            artist = ax.plot(xs, ys, **plot_kwargs)[0]
        return {'artist': artist}

    def update_handles(self, key, axis, element, ranges, style):
        # Update the existing line artist in place with new data.
        artist = self.handles['artist']
        (xs, ys), style, axis_kwargs = self.get_data(element, ranges, style)
        artist.set_xdata(xs)
        artist.set_ydata(ys)
        return axis_kwargs
class TopoCommand(Command):
    """
    TopoCommand is designed to format Lancet Args objects into
    run_batch commands in a general way. Note that Topographica is
    always invoked with the -a flag so all of topo.command is
    imported.

    Some of the parameters duplicate those in run_batch to ensure
    consistency with previous run_batch usage in Topographica. As a
    consequence, this class sets all the necessary options for
    run_batch except the 'times' parameter which may be specified
    arbitrarily by the Lancet Args object.
    """

    tyfile = param.String(doc="The Topographica model file to run.")

    analysis_fn = param.String(default="default_analysis_function", doc="""
       The name of the analysis_fn to run. If modified from the
       default, the named callable will need to be imported into the
       namespace using a '-c' command in topo_flag_options.""")

    tag = param.Boolean(default=False, doc="""
       Whether to label the run_batch generated directory with the
       batch name and batch tag.""")

    topo_switches = param.List(default=['-a'], doc="""
       Specifies the Topographica qsub switches (flags without
       arguments) as a list of strings. Note the that the -a switch
       is always used to auto import commands.""")

    topo_flag_options = param.Dict(default={}, doc="""
       Specifies Topographica flags and their corresponding options
       as a dictionary. This parameter is suitable for setting -c and
       -p flags for Topographica. This parameter is important for
       introducing the callable named by the analysis_fn parameter
       into the namespace. Tuples can be used to indicate groups of
       options using the same flag:
       {'-p':'retina_density=5'} => -p retina_density=5
       {'-p':('retina_density=5', 'scale=2') => -p retina_density=5
       -p scale=2
       If a plain Python dictionary is used, the keys are
       alphanumerically sorted, otherwise the dictionary is assumed to
       be an OrderedDict (Python 2.7+, Python3 or
       param.external.OrderedDict) and the key ordering will be
       preserved. Note that the '-' is prefixed to the key if missing
       (to ensure a valid flag). This allows keywords to be specified
       with the dict constructor eg.. dict(key1=value1, key2=value2).""")

    param_formatter = param.Callable(
        param_formatter.instance(),
        doc="""Used to specify run_batch formatting.""")

    max_name_length = param.Number(
        default=200, doc="Matches run_batch parameter of same name.")

    snapshot = param.Boolean(default=True,
                             doc="Matches run_batch parameter of same name.")

    vc_info = param.Boolean(default=True,
                            doc="Matches run_batch parameter of same name.")

    save_global_params = param.Boolean(
        default=True, doc="Matches run_batch parameter of same name.")

    progress_bar = param.String(
        default='disabled', doc="Matches run_batch parameter of same name.")

    progress_interval = param.Number(
        default=100, doc="Matches run_batch parameter of same name.")

    def __init__(self, tyfile, executable=None, **kwargs):
        # Default executable is the 'topographica' script two directories
        # above the topo package.
        auto_executable = os.path.realpath(
            os.path.join(topo.__file__, '..', '..', 'topographica'))
        executable = executable if executable else auto_executable
        super(TopoCommand, self).__init__(tyfile=tyfile,
                                          executable=executable,
                                          **kwargs)
        self.pprint_args(['executable', 'tyfile', 'analysis_fn'],
                         ['topo_switches', 'snapshot'])
        self._typath = os.path.abspath(self.tyfile)

        if not os.path.isfile(self.executable):
            raise Exception(
                'Cannot find the topographica script relative to topo/__init__.py.'
            )
        if not os.path.exists(self._typath):
            raise Exception("Tyfile doesn't exist! Cannot proceed.")

        if ((self.analysis_fn.strip() != "default_analysis_function")
                and (type(self) == TopoCommand)
                and ('-c' not in self.topo_flag_options)):
            # FIX: was the Python-2-only statement form
            # `raise Exception, '...'`, which is a SyntaxError on Python 3.
            raise Exception('Please use -c option to introduce the appropriate analysis into the namespace.')

    def _topo_args(self, switch_override=None):
        """
        Method to generate Popen style argument list for Topographica
        using the topo_switches and topo_flag_options
        parameters. Switches are returned first, sorted
        alphanumerically. The qsub_flag_options follow in the order
        given by keys() which may be controlled if an OrderedDict is
        used (eg. in Python 2.7+ or using param.external
        OrderedDict). Otherwise the keys are sorted alphanumerically.
        """
        # FIX: mutable default argument ([]) replaced by None sentinel.
        switch_override = [] if switch_override is None else switch_override
        opt_dict = type(self.topo_flag_options)()
        opt_dict.update(self.topo_flag_options)

        # Alphanumeric sort if vanilla Python dictionary
        if type(self.topo_flag_options) == dict:
            ordered_options = [(k, opt_dict[k]) for k in sorted(opt_dict)]
        else:
            ordered_options = list(opt_dict.items())

        # Unpack tuple values so flag:(v1, v2,...)) => ..., flag:v1, flag:v2, ...
        unpacked_groups = [[(k, v) for v in val] if type(val) == tuple
                           else [(k, val)]
                           for (k, val) in ordered_options]
        unpacked_kvs = [el for group in unpacked_groups for el in group]

        # Adds '-' if missing (eg, keywords in dict constructor) and flattens lists.
        ordered_pairs = [(k, v) if (k[0] == '-') else ('-%s' % (k), v)
                         for (k, v) in unpacked_kvs]
        ordered_options = [[k] + ([v] if type(v) == str else v)
                           for (k, v) in ordered_pairs]
        flattened_options = [el for kvs in ordered_options for el in kvs]
        switches = [s for s in switch_override
                    if (s not in self.topo_switches)] + self.topo_switches
        return sorted(switches) + flattened_options

    def _run_batch_kwargs(self, spec, tid, info):
        """
        Defines the keywords accepted by run_batch and so specifies
        run_batch behaviour. These keywords are those consumed by
        run_batch for controlling run_batch behaviour.
        """
        # Direct options for controlling run_batch.
        options = {'name_time_format': repr(info['timestamp_format']),
                   'max_name_length': self.max_name_length,
                   'snapshot': self.snapshot,
                   'vc_info': self.vc_info,
                   'save_global_params': self.save_global_params,
                   'progress_interval': self.progress_interval,
                   'progress_bar': repr(self.progress_bar),
                   'metadata_dir': repr('metadata'),
                   'compress_metadata': repr('zip'),
                   'save_script_repr': repr('first')}

        # Settings inferred using information from launcher ('info')
        tag_info = (info['batch_name'], info['batch_tag'])
        tag = '[%s]_' % ':'.join(el for el in tag_info if el) if self.tag else ''

        derived_options = {'dirname_prefix': repr(''),
                           'tag': repr('%st%s_' % (tag, tid)),
                           'output_directory': repr(info['root_directory'])}

        # Use fixed timestamp argument to run_batch if available.
        if info['timestamp'] is not None:
            derived_options['timestamp'] = info['timestamp']

        # The analysis_fn is set by self.analysis_fn
        derived_options['analysis_fn'] = self.analysis_fn

        # Use the specified param_formatter to create the suitably named
        # lambda (returning the desired string) in run_batch.
        dir_format = self.param_formatter(info['constant_keys'],
                                          info['varying_keys'], spec)
        dir_formatter = 'lambda p: %s' % repr(dir_format)
        derived_options['dirname_params_filter'] = dir_formatter

        # FIX: was dict(options.items() + derived_options.items());
        # dict_items views cannot be concatenated with '+' on Python 3.
        merged = dict(options)
        merged.update(derived_options)
        return merged

    def __call__(self, spec, tid=None, info=None):
        """
        Returns a Popen argument list to invoke Topographica and
        execute run_batch with all options appropriately set (in
        alphabetical order). Keywords that are not run_batch options
        are also in alphabetical order at the end of the keyword list.
        """
        # FIX: mutable default argument ({}) replaced by None sentinel.
        info = {} if info is None else info
        kwarg_opts = self._run_batch_kwargs(spec, tid, info)
        # Override spec values if mistakenly included.
        allopts = dict(spec, **kwarg_opts)
        keywords = ', '.join(
            ['%s=%s' % (k, allopts[k])
             for k in sorted(kwarg_opts.keys()) + sorted(spec.keys())])
        run_batch_list = ["run_batch(%s,%s)" % (repr(self._typath), keywords)]
        topo_args = self._topo_args(['-a'])
        return [self.executable] + topo_args + ['-c', '; '.join(run_batch_list)]
class SideHistogramPlot(AdjoinedPlot, HistogramPlot):

    bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
        Make plot background invisible.""")

    offset = param.Number(default=0.2, bounds=(0, 1), doc="""
        Histogram value offset for a colorbar.""")

    show_grid = param.Boolean(default=False, doc="""
        Whether to overlay a grid on the axis.""")

    def _process_hist(self, hist):
        """
        Subclassed to offset histogram by defined amount.
        """
        edges, hvals, widths, lims = super(SideHistogramPlot,
                                           self)._process_hist(hist)
        offset = self.offset * lims[3]
        # Shrink bar values and shift them up so the offset region
        # (reserved for the colorbar) below remains visible.
        hvals *= 1 - self.offset
        hvals += offset
        lims = lims[0:3] + (lims[3] + offset, )
        return edges, hvals, widths, lims

    def _update_artists(self, n, element, edges, hvals, widths, lims, ranges):
        super(SideHistogramPlot, self)._update_artists(n, element, edges,
                                                       hvals, widths, lims,
                                                       ranges)
        self._update_plot(n, element, self.handles['artist'], lims, ranges)

    def _update_plot(self, key, element, bars, lims, ranges):
        """
        Process the bars and draw the offset line as necessary. If a
        color map is set in the style of the 'main' ViewableElement
        object, color the bars appropriately, respecting the required
        normalization settings.
        """
        main = self.adjoined.main
        _, y1 = element.range(1)
        offset = self.offset * y1
        range_item, main_range, dim = get_sideplot_ranges(self, element,
                                                          main, ranges)

        # Check if plot is colormapped
        plot_type = Store.registry['matplotlib'].get(type(range_item))
        opts = self.lookup_options(range_item, 'plot')
        if plot_type and issubclass(plot_type, ColorbarPlot):
            cidx = opts.options.get('color_index', None)
            cdim = None if cidx is None else range_item.get_dimension(cidx)
        else:
            cdim = None

        # Get colormapping options
        if isinstance(range_item, Raster) or cdim:
            style = self.lookup_options(range_item, 'style')[self.cyclic_index]
            cmap = cm.get_cmap(style.get('cmap'))
            # Explicit clims override the computed main range
            main_range = style.get('clims', main_range)
        else:
            cmap = None

        if offset and ('offset_line' not in self.handles):
            # First draw: create the separator line at the offset
            self.handles['offset_line'] = self.offset_linefn(offset,
                                                             linewidth=1.0,
                                                             color='k')
        elif offset:
            self._update_separator(offset)

        if cmap is not None:
            self._colorize_bars(cmap, bars, element, main_range, dim)
        return bars

    def _colorize_bars(self, cmap, bars, element, main_range, dim):
        """
        Use the given cmap to color the bars, applying the correct
        color ranges as necessary.
        """
        cmap_range = main_range[1] - main_range[0]
        lower_bound = main_range[0]
        colors = np.array(element.dimension_values(dim))
        # Normalize values into [0, 1] before mapping through the cmap
        colors = (colors - lower_bound) / (cmap_range)
        for c, bar in zip(colors, bars):
            bar.set_facecolor(cmap(c))
            bar.set_clip_on(False)

    def _update_separator(self, offset):
        """
        Compute colorbar offset and update separator line
        if map is non-zero.
        """
        offset_line = self.handles['offset_line']
        if offset == 0:
            offset_line.set_visible(False)
        else:
            offset_line.set_visible(True)
            if self.invert_axes:
                offset_line.set_xdata(offset)
            else:
                offset_line.set_ydata(offset)
class PathPlot(ColorbarPlot):

    aspect = param.Parameter(default='square', doc="""
        PathPlots axes usually define single space so aspect of Paths
        follows aspect in data coordinates by default.""")

    color_index = param.ClassSelector(default=None,
                                      class_=(util.basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")

    style_opts = ['alpha', 'color', 'linestyle', 'linewidth', 'visible',
                  'cmap']

    def get_data(self, element, ranges, style):
        # Resolve the color dimension from color_index or a dim-valued
        # 'color' style.
        cdim = element.get_dimension(self.color_index or style.get('color'))
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)

        # Scalar color per geometry means paths need not be split
        scalar = element.interface.isunique(element, cdim, per_geom=True) if cdim else False
        style_mapping = any(
            isinstance(v, util.arraylike_types) and not (k == 'c' and scalar)
            for k, v in style.items())
        dims = element.kdims
        xdim, ydim = dims
        generic_dt_format = Dimension.type_formatters[np.datetime64]
        paths, cvals, dims = [], [], {}
        for path in element.split(datatype='columns'):
            xarr, yarr = path[xdim.name], path[ydim.name]
            if util.isdatetime(xarr):
                dt_format = Dimension.type_formatters.get(type(xarr[0]),
                                                          generic_dt_format)
                xarr = date2num(xarr)
                dims[0] = xdim(value_format=DateFormatter(dt_format))
            if util.isdatetime(yarr):
                dt_format = Dimension.type_formatters.get(type(yarr[0]),
                                                          generic_dt_format)
                yarr = date2num(yarr)
                dims[1] = ydim(value_format=DateFormatter(dt_format))
            arr = np.column_stack([xarr, yarr])
            if not (self.color_index is not None or style_mapping):
                # No per-segment coloring: keep the whole path intact
                paths.append(arr)
                continue
            length = len(xarr)
            # Split each path into overlapping two-point segments so each
            # segment can be colored individually.
            for (s1, s2) in zip(range(length - 1), range(1, length + 1)):
                if cdim:
                    cvals.append(path[cdim.name])
                paths.append(arr[s1:s2 + 1])
        if self.invert_axes:
            paths = [p[::-1] for p in paths]
        if not (self.color_index or style_mapping):
            if cdim:
                style['array'] = style.pop('c')
                style['clim'] = style.pop('vmin', None), style.pop('vmax', None)
            return (paths,), style, {'dimensions': dims}
        if cdim:
            self._norm_kwargs(element, ranges, style, cdim)
            style['array'] = np.array(cvals)
        if 'c' in style:
            style['array'] = style.pop('c')
        if 'vmin' in style:
            style['clim'] = style.pop('vmin', None), style.pop('vmax', None)
        return (paths,), style, {'dimensions': dims}

    def init_artists(self, ax, plot_args, plot_kwargs):
        line_segments = LineCollection(*plot_args, **plot_kwargs)
        ax.add_collection(line_segments)
        return {'artist': line_segments}

    def update_handles(self, key, axis, element, ranges, style):
        # Update the LineCollection in place with new paths/colors.
        artist = self.handles['artist']
        data, style, axis_kwargs = self.get_data(element, ranges, style)
        artist.set_paths(data[0])
        if 'array' in style:
            artist.set_array(style['array'])
            artist.set_clim(style['clim'])
        if 'norm' in style:
            artist.set_norm(style['norm'])
        artist.set_visible(style.get('visible', True))
        if 'colors' in style:
            artist.set_edgecolors(style['colors'])
        if 'facecolors' in style:
            artist.set_facecolors(style['facecolors'])
        if 'linewidth' in style:
            artist.set_linewidths(style['linewidth'])
        return axis_kwargs
class PointPlot(ChartPlot, ColorbarPlot):
    """
    Note that the 'cmap', 'vmin' and 'vmax' style arguments control
    how point magnitudes are rendered to different colors.
    """

    color_index = param.ClassSelector(default=None,
                                      class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    size_index = param.ClassSelector(default=None,
                                     class_=(basestring, int),
                                     allow_None=True, doc="""
        Index of the dimension from which the sizes will the drawn.""")

    scaling_method = param.ObjectSelector(default="area",
                                          objects=["width", "area"], doc="""
        Determines whether the `scaling_factor` should be applied to
        the width or area of each point (default: "area").""")

    scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
        Scaling factor which is applied to either the width or area
        of each point, depending on the value of `scaling_method`.""")

    show_grid = param.Boolean(default=False, doc="""
        Whether to draw grid lines at the tick positions.""")

    size_fn = param.Callable(default=np.abs, doc="""
        Function applied to size values before applying scaling,
        to remove values lower than zero.""")

    style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
                  'linewidth', 'marker', 'size', 'visible',
                  'cmap', 'vmin', 'vmax', 'norm']

    _disabled_opts = ['size']

    _plot_methods = dict(single='scatter')

    def get_data(self, element, ranges, style):
        xs, ys = (element.dimension_values(i) for i in range(2))
        # Mutates style in place, adding 'c'/'s' entries as needed
        self._compute_styles(element, ranges, style)
        return (ys, xs) if self.invert_axes else (xs, ys), style, {}

    def _compute_styles(self, element, ranges, style):
        # Derive scatter color ('c') and size ('s') style entries from
        # the color_index/size_index dimensions.
        cdim = element.get_dimension(self.color_index)
        color = style.pop('color', None)
        cmap = style.get('cmap', None)
        if cdim and cmap:
            cs = element.dimension_values(self.color_index)
            # Check if numeric otherwise treat as categorical
            if cs.dtype.kind in 'if':
                style['c'] = cs
            else:
                # Map categorical values onto integer codes
                categories = np.unique(cs)
                xsorted = np.argsort(categories)
                ypos = np.searchsorted(categories[xsorted], cs)
                style['c'] = xsorted[ypos]
            self._norm_kwargs(element, ranges, style, cdim)
        elif color:
            style['c'] = color
        style['edgecolors'] = style.pop('edgecolors',
                                        style.pop('edgecolor', 'none'))

        sdim = element.get_dimension(self.size_index)
        if sdim:
            sizes = element.dimension_values(self.size_index)
            ms = style['s'] if 's' in style else mpl.rcParams['lines.markersize']
            sizes = compute_sizes(sizes, self.size_fn, self.scaling_factor,
                                  self.scaling_method, ms)
            if sizes is None:
                # Non-numeric size dimension cannot be scaled
                eltype = type(element).__name__
                self.warning('%s dimension is not numeric, cannot '
                             'use to scale %s size.' % (sdim.pprint_label,
                                                        eltype))
            else:
                style['s'] = sizes
        style['edgecolors'] = style.pop('edgecolors', 'none')

    def update_handles(self, key, axis, element, ranges, style):
        # Update the existing scatter artist (PathCollection) in place.
        paths = self.handles['artist']
        (xs, ys), style, _ = self.get_data(element, ranges, style)
        paths.set_offsets(np.column_stack([xs, ys]))
        sdim = element.get_dimension(self.size_index)
        if sdim:
            paths.set_sizes(style['s'])

        cdim = element.get_dimension(self.color_index)
        if cdim:
            paths.set_clim((style['vmin'], style['vmax']))
            paths.set_array(style['c'])
            if 'norm' in style:
                paths.norm = style['norm']
class ImageBase(DivPaneBase):
    """
    Encodes an image as base64 and wraps it in a Bokeh Div model.
    This is an abstract base class that needs the image type
    to be specified and specific code for determining the image shape.

    The imgtype determines the filetype, extension, and MIME type for
    this image. Each image type (png,jpg,gif) has a base class that
    supports anything with a `_repr_X_` method (where X is `png`,
    `gif`, etc.), a local file with the given file extension, or a
    HTTP(S) url with the given extension. Subclasses of each type can
    provide their own way of obtaining or generating a PNG.
    """

    alt_text = param.String(default=None, doc="""
        alt text to add to the image tag. The alt text is shown when a
        user cannot load or display the image.""")

    link_url = param.String(default=None, doc="""
        A link URL to make the image clickable and link to some other
        website.""")

    embed = param.Boolean(default=True, doc="""
        Whether to embed the image as base64.""")

    # Placeholder; concrete subclasses set the real extension ('png', ...)
    imgtype = 'None'

    _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']

    _target_transforms = {'object': """'<img src="' + value + '"></img>'"""}

    __abstract = True

    @classmethod
    def applies(cls, obj):
        # Decide whether this pane type can render the given object.
        imgtype = cls.imgtype
        if hasattr(obj, '_repr_{}_'.format(imgtype)):
            return True
        if isinstance(obj, string_types):
            if isfile(obj) and obj.endswith('.' + imgtype):
                return True
            if isurl(obj, [cls.imgtype]):
                return True
            elif isurl(obj, None):
                # NOTE(review): returns 0 (falsy, but not False) for a URL
                # without a matching extension — presumably a low-priority
                # match in the pane resolution; confirm against DivPaneBase.
                return 0
        if hasattr(obj, 'read'):  # Check for file like object
            return True
        return False

    def _type_error(self, object):
        if isinstance(object, string_types):
            # Strings that are neither files nor URLs are rejected here
            raise ValueError("%s pane cannot parse string that is not a filename "
                             "or URL." % type(self).__name__)
        super(ImageBase, self)._type_error(object)

    def _img(self):
        # Obtain raw image data, trying in order: a _repr_<type>_ hook,
        # a local file path, a file-like object, then an HTTP(S) URL.
        if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):
            return getattr(self.object, '_repr_' + self.imgtype + '_')()
        if isinstance(self.object, string_types):
            if isfile(self.object):
                with open(self.object, 'rb') as f:
                    return f.read()
        if hasattr(self.object, 'read'):
            return self.object.read()
        if isurl(self.object, None):
            import requests
            r = requests.request(url=self.object, method='GET')
            return r.content

    def _b64(self):
        # Return the image as a data URI.
        data = self._img()
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        b64 = base64.b64encode(data).decode("utf-8")
        return "data:image/" + self.imgtype + f";base64,{b64}"

    def _imgshape(self, data):
        """Calculate and return image width,height"""
        raise NotImplementedError

    def _get_properties(self):
        # Build the Div properties dict, including the rendered <img> tag.
        p = super(ImageBase, self)._get_properties()
        if self.object is None:
            return dict(p, text='<img></img>')
        data = self._img()
        if not isinstance(data, bytes):
            # Non-bytes data is assumed to be base64-encoded text
            data = base64.b64decode(data)
        width, height = self._imgshape(data)
        if self.width is not None:
            if self.height is None:
                # Preserve aspect ratio when only width is given
                height = int((self.width / width) * height)
            else:
                height = self.height
            width = self.width
        elif self.height is not None:
            width = int((self.height / height) * width)
            height = self.height
        if not self.embed:
            src = self.object
        else:
            b64 = base64.b64encode(data).decode("utf-8")
            src = "data:image/" + self.imgtype + ";base64,{b64}".format(b64=b64)

        # Translate sizing_mode into CSS width/height values
        smode = self.sizing_mode
        if smode in ['fixed', None]:
            w, h = '%spx' % width, '%spx' % height
        elif smode == 'stretch_both':
            w, h = '100%', '100%'
        elif smode == 'stretch_width':
            w, h = '%spx' % width, '100%'
        elif smode == 'stretch_height':
            w, h = '100%', '%spx' % height
        elif smode == 'scale_height':
            w, h = 'auto', '100%'
        else:
            w, h = '100%', 'auto'

        html = '<img src="{src}" width="{width}" height="{height}" alt="{alt}"></img>'.format(
            src=src, width=w, height=h, alt=self.alt_text or '')

        if self.link_url:
            # Wrap the image in an anchor tag to make it clickable
            html = '<a href="{url}" target="_blank">{html}</a>'.format(
                url=self.link_url, html=html)

        return dict(p, width=width, height=height, text=escape(html))
class VectorFieldPlot(ColorbarPlot):
    """
    Renders vector fields in sheet coordinates. The vectors are
    expressed in polar coordinates and may be displayed according to
    angle alone (with some common, arbitrary arrow length) or may be
    true polar vectors.

    The color or magnitude can be mapped onto any dimension using the
    color_index and size_index.

    The length of the arrows is controlled by the 'scale' style
    option. The scaling of the arrows may also be controlled via the
    normalize_lengths and rescale_lengths plot option, which will
    normalize the lengths to a maximum of 1 and scale them according
    to the minimum distance respectively.
    """

    color_index = param.ClassSelector(default=None,
                                      class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    size_index = param.ClassSelector(default=None,
                                     class_=(basestring, int),
                                     allow_None=True, doc="""
        Index of the dimension from which the sizes will the drawn.""")

    arrow_heads = param.Boolean(default=True, doc="""
        Whether or not to draw arrow heads. If arrowheads are enabled,
        they may be customized with the 'headlength' and
        'headaxislength' style options.""")

    normalize_lengths = param.Boolean(default=True, doc="""
        Whether to normalize vector magnitudes automatically. If False,
        it will be assumed that the lengths have already been correctly
        normalized.""")

    rescale_lengths = param.Boolean(default=True, doc="""
        Whether the lengths will be rescaled to take into account the
        smallest non-zero distance between two vectors.""")

    style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
                  'linewidth', 'marker', 'visible', 'cmap',
                  'scale', 'headlength', 'headaxislength', 'pivot',
                  'width', 'headwidth', 'norm']

    _plot_methods = dict(single='quiver')

    def __init__(self, *args, **params):
        super(VectorFieldPlot, self).__init__(*args, **params)
        # Cache the minimum sample distance over all frames for rescaling
        self._min_dist = self._get_map_info(self.hmap)

    def _get_map_info(self, vmap):
        """
        Get the minimum sample distance and maximum magnitude
        """
        return np.min([get_min_distance(vfield) for vfield in vmap])

    def get_data(self, element, ranges, style):
        # Assemble quiver arguments: (xs, ys, magnitudes, zeros[, colors])
        input_scale = style.pop('scale', 1.0)

        xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
        xs = element.dimension_values(xidx) if len(element.data) else []
        ys = element.dimension_values(yidx) if len(element.data) else []
        radians = element.dimension_values(2) if len(element.data) else []
        if self.invert_axes:
            # Rotate angles by 270 degrees when axes are inverted
            radians = radians + 1.5 * np.pi
        angles = list(np.rad2deg(radians))
        if self.rescale_lengths:
            # Scale arrows relative to the minimum sample distance
            input_scale = input_scale / self._min_dist

        mag_dim = element.get_dimension(self.size_index)
        if mag_dim:
            magnitudes = element.dimension_values(mag_dim)
            _, max_magnitude = ranges[mag_dim.name]
            if self.normalize_lengths and max_magnitude != 0:
                magnitudes = magnitudes / max_magnitude
        else:
            # No size dimension: draw unit-length arrows
            magnitudes = np.ones(len(xs))

        args = (xs, ys, magnitudes, [0.0] * len(element))
        if self.color_index:
            colors = element.dimension_values(self.color_index)
            args += (colors, )
            cdim = element.get_dimension(self.color_index)
            self._norm_kwargs(element, ranges, style, cdim)
            style['clim'] = (style.pop('vmin'), style.pop('vmax'))
            style.pop('color', None)

        if 'pivot' not in style:
            style['pivot'] = 'mid'
        if not self.arrow_heads:
            style['headaxislength'] = 0
        style.update(dict(scale=input_scale, angles=angles,
                          units='x', scale_units='x'))

        return args, style, {}

    def update_handles(self, key, axis, element, ranges, style):
        args, style, axis_kwargs = self.get_data(element, ranges, style)

        # Set magnitudes, angles and colors if supplied.
        quiver = self.handles['artist']
        quiver.set_offsets(np.column_stack(args[:2]))
        quiver.U = args[2]
        quiver.angles = style['angles']
        if self.color_index:
            quiver.set_array(args[-1])
            quiver.set_clim(style['clim'])
        return axis_kwargs
class DistributionInterventions(pm.Parameterized):
    """
    Interactive model of interventions on an 'Impact Hours' distribution.

    Wraps a DataFrame with an 'Impact Hours' column and exposes parameters
    for adding a constant UBI amount to every hatcher and for restricting
    the analysis to the top fraction of hatchers, along with Panel/HoloViews
    views comparing the resulting distribution to a Pareto reference.
    """

    # Whether to add the constant `ubi` amount to every row's Impact Hours.
    apply_constant_ubi = pm.Boolean(False)
    # Fraction of hatchers (taken from the top of the data) kept by filtered_data().
    top_percent_hatchers = pm.Number(0.5, bounds=(0, 1), step=0.01)
    # Constant amount added to each row when apply_constant_ubi is True.
    ubi = pm.Number(5, bounds=(0, 100), step=1)
    # Shape parameter of the reference Pareto distribution.
    pareto_beta = pm.Number(0.4, bounds=(0, 1), step=0.01, precedence=-1)

    def __init__(self, data, **params):
        """
        data: DataFrame with an 'Impact Hours' column; assumed sorted in
        descending order of Impact Hours (TODO confirm with callers, since
        filtered_data() takes the *leading* rows as the "top" hatchers).
        """
        super(DistributionInterventions, self).__init__(**params)
        # Keep a pristine copy so add_ubi() can always recompute from scratch.
        self.original_data = data
        self.data = data.copy()
        self.add_ubi()

    @pm.depends('ubi', 'apply_constant_ubi', watch=True)
    def add_ubi(self):
        """Recompute 'Impact Hours' from the originals, adding UBI if enabled."""
        if self.apply_constant_ubi:
            self.data[
                'Impact Hours'] = self.original_data['Impact Hours'] + self.ubi
        else:
            self.data['Impact Hours'] = self.original_data['Impact Hours']

    def filtered_data(self):
        """
        Return the top `top_percent_hatchers` fraction of rows, with a
        '% of distribution' column giving each row's share of the filtered
        Impact Hours total.
        """
        # .copy() avoids pandas' SettingWithCopyWarning: assigning a new
        # column onto an .iloc slice view would be chained assignment.
        data = self.data.iloc[:round(
            len(self.data) * self.top_percent_hatchers)].copy()
        data['% of distribution'] = data['Impact Hours'] / data[
            'Impact Hours'].sum()
        return data

    def total_impact_hours(self):
        """Panel summarizing the filtered Impact Hours totals."""
        # Compute the filtered frame once instead of three times.
        filtered = self.filtered_data()
        filtered_sum = filtered['Impact Hours'].sum()
        return pn.Column(
            pn.Row(
                pn.Column(
                    "Filtered Impact Hours:",
                    round(filtered_sum, 2),
                ),
                pn.Column(
                    "Percent of total Impact Hours:",
                    round(filtered_sum / self.data['Impact Hours'].sum(), 2),
                )),
            "Summary:",
            filtered['Impact Hours'].describe(),
        )

    def percent_line(self):
        """Vertical marker at the top_percent_hatchers cutoff index."""
        # NOTE(review): hv.VLine may not accept a 'color' constructor kwarg
        # in all holoviews versions; the .opts(color='red') is what actually
        # styles the line -- verify against the pinned holoviews release.
        return hv.VLine(len(self.data) * self.top_percent_hatchers,
                        color='red').opts(hv.opts.VLine(color='red'))

    def distribution(self):
        """Area + line plot of the Impact Hours distribution with cutoff marker."""
        return (self.augmented_data().hvplot.area(
            y='Impact Hours', title='Impact Hours Distribution', height=320) *
                self.data.hvplot.line(y='Impact Hours',
                                      title='Impact Hours Distribution') *
                self.percent_line()).opts(shared_axes=False)

    def cum_dist(self, val):
        """Empirical CDF: P(Impact Hours < val) over the augmented data."""
        # Proportions fall out of averaging boolean values.
        prob_lt_val = (self.augmented_data()['Impact Hours'] < val).mean()
        return prob_lt_val

    def filtered_pareto(self):
        """Plot the empirical CDF against a Pareto(pareto_beta) CDF."""
        pct_values = np.arange(self.filtered_data()['Impact Hours'].min(),
                               self.augmented_data()['Impact Hours'].max())
        cum_dist_values = [self.cum_dist(p) for p in pct_values]
        pareto_rv = ss.pareto(self.pareto_beta)
        # NOTE(review): the Pareto CDF is evaluated at the integer index,
        # not at the corresponding pct_value -- confirm this is intentional.
        pareto = [pareto_rv.cdf(p) for p in range(len(pct_values))]
        distributions = pd.DataFrame(
            zip(cum_dist_values, pareto),
            columns=[
                'IH Cumulative Distribution',
                f'Pareto Distribution beta={self.pareto_beta}'
            ])
        return distributions.hvplot.line().opts(
            hv.opts.VLine(color='red')).opts(shared_axes=False)

    def augmented_data(self):
        """Hook point for subclasses; currently just the filtered data."""
        return self.filtered_data()

    def resources_percentage(self, p):
        """Share of total Impact Hours held above the p-th percentile."""
        data = self.augmented_data()
        # Re-append the rows beyond the filter so percentiles cover everyone.
        data = pd.concat([data, self.data.iloc[len(data):]])
        relevant_percentile = np.percentile(data['Impact Hours'], p)
        is_gt_relevant_percentile = data['Impact Hours'] > relevant_percentile
        filtered_data = data[is_gt_relevant_percentile]
        filtered_hours = filtered_data['Impact Hours']
        pct_hours = filtered_hours.sum() / data['Impact Hours'].sum()
        return pct_hours

    def view_resources_percentage(self):
        """One-row table: hours held by the top 1%, 10% and 50% of hatchers."""
        message = {}
        for p in [99, 90, 50]:
            message["Top {}% Hatchers".format(100 - p)] = "{}%".format(
                round(self.resources_percentage(p) * 100, 2))
        return pd.DataFrame(message, index=["Hold"])

    def gini_coefficient(self):
        """
        Gini coefficient of the augmented Impact Hours distribution,
        via the mean-absolute-difference formula (O(n^2) pairwise sums;
        acceptable for the dataset sizes used here).
        """
        x = self.augmented_data()['Impact Hours'].values
        n = len(x)
        x_bar = np.mean(x)
        abs_diffs = np.array([np.sum(np.abs(x[i] - x)) for i in range(n)])
        sum_abs_diffs = np.sum(abs_diffs)
        denominator = 2 * n * n * x_bar
        return sum_abs_diffs / denominator

    def view_gini_coefficient(self):
        """Human-readable Gini coefficient string."""
        g = self.gini_coefficient()
        return f"GINI Coefficient of filtered data: {g}"

    def view_data(self):
        """Expose the augmented data for inspection."""
        return self.augmented_data()
class run_batch(ParameterizedFunction):
    """
    Run a Topographica simulation in batch mode.

    Features:

      - Generates a unique, well-defined name for each
        'experiment' (i.e. simulation run) based on the date, script
        file, and parameter settings. Note that very long names may
        be truncated (see the max_name_length parameter).

      - Allows parameters to be varied on the command-line,
        to allow comparing various settings

      - Saves a script capturing the simulation state periodically,
        to preserve parameter values from old experiments and to allow
        them to be reproduced exactly later

      - Can perform user-specified analysis routines periodically,
        to monitor the simulation as it progresses.

      - Stores commandline output (stdout) in the output directory

    A typical use of this function is for remote execution of a large
    number of simulations with different parameters, often on remote
    machines (such as clusters).

    The script_file parameter defines the .ty script we want to run in
    batch mode. The output_directory defines the root directory in
    which a unique individual directory will be created for this
    particular run.  The optional analysis_fn can be any python
    function to be called at each of the simulation iterations defined
    in the analysis times list.  The analysis_fn should perform
    whatever analysis of the simulation you want to perform, such as
    plotting or calculating some statistics.  The analysis_fn should
    avoid using any GUI functions (i.e., should not import anything
    from topo.tkgui), and it should save all of its results into
    files.

    As a special case, a number can be passed for the times list, in
    which case it is used to scale a default list of times up to
    10000; e.g. times=2 will select a default list of times up to
    20000.  Alternatively, an explicit list of times can be supplied.

    Any other optional parameters supplied will be set in the main
    namespace before any scripts are run.  They will also be used to
    construct a unique topo.sim.name for the file, and they will be
    encoded into the simulation directory name, to make it clear how
    each simulation differs from the others.

    If requested by setting snapshot=True, saves a snapshot at the
    end of the simulation.

    If available and requested by setting vc_info=True, prints
    the revision number and any outstanding diffs from the version
    control system.

    Note that this function alters param.normalize_path.prefix so
    that all output goes into the same location. The original value
    of param.normalize_path.prefix is deliberately not restored at
    the end of the function so that the output of any subsequent
    commands will go into the same place.
    """

    # Root directory under which each run's unique directory is created.
    output_directory = param.String("Output")

    # Callable invoked at each entry in `times` for plotting/statistics.
    analysis_fn = param.Callable(default_analysis_function)

    # Either an explicit list of simulation times, or a scalar that
    # scales the default schedule (see class docstring).
    times = param.Parameter(1.0)

    # Whether to save a full simulation snapshot at the end of the run.
    snapshot = param.Boolean(True)

    # Whether to record version-control revision/diff info.
    vc_info = param.Boolean(True)

    dirname_prefix = param.String(default="", doc="""
        Optional prefix for the directory name (allowing e.g. easy
        grouping).""")

    tag = param.String(default="", doc="""
        Optional tag to embed in directory prefix to allow unique
        directory naming across multiple independent batches that
        share a common timestamp.""")

    # CB: do any platforms also have a maximum total path length?
    max_name_length = param.Number(default=200, doc="""
        The experiment's directory name will be truncated at this
        number of characters (since most filesystems have a
        limit).""")

    name_time_format = param.String(default="%Y%m%d%H%M", doc="""
        String format for the time included in the output directory
        and file names.  See the Python time module library
        documentation for codes.

        E.g. Adding '%S' to the default would include seconds.""")

    timestamp = param.NumericTuple(default=(0, 0), doc="""
        Optional override of timestamp in Python struct_time 8-tuple
        format.  Useful when running many run_batch commands as part
        of a group with a shared timestamp. By default, the timestamp
        used is the time when run_batch is started.""")

    save_global_params = param.Boolean(default=True, doc="""
        Whether to save the script's global_parameters to a pickle in
        the output_directory after the script has been loaded (for
        e.g. future inspection of the experiment).""")

    dirname_params_filter = param.Callable(param_formatter.instance(), doc="""
        Function to control how the parameter names will appear in
        the output_directory's name.""")

    metadata_dir = param.String(doc="""Specifies the name of a
        subdirectory used to output metadata from run_batch (if set).""")

    compress_metadata = param.ObjectSelector(
        default=None,
        objects=[None, 'tar.gz', 'zip'],
        doc="""
        If not None and a metadata directory is specified, the
        metadata directory will be replaced by either a tar.gz file
        or a .zip file.""")

    save_script_repr = param.ObjectSelector(
        default='first',
        objects=[None, 'first', 'last', 'all'],
        doc="""
        Whether to save a script_repr and if so, how often. If set to
        'first', the script_repr is saved on the first time value, if
        set to 'last' then it will be saved on the last time value.
        If set to 'all' then a script repr is saved for all time
        values. Saving is disabled entirely if set to None.""")

    progress_bar = param.String(default='stdout', doc="""
        The display mode for the progress bar. By default, the
        progress of run_batch is displayed using standard output but
        may also be set to 'disabled' as necessary.""")

    progress_interval = param.Number(default=100, doc="""
        Interval between updates of the progress bar (if enabled) in
        units of topo.sim.time.""")

    def _truncate(self, p, s):
        """
        If s is greater than the max_name_length parameter, truncate it
        (and indicate that it has been truncated).
        """
        # '___' at the end is supposed to represent '...'
        return s if len(s) <= p.max_name_length else s[0:p.max_name_length - 3] + '___'

    def __call__(self, script_file, **params_to_override):
        """
        Run `script_file` in batch mode; extra keywords become global
        parameters for the script and are encoded into the output
        directory name.  Python 2 code (print statements, execfile).
        Side effects: mutates normalize_path.prefix and replaces
        sys.stdout for the duration of the run.
        """
        # Extra keywords are allowed: they become the script's global params.
        p = ParamOverrides(self, params_to_override, allow_extra_keywords=True)
        import os
        import shutil

        # Construct simulation name, etc.
        # NOTE(review): the '.' in '.ty$' is an unescaped regex metachar,
        # so any character followed by 'ty' at the end is stripped
        # (e.g. 'foo_sty' -> 'foo_'); left as-is.
        scriptbase = re.sub('.ty$', '', os.path.basename(script_file))
        prefix = ""
        # Use the shared timestamp override if one was supplied, else "now".
        if p.timestamp == (0, 0):
            prefix += time.strftime(p.name_time_format)
        else:
            prefix += time.strftime(p.name_time_format, p.timestamp)
        prefix += "_" + scriptbase + "_" + p.tag
        simname = prefix

        # Construct parameter-value portion of filename; should do more filtering
        # CBENHANCEMENT: should provide chance for user to specify a
        # function (i.e. make this a function, and have a parameter to
        # allow the function to be overridden).
        # And sort by name by default?  Skip ones that aren't different
        # from default, or at least put them at the end?
        prefix += p.dirname_params_filter(p.extra_keywords())

        # Set provided parameter values in main namespace
        from topo.misc.commandline import global_params
        global_params.set_in_context(**p.extra_keywords())

        # Create output directories
        if not os.path.isdir(normalize_path(p.output_directory)):
            try:
                os.mkdir(normalize_path(p.output_directory))
            except OSError:
                pass  # Catches potential race condition (simultaneous run_batch runs)

        dirname = self._truncate(p, p.dirname_prefix + prefix)
        dirpath = normalize_path(os.path.join(p.output_directory, dirname))

        # All normalize_path()-relative output now lands in this run's directory
        # (deliberately not restored afterwards -- see class docstring).
        normalize_path.prefix = dirpath
        metadata_dir = os.path.join(normalize_path.prefix, p.metadata_dir)
        simpath = os.path.join(metadata_dir, simname)

        # Refuse to overwrite an existing run directory (timestamp has
        # minute resolution, hence the "wait one minute" advice).
        if os.path.isdir(normalize_path.prefix):
            print "Batch run: Warning -- directory already exists!"
            print "Run aborted; wait one minute before trying again, or else rename existing directory: \n" + \
                  normalize_path.prefix

            sys.exit(-1)
        else:
            os.makedirs(metadata_dir)
            print "Batch run output will be in " + normalize_path.prefix

        if p.vc_info:
            _print_vc_info(simpath + ".diffs")

        # Provenance strings echoed into the captured stdout below.
        hostinfo = "Host: " + " ".join(platform.uname())
        topographicalocation = "Topographica: " + os.path.abspath(sys.argv[0])
        topolocation = "topo package: " + os.path.abspath(topo.__file__)
        scriptlocation = "script: " + os.path.abspath(script_file)

        starttime = time.time()
        startnote = "Batch run started at %s." % time.strftime(
            "%a %d %b %Y %H:%M:%S +0000", time.gmtime())

        # store a re-runnable copy of the command used to start this batch run
        try:
            # pipes.quote is undocumented, so I'm not sure which
            # versions of python include it (I checked python 2.6 and
            # 2.7 on linux; they both have it).
            import pipes
            quotefn = pipes.quote
        except (ImportError, AttributeError):
            # command will need a human to insert quotes before it can be re-used
            quotefn = lambda x: x

        command_used_to_start = string.join([quotefn(arg) for arg in sys.argv])

        # CBENHANCEMENT: would be nice to separately write out a
        # runnable script that does everything necessary to
        # re-generate results (applies diffs etc).

        # Shadow stdout to a .out file in the output directory, so that
        # print statements will go to both the file and to stdout.
        batch_output = open(normalize_path(simpath + ".out"), 'w')
        batch_output.write(command_used_to_start + "\n")
        sys.stdout = MultiFile(batch_output, sys.stdout)

        print
        print hostinfo
        print topographicalocation
        print topolocation
        print scriptlocation
        print
        print startnote

        from topo.misc.commandline import auto_import_commands
        auto_import_commands()

        # Ensure that saved state includes all parameter values
        from topo.command import save_script_repr
        param.parameterized.script_repr_suppress_defaults = False

        # Save a copy of the script file for reference
        # NOTE(review): assumes the copy lands as '<scriptbase>.ty' directly
        # under normalize_path.prefix, then moves it into metadata_dir.
        shutil.copy2(script_file, normalize_path.prefix)
        shutil.move(normalize_path(scriptbase + ".ty"),
                    normalize_path(simpath + ".ty"))

        # Default case: times is just a number that scales a standard list of times
        times = p.times
        if not isinstance(times, list):
            times = [
                t * times
                for t in [0, 50, 100, 500, 1000, 2000, 3000, 4000, 5000, 10000]
            ]

        # Run script in main
        error_count = 0
        initial_warning_count = param.parameterized.warning_count

        try:
            execfile(script_file, __main__.__dict__)  #global_params.context

            global_params.check_for_unused_names()
            if p.save_global_params:
                # Pickle the script's global parameters for later inspection.
                _save_parameters(p.extra_keywords(),
                                 simpath + ".global_params.pickle")
            print_sizes()
            topo.sim.name = simname

            from dataviews.ipython.widgets import ProgressBar, RunProgress
            import numpy as np
            ProgressBar.display = p.progress_bar
            progress_bar = RunProgress(run_hook=topo.sim.run,
                                       display=p.progress_bar,
                                       interval=p.progress_interval)

            # Map each requested time onto a 0-100% completion range so the
            # progress bar spans the whole run; degenerate single-time case
            # gets the full range.
            if len(set(times)) == 1:
                completion = [0, 100]
            else:
                times = np.array(times)
                completion = 100 * (times - times.min()) / (times.max() -
                                                            times.min())
                completion = np.array([0] + list(completion))

            # Run each segment, doing the analysis and saving the script state each time
            for i, run_to in enumerate(times):
                progress_bar.percent_range = (completion[i], completion[i + 1])
                progress_bar(run_to - topo.sim.time())
                p.analysis_fn()
                # script_repr output goes into the metadata directory;
                # prefix is restored for the analysis output afterwards.
                normalize_path.prefix = metadata_dir
                if p.save_script_repr == 'first' and run_to == times[0]:
                    save_script_repr()
                elif p.save_script_repr == 'last' and (run_to == times[-1]):
                    save_script_repr()
                elif p.save_script_repr == 'all':
                    save_script_repr()
                normalize_path.prefix = dirpath

            elapsedtime = time.time() - starttime
            param.Parameterized(name="run_batch").message(
                "Elapsed real time %02d:%02d." %
                (int(elapsedtime / 60), int(elapsedtime % 60)))

            if p.snapshot:
                save_snapshot()

        except:
            # Bare except is deliberate here: any failure in the user script
            # is reported and counted, and the batch wrap-up still runs.
            error_count += 1
            import traceback
            traceback.print_exc(file=sys.stdout)
            sys.stderr.write("Warning -- Error detected: execution halted.\n")

        # Optionally replace the metadata directory with an archive.
        if p.metadata_dir != '' and p.compress_metadata == 'tar.gz':
            _, name = os.path.split(metadata_dir)
            tar = tarfile.open(normalize_path("%s.tar.gz" % name), "w:gz")
            tar.add(metadata_dir, arcname=name)
            tar.close()
            shutil.rmtree(metadata_dir)
        elif p.metadata_dir != '' and p.compress_metadata == 'zip':
            _, name = os.path.split(metadata_dir)
            zipf = zipfile.ZipFile(normalize_path("%s.zip" % name), 'w')
            # NOTE(review): the directory entry uses arcname `name` while the
            # file entries use p.metadata_dir as their prefix; these are the
            # same string only when metadata_dir has no path separators --
            # verify intended archive layout.
            zipf.write(metadata_dir, arcname=name)
            for f in os.listdir(metadata_dir):
                zipf.write(os.path.join(metadata_dir, f),
                           os.path.join(p.metadata_dir, f))
            zipf.close()
            shutil.rmtree(metadata_dir)

        print "\nBatch run completed at %s." % time.strftime(
            "%a %d %b %Y %H:%M:%S +0000", time.gmtime())
        print "There were %d error(s) and %d warning(s)%s." % \
              (error_count,(param.parameterized.warning_count-initial_warning_count),
               ((" (plus %d warning(s) prior to entering run_batch)"%initial_warning_count if initial_warning_count>0 else "")))

        # restore stdout
        sys.stdout = sys.__stdout__
        batch_output.close()