def __call__(self, parameterized, plots=[], **params):
    self.p = param.ParamOverrides(self, params)
    if self.p.initializer:
        self.p.initializer(parameterized)

    self._id = uuid.uuid4().hex
    self._widgets = {}
    self.parameterized = parameterized

    widgets, views = self.widgets()
    layout = ipywidgets.Layout(display='flex', flex_flow=self.p.layout)
    if self.p.close_button:
        layout.border = 'solid 1px'

    widget_box = ipywidgets.VBox(children=widgets, layout=layout)
    plot_outputs = tuple(Output() for p in plots)
    if views or plots:
        outputs = tuple(views.values()) + plot_outputs
        view_box = ipywidgets.VBox(children=outputs, layout=layout)
        layout = self.p.view_position
        if layout in ['below', 'right']:
            children = [widget_box, view_box]
        else:
            children = [view_box, widget_box]
        box = ipywidgets.VBox if layout in ['below', 'above'] else ipywidgets.HBox
        widget_box = box(children=children)

    display(widget_box)
    self._widget_box = widget_box

    self._display_handles = {}
    # Render defined View parameters
    for pname, view in views.items():
        p_obj = self.parameterized.params(pname)
        value = getattr(self.parameterized, pname)
        if value is None:
            continue
        handle = self._update_trait(pname, p_obj.renderer(value))
        if handle:
            self._display_handles[pname] = handle

    # Render supplied plots
    for p, o in zip(plots, plot_outputs):
        with o:
            display(p)

    # Keeps track of changes between button presses
    self._changed = {}

    if self.p.on_init:
        self.execute()
def __call__(self, data, **params):
    p = param.ParamOverrides(self, params)

    if isinstance(data, HoloMap):
        ranges = {d.name: data.range(d) for d in data.dimensions()}
        data = data.clone({k: GridMatrix(self._process(p, v, ranges))
                           for k, v in data.items()}).collate()
        if p.overlay_dims:
            data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))
        return data
    elif isinstance(data, Element):
        data = self._process(p, data)
        return GridMatrix(data)
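# Hedged usage sketch: in HoloViews this operation is exposed as
# `gridmatrix`, which builds a GridMatrix of pairwise views from a
# multi-variate Dataset (or a HoloMap of them). The column names below
# are illustrative assumptions, not taken from the code above.
import numpy as np
import pandas as pd
import holoviews as hv
from holoviews.operation import gridmatrix

df = pd.DataFrame(np.random.randn(100, 3), columns=['a', 'b', 'c'])
grid = gridmatrix(hv.Dataset(df))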
def __call__(self, map_obj, **params):
    watch = params.pop('watch', True)
    self.p = param.ParamOverrides(self, params)
    callback = self._dynamic_operation(map_obj)
    streams = self._get_streams(map_obj, watch)
    if isinstance(map_obj, DynamicMap):
        dmap = map_obj.clone(callback=callback,
                             shared_data=self.p.shared_data,
                             streams=streams)
        if self.p.shared_data:
            # Iterate over items() so keys and values unpack correctly
            dmap.data = OrderedDict([(k, callback.callable(*k))
                                     for k, v in dmap.data.items()])
    else:
        dmap = self._make_dynamic(map_obj, callback, streams)
    return dmap
def __call__(self, nodes, edges, **params):
    p = param.ParamOverrides(self, params)

    # Convert graph into list of edge segments
    edges = _convert_graph_to_edge_segments(nodes, edges)

    # This is simply to let the work split out over multiple cores
    edge_batches = list(batches(edges, p.batch_size))

    # This gets the edges split into lots of small segments
    # Doing this inside a delayed function lowers the transmission overhead
    edge_segments = [resample_edges(batch, p.min_segment_length, p.max_segment_length)
                     for batch in edge_batches]

    for i in range(p.iterations):
        # Each step, the size of the 'blur' shrinks
        bandwidth = p.initial_bandwidth * p.decay**(i + 1) * p.accuracy

        # If it's this small, there won't be a change anyway
        if bandwidth < 2:
            break

        # Draw the density maps and combine them
        images = [draw_to_surface(segment, bandwidth, p.accuracy)
                  for segment in edge_segments]
        overall_image = sum(images)

        gradients = get_gradients(overall_image)

        # Move edges along the gradients and resample when necessary
        # This could include smoothing to adjust the amount a graph can change
        edge_segments = [advect_resample_all(gradients, segment,
                                             p.advect_iterations, p.accuracy,
                                             p.min_segment_length,
                                             p.max_segment_length)
                         for segment in edge_segments]

    # Do a final resample to a smaller size for nicer rendering
    edge_segments = [resample_edges(segment, p.min_segment_length, p.max_segment_length)
                     for segment in edge_segments]

    # Finally things can be sent for computation
    edge_segments = compute(*edge_segments)

    # Smooth out the graph
    for i in range(10):
        for batch in edge_segments:
            smooth(batch, p.tension)

    # Flatten things
    new_segs = []
    for batch in edge_segments:
        new_segs.extend(batch)

    # Convert list of edge segments to Pandas dataframe
    return _convert_edge_segments_to_dataframe(new_segs)
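# A usage sketch based on the released datashader equivalent of the
# operation above (assumption: datashader.bundling.hammer_bundle behaves
# like this code). Nodes carry x/y positions; edges name source/target
# node indices.
import pandas as pd
from datashader.bundling import hammer_bundle

nodes = pd.DataFrame({'x': [0.0, 1.0, 0.5], 'y': [0.0, 0.0, 1.0]})
edges = pd.DataFrame({'source': [0, 1], 'target': [2, 2]})

# Small settings keep the sketch cheap; the operation's defaults may differ.
bundled = hammer_bundle(nodes, edges, iterations=4, batch_size=1000)
# `bundled` holds x/y points forming one path per edge, with NaN rows
# separating consecutive paths.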
def download(self, catalog_id, file_path, dataset, **kwargs):
    p = param.ParamOverrides(self, kwargs)
    self.parameter = p.parameter
    self.end = pd.to_datetime(p.end)
    self.start = pd.to_datetime(p.start)
    self._catalog_id = catalog_id

    if dataset is None:
        dataset = 'station-' + catalog_id

    try:
        url = self.url
        logger.info('downloading data from %s' % url)
        data = pd.read_csv(url)

        if data.empty:
            raise ValueError('No Data Available')

        rename = {x: x.split()[0] for x in data.columns.tolist()}
        units = {x.split()[0]: x.split()[-1].strip('()').lower()
                 for x in data.columns.tolist()}
        data.rename(columns=rename, inplace=True)
        data = data.set_index('time')
        data.index = pd.to_datetime(data.index)
        # rename returns a new frame, so assign the result back;
        # otherwise the parameter column is never renamed
        data = data.rename(columns={self.parameter_code: self.parameter})

        file_path = os.path.join(file_path, self.BASE_PATH, self.service_name,
                                 dataset, '{0}.h5'.format(dataset))

        metadata = {
            'file_path': file_path,
            'file_format': 'timeseries-hdf5',
            'datatype': 'timeseries',
            'parameter': p.parameter,
            'unit': units[self.parameter_code],
            'service_id': 'svc://noaa:{}/{}'.format(self.service_name, catalog_id)
        }

        # save data to disk
        io = load_plugins('io', 'timeseries-hdf5')['timeseries-hdf5']
        io.write(file_path, data, metadata)
        del metadata['service_id']

        return metadata

    except HTTPError as error:
        if error.code == 500:
            raise ValueError('No Data Available')
        elif error.code == 400:
            raise ValueError('Bad Request')
        else:
            raise error
def __call__(self, nodes, edges, **params):
    """
    Convert a graph data structure into a path structure for plotting.

    Given a set of nodes (as a dataframe with a unique ID for each
    node) and a set of edges (as a dataframe with columns for the
    source and destination IDs of each edge), returns a dataframe with
    one path for each edge suitable for use with Datashader. The
    returned dataframe has columns for x and y location, with paths
    represented as successive points separated by a point with NaN as
    the x or y value.
    """
    p = param.ParamOverrides(self, params)
    edges, segment_class = _convert_graph_to_edge_segments(nodes, edges, p)
    return _convert_edge_segments_to_dataframe(edges, segment_class, p)
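# A usage sketch based on the datashader equivalent of the operation above
# (assumption: datashader.bundling.directly_connect_edges matches this
# code). Each edge becomes a straight two-point path separated by NaN rows,
# so the whole frame can be drawn with a single Canvas.line() call.
import pandas as pd
from datashader.bundling import directly_connect_edges

nodes = pd.DataFrame({'x': [0.0, 1.0, 0.5], 'y': [0.0, 0.0, 1.0]})
edges = pd.DataFrame({'source': [0, 1], 'target': [2, 2]})
paths = directly_connect_edges(nodes, edges)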
def __call__(self, nodes, edges=None, **params):
    p = param.ParamOverrides(self, params)

    np.random.seed(p.seed)

    df = nodes.copy()
    points = np.asarray(np.random.uniform(low=-115, high=-105, size=(len(df), 1)))
    pointsy = np.asarray(np.random.uniform(low=30, high=40, size=(len(df), 1)))
    df[p.x] = points[:, 0]
    df[p.y] = pointsy[:, 0]
    return df
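# Hedged usage sketch: the uniform ranges above look like lon/lat bounds,
# so this variant scatters nodes over roughly the western US. The callable
# name `random_layout` and the 'x'/'y' column defaults are assumptions for
# the operation above (the released datashader random_layout uses a unit
# square instead).
import pandas as pd

nodes = pd.DataFrame({'id': range(100)})
df = random_layout(nodes, seed=42)
assert df['x'].between(-115, -105).all() and df['y'].between(30, 40).all()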
def __call__(self, *args, **params):
    # Abort if IPython not found
    try:
        ip = params.pop('ip', None) or get_ipython() # noqa (get_ipython)
    except:
        return

    p = param.ParamOverrides(self, params)

    if hasattr(ip, 'kernel') and not self._loaded:
        # TODO: JLab extension and pyviz_comms should be changed
        #       to allow multiple cleanup comms to be registered
        JupyterCommManager.get_client_comm(self._process_comm_msg,
                                           "hv-extension-comm")
    _load_nb(p.inline)
    self._loaded = True
    Viewable._comm_manager = JupyterCommManager
def process_element(self, element, key, **params):
    """
    The process_element method allows a single element to be
    operated on given an externally supplied key.
    """
    if hasattr(self, 'p'):
        if self._allow_extra_keywords:
            extras = self.p._extract_extra_keywords(params)
            self.p._extra_keywords.update(extras)
            params = {k: v for k, v in params.items()
                      if k not in self.p._extra_keywords}
        # Validate the overrides before applying them
        self.p._check_params(params)
        self.p.update(params)
    else:
        self.p = param.ParamOverrides(self, params,
                                      allow_extra_keywords=self._allow_extra_keywords)
    return self._apply(element, key)
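# Hedged usage sketch: process_element lets a caller that manages its own
# keys (e.g. a DynamicMap callback) apply an operation one element at a
# time. The `contours` operation and random Image below are illustrative
# assumptions, not taken from the code above.
import numpy as np
import holoviews as hv
from holoviews.operation import contours

img = hv.Image(np.random.rand(10, 10))
result = contours.instance(levels=5).process_element(img, key=None)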
def __call__(self, **params):
    # Generates all channels, then returns the default channel
    p = param.ParamOverrides(self, params)

    params['xdensity'] = p.xdensity
    params['ydensity'] = p.ydensity
    params['bounds'] = p.bounds  # (not **p)
    for i in range(len(p.generators)):
        self._channel_data[i] = p.generators[i](**params)

    for c in self.channel_transforms:
        self._channel_data = c(self._channel_data)

    return sum(act for act in self._channel_data) / len(self._channel_data)
def __call__(self, skyplots, **params):
    self.p = param.ParamOverrides(self, params)

    pointer = hv.streams.PointerXY(x=0, y=0)
    cross_opts = dict(style={'line_width': 1, 'color': 'black'})
    cross_dmap = hv.DynamicMap(lambda x, y: (hv.VLine(x).opts(**cross_opts) *
                                             hv.HLine(y).opts(**cross_opts)),
                               streams=[pointer])
    plots = []
    for s in skyplots:
        if self.p.crosshair:
            plot = (s * cross_dmap).relabel(s.label)
        else:
            plot = s
        plots.append(plot)

    return hv.Layout(plots)
def __call__(self, *args, **params):
    resources = self._get_resources(args, params)

    ip = params.pop('ip', None)
    p = param.ParamOverrides(self, params)
    Store.display_formats = p.display_formats

    if 'html' not in p.display_formats and len(p.display_formats) > 1:
        msg = ('Output magic unable to control displayed format '
               'as IPython notebook uses fixed precedence '
               'between %r' % p.display_formats)
        display(HTML('<b>Warning</b>: %s' % msg))

    if not notebook_extension._loaded:
        ip = get_ipython() if ip is None else ip # noqa (get_ipython)
        param_ext.load_ipython_extension(ip, verbose=False)
        load_magics(ip)
        OutputMagic.initialize()
        set_display_hooks(ip)
        notebook_extension._loaded = True

    css = ''
    if p.width is not None:
        css += '<style>div.container { width: %s%% }</style>' % p.width
    if p.css:
        css += '<style>%s</style>' % p.css
    if css:
        display(HTML(css))

    resources = list(resources)
    if len(resources) == 0:
        return

    # Create a message for the logo (if shown)
    js_names = {'holoviews': 'HoloViewsJS'}  # resource : displayed name
    loaded = ', '.join(js_names[r] if r in js_names else r.capitalize() + 'JS'
                       for r in resources)

    load_hvjs(logo=p.logo, JS=('holoviews' in resources),
              message='%s successfully loaded in this cell.' % loaded)
    for r in [r for r in resources if r != 'holoviews']:
        Store.renderers[r].load_nb(inline=p.inline)

    if resources[-1] != 'holoviews':
        get_ipython().magic(u"output backend=%r" % resources[-1]) # noqa (get_ipython)
def __call__(self, **params_to_override):
    # Cache image to avoid channel_data being deleted before
    # channel-specific processing completes.
    p = param.ParamOverrides(self, params_to_override)
    if not (p.cache_image and (p._image is not None)):
        self._cached_average = super(FileImage, self).__call__(**params_to_override)

        self._channel_data = self._process_channels(p, **params_to_override)

        for c in self.channel_transforms:
            self._channel_data = c(self._channel_data)

        if p.cache_image is False:
            self._image = None

    return self._cached_average
def download(self, catalog_id, file_path, dataset, **kwargs):
    p = param.ParamOverrides(self, kwargs)

    bbox = listify(p.bbox)

    tile_indices = self._get_indices_from_bbox(*bbox, zoom_level=p.zoom_level)
    pixel_indices = self._get_indices_from_bbox(*bbox, zoom_level=p.zoom_level,
                                                as_pixels=True)
    tile_bbox = self._get_bbox_from_indices(*tile_indices,
                                            zoom_level=p.zoom_level)
    pixel_bbox = self._get_bbox_from_indices(*pixel_indices,
                                             zoom_level=p.zoom_level,
                                             from_pixels=True)

    if p.crop_to_bbox:
        upper_left_corner = tile_bbox[0], tile_bbox[3]
        crop_bbox = self._get_crop_bbox(pixel_indices, *upper_left_corner,
                                        zoom_level=p.zoom_level)
        adjusted_bbox = pixel_bbox
    else:
        crop_bbox = None
        adjusted_bbox = tile_bbox

    image_array = self._download_and_stitch_tiles(p.url, tile_indices, crop_bbox,
                                                  p.zoom_level, p.max_tiles)

    file_path = os.path.join(file_path, dataset + '.tiff')
    self._write_image_to_tif(image_array, adjusted_bbox, file_path)

    metadata = {
        'metadata': {'bbox': adjusted_bbox},
        'file_path': file_path,
        'file_format': 'raster-gdal',
        'datatype': 'image',
    }

    return metadata
def publish(self, **kwargs):
    p = param.ParamOverrides(self, kwargs)
    params = {'name': p.title, 'description': p.collection_description}
    resource_information_dict = self.gc.createResource(path='collection',
                                                       params=params)
    folder_creation_dict = self.gc.createFolder(
        parentId=resource_information_dict['_id'],
        name=p.folder_name,
        description=p.folder_description,
        parentType='collection')
    for dataset in p.dataset:
        dataset_metadata = get_metadata(dataset)[dataset]
        fpath = dataset_metadata['file_path']
        self.gc.uploadFileToFolder(folder_creation_dict['_id'], fpath)

    return resource_information_dict['_id']
def _build_specs(self, specs, kwargs, fp_precision):
    """
    Returns the specs, the remaining kwargs and whether or not the
    constructor was called with kwarg or explicit specs.
    """
    if specs is None:
        overrides = param.ParamOverrides(self, kwargs,
                                         allow_extra_keywords=True)
        extra_kwargs = overrides.extra_keywords()
        kwargs = dict([(k, v) for (k, v) in kwargs.items()
                       if k not in extra_kwargs])
        rounded_specs = list(self.round_floats([extra_kwargs], fp_precision))

        if extra_kwargs == {}:
            return [], kwargs, True
        else:
            return rounded_specs, kwargs, False

    return list(self.round_floats(specs, fp_precision)), kwargs, True
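# A minimal sketch of the float rounding this helper relies on: round_floats
# maps float values in each spec dict to a fixed precision. This
# reimplementation is an assumption for illustration (decimal places here;
# the original may round to significant figures instead).
def round_floats_sketch(specs, fp_precision=4):
    for spec in specs:
        yield {k: (round(v, fp_precision) if isinstance(v, float) else v)
               for k, v in spec.items()}

assert list(round_floats_sketch([{'a': 0.123456, 'b': 'x'}], 3)) == \
       [{'a': 0.123, 'b': 'x'}]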
def __call__(self, nodes, edges=None, **params):
    p = param.ParamOverrides(self, params)

    np.random.seed(p.seed)

    r = 0.5  # radius
    x0, y0 = 0.5, 0.5  # center of unit circle
    circumference = 2 * np.pi

    df = nodes.copy()
    if p.uniform:
        thetas = np.arange(circumference, step=circumference / len(df))
    else:
        thetas = np.asarray(np.random.random((len(df),))) * circumference

    df[p.x] = x0 + r * np.cos(thetas)
    df[p.y] = y0 + r * np.sin(thetas)
    return df
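# Usage sketch for the circular layout above, assuming it is exposed as
# `circular_layout` (as in datashader.layout) with default 'x'/'y' columns:
import numpy as np
import pandas as pd
from datashader.layout import circular_layout

nodes = pd.DataFrame({'id': range(8)})
df = circular_layout(nodes, uniform=True)
# Every node should sit on the radius-0.5 circle centred at (0.5, 0.5)
assert np.allclose(np.hypot(df['x'] - 0.5, df['y'] - 0.5), 0.5)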
def __call__(self, src, **params):
    self.p = param.ParamOverrides(self, params)
    dims, keys = unique_dimkeys(src)
    if isinstance(src, Layout) and not src.uniform:
        raise Exception("TreeOperation can only process uniform Layouts")

    if not dims:
        return self.process_element(src, None)
    else:
        dim_names = [d.name for d in dims]
        values = {}
        for key in keys:
            selection = src.select(**dict(zip(dim_names, key)))
            if not isinstance(selection, Layout):
                selection = Layout.from_values([selection])
            processed = self._process(selection, key)
            if isinstance(processed, list):
                processed = Layout.from_values(processed)
            values[key] = processed
    return Collator(values, kdims=dims)()
def __call__(self, element, **kwargs):
    params = dict(kwargs)
    for k, v in kwargs.items():
        if util.is_param_method(v, has_deps=True):
            params[k] = v()
        elif isinstance(v, param.Parameter) and isinstance(v.owner, param.Parameterized):
            params[k] = getattr(v.owner, v.name)

    self.p = param.ParamOverrides(self, params)
    if not self.p.dynamic:
        kwargs['dynamic'] = False
        if isinstance(element, HoloMap):
            # Backwards compatibility for key argument
            return element.clone([(k, self._apply(el, key=k))
                                  for k, el in element.items()])
        elif isinstance(element, ViewableElement):
            return self._apply(element)
    elif 'streams' not in kwargs:
        kwargs['streams'] = self.p.streams
    return element.apply(self, **kwargs)
def __call__(self, nodes, edges, **params):
    p = param.ParamOverrides(self, params)

    # Convert graph into sparse adjacency matrix and array of points
    points = _extract_points_from_nodes(nodes)
    matrix = _convert_edges_to_sparse_matrix(edges)

    if p.k is None:
        p.k = np.sqrt(1.0 / len(points))

    # the initial "temperature" is about .1 of domain area (=1x1)
    # this is the largest step allowed in the dynamics.
    temperature = 0.1

    # simple cooling scheme.
    # linearly step down by dt on each iteration so last iteration is size dt.
    cooling(matrix, points, temperature, p)

    # Return the nodes with updated positions
    return _merge_points_with_nodes(nodes, points)
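# Usage sketch, assuming the operation above is exposed as
# `forceatlas2_layout` (as in datashader.layout). Initial x/y positions
# come from a random layout; the force simulation then moves them.
import pandas as pd
from datashader.layout import forceatlas2_layout, random_layout

nodes = random_layout(pd.DataFrame({'id': range(4)}))
edges = pd.DataFrame({'source': [0, 0, 1, 2], 'target': [1, 2, 3, 3]})
laid_out = forceatlas2_layout(nodes, edges)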
def __call__(self, element, **params):
    self.p = param.ParamOverrides(self, params)

    if isinstance(element, ViewableElement):
        processed = self._process(element)
    elif isinstance(element, GridSpace):
        # Initialize an empty axis layout
        processed = GridSpace(None, label=element.label)
        # Populate the axis layout
        for pos, cell in element.items():
            processed[pos] = self(cell, **params)
    elif isinstance(element, HoloMap):
        mapped_items = [(k, self._process(el, key=k))
                        for k, el in element.items()]
        refval = mapped_items[0][1]
        processed = element.clone(mapped_items,
                                  group=refval.group,
                                  label=refval.label)
    else:
        raise ValueError("Cannot process type %r" % type(element).__name__)
    return processed
def __call__(self, parameterized, **params):
    self.p = param.ParamOverrides(self, params)
    if self.p.initializer:
        self.p.initializer(parameterized)

    self._widgets = {}
    self.parameterized = parameterized

    widgets, views = self.widgets()
    layout = ipywidgets.Layout(display='flex', flex_flow=self.p.layout)
    if self.p.close_button:
        layout.border = 'solid 1px'

    widget_box = ipywidgets.VBox(children=widgets, layout=layout)
    if views:
        view_box = ipywidgets.VBox(children=views, layout=layout)
        layout = self.p.view_position
        if layout in ['below', 'right']:
            children = [widget_box, view_box]
        else:
            children = [view_box, widget_box]
        box = ipywidgets.VBox if layout in ['below', 'above'] else ipywidgets.HBox
        widget_box = box(children=children)

    display(Javascript(WIDGET_JS))
    display(widget_box)
    self._widget_box = widget_box
    for view in views:
        p_obj = self.parameterized.params(view.name)
        value = getattr(self.parameterized, view.name)
        if value is not None:
            self._update_trait(view.name, p_obj.renderer(value))

    # Keeps track of changes between button presses
    self._changed = {}

    if self.p.on_init:
        self.execute()
def __call__(self, paths=[], **params_to_override):
    """
    Takes a single path string or a list of path strings and returns
    the corresponding version control information.
    """
    p = param.ParamOverrides(self, dict(params_to_override, paths=paths))

    if p.paths == []:
        raise Exception("No paths to version controlled repositories given.")

    paths = [p.paths] if isinstance(p.paths, str) else p.paths

    def _desc(path, ind):
        for vcs in p.commands.keys():
            if os.path.exists(os.path.join(path, vcs)):
                proc = subprocess.Popen(p.commands[vcs][ind],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, cwd=path)
                return str(proc.communicate()[0].decode()).strip()

    abspaths = [os.path.abspath(path) for path in paths]
    return {'vcs_versions': dict((path, _desc(path, 0)) for path in abspaths),
            'vcs_messages': dict((path, _desc(path, 1)) for path in abspaths),
            'vcs_diffs':    dict((path, _desc(path, 2)) for path in abspaths)}
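# Hedged usage sketch, assuming the function above is exposed as
# `vcs_metadata`: querying the repository containing the current working
# directory. The three dicts map each absolute path to its revision,
# latest commit message, and diff.
info = vcs_metadata(paths=['.'])
print(info['vcs_versions'])  # e.g. {'/abs/path/to/repo': '<revision id>'}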
def __call__(self, element, **params):
    self.p = param.ParamOverrides(self, params)
    dynamic = ((self.p.dynamic == 'default' and
                isinstance(element, DynamicMap)) or
               self.p.dynamic is True)

    if isinstance(element, GridSpace):
        # Initialize an empty axis layout
        grid_data = ((pos, self(cell, **params))
                     for pos, cell in element.items())
        processed = GridSpace(grid_data, label=element.label,
                              kdims=element.kdims)
    elif dynamic:
        from ..util import Dynamic
        streams = getattr(self.p, 'streams', [])
        processed = Dynamic(element, streams=streams,
                            operation=self, kwargs=params)
    elif isinstance(element, ViewableElement):
        processed = self._process(element)
    elif isinstance(element, DynamicMap):
        if any((not d.values) for d in element.kdims):
            raise ValueError('Applying a non-dynamic operation requires '
                             'all DynamicMap key dimensions to define '
                             'the sampling by specifying values.')
        samples = tuple(d.values for d in element.kdims)
        processed = self(element[samples], **params)
    elif isinstance(element, HoloMap):
        mapped_items = [(k, self._process(el, key=k))
                        for k, el in element.items()]
        processed = element.clone(mapped_items)
    else:
        raise ValueError("Cannot process type %r" % type(element).__name__)
    return processed
def download(self, catalog_id, file_path, dataset, **kwargs):
    p = param.ParamOverrides(self, kwargs)
    self.parameter = p.parameter
    self.end = pd.to_datetime(p.end)
    self.start = pd.to_datetime(p.start)
    self._catalog_entry = catalog_id

    if dataset is None:
        dataset = 'station-' + catalog_id

    # if end is None:
    #     end = pd.datetime.now().strftime('%Y-%m-%d')
    #
    # if start is None:
    #     start = pd.to_datetime(end) - pd.datetools.timedelta(days=30)
    #     start = start.strftime('%Y-%m-%d')

    file_path = os.path.join(file_path, BASE_PATH, self.service_name,
                             dataset, '{0}.h5'.format(dataset))

    metadata = {
        'file_path': file_path,
        'file_format': 'timeseries-hdf5',
        'datatype': DataType.TIMESERIES,
        'parameter': self.parameter,
        'unit': self._unit_map[self.parameter],
        'service_id': 'svc://ncdc:{}/{}'.format(self.service_name, catalog_id)
    }

    # save data to disk
    io = load_plugins('io', 'timeseries-hdf5')['timeseries-hdf5']
    io.write(file_path, self.data, metadata)
    del metadata['service_id']

    return metadata
def layout(self, element, **params):
    self.p = param.ParamOverrides(self, params)
    graph = {'nodes': [], 'links': []}
    self.computeNodeLinks(element, graph)
    self.computeNodeValues(graph)
    self.computeNodeDepths(graph)
    self.computeNodeBreadths(graph)
    self.computeLinkBreadths(graph)
    paths = self.computePaths(graph)

    node_data = []
    for node in graph['nodes']:
        node_data.append((np.mean([node['x0'], node['x1']]),
                          np.mean([node['y0'], node['y1']]),
                          node['index']) + tuple(node['values']))
    if element.nodes.ndims == 3:
        kdims = element.nodes.kdims
    elif element.nodes.ndims:
        kdims = element.node_type.kdims[:2] + element.nodes.kdims[-1:]
    else:
        kdims = element.node_type.kdims
    nodes = element.node_type(node_data, kdims=kdims, vdims=element.nodes.vdims)
    edges = element.edge_type(paths)
    return nodes, edges, graph
def __call__(self, *args, **params):
    imports = [(name, b) for name, b in self._backends.items()
               if name in args or params.get(name, False)]
    if not imports or 'matplotlib' not in Store.renderers:
        imports = imports + [('matplotlib', 'mpl')]
    args = list(args)
    for backend, imp in imports:
        try:
            __import__('holoviews.plotting.%s' % imp)
        except ImportError:
            if backend in args:
                args.pop(args.index(backend))
            if backend in params:
                params.pop(backend)
            self.warning("HoloViews %s backend could not be imported, "
                         "ensure %s is installed." % (backend, backend))
        finally:
            if backend == 'matplotlib' and not notebook_extension._loaded:
                svg_exporter = Store.renderers['matplotlib'].instance(holomap=None,
                                                                      fig='svg')
                holoviews.archive.exporters = [svg_exporter] + \
                                              holoviews.archive.exporters
            OutputMagic.allowed['backend'] = list_backends()
            OutputMagic.allowed['fig'] = list_formats('fig', backend)
            OutputMagic.allowed['holomap'] = list_formats('holomap', backend)

    resources = self._get_resources(args, params)

    ip = params.pop('ip', None)
    p = param.ParamOverrides(self, params)
    Store.display_formats = p.display_formats

    if 'html' not in p.display_formats and len(p.display_formats) > 1:
        msg = ('Output magic unable to control displayed format '
               'as IPython notebook uses fixed precedence '
               'between %r' % p.display_formats)
        display(HTML('<b>Warning</b>: %s' % msg))

    if not notebook_extension._loaded:
        ip = get_ipython() if ip is None else ip # noqa (get_ipython)
        param_ext.load_ipython_extension(ip, verbose=False)
        load_magics(ip)
        OutputMagic.initialize(list(self._backends.keys()))
        set_display_hooks(ip)
        notebook_extension._loaded = True

    css = ''
    if p.width is not None:
        css += '<style>div.container { width: %s%% }</style>' % p.width
    if p.css:
        css += '<style>%s</style>' % p.css
    if css:
        display(HTML(css))

    resources = list(resources)
    if len(resources) == 0:
        return

    # Create a message for the logo (if shown)
    js_names = {'holoviews': 'HoloViewsJS'}  # resource : displayed name
    loaded = ', '.join(js_names[r] if r in js_names else r.capitalize() + 'JS'
                       for r in resources)

    load_hvjs(logo=p.logo, JS=('holoviews' in resources),
              message='%s successfully loaded in this cell.' % loaded)
    for r in [r for r in resources if r != 'holoviews']:
        Store.renderers[r].load_nb(inline=p.inline)

    if resources[-1] != 'holoviews':
        get_ipython().magic(u"output backend=%r" % resources[-1]) # noqa (get_ipython)
def __call__(self, *args, **params):
    super(notebook_extension, self).__call__(*args, **params)
    # Abort if IPython not found
    try:
        ip = params.pop('ip', None) or get_ipython() # noqa (get_ipython)
    except:
        return

    # Notebook archive relies on display hooks being set to work.
    try:
        if version_info[0] >= 4:
            import nbformat # noqa (ensures availability)
        else:
            from IPython import nbformat # noqa (ensures availability)
        from .archive import notebook_archive
        holoviews.archive = notebook_archive
    except ImportError:
        pass

    # Not quite right, should be set when switching backends
    if 'matplotlib' in Store.renderers and not notebook_extension._loaded:
        svg_exporter = Store.renderers['matplotlib'].instance(holomap=None,
                                                              fig='svg')
        holoviews.archive.exporters = [svg_exporter] + holoviews.archive.exporters

    p = param.ParamOverrides(self, {k: v for k, v in params.items()
                                    if k != 'config'})
    if p.case_sensitive_completion:
        from IPython.core import completer
        completer.completions_sorting_key = self.completions_sorting_key

    resources = self._get_resources(args, params)

    Store.display_formats = p.display_formats
    if 'html' not in p.display_formats and len(p.display_formats) > 1:
        msg = ('Output magic unable to control displayed format '
               'as IPython notebook uses fixed precedence '
               'between %r' % p.display_formats)
        display(HTML('<b>Warning</b>: %s' % msg))

    loaded = notebook_extension._loaded
    if not loaded:
        param_ext.load_ipython_extension(ip, verbose=False)
        load_magics(ip)
        Store.output_settings.initialize(list(Store.renderers.keys()))
        Store.set_display_hook('html+js', LabelledData, pprint_display)
        Store.set_display_hook('png', LabelledData, png_display)
        Store.set_display_hook('svg', LabelledData, svg_display)
        notebook_extension._loaded = True

    css = ''
    if p.width is not None:
        css += '<style>div.container { width: %s%% }</style>' % p.width
    if p.css:
        css += '<style>%s</style>' % p.css
    if css:
        display(HTML(css))

    resources = list(resources)
    if len(resources) == 0:
        return

    Renderer.load_nb()
    for r in [r for r in resources if r != 'holoviews']:
        Store.renderers[r].load_nb(inline=p.inline)

    if hasattr(ip, 'kernel') and not loaded:
        Renderer.comm_manager.get_client_comm(notebook_extension._process_comm_msg,
                                              "hv-extension-comm")

    # Create a message for the logo (if shown)
    self.load_hvjs(logo=p.logo,
                   bokeh_logo=p.logo and ('bokeh' in resources),
                   mpl_logo=p.logo and (('matplotlib' in resources) or
                                        resources == ['holoviews']),
                   plotly_logo=p.logo and ('plotly' in resources),
                   JS=('holoviews' in resources))
def download(self, catalog_id, file_path, dataset, **kwargs):
    p = param.ParamOverrides(self, kwargs)
    parameter = p.parameter
    start = p.start
    end = p.end
    period = p.period

    if dataset is None:
        dataset = 'station-' + catalog_id

    if start and end:
        period = None

    pmap = self.parameter_map(invert=True)
    parameter_code, statistic_code = (pmap[parameter].split(':') + [None])[:2]

    data = nwis.get_site_data(catalog_id,
                              parameter_code=parameter_code,
                              statistic_code=statistic_code,
                              start=start, end=end,
                              period=period,
                              service=self.service_name)

    # dict contains only one key since only one parameter/statistic was
    # downloaded, this would need to be changed if multiple
    # parameter/stat were downloaded together
    if not data:
        raise ValueError('No Data Available')

    data = list(data.values())[0]

    # convert to dataframe and cleanup bad data
    df = pd.DataFrame(data['values'])
    if df.empty:
        raise ValueError('No Data Available')

    df = df.set_index('datetime')
    df.value = df.value.astype(float)
    # '00001', '00002' and '00003' are the USGS daily statistic codes
    # (max, min and mean), which are reported per day rather than as
    # timestamped instantaneous values
    if statistic_code in ['00001', '00002', '00003']:
        df.index = pd.to_datetime(df.index).to_period('D')
    else:
        df.index = pd.to_datetime(df.index)  # this is in UTC

    df[df.values == -999999] = pd.np.nan
    df.rename(columns={'value': parameter}, inplace=True)

    file_path = os.path.join(file_path, BASE_PATH, self.service_name,
                             dataset, '{0}.h5'.format(dataset))

    del data['values']

    metadata = {
        'name': dataset,
        'metadata': data,
        'file_path': file_path,
        'file_format': 'timeseries-hdf5',
        'datatype': 'timeseries',
        'parameter': parameter,
        'unit': data['variable']['units']['code'],
        'service_id': 'svc://usgs-nwis:{}/{}'.format(self.service_name, catalog_id)
    }

    # save data to disk
    io = load_plugins('io', 'timeseries-hdf5')['timeseries-hdf5']
    io.write(file_path, df, metadata)
    del metadata['service_id']

    return metadata
def __call__(self, nodes, edges, **params):
    if skimage is None:
        raise ImportError("hammer_bundle operation requires scikit-image. "
                          "Ensure you install the dependency before "
                          "applying bundling.")

    p = param.ParamOverrides(self, params)

    # Calculate min/max for coordinates
    xmin, xmax = np.min(nodes[p.x]), np.max(nodes[p.x])
    ymin, ymax = np.min(nodes[p.y]), np.max(nodes[p.y])

    # Normalize coordinates
    nodes = nodes.copy()
    nodes[p.x] = minmax_normalize(nodes[p.x], xmin, xmax)
    nodes[p.y] = minmax_normalize(nodes[p.y], ymin, ymax)

    # Convert graph into list of edge segments
    edges, segment_class = _convert_graph_to_edge_segments(nodes, edges, p)

    # This is simply to let the work split out over multiple cores
    edge_batches = list(batches(edges, p.batch_size))

    # This gets the edges split into lots of small segments
    # Doing this inside a delayed function lowers the transmission overhead
    edge_segments = [resample_edges(batch, p.min_segment_length,
                                    p.max_segment_length, segment_class.ndims)
                     for batch in edge_batches]

    for i in range(p.iterations):
        # Each step, the size of the 'blur' shrinks
        bandwidth = p.initial_bandwidth * p.decay**(i + 1) * p.accuracy

        # If it's this small, there won't be a change anyway
        if bandwidth < 2:
            break

        # Draw the density maps and combine them
        images = [draw_to_surface(segment, bandwidth, p.accuracy,
                                  segment_class.accumulate)
                  for segment in edge_segments]
        overall_image = sum(images)

        gradients = get_gradients(overall_image)

        # Move edges along the gradients and resample when necessary
        # This could include smoothing to adjust the amount a graph can change
        edge_segments = [advect_resample_all(gradients, segment,
                                             p.advect_iterations, p.accuracy,
                                             p.min_segment_length,
                                             p.max_segment_length,
                                             segment_class)
                         for segment in edge_segments]

    # Do a final resample to a smaller size for nicer rendering
    edge_segments = [resample_edges(segment, p.min_segment_length,
                                    p.max_segment_length, segment_class.ndims)
                     for segment in edge_segments]

    # Finally things can be sent for computation
    edge_segments = compute(*edge_segments)

    # Smooth out the graph
    for i in range(10):
        for batch in edge_segments:
            smooth(batch, p.tension, segment_class.idx, segment_class.idy)

    # Flatten things
    new_segs = []
    for batch in edge_segments:
        new_segs.extend(batch)

    # Convert list of edge segments to Pandas dataframe
    df = _convert_edge_segments_to_dataframe(new_segs, segment_class, p)

    # Denormalize coordinates
    df[p.x] = minmax_denormalize(df[p.x], xmin, xmax)
    df[p.y] = minmax_denormalize(df[p.y], ymin, ymax)

    return df
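# A minimal sketch of the min-max normalize/denormalize round trip used
# above. These helpers are assumed reimplementations mirroring
# minmax_normalize/minmax_denormalize, shown to make the coordinate
# handling concrete: bundling runs in unit-square space, then the result
# is mapped back to the original data range.
import numpy as np

def minmax_normalize_sketch(a, lo, hi):
    return (a - lo) / (hi - lo)   # map [lo, hi] -> [0, 1]

def minmax_denormalize_sketch(a, lo, hi):
    return a * (hi - lo) + lo     # map [0, 1] -> [lo, hi]

xs = np.array([-115.0, -110.0, -105.0])
roundtrip = minmax_denormalize_sketch(
    minmax_normalize_sketch(xs, xs.min(), xs.max()), xs.min(), xs.max())
assert np.allclose(roundtrip, xs)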