class TransformerWidget(Widget):
    d = List(Bool()).tag(sync=True, from_json=transform_fromjson)
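# `transform_fromjson` is referenced above but not defined in this excerpt.
# A minimal sketch, assuming the usual ipywidgets deserializer signature
# (value, widget) -> deserialized value; the coercion shown is illustrative:
def transform_fromjson(value, widget):
    # coerce incoming JSON values to booleans before they reach the trait
    return [bool(v) for v in value]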
class LayerGroup(Layer):
    _view_name = Unicode('LeafletLayerGroupView').tag(sync=True)
    _model_name = Unicode('LeafletLayerGroupModel').tag(sync=True)

    layers = List(Instance(Layer)).tag(sync=True, **widget_serialization)
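# Usage sketch: group several layers so they can be added to or removed from
# a map as one unit.  `Marker` is assumed from the same jupyter-leaflet
# codebase; `Map` is defined below.
group = LayerGroup(layers=(Marker(location=(52.0, 4.0)),
                           Marker(location=(52.1, 4.1))))
m = Map(center=[52.0, 4.0], zoom=10)
m.add_layer(group)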
class Map(DOMWidget, InteractMixin):
    _view_name = Unicode('LeafletMapView').tag(sync=True)
    _model_name = Unicode('LeafletMapModel').tag(sync=True)
    _view_module = Unicode('jupyter-leaflet').tag(sync=True)
    _model_module = Unicode('jupyter-leaflet').tag(sync=True)

    @default('layout')
    def _default_layout(self):
        return Layout(height='400px', align_self='stretch')

    # Map options
    center = List(def_loc).tag(sync=True, o=True)
    zoom_start = Int(12).tag(sync=True, o=True)
    zoom = Int(12).tag(sync=True, o=True)
    max_zoom = Int(18).tag(sync=True, o=True)
    min_zoom = Int(1).tag(sync=True, o=True)

    # Interaction options
    dragging = Bool(True).tag(sync=True, o=True)
    touch_zoom = Bool(True).tag(sync=True, o=True)
    scroll_wheel_zoom = Bool(False).tag(sync=True, o=True)
    double_click_zoom = Bool(True).tag(sync=True, o=True)
    box_zoom = Bool(True).tag(sync=True, o=True)
    tap = Bool(True).tag(sync=True, o=True)
    tap_tolerance = Int(15).tag(sync=True, o=True)
    world_copy_jump = Bool(False).tag(sync=True, o=True)
    close_popup_on_click = Bool(True).tag(sync=True, o=True)
    bounce_at_zoom_limits = Bool(True).tag(sync=True, o=True)
    keyboard = Bool(True).tag(sync=True, o=True)
    keyboard_pan_offset = Int(80).tag(sync=True, o=True)
    keyboard_zoom_offset = Int(1).tag(sync=True, o=True)
    inertia = Bool(True).tag(sync=True, o=True)
    inertia_deceleration = Int(3000).tag(sync=True, o=True)
    inertia_max_speed = Int(1500).tag(sync=True, o=True)
    # inertia_threshold = Int(?, o=True).tag(sync=True)
    zoom_control = Bool(True).tag(sync=True, o=True)
    attribution_control = Bool(True).tag(sync=True, o=True)
    # fade_animation = Bool(?).tag(sync=True, o=True)
    # zoom_animation = Bool(?).tag(sync=True, o=True)
    zoom_animation_threshold = Int(4).tag(sync=True, o=True)
    # marker_zoom_animation = Bool(?).tag(sync=True, o=True)

    options = List(trait=Unicode).tag(sync=True)

    @default('options')
    def _default_options(self):
        return [name for name in self.traits(o=True)]

    _south = Float(def_loc[0]).tag(sync=True)
    _north = Float(def_loc[0]).tag(sync=True)
    _east = Float(def_loc[1]).tag(sync=True)
    _west = Float(def_loc[1]).tag(sync=True)

    default_tiles = Instance(TileLayer, allow_none=True)

    @default('default_tiles')
    def _default_tiles(self):
        return TileLayer()

    @property
    def north(self):
        return self._north

    @property
    def south(self):
        return self._south

    @property
    def east(self):
        return self._east

    @property
    def west(self):
        return self._west

    @property
    def bounds_polygon(self):
        return [(self.north, self.west),
                (self.north, self.east),
                (self.south, self.east),
                (self.south, self.west)]

    @property
    def bounds(self):
        return [(self.south, self.west),
                (self.north, self.east)]

    def __init__(self, **kwargs):
        super(Map, self).__init__(**kwargs)
        self.on_displayed(self._fire_children_displayed)
        if self.default_tiles is not None:
            self.layers = (self.default_tiles,)
        self.on_msg(self._handle_leaflet_event)

    def _fire_children_displayed(self, widget, **kwargs):
        for layer in self.layers:
            layer._handle_displayed(**kwargs)
        for control in self.controls:
            control._handle_displayed(**kwargs)

    layers = Tuple(trait=Instance(Layer)).tag(sync=True,
                                              **widget_serialization)
    layer_ids = List()

    @validate('layers')
    def _validate_layers(self, proposal):
        """Validate layers list.

        Makes sure only one instance of any given layer can exist in the
        layers list.
        """
        self.layer_ids = [l.model_id for l in proposal['value']]
        if len(set(self.layer_ids)) != len(self.layer_ids):
            raise LayerException(
                'duplicate layer detected, only use each layer once')
        return proposal['value']

    def add_layer(self, layer):
        if layer.model_id in self.layer_ids:
            raise LayerException('layer already on map: %r' % layer)
        layer._map = self
        self.layers = tuple([l for l in self.layers] + [layer])
        layer.visible = True

    def remove_layer(self, layer):
        if layer.model_id not in self.layer_ids:
            raise LayerException('layer not on map: %r' % layer)
        self.layers = tuple(
            [l for l in self.layers if l.model_id != layer.model_id])
        layer.visible = False

    def clear_layers(self):
        self.layers = ()

    controls = Tuple(trait=Instance(Control)).tag(sync=True,
                                                  **widget_serialization)
    control_ids = List()

    @validate('controls')
    def _validate_controls(self, proposal):
        """Validate controls list.

        Makes sure only one instance of any given control can exist in the
        controls list.
        """
        self.control_ids = [c.model_id for c in proposal['value']]
        if len(set(self.control_ids)) != len(self.control_ids):
            raise ControlException(
                'duplicate control detected, only use each control once')
        return proposal['value']

    def add_control(self, control):
        if control.model_id in self.control_ids:
            raise ControlException('control already on map: %r' % control)
        control._map = self
        self.controls = tuple([c for c in self.controls] + [control])
        control.visible = True

    def remove_control(self, control):
        if control.model_id not in self.control_ids:
            raise ControlException('control not on map: %r' % control)
        self.controls = tuple(
            [c for c in self.controls if c.model_id != control.model_id])
        control.visible = False

    def clear_controls(self):
        self.controls = ()

    def __iadd__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    def __isub__(self, item):
        if isinstance(item, Layer):
            self.remove_layer(item)
        elif isinstance(item, Control):
            self.remove_control(item)
        return self

    def __add__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    def _handle_leaflet_event(self, _, content):
        pass
class Drawing(GMapsWidgetMixin, widgets.Widget):
    """
    Widget for a drawing layer

    Add this to a :class:`gmaps.Map` or :class:`gmaps.Figure` instance to
    let you draw on the map.

    You should not need to instantiate this directly. Instead, use the
    :func:`gmaps.drawing_layer` factory function.

    :Examples:

    {examples}

    {params}

    :param mode:
        Initial drawing mode. One of ``DISABLED``, ``MARKER``, ``LINE``,
        ``POLYGON`` or ``DELETE``. Defaults to ``MARKER`` if
        ``toolbar_controls.show_controls`` is True, otherwise defaults to
        ``DISABLED``.
    :type mode: str, optional

    :param toolbar_controls:
        Widget representing the drawing toolbar.
    :type toolbar_controls: :class:`gmaps.DrawingControls`, optional
    """
    has_bounds = False
    _view_name = Unicode('DrawingLayerView').tag(sync=True)
    _model_name = Unicode('DrawingLayerModel').tag(sync=True)
    features = List().tag(sync=True, **widgets.widget_serialization)
    mode = Enum(ALLOWED_DRAWING_MODES).tag(sync=True)
    marker_options = widgets.trait_types.InstanceDict(
        MarkerOptions, allow_none=False)
    line_options = widgets.trait_types.InstanceDict(
        LineOptions, allow_none=False)
    toolbar_controls = Instance(DrawingControls, allow_none=False).tag(
        sync=True, **widgets.widget_serialization)

    def __init__(self, **kwargs):
        kwargs['mode'] = self._get_initial_mode(kwargs)
        if kwargs.get('features') is None:
            kwargs['features'] = []
        if kwargs.get('marker_options') is None:
            kwargs['marker_options'] = self._default_marker_options()
        if kwargs.get('line_options') is None:
            kwargs['line_options'] = self._default_line_options()
        self._new_feature_callbacks = []
        super(Drawing, self).__init__(**kwargs)
        self.on_msg(self._handle_message)

    def on_new_feature(self, callback):
        """
        Register a callback called when new features are added

        :param callback:
            Callable to be called when a new feature is added. The callback
            should take a single argument, the feature that has been added.
            This can be an instance of :class:`gmaps.Line`,
            :class:`gmaps.Marker` or :class:`gmaps.Polygon`.
        :type callback: callable
        """
        self._new_feature_callbacks.append(callback)

    def _get_initial_mode(self, constructor_kwargs):
        try:
            mode = constructor_kwargs['mode']
        except KeyError:
            # mode not explicitly specified
            controls_hidden = (
                'toolbar_controls' in constructor_kwargs
                and not constructor_kwargs['toolbar_controls'].show_controls
            )
            if controls_hidden:
                mode = 'DISABLED'
            else:
                mode = DEFAULT_DRAWING_MODE
        return mode

    @default('marker_options')
    def _default_marker_options(self):
        return MarkerOptions()

    @default('line_options')
    def _default_line_options(self):
        return LineOptions()

    @default('toolbar_controls')
    def _default_toolbar_controls(self):
        return DrawingControls()

    @observe('features')
    def _on_new_feature(self, change):
        if self._new_feature_callbacks:
            old_features = set(change['old'])
            new_features = [
                feature for feature in change['new']
                if feature not in old_features
            ]
            for feature in new_features:
                for callback in self._new_feature_callbacks:
                    callback(feature)

    def _delete_feature(self, model_id):
        updated_features = [
            feature for feature in self.features
            if feature.model_id != model_id
        ]
        self.features = updated_features

    def _handle_message(self, _, content, buffers):
        if content.get('event') == 'FEATURE_ADDED':
            payload = content['payload']
            if payload['featureType'] == 'MARKER':
                latitude = payload['latitude']
                longitude = payload['longitude']
                feature = self.marker_options.to_marker(latitude, longitude)
            elif payload['featureType'] == 'LINE':
                start = payload['start']
                end = payload['end']
                feature = self.line_options.to_line(start, end)
            elif payload['featureType'] == 'POLYGON':
                path = payload['path']
                feature = Polygon(path)
            self.features = self.features + [feature]
        elif content.get('event') == 'MODE_CHANGED':
            payload = content['payload']
            self.mode = payload['mode']
        elif content.get('event') == 'FEATURE_DELETED':
            payload = content['payload']
            model_id = payload['modelId']
            self._delete_feature(model_id)
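# Usage sketch, following the factory-function route recommended in the
# class docstring: create the layer via gmaps.drawing_layer() and react to
# features drawn on the map.
import gmaps

fig = gmaps.figure()
drawing = gmaps.drawing_layer()
fig.add_layer(drawing)

def handle_feature(feature):
    # feature is a gmaps.Marker, gmaps.Line or gmaps.Polygon
    print('new feature:', feature)

drawing.on_new_feature(handle_feature)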
class Rectangle(Polygon):
    _view_name = Unicode('LeafletRectangleView').tag(sync=True)
    _model_name = Unicode('LeafletRectangleModel').tag(sync=True)

    bounds = List(help="list of SW and NE location tuples").tag(sync=True)
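# Usage sketch: bounds takes the (south-west, north-east) corner pair, the
# same convention as Map.bounds above.  `Polygon` (the base class) is
# assumed from the same jupyter-leaflet codebase.
rect = Rectangle(bounds=[(51.5, 3.5), (52.5, 4.5)])
Map(center=[52.0, 4.0], zoom=7).add_layer(rect)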
class Layer(Widget, InteractMixin):
    _view_name = Unicode('LeafletLayerView').tag(sync=True)
    _model_name = Unicode('LeafletLayerModel').tag(sync=True)
    _view_module = Unicode('jupyter-leaflet').tag(sync=True)
    _model_module = Unicode('jupyter-leaflet').tag(sync=True)
    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)

    name = Unicode('').tag(sync=True)
    base = Bool(False).tag(sync=True)
    bottom = Bool(False).tag(sync=True)
    popup = Instance(Widget, allow_none=True,
                     default_value=None).tag(sync=True,
                                             **widget_serialization)
    popup_min_width = Int(50).tag(sync=True)
    popup_max_width = Int(300).tag(sync=True)
    popup_max_height = Int(default_value=None, allow_none=True).tag(sync=True)

    options = List(trait=Unicode()).tag(sync=True)

    def __init__(self, **kwargs):
        super(Layer, self).__init__(**kwargs)
        self.on_msg(self._handle_mouse_events)

    @default('options')
    def _default_options(self):
        return [name for name in self.traits(o=True)]

    # Event handling
    _click_callbacks = Instance(CallbackDispatcher, ())
    _dblclick_callbacks = Instance(CallbackDispatcher, ())
    _mousedown_callbacks = Instance(CallbackDispatcher, ())
    _mouseup_callbacks = Instance(CallbackDispatcher, ())
    _mouseover_callbacks = Instance(CallbackDispatcher, ())
    _mouseout_callbacks = Instance(CallbackDispatcher, ())

    def _handle_mouse_events(self, _, content, buffers):
        event_type = content.get('type', '')
        if event_type == 'click':
            self._click_callbacks(**content)
        if event_type == 'dblclick':
            self._dblclick_callbacks(**content)
        if event_type == 'mousedown':
            self._mousedown_callbacks(**content)
        if event_type == 'mouseup':
            self._mouseup_callbacks(**content)
        if event_type == 'mouseover':
            self._mouseover_callbacks(**content)
        if event_type == 'mouseout':
            self._mouseout_callbacks(**content)

    def on_click(self, callback, remove=False):
        self._click_callbacks.register_callback(callback, remove=remove)

    def on_dblclick(self, callback, remove=False):
        self._dblclick_callbacks.register_callback(callback, remove=remove)

    def on_mousedown(self, callback, remove=False):
        self._mousedown_callbacks.register_callback(callback, remove=remove)

    def on_mouseup(self, callback, remove=False):
        self._mouseup_callbacks.register_callback(callback, remove=remove)

    def on_mouseover(self, callback, remove=False):
        self._mouseover_callbacks.register_callback(callback, remove=remove)

    def on_mouseout(self, callback, remove=False):
        self._mouseout_callbacks.register_callback(callback, remove=remove)
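# Usage sketch: every Layer subclass (e.g. the Rectangle above) inherits the
# on_* registration methods; the registered callback receives the event
# content dict as keyword arguments (the exact keys, such as 'coordinates',
# depend on the front-end event payload).
def handle_click(**event):
    print('clicked:', event.get('coordinates'))

rect = Rectangle(bounds=[(51.5, 3.5), (52.5, 4.5)])
rect.on_click(handle_click)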
class SwarmSpawner(Spawner):
    """A Spawner for JupyterHub using Docker Engine in Swarm mode."""

    _executor = None

    @property
    def executor(self, max_workers=1):
        """single global executor"""
        cls = self.__class__
        if cls._executor is None:
            cls._executor = ThreadPoolExecutor(max_workers)
        return cls._executor

    _client = None

    @property
    def client(self):
        """single global client instance"""
        cls = self.__class__
        if cls._client is None:
            kwargs = {}
            if self.tls_config:
                kwargs['tls'] = docker.tls.TLSConfig(**self.tls_config)
            kwargs.update(kwargs_from_env())
            client = docker.APIClient(version='auto', **kwargs)
            cls._client = client
        return cls._client

    service_id = Unicode()
    service_port = Int(8888, min=1, max=65535, config=True)
    service_image = Unicode("jupyterhub/singleuser", config=True)
    service_prefix = Unicode(
        "jupyter",
        config=True,
        help=dedent("""
            Prefix for service names.

            The full service name for a particular user will be
            <prefix>-<hash(username)>-<server_name>.
            """))
    tls_config = Dict(
        config=True,
        help=dedent("""
            Arguments to pass to docker TLS configuration.

            Check for more info:
            http://docker-py.readthedocs.io/en/stable/tls.html
            """))
    container_spec = Dict({}, config=True,
                          help="Params to create the service")
    resource_spec = Dict({}, config=True,
                         help="Params about cpu and memory limits")
    placement = List(
        [], config=True,
        help=dedent("""List of placement constraints for the swarm"""))
    networks = List(
        [], config=True,
        help=dedent("""Additional args to create_host_config for service
            create"""))
    use_user_options = Bool(
        False, config=True,
        help=dedent("""The spawner will use the dict passed through the form
            or as JSON body when using the Hub API"""))
    jupyterhub_service_name = Unicode(
        config=True,
        help=dedent("""Name of the service running the JupyterHub"""))

    @property
    def tls_client(self):
        """A tuple consisting of the TLS client certificate and key if they
        have been provided, otherwise None.
        """
        if self.tls_cert and self.tls_key:
            return (self.tls_cert, self.tls_key)
        return None

    _service_owner = None

    @property
    def service_owner(self):
        if self._service_owner is None:
            m = hashlib.md5()
            m.update(self.user.name.encode('utf-8'))
            self._service_owner = m.hexdigest()
        return self._service_owner

    @property
    def service_name(self):
        """
        Service name inside the Docker Swarm.

        service_suffix should be a numerical value unique per user:
        {service_prefix}-{service_owner}-{service_suffix}
        """
        if hasattr(self, "server_name") and self.server_name:
            server_name = self.server_name
        else:
            server_name = 1
        return "{}-{}-{}".format(self.service_prefix, self.service_owner,
                                 server_name)

    def load_state(self, state):
        super().load_state(state)
        self.service_id = state.get('service_id', '')

    def get_state(self):
        state = super().get_state()
        if self.service_id:
            state['service_id'] = self.service_id
        return state

    def _env_keep_default(self):
        """Called by traitlets (special method name).

        Don't inherit any env from the parent process."""
        return []

    def _public_hub_api_url(self):
        proto, path = self.hub.api_url.split('://', 1)
        _, rest = path.split(':', 1)
        return '{proto}://{name}:{rest}'.format(
            proto=proto, name=self.jupyterhub_service_name, rest=rest)

    def get_env(self):
        env = super().get_env()
        env.update(dict(
            JPY_USER=self.user.name,
            JPY_COOKIE_NAME=self.user.server.cookie_name,
            JPY_BASE_URL=self.user.server.base_url,
            JPY_HUB_PREFIX=self.hub.server.base_url))
        if self.notebook_dir:
            env['NOTEBOOK_DIR'] = self.notebook_dir
        env['JPY_HUB_API_URL'] = self._public_hub_api_url()
        return env

    def _docker(self, method, *args, **kwargs):
        """Wrapper for calling docker methods, to be passed to
        ThreadPoolExecutor.
        """
        m = getattr(self.client, method)
        return m(*args, **kwargs)

    def docker(self, method, *args, **kwargs):
        """Call a docker method in a background thread.

        Returns a Future.
        """
        return self.executor.submit(self._docker, method, *args, **kwargs)

    @gen.coroutine
    def poll(self):
        """Check for a task state like `docker service ps id`"""
        service = yield self.get_service()
        if not service:
            self.log.warn("Docker service not found")
            return 0
        task_filter = {'service': service['Spec']['Name']}
        tasks = yield self.docker('tasks', task_filter)
        running_task = None
        for task in tasks:
            task_state = task['Status']['State']
            self.log.debug(
                "Task %s of Docker service %s status: %s",
                task['ID'][:7],
                self.service_id[:7],
                pformat(task_state),
            )
            if task_state == 'running':
                # there should be at most one running task
                running_task = task
        if running_task is not None:
            return None
        else:
            return 1

    @gen.coroutine
    def get_service(self):
        self.log.debug("Getting Docker service '%s'", self.service_name)
        try:
            service = yield self.docker('inspect_service', self.service_name)
            self.service_id = service['ID']
        except APIError as err:
            if err.response.status_code == 404:
                self.log.info("Docker service '%s' is gone",
                              self.service_name)
                service = None
                # Docker service is gone, remove service id
                self.service_id = ''
            elif err.response.status_code == 500:
                self.log.info("Docker Swarm Server error")
                service = None
                # Docker service is unhealthy, remove the service_id
                self.service_id = ''
            else:
                raise
        return service

    @gen.coroutine
    def start(self):
        """Start the single-user server in a docker service.

        You can specify the params for the service through
        jupyterhub_config.py or using the user_options.
        """
        # https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/user.py#L202
        # By default jupyterhub calls the spawner passing user_options
        if self.use_user_options:
            user_options = self.user_options
        else:
            user_options = {}

        self.log.warn("user_options: {}".format(user_options))

        service = yield self.get_service()
        if service is None:
            if 'name' in user_options:
                self.server_name = user_options['name']
            if (hasattr(self, 'container_spec')
                    and self.container_spec is not None):
                container_spec = dict(**self.container_spec)
            elif user_options == {}:
                raise Exception(
                    "A container_spec is needed to create a service")
            container_spec.update(user_options.get('container_spec', {}))

            # iterate over mounts to create a new mounts list of
            # docker.types.Mount
            container_spec['mounts'] = []
            for mount in self.container_spec['mounts']:
                m = dict(**mount)
                if 'source' in m:
                    m['source'] = m['source'].format(
                        username=self.service_owner)
                if 'driver_config' in m:
                    device = m['driver_config']['options']['device'].format(
                        username=self.service_owner)
                    m['driver_config']['options']['device'] = device
                    m['driver_config'] = docker.types.DriverConfig(
                        **m['driver_config'])
                container_spec['mounts'].append(docker.types.Mount(**m))

            # some envs are required by the single-user image
            container_spec['env'] = self.get_env()

            if hasattr(self, 'resource_spec'):
                resource_spec = dict(**self.resource_spec)
                resource_spec.update(user_options.get('resource_spec', {}))
                # allow a human-readable memory unit
                if 'mem_limit' in resource_spec:
                    resource_spec['mem_limit'] = parse_bytes(
                        resource_spec['mem_limit'])
                if 'mem_reservation' in resource_spec:
                    resource_spec['mem_reservation'] = parse_bytes(
                        resource_spec['mem_reservation'])

            if hasattr(self, 'networks'):
                networks = self.networks
                if user_options.get('networks') is not None:
                    networks = user_options.get('networks')

            if hasattr(self, 'placement'):
                placement = self.placement
                if user_options.get('placement') is not None:
                    placement = user_options.get('placement')

            image = container_spec['Image']
            del container_spec['Image']

            # create the service
            container_spec = docker.types.ContainerSpec(
                image, **container_spec)
            resources = docker.types.Resources(**resource_spec)
            task_spec = {'container_spec': container_spec,
                         'resources': resources,
                         'placement': placement}
            task_tmpl = docker.types.TaskTemplate(**task_spec)
            resp = yield self.docker('create_service', task_tmpl,
                                     name=self.service_name,
                                     networks=networks)
            self.service_id = resp['ID']
            self.log.info(
                "Created Docker service '%s' (id: %s) from image %s",
                self.service_name, self.service_id[:7], image)
        else:
            self.log.info("Found existing Docker service '%s' (id: %s)",
                          self.service_name, self.service_id[:7])
            # Handle re-using API token.
            # Get the API token from the environment variables
            # of the running service:
            envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env']
            for line in envs:
                if line.startswith('JPY_API_TOKEN='):
                    self.api_token = line.split('=', 1)[1]
                    break

        ip = self.service_name
        port = self.service_port
        # we use service_name instead of ip
        # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery
        # service_port is actually equal to 8888
        return (ip, port)

    @gen.coroutine
    def stop(self, now=False):
        """Stop and remove the service.

        Consider using stop/start when Docker adds support.
        """
        self.log.info("Stopping and removing Docker service %s (id: %s)",
                      self.service_name, self.service_id[:7])
        yield self.docker('remove_service', self.service_id[:7])
        self.log.info("Docker service %s (id: %s) removed",
                      self.service_name, self.service_id[:7])
        self.clear_state()
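# Configuration sketch for jupyterhub_config.py (trait names taken from the
# class above; the import path for the spawner class is a hypothetical
# placeholder, use wherever SwarmSpawner lives in your deployment):
c.JupyterHub.spawner_class = 'mypackage.SwarmSpawner'  # hypothetical path
c.SwarmSpawner.jupyterhub_service_name = 'jupyterhub'
c.SwarmSpawner.networks = ['jupyterhub-net']
c.SwarmSpawner.container_spec = {
    'Image': 'jupyterhub/singleuser:latest',
    'args': ['/usr/local/bin/start-singleuser.sh'],
    'mounts': [],
}
c.SwarmSpawner.resource_spec = {
    'mem_limit': '2G',          # parsed with parse_bytes() in start()
    'mem_reservation': '512M',
}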
class DL1Extractor(Tool):
    name = "DL1Extractor"
    description = "Extract the dl1 information and store into a numpy file"

    aliases = Dict(dict(r='EventFileReaderFactory.reader',
                        f='EventFileReaderFactory.input_path',
                        max_events='EventFileReaderFactory.max_events',
                        ped='CameraR1CalibratorFactory.pedestal_path',
                        tf='CameraR1CalibratorFactory.tf_path',
                        pe='CameraR1CalibratorFactory.pe_path',
                        ))
    classes = List([EventFileReaderFactory,
                    CameraR1CalibratorFactory,
                    ])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.reader = None
        self.r1 = None
        self.dl0 = None
        self.output_dir = None
        self.baseline_rms_full = None

    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"
        kwargs = dict(config=self.config, tool=self)

        reader_factory = EventFileReaderFactory(**kwargs)
        reader_class = reader_factory.get_class()
        self.reader = reader_class(**kwargs)

        r1_factory = CameraR1CalibratorFactory(origin=self.reader.origin,
                                               **kwargs)
        r1_class = r1_factory.get_class()
        self.r1 = r1_class(**kwargs)

        self.dl0 = CameraDL0Reducer(**kwargs)

        self.output_dir = join(self.reader.output_directory,
                               "extract_adc2pe")
        if not exists(self.output_dir):
            self.log.info("Creating directory: {}".format(self.output_dir))
            makedirs(self.output_dir)

        n_events = self.reader.num_events
        first_event = self.reader.get_event(0)
        n_pixels = first_event.inst.num_pixels[0]
        n_samples = first_event.r0.tel[0].num_samples

        self.baseline_rms_full = np.zeros((n_events, n_pixels))

    def start(self):
        n_events = self.reader.num_events
        first_event = self.reader.get_event(0)
        telid = list(first_event.r0.tels_with_data)[0]
        n_pixels = first_event.inst.num_pixels[0]
        n_samples = first_event.r0.tel[0].num_samples

        source = self.reader.read()
        desc = "Looping through file"
        with tqdm(total=n_events, desc=desc) as pbar:
            for event in source:
                pbar.update(1)
                ev = event.count
                self.r1.calibrate(event)
                self.dl0.reduce(event)
                dl0 = event.dl0.tel[telid].pe_samples[0]
                baseline_rms_full = np.std(dl0, axis=1)
                self.baseline_rms_full[ev] = baseline_rms_full

    def finish(self):
        # np.save always writes a .npy file, so use a .npy suffix for both
        # possible input extensions
        output_path = self.reader.input_path.replace("_r0.tio", "_rms.npy")
        output_path = output_path.replace("_r1.tio", "_rms.npy")
        np.save(output_path, self.baseline_rms_full)
        self.log.info("RMS Numpy array saved to: {}".format(output_path))
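# Invocation sketch: ctapipe-style Tools are normally driven from the
# command line, with the aliases above mapping short flags to component
# traits.  A programmatic equivalent (the input path is illustrative):
tool = DL1Extractor()
tool.run(argv=['-f', '/data/Run1_r0.tio', '--max_events', '100'])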
class HistoryManager(HistoryAccessor):
    """A class to organize all history-related functionality in one place."""

    # Public interface

    # An instance of the IPython shell we are attached to
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)
    # Lists to hold processed and raw history. These start with a blank entry
    # so that we can index them starting from 1
    input_hist_parsed = List([""])
    input_hist_raw = List([""])
    # A list of directories visited during session
    dir_hist = List()

    @default('dir_hist')
    def _dir_hist_default(self):
        try:
            return [os.getcwd()]
        except OSError:
            return []

    # A dict of output history, keyed with ints from the shell's
    # execution count.
    output_hist = Dict()
    # The text/plain repr of outputs.
    output_hist_reprs = Dict()

    # The number of the current session in the history database
    session_number = Integer()

    db_log_output = Bool(
        False,
        help="Should the history database include output? (default: no)"
    ).tag(config=True)
    db_cache_size = Integer(
        0,
        help="Write to database every x commands (higher values save disk "
             "access & power).\n"
             "Values of 1 or less effectively disable caching."
    ).tag(config=True)
    # The input and output caches
    db_input_cache = List()
    db_output_cache = List()

    # History saving in separate thread
    save_thread = Instance('IPython.core.history.HistorySavingThread',
                           allow_none=True)
    save_flag = Instance(threading.Event, allow_none=True)

    # Private interface
    # Variables used to store the three last inputs from the user. On each
    # new history update, we populate the user's namespace with these,
    # shifted as necessary.
    _i00 = Unicode(u'')
    _i = Unicode(u'')
    _ii = Unicode(u'')
    _iii = Unicode(u'')

    # A regex matching all forms of the exit command, so that we don't store
    # them in the history (it's annoying to rewind the first entry and land
    # on an exit call).
    _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")

    def __init__(self, shell=None, config=None, **traits):
        """Create a new history manager associated with a shell instance."""
        # We need a pointer back to the shell for various tasks.
        super(HistoryManager, self).__init__(shell=shell, config=config,
                                             **traits)
        self.save_flag = threading.Event()
        self.db_input_cache_lock = threading.Lock()
        self.db_output_cache_lock = threading.Lock()

        try:
            self.new_session()
        except OperationalError:
            self.log.error("Failed to create history session in %s. "
                           "History will not be saved.",
                           self.hist_file, exc_info=True)
            self.hist_file = ':memory:'

        if self.enabled and self.hist_file != ':memory:':
            self.save_thread = HistorySavingThread(self)
            self.save_thread.start()

    def _get_hist_file_name(self, profile=None):
        """Get default history file name based on the Shell's profile.

        The profile parameter is ignored, but must exist for compatibility
        with the parent class."""
        profile_dir = self.shell.profile_dir.location
        return os.path.join(profile_dir, 'history.sqlite')

    @needs_sqlite
    def new_session(self, conn=None):
        """Get a new session number."""
        if conn is None:
            conn = self.db

        with conn:
            cur = conn.execute(
                """INSERT INTO sessions VALUES (NULL, ?, NULL, NULL, "") """,
                (datetime.datetime.now(),))
            self.session_number = cur.lastrowid

    def end_session(self):
        """Close the database session, filling in the end time and line
        count."""
        self.writeout_cache()
        with self.db:
            self.db.execute(
                """UPDATE sessions SET end=?, num_cmds=? WHERE session==?""",
                (datetime.datetime.now(),
                 len(self.input_hist_parsed) - 1,
                 self.session_number))
        self.session_number = 0

    def name_session(self, name):
        """Give the current session a name in the history database."""
        with self.db:
            self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
                            (name, self.session_number))

    def reset(self, new_session=True):
        """Clear the session history, releasing all object references, and
        optionally open a new session."""
        self.output_hist.clear()
        # The directory history can't be completely empty
        self.dir_hist[:] = [os.getcwd()]

        if new_session:
            if self.session_number:
                self.end_session()
            self.input_hist_parsed[:] = [""]
            self.input_hist_raw[:] = [""]
            self.new_session()

    # ------------------------------
    # Methods for retrieving history
    # ------------------------------
    def get_session_info(self, session=0):
        """Get info about a session.

        Parameters
        ----------
        session : int
            Session number to retrieve. The current session is 0, and
            negative numbers count back from current session, so -1 is the
            previous session.

        Returns
        -------
        session_id : int
            Session ID number
        start : datetime
            Timestamp for the start of the session.
        end : datetime
            Timestamp for the end of the session, or None if IPython crashed.
        num_cmds : int
            Number of commands run, or None if IPython crashed.
        remark : unicode
            A manually set description.
        """
        if session <= 0:
            session += self.session_number
        return super(HistoryManager, self).get_session_info(session=session)

    def _get_range_session(self, start=1, stop=None, raw=True, output=False):
        """Get input and output history from the current session. Called by
        get_range, and takes similar parameters."""
        input_hist = self.input_hist_raw if raw else self.input_hist_parsed

        n = len(input_hist)
        if start < 0:
            start += n
        if not stop or (stop > n):
            stop = n
        elif stop < 0:
            stop += n

        for i in range(start, stop):
            if output:
                line = (input_hist[i], self.output_hist_reprs.get(i))
            else:
                line = input_hist[i]
            yield (0, i, line)

    def get_range(self, session=0, start=1, stop=None, raw=True,
                  output=False):
        """Retrieve input by session.

        Parameters
        ----------
        session : int
            Session number to retrieve. The current session is 0, and
            negative numbers count back from current session, so -1 is
            previous session.
        start : int
            First line to retrieve.
        stop : int
            End of line range (excluded from output itself). If None,
            retrieve to the end of the session.
        raw : bool
            If True, return untranslated input
        output : bool
            If True, attempt to include output. This will be 'real' Python
            objects for the current session, or text reprs from previous
            sessions if db_log_output was enabled at the time. Where no
            output is found, None is used.

        Returns
        -------
        entries
            An iterator over the desired lines. Each line is a 3-tuple,
            either (session, line, input) if output is False, or
            (session, line, (input, output)) if output is True.
        """
        if session <= 0:
            session += self.session_number
        if session == self.session_number:
            # Current session
            return self._get_range_session(start, stop, raw, output)
        return super(HistoryManager, self).get_range(session, start, stop,
                                                     raw, output)

    # ----------------------------
    # Methods for storing history:
    # ----------------------------
    def store_inputs(self, line_num, source, source_raw=None):
        """Store source and raw input in history and create input cache
        variables ``_i*``.

        Parameters
        ----------
        line_num : int
            The prompt number of this input.
        source : str
            Python input.
        source_raw : str, optional
            If given, this is the raw input without any IPython
            transformations applied to it. If not given, ``source`` is used.
        """
        if source_raw is None:
            source_raw = source
        source = source.rstrip('\n')
        source_raw = source_raw.rstrip('\n')

        # do not store exit/quit commands
        if self._exit_re.match(source_raw.strip()):
            return

        self.input_hist_parsed.append(source)
        self.input_hist_raw.append(source_raw)

        with self.db_input_cache_lock:
            self.db_input_cache.append((line_num, source, source_raw))
            # Trigger to flush cache and write to DB.
            if len(self.db_input_cache) >= self.db_cache_size:
                self.save_flag.set()

        # update the auto _i variables
        self._iii = self._ii
        self._ii = self._i
        self._i = self._i00
        self._i00 = source_raw

        # hackish access to user namespace to create _i1,_i2... dynamically
        new_i = '_i%s' % line_num
        to_main = {'_i': self._i,
                   '_ii': self._ii,
                   '_iii': self._iii,
                   new_i: self._i00}

        if self.shell is not None:
            self.shell.push(to_main, interactive=False)

    def store_output(self, line_num):
        """If database output logging is enabled, this saves all the outputs
        from the indicated prompt number to the database. It's called by
        run_cell after code has been executed.

        Parameters
        ----------
        line_num : int
            The line number from which to save outputs
        """
        if ((not self.db_log_output)
                or (line_num not in self.output_hist_reprs)):
            return
        output = self.output_hist_reprs[line_num]

        with self.db_output_cache_lock:
            self.db_output_cache.append((line_num, output))
        if self.db_cache_size <= 1:
            self.save_flag.set()

    def _writeout_input_cache(self, conn):
        with conn:
            for line in self.db_input_cache:
                conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
                             (self.session_number,) + line)

    def _writeout_output_cache(self, conn):
        with conn:
            for line in self.db_output_cache:
                conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
                             (self.session_number,) + line)

    @needs_sqlite
    def writeout_cache(self, conn=None):
        """Write any entries in the cache to the database."""
        if conn is None:
            conn = self.db

        with self.db_input_cache_lock:
            try:
                self._writeout_input_cache(conn)
            except sqlite3.IntegrityError:
                self.new_session(conn)
                print("ERROR! Session/line number was not unique in",
                      "database. History logging moved to new session",
                      self.session_number)
                try:
                    # Try writing to the new session. If this fails, don't
                    # recurse
                    self._writeout_input_cache(conn)
                except sqlite3.IntegrityError:
                    pass
            finally:
                self.db_input_cache = []

        with self.db_output_cache_lock:
            try:
                self._writeout_output_cache(conn)
            except sqlite3.IntegrityError:
                print("!! Session/line number for output was not unique",
                      "in database. Output will not be stored.")
            finally:
                self.db_output_cache = []
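# Usage sketch from an interactive IPython session: the shell owns a
# HistoryManager instance, and get_range() yields the 3-tuples documented
# above ((session, line, (input, output)) when output=True).
ip = get_ipython()
hm = ip.history_manager
for session, line, (inp, out) in hm.get_range(session=0, start=1,
                                              raw=False, output=True):
    print(line, inp, out)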
class IPEngineApp(BaseParallelApplication):
    name = 'ipengine'
    description = _description
    examples = _examples
    classes = List([ZMQInteractiveShell, ProfileDir, Session, EngineFactory,
                    Kernel, MPI])

    startup_script = Unicode(
        u'', config=True,
        help='specify a script to be run at startup')
    startup_command = Unicode(
        '', config=True,
        help='specify a command to be run at startup')

    url_file = Unicode(
        u'', config=True,
        help="""The full location of the file containing the connection
        information for the controller. If this is not given, the file must
        be in the security directory of the cluster directory. This location
        is resolved using the `profile` or `profile_dir` options.""",
    )
    wait_for_url_file = Float(
        5, config=True,
        help="""The maximum number of seconds to wait for url_file to exist.
        This is useful for batch-systems and shared-filesystems where the
        controller and engine are started at the same time and it may take
        a moment for the controller to write the connector files.""")

    url_file_name = Unicode(u'ipcontroller-engine.json', config=True)

    def _cluster_id_changed(self, name, old, new):
        if new:
            base = 'ipcontroller-%s' % new
        else:
            base = 'ipcontroller'
        self.url_file_name = "%s-engine.json" % base

    log_url = Unicode(
        '', config=True,
        help="""The URL for the iploggerapp instance, for forwarding logging
        to a central location.""")

    # an IPKernelApp instance, used to setup listening for shell frontends
    kernel_app = Instance(IPKernelApp, allow_none=True)

    aliases = Dict(aliases)
    flags = Dict(flags)

    @property
    def kernel(self):
        """allow access to the Kernel object, so I look like IPKernelApp"""
        return self.engine.kernel

    def find_url_file(self):
        """Set the url file.

        Here we don't try to actually see if it exists or is valid, as that
        is handled by the connection logic.
        """
        # Find the actual controller key file
        if not self.url_file:
            self.url_file = os.path.join(
                self.profile_dir.security_dir,
                self.url_file_name)

    def load_connector_file(self):
        """load config from a JSON connector file, at a *lower* priority
        than command-line/config files.
        """
        self.log.info("Loading url_file %r", self.url_file)
        config = self.config

        with open(self.url_file) as f:
            num_tries = 0
            max_tries = 5
            d = ""
            while not d:
                try:
                    d = json.loads(f.read())
                except ValueError:
                    if num_tries > max_tries:
                        raise
                    num_tries += 1
                    time.sleep(0.5)

        # allow hand-override of location for disambiguation
        # and ssh-server
        if 'EngineFactory.location' not in config:
            config.EngineFactory.location = d['location']
        if 'EngineFactory.sshserver' not in config:
            config.EngineFactory.sshserver = d.get('ssh')

        location = config.EngineFactory.location
        proto, ip = d['interface'].split('://')
        ip = disambiguate_ip_address(ip, location)
        d['interface'] = '%s://%s' % (proto, ip)

        # DO NOT allow override of basic URLs, serialization, or key
        # JSON file takes top priority there
        config.Session.key = cast_bytes(d['key'])
        config.Session.signature_scheme = d['signature_scheme']

        config.EngineFactory.url = d['interface'] + ':%i' % d['registration']

        config.Session.packer = d['pack']
        config.Session.unpacker = d['unpack']

        self.log.debug("Config changed:")
        self.log.debug("%r", config)
        self.connection_info = d

    def bind_kernel(self, **kwargs):
        """Promote engine to listening kernel, accessible to frontends."""
        if self.kernel_app is not None:
            return

        self.log.info(
            "Opening ports for direct connections as an IPython kernel")

        kernel = self.kernel

        kwargs.setdefault('config', self.config)
        kwargs.setdefault('log', self.log)
        kwargs.setdefault('profile_dir', self.profile_dir)
        kwargs.setdefault('session', self.engine.session)

        app = self.kernel_app = IPKernelApp(**kwargs)

        # allow IPKernelApp.instance():
        IPKernelApp._instance = app

        app.init_connection_file()
        # relevant contents of init_sockets:

        app.shell_port = app._bind_socket(kernel.shell_streams[0],
                                          app.shell_port)
        app.log.debug("shell ROUTER Channel on port: %i", app.shell_port)

        app.iopub_port = app._bind_socket(kernel.iopub_socket,
                                          app.iopub_port)
        app.log.debug("iopub PUB Channel on port: %i", app.iopub_port)

        kernel.stdin_socket = self.engine.context.socket(zmq.ROUTER)
        app.stdin_port = app._bind_socket(kernel.stdin_socket,
                                          app.stdin_port)
        app.log.debug("stdin ROUTER Channel on port: %i", app.stdin_port)

        # start the heartbeat, and log connection info:
        app.init_heartbeat()
        app.log_connection_info()
        app.connection_dir = self.profile_dir.security_dir
        app.write_connection_file()

    def init_engine(self):
        # This is the working dir by now.
        sys.path.insert(0, '')
        config = self.config
        # print config
        self.find_url_file()

        # was the url manually specified?
        keys = set(self.config.EngineFactory.keys())
        keys = keys.union(set(self.config.RegistrationFactory.keys()))

        if self.wait_for_url_file and not os.path.exists(self.url_file):
            self.log.warn("url_file %r not found", self.url_file)
            self.log.warn("Waiting up to %.1f seconds for it to arrive.",
                          self.wait_for_url_file)
            tic = time.time()
            while (not os.path.exists(self.url_file)
                    and (time.time() - tic < self.wait_for_url_file)):
                # wait for url_file to exist, or until time limit
                time.sleep(0.1)

        if os.path.exists(self.url_file):
            self.load_connector_file()
        else:
            self.log.fatal("Fatal: url file never arrived: %s",
                           self.url_file)
            self.exit(1)

        exec_lines = []
        for app in ('IPKernelApp', 'InteractiveShellApp'):
            if '%s.exec_lines' % app in config:
                exec_lines = config[app].exec_lines
                break

        exec_files = []
        for app in ('IPKernelApp', 'InteractiveShellApp'):
            if '%s.exec_files' % app in config:
                exec_files = config[app].exec_files
                break

        config.IPKernelApp.exec_lines = exec_lines
        config.IPKernelApp.exec_files = exec_files

        if self.startup_script:
            exec_files.append(self.startup_script)
        if self.startup_command:
            exec_lines.append(self.startup_command)

        # Create the underlying shell class and Engine
        # shell_class = import_item(self.master_config.Global.shell_class)
        # print self.config
        try:
            self.engine = EngineFactory(
                config=config, log=self.log,
                connection_info=self.connection_info,
            )
        except:
            self.log.error("Couldn't start the Engine", exc_info=True)
            self.exit(1)

    def forward_logging(self):
        if self.log_url:
            self.log.info("Forwarding logging to %s", self.log_url)
            context = self.engine.context
            lsock = context.socket(zmq.PUB)
            lsock.connect(self.log_url)
            handler = EnginePUBHandler(self.engine, lsock)
            handler.setLevel(self.log_level)
            self.log.addHandler(handler)

    def init_mpi(self):
        global mpi
        self.mpi = MPI(parent=self)

        mpi_import_statement = self.mpi.init_script
        if mpi_import_statement:
            try:
                self.log.info("Initializing MPI:")
                self.log.info(mpi_import_statement)
                exec(mpi_import_statement, globals())
            except:
                mpi = None
        else:
            mpi = None

    @catch_config_error
    def initialize(self, argv=None):
        super(IPEngineApp, self).initialize(argv)
        self.init_mpi()
        self.init_engine()
        self.forward_logging()

    def start(self):
        self.engine.start()
        try:
            self.engine.loop.start()
        except KeyboardInterrupt:
            self.log.critical("Engine Interrupted, shutting down...\n")
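# Sketch of the JSON connector file that load_connector_file() above
# consumes, written as a Python dict for illustration (only the keys that
# method actually reads; all values are illustrative):
connection_info = {
    'location': '10.0.0.1',          # controller host
    'interface': 'tcp://10.0.0.1',   # transport://ip, split and re-joined
    'registration': 55672,           # appended as the registration port
    'key': 'a0b1c2d3...',            # Session auth key
    'signature_scheme': 'hmac-sha256',
    'pack': 'json',                  # Session.packer
    'unpack': 'json',                # Session.unpacker
    'ssh': '',                       # optional ssh server override
}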
class Plot(widgets.DOMWidget):
    """
    Main K3D widget.

    Attributes:
        antialias: `int`:
            Enable antialiasing in WebGL renderer, changes have no effect
            after displaying.
        height: `int`:
            Height of the Widget in pixels, changes have no effect after
            displaying.
        background_color: `int`.
            Packed RGB color of the plot background (0xff0000 is red,
            0xff is blue), -1 is for transparent.
        camera_auto_fit: `bool`.
            Enable automatic camera setting after adding, removing or
            changing a plot object.
        grid_auto_fit: `bool`.
            Enable automatic adjustment of the plot grid to contained
            objects.
        grid_visible: `bool`.
            Enable or disable grid.
        screenshot_scale: `Float`.
            Multiplier for screenshot resolution.
        voxel_paint_color: `int`.
            The (initial) int value to be inserted when editing voxels.
        lighting: `Float`.
            Lighting factor.
        grid: `array_like`.
            6-element tuple specifying the bounds of the plot grid
            (x0, y0, z0, x1, y1, z1).
        camera: `array_like`.
            9-element list or array specifying camera position.
        camera_no_rotate: `Bool`.
            Lock for camera rotation.
        camera_no_zoom: `Bool`.
            Lock for camera zoom.
        camera_no_pan: `Bool`.
            Lock for camera pan.
        camera_rotate_speed: `Float`.
            Speed of camera rotation.
        camera_zoom_speed: `Float`.
            Speed of camera zoom.
        camera_pan_speed: `Float`.
            Speed of camera pan.
        camera_fov: `Float`.
            Camera Field of View.
        snapshot_include_js: `Bool`.
            If True, the snapshot HTML is standalone.
        axes: `list`.
            Axes labels for plot.
        time: `list`.
            Time value (used in TimeSeries)
        name: `string`.
            Name of the plot. Used in filenames of snapshots/screenshots etc.
        mode: `str`.
            Mode of K3D viewer. Legal values are:

            :`view`: No interaction with objects,
            :`add`: On voxels objects adding mode,
            :`change`: On voxels objects edit mode,
            :`callback`: Handling click_callback and hover_callback on some
                type of objects,
            :`manipulate`: Enable object transform widget.
        camera_mode: `str`.
            Mode of camera movement. Legal values are:

            :`trackball`: orbit around point with dynamic up-vector of
                camera,
            :`orbit`: orbit around point with fixed up-vector of camera,
            :`fly`: orbit around point with dynamic up-vector of camera,
                mouse wheel also moves target point.
        manipulate_mode: `str`.
            Mode of manipulate widgets. Legal values are:

            :`translate`: Translation widget,
            :`rotate`: Rotation widget,
            :`scale`: Scaling widget.
        auto_rendering: `Bool`.
            State of auto rendering.
        fps: `Float`.
            Fps of animation.
        objects: `list`.
            List of `k3d.objects.Drawable` currently included in the plot,
            not to be changed directly.
    """

    _view_name = Unicode('PlotView').tag(sync=True)
    _model_name = Unicode('PlotModel').tag(sync=True)
    _view_module = Unicode('k3d').tag(sync=True)
    _model_module = Unicode('k3d').tag(sync=True)
    _view_module_version = Unicode(version).tag(sync=True)
    _model_module_version = Unicode(version).tag(sync=True)
    _backend_version = Unicode(version).tag(sync=True)

    # readonly (specified at creation)
    antialias = Int(min=0, max=5).tag(sync=True)
    height = Int().tag(sync=True)

    # readonly (not to be modified directly)
    object_ids = List().tag(sync=True)

    # read-write
    camera_auto_fit = Bool(True).tag(sync=True)
    auto_rendering = Bool(True).tag(sync=True)
    snapshot_include_js = Bool(True).tag(sync=True)
    lighting = Float().tag(sync=True)
    fps = Float().tag(sync=True)
    grid_auto_fit = Bool(True).tag(sync=True)
    grid_visible = Bool(True).tag(sync=True)
    fps_meter = Bool(True).tag(sync=True)
    menu_visibility = Bool(True).tag(sync=True)
    screenshot_scale = Float().tag(sync=True)
    time = Float().tag(sync=True)
    grid = ListOrArray((-1, -1, -1, 1, 1, 1), minlen=6,
                       maxlen=6).tag(sync=True)
    background_color = Int().tag(sync=True)
    voxel_paint_color = Int().tag(sync=True)
    camera = ListOrArray(minlen=9, maxlen=9, empty_ok=True).tag(sync=True)
    camera_animation = TimeSeries(
        ListOrArray(minlen=9, maxlen=9, empty_ok=True)).tag(sync=True)
    camera_no_rotate = Bool(False).tag(sync=True)
    camera_no_zoom = Bool(False).tag(sync=True)
    camera_no_pan = Bool(False).tag(sync=True)
    camera_rotate_speed = Float().tag(sync=True)
    camera_zoom_speed = Float().tag(sync=True)
    camera_pan_speed = Float().tag(sync=True)
    clipping_planes = ListOrArray(empty_ok=True).tag(sync=True)
    colorbar_object_id = Int(-1).tag(sync=True)
    colorbar_scientific = Bool(False).tag(sync=True)
    rendering_steps = Int(1).tag(sync=True)
    screenshot = Unicode().tag(sync=True)
    snapshot = Unicode().tag(sync=True)
    camera_fov = Float().tag(sync=True)
    name = Unicode(default_value=None, allow_none=True).tag(sync=True)
    axes = List(minlen=3, maxlen=3,
                default_value=['x', 'y', 'z']).tag(sync=True)
    axes_helper = Float().tag(sync=True)
    mode = Unicode().tag(sync=True)
    camera_mode = Unicode().tag(sync=True)
    manipulate_mode = Unicode().tag(sync=True)

    objects = []

    def __init__(self,
                 antialias=3,
                 background_color=0xFFFFFF,
                 camera_auto_fit=True,
                 grid_auto_fit=True,
                 grid_visible=True,
                 height=512,
                 voxel_paint_color=0,
                 grid=(-1, -1, -1, 1, 1, 1),
                 screenshot_scale=2.0,
                 lighting=1.5,
                 time=0.0,
                 fps_meter=False,
                 menu_visibility=True,
                 colorbar_object_id=-1,
                 rendering_steps=1,
                 axes=['x', 'y', 'z'],
                 camera_no_rotate=False,
                 camera_no_zoom=False,
                 snapshot_include_js=True,
                 camera_no_pan=False,
                 camera_rotate_speed=1.0,
                 camera_zoom_speed=1.2,
                 camera_pan_speed=0.3,
                 camera_fov=45.0,
                 axes_helper=1.0,
                 name=None,
                 mode='view',
                 camera_mode='trackball',
                 manipulate_mode='translate',
                 auto_rendering=True,
                 fps=25.0,
                 *args, **kwargs):
        super(Plot, self).__init__()

        self.antialias = antialias
        self.camera_auto_fit = camera_auto_fit
        self.grid_auto_fit = grid_auto_fit
        self.fps_meter = fps_meter
        self.fps = fps
        self.grid = grid
        self.grid_visible = grid_visible
        self.background_color = background_color
        self.voxel_paint_color = voxel_paint_color
        self.screenshot_scale = screenshot_scale
        self.height = height
        self.lighting = lighting
        self.time = time
        self.menu_visibility = menu_visibility
        self.colorbar_object_id = colorbar_object_id
        self.rendering_steps = rendering_steps
        self.camera_no_rotate = camera_no_rotate
        self.camera_no_zoom = camera_no_zoom
        self.camera_no_pan = camera_no_pan
        self.camera_rotate_speed = camera_rotate_speed
        self.camera_zoom_speed = camera_zoom_speed
        self.camera_pan_speed = camera_pan_speed
        self.camera_fov = camera_fov
        self.axes = axes
        self.axes_helper = axes_helper
        self.name = name
        self.mode = mode
        self.snapshot_include_js = snapshot_include_js
        self.camera_mode = camera_mode
        self.manipulate_mode = manipulate_mode
        self.auto_rendering = auto_rendering
        self.camera = [2, -3, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]

        self.object_ids = []
        self.objects = []
        self.outputs = []

    def __iadd__(self, objs):
        """Add Drawable to plot."""
        assert isinstance(objs, Drawable)

        for obj in objs:
            if obj.id not in self.object_ids:
                self.object_ids = self.object_ids + [obj.id]
                self.objects.append(obj)

        return self

    def __isub__(self, objs):
        """Remove Drawable from plot."""
        assert isinstance(objs, Drawable)

        for obj in objs:
            self.object_ids = [id_ for id_ in self.object_ids
                               if id_ != obj.id]
            if obj in self.objects:
                self.objects.remove(obj)

        return self

    def display(self, **kwargs):
        """Show plot inside ipywidgets.Output()."""
        output = widgets.Output()

        with output:
            display(self, **kwargs)

        self.outputs.append(output)
        display(output)

    def render(self):
        """Trigger rendering on demand.

        Useful when self.auto_rendering == False."""
        self.send({'msg_type': 'render'})

    def start_auto_play(self):
        """Start animation of plot with objects using TimeSeries."""
        self.send({'msg_type': 'start_auto_play'})

    def stop_auto_play(self):
        """Stop animation of plot with objects using TimeSeries."""
        self.send({'msg_type': 'stop_auto_play'})

    def close(self):
        """Remove plot from all its ipywidgets.Output()-s."""
        for output in self.outputs:
            output.clear_output()
        self.outputs = []

    def camera_reset(self, factor=1.5):
        """Trigger auto-adjustment of camera.

        Useful when self.camera_auto_fit == False."""
        self.send({'msg_type': 'reset_camera', 'factor': factor})

    def get_auto_grid(self):
        d = np.stack([o.get_bounding_box() for o in self.objects])

        return np.dstack([np.min(d[:, 0::2], axis=0),
                          np.max(d[:, 1::2], axis=0)]).flatten()

    def get_auto_camera(self, factor=1.5, yaw=25, pitch=15):
        bounds = self.get_auto_grid()
        center = (bounds[::2] + bounds[1::2]) / 2.0
        radius = 0.5 * np.sum(np.abs(bounds[::2] - bounds[1::2]) ** 2) ** 0.5
        cam_distance = radius * factor / np.sin(
            np.deg2rad(self.camera_fov / 2.0))

        x = np.sin(np.deg2rad(pitch)) * np.cos(np.deg2rad(yaw))
        y = np.sin(np.deg2rad(pitch)) * np.sin(np.deg2rad(yaw))
        z = np.cos(np.deg2rad(pitch))

        if pitch not in [0, 180]:
            up = [0, 0, 1]
        else:
            up = [0, 1, 1]

        return [
            center[0] + x * cam_distance,
            center[1] + y * cam_distance,
            center[2] + z * cam_distance,
            center[0], center[1], center[2],
            up[0], up[1], up[2]
        ]

    def fetch_screenshot(self, only_canvas=False):
        """Request creating a PNG screenshot on the JS side and saving it in
        self.screenshot

        The result is a string of a PNG file in base64 encoding.
        This function requires a round-trip of websocket messages. The
        result will be available after the current cell finishes execution.
        """
        self.send({'msg_type': 'fetch_screenshot',
                   'only_canvas': only_canvas})

    def yield_screenshots(self, generator_function):
        """Decorator for a generator function receiving screenshots via
        yield."""
        @wraps(generator_function)
        def inner():
            generator = generator_function()

            def send_new_value(change):
                try:
                    generator.send(base64.b64decode(change.new))
                except StopIteration:
                    self.unobserve(send_new_value, 'screenshot')

            self.observe(send_new_value, 'screenshot')
            # start the decorated generator
            generator.send(None)

        return inner

    def fetch_snapshot(self, compression_level=9):
        """Request creating a HTML snapshot on the JS side and saving it in
        self.snapshot

        The result is a string: a HTML document with this plot embedded.
        This function requires a round-trip of websocket messages. The
        result will be available after the current cell finishes execution.
        """
        self.send({'msg_type': 'fetch_snapshot',
                   'compression_level': compression_level})

    def yield_snapshots(self, generator_function):
        """Decorator for a generator function receiving snapshots via
        yield."""
        @wraps(generator_function)
        def inner():
            generator = generator_function()

            def send_new_value(change):
                try:
                    generator.send(base64.b64decode(change.new))
                except StopIteration:
                    self.unobserve(send_new_value, 'snapshot')

            self.observe(send_new_value, 'snapshot')
            # start the decorated generator
            generator.send(None)

        return inner

    def get_binary_snapshot_objects(self):
        import msgpack
        from .helpers import to_json

        snapshot = {"objects": [], "chunkList": []}

        for o in self.objects:
            obj = {}
            for k, v in o.traits().items():
                if 'sync' in v.metadata:
                    obj[k] = to_json(k, o[k], o, o['compression_level'])
            snapshot['objects'].append(obj)

        return msgpack.packb(snapshot, use_bin_type=True)

    def get_snapshot_params(self):
        return {
            "cameraAutoFit": self.camera_auto_fit,
            "menuVisibility": self.menu_visibility,
            "gridAutoFit": self.grid_auto_fit,
            "gridVisible": self.grid_visible,
            "grid": self.grid,
            "antialias": self.antialias,
            "screenshotScale": self.screenshot_scale,
            "clearColor": self.background_color,
            "clippingPlanes": self.clipping_planes,
            "lighting": self.lighting,
            "time": self.time,
            "fpsMeter": self.fps_meter,
            "cameraMode": self.camera_mode,
            "colorbarObjectId": self.colorbar_object_id,
            "axes": self.axes,
            "cameraNoRotate": self.camera_no_rotate,
            "cameraNoZoom": self.camera_no_zoom,
            "cameraNoPan": self.camera_no_pan,
            "cameraRotateSpeed": self.camera_rotate_speed,
            "cameraZoomSpeed": self.camera_zoom_speed,
            "cameraPanSpeed": self.camera_pan_speed,
            "name": self.name,
            "camera_fov": self.camera_fov,
            "axesHelper": self.axes_helper,
            "cameraAnimation": self.camera_animation,
            "fps": self.fps
        }

    def get_snapshot(self, compression_level=9, additional_js_code=''):
        """Produce on the Python side a HTML document with the current plot
        embedded."""
        import os
        import io
        import zlib

        dir_path = os.path.dirname(os.path.realpath(__file__))

        data = self.get_binary_snapshot_objects()
        data = base64.b64encode(zlib.compress(data, compression_level))

        if self.snapshot_include_js:
            f = io.open(os.path.join(dir_path, 'static',
                                     'snapshot_standalone.txt'),
                        mode="r", encoding="utf-8")
            template = f.read()
            f.close()

            f = io.open(os.path.join(dir_path, 'static', 'standalone.js'),
                        mode="r", encoding="utf-8")
            template = template.replace(
                '[K3D_SOURCE]',
                base64.b64encode(
                    zlib.compress(f.read().encode(),
                                  compression_level)).decode("utf-8"))
            f.close()

            f = io.open(os.path.join(dir_path, 'static', 'require.js'),
                        mode="r", encoding="utf-8")
            template = template.replace('[REQUIRE_JS]', f.read())
            f.close()

            f = io.open(os.path.join(dir_path, 'static',
                                     'pako_inflate.min.js'),
                        mode="r", encoding="utf-8")
            template = template.replace('[PAKO_JS]', f.read())
            f.close()
        else:
            f = io.open(os.path.join(dir_path, 'static',
                                     'snapshot_online.txt'),
                        mode="r", encoding="utf-8")
            template = f.read()
            f.close()

        template = template.replace('[VERSION]', self._view_module_version)
        template = template.replace('[DATA]', data.decode("utf-8"))

        params = self.get_snapshot_params()
        template = template.replace('[PARAMS]', json.dumps(params))
        template = template.replace('[CAMERA]', str(self.camera))
        template = template.replace('[ADDITIONAL]', additional_js_code)

        return template
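# Usage sketch (k3d public API; k3d.points is one of the standard drawable
# factories):
import k3d
import numpy as np

plot = k3d.plot(camera_auto_fit=True)
plot += k3d.points(np.random.randn(100, 3).astype(np.float32),
                   point_size=0.2)
plot.display()

# Produce a standalone HTML snapshot on the Python side:
html = plot.get_snapshot()
with open('plot.html', 'w') as f:
    f.write(html)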
class TFMaker(Component):
    name = 'TFMaker'

    vped_list = List(Int, None, allow_none=True,
                     help='List of the vped value for each input '
                          'file').tag(config=True)
    pedestal_path = Unicode(None, allow_none=True,
                            help='Path to the pedestal file (TF requires '
                                 'the pedestal to be first subtracted '
                                 'before generating)').tag(config=True)
    adc_step = Int(8, help='Step in ADC that the TF file will be stored '
                           'in').tag(config=True)
    output_path = Unicode(None, allow_none=True,
                          help='Path to save the TargetCalib TF '
                               'file').tag(config=True)
    number_tms = Int(32, help='Number of TARGET modules '
                              'connected').tag(config=True)
    vped_zero = Int(1050,
                    help='VPed value for the pedestal').tag(config=True)
    compress = Bool(False, help='Compress the TF file?').tag(config=True)
    tf_input = Bool(False,
                    help='Create a numpy file containing the input TF '
                         'array before the switch of '
                         'axis').tag(config=True)

    def __init__(self, config, tool, **kwargs):
        """
        Generator of Transfer Function files.

        Parameters
        ----------
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            Set to None if no configuration to pass.
        tool : ctapipe.core.Tool
            Tool executable that is calling this component.
            Passes the correct logger to the component.
            Set to None if no Tool to pass.
        kwargs
        """
        super().__init__(config=config, parent=tool, **kwargs)

        if self.vped_list is None:
            raise ValueError("Please supply vped_list")
        if self.pedestal_path is None:
            raise ValueError("Please specify a pedestal path")
        if self.output_path is None:
            raise ValueError("Please specify an output path to save "
                             "TF file")

        self.ped = PedestalSubtractor(config=config, tool=tool,
                                      pedestal_path=self.pedestal_path)
        vpeds = np.array(self.vped_list, dtype=np.uint16)
        self.tf_obj = TCTfMaker(vpeds, self.number_tms, self.vped_zero)
        self.current_vped = None

    def add_event(self, event, vped):
        """
        Add an event into the transfer function.

        Parameters
        ----------
        event : container
            A `ctapipe` event container
        vped : int
            The vped of the file from which the event comes
        """
        if self.current_vped != vped:
            self.current_vped = vped
            self.tf_obj.SetVpedIndex(vped)
        telid = 0
        tm = event.meta['tm']
        tmpix = event.meta['tmpix']
        waveforms = event.r0.tel[telid].adc_samples[0]
        first_cell_ids = event.r0.tel[telid].first_cell_ids
        pedsub = np.zeros(waveforms.shape, dtype=np.float32)
        self.ped.apply(event, pedsub)
        self.tf_obj.AddEvent(pedsub, first_cell_ids)

    def save(self):
        """Save the TF file."""
        self.log.info("Saving transfer function to: {}".format(
            self.output_path))
        self.tf_obj.Save(self.output_path, self.adc_step, self.compress)
        if self.tf_input:
            self.save_tf_input()

    def save_tf_input(self):
        tf_input = np.array(self.tf_obj.GetTf())
        vped_vector = np.array(self.tf_obj.GetVpedVector())
        tfinput_path = os.path.splitext(self.output_path)[0] + '_input.npy'
        vped_path = os.path.splitext(self.output_path)[0] + '_vped.npy'
        self.log.info("Saving TF input array to: {}".format(tfinput_path))
        np.save(tfinput_path, tf_input)
        self.log.info("Saving Vped vector to: {}".format(vped_path))
        np.save(vped_path, vped_vector)
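# Usage sketch: build a TF from runs taken at different VPed settings, then
# save it.  Paths and VPed values are illustrative, and `readers` (one event
# reader per input run) is a hypothetical placeholder.
tf_maker = TFMaker(config=None, tool=None,
                   vped_list=[800, 900, 1000, 1100, 1200],
                   pedestal_path='/data/pedestal.tcal',
                   output_path='/data/tf.tcal')
for vped, reader in zip(tf_maker.vped_list, readers):
    for event in reader.read():
        tf_maker.add_event(event, vped)
tf_maker.save()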
class ProjView(RegulusDOMWidget):
    _model_name = Unicode('ProjModel').tag(sync=True)
    _view_name = Unicode('ProjView').tag(sync=True)

    data = Instance(klass=DataWidget, allow_none=True).tag(
        sync=True, **widget_serialization)
    show = List(Int()).tag(sync=True)
    show_graph = Bool(True).tag(sync=True)
    show_pts = Bool(False).tag(sync=True)
    show_inverse = Bool(False).tag(sync=True)
    highlight = Int(-2).tag(sync=True)
    axes = TypedTuple(trait=AxisTraitType()).tag(sync=True,
                                                 **widget_serialization)
    color = Unicode(None, allow_none=True).tag(sync=True)
    color_info = List((None, 0, 1)).tag(sync=True)
    # colors = Any([]).tag(sync=True)
    inverse = Dict(allow_none=True).tag(sync=True)

    def __init__(self, data=None, **kwargs):
        self._inverse_cache = set()
        super().__init__(**kwargs)
        if data is not None:
            self.data = data

    @validate('data')
    def _valid_value(self, proposal):
        data = proposal['value']
        if data is not None and not isinstance(data, DataWidget):
            data = DataWidget(data=data)
        return data

    @observe('color')
    def color_changed(self, _):
        # reset before (possibly) recomputing; use != rather than identity
        # comparison with a string literal
        if self.color is None or self.color != '':
            self.color_info = (None, 0, 1)
        if self.data is not None and self.data.data is not None:
            data = self.data.data
            if self.color in data.values:
                c = data.values[self.color]
                self.color_info = (list(data.values).index(self.color),
                                   c.min(), c.max())
            else:
                print('invalid color', self.color)

    def reset_inverse(self):
        self._inverse_cache.clear()
        self._show({'new': self.show})

    def _send_msg(self, pid, data):
        self.inverse = {pid: data}
        self.inverse = None

    @observe('show')
    def _show(self, change):
        self._update_inverse()

    @observe('show_inverse')
    def _show_inverse(self, change):
        self._update_inverse()

    def _update_inverse(self):
        if self.show_inverse and self.data is not None:
            r = self.data.data
            if not r.attr.has('inverse_regression'):
                return
            pids = filter(lambda pid: pid not in self._inverse_cache,
                          self.show)
            msg = {}
            t0 = time()
            for node in r.find_nodes(pids):
                curve, std = r.attr['inverse_regression'][node]
                msg[node.id] = curve.reset_index().values.tolist()
                self._inverse_cache.add(node.id)
                if time() - t0 > 1:
                    self.inverse = msg
                    self.inverse = None
                    msg = {}
                    t0 = time()
            if len(msg) > 0:
                self.inverse = msg
                self.inverse = None
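# Usage sketch for the regulus widget above; `regulus_data` is a
# hypothetical dataset loaded with the regulus library (the validator
# wraps it in a DataWidget automatically):
view = ProjView(data=regulus_data)
view.show = [0, 1, 2]       # partition ids to display
view.show_inverse = True    # stream inverse-regression curves to the view
view.color = 'y'            # color by one of the value columns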
class PredictionApplication(Application):
    name = Unicode(u'hdnnpy predict')
    description = ('Predict properties for atomic structures using trained'
                   ' HDNNP.')

    verbose = Bool(
        False,
        help='Set verbose mode'
        ).tag(config=True)

    classes = List([PredictionConfig])

    config_file = Path(
        'prediction_config.py',
        help='Load this config file')

    aliases = Dict({
        'log_level': 'Application.log_level',
        })

    flags = Dict({
        'verbose': ({
            'PredictionApplication': {
                'verbose': True,
                },
            }, 'Set verbose mode'),
        'v': ({
            'PredictionApplication': {
                'verbose': True,
                },
            }, 'Set verbose mode'),
        'debug': ({
            'Application': {
                'log_level': 10,
                },
            }, 'Set log level to DEBUG'),
        })

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.dataset_config = None
        self.model_config = None
        self.prediction_config = None

    def initialize(self, argv=None):
        self.parse_command_line(argv)
        self.load_config_file(self.config_file)
        self.prediction_config = PredictionConfig(config=self.config)

        yaml.add_constructor('Path', pyyaml_path_constructor)
        training_result = yaml.load(
            (self.prediction_config.load_dir / 'training_result.yaml').open())
        self.dataset_config = DatasetConfig(**training_result['dataset'])
        self.model_config = ModelConfig(**training_result['model'])

    def start(self):
        pc = self.prediction_config
        shutil.copy(self.config_file, pc.load_dir / self.config_file.name)
        tag_xyz_map, pc.elements = parse_xyz(
            pc.data_file, save=False, verbose=self.verbose)
        datasets = self.construct_datasets(tag_xyz_map)
        datasets = DatasetGenerator(*datasets).all()
        if MPI.rank == 0:
            results = self.predict(datasets)
            self.dump_result(results)

    def construct_datasets(self, tag_xyz_map):
        dc = self.dataset_config
        mc = self.model_config
        pc = self.prediction_config

        preprocesses = []
        for (name, args, kwargs) in dc.preprocesses:
            preprocess = PREPROCESS[name](*args, **kwargs)
            preprocess.load(
                pc.load_dir / 'preprocess' / f'{name}.npz',
                verbose=self.verbose)
            preprocesses.append(preprocess)

        datasets = []
        for pattern in pc.tags:
            for tag in fnmatch.filter(tag_xyz_map, pattern):
                if self.verbose:
                    pprint(f'Construct sub dataset tagged as "{tag}"')
                tagged_xyz = tag_xyz_map.pop(tag)
                structures = AtomicStructure.read_xyz(tagged_xyz)

                # prepare descriptor dataset
                descriptor = DESCRIPTOR_DATASET[dc.descriptor](
                    pc.order, structures, **dc.parameters)
                descriptor.make(verbose=self.verbose)

                # prepare empty property dataset
                property_ = PROPERTY_DATASET[dc.property_](
                    pc.order, structures)

                # construct test dataset from descriptor & property datasets
                dataset = HDNNPDataset(descriptor, property_)
                dataset.construct(
                    all_elements=pc.elements, preprocesses=preprocesses,
                    shuffle=False, verbose=self.verbose)
                datasets.append(dataset)
                dc.n_sample += dataset.total_size
                mc.n_input = dataset.n_input
                mc.n_output = dataset.n_label

        return datasets

    def predict(self, datasets):
        mc = self.model_config
        pc = self.prediction_config
        results = []

        # master model
        master_nnp = MasterNNP(
            pc.elements, mc.n_input, mc.hidden_layers, mc.n_output)
        chainer.serializers.load_npz(
            pc.load_dir / 'master_nnp.npz', master_nnp)

        for dataset in datasets:
            # hdnnp model
            hdnnp = HighDimensionalNNP(
                dataset.elemental_composition,
                mc.n_input, mc.hidden_layers, mc.n_output)
            hdnnp.sync_param_with(master_nnp)

            batch = chainer.dataset.concat_examples(dataset)
            inputs = [batch[f'inputs/{i}'] for i in range(pc.order + 1)]

            with chainer.using_config('train', False), \
                    chainer.using_config('enable_backprop', False):
                predictions = hdnnp.predict(inputs, pc.order)
            result = {
                **{'tag': dataset.tag},
                **{property_: coefficient * prediction.data
                   for property_, coefficient, prediction
                   in zip(dataset.property.properties,
                          dataset.property.coefficients,
                          predictions)},
                }
            results.append(result)
        return results

    def dump_result(self, results):
        pc = self.prediction_config
        result_file = pc.load_dir / f'prediction_result{pc.dump_format}'
        if pc.dump_format == '.npz':
            kv_result = {}
            for result in results:
                tag = result.pop('tag')
                kv_result.update({tag + '/' + key: value
                                  for key, value in result.items()})
            np.savez(result_file, **kv_result)
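
# --- Usage sketch (illustrative, not from the source) -----------------------
# A traitlets Application is normally driven from a script entry point; the
# conventional launch pattern for the class above would be:

def main():
    app = PredictionApplication()
    app.initialize()   # parses argv and loads prediction_config.py
    app.start()        # builds datasets, predicts, dumps results

if __name__ == '__main__':
    main()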
class WebServer(Application):
    port = Integer(8081,
        help="""The internal port for the Hub process.

        This is the internal port of the hub itself. It should never be
        accessed directly. See JupyterHub.port for the public port to use
        when accessing jupyterhub. It is rare that this port should be set
        except in cases of port conflict.
        """
    ).tag(config=True)

    ip = Unicode('127.0.0.1',
        help="""The ip address for the Hub process to *bind* to.

        By default, the hub listens on localhost only. This address must be
        accessible from the proxy and user servers. You may need to set this
        to a public ip or '' for all interfaces if the proxy or user servers
        are in containers or on a different host.

        See `hub_connect_ip` for cases where the bind and connect address
        should differ.
        """
    ).tag(config=True)

    config_file = Unicode(
        'config.py',
        help="The config file to load",
    ).tag(config=True)

    tornado_settings = Dict(
        help="Extra settings overrides to pass to the tornado application."
    ).tag(config=True)

    handlers = List()

    async def start(self):
        """Start the whole thing"""
        self.io_loop = loop = IOLoop.current()

        if self.subapp:
            self.subapp.start()
            loop.stop()
            return

    def init_handlers(self):
        h = []
        # set default handlers
        h.extend(handlers.default_handlers)
        h.extend(apihandlers.default_handlers)
        self.handlers.extend(h)

    def init_tornado_application(self):
        """Instantiate the tornado Application object"""
        self.tornado_application = web.Application(self.handlers,
                                                   **self.tornado_settings)
        # Use the resolved trait rather than reaching into self.config,
        # which is empty when the port was not set in a config file.
        print("\nWeb Server listening on port", self.port)
        self.tornado_application.listen(self.port)

    @catch_config_error
    async def initialize(self, *args):
        self.load_config_file(self.config_file)
        self.init_handlers()
        self.init_tornado_application()

    async def launch_instance_async(self, argv=None):
        try:
            await self.initialize(argv)
            await self.start()
        except Exception as e:
            self.log.exception("%s", e)
            self.exit(1)

    @classmethod
    def launch_instance(cls, argv=None, **kwargs):
        """Launch an instance of a WebServer Application"""
        self = cls.instance()
        AsyncIOMainLoop().install()
        loop = IOLoop.current()
        loop.add_callback(self.launch_instance_async, argv)
        try:
            loop.start()
        except KeyboardInterrupt:
            print("\nKeyboard Interrupted")
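
# --- Usage sketch (illustrative, not from the source) -----------------------
# launch_instance installs the asyncio-backed IOLoop and schedules
# initialize()/start(), so a script entry point only needs:

if __name__ == '__main__':
    WebServer.launch_instance()

# Configuration follows the usual traitlets pattern; e.g. in config.py:
#   c.WebServer.port = 9000
#   c.WebServer.tornado_settings = {'debug': True}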
class ContentsManager(LoggingConfigurable):
    """Base class for serving files and directories.

    This serves any text or binary file, as well as directories, with
    special handling for JSON notebook documents.

    Most APIs take a path argument, which is always an API-style unicode
    path, and always refers to a directory.

    - unicode, not url-escaped
    - '/'-separated
    - leading and trailing '/' will be stripped
    - if unspecified, path defaults to '', indicating the root path.
    """

    notary = Instance(sign.NotebookNotary)

    def _notary_default(self):
        return sign.NotebookNotary(parent=self)

    hide_globs = List(Unicode(), [
        u'__pycache__', '*.pyc', '*.pyo',
        '.DS_Store', '*.so', '*.dylib', '*~',
    ], config=True, help="""
        Glob patterns to hide in file and directory listings.
    """)

    untitled_notebook = Unicode("Untitled", config=True,
        help="The base name used when creating untitled notebooks.")

    untitled_file = Unicode("untitled", config=True,
        help="The base name used when creating untitled files.")

    untitled_directory = Unicode("Untitled Folder", config=True,
        help="The base name used when creating untitled directories.")

    pre_save_hook = Any(None, config=True, allow_none=True,
        help="""Python callable or importstring thereof

        To be called on a contents model prior to save.

        This can be used to process the structure, such as removing notebook
        outputs or other side effects that should not be saved.

        It will be called as (all arguments passed by keyword)::

            hook(path=path, model=model, contents_manager=self)

        - model: the model to be saved. Includes file contents.
          Modifying this dict will affect the file that is stored.
        - path: the API path of the save destination
        - contents_manager: this ContentsManager instance
        """
    )

    def _pre_save_hook_changed(self, name, old, new):
        if new and isinstance(new, string_types):
            self.pre_save_hook = import_item(self.pre_save_hook)
        elif new:
            if not callable(new):
                raise TraitError("pre_save_hook must be callable")

    def run_pre_save_hook(self, model, path, **kwargs):
        """Run the pre-save hook if defined, and log errors"""
        if self.pre_save_hook:
            try:
                self.log.debug("Running pre-save hook on %s", path)
                self.pre_save_hook(model=model, path=path,
                                   contents_manager=self, **kwargs)
            except Exception:
                self.log.error("Pre-save hook failed on %s",
                               path, exc_info=True)

    checkpoints_class = Type(Checkpoints, config=True)
    checkpoints = Instance(Checkpoints, config=True)
    checkpoints_kwargs = Dict(config=True)

    def _checkpoints_default(self):
        return self.checkpoints_class(**self.checkpoints_kwargs)

    def _checkpoints_kwargs_default(self):
        return dict(
            parent=self,
            log=self.log,
        )

    # ContentsManager API part 1: methods that must be
    # implemented in subclasses.

    def dir_exists(self, path):
        """Does a directory exist at the given path?

        Like os.path.isdir

        Override this method in subclasses.

        Parameters
        ----------
        path : string
            The path to check

        Returns
        -------
        exists : bool
            Whether the path does indeed exist.
        """
        raise NotImplementedError

    def is_hidden(self, path):
        """Is path a hidden directory or file?

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root dir).

        Returns
        -------
        hidden : bool
            Whether the path is hidden.
        """
        raise NotImplementedError

    def file_exists(self, path=''):
        """Does a file exist at the given path?

        Like os.path.isfile

        Override this method in subclasses.

        Parameters
        ----------
        path : string
            The API path of a file to check for.

        Returns
        -------
        exists : bool
            Whether the file exists.
        """
        raise NotImplementedError('must be implemented in a subclass')

    def exists(self, path):
        """Does a file or directory exist at the given path?

        Like os.path.exists

        Parameters
        ----------
        path : string
            The API path of a file or directory to check for.

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        return self.file_exists(path) or self.dir_exists(path)

    def get(self, path, content=True, type=None, format=None):
        """Get a file or directory model."""
        raise NotImplementedError('must be implemented in a subclass')

    def save(self, model, path):
        """
        Save a file or directory model to path.

        Should return the saved model with no content.  Save implementations
        should call self.run_pre_save_hook(model=model, path=path) prior to
        writing any data.
        """
        raise NotImplementedError('must be implemented in a subclass')

    def delete_file(self, path):
        """Delete the file or directory at path."""
        raise NotImplementedError('must be implemented in a subclass')

    def rename_file(self, old_path, new_path):
        """Rename a file or directory."""
        raise NotImplementedError('must be implemented in a subclass')

    # ContentsManager API part 2: methods that have useable default
    # implementations, but can be overridden in subclasses.

    def delete(self, path):
        """Delete a file/directory and any associated checkpoints."""
        path = path.strip('/')
        if not path:
            raise HTTPError(400, "Can't delete root")
        self.delete_file(path)
        self.checkpoints.delete_all_checkpoints(path)

    def rename(self, old_path, new_path):
        """Rename a file and any checkpoints associated with that file."""
        self.rename_file(old_path, new_path)
        self.checkpoints.rename_all_checkpoints(old_path, new_path)

    def update(self, model, path):
        """Update the file's path

        For use in PATCH requests, to enable renaming a file without
        re-uploading its contents. Only used for renaming at the moment.
        """
        path = path.strip('/')
        new_path = model.get('path', path).strip('/')
        if path != new_path:
            self.rename(path, new_path)
        model = self.get(new_path, content=False)
        return model

    def info_string(self):
        return "Serving contents"

    def get_kernel_path(self, path, model=None):
        """Return the API path for the kernel

        KernelManagers can turn this value into a filesystem path, or ignore
        it altogether.

        The default value here will start kernels in the directory of the
        notebook server. FileContentsManager overrides this to use the
        directory containing the notebook.
        """
        return ''

    def increment_filename(self, filename, path='', insert=''):
        """Increment a filename until it is unique.

        Parameters
        ----------
        filename : unicode
            The name of a file, including extension
        path : unicode
            The API path of the target's directory

        Returns
        -------
        name : unicode
            A filename that is unique, based on the input filename.
        """
        path = path.strip('/')
        basename, ext = os.path.splitext(filename)
        for i in itertools.count():
            if i:
                insert_i = '{}{}'.format(insert, i)
            else:
                insert_i = ''
            name = u'{basename}{insert}{ext}'.format(basename=basename,
                                                     insert=insert_i,
                                                     ext=ext)
            if not self.exists(u'{}/{}'.format(path, name)):
                break
        return name

    def validate_notebook_model(self, model):
        """Add failed-validation message to model"""
        try:
            validate(model['content'])
        except ValidationError as e:
            model['message'] = u'Notebook Validation failed: {}:\n{}'.format(
                e.message,
                json.dumps(e.instance, indent=1,
                           default=lambda obj: '<UNKNOWN>'),
            )
        return model

    def new_untitled(self, path='', type='', ext=''):
        """Create a new untitled file or directory in path

        path must be a directory

        File extension can be specified.

        Use `new` to create files with a fully specified path (including
        filename).
        """
        path = path.strip('/')
        if not self.dir_exists(path):
            raise HTTPError(404, 'No such directory: %s' % path)

        model = {}
        if type:
            model['type'] = type

        if ext == '.ipynb':
            model.setdefault('type', 'notebook')
        else:
            model.setdefault('type', 'file')

        insert = ''
        if model['type'] == 'directory':
            untitled = self.untitled_directory
            insert = ' '
        elif model['type'] == 'notebook':
            untitled = self.untitled_notebook
            ext = '.ipynb'
        elif model['type'] == 'file':
            untitled = self.untitled_file
        else:
            raise HTTPError(400, "Unexpected model type: %r" % model['type'])

        name = self.increment_filename(untitled + ext, path, insert=insert)
        path = u'{0}/{1}'.format(path, name)
        return self.new(model, path)

    def new(self, model=None, path=''):
        """Create a new file or directory and return its model with no content.

        To create a new untitled entity in a directory, use `new_untitled`.
        """
        path = path.strip('/')
        if model is None:
            model = {}

        if path.endswith('.ipynb'):
            model.setdefault('type', 'notebook')
        else:
            model.setdefault('type', 'file')

        # no content, not a directory, so fill out new-file model
        if 'content' not in model and model['type'] != 'directory':
            if model['type'] == 'notebook':
                model['content'] = new_notebook()
                model['format'] = 'json'
            else:
                model['content'] = ''
                model['type'] = 'file'
                model['format'] = 'text'

        model = self.save(model, path)
        return model

    def copy(self, from_path, to_path=None):
        """Copy an existing file and return its new model.

        If to_path not specified, it will be the parent directory of
        from_path. If to_path is a directory, filename will increment
        `from_path-Copy#.ext`.

        from_path must be a full path to a file.
        """
        path = from_path.strip('/')

        if to_path is not None:
            to_path = to_path.strip('/')

        if '/' in path:
            from_dir, from_name = path.rsplit('/', 1)
        else:
            from_dir = ''
            from_name = path

        model = self.get(path)
        model.pop('path', None)
        model.pop('name', None)
        if model['type'] == 'directory':
            raise HTTPError(400, "Can't copy directories")

        if to_path is None:
            to_path = from_dir
        if self.dir_exists(to_path):
            name = copy_pat.sub(u'.', from_name)
            to_name = self.increment_filename(name, to_path, insert='-Copy')
            to_path = u'{0}/{1}'.format(to_path, to_name)

        model = self.save(model, to_path)
        return model

    def log_info(self):
        self.log.info(self.info_string())

    def trust_notebook(self, path):
        """Explicitly trust a notebook

        Parameters
        ----------
        path : string
            The path of a notebook
        """
        model = self.get(path)
        nb = model['content']
        self.log.warn("Trusting notebook %s", path)
        self.notary.mark_cells(nb, True)
        self.save(model, path)

    def check_and_sign(self, nb, path=''):
        """Check for trusted cells, and sign the notebook.

        Called as a part of saving notebooks.

        Parameters
        ----------
        nb : dict
            The notebook dict
        path : string
            The notebook's path (for logging)
        """
        if self.notary.check_cells(nb):
            self.notary.sign(nb)
        else:
            self.log.warn("Saving untrusted notebook %s", path)

    def mark_trusted_cells(self, nb, path=''):
        """Mark cells as trusted if the notebook signature matches.

        Called as a part of loading notebooks.

        Parameters
        ----------
        nb : dict
            The notebook object (in current nbformat)
        path : string
            The notebook's path (for logging)
        """
        trusted = self.notary.check_signature(nb)
        if not trusted:
            self.log.warn("Notebook %s is not trusted", path)
        self.notary.mark_cells(nb, trusted)

    def should_list(self, name):
        """Should this file/directory name be displayed in a listing?"""
        return not any(fnmatch(name, glob) for glob in self.hide_globs)

    # Part 3: Checkpoints API

    def create_checkpoint(self, path):
        """Create a checkpoint."""
        return self.checkpoints.create_checkpoint(self, path)

    def restore_checkpoint(self, checkpoint_id, path):
        """Restore a checkpoint."""
        self.checkpoints.restore_checkpoint(self, checkpoint_id, path)

    def list_checkpoints(self, path):
        return self.checkpoints.list_checkpoints(path)

    def delete_checkpoint(self, checkpoint_id, path):
        return self.checkpoints.delete_checkpoint(checkpoint_id, path)
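
# --- Usage sketch (illustrative, not from the source) -----------------------
# The "part 1" methods above are the minimum a concrete manager must supply.
# A toy in-memory manager (demo only; assumes nothing beyond the base class):

class DictContentsManager(ContentsManager):
    """Keeps file models in a plain dict keyed by API path."""
    _store = Dict()

    def dir_exists(self, path):
        return path.strip('/') == ''   # single flat root directory

    def is_hidden(self, path):
        return False

    def file_exists(self, path=''):
        return path.strip('/') in self._store

    def get(self, path, content=True, type=None, format=None):
        return self._store[path.strip('/')]

    def save(self, model, path):
        self.run_pre_save_hook(model=model, path=path)
        self._store[path.strip('/')] = model
        return {k: v for k, v in model.items() if k != 'content'}

    def delete_file(self, path):
        del self._store[path.strip('/')]

    def rename_file(self, old_path, new_path):
        self._store[new_path.strip('/')] = self._store.pop(old_path.strip('/'))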
class DisplayIntegrator(Tool):
    name = "ctapipe-display-integration"
    description = __doc__

    event_index = Int(0, help="Event index to view.").tag(config=True)
    use_event_id = Bool(
        False,
        help="event_index will obtain an event using event_id instead of index.",
    ).tag(config=True)
    telescope = Int(
        None,
        allow_none=True,
        help="Telescope to view. Set to None to display the first "
             "telescope with data.",
    ).tag(config=True)
    channel = Enum([0, 1], 0, help="Channel to view").tag(config=True)

    extractor_product = traits.enum_trait(ImageExtractor,
                                          default="NeighborPeakWindowSum")

    aliases = Dict(
        dict(
            f="EventSource.input_url",
            max_events="EventSource.max_events",
            extractor="DisplayIntegrator.extractor_product",
            E="DisplayIntegrator.event_index",
            T="DisplayIntegrator.telescope",
            C="DisplayIntegrator.channel",
        ))
    flags = Dict(
        dict(id=(
            # The flag must target this tool's own trait; the original
            # pointed at "DisplayDL1Calib.use_event_index", which does
            # not exist here.
            {"DisplayIntegrator": {"use_event_id": True}},
            "event_index will obtain an event using event_id instead of index.",
        )))
    classes = List([EventSource] + traits.classes_with_traits(ImageExtractor))

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # make sure gzip files are seekable
        self.config.SimTelEventSource.back_seekable = True
        self.eventseeker = None
        self.extractor = None
        self.calibrator = None

    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"
        event_source = self.add_component(EventSource.from_config(parent=self))
        self.eventseeker = self.add_component(
            EventSeeker(event_source, parent=self))
        self.extractor = self.add_component(
            ImageExtractor.from_name(self.extractor_product,
                                     parent=self,
                                     subarray=event_source.subarray))
        # stored as `self.calibrator` so the attribute initialised in
        # __init__ is the one actually used (the original assigned
        # `self.calibrate` here, leaving `self.calibrator` unused)
        self.calibrator = self.add_component(
            CameraCalibrator(parent=self,
                             image_extractor=self.extractor,
                             subarray=event_source.subarray))

    def start(self):
        event_num = self.event_index
        if self.use_event_id:
            event_num = str(event_num)
        event = self.eventseeker[event_num]

        # Calibrate
        self.calibrator(event)

        # Select telescope
        tels = list(event.r0.tels_with_data)
        telid = self.telescope
        if telid is None:
            telid = tels[0]
        if telid not in tels:
            self.log.error("[event] please specify one of the following "
                           "telescopes for this event: {}".format(tels))
            exit()

        extractor_name = self.extractor.__class__.__name__

        plot(event, telid, self.channel, extractor_name)

    def finish(self):
        pass
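
# --- Usage sketch (illustrative, not from the source) -----------------------
# As a ctapipe Tool this is driven from the command line; the aliases map
# short options onto traits, e.g.
#
#   ctapipe-display-integration -f events.simtel.gz -E 5 -T 1 -C 0
#
# or programmatically (`main` is an assumed entry point, not source code):

def main(argv=None):
    exe = DisplayIntegrator()
    exe.run(argv)  # Tool.run drives setup() -> start() -> finish()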
class LeToRIRFusionFeatureExtractor(LeToRFeatureExtractor):
    """
    extract the IR fusion features
    """
    feature_name_pre = Unicode('IRFusion')
    l_text_fields = List(Unicode,
                         default_value=TARGET_TEXT_FIELDS).tag(config=True)
    l_model = List(Unicode,
                   default_value=['lm_dir', 'bm25', 'coordinate', 'tf_idf']
                   ).tag(config=True)
    corpus_stat_pre = Unicode(help="the file pre of corpus stats").tag(
        config=True)

    def __init__(self, **kwargs):
        super(LeToRIRFusionFeatureExtractor, self).__init__(**kwargs)
        self.s_model = set(self.l_model)
        self.h_field_h_df = dict()
        if self.corpus_stat_pre:
            l_field_h_df, self.h_corpus_stat = load_corpus_stat(
                self.corpus_stat_pre, self.l_text_fields)
            self.h_field_h_df = dict(l_field_h_df)
            for field in self.l_text_fields:
                assert field in self.h_corpus_stat
                assert field in self.h_field_h_df

    def set_external_info(self, external_info):
        super(LeToRIRFusionFeatureExtractor,
              self).set_external_info(external_info)
        self.h_field_h_df = external_info.h_field_h_df
        self.h_corpus_stat = external_info.h_corpus_stat
        return

    def extract_for_text(self, query, docno, h_q_info, h_doc_info):
        h_feature = {}
        # logging.info('extracting IR fusion for q [%s], doc [%s]', query, docno)
        # logging.info('q_info %s', json.dumps(h_q_info))
        # logging.info('doc_info %s', json.dumps(h_doc_info))
        h_tf = text2lm(query.lower())
        for field in self.l_text_fields:
            total_df = self.h_corpus_stat[field]['total_df']
            avg_doc_len = self.h_corpus_stat[field]['average_len']
            h_doc_df = self.h_field_h_df[field]
            h_doc_tf = {}
            if field in h_doc_info:
                h_doc_tf = text2lm(h_doc_info[field].lower(), clean=True)
            term_stat = TermStat()
            term_stat.set_from_raw(h_tf, h_doc_tf, h_doc_df,
                                   total_df, avg_doc_len)
            l_sim_score = term_stat.mul_scores()
            for sim, score in l_sim_score:
                if sim in self.s_model:
                    feature_name = (self.feature_name_pre + sim.title()
                                    + field.title())
                    h_feature[feature_name] = score
        return h_feature

    def extract_doc_feature(self, docno, h_doc_info):
        h_feature = {}
        if 'is_wiki' in self.s_model:
            score = 0
            if 'enwp' in docno:
                score = 1
            h_feature[self.feature_name_pre + 'IsWiki'] = score
        return h_feature

    def extract(self, qid, docno, h_q_info, h_doc_info):
        query = h_q_info['query']
        h_feature = self.extract_for_text(query, docno, h_q_info, h_doc_info)
        # pass the document info here; the original passed h_q_info
        h_feature.update(self.extract_doc_feature(docno, h_doc_info))
        return h_feature
class Map(DOMWidget, InteractMixin):
    _view_name = Unicode('LeafletMapView').tag(sync=True)
    _model_name = Unicode('LeafletMapModel').tag(sync=True)
    _view_module = Unicode('jupyter-leaflet').tag(sync=True)
    _model_module = Unicode('jupyter-leaflet').tag(sync=True)
    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)

    # URL of the window where the map is displayed
    window_url = Unicode(read_only=True).tag(sync=True)

    # Map options
    center = List(def_loc).tag(sync=True, o=True)
    zoom_start = CFloat(12).tag(sync=True, o=True)
    zoom = CFloat(12).tag(sync=True, o=True)
    max_zoom = CFloat(18).tag(sync=True, o=True)
    min_zoom = CFloat(1).tag(sync=True, o=True)
    zoom_delta = CFloat(1).tag(sync=True, o=True)
    zoom_snap = CFloat(1).tag(sync=True, o=True)
    interpolation = Unicode('bilinear').tag(sync=True, o=True)
    crs = Dict(default_value=projections.EPSG3857).tag(sync=True)

    # Specification of the basemap
    basemap = Union(
        (Dict(), Instance(TileLayer)),
        default_value=dict(
            url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
            max_zoom=19,
            attribution='Map data (c) <a href="https://openstreetmap.org">OpenStreetMap</a> contributors'
        ))
    modisdate = Unicode('yesterday').tag(sync=True)

    # Interaction options
    dragging = Bool(True).tag(sync=True, o=True)
    touch_zoom = Bool(True).tag(sync=True, o=True)
    scroll_wheel_zoom = Bool(False).tag(sync=True, o=True)
    double_click_zoom = Bool(True).tag(sync=True, o=True)
    box_zoom = Bool(True).tag(sync=True, o=True)
    tap = Bool(True).tag(sync=True, o=True)
    tap_tolerance = Int(15).tag(sync=True, o=True)
    world_copy_jump = Bool(False).tag(sync=True, o=True)
    close_popup_on_click = Bool(True).tag(sync=True, o=True)
    bounce_at_zoom_limits = Bool(True).tag(sync=True, o=True)
    keyboard = Bool(True).tag(sync=True, o=True)
    keyboard_pan_offset = Int(80).tag(sync=True, o=True)
    keyboard_zoom_offset = Int(1).tag(sync=True, o=True)
    inertia = Bool(True).tag(sync=True, o=True)
    inertia_deceleration = Int(3000).tag(sync=True, o=True)
    inertia_max_speed = Int(1500).tag(sync=True, o=True)
    # inertia_threshold = Int(?, o=True).tag(sync=True)
    # fade_animation = Bool(?).tag(sync=True, o=True)
    # zoom_animation = Bool(?).tag(sync=True, o=True)
    zoom_animation_threshold = Int(4).tag(sync=True, o=True)
    # marker_zoom_animation = Bool(?).tag(sync=True, o=True)
    fullscreen = Bool(False).tag(sync=True, o=True)

    options = List(trait=Unicode()).tag(sync=True)

    style = InstanceDict(MapStyle).tag(sync=True, **widget_serialization)
    default_style = InstanceDict(MapStyle).tag(sync=True, **widget_serialization)
    dragging_style = InstanceDict(MapStyle).tag(sync=True, **widget_serialization)

    zoom_control = Bool(True)
    attribution_control = Bool(True)

    @default('dragging_style')
    def _default_dragging_style(self):
        return {'cursor': 'move'}

    @default('options')
    def _default_options(self):
        return [name for name in self.traits(o=True)]

    south = Float(def_loc[0], read_only=True).tag(sync=True)
    north = Float(def_loc[0], read_only=True).tag(sync=True)
    east = Float(def_loc[1], read_only=True).tag(sync=True)
    west = Float(def_loc[1], read_only=True).tag(sync=True)

    bottom = Int(0, read_only=True).tag(sync=True)
    top = Int(9007199254740991, read_only=True).tag(sync=True)
    right = Int(0, read_only=True).tag(sync=True)
    left = Int(9007199254740991, read_only=True).tag(sync=True)

    layers = Tuple().tag(trait=Instance(Layer), sync=True,
                         **widget_serialization)

    @default('layers')
    def _default_layers(self):
        basemap = self.basemap if isinstance(self.basemap, TileLayer) \
            else basemap_to_tiles(self.basemap, self.modisdate)
        basemap.base = True
        return (basemap,)

    bounds = Tuple(read_only=True)
    bounds_polygon = Tuple(read_only=True)
    pixel_bounds = Tuple(read_only=True)

    @observe('south', 'north', 'east', 'west')
    def _observe_bounds(self, change):
        self.set_trait('bounds', ((self.south, self.west),
                                  (self.north, self.east)))
        self.set_trait('bounds_polygon', ((self.north, self.west),
                                          (self.north, self.east),
                                          (self.south, self.east),
                                          (self.south, self.west)))

    @observe('bottom', 'top', 'right', 'left')
    def _observe_pixel_bounds(self, change):
        self.set_trait('pixel_bounds', ((self.left, self.top),
                                        (self.right, self.bottom)))

    def __init__(self, **kwargs):
        self.zoom_control_instance = None
        self.attribution_control_instance = None

        super(Map, self).__init__(**kwargs)
        self.on_msg(self._handle_leaflet_event)

        if self.zoom_control:
            self.zoom_control_instance = ZoomControl()
            self.add_control(self.zoom_control_instance)

        if self.attribution_control:
            self.attribution_control_instance = AttributionControl(
                position='bottomright')
            self.add_control(self.attribution_control_instance)

    @observe('zoom_control')
    def observe_zoom_control(self, change):
        if change['new']:
            self.zoom_control_instance = ZoomControl()
            self.add_control(self.zoom_control_instance)
        else:
            if (self.zoom_control_instance is not None
                    and self.zoom_control_instance in self.controls):
                self.remove_control(self.zoom_control_instance)

    @observe('attribution_control')
    def observe_attribution_control(self, change):
        if change['new']:
            self.attribution_control_instance = AttributionControl(
                position='bottomright')
            self.add_control(self.attribution_control_instance)
        else:
            if (self.attribution_control_instance is not None
                    and self.attribution_control_instance in self.controls):
                self.remove_control(self.attribution_control_instance)

    _layer_ids = List()

    @validate('layers')
    def _validate_layers(self, proposal):
        '''Validate layers list.

        Makes sure only one instance of any given layer can exist in the
        layers list.
        '''
        self._layer_ids = [layer.model_id for layer in proposal.value]
        if len(set(self._layer_ids)) != len(self._layer_ids):
            raise LayerException(
                'duplicate layer detected, only use each layer once')
        return proposal.value

    def add_layer(self, layer):
        if isinstance(layer, dict):
            layer = basemap_to_tiles(layer)
        if layer.model_id in self._layer_ids:
            raise LayerException('layer already on map: %r' % layer)
        self.layers = tuple([l for l in self.layers] + [layer])

    def remove_layer(self, rm_layer):
        if rm_layer.model_id not in self._layer_ids:
            raise LayerException('layer not on map: %r' % rm_layer)
        self.layers = tuple([layer for layer in self.layers
                             if layer.model_id != rm_layer.model_id])

    def substitute_layer(self, old, new):
        if isinstance(new, dict):
            new = basemap_to_tiles(new)
        if old.model_id not in self._layer_ids:
            raise LayerException(
                'Could not substitute layer: layer not on map.')
        self.layers = tuple([new if layer.model_id == old.model_id else layer
                             for layer in self.layers])

    def clear_layers(self):
        self.layers = ()

    controls = Tuple().tag(trait=Instance(Control), sync=True,
                           **widget_serialization)
    _control_ids = List()

    @validate('controls')
    def _validate_controls(self, proposal):
        '''Validate controls list.

        Makes sure only one instance of any given control can exist in the
        controls list.
        '''
        self._control_ids = [c.model_id for c in proposal.value]
        if len(set(self._control_ids)) != len(self._control_ids):
            raise ControlException(
                'duplicate control detected, only use each control once')
        return proposal.value

    def add_control(self, control):
        if control.model_id in self._control_ids:
            raise ControlException('control already on map: %r' % control)
        self.controls = tuple([c for c in self.controls] + [control])

    def remove_control(self, control):
        if control.model_id not in self._control_ids:
            raise ControlException('control not on map: %r' % control)
        self.controls = tuple([c for c in self.controls
                               if c.model_id != control.model_id])

    def clear_controls(self):
        self.controls = ()

    def save(self, outfile, **kwargs):
        """Save the Map to an .html file.

        Parameters
        ----------
        outfile: str or file-like object
            The file to write the HTML output to.
        kwargs: keyword-arguments
            Extra parameters to pass to the
            ipywidgets.embed.embed_minimal_html function.
        """
        embed_minimal_html(outfile, views=[self], **kwargs)

    def __iadd__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    def __isub__(self, item):
        if isinstance(item, Layer):
            self.remove_layer(item)
        elif isinstance(item, Control):
            self.remove_control(item)
        return self

    def __add__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    # Event handling
    _interaction_callbacks = Instance(CallbackDispatcher, ())

    def _handle_leaflet_event(self, _, content, buffers):
        if content.get('event', '') == 'interaction':
            self._interaction_callbacks(**content)

    def on_interaction(self, callback, remove=False):
        self._interaction_callbacks.register_callback(callback, remove=remove)
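
# --- Usage sketch (illustrative, not from the source) -----------------------
# In a notebook, the operator overloads above make layer management terse:
#
#   m = Map(center=[52.2, 0.1], zoom=10, scroll_wheel_zoom=True)
#   m += Circle(location=[52.2, 0.1], radius=500)   # __iadd__ -> add_layer
#   m.on_interaction(lambda **ev: print(ev.get('type')))
#   m.save('map.html')                              # standalone HTML export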
class GenerateApp(DsCreate):
    name = u'generate'
    description = u'''
        Splits an nbgrader assignment into student facing and teacher facing
        files and uses the arguments to determine which sub application
        should be activated.

        **Behavior:**

        GenerateApp uses three major variables.

        1. ``pipeline_steps``
            * This variable is a list containing the converters and
              controllers that are applied to the repository.
        2. ``branches``
            * This variable is a list containing the names of git branches
              and is used by CheckoutControllers (included in the
              ``pipeline_steps`` list) to move sequentially across the
              branches.
            * It is worth noting that the ``pipeline_steps`` list cannot
              contain more CheckoutControllers than the length of
              ``branches``.

        This app uses nbgrader's preprocessors to create student facing and
        teacher facing versions of the README markdown files. The curriculum
        notebook is saved to each branch.
        '''

    flags = generate_flags
    aliases = generate_aliases

    edit_branch = Unicode('master', help="""
        Sets the name of the git branch used for curriculum development.
        Default: 'master'
        """).tag(config=True)

    pipeline_steps = List([
        CollectCurriculum,
        ReleaseConverter,
        Commit,
        Push,
        Checkout,
        BaseConverter,
        SourceConverter,
        Commit,
        Push,
        CheckoutEditBranch,
    ]).tag(config=True)

    branches = List(['master', 'solution'], help="""
        Sets the branches used for the notebook split.
        Default: ['master', 'solution']
        """).tag(config=True)

    def start(self) -> None:
        """
        Activates the application.

        * Adds the name of the edit branch to the application configuration
          object.
        * Configures the DsPipeline object
        * Adds the branches to the controller objects
        * Initializes a DsPipeline
        * Activates the pipeline
        """
        super().start()
        c = Config()
        c.edit_branch = self.edit_branch
        c.DsPipeline.steps = self.pipeline_steps
        c.BaseController.branches = self.branches
        self.config.merge(c)
        pipeline = DsPipeline(config=self.config)
        pipeline.start()
class Viewer(widgets.DOMWidget):
    """
    Generic object for viewing and labeling Candidate objects in their
    rendered Contexts.
    """
    _view_name = Unicode('ViewerView').tag(sync=True)
    _view_module = Unicode('viewer').tag(sync=True)
    cids = List().tag(sync=True)
    html = Unicode('<h3>Error!</h3>').tag(sync=True)
    _labels_serialized = Unicode().tag(sync=True)
    _selected_cid = Int().tag(sync=True)

    def __init__(self, candidates, session, gold=[], n_per_page=3, height=225,
                 annotator_name=None):
        """
        Initializes a Viewer.

        The Viewer uses the keyword argument annotator_name to define an
        AnnotatorLabelKey with that name.

        :param candidates: A Python container of Candidates (e.g., not a
            CandidateSet, but candidate_set.candidates)
        :param session: The SnorkelSession for the database backend
        :param gold: Optional, Python container of Candidates that are known
            to have positive labels
        :param n_per_page: Optional, number of Contexts to display per page
        :param height: Optional, the height in pixels of the Viewer
        :param annotator_name: Name of the human using the Viewer, for
            saving their work. Defaults to system username.
        """
        super(Viewer, self).__init__()
        self.session = session

        # By default, use the username as annotator name
        name = (annotator_name if annotator_name is not None
                else getpass.getuser())

        # Sets up the AnnotationKey to use
        self.annotator = self.session.query(GoldLabelKey).filter(
            GoldLabelKey.name == name).first()
        if self.annotator is None:
            self.annotator = GoldLabelKey(name=name)
            session.add(self.annotator)
            session.commit()

        # Viewer display configs
        self.n_per_page = n_per_page
        self.height = height

        # Note that the candidates are not necessarily committed to the DB,
        # so they *may not have* non-null ids; hence, we index by their
        # position in this list. We get the sorted candidates and all
        # contexts required, either from unary or binary candidates.
        self.gold = list(gold)
        self.candidates = sorted(list(candidates),
                                 key=lambda c: c[0].char_start)
        self.contexts = list(
            set(c[0].get_parent() for c in self.candidates + self.gold))

        # If committed, sort contexts by id
        try:
            self.contexts = sorted(self.contexts, key=lambda c: c.id)
        except:
            pass

        # Loads existing annotations
        self.annotations = [None] * len(self.candidates)
        self.annotations_stable = [None] * len(self.candidates)
        init_labels_serialized = []
        for i, candidate in enumerate(self.candidates):

            # First look for the annotation in the primary annotations table
            existing_annotation = self.session.query(GoldLabel) \
                .filter(GoldLabel.key == self.annotator) \
                .filter(GoldLabel.candidate == candidate) \
                .first()
            if existing_annotation is not None:
                self.annotations[i] = existing_annotation
                if existing_annotation.value == 1:
                    value_string = 'true'
                elif existing_annotation.value == -1:
                    value_string = 'false'
                else:
                    raise ValueError(
                        str(existing_annotation) +
                        ' has value not in {1, -1}, which Viewer does not '
                        'support.')
                init_labels_serialized.append(str(i) + '~~' + value_string)

                # If the annotator label is in the main table, also get its
                # stable version
                context_stable_ids = '~~'.join(
                    [c.stable_id for c in candidate.get_contexts()])
                existing_annotation_stable = self.session.query(StableLabel) \
                    .filter(StableLabel.context_stable_ids
                            == context_stable_ids) \
                    .filter(StableLabel.annotator_name == name).one_or_none()

                # If stable version is not available, create it here
                # NOTE: This is for versioning issues, should be removed?
                if existing_annotation_stable is None:
                    context_stable_ids = '~~'.join(
                        [c.stable_id for c in candidate.get_contexts()])
                    existing_annotation_stable = StableLabel(
                        context_stable_ids=context_stable_ids,
                        annotator_name=self.annotator.name,
                        split=candidate.split,
                        value=existing_annotation.value)
                    self.session.add(existing_annotation_stable)
                    self.session.commit()

                self.annotations_stable[i] = existing_annotation_stable

        self._labels_serialized = ','.join(init_labels_serialized)

        # Configures message handler
        self.on_msg(self.handle_label_event)

        # display js, construct html and pass on to widget model
        self.render()

    def _tag_span(self, html, cids, gold=False):
        """
        Create the span around a segment of the context associated with one
        or more candidates / gold annotations
        """
        classes = ['candidate'] if len(cids) > 0 else []
        classes += ['gold-annotation'] if gold else []
        classes += list(map(str, cids))
        return u'<span class="{classes}">{html}</span>'.format(
            classes=' '.join(classes), html=html)

    def _tag_context(self, context, candidates, gold):
        """Given the raw context, tag the spans using the generic _tag_span
        method"""
        raise NotImplementedError()

    def render(self):
        """Renders viewer pane"""
        cids = []

        # Iterate over pages of contexts
        pid = 0
        pages = []
        N = len(self.contexts)
        for i in range(0, N, self.n_per_page):
            page_cids = []
            lis = []
            for j in range(i, min(N, i + self.n_per_page)):
                context = self.contexts[j]

                # Get the candidates in this context
                candidates = [c for c in self.candidates
                              if c[0].get_parent() == context]
                gold = [g for g in self.gold if g.context_id == context.id]

                # Construct the <li> and page view elements
                li_data = self._tag_context(context, candidates, gold)
                lis.append(LI_HTML.format(data=li_data,
                                          context_id=context.id))
                page_cids.append(
                    [self.candidates.index(c) for c in candidates])

            # Assemble the page...
            pages.append(PAGE_HTML.format(
                pid=pid,
                data=''.join(lis),
                etc=' style="display: block;"' if i == 0 else ''))
            cids.append(page_cids)
            pid += 1

        # Render in primary Viewer template
        self.cids = cids
        self.html = open(HOME + '/viewer/viewer.html').read() % (
            self.height, ''.join(pages))
        display(Javascript(open(HOME + '/viewer/viewer.js').read()))

    def _get_labels(self):
        """
        De-serialize labels from Javascript widget, map to internal candidate
        id, and return as list of tuples
        """
        LABEL_MAP = {'true': 1, 'false': -1}
        labels = [x.split('~~') for x in self._labels_serialized.split(',')
                  if len(x) > 0]
        vals = [(int(cid), LABEL_MAP.get(l, 0)) for cid, l in labels]
        return vals

    def handle_label_event(self, _, content, buffers):
        """
        Handles label event by persisting new label
        """
        if content.get('event', '') == 'set_label':
            cid = content.get('cid', None)
            value = content.get('value', None)
            if value is True:
                value = 1
            elif value is False:
                value = -1
            else:
                raise ValueError('Unexpected label returned from widget: ' +
                                 str(value) +
                                 '. Expected values are True and False.')

            # If label already exists, just update value (in both
            # AnnotatorLabel and StableLabel)
            if self.annotations[cid] is not None:
                if self.annotations[cid].value != value:
                    self.annotations[cid].value = value
                    self.annotations_stable[cid].value = value
                    self.session.commit()

            # Otherwise, create an AnnotatorLabel *and a StableLabel*
            else:
                candidate = self.candidates[cid]

                # Create AnnotatorLabel
                self.annotations[cid] = GoldLabel(key=self.annotator,
                                                  candidate=candidate,
                                                  value=value)
                self.session.add(self.annotations[cid])

                # Create StableLabel
                context_stable_ids = '~~'.join(
                    [c.stable_id for c in candidate.get_contexts()])
                self.annotations_stable[cid] = StableLabel(
                    context_stable_ids=context_stable_ids,
                    annotator_name=self.annotator.name,
                    value=value,
                    split=candidate.split)
                self.session.add(self.annotations_stable[cid])
                self.session.commit()

        elif content.get('event', '') == 'delete_label':
            cid = content.get('cid', None)
            self.session.delete(self.annotations[cid])
            self.annotations[cid] = None
            self.session.delete(self.annotations_stable[cid])
            self.annotations_stable[cid] = None
            self.session.commit()

    def get_selected(self):
        return self.candidates[self._selected_cid]
class LoadBalancedView(View):
    """A load-balanced View that only executes via the Task scheduler.

    Load-balanced views can be created with the client's `view` method:

    >>> v = client.load_balanced_view()

    or targets can be specified, to restrict the potential destinations:

    >>> v = client.load_balanced_view([1,3])

    which would restrict loadbalancing to between engines 1 and 3.
    """

    follow = Any()
    after = Any()
    timeout = CFloat()
    retries = Integer(0)

    _task_scheme = Any()
    _flag_names = List(['targets', 'block', 'track', 'follow',
                        'after', 'timeout', 'retries'])

    def __init__(self, client=None, socket=None, **flags):
        super(LoadBalancedView, self).__init__(client=client, socket=socket,
                                               **flags)
        self._task_scheme = client._task_scheme

    def _validate_dependency(self, dep):
        """validate a dependency.

        For use in `set_flags`.
        """
        if dep is None or isinstance(dep, string_types + (AsyncResult,
                                                          Dependency)):
            return True
        elif isinstance(dep, (list, set, tuple)):
            for d in dep:
                if not isinstance(d, string_types + (AsyncResult,)):
                    return False
        elif isinstance(dep, dict):
            if set(dep.keys()) != set(Dependency().as_dict().keys()):
                return False
            if not isinstance(dep['msg_ids'], list):
                return False
            for d in dep['msg_ids']:
                if not isinstance(d, string_types):
                    return False
        else:
            return False

        return True

    def _render_dependency(self, dep):
        """helper for building jsonable dependencies from various input
        forms."""
        if isinstance(dep, Dependency):
            return dep.as_dict()
        elif isinstance(dep, AsyncResult):
            return dep.msg_ids
        elif dep is None:
            return []
        else:
            # pass to Dependency constructor
            return list(Dependency(dep))

    def set_flags(self, **kwargs):
        """set my attribute flags by keyword.

        A View is a wrapper for the Client's apply method, but with
        attributes that specify keyword arguments, those attributes can be
        set by keyword argument with this method.

        Parameters
        ----------
        block : bool
            whether to wait for results
        track : bool
            whether to create a MessageTracker to allow the user to
            safely edit after arrays and buffers during non-copying sends.
        after : Dependency or collection of msg_ids
            Only for load-balanced execution (targets=None)
            Specify a list of msg_ids as a time-based dependency.
            This job will only be run *after* the dependencies have been met.
        follow : Dependency or collection of msg_ids
            Only for load-balanced execution (targets=None)
            Specify a list of msg_ids as a location-based dependency.
            This job will only be run on an engine where this dependency
            is met.
        timeout : float/int or None
            Only for load-balanced execution (targets=None)
            Specify an amount of time (in seconds) for the scheduler to
            wait for dependencies to be met before failing with a
            DependencyTimeout.
        retries : int
            Number of times a task will be retried on failure.
        """

        super(LoadBalancedView, self).set_flags(**kwargs)
        for name in ('follow', 'after'):
            if name in kwargs:
                value = kwargs[name]
                if self._validate_dependency(value):
                    setattr(self, name, value)
                else:
                    raise ValueError("Invalid dependency: %r" % value)
        if 'timeout' in kwargs:
            t = kwargs['timeout']
            if not isinstance(t, (int, float, type(None))):
                if (not PY3) and (not isinstance(t, long)):
                    raise TypeError("Invalid type for timeout: %r" % type(t))
            if t is not None:
                if t < 0:
                    raise ValueError("Invalid timeout: %s" % t)
            self.timeout = t

    @sync_results
    @save_ids
    def _really_apply(self, f, args=None, kwargs=None, block=None,
                      track=None, after=None, follow=None,
                      timeout=None, targets=None, retries=None):
        """calls f(*args, **kwargs) on a remote engine, returning the result.

        This method temporarily sets all of `apply`'s flags for a single
        call.

        Parameters
        ----------
        f : callable
        args : list [default: empty]
        kwargs : dict [default: empty]
        block : bool [default: self.block]
            whether to block
        track : bool [default: self.track]
            whether to ask zmq to track the message,
            for safe non-copying sends

        !!!!!! TODO: THE REST HERE  !!!!

        Returns
        -------
        if self.block is False:
            returns AsyncResult
        else:
            returns actual result of f(*args, **kwargs) on the engine(s)
            This will be a list if self.targets is also a list (even length
            1), or the single result if self.targets is an integer engine id
        """
        # validate whether we can run
        # (`closed` is a pyzmq property; the original called it)
        if self._socket.closed:
            msg = "Task farming is disabled"
            if self._task_scheme == 'pure':
                msg += " because the pure ZMQ scheduler cannot handle"
                msg += " disappearing engines."
            raise RuntimeError(msg)

        if self._task_scheme == 'pure':
            # pure zmq scheme doesn't support extra features
            msg = ("Pure ZMQ scheduler doesn't support the following flags: "
                   "follow, after, retries, targets, timeout")
            if (follow or after or retries or targets or timeout):
                # hard fail on Scheduler flags
                raise RuntimeError(msg)
            if isinstance(f, dependent):
                # soft warn on functional dependencies
                warnings.warn(msg, RuntimeWarning)

        # build args
        args = [] if args is None else args
        kwargs = {} if kwargs is None else kwargs
        block = self.block if block is None else block
        track = self.track if track is None else track
        after = self.after if after is None else after
        retries = self.retries if retries is None else retries
        follow = self.follow if follow is None else follow
        timeout = self.timeout if timeout is None else timeout
        targets = self.targets if targets is None else targets

        if not isinstance(retries, int):
            raise TypeError('retries must be int, not %r' % type(retries))

        if targets is None:
            idents = []
        else:
            idents = self.client._build_targets(targets)[0]
            # ensure *not* bytes
            idents = [ident.decode() for ident in idents]

        after = self._render_dependency(after)
        follow = self._render_dependency(follow)
        metadata = dict(after=after, follow=follow, timeout=timeout,
                        targets=idents, retries=retries)

        future = self.client.send_apply_request(self._socket, f, args,
                                                kwargs, track=track,
                                                metadata=metadata)

        ar = AsyncResult(self.client, future, fname=getname(f),
                         targets=None, owner=True)
        if block:
            try:
                return ar.get()
            except KeyboardInterrupt:
                pass
        return ar

    @sync_results
    @save_ids
    def map(self, f, *sequences, **kwargs):
        """``view.map(f, *sequences, block=self.block, chunksize=1, ordered=True)`` => list|AsyncMapResult

        Parallel version of builtin `map`, load-balanced by this View.

        `block`, and `chunksize` can be specified by keyword only.

        Each `chunksize` elements will be a separate task, and will be
        load-balanced. This lets individual elements be available for
        iteration as soon as they arrive.

        Parameters
        ----------
        f : callable
            function to be mapped
        *sequences : one or more sequences of matching length
            the sequences to be distributed and passed to `f`
        block : bool [default self.block]
            whether to wait for the result or not
        track : bool
            whether to create a MessageTracker to allow the user to
            safely edit after arrays and buffers during non-copying sends.
        chunksize : int [default 1]
            how many elements should be in each task.
        ordered : bool [default True]
            Whether the results should be gathered as they arrive, or
            enforce the order of submission.

            Only applies when iterating through AsyncMapResult as results
            arrive. Has no effect when block=True.

        Returns
        -------
        if block=False
            An :class:`~ipyparallel.client.asyncresult.AsyncMapResult`
            instance. An object like AsyncResult, but which reassembles the
            sequence of results into a single list. AsyncMapResults can be
            iterated through before all results are complete.
        else
            A list, the result of ``map(f,*sequences)``
        """

        # default
        block = kwargs.get('block', self.block)
        chunksize = kwargs.get('chunksize', 1)
        ordered = kwargs.get('ordered', True)

        keyset = set(kwargs.keys())
        # `difference_update` returns None, so the original check could
        # never fire; use `difference`, and accept the documented
        # `ordered` keyword as well
        extra_keys = keyset.difference(set(['block', 'chunksize', 'ordered']))
        if extra_keys:
            raise TypeError("Invalid kwargs: %s" % list(extra_keys))

        assert len(sequences) > 0, "must have some sequences to map onto!"

        pf = ParallelFunction(self, f, block=block, chunksize=chunksize,
                              ordered=ordered)
        return pf.map(*sequences)

    def register_joblib_backend(self, name='ipyparallel',
                                make_default=False):
        """Register this View as a joblib parallel backend

        To make this the default backend, set make_default=True.

        Use with::

            p = Parallel(backend='ipyparallel')
            ...

        See joblib docs for details

        Requires joblib >= 0.10

        .. versionadded:: 5.1
        """
        from joblib.parallel import register_parallel_backend
        from ._joblib import IPythonParallelBackend
        register_parallel_backend(name,
                                  lambda: IPythonParallelBackend(view=self),
                                  make_default=make_default)
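
# --- Usage sketch (illustrative, not from the source) -----------------------
# Typical load-balanced usage, assuming a running ipyparallel cluster:

import ipyparallel as ipp

rc = ipp.Client()
view = rc.load_balanced_view()

# Chunks of 4 elements per task; results iterate in as tasks complete
ar = view.map(lambda x: x ** 2, range(100), block=False, chunksize=4)
for result in ar:
    print(result)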
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
    name = u'ipython'
    description = usage.cl_usage
    crash_handler_class = IPAppCrashHandler
    examples = _examples

    flags = Dict(flags)
    aliases = Dict(aliases)
    classes = List()

    def _classes_default(self):
        """This has to be in a method, for TerminalIPythonApp to be
        available."""
        return [
            InteractiveShellApp,  # ShellApp comes before TerminalApp, because
            self.__class__,       # it will also affect subclasses (e.g. QtConsole)
            TerminalInteractiveShell,
            PromptManager,
            HistoryManager,
            ProfileDir,
            PlainTextFormatter,
            IPCompleter,
            ScriptMagics,
            StoreMagics,
        ]

    subcommands = dict(
        qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
            """DEPRECATED: Launch the Jupyter Qt Console."""
        ),
        notebook=('notebook.notebookapp.NotebookApp',
            """DEPRECATED: Launch the Jupyter HTML Notebook Server."""
        ),
        profile=("IPython.core.profileapp.ProfileApp",
            "Create and manage IPython profiles."
        ),
        kernel=("ipykernel.kernelapp.IPKernelApp",
            "Start a kernel without an attached frontend."
        ),
        console=('jupyter_console.app.ZMQTerminalIPythonApp',
            """DEPRECATED: Launch the Jupyter terminal-based Console."""
        ),
        locate=('IPython.terminal.ipapp.LocateIPythonApp',
            LocateIPythonApp.description
        ),
        history=('IPython.core.historyapp.HistoryApp',
            "Manage the IPython history database."
        ),
        nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
            "DEPRECATED: Convert notebooks to/from other formats."
        ),
        trust=('nbformat.sign.TrustNotebookApp',
            "DEPRECATED: Sign notebooks to trust their potentially unsafe "
            "contents at load."
        ),
        kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
            "DEPRECATED: Manage Jupyter kernel specifications."
        ),
    )
    subcommands['install-nbextension'] = (
        "notebook.nbextensions.InstallNBExtensionApp",
        "DEPRECATED: Install Jupyter notebook extension files"
    )

    # *do* autocreate requested profile, but don't create the config file.
    auto_create = Bool(True)

    # configurables
    quick = Bool(False, config=True,
        help="""Start IPython quickly by skipping the loading of config
        files.""")

    def _quick_changed(self, name, old, new):
        if new:
            self.load_config_file = lambda *a, **kw: None

    display_banner = Bool(True, config=True,
        help="Whether to display a banner upon starting IPython.")

    # if there is code or files to run from the cmd line, don't interact
    # unless the --i flag (App.force_interact) is true.
    force_interact = Bool(False, config=True,
        help="""If a command or file is given via the command-line,
        e.g. 'ipython foo.py', start an interactive shell after executing
        the file or command.""")

    def _force_interact_changed(self, name, old, new):
        if new:
            self.interact = True

    def _file_to_run_changed(self, name, old, new):
        if new:
            self.something_to_run = True
        if new and not self.force_interact:
            self.interact = False
    _code_to_run_changed = _file_to_run_changed
    _module_to_run_changed = _file_to_run_changed

    # internal, not-configurable
    interact = Bool(True)
    something_to_run = Bool(False)

    def parse_command_line(self, argv=None):
        """override to allow old '-pylab' flag with deprecation warning"""

        argv = sys.argv[1:] if argv is None else argv

        if '-pylab' in argv:
            # deprecated `-pylab` given,
            # warn and transform into current syntax
            argv = argv[:]  # copy, don't clobber
            idx = argv.index('-pylab')
            warn.warn("`-pylab` flag has been deprecated.\n"
                      "    Use `--matplotlib <backend>` and import pylab "
                      "manually.")
            argv[idx] = '--pylab'

        return super(TerminalIPythonApp, self).parse_command_line(argv)

    @catch_config_error
    def initialize(self, argv=None):
        """Do actions after construct, but before starting the app."""
        super(TerminalIPythonApp, self).initialize(argv)
        if self.subapp is not None:
            # don't bother initializing further, starting subapp
            return
        # print self.extra_args
        if self.extra_args and not self.something_to_run:
            self.file_to_run = self.extra_args[0]
        self.init_path()
        # create the shell
        self.init_shell()
        # and draw the banner
        self.init_banner()
        # Now a variety of things that happen after the banner is printed.
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()

    def init_shell(self):
        """initialize the InteractiveShell instance"""
        # Create an InteractiveShell instance.
        # shell.display_banner should always be False for the terminal
        # based app, because we call shell.show_banner() by hand below
        # so the banner shows *before* all extension loading stuff.
        self.shell = TerminalInteractiveShell.instance(
            parent=self,
            display_banner=False,
            profile_dir=self.profile_dir,
            ipython_dir=self.ipython_dir,
            user_ns=self.user_ns)
        self.shell.configurables.append(self)

    def init_banner(self):
        """optionally display the banner"""
        if self.display_banner and self.interact:
            self.shell.show_banner()
        # Make sure there is a space below the banner.
        if self.log_level <= logging.INFO:
            print()

    def _pylab_changed(self, name, old, new):
        """Replace --pylab='inline' with --pylab='auto'"""
        if new == 'inline':
            warn.warn("'inline' not available as pylab backend, "
                      "using 'auto' instead.")
            self.pylab = 'auto'

    def start(self):
        if self.subapp is not None:
            return self.subapp.start()
        # perform any prexec steps:
        if self.interact:
            self.log.debug("Starting IPython's mainloop...")
            self.shell.mainloop()
        else:
            self.log.debug("IPython not interactive...")
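
# --- Usage sketch (illustrative, not from the source) -----------------------
# The conventional entry point pattern for a traitlets Application subclass:

def launch_new_instance(argv=None):
    app = TerminalIPythonApp.instance()
    app.initialize(argv)
    app.start()

if __name__ == '__main__':
    launch_new_instance()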
class PluginBlock(Block):
    _view_name = Unicode('PluginBlockView').tag(sync=True)
    _model_name = Unicode('PluginBlockModel').tag(sync=True)

    _parent_block = Instance(BlockType, allow_none=True, default_value=None)
    _available_input_data = List([])
    _available_input_components = List([])
    _input_data_dim = Int(allow_none=True, default_value=None)

    # TODO Validate data/components names and synchronise JavaScript -> Python
    input_data = Unicode(allow_none=True, default_value=None).tag(sync=True)
    input_components = List(Union((Unicode(), Int()))).tag(sync=True)

    def __init__(self, *args, **kwargs):
        super(PluginBlock, self).__init__(*args, **kwargs)
        self.input_data_wid = None
        self.input_components_wid = None

    def _ipython_display_(self, *args, **kwargs):
        display(self.interact())

    def interact(self):
        pass

    def _get_data(self, parent):
        # Walk up the pipeline until we reach the DataBlock that owns the mesh
        block = parent
        while not isinstance(block, DataBlock):
            block = block._parent_block
        return block.mesh.data

    @observe('_parent_block')
    def _update_input_data(self, change):
        parent = change['new']
        if parent is None:
            return
        data = self._get_data(parent)
        self._available_input_data = [d.name for d in data]
        self.input_data = self._available_input_data[0]

    @observe('input_data')
    def _update_available_components(self, change):
        data = self._get_data(self._parent_block)
        current_data = None
        for d in data:
            if d.name == change['new']:
                current_data = d
                break
        if current_data is None:
            return
        # 0 is offered as a fallback component value
        self._available_input_components = (
            [c.name for c in current_data.components] + [0])

    @observe('_available_input_components')
    def _update_input_components(self, change):
        if self._input_data_dim is None:
            return
        available_components = change['new']
        if self.input_components_wid is not None:
            # Skip the leading Label; only the dropdowns have options
            for component_wid in self.input_components_wid[1:]:
                component_wid.options = available_components
        # Keep the current selection if it is still valid
        components_are_valid = bool(len(self.input_components))
        for c in self.input_components:
            if c not in available_components:
                components_are_valid = False
        if components_are_valid:
            return
        # Otherwise fall back to the first available components, padding with 0
        new_components = []
        for dim in range(self._input_data_dim):
            if len(available_components) <= dim:
                new_components.append(0)
                continue
            new_components.append(available_components[dim])
        self.input_components = new_components

    def _link_dropdown(self, dropdown, dim):
        def handle_dropdown_change(change):
            copy = self.input_components.copy()
            copy[dim] = change['new']
            self.input_components = copy

        dropdown.observe(handle_dropdown_change, names=['value'])

        def handle_input_change(change):
            dropdown.value = self.input_components[dim]

        self.observe(handle_input_change, names=['input_components'])

        link((dropdown, 'options'), (self, '_available_input_components'))

    def _init_input_data_widgets(self):
        self.input_components_wid = [Label('Input components')]
        for dim in range(self._input_data_dim):
            dropdown = Dropdown(
                options=self._available_input_components,
                value=self.input_components[dim]
            )
            dropdown.layout.width = 'fit-content'
            self._link_dropdown(dropdown, dim)
            self.input_components_wid.append(dropdown)

        self.input_data_wid = Dropdown(
            description='Input data',
            options=self._available_input_data,
            value=self.input_data
        )
        self.input_data_wid.layout.width = 'fit-content'
        link((self.input_data_wid, 'value'), (self, 'input_data'))

    def _interact(self):
        if self._input_data_dim is not None:
            if self.input_data_wid is None:
                self._init_input_data_widgets()
            return (VBox((self.input_data_wid,
                          HBox(self.input_components_wid))), )
        return ()

    def _get_component_min_max(self, data_name, component_name):
        data = self._get_data(self._parent_block)
        for d in data:
            if d.name == data_name:
                for c in d.components:
                    if c.name == component_name:
                        return (c.min, c.max)
        raise RuntimeError('Unknown component {}.{}'.format(
            data_name, component_name))
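# --- Usage sketch (illustrative; not part of the original module) ----------
# A minimal, hypothetical plugin showing how a concrete subclass sets
# `_input_data_dim` so that `_interact()` builds the data/component dropdowns.
# The class name and the dimension value are assumptions for demonstration.
class _ExampleVectorPlugin(PluginBlock):
    _input_data_dim = Int(3, allow_none=True)  # expects three input components

    def interact(self):
        # Compose the inherited input-selection widgets into a single box
        return VBox(self._interact())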
class Circle(Path):
    _view_name = Unicode('LeafletCircleView').tag(sync=True)
    _model_name = Unicode('LeafletCircleModel').tag(sync=True)

    location = List(def_loc).tag(sync=True)
    radius = Int(1000, help="radius of circle in meters").tag(sync=True)
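# --- Usage sketch (illustrative; not part of the original module) ----------
# Draws a 500 m circle on a Map; `add_layer` is ipyleaflet's public Map API
# and the coordinates below are arbitrary.
def _example_circle():
    m = Map(center=[48.85, 2.35], zoom=12)
    m.add_layer(Circle(location=[48.85, 2.35], radius=500))
    return m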
class Block(Widget, BlockType):
    _view_name = Unicode('BlockView').tag(sync=True)
    _model_name = Unicode('BlockModel').tag(sync=True)
    _view_module = Unicode('odysis').tag(sync=True)
    _model_module = Unicode('odysis').tag(sync=True)
    _view_module_version = Unicode(odysis_version).tag(sync=True)
    _model_module_version = Unicode(odysis_version).tag(sync=True)

    _blocks = List(Instance(BlockType)).tag(sync=True, **widget_serialization)
    visible = Bool(True).tag(sync=True)

    def __init__(self, *args, **kwargs):
        super(Block, self).__init__(*args, **kwargs)
        self.colormap_wid = None
        self.colormapslider_wid = None

    def apply(self, block):
        block._validate_parent(self)
        if block._parent_block is not None:
            raise RuntimeError('Cannot apply the same effect at different places')
        block._parent_block = self
        self._blocks = list([b for b in self._blocks] + [block])

    def remove(self, block):
        block._parent_block = None
        self._blocks = list([b for b in self._blocks
                             if b.model_id != block.model_id])

    # Factory methods: each creates an effect block, applies it and returns it
    def color_mapping(self, *args, **kwargs):
        effect = ColorMapping(*args, **kwargs)
        self.apply(effect)
        return effect

    def grid(self, *args, **kwargs):
        effect = Grid(*args, **kwargs)
        self.apply(effect)
        return effect

    def warp(self, *args, **kwargs):
        effect = Warp(*args, **kwargs)
        self.apply(effect)
        return effect

    def vector_field(self, *args, **kwargs):
        effect = VectorField(*args, **kwargs)
        self.apply(effect)
        return effect

    def point_cloud(self, *args, **kwargs):
        effect = PointCloud(*args, **kwargs)
        self.apply(effect)
        return effect

    def clip(self, *args, **kwargs):
        effect = Clip(*args, **kwargs)
        self.apply(effect)
        return effect

    def slice(self, *args, **kwargs):
        effect = Slice(*args, **kwargs)
        self.apply(effect)
        return effect

    def threshold(self, *args, **kwargs):
        effect = Threshold(*args, **kwargs)
        self.apply(effect)
        return effect

    def iso_surface(self, *args, **kwargs):
        effect = IsoSurface(*args, **kwargs)
        self.apply(effect)
        return effect

    def _validate_parent(self, parent):
        pass
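# --- Usage sketch (illustrative; not part of the original module) ----------
# Each factory method instantiates its effect, wires it into `_blocks` via
# `apply`, and returns it, so effects can be stacked. Constructor arguments
# are omitted here because they belong to the individual effect classes.
def _example_pipeline(block):
    warped = block.warp()        # Warp effect applied to `block`
    warped.color_mapping()       # ColorMapping applied on top of the warp
    return warped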
class MultiPolygon(FeatureGroup):
    _view_name = Unicode('LeafletMultiPolygonView').tag(sync=True)
    _model_name = Unicode('LeafletMultiPolygonModel').tag(sync=True)

    locations = List().tag(sync=True)
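# --- Usage sketch (illustrative; not part of the original module) ----------
# `locations` holds one list of (lat, lng) pairs per polygon; the values
# below are arbitrary.
def _example_multipolygon():
    return MultiPolygon(locations=[
        [(48.0, 2.0), (48.5, 2.0), (48.5, 2.5)],
        [(47.0, 1.0), (47.5, 1.0), (47.5, 1.5)],
    ])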
class LabConfig(HasTraits):
    """The lab application configuration object."""

    app_name = Unicode('', help='The name of the application.')

    app_version = Unicode('', help='The version of the application.')

    app_namespace = Unicode('', help='The namespace of the application.')

    app_url = Unicode('/lab', help='The url path for the application.')

    app_settings_dir = Unicode('', help='The application settings directory.')

    extra_labextensions_path = List(
        Unicode(),
        help="""Extra paths to look for dynamic JupyterLab extensions""")

    labextensions_path = List(
        Unicode(),
        help='The standard paths to look in for dynamic JupyterLab extensions')

    templates_dir = Unicode('', help='The application templates directory.')

    static_dir = Unicode('',
                         help=('The optional location of local static files. '
                               'If given, a static file handler will be '
                               'added.'))

    static_url = Unicode(help=('The url path for static application '
                               'assets. This can be a CDN if desired.'))

    labextensions_url = Unicode(
        '', help='The url for dynamic JupyterLab extensions')

    settings_url = Unicode(help='The url path of the settings handler.')

    user_settings_dir = Unicode('',
                                help=('The optional location of the user '
                                      'settings directory.'))

    schemas_dir = Unicode('',
                          help=('The optional location of the settings '
                                'schemas directory. If given, a handler will '
                                'be added for settings.'))

    workspaces_url = Unicode(help='The url path of the workspaces handler.')

    workspaces_api_url = Unicode(help='The url path of the workspaces API.')

    workspaces_dir = Unicode('',
                             help=('The optional location of the saved '
                                   'workspaces directory. If given, a handler '
                                   'will be added for workspaces.'))

    listings_url = Unicode(help='The listings url.')

    themes_url = Unicode(help='The theme url.')

    themes_dir = Unicode('',
                         help=('The optional location of the themes '
                               'directory. If given, a handler will be added '
                               'for themes.'))

    translations_api_url = Unicode(
        help='The url path of the translations handler.')

    tree_url = Unicode(help='The url path of the tree handler.')

    cache_files = Bool(True,
                       help=('Whether to cache files on the server. '
                             'This should be `True` except in dev mode.'))

    @default('templates_dir')
    def _default_templates_dir(self):
        return DEFAULT_TEMPLATE_PATH

    @default('static_url')
    def _default_static_url(self):
        return ujoin('static/', self.app_namespace)

    @default('labextensions_url')
    def _default_labextensions_url(self):
        return ujoin(self.app_url, "extensions/")

    @default('labextensions_path')
    def _default_labextensions_path(self):
        return jupyter_path('labextensions')

    @default('workspaces_url')
    def _default_workspaces_url(self):
        return ujoin(self.app_url, 'workspaces/')

    @default('workspaces_api_url')
    def _default_workspaces_api_url(self):
        return ujoin(self.app_url, 'api', 'workspaces/')

    @default('settings_url')
    def _default_settings_url(self):
        return ujoin(self.app_url, 'api', 'settings/')

    @default('listings_url')
    def _default_listings_url(self):
        return ujoin(self.app_url, 'api', 'listings/')

    @default('themes_url')
    def _default_themes_url(self):
        return ujoin(self.app_url, 'api', 'themes/')

    @default('translations_api_url')
    def _default_translations_api_url(self):
        return ujoin(self.app_url, 'api', 'translations/')

    @default('tree_url')
    def _default_tree_url(self):
        return ujoin(self.app_url, 'tree/')
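# --- Usage sketch (illustrative; not part of the original module) ----------
# Most URL traits default to paths derived from `app_url`, so overriding
# `app_url` moves the whole URL space in one place.
def _example_lab_config():
    config = LabConfig(app_name='Example', app_url='/mylab')
    assert config.settings_url == '/mylab/api/settings/'
    assert config.tree_url == '/mylab/tree/'
    return config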
class View(HasTraits):
    """Base View class for more convenient apply(f, *args, **kwargs) syntax via attributes.

    Don't use this class, use subclasses.

    Methods
    -------
    spin
        flushes incoming results and registration state changes
        control methods spin, and requesting `ids` also ensures up to date
    wait
        wait on one or more msg_ids
    execution methods
        apply
        legacy: execute, run
    data movement
        push, pull, scatter, gather
    query methods
        get_result, queue_status, purge_results, result_status
    control methods
        abort, shutdown
    """
    # flags
    block = Bool(False)
    track = Bool(False)
    targets = Any()

    history = List()
    outstanding = Set()
    results = Dict()
    client = Instance('ipyparallel.Client', allow_none=True)

    _socket = Instance('zmq.Socket', allow_none=True)
    _flag_names = List(['targets', 'block', 'track'])
    _in_sync_results = Bool(False)
    _targets = Any()
    _idents = Any()

    def __init__(self, client=None, socket=None, **flags):
        super(View, self).__init__(client=client, _socket=socket)
        self.results = client.results
        self.block = client.block
        self.executor = ViewExecutor(self)

        self.set_flags(**flags)

        assert not self.__class__ is View, "Don't use base View objects, use subclasses"

    def __repr__(self):
        strtargets = str(self.targets)
        if len(strtargets) > 16:
            strtargets = strtargets[:12] + '...]'
        return "<%s %s>" % (self.__class__.__name__, strtargets)

    def __len__(self):
        if isinstance(self.targets, list):
            return len(self.targets)
        elif isinstance(self.targets, int):
            return 1
        else:
            return len(self.client)

    def set_flags(self, **kwargs):
        """set my attribute flags by keyword.

        Views determine behavior with a few attributes (`block`, `track`, etc.).
        These attributes can be set all at once by name with this method.

        Parameters
        ----------
        block : bool
            whether to wait for results
        track : bool
            whether to create a MessageTracker to allow the user to
            safely edit arrays and buffers after non-copying sends.
        """
        for name, value in kwargs.items():
            if name not in self._flag_names:
                raise KeyError("Invalid name: %r" % name)
            else:
                setattr(self, name, value)

    @contextmanager
    def temp_flags(self, **kwargs):
        """temporarily set flags, for use in `with` statements.

        See set_flags for permanent setting of flags.

        Examples
        --------
        >>> view.track=False
        ...
        >>> with view.temp_flags(track=True):
        ...     ar = view.apply(dostuff, my_big_array)
        ...     ar.tracker.wait() # wait for send to finish
        >>> view.track
        False
        """
        # preflight: save flags, and set temporaries
        saved_flags = {}
        for f in self._flag_names:
            saved_flags[f] = getattr(self, f)
        self.set_flags(**kwargs)
        # yield to the with-statement block
        try:
            yield
        finally:
            # postflight: restore saved flags
            self.set_flags(**saved_flags)

    #----------------------------------------------------------------
    # apply
    #----------------------------------------------------------------

    def _sync_results(self):
        """to be called by @sync_results decorator
        after submitting any tasks.
        """
        delta = self.outstanding.difference(self.client.outstanding)
        completed = self.outstanding.intersection(delta)
        self.outstanding = self.outstanding.difference(completed)

    @sync_results
    @save_ids
    def _really_apply(self, f, args, kwargs, block=None, **options):
        """wrapper for client.send_apply_request"""
        raise NotImplementedError("Implement in subclasses")

    def apply(self, f, *args, **kwargs):
        """calls ``f(*args, **kwargs)`` on remote engines, returning the result.

        This method sets all apply flags via this View's attributes.

        Returns :class:`~ipyparallel.client.asyncresult.AsyncResult`
        instance if ``self.block`` is False, otherwise the return value of
        ``f(*args, **kwargs)``.
        """
        return self._really_apply(f, args, kwargs)

    def apply_async(self, f, *args, **kwargs):
        """calls ``f(*args, **kwargs)`` on remote engines in a nonblocking manner.

        Returns :class:`~ipyparallel.client.asyncresult.AsyncResult` instance.
        """
        return self._really_apply(f, args, kwargs, block=False)

    def apply_sync(self, f, *args, **kwargs):
        """calls ``f(*args, **kwargs)`` on remote engines in a blocking manner,
        returning the result.
        """
        return self._really_apply(f, args, kwargs, block=True)

    #----------------------------------------------------------------
    # wrappers for client and control methods
    #----------------------------------------------------------------

    @sync_results
    def spin(self):
        """spin the client, and sync"""
        self.client.spin()

    @sync_results
    def wait(self, jobs=None, timeout=-1):
        """waits on one or more `jobs`, for up to `timeout` seconds.

        Parameters
        ----------
        jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
            ints are indices to self.history
            strs are msg_ids
            default: wait on all outstanding messages
        timeout : float
            a time in seconds, after which to give up.
            default is -1, which means no timeout

        Returns
        -------
        True : when all msg_ids are done
        False : timeout reached, some msg_ids still outstanding
        """
        if jobs is None:
            jobs = self.history
        return self.client.wait(jobs, timeout)

    def abort(self, jobs=None, targets=None, block=None):
        """Abort jobs on my engines.

        Parameters
        ----------
        jobs : None, str, list of strs, optional
            if None: abort all jobs.
            else: abort specific msg_id(s).
        """
        block = block if block is not None else self.block
        targets = targets if targets is not None else self.targets
        jobs = jobs if jobs is not None else list(self.outstanding)

        return self.client.abort(jobs=jobs, targets=targets, block=block)

    def queue_status(self, targets=None, verbose=False):
        """Fetch the Queue status of my engines"""
        targets = targets if targets is not None else self.targets
        return self.client.queue_status(targets=targets, verbose=verbose)

    def purge_results(self, jobs=[], targets=[]):
        """Instruct the controller to forget specific results."""
        if targets is None or targets == 'all':
            targets = self.targets

        return self.client.purge_results(jobs=jobs, targets=targets)

    def shutdown(self, targets=None, restart=False, hub=False, block=None):
        """Terminates one or more engine processes, optionally including the hub."""
        block = self.block if block is None else block
        if targets is None or targets == 'all':
            targets = self.targets

        return self.client.shutdown(targets=targets, restart=restart,
                                    hub=hub, block=block)

    def get_result(self, indices_or_msg_ids=None, block=None, owner=False):
        """return one or more results, specified by history index or msg_id.

        See :meth:`ipyparallel.client.client.Client.get_result` for details.
        """
        if indices_or_msg_ids is None:
            indices_or_msg_ids = -1
        if isinstance(indices_or_msg_ids, int):
            indices_or_msg_ids = self.history[indices_or_msg_ids]
        elif isinstance(indices_or_msg_ids, (list, tuple, set)):
            indices_or_msg_ids = list(indices_or_msg_ids)
            for i, index in enumerate(indices_or_msg_ids):
                if isinstance(index, int):
                    indices_or_msg_ids[i] = self.history[index]
        return self.client.get_result(indices_or_msg_ids, block=block, owner=owner)

    #-------------------------------------------------------------------
    # Map
    #-------------------------------------------------------------------

    @sync_results
    def map(self, f, *sequences, **kwargs):
        """override in subclasses"""
        raise NotImplementedError

    def map_async(self, f, *sequences, **kwargs):
        """Parallel version of builtin :func:`python:map`, using this view's engines.

        This is equivalent to ``map(..., block=False)``.

        See `self.map` for details.
        """
        if 'block' in kwargs:
            raise TypeError("map_async doesn't take a `block` keyword argument.")
        kwargs['block'] = False
        return self.map(f, *sequences, **kwargs)

    def map_sync(self, f, *sequences, **kwargs):
        """Parallel version of builtin :func:`python:map`, using this view's engines.

        This is equivalent to ``map(..., block=True)``.

        See `self.map` for details.
        """
        if 'block' in kwargs:
            raise TypeError("map_sync doesn't take a `block` keyword argument.")
        kwargs['block'] = True
        return self.map(f, *sequences, **kwargs)

    def imap(self, f, *sequences, **kwargs):
        """Parallel version of :func:`itertools.imap`.

        See `self.map` for details.
        """
        return iter(self.map_async(f, *sequences, **kwargs))

    #-------------------------------------------------------------------
    # Decorators
    #-------------------------------------------------------------------

    def remote(self, block=None, **flags):
        """Decorator for making a RemoteFunction"""
        block = self.block if block is None else block
        return remote(self, block=block, **flags)

    def parallel(self, dist='b', block=None, **flags):
        """Decorator for making a ParallelFunction"""
        block = self.block if block is None else block
        return parallel(self, dist=dist, block=block, **flags)
class SimpleWidget(Widget):
    a = Bool().tag(sync=True)
    b = Tuple(Bool(), Bool(), Bool(),
              default_value=(False, False, False)).tag(sync=True)
    c = List(Bool()).tag(sync=True)
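# --- Usage sketch (illustrative; not part of the original module) ----------
# The synced traits validate on assignment like any traitlets: `b` must be
# exactly three booleans, `c` a list of booleans.
def _example_simple_widget():
    w = SimpleWidget(a=True, c=[True, False])
    w.b = (True, True, False)
    return w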