def fileOpened(self, filename):
    rf_frequency_cell = self.context.get_absolute_frequency_cell()
    self.__start_frequency, self.__frequency_subscription = rf_frequency_cell.subscribe2(
        self.__check_modified_frequency,
        SubscriptionContext(reactor=self._reactor, poller=None))
    self.__invalidated_by_frequency_change = False
    self.state_changed()
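# Hedged sketch of the matching teardown, assuming the subscription object
# returned by subscribe2() exposes unsubscribe() and that the enclosing class
# has a close hook named fileClosed (both names are assumptions, not taken
# from the snippet above):
def fileClosed(self):
    # Stop receiving frequency-change callbacks once the file is gone.
    self.__frequency_subscription.unsubscribe()
    self.__frequency_subscription = None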
def __init__(self, reactor, root_object, filename, get_defaults=_no_defaults):
    """
    root_object: Object to persist.
    filename: path to state file to read/write, or None to not actually do persistence.
    get_defaults: function accepting root_object and returning state dict to use if file does not exist.
    """
    assert isinstance(root_object, ExportedState)
    self.__reactor = reactor
    self.__filename = filename
    self.__delayed_write_call = None
    
    if filename is None:
        self.__pcd = None
        self.__scheduled = False
        return
    
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            root_object.state_from_json(json.load(f))
        # Make a backup in case this code version misreads the state and loses
        # things on save (but only if the load succeeded, in case the file but
        # not its backup is bad).
        shutil.copyfile(filename, filename + '~')
    else:
        root_object.state_from_json(get_defaults(root_object))
    
    self.__pcd = PersistenceChangeDetector(
        root_object, self.__write_later,
        SubscriptionContext(reactor=reactor, poller=None))
    
    # Start implicit write-to-disk loop, but don't actually write.
    # This is because it is useful in some failure modes to not immediately
    # overwrite a good state file with a bad one on startup.
    self.__pcd.get()
def __init__(self, reactor, root_object, filename, get_defaults=_no_defaults, _suppress_error_for_test=False):
    """
    root_object: Object to persist.
    filename: path to state file to read/write, or None to not actually do persistence.
    get_defaults: function accepting root_object and returning state dict to use if file does not exist.
    """
    assert isinstance(root_object, ExportedState)
    
    def apply_defaults():
        root_object.state_from_json(get_defaults(root_object))
    
    self.__reactor = reactor
    self.__filename = filename
    self.__delayed_write_call = None
    
    if filename is None:
        apply_defaults()
        self.__pcd = None
        return
    
    state_json = self.__attempt_to_read_file(filename, _suppress_error_for_test=_suppress_error_for_test)
    if state_json is not None:
        root_object.state_from_json(state_json)
        # Make a backup in case this code version misreads the state and loses
        # things on save (but only if the load succeeded, in case the file but
        # not its backup is bad).
        # TODO: should automatically use backup if main file is missing or broken
        shutil.copyfile(filename, filename + b'~')
    else:
        apply_defaults()
    
    self.__pcd = PersistenceChangeDetector(
        root_object, self.__write_later,
        SubscriptionContext(reactor=reactor, poller=None))
    
    # Start implicit write-to-disk loop, but don't actually write.
    # This is because it is useful in some failure modes to not immediately
    # overwrite a good state file with a bad one on startup.
    self.__pcd.get()
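# Hedged usage sketch of this initializer. The enclosing class name
# (PersistenceFileGlue) and the _DemoRoot stand-in are illustrative
# assumptions, not taken from the snippet above:
from twisted.internet import reactor

class _DemoRoot(ExportedState):
    # Minimal stand-in; a real root object would export cells/state.
    pass

persister = PersistenceFileGlue(  # class name assumed for illustration
    reactor=reactor,
    root_object=_DemoRoot(),
    filename='state.json')  # or filename=None to skip persistence entirely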
def __init__(self, reactor, cap_table, http_endpoint, ws_endpoint, root_cap, title):
    # Constants
    self.__http_endpoint_string = str(http_endpoint)
    self.__http_endpoint = endpoints.serverFromString(reactor, self.__http_endpoint_string)
    self.__ws_endpoint = endpoints.serverFromString(reactor, str(ws_endpoint))
    self.__visit_path = _make_cap_url(root_cap)
    
    wcommon = WebServiceCommon(
        reactor=reactor,
        title=title,
        ws_endpoint_string=ws_endpoint)
    # TODO: Create poller actually for the given reactor w/o redundancy --
    # perhaps there should be a one-poller-per-reactor map.
    subscription_context = SubscriptionContext(reactor=reactor, poller=the_poller)
    
    def resource_factory(entry_point):
        # TODO: If not an IWebEntryPoint, return a generic result
        return IWebEntryPoint(entry_point).get_entry_point_resource(wcommon=wcommon)  # pylint: disable=redundant-keyword-arg
    
    server_root = CapAccessResource(cap_table=cap_table, resource_factory=resource_factory)
    _put_root_static(wcommon, server_root)
    
    if UNIQUE_PUBLIC_CAP in cap_table:
        # TODO: consider factoring out "generate URL for cap"
        server_root.putChild('', Redirect(_make_cap_url(UNIQUE_PUBLIC_CAP)))
    
    self.__ws_protocol = txws.WebSocketFactory(
        FactoryWithArgs.forProtocol(WebSocketDispatcherProtocol, cap_table, subscription_context))
    self.__site = SiteWithDefaultHeaders(server_root)
    
    self.__ws_port_obj = None
    self.__http_port_obj = None
def setUp(self):
    cap_table = CapTable(unserializer=None)
    cap_table.add(EntryPointStub(), cap=u'foo')
    self.clock = Clock()
    self.transport = FakeWebSocketTransport()
    self.protocol = OurStreamProtocol(
        caps=cap_table.as_unenumerable_collection(),
        subscription_context=SubscriptionContext(reactor=self.clock, poller=None))
    self.protocol.transport = self.transport
def __init__(self, reactor, cap_table, read_only_dbs, writable_db, http_endpoint, ws_endpoint, root_cap, title, flowgraph_for_debug):
    # Constants
    self.__http_endpoint_string = http_endpoint
    self.__http_endpoint = endpoints.serverFromString(reactor, http_endpoint)
    self.__ws_endpoint = endpoints.serverFromString(reactor, ws_endpoint)
    self.__visit_path = _make_cap_url(root_cap)
    
    wcommon = WebServiceCommon(ws_endpoint_string=ws_endpoint)
    # TODO: Create poller actually for the given reactor w/o redundancy --
    # perhaps there should be a one-poller-per-reactor map.
    subscription_context = SubscriptionContext(reactor=reactor, poller=the_poller)
    
    def BoundSessionResource(session):
        return SessionResource(session, wcommon, reactor, title, read_only_dbs, writable_db, flowgraph_for_debug)
    
    server_root = CapAccessResource(cap_table=cap_table, resource_ctor=BoundSessionResource)
    _put_root_static(server_root)
    
    if UNIQUE_PUBLIC_CAP in cap_table:
        # TODO: consider factoring out "generate URL for cap"
        server_root.putChild('', Redirect(_make_cap_url(UNIQUE_PUBLIC_CAP)))
    
    self.__ws_protocol = txws.WebSocketFactory(
        FactoryWithArgs.forProtocol(OurStreamProtocol, cap_table, subscription_context))
    self.__site = _SiteWithHeaders(server_root)
    
    self.__ws_port_obj = None
    self.__http_port_obj = None
def __init__(self):
    self.__clock = Clock()
    self.context = SubscriptionContext(reactor=self.__clock, poller=Poller())
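# Hedged sketch of how such a test helper might drive time-based activity:
# advancing the fake Clock (twisted.internet.task.Clock) runs any delayed
# calls scheduled on it. The advance() helper name is an assumption, not
# taken from the snippet above:
def advance(self, seconds=1.0):
    # Let any reactor-scheduled polling or subscription callbacks fire.
    self.__clock.advance(seconds)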
    index = bisect.bisect_left(sorted_list, key)
    if sorted_list[index] != key:
        # TODO: This has been observed to happen. Need to diagnose.
        raise Exception(
            "can't happen: while removing last value %r for key %r from %r, "
            "%r was found instead of the key at index %r in the sorted list"
            % (value, key, self, sorted_list[index], index))
    sorted_list[index:index + 1] = []
    return last_out

def count_keys(self):
    return len(self.__dict)

def count_values(self):
    return self.__value_count


class _FailureToSubscribe(Exception):
    """Indicates that the cell being subscribed to failed to cooperate.
    
    Should not be observed outside of this module. Should not be logged;
    the original cause will already have been.
    """


# this is done last for load order
the_poller = AutomaticPoller(reactor=the_reactor)
__all__.append('the_poller')

the_subscription_context = SubscriptionContext(reactor=the_reactor, poller=the_poller)
__all__.append('the_subscription_context')
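# Hedged usage sketch for the exported the_subscription_context singleton,
# mirroring the subscribe2() call pattern in the fileOpened example above;
# some_cell and on_change are illustrative names, not part of this module:
def on_change(value):
    print('cell changed to', value)

current_value, subscription = some_cell.subscribe2(on_change, the_subscription_context)
# ...later, to stop receiving updates:
subscription.unsubscribe()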