def stop(self):
    """Unhook this observer from the global log publisher and close owned sinks."""
    globalLogPublisher.removeObserver(self)
    if self._filename:
        # Only close the stream when we opened a file ourselves.
        sink, self._output = self._output, None
        sink.close()
    if self.firehose:
        self.firehose.stop()
def test_vladimir_illegal_interface_key_does_not_propagate(blockchain_ursulas):
    """
    Although Ursulas propagate each other's interface information, as demonstrated above,
    they do not propagate interface information for Vladimir.

    Specifically, if Vladimir tries to perform the most obvious imitation attack -
    propagating his own wallet address along with Ursula's information - the validity
    check will catch it and Ursula will refuse to propagate it and also record
    Vladimir's details.
    """
    # Capture warn-level log events emitted during the attack / learning steps.
    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    ursulas = list(blockchain_ursulas)
    ursula_whom_vladimir_will_imitate, other_ursula = ursulas[0], ursulas[1]

    # Vladimir sees Ursula on the network and tries to use her public information.
    vladimir = Vladimir.from_target_ursula(ursula_whom_vladimir_will_imitate)

    # This Ursula is totally legit...
    ursula_whom_vladimir_will_imitate.verify_node(MockRestMiddleware())

    globalLogPublisher.addObserver(warning_trapper)
    vladimir.network_middleware.propagate_shitty_interface_id(other_ursula, vladimir.metadata())
    globalLogPublisher.removeObserver(warning_trapper)

    # So far, Ursula hasn't noticed any Vladimirs.
    assert len(warnings) == 0

    # ...but now, Ursula will now try to learn about Vladimir on a different thread.
    other_ursula.block_until_specific_nodes_are_known([vladimir.checksum_address])
    vladimir_as_learned = other_ursula.known_nodes[vladimir.checksum_address]

    # OK, so cool, let's see what happens when Ursula tries to learn with Vlad as the teacher.
    other_ursula._current_teacher_node = vladimir_as_learned
    globalLogPublisher.addObserver(warning_trapper)
    result = other_ursula.learn_from_teacher_node()
    globalLogPublisher.removeObserver(warning_trapper)

    # Indeed, Ursula noticed that something was up.
    assert len(warnings) == 1
    warning = warnings[0]['log_format']
    assert "Teacher " + str(vladimir_as_learned) + " is invalid" in warning
    assert "Metadata signature is invalid" in warning  # TODO: Cleanup logging templates
def test_blockchain_ursula_stamp_verification_tolerance(blockchain_ursulas, mocker):
    """A node with an unsigned identity stamp triggers a warning; a badly signed payload
    is recorded as suspicious activity."""
    #
    # Setup
    #
    lonely_blockchain_learner, blockchain_teacher, unsigned, *the_others = list(blockchain_ursulas)

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    #
    # Attempt to verify unsigned stamp
    #
    unsigned._Teacher__decentralized_identity_evidence = NOT_SIGNED

    # Wipe known nodes!
    lonely_blockchain_learner._Learner__known_nodes = FleetSensor(domain=TEMPORARY_DOMAIN)
    lonely_blockchain_learner._current_teacher_node = blockchain_teacher
    lonely_blockchain_learner.remember_node(blockchain_teacher)

    globalLogPublisher.addObserver(warning_trapper)
    lonely_blockchain_learner.learn_from_teacher_node(eager=True)
    globalLogPublisher.removeObserver(warning_trapper)

    # We received one warning during learning, and it was about this very matter.
    assert len(warnings) == 1
    warning = warnings[0]['log_format']
    assert str(unsigned) in warning
    assert "stamp is unsigned" in warning  # TODO: Cleanup logging templates

    # TODO: Buckets! #567
    # assert unsigned not in lonely_blockchain_learner.known_nodes
    # minus 2: self and the unsigned ursula.
    # assert len(lonely_blockchain_learner.known_nodes) == len(blockchain_ursulas) - 2
    assert blockchain_teacher in lonely_blockchain_learner.known_nodes

    # Learn about a node with a badly signed payload
    mocker.patch.object(lonely_blockchain_learner, 'verify_from', side_effect=Learner.InvalidSignature)
    lonely_blockchain_learner.learn_from_teacher_node(eager=True)
    assert len(lonely_blockchain_learner.suspicious_activities_witnessed['vladimirs']) == 1
def nucypher_cli(click_config, verbose, mock_networking, json_ipc, no_logs,
                 quiet, debug, no_registry, log_level):
    """Top-level CLI callback: wire up the output emitter, logging observers,
    and per-session configuration flags on ``click_config``."""

    # Session Emitter for pre and post character control engagement.
    if json_ipc:
        emitter = JSONRPCStdoutEmitter(quiet=quiet, capture_stdout=NucypherClickConfig.capture_stdout)
    else:
        emitter = StdoutEmitter(quiet=quiet, capture_stdout=NucypherClickConfig.capture_stdout)

    click_config.attach_emitter(emitter)
    if not json_ipc:
        click_config.emit(message=NUCYPHER_BANNER)

    if log_level:
        GlobalConsoleLogger.set_log_level(log_level_name=log_level)
        globalLogPublisher.addObserver(SimpleObserver())

    if debug and quiet:
        raise click.BadOptionUsage(
            option_name="quiet",
            message="--debug and --quiet cannot be used at the same time.")

    if debug:
        click_config.log_to_sentry = False
        click_config.log_to_file = True                       # File Logging
        globalLogPublisher.addObserver(SimpleObserver())      # Console Logging
        globalLogPublisher.removeObserver(logToSentry)        # No Sentry
        GlobalConsoleLogger.set_log_level(log_level_name='debug')

    elif quiet:  # Disable Logging
        globalLogPublisher.removeObserver(logToSentry)
        # NOTE(review): this passes the SimpleObserver *class*, not the instance
        # registered above, so it likely removes nothing — confirm intent.
        globalLogPublisher.removeObserver(SimpleObserver)
        # NOTE(review): getJsonFileObserver() may construct a new observer; removal
        # only works if it returns the registered instance — verify.
        globalLogPublisher.removeObserver(getJsonFileObserver())

    # Logging
    if not no_logs:
        GlobalConsoleLogger.start_if_not_started()

    # CLI Session Configuration
    click_config.verbose = verbose
    click_config.mock_networking = mock_networking
    click_config.json_ipc = json_ipc
    click_config.no_logs = no_logs
    click_config.quiet = quiet
    click_config.no_registry = no_registry
    click_config.debug = debug

    # Only used for testing outputs;
    # Redirects outputs to in-memory python containers.
    if mock_networking:
        click_config.emit(message="WARNING: Mock networking is enabled")
        click_config.middleware = MockRestMiddleware()
    else:
        click_config.middleware = RestMiddleware()

    # Global Warnings
    if click_config.verbose:
        click_config.emit(message="Verbose mode is enabled", color='blue')
def test_doubleEncodingError(self):
    """
    If it is not possible to encode a response to the request (for example,
    because L{xmlrpclib.dumps} raises an exception when encoding a L{Fault})
    the exception which prevents the response from being generated is logged
    and the request object is finished anyway.
    """
    # Only capture critical-level events (the encoding failure).
    logObserver = EventLoggingObserver()
    filtered = FilteringLogObserver(
        logObserver,
        [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
    globalLogPublisher.addObserver(filtered)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(filtered))

    d = self.proxy().callRemote("echo", "")

    # *Now* break xmlrpclib.dumps. Hopefully the client already used it.
    def fakeDumps(*args, **kwargs):
        raise RuntimeError("Cannot encode anything at all!")

    self.patch(xmlrpclib, "dumps", fakeDumps)

    # It doesn't matter how it fails, so long as it does. Also, it happens
    # to fail with an implementation detail exception right now, not
    # something suitable as part of a public interface.
    d = self.assertFailure(d, Exception)

    def cbFailed(ignored):
        # The fakeDumps exception should have been logged.
        self.assertEquals(1, len(logObserver))
        self.assertIsInstance(logObserver[0]["log_failure"].value, RuntimeError)
        self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)

    d.addCallback(cbFailed)
    return d
def reset_log_file():
    """(Re)install the filtered file log observer.

    Removes the previously-installed observer (if any), builds a new
    FilteringLogObserver at the configured log level writing to the
    configured file (or stderr), and records it for the next reset.
    """
    global log_observer
    if log_observer:
        print('removing log observer')
        globalLogPublisher.removeObserver(log_observer)

    # Command-line flag wins over the config file value.
    log_level = parsed_args.log_level or config['log_level']
    info_predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level.lower()))

    if mlog_file_path:
        mlog_file = open(mlog_file_path, 'a+')
    else:
        mlog_file = sys.stderr

    mlog_observer = FilteringLogObserver(textFileLogObserver(mlog_file),
                                         predicates=[info_predicate])
    globalLogPublisher.addObserver(mlog_observer)
    # BUG FIX: record the installed observer so the next call can remove it.
    # Previously it was never stored, so repeated resets stacked observers
    # and produced duplicate log lines.
    log_observer = mlog_observer
    # logger.info('resetting log output file')
    return
def tearDown(self):
    """Stop all dispersy instances and assert the reactor/threads are clean."""
    super(DispersyTestFunc, self).tearDown()

    for dispersy in self.dispersy_objects:
        yield dispersy.stop()
        # Remove the on-disk peer cache left behind by this instance.
        peercache = os.path.join(dispersy._working_directory, PEERCACHE_FILENAME)
        if os.path.isfile(peercache):
            os.unlink(peercache)

    if self._dispersy.running:
        yield self._dispersy.stop()

    globalLogPublisher.removeObserver(self.failure_check)

    # The reactor must have no pending delayed calls after shutdown.
    pending = reactor.getDelayedCalls()
    if pending:
        self._logger.warning("Found delayed calls in reactor:")
        for dc in pending:
            fun = dc.func
            self._logger.warning(" %s", fun)
        self._logger.warning("Failing")
    self.assertFalse(pending, "The reactor was not clean after shutting down all dispersy instances.")

    if self._fired_unhandled_exceptions:
        self._logger.error("Found %d unhandled exceptions on threads:",
                           len(self._fired_unhandled_exceptions))
        for exception in self._fired_unhandled_exceptions:
            self._logger.error(exception)
    self.assertFalse(self._fired_unhandled_exceptions)

    # We should not have any rogue threads left.
    rogue_threads = False
    for thread in threading.enumerate():
        if thread.name not in self._starting_threads:
            rogue_threads = True
            self._logger.error("Found rogue thread: %s", thread)
    self.assertFalse(rogue_threads, "Rogue threads active, see log")
def test_worker_failure_non_resilience():
    """
    abort on error is True for this one: after the first failure the tracker
    must stop, so no work is ever completed and exactly one critical event
    is logged.
    """
    # Control time
    clock = Clock()
    worktracker = WorkTrackerThatFailsHalfTheTime(clock, True)

    def advance_one_cycle(_):
        clock.advance(WorkTrackerThatFailsHalfTheTime.INTERVAL_CEIL)

    def checkworkstate(_):
        # Abort-on-error: no work should ever complete.
        assert worktracker.workdone == 0

    def start():
        worktracker.start()

    d = threads.deferToThread(start)
    for i in range(10):
        d.addCallback(advance_one_cycle)
        d.addCallback(checkworkstate)

    critical = []

    def critical_trapper(event):
        if event['log_level'] == LogLevel.critical:
            critical.append(event)

    # BUG FIX: the trapper was previously registered twice (once here and once
    # inside the context manager), which could double-count the critical event
    # and break the `len(critical) == 1` assertion. Register it exactly once.
    with GlobalLoggerSettings.pause_all_logging_while():  # To suppress the traceback being displayed from the critical error.
        globalLogPublisher.addObserver(critical_trapper)
        yield d
        globalLogPublisher.removeObserver(critical_trapper)

    assert len(critical) == 1
    assert critical[0]['failure'].getErrorMessage() == "zomg something went wrong"
def test_emit_warning_upon_new_version(ursula_federated_test_config, caplog):
    """A learner logs a warning when a teacher reports a node with a newer version."""
    lonely_ursula_maker = partial(make_federated_ursulas,
                                  ursula_config=ursula_federated_test_config,
                                  quantity=2,
                                  know_each_other=True)

    learner = lonely_ursula_maker().pop()
    teacher, new_node = lonely_ursula_maker()
    # Pretend the new node runs a future version of the protocol.
    new_node.TEACHER_VERSION = learner.LEARNER_VERSION + 1
    learner._current_teacher_node = teacher

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    globalLogPublisher.addObserver(warning_trapper)
    learner.learn_from_teacher_node()
    assert len(warnings) == 1
    assert warnings[0]['log_format'] == learner.unknown_version_message.format(
        new_node, new_node.TEACHER_VERSION, learner.LEARNER_VERSION)

    # Now let's go a little further: make the version totally unrecognizable.
    crazy_bytes_representation = int(learner.LEARNER_VERSION + 1).to_bytes(2, byteorder="big") \
                                 + b"totally unintelligible nonsense"
    Response = namedtuple("MockResponse", ("content", "status_code"))
    response = Response(content=crazy_bytes_representation, status_code=200)
    learner.network_middleware.get_nodes_via_rest = lambda *args, **kwargs: response
    learner.learn_from_teacher_node()

    # TODO: #1039 - Fails because the above mocked Response is unsigned, and the API now enforces interface signatures
    # assert len(warnings) == 2
    # assert warnings[1]['log_format'] == learner.unknown_version_message.format(new_node,
    #                                                                            new_node.TEACHER_VERSION,
    #                                                                            learner.LEARNER_VERSION)

    globalLogPublisher.removeObserver(warning_trapper)
def test_blockchain_ursula_stamp_verification_tolerance(blockchain_ursulas):
    """A node whose identity stamp is unsigned triggers exactly one warning
    and the legit teacher remains known."""
    #
    # Setup
    #
    lonely_blockchain_learner, blockchain_teacher, unsigned, *the_others = list(blockchain_ursulas)

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    #
    # Attempt to verify unsigned stamp
    #
    unsigned._Teacher__decentralized_identity_evidence = NOT_SIGNED

    # Wipe known nodes!
    lonely_blockchain_learner._Learner__known_nodes = FleetStateTracker()
    lonely_blockchain_learner._current_teacher_node = blockchain_teacher
    lonely_blockchain_learner.remember_node(blockchain_teacher)

    globalLogPublisher.addObserver(warning_trapper)
    lonely_blockchain_learner.learn_from_teacher_node(eager=True)
    globalLogPublisher.removeObserver(warning_trapper)

    # We received one warning during learning, and it was about this very matter.
    assert len(warnings) == 1
    warning = warnings[0]['log_format']
    assert str(unsigned) in warning
    assert "stamp is unsigned" in warning  # TODO: Cleanup logging templates

    # TODO: Buckets! #567
    # assert unsigned not in lonely_blockchain_learner.known_nodes
    # minus 2: self and the unsigned ursula.
    # assert len(lonely_blockchain_learner.known_nodes) == len(blockchain_ursulas) - 2
    assert blockchain_teacher in lonely_blockchain_learner.known_nodes
def __init__(self, port=8080, encrypt=None, config=0, auth=LDAP_AUTH_SIMPLE, validate=False):
    """Prepare the LDAP server thread: file logging, adapter registration, listener."""
    threading.Thread.__init__(self)
    self.is_running = False

    # Route twisted log events to a file, filtered at info level.
    file_sink = textFileLogObserver(open("ldaptor.log", 'a'))
    file_observer = FilteringLogObserver(
        file_sink, [LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)])

    # remove std logger to stderr in case of Failure
    # NOTE(review): reaches into the private _observers list and assumes the
    # stderr observer is first — confirm against the twisted version in use.
    globalLogPublisher.removeObserver(globalLogPublisher._observers[0])
    globalLogPublisher.addObserver(file_observer)

    registerAdapter(lambda x: x.root, LDAPServerFactory, IConnectedLDAPEntry)
    self._createListner(port, encrypt, config, auth, validate)
def test_worker_failure_resilience():
    """With abort-on-error disabled, the tracker keeps retrying after failures
    and completes half the attempted work."""
    # Control time
    clock = Clock()
    worktracker = WorkTrackerThatFailsHalfTheTime(clock, False)

    def advance_one_cycle(_):
        clock.advance(worktracker.INTERVAL_CEIL)

    def checkworkstate(_):
        if worktracker.attempts % 2:
            # An odd attempt count means the most recent attempt failed.
            assert worktracker._consecutive_fails > 0
        else:
            # Every other attempt succeeds, so half the attempts are done work.
            assert worktracker.attempts / 2 == worktracker.workdone

    def start():
        worktracker.start()

    d = threads.deferToThread(start)
    for i in range(10):
        d.addCallback(advance_one_cycle)
        d.addCallback(checkworkstate)

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    globalLogPublisher.addObserver(warning_trapper)
    yield d
    globalLogPublisher.removeObserver(warning_trapper)

    assert warnings
    for warning in warnings:
        assert "zomg something went wrong" in warning['failure'].getErrorMessage()
def test_errors(self):
    """
    Verify that for each way a method exposed via XML-RPC can fail, the
    correct 'Content-type' header is set in the response and that the
    client-side Deferred is errbacked with an appropriate C{Fault} instance.
    """
    # Only capture critical-level events (the two server-side failures).
    logObserver = EventLoggingObserver()
    filtered = FilteringLogObserver(
        logObserver,
        [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
    globalLogPublisher.addObserver(filtered)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(filtered))

    dl = []
    for code, methodName in [
        (666, "fail"),
        (666, "deferFail"),
        (12, "fault"),
        (23, "noSuchMethod"),
        (17, "deferFault"),
        (42, "SESSION_TEST"),
    ]:
        d = self.proxy().callRemote(methodName)
        d = self.assertFailure(d, xmlrpc.Fault)
        # Bind `code` as a default so each callback checks its own fault code.
        d.addCallback(lambda exc, code=code: self.assertEqual(exc.faultCode, code))
        dl.append(d)
    d = defer.DeferredList(dl, fireOnOneErrback=True)

    def cb(ign):
        for factory in self.factories:
            self.assertEqual(factory.headers[b"content-type"],
                             b"text/xml; charset=utf-8")
        self.assertEquals(2, len(logObserver))
        f1 = logObserver[0]["log_failure"].value
        f2 = logObserver[1]["log_failure"].value
        # The two logged failures can arrive in either order.
        if isinstance(f1, TestValueError):
            self.assertIsInstance(f2, TestRuntimeError)
        else:
            self.assertIsInstance(f1, TestRuntimeError)
            self.assertIsInstance(f2, TestValueError)
        self.flushLoggedErrors(TestRuntimeError, TestValueError)

    d.addCallback(cb)
    return d
def test_doStartLoggingStatement(self):
    """
    L{Factory.doStart} logs that it is starting a factory, followed by the
    L{repr} of the L{Factory} instance that is being started.
    """
    captured = []
    globalLogPublisher.addObserver(captured.append)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(captured.append))

    factory = Factory()
    factory.doStart()

    event = captured[0]
    self.assertIs(event['factory'], factory)
    self.assertEqual(event['log_level'], LogLevel.info)
    self.assertEqual(event['log_format'], 'Starting factory {factory!r}')
def test_doStartLoggingStatement(self):
    """
    L{Factory.doStart} logs that it is starting a factory, followed by the
    L{repr} of the L{Factory} instance that is being started.
    """
    captured = []
    globalLogPublisher.addObserver(captured.append)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(captured.append))

    factory = Factory()
    factory.doStart()

    event = captured[0]
    self.assertIs(event["factory"], factory)
    self.assertEqual(event["log_level"], LogLevel.info)
    self.assertEqual(event["log_format"], "Starting factory {factory!r}")
def test_doStopLoggingStatement(self):
    """
    L{Factory.doStop} logs that it is stopping a factory, followed by the
    L{repr} of the L{Factory} instance that is being stopped.
    """
    captured = []
    globalLogPublisher.addObserver(captured.append)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(captured.append))

    # doStop only logs while the factory still counts an open port.
    class MyFactory(Factory):
        numPorts = 1

    factory = MyFactory()
    factory.doStop()

    event = captured[0]
    self.assertIs(event['factory'], factory)
    self.assertEqual(event['log_level'], LogLevel.info)
    self.assertEqual(event['log_format'], 'Stopping factory {factory!r}')
def stopService(self):
    """Detach every log destination and the twisted observer, then stop the service."""
    for destination in self.destinations:
        remove_destination(destination)
    globalLogPublisher.removeObserver(self.twisted_observer)
    self.stdlib_cleanup()
    return Service.stopService(self)
def stopService(self):
    """Stop counting logged errors before the service shuts down."""
    globalLogPublisher.removeObserver(_count_errors)
    return Service.stopService(self)
def stop(observer):
    """Detach *observer* from the global log publisher."""
    globalLogPublisher.removeObserver(observer)  # pragma nocover
def stop_json_file_logging(cls):
    """Stop routing log events to the JSON file observer."""
    json_observer = get_json_file_observer()
    globalLogPublisher.removeObserver(json_observer)
def stop_sentry_logging(cls):
    """Detach the Sentry observer from the global log publisher."""
    globalLogPublisher.removeObserver(sentry_observer)
def stop_console_logging(cls):
    """Detach the console observer from the global log publisher."""
    globalLogPublisher.removeObserver(console_observer)
def __del__(self):
    # Best effort: stop feeding events to the store when this object is collected.
    globalLogPublisher.removeObserver(self.storeObserver)
def test_emit_warning_upon_new_version(ursula_federated_test_config, caplog):
    """Warnings are emitted for future-version nodes, garbage payloads with a
    scrapeable checksum address, and payloads too short for any address."""
    nodes = make_federated_ursulas(ursula_config=ursula_federated_test_config,
                                   quantity=3,
                                   know_each_other=False)
    teacher, learner, new_node = nodes

    learner.remember_node(teacher)
    teacher.remember_node(learner)
    teacher.remember_node(new_node)

    # Pretend the new node runs a future protocol version.
    new_node.TEACHER_VERSION = learner.LEARNER_VERSION + 1

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    globalLogPublisher.addObserver(warning_trapper)
    learner.learn_from_teacher_node()

    assert len(warnings) == 1
    assert warnings[0]['log_format'] == learner.unknown_version_message.format(
        new_node, new_node.TEACHER_VERSION, learner.LEARNER_VERSION)

    # Now let's go a little further: make the version totally unrecognizable.

    # First, there's enough garbage to at least scrape a potential checksum address
    fleet_snapshot = os.urandom(32 + 4)
    random_bytes = os.urandom(50)  # lots of garbage in here
    future_version = learner.LEARNER_VERSION + 42
    version_bytes = future_version.to_bytes(2, byteorder="big")
    crazy_bytes = fleet_snapshot + VariableLengthBytestring(version_bytes + random_bytes)
    signed_crazy_bytes = bytes(teacher.stamp(crazy_bytes))

    Response = namedtuple("MockResponse", ("content", "status_code"))
    response = Response(content=signed_crazy_bytes + crazy_bytes, status_code=200)

    learner._current_teacher_node = teacher
    learner.network_middleware.get_nodes_via_rest = lambda *args, **kwargs: response
    learner.learn_from_teacher_node()

    # If you really try, you can read a node representation from the garbage
    accidental_checksum = to_checksum_address(random_bytes[:20])
    accidental_nickname = nickname_from_seed(accidental_checksum)[0]
    accidental_node_repr = Character._display_name_template.format(
        "Ursula", accidental_nickname, accidental_checksum)

    assert len(warnings) == 2
    assert warnings[1]['log_format'] == learner.unknown_version_message.format(
        accidental_node_repr, future_version, learner.LEARNER_VERSION)

    # This time, however, there's not enough garbage to assume there's a checksum address...
    random_bytes = os.urandom(2)
    crazy_bytes = fleet_snapshot + VariableLengthBytestring(version_bytes + random_bytes)
    signed_crazy_bytes = bytes(teacher.stamp(crazy_bytes))
    response = Response(content=signed_crazy_bytes + crazy_bytes, status_code=200)

    learner._current_teacher_node = teacher
    learner.learn_from_teacher_node()

    assert len(warnings) == 3
    # ...so this time we get a "really unknown version message"
    assert warnings[2]['log_format'] == learner.really_unknown_version_message.format(
        future_version, learner.LEARNER_VERSION)

    globalLogPublisher.removeObserver(warning_trapper)
def ursula(click_config,
           action,
           dev,
           quiet,
           dry_run,
           force,
           lonely,
           network,
           teacher_uri,
           min_stake,
           rest_host,
           rest_port,
           db_filepath,
           checksum_address,
           federated_only,
           poa,
           config_root,
           config_file,
           metadata_dir,  # TODO: Start nodes from an additional existing metadata dir
           provider_uri,
           recompile_solidity,
           no_registry,
           registry_filepath) -> None:
    """
    Manage and run an "Ursula" PRE node.

    \b
    Actions
    -------------------------------------------------
    \b
    run            Run an "Ursula" node.
    init           Create a new Ursula node configuration.
    view           View the Ursula node's configuration.
    forget         Forget all known nodes.
    save-metadata  Manually write node metadata to disk without running
    destroy        Delete Ursula node configuration.
    """

    #
    # Boring Setup Stuff
    #
    if not quiet:
        log = Logger('ursula.cli')

    if click_config.debug and quiet:
        raise click.BadOptionUsage(
            option_name="quiet",
            message="--debug and --quiet cannot be used at the same time.")

    if click_config.debug:
        click_config.log_to_sentry = False
        click_config.log_to_file = True
        globalLogPublisher.removeObserver(logToSentry)  # Sentry
        GlobalConsoleLogger.set_log_level("debug")
    elif quiet:
        globalLogPublisher.removeObserver(logToSentry)
        # NOTE(review): this passes the SimpleObserver *class*, not a registered
        # instance, so it likely removes nothing — confirm intent.
        globalLogPublisher.removeObserver(SimpleObserver)
        # NOTE(review): getJsonFileObserver() may construct a new observer; removal
        # only works if it returns the registered instance — verify.
        globalLogPublisher.removeObserver(getJsonFileObserver())

    if not click_config.json_ipc and not click_config.quiet:
        click.secho(URSULA_BANNER)

    #
    # Pre-Launch Warnings
    #
    if not quiet:
        if dev:
            click.secho("WARNING: Running in development mode", fg='yellow')
        if force:
            click.secho("WARNING: Force is enabled", fg='yellow')

    #
    # Unauthenticated Configurations
    #
    if action == "init":
        """Create a brand-new persistent Ursula"""

        if not network:
            raise click.BadArgumentUsage(
                '--network is required to initialize a new configuration.')

        if dev:
            actions.handle_control_output(
                message="WARNING: Using temporary storage area",
                color='yellow',
                quiet=quiet,
                json=click_config.json)

        if not config_root:                         # Flag
            config_root = click_config.config_file  # Envvar

        if not rest_host:
            rest_host = click.prompt("Enter Ursula's public-facing IPv4 address")  # TODO: Remove this step

        ursula_config = UrsulaConfiguration.generate(
            password=click_config.get_password(confirm=True),
            config_root=config_root,
            rest_host=rest_host,
            rest_port=rest_port,
            db_filepath=db_filepath,
            domains={network} if network else None,
            federated_only=federated_only,
            checksum_public_address=checksum_address,
            no_registry=federated_only or no_registry,
            registry_filepath=registry_filepath,
            provider_uri=provider_uri,
            poa=poa)

        click_config.emitter(message="Generated keyring {}".format(ursula_config.keyring_dir),
                             color='green')
        click_config.emitter(message="Saved configuration file {}".format(ursula_config.config_file_location),
                             color='green')

        # Give the use a suggestion as to what to do next...
        how_to_run_message = "\nTo run an Ursula node from the default configuration filepath run: \n\n'{}'\n"
        suggested_command = 'nucypher ursula run'
        if config_root is not None:
            config_file_location = os.path.join(config_root,
                                                config_file or UrsulaConfiguration.CONFIG_FILENAME)
            suggested_command += ' --config-file {}'.format(config_file_location)
        return click_config.emitter(message=how_to_run_message.format(suggested_command),
                                    color='green')

    # Development Configuration
    if dev:
        ursula_config = UrsulaConfiguration(dev_mode=True,
                                            domains={TEMPORARY_DOMAIN},
                                            poa=poa,
                                            registry_filepath=registry_filepath,
                                            provider_uri=provider_uri,
                                            checksum_public_address=checksum_address,
                                            federated_only=federated_only,
                                            rest_host=rest_host,
                                            rest_port=rest_port,
                                            db_filepath=db_filepath)
    # Authenticated Configurations
    else:
        # Deserialize network domain name if override passed
        if network:
            domain_constant = getattr(constants, network.upper())
            domains = {domain_constant}
        else:
            domains = None

        ursula_config = UrsulaConfiguration.from_configuration_file(
            filepath=config_file,
            domains=domains,
            registry_filepath=registry_filepath,
            provider_uri=provider_uri,
            rest_host=rest_host,
            rest_port=rest_port,
            db_filepath=db_filepath,
            # TODO: Handle Boolean overrides
            # poa=poa,
            # federated_only=federated_only,
        )

        actions.unlock_keyring(configuration=ursula_config,
                               password=click_config.get_password())

    if not ursula_config.federated_only:
        actions.connect_to_blockchain(configuration=ursula_config,
                                      recompile_contracts=recompile_solidity)

    click_config.ursula_config = ursula_config  # Pass Ursula's config onto staking sub-command

    #
    # Launch Warnings
    #
    if ursula_config.federated_only:
        click_config.emitter(message="WARNING: Running in Federated mode", color='yellow')

    #
    # Action Switch
    #
    if action == 'run':
        """Seed, Produce, Run!"""

        #
        # Seed - Step 1
        #
        teacher_uris = [teacher_uri] if teacher_uri else list()
        teacher_nodes = actions.load_seednodes(teacher_uris=teacher_uris,
                                               min_stake=min_stake,
                                               federated_only=federated_only,
                                               network_middleware=click_config.middleware)

        #
        # Produce - Step 2
        #
        URSULA = ursula_config(known_nodes=teacher_nodes, lonely=lonely)

        # GO!
        try:

            #
            # Run - Step 3
            #
            click_config.emitter(
                message="Connecting to {}".format(','.join(str(d) for d in ursula_config.domains)),
                color='green',
                bold=True)
            click_config.emitter(
                message="Running Ursula {} on {}".format(URSULA, URSULA.rest_interface),
                color='green',
                bold=True)
            if not click_config.debug:
                stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))

            if dry_run:
                # That's all folks!
                return

            URSULA.get_deployer().run()  # <--- Blocking Call (Reactor)

        except Exception as e:
            ursula_config.log.critical(str(e))
            click_config.emitter(message="{} {}".format(e.__class__.__name__, str(e)),
                                 color='red',
                                 bold=True)
            raise  # Crash :-(

        finally:
            click_config.emitter(message="Stopping Ursula", color='green')
            ursula_config.cleanup()
            click_config.emitter(message="Ursula Stopped", color='red')

        return

    elif action == "save-metadata":
        """Manually save a node self-metadata file"""
        URSULA = ursula_config.produce(ursula_config=ursula_config)
        # NOTE(review): `ursula` here resolves to this CLI function itself; this
        # looks like it should be URSULA.write_node_metadata — confirm.
        metadata_path = ursula.write_node_metadata(node=URSULA)
        return click_config.emitter(
            message="Successfully saved node metadata to {}.".format(metadata_path),
            color='green')

    elif action == "view":
        """Paint an existing configuration to the console"""
        response = UrsulaConfiguration._read_configuration_file(
            filepath=config_file or ursula_config.config_file_location)
        return click_config.emitter(response=response)

    elif action == "forget":
        # TODO: Move to character control
        actions.forget(configuration=ursula_config)
        return

    elif action == "destroy":
        """Delete all configuration files from the disk"""
        if dev:
            message = "'nucypher ursula destroy' cannot be used in --dev mode"
            raise click.BadOptionUsage(option_name='--dev', message=message)

        destroyed_filepath = destroy_system_configuration(
            config_class=UrsulaConfiguration,
            config_file=config_file,
            network=network,
            config_root=ursula_config.config_file_location,
            force=force)
        return click_config.emitter(message=f"Destroyed {destroyed_filepath}", color='green')

    else:
        raise click.BadArgumentUsage("No such argument {}".format(action))
def __exit__(self, type, value, traceback):
    """Restore the prior global log level and stop capturing log events."""
    set_global_log_level(self._old_log_level)
    globalLogPublisher.removeObserver(self.logs.append)
def test_emit_warning_upon_new_version(lonely_ursula_maker, caplog):
    """Future-version nodes produce warnings whether met as seednodes, via a
    teacher, or as unrecognizable garbage payloads."""
    seed_node, teacher, new_node = lonely_ursula_maker(quantity=3,
                                                       domain="no hardcodes",
                                                       know_each_other=True)
    learner, _bystander = lonely_ursula_maker(quantity=2, domain="no hardcodes")
    learner.learning_domain = "no hardcodes"
    learner.remember_node(teacher)
    teacher.remember_node(learner)
    teacher.remember_node(new_node)

    learner._seed_nodes = [seed_node.seed_node_metadata()]
    # Pretend the seed node runs a future protocol version.
    seed_node.TEACHER_VERSION = learner.LEARNER_VERSION + 1

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    globalLogPublisher.addObserver(warning_trapper)

    # First we'll get a warning, because we're loading a seednode with a version
    # from the future.
    learner.load_seednodes()
    assert len(warnings) == 1
    expected_message = learner.unknown_version_message.format(
        seed_node, seed_node.TEACHER_VERSION, learner.LEARNER_VERSION)
    assert expected_message in warnings[0]['log_format']

    # We don't use the above seednode as a teacher, but when our teacher tries to
    # tell us about it, we get another of the same warning.
    learner.learn_from_teacher_node()
    assert len(warnings) == 2
    assert expected_message in warnings[1]['log_format']

    # Now let's go a little further: make the version totally unrecognizable.

    # First, there's enough garbage to at least scrape a potential checksum address
    fleet_snapshot = os.urandom(32 + 4)
    random_bytes = os.urandom(50)  # lots of garbage in here
    future_version = learner.LEARNER_VERSION + 42
    version_bytes = future_version.to_bytes(2, byteorder="big")
    crazy_bytes = fleet_snapshot + VariableLengthBytestring(version_bytes + random_bytes)
    signed_crazy_bytes = bytes(teacher.stamp(crazy_bytes))

    Response = namedtuple("MockResponse", ("content", "status_code"))
    response = Response(content=signed_crazy_bytes + crazy_bytes, status_code=200)

    learner._current_teacher_node = teacher
    learner.network_middleware.get_nodes_via_rest = lambda *args, **kwargs: response
    learner.learn_from_teacher_node()

    # If you really try, you can read a node representation from the garbage
    accidental_checksum = to_checksum_address(random_bytes[:20])
    accidental_nickname = nickname_from_seed(accidental_checksum)[0]
    accidental_node_repr = Character._display_name_template.format(
        "Ursula", accidental_nickname, accidental_checksum)

    assert len(warnings) == 3
    expected_message = learner.unknown_version_message.format(
        accidental_node_repr, future_version, learner.LEARNER_VERSION)
    assert expected_message in warnings[2]['log_format']

    # This time, however, there's not enough garbage to assume there's a checksum address...
    random_bytes = os.urandom(2)
    crazy_bytes = fleet_snapshot + VariableLengthBytestring(version_bytes + random_bytes)
    signed_crazy_bytes = bytes(teacher.stamp(crazy_bytes))
    response = Response(content=signed_crazy_bytes + crazy_bytes, status_code=200)

    learner._current_teacher_node = teacher
    learner.learn_from_teacher_node()

    assert len(warnings) == 4
    # ...so this time we get a "really unknown version message"
    assert warnings[3]['log_format'] == learner.really_unknown_version_message.format(
        future_version, learner.LEARNER_VERSION)

    globalLogPublisher.removeObserver(warning_trapper)
def remove_observer():
    """Detach the enclosing scope's observer from the global log publisher."""
    globalLogPublisher.removeObserver(observer=observer)
def test_invalid_workers_tolerance(testerchain,
                                   test_registry,
                                   blockchain_ursulas,
                                   agency,
                                   idle_staker,
                                   token_economics,
                                   ursula_decentralized_test_config
                                   ):
    """
    A learner must refuse to remember a worker whose staker has fully
    withdrawn: off-chain verification still passes, forced on-chain
    verification raises NotStaking, and learning from the invalid node
    produces exactly one "no active stakes" warning.
    """
    #
    # Setup
    #
    lonely_blockchain_learner, blockchain_teacher, unsigned, *the_others = list(blockchain_ursulas)
    _, staking_agent, _ = agency

    # Capture warn-level events emitted through Twisted's global log publisher.
    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    # We start with an "idle_staker" (i.e., no tokens in StakingEscrow)
    assert 0 == staking_agent.owned_tokens(idle_staker.checksum_address)

    # Now let's create an active worker for this staker.
    # First, stake something (e.g. the bare minimum)
    amount = token_economics.minimum_allowed_locked
    periods = token_economics.minimum_locked_periods

    # Mock Powerup consumption (Staker)
    testerchain.transacting_power = TransactingPower(account=idle_staker.checksum_address)

    idle_staker.initialize_stake(amount=amount, lock_periods=periods)

    # Stake starts next period (or else signature validation will fail)
    testerchain.time_travel(periods=1)
    idle_staker.stake_tracker.refresh()

    # We create an active worker node for this staker
    worker = make_ursula_for_staker(staker=idle_staker,
                                    worker_address=testerchain.unassigned_accounts[-1],
                                    ursula_config=ursula_decentralized_test_config,
                                    blockchain=testerchain,
                                    commit_to_next_period=True,
                                    ursulas_to_learn_about=None)

    # Since we made a commitment, we need to advance one period
    testerchain.time_travel(periods=1)

    # The worker is valid and can be verified (even with the force option)
    worker.verify_node(force=True,
                       network_middleware=MockRestMiddleware(),
                       certificate_filepath="quietorl")
    # In particular, we know that it's bonded to a staker who is really staking.
    assert worker._worker_is_bonded_to_staker(registry=test_registry)
    assert worker._staker_is_really_staking(registry=test_registry)

    # OK. Now we learn about this worker.
    lonely_blockchain_learner.remember_node(worker)

    # The worker already committed one period before. Let's commit to the remaining 29.
    for i in range(29):
        worker.commit_to_next_period()
        testerchain.time_travel(periods=1)

    # The stake period has ended, and the staker wants her tokens back ("when lambo?").
    # She withdraws up to the last penny (well, last nunit, actually).

    # Mock Powerup consumption (Staker)
    testerchain.transacting_power = TransactingPower(account=idle_staker.checksum_address)

    idle_staker.mint()
    testerchain.time_travel(periods=1)
    i_want_it_all = staking_agent.owned_tokens(idle_staker.checksum_address)
    idle_staker.withdraw(i_want_it_all)

    # OK...so...the staker is not staking anymore ...
    assert 0 == staking_agent.owned_tokens(idle_staker.checksum_address)

    # ... but the worker node still is "verified" (since we're not forcing on-chain verification)
    worker.verify_node(network_middleware=MockRestMiddleware(),
                       certificate_filepath="quietorl")

    # If we force, on-chain verification, the worker is of course not verified
    with pytest.raises(worker.NotStaking):
        worker.verify_node(force=True,
                           network_middleware=MockRestMiddleware(),
                           certificate_filepath="quietorl")

    # Let's learn from this invalid node
    lonely_blockchain_learner._current_teacher_node = worker
    globalLogPublisher.addObserver(warning_trapper)
    lonely_blockchain_learner.learn_from_teacher_node()
    # lonely_blockchain_learner.remember_node(worker)  # The same problem occurs if we directly try to remember this node
    globalLogPublisher.removeObserver(warning_trapper)

    # TODO: What should we really check here? (#1075)
    assert len(warnings) == 1
    warning = warnings[-1]['log_format']
    assert str(worker) in warning
    assert "no active stakes" in warning  # TODO: Cleanup logging templates
    assert worker not in lonely_blockchain_learner.known_nodes
You should have received a copy of the GNU Affero General Public License along with nucypher. If not, see <https://www.gnu.org/licenses/>. """ import os import pytest from twisted.logger import globalLogPublisher from nucypher.characters.control.emitters import WebEmitter from nucypher.cli.config import NucypherClickConfig from nucypher.utilities.logging import GlobalConsoleLogger, logToSentry # Logger Configuration # from nucypher.utilities.sandbox.constants import INSECURE_DEVELOPMENT_PASSWORD globalLogPublisher.removeObserver(logToSentry) # Disable click sentry and file logging NucypherClickConfig.log_to_sentry = False NucypherClickConfig.log_to_file = True # Crash on server error by default WebEmitter._crash_on_error_default = False ########################################## @pytest.fixture(autouse=True, scope='session') def __very_pretty_and_insecure_scrypt_do_not_use(): """ # WARNING: DO NOT USE THIS CODE ANYWHERE #
def __del__(self) -> None: globalLogPublisher.removeObserver(self.storeObserver)
def stop(self): if self._output and self._output != sys.stdout: globalLogPublisher.removeObserver(self) if self._filename: self._output.close() self._output = None
def __exit__(self, type, value, traceback): globalLogPublisher.removeObserver(self._got_log) set_global_log_level(self._old_log_level)
def ursula(click_config,
           action,
           debug,
           dev,
           quiet,
           dry_run,
           force,
           lonely,
           network,
           teacher_uri,
           min_stake,
           rest_host,
           rest_port,
           db_filepath,
           checksum_address,
           federated_only,
           poa,
           config_root,
           config_file,
           metadata_dir,  # TODO: Start nodes from an additional existing metadata dir
           provider_uri,
           no_registry,
           registry_filepath) -> None:
    """
    Manage and run an "Ursula" PRE node.

    \b
    Actions
    -------------------------------------------------
    \b
    run            Run an "Ursula" node.
    init           Create a new Ursula node configuration.
    view           View the Ursula node's configuration.
    forget         Forget all known nodes.
    save-metadata  Manually write node metadata to disk without running
    destroy        Delete Ursula node configuration.
    """

    #
    # Boring Setup Stuff
    #
    if not quiet:
        click.secho(URSULA_BANNER)

    log = Logger('ursula.cli')

    # --debug takes over console output; --quiet suppresses it. They conflict.
    if debug and quiet:
        raise click.BadOptionUsage(option_name="quiet",
                                   message="--debug and --quiet cannot be used at the same time.")

    if debug:
        click_config.log_to_sentry = False
        click_config.log_to_file = True
        globalLogPublisher.removeObserver(logToSentry)  # Sentry
        GlobalConsoleLogger.set_log_level("debug")
    elif quiet:
        # Silence every registered observer: Sentry, console, and JSON file.
        globalLogPublisher.removeObserver(logToSentry)
        globalLogPublisher.removeObserver(SimpleObserver)
        globalLogPublisher.removeObserver(getJsonFileObserver())

    #
    # Pre-Launch Warnings
    #
    if not quiet:
        if dev:
            click.secho("WARNING: Running in development mode", fg='yellow')
        if force:
            click.secho("WARNING: Force is enabled", fg='yellow')

    #
    # Unauthenticated Configurations
    #
    if action == "init":
        """Create a brand-new persistent Ursula"""

        if dev and not quiet:
            click.secho("WARNING: Using temporary storage area", fg='yellow')

        if not config_root:                         # Flag
            config_root = click_config.config_file  # Envvar

        if not rest_host:
            rest_host = click.prompt("Enter Ursula's public-facing IPv4 address")

        ursula_config = UrsulaConfiguration.generate(password=click_config.get_password(confirm=True),
                                                     config_root=config_root,
                                                     rest_host=rest_host,
                                                     rest_port=rest_port,
                                                     db_filepath=db_filepath,
                                                     domains={network} if network else None,
                                                     federated_only=federated_only,
                                                     checksum_public_address=checksum_address,
                                                     no_registry=federated_only or no_registry,
                                                     registry_filepath=registry_filepath,
                                                     provider_uri=provider_uri,
                                                     poa=poa)

        if not quiet:
            click.secho("Generated keyring {}".format(ursula_config.keyring_dir), fg='green')
            click.secho("Saved configuration file {}".format(ursula_config.config_file_location), fg='green')

            # Give the user a suggestion as to what to do next...
            how_to_run_message = "\nTo run an Ursula node from the default configuration filepath run: \n\n'{}'\n"
            suggested_command = 'nucypher ursula run'
            if config_root is not None:
                config_file_location = os.path.join(config_root, config_file or UrsulaConfiguration.CONFIG_FILENAME)
                suggested_command += ' --config-file {}'.format(config_file_location)
            click.secho(how_to_run_message.format(suggested_command), fg='green')
            return  # FIN

        else:
            click.secho("OK")

    elif action == "destroy":
        """Delete all configuration files from the disk"""

        if dev:
            message = "'nucypher ursula destroy' cannot be used in --dev mode"
            raise click.BadOptionUsage(option_name='--dev', message=message)

        destroy_system_configuration(config_class=UrsulaConfiguration,
                                     config_file=config_file,
                                     network=network,
                                     config_root=config_root,
                                     force=force,
                                     log=log)
        if not quiet:
            click.secho("Destroyed {}".format(config_root))
        return

    # Development Configuration
    if dev:
        ursula_config = UrsulaConfiguration(dev_mode=True,
                                            domains={TEMPORARY_DOMAIN},
                                            poa=poa,
                                            registry_filepath=registry_filepath,
                                            provider_uri=provider_uri,
                                            checksum_public_address=checksum_address,
                                            federated_only=federated_only,
                                            rest_host=rest_host,
                                            rest_port=rest_port,
                                            db_filepath=db_filepath)

    # Authenticated Configurations
    else:
        # Deserialize network domain name if override passed
        if network:
            domain_constant = getattr(constants, network.upper())
            domains = {domain_constant}
        else:
            domains = None

        ursula_config = UrsulaConfiguration.from_configuration_file(filepath=config_file,
                                                                    domains=domains,
                                                                    registry_filepath=registry_filepath,
                                                                    provider_uri=provider_uri,
                                                                    rest_host=rest_host,
                                                                    rest_port=rest_port,
                                                                    db_filepath=db_filepath,
                                                                    # TODO: Handle Boolean overrides
                                                                    # poa=poa,
                                                                    # federated_only=federated_only,
                                                                    )

        try:
            # Unlock Keyring
            if not quiet:
                click.secho('Decrypting keyring...', fg='blue')
            ursula_config.keyring.unlock(password=click_config.get_password())  # Takes ~3 seconds, ~1GB Ram
        except CryptoError:
            raise ursula_config.keyring.AuthenticationFailed

    if not ursula_config.federated_only:
        try:
            ursula_config.connect_to_blockchain(recompile_contracts=False)
            ursula_config.connect_to_contracts()
        except EthereumContractRegistry.NoRegistry:
            message = "Cannot configure blockchain character: No contract registry found; " \
                      "Did you mean to pass --federated-only?"
            raise EthereumContractRegistry.NoRegistry(message)

    click_config.ursula_config = ursula_config  # Pass Ursula's config onto staking sub-command

    #
    # Launch Warnings
    #
    if not quiet:
        if ursula_config.federated_only:
            click.secho("WARNING: Running in Federated mode", fg='yellow')

    #
    # Action Switch
    #
    if action == 'run':
        """Seed, Produce, Run!"""

        #
        # Seed - Step 1
        #
        teacher_nodes = list()
        if teacher_uri:
            node = Ursula.from_teacher_uri(teacher_uri=teacher_uri,
                                           min_stake=min_stake,
                                           federated_only=ursula_config.federated_only)
            teacher_nodes.append(node)

        #
        # Produce - Step 2
        #
        ursula = ursula_config(known_nodes=teacher_nodes, lonely=lonely)  # GO!

        try:
            #
            # Run - Step 3
            #
            click.secho("Connecting to {}".format(','.join(str(d) for d in ursula_config.domains)),
                        fg='blue', bold=True)
            click.secho("Running Ursula {} on {}".format(ursula, ursula.rest_interface),
                        fg='green', bold=True)
            if not debug:
                # Interactive command console; skipped under --debug so the
                # reactor log output owns the terminal.
                stdio.StandardIO(UrsulaCommandProtocol(ursula=ursula))

            if dry_run:
                # That's all folks!
                return

            ursula.get_deployer().run()  # <--- Blocking Call (Reactor)

        except Exception as e:
            ursula_config.log.critical(str(e))
            click.secho("{} {}".format(e.__class__.__name__, str(e)), fg='red')
            raise  # Crash :-(

        finally:
            if not quiet:
                click.secho("Stopping Ursula")
            ursula_config.cleanup()
            if not quiet:
                click.secho("Ursula Stopped", fg='red')
        return

    elif action == "save-metadata":
        """Manually save a node self-metadata file"""
        ursula = ursula_config.produce(ursula_config=ursula_config)
        metadata_path = ursula.write_node_metadata(node=ursula)
        if not quiet:
            click.secho("Successfully saved node metadata to {}.".format(metadata_path), fg='green')
        return

    elif action == "view":
        """Paint an existing configuration to the console"""
        paint_configuration(config_filepath=config_file or ursula_config.config_file_location)
        return

    elif action == "forget":
        """Forget all known nodes via storages"""
        click.confirm("Permanently delete all known node data?", abort=True)
        ursula_config.forget_nodes()
        # Fixed: message previously read "node node metadata" (duplicated word).
        message = "Removed all stored node metadata and certificates"
        click.secho(message=message, fg='red')
        return

    else:
        raise click.BadArgumentUsage("No such argument {}".format(action))