def shutdown():
    """
    Final application teardown: stop the shutdowner automat, clear all
    state machines, drop the debug-level config callback and close log files.

    Returns 0 so the result can be used directly as a process exit code.
    """
    from logs import lg
    from main import config
    from system import bpio
    lg.out(2, 'bpmain.shutdown')
    import shutdowner
    shutdowner.A('reactor-stopped')
    from automats import automat
    automat.objects().clear()
    if len(automat.index()) > 0:
        # some automats survived the clear - report them for debugging
        lg.warn('%d automats was not cleaned' % len(automat.index()))
        for a in automat.index().keys():
            lg.out(2, ' %r' % a)
    else:
        lg.out(2, 'bpmain.shutdown automat.objects().clear() SUCCESS, no state machines left in memory')
    config.conf().removeCallback('logs/debug-level')
    # NOTE(review): `threading` is not imported inside this function - assumes
    # a module-level import at the top of the file; confirm.
    lg.out(2, 'bpmain.shutdown currently %d threads running:' % len(threading.enumerate()))
    for t in threading.enumerate():
        lg.out(2, ' ' + str(t))
    lg.out(2, 'bpmain.shutdown finishing and closing log file, EXIT')
    automat.CloseLogFile()
    lg.close_log_file()
    if bpio.Windows() and bpio.isFrozen():
        # frozen Windows builds redirect stdout - undo that before exiting
        lg.stdout_stop_redirecting()
    return 0
def _check_update_original_identity(self):
    """
    Validate the stored "my-original-identity" config value against the
    current proxy router settings and reset it whenever the two are
    inconsistent or the saved identity is no longer valid.
    """
    from logs import lg
    from lib import misc
    from main.config import conf
    from userid import identity
    saved_identity_xml = conf().getData('services/proxy-transport/my-original-identity', '').strip()
    active_router = conf().getString('services/proxy-transport/current-router', '').strip()
    # no saved original identity: a configured router is then meaningless
    if not saved_identity_xml:
        if active_router:
            lg.warn('current-router is %s, but my-original-identity is empty' % active_router)
        self._reset_my_original_identity()
        return
    saved_identity = identity.identity(xmlsrc=saved_identity_xml)
    # corrupted or invalid identity source must not be kept around
    if not (saved_identity.isCorrect() and saved_identity.Valid()):
        lg.warn('my original identity is not valid')
        self._reset_my_original_identity()
        return
    known_external_ip = misc.readExternalIP()
    # a changed external IP invalidates the stored identity's contacts
    if known_external_ip and saved_identity.getIP() != known_external_ip:
        lg.warn('external IP was changed : restore my original identity')
        self._reset_my_original_identity()
        return
    # valid identity but no router configured - nothing to proxy through
    if not active_router:
        lg.warn('original identity is correct, but current router is empty')
        self._reset_my_original_identity()
def doStartListening(self, arg):
    """
    Action method.

    Remember the active router's proto/host pair, cache the router's
    identity, persist the router info in the config and start receiving
    inbox packets through it.
    """
    try:
        # normal path: the event argument carries connection info
        _, info = arg
        self.router_proto_host = (info.proto, info.host)
    except:
        try:
            # fallback: restore proto/host from the saved config value
            s = config.conf().getString('services/proxy-transport/current-router').strip()
            _, router_proto, router_host = s.split(' ')
            self.router_proto_host = (router_proto, router_host)
        except:
            # NOTE(review): if both attempts fail, self.router_proto_host may
            # remain unset and the setString() call below will raise - confirm
            lg.exc()
    self.router_identity = identitycache.FromCache(self.router_idurl)
    config.conf().setString('services/proxy-transport/current-router', '%s %s %s' % (
        self.router_idurl, self.router_proto_host[0], self.router_proto_host[1]))
    if ReadMyOriginalIdentitySource():
        lg.warn('my original identity is not empty')
    else:
        # first time going through a router: remember the "real" identity
        config.conf().setData('services/proxy-transport/my-original-identity',
                              my_id.getLocalIdentity().serialize())
    self.request_service_packet_id = []
    callback.insert_inbox_callback(0, self._on_inbox_packet_received)
    if _Debug:
        lg.out(2, 'proxy_receiver.doStartListening !!!!!!! router: %s at %s://%s' % (
            self.router_idurl, self.router_proto_host[0], self.router_proto_host[1]))
def start(self):
    """
    Initialize the backup storage subsystems, wire the GUI notification
    callbacks (new or legacy web GUI), kick off the backup monitor and
    subscribe to config changes and p2p_connector state transitions.

    Always returns True.
    """
    from storage import backup_fs
    from storage import backup_control
    from storage import backup_matrix
    from storage import backup_monitor
    from main import settings
    from main.config import conf
    from p2p import p2p_connector
    backup_fs.init()
    backup_control.init()
    backup_matrix.init()
    if settings.NewWebGUI():
        from web import control
        backup_matrix.SetBackupStatusNotifyCallback(control.on_backup_stats)
        backup_matrix.SetLocalFilesNotifyCallback(control.on_read_local_files)
    else:
        # legacy web GUI uses differently-named callback entry points
        from web import webcontrol
        backup_matrix.SetBackupStatusNotifyCallback(webcontrol.OnBackupStats)
        backup_matrix.SetLocalFilesNotifyCallback(webcontrol.OnReadLocalFiles)
    backup_monitor.A('init')
    backup_monitor.A('restart')
    conf().addCallback('services/backups/keep-local-copies-enabled',
                       self._on_keep_local_copies_modified)
    conf().addCallback('services/backups/wait-suppliers-enabled',
                       self._on_wait_suppliers_modified)
    # re-check backups whenever the p2p connection is (re)established
    p2p_connector.A().addStateChangedCallback(
        self._on_p2p_connector_state_changed, 'INCOMMING?', 'CONNECTED')
    p2p_connector.A().addStateChangedCallback(
        self._on_p2p_connector_state_changed, 'MY_IDENTITY', 'CONNECTED')
    return True
def start(self):
    """
    Initialize the fire_hire automat and subscribe to the supplier-related
    configuration options.
    """
    from customer import fire_hire
    from main.config import conf
    fire_hire.A("init")
    for option, handler in (
        ("services/customer/suppliers-number", self._on_suppliers_number_modified),
        ("services/customer/needed-space", self._on_needed_space_modified),
    ):
        conf().addCallback(option, handler)
    return True
def doRestoreMyIdentity(self, arg):
    """
    Action method.

    Rebuild the local identity and wipe the saved
    "my-original-identity" config value.
    """
    if not my_id.rebuildLocalIdentity():
        lg.warn('my identity was not modified')
    config.conf().setData('services/proxy-transport/my-original-identity', '')
def stop(self):
    """
    Stop the local tester, unsubscribe from the donated-space option and
    destroy the customers_rejector automat.
    """
    from main.config import conf
    from supplier import customers_rejector
    from supplier import local_tester
    # order matters: stop background testing before tearing down rejector
    local_tester.shutdown()
    conf().removeCallback('services/supplier/donated-space')
    customers_rejector.Destroy()
    return True
def stop(self):
    """
    Unsubscribe from the supplier-related config options and destroy the
    fire_hire automat.
    """
    from customer import fire_hire
    from main.config import conf
    for option in ("services/customer/suppliers-number", "services/customer/needed-space"):
        conf().removeCallback(option)
    fire_hire.Destroy()
    return True
def print_all_settings():
    """
    Dump every known config entry and its current value to stdout.

    NOTE: uses Python 2 print statements - this fragment targets Python 2.
    Returns 0 so it can be used directly as a process exit code.
    """
    from main import config
    print config.conf().configDir
    for path in config.conf().listAllEntries():
        # flatten multi-line values so each entry stays on a single line
        value = config.conf().getData(path, '').replace('\n', ' ')
        # label = config.conf().labels.get(path, '')
        # info = config.conf().infos.get(path, '')
        print ' %s %s' % (path.ljust(50), value.ljust(20))
    return 0
def start(self):
    """
    Load the local identity and, if present, initialize the identity
    cache, contacts DB and propagation subsystem.

    Returns False when no local identity exists yet, otherwise the
    Deferred returned by contactsdb.init().
    """
    from logs import lg
    from userid import my_id
    from main.config import conf
    my_id.loadLocalIdentity()
    # peeking at the private cache to detect a failed load
    if my_id._LocalIdentity is None:
        lg.warn('Loading local identity failed - need to create an identity first')
        return False
    from contacts import identitycache
    from userid import known_servers
    from p2p import propagate
    from contacts import contactsdb
    identitycache.init()
    init_result = contactsdb.init()
    propagate.init()
    conf().addConfigNotifier('services/identity-propagate/known-servers',
                             self._on_known_servers_changed)
    lg.info('known ID servers are : %r' % known_servers.by_host())
    return init_result
def setUp(self):
    """
    Prepare an isolated settings/log environment under /tmp/.bitdust_tmp
    and record the child-processes flag in the test configuration.
    """
    try:
        # best effort: a previous run may not have left anything behind
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    settings.init(base_dir='/tmp/.bitdust_tmp')
    lg.set_debug_level(30)
    try:
        os.makedirs('/tmp/.bitdust_tmp/logs')
    except OSError:
        # narrowed from a bare except: only "directory already exists"
        # (and similar FS errors) are expected here
        pass
    local_fs.WriteTextFile('/tmp/.bitdust_tmp/logs/parallelp.log', '')
    # single call instead of the duplicated if/else True/False branches
    config.conf().setBool(
        'services/rebuilding/child-processes-enabled',
        bool(self.child_processes_enabled))
def nodes():
    """
    Return the list of known DHT seed nodes as (host, port) tuples.

    Here is a well known DHT nodes, this is "genesis" network.
    Every new node in the network will first connect one or several of those
    nodes, and then will be routed to some other nodes already registered.

    You can override those "genesis" nodes (before you join the network the
    first time) by configuring a list of your preferred DHT nodes (host or
    IP address) in the program settings:

        api.config_set(
            "services/entangled-dht/known-nodes",
            "firstnode.net:14441, secondmachine.com:1234, 123.45.67.89:9999",
        )

    This way you can create your own DHT network, inside BitDust, under
    your full control.
    """
    from main import config
    from lib import strng
    try:
        overridden_dht_nodes_str = str(config.conf().getData('services/entangled-dht/known-nodes'))
    except:
        overridden_dht_nodes_str = ''
    # magic values "genesis"/"root" mean: this node must not connect anywhere
    if overridden_dht_nodes_str in ['genesis', 'root', b'genesis', b'root', ]:
        # "genesis" node must not connect anywhere
        return []
    if not overridden_dht_nodes_str:
        return default_nodes()
    overridden_dht_nodes = []
    # accepts newline, semicolon, comma or space separated "host:port" pairs
    for dht_node_str in re.split('\n|;|,| ', overridden_dht_nodes_str):
        if dht_node_str.strip():
            try:
                dht_node = dht_node_str.strip().split(':')
                dht_node_host = strng.to_bin(dht_node[0].strip())
                dht_node_port = int(dht_node[1].strip())
            except:
                # malformed entries are silently skipped on purpose
                continue
            overridden_dht_nodes.append((dht_node_host, dht_node_port, ))
    if overridden_dht_nodes:
        # from logs import lg
        # lg.info('DHT seeds was overridden in local settings: %s' % overridden_dht_nodes)
        return overridden_dht_nodes
    # every configured entry failed to parse - fall back to defaults
    return default_nodes()
def start(self):
    """
    Initialize the DHT service, connect to the known seed nodes and
    publish this node's IDURL in the DHT.

    Returns the Deferred fired by dht_service.connect().
    """
    from logs import lg
    from dht import dht_service
    from dht import known_nodes
    from main import settings
    from main.config import conf
    from userid import my_id
    conf().addCallback('services/entangled-dht/udp-port', self._on_udp_port_modified)
    dht_service.init(udp_port=settings.getDHTPort(), db_file_path=settings.DHTDBFile())
    seeds = known_nodes.nodes()
    lg.info('known seed nodes are : %r' % seeds)
    connect_result = dht_service.connect(seed_nodes=seeds)
    connect_result.addCallback(self._on_connected)
    connect_result.addErrback(self._on_connect_failed)
    my_idurl = my_id.getLocalID()
    if my_idurl:
        dht_service.set_node_data('idurl', my_idurl)
    return connect_result
def start(self):
    """
    Create and initialize the HTTP network transport and subscribe to the
    related config options.

    Returns a Deferred which fires when the transport reports its state.
    """
    from twisted.internet import reactor  # @UnresolvedImport
    from twisted.internet.defer import Deferred
    from transport.http import http_interface
    from transport import network_transport
    from transport import gateway
    from main.config import conf
    self.starting_deferred = Deferred()
    self.transport = network_transport.NetworkTransport('http', http_interface.GateInterface())
    self.transport.automat('init', (gateway.listener(), self._on_transport_state_changed))
    reactor.callLater(0, self.transport.automat, 'start')  # @UndefinedVariable
    for option, handler in (
        ('services/http-transport/enabled', self._on_enabled_disabled),
        ('services/http-transport/receiving-enabled', self._on_receiving_enabled_disabled),
    ):
        conf().addCallback(option, handler)
    return self.starting_deferred
def doStopListening(self, arg):
    """
    Action method.

    Unsubscribe from the router's contact status, wipe the persisted
    router info and reset all router-related state, then rebuild the
    local identity.
    """
    if contact_status.isKnown(self.router_idurl):
        router_status = contact_status.A(self.router_idurl)
        router_status.removeStateChangedCallback(self._on_router_contact_status_connected)
        router_status.removeStateChangedCallback(self._on_router_contact_status_offline)
    WriteMyOriginalIdentitySource('')
    config.conf().setString('services/proxy-transport/current-router', '')
    callback.remove_inbox_callback(self._on_inbox_packet_received)
    # drop every piece of cached router state
    self.router_identity = None
    self.router_idurl = None
    self.router_proto_host = None
    self.request_service_packet_id = []
    my_id.rebuildLocalIdentity()
    if _Debug:
        lg.out(2, 'proxy_receiver.doStopListening')
def init():
    """
    Discover and instantiate all available services.

    Scans the "services" directory for service_*.py(c/o) modules, imports
    each one, creates the service object via its create_service() factory,
    registers it, and marks enabled services. Finally builds the start
    order and subscribes to config changes under "services/".
    """
    if _Debug:
        lg.out(_DebugLevel, 'driver.init')
    available_services_dir = os.path.join(bpio.getExecutableDir(), 'services')
    loaded = set()
    for filename in os.listdir(available_services_dir):
        # accept python source and compiled files only
        if not filename.endswith('.py') and not filename.endswith('.pyo') and not filename.endswith('.pyc'):
            continue
        if not filename.startswith('service_'):
            continue
        name = str(filename[:filename.rfind('.')])
        # the same service may appear as .py and .pyc - import it only once
        if name in loaded:
            continue
        if name in disabled_services():
            if _Debug:
                lg.out(_DebugLevel, '%s is hard disabled' % name)
            continue
        try:
            py_mod = importlib.import_module('services.' + name)
        except:
            if _Debug:
                lg.out(_DebugLevel, '%s exception during module import' % name)
            lg.exc()
            continue
        try:
            services()[name] = py_mod.create_service()
        except:
            if _Debug:
                lg.out(_DebugLevel, '%s exception while creating service instance' % name)
            lg.exc()
            continue
        loaded.add(name)
        # a created-but-switched-off service stays registered, just not enabled
        if not services()[name].enabled():
            if _Debug:
                lg.out(_DebugLevel, '%s is switched off' % name)
            continue
        enabled_services().add(name)
        if _Debug:
            lg.out(_DebugLevel, '%s initialized' % name)
    build_order()
    config.conf().addCallback('services/', on_service_enabled_disabled)
def __init__(self, supplierIdentity, creatorID, customerIDURL=None):
    """
    Queue of file uploads/downloads exchanged with a single supplier.

    :param supplierIdentity: IDURL of the remote supplier this queue talks to
    :param creatorID: IDURL of the user who creates the packets
    :param customerIDURL: owner of the stored data; defaults to the local ID
    """
    self.customerIDURL = customerIDURL
    if self.customerIDURL is None:
        self.customerIDURL = my_id.getLocalID()
    self.creatorID = creatorID
    self.remoteID = supplierIdentity
    self.remoteName = nameurl.GetName(self.remoteID)
    # all sends we'll hold on to, only several will be active,
    # but will hold onto the next ones to be sent
    # active files
    self.fileSendMaxLength = config.conf().getInt(
        'services/data-motion/supplier-sending-queue-size', 8)
    # an array of packetId, preserving first in first out,
    # of which the first maxLength are the "active" sends
    self.fileSendQueue = []
    # dictionary of FileUp's using packetId as index,
    # hold onto stuff sent and acked for some period as a history?
    self.fileSendDict = {}
    # all requests we'll hold on to,
    # only several will be active, but will hold onto the next ones to be sent
    # active requests
    self.fileRequestMaxLength = config.conf().getInt(
        'services/data-motion/supplier-request-queue-size', 8)
    # an array of PacketIDs, preserving first in first out
    self.fileRequestQueue = []
    # FileDown's, indexed by PacketIDs
    self.fileRequestDict = {}
    # set to True to make both processing loops stop
    self.shutdown = False
    # transfer statistics counters
    self.ackedCount = 0
    self.failedCount = 0
    self.uploadingTimeoutCount = 0
    self.downloadingTimeoutCount = 0
    self._runSend = False
    # periodic task handles and their loop delays (in seconds)
    self.sendTask = None
    self.sendTaskDelay = 0.1
    self.requestTask = None
    self.requestTaskDelay = 0.1
def doSendMyIdentity(self, *args, **kwargs):
    """
    Action method.

    Schedule sending the current local identity to the router; when a
    saved "my-original-identity" exists, schedule sending that one too.
    """
    current_identity_xml = my_id.getLocalIdentity().serialize()
    reactor.callLater(0, self._do_send_identity_to_router, current_identity_xml, failed_event='fail-received')  # @UndefinedVariable
    stored_identity_xml = config.conf().getData('services/proxy-transport/my-original-identity').strip()
    if stored_identity_xml:
        if _Debug:
            lg.out(_DebugLevel, ' also sending identity loaded from "my-original-identity" config')
        reactor.callLater(0, self._do_send_identity_to_router, stored_identity_xml, failed_event='fail-received')  # @UndefinedVariable
def doInit(self, *args, **kwargs):
    """
    Action method.

    Read the packet-log flag from the config and reset all counters and
    pending-packet queues.
    """
    global _PacketLogFileEnabled
    _PacketLogFileEnabled = config.conf().getBool('logs/packet-enabled')
    self.traffic_out = 0
    # queues for packets that could not be delivered yet
    self.pending_packets = []
    self.pending_ping_packets = []
    self.max_pending_packets = 100  # TODO: read from settings
def doFindNewSupplier(self, *args, **kwargs):
    """
    Action method.

    Pick a position in the suppliers family that needs a (new) supplier -
    either an empty slot or one occupied by a dismissed supplier - and
    start the supplier_finder automat for it. Fires 'search-failed' when
    the network is offline or no position is available.
    """
    if _Debug:
        lg.out(
            _DebugLevel,
            'fire_hire.doFindNewSupplier desired_suppliers=%d current_suppliers=%r' % (
                settings.getSuppliersNumberDesired(), contactsdb.suppliers()))
    from p2p import network_connector
    # searching makes no sense while the network is down
    if network_connector.A().state != 'CONNECTED':
        if _Debug:
            lg.out(
                _DebugLevel,
                ' network_connector is not CONNECTED at the moment, SKIP')
        self.automat('search-failed')
        return
    position_for_new_supplier = None
    for pos in range(settings.getSuppliersNumberDesired()):
        # skip positions already being hired for
        if pos in self.hire_list:
            continue
        supplier_idurl = contactsdb.supplier(pos)
        if not supplier_idurl:
            lg.info(
                'found empty supplier at position %d and going to find new supplier on that position' % pos)
            position_for_new_supplier = pos
            break
        if id_url.is_in(supplier_idurl, self.dismiss_list, as_field=False):
            lg.info(
                'going to find new supplier on existing position %d to replace supplier %s' % (
                    pos, supplier_idurl, ))
            position_for_new_supplier = pos
            break
    if position_for_new_supplier is None:
        lg.err('did not found position for new supplier')
        self.automat('search-failed')
        return
    from customer import supplier_finder
    # pre-seed the finder with manually configured candidate IDURLs
    for idurl_txt in strng.to_text(config.conf().getData(
            'services/employer/candidates')).split(','):
        if idurl_txt.strip():
            supplier_finder.AddSupplierToHire(idurl_txt)
    self.hire_list.append(position_for_new_supplier)
    supplier_finder.A(
        'start',
        family_position=position_for_new_supplier,
        ecc_map=eccmap.Current().name,
        family_snapshot=id_url.to_bin_list(contactsdb.suppliers()),
    )
def start(self):
    """
    Start the blockchain service: initialize pybc with the configured
    host/port and seed nodes, optionally start the block explorer, wallet
    and miner, then subscribe to identity/blockchain events.

    Always returns True.
    """
    import os
    from twisted.internet import reactor  # @UnresolvedImport
    from main import config
    from main import settings
    from main import events
    from blockchain import pybc_service
    self.flag_public_key_registered = False
    self.flag_public_key_transaction_sent = False
    pybc_home = settings.BlockchainDir()
    seeds = config.conf().getString('services/blockchain/seeds')
    if seeds:
        # "host:port,host:port,..." string from the config
        seed_nodes = [(
            i.split(':')[0],
            int(i.split(':')[1]),
        ) for i in seeds.split(',')]
    else:
        seed_nodes = pybc_service.seed_nodes()
    pybc_service.init(
        host=config.conf().getData('services/blockchain/host'),
        port=config.conf().getInt('services/blockchain/port'),
        seed_nodes=seed_nodes,
        blockstore_filename=os.path.join(pybc_home, 'blocks'),
        keystore_filename=os.path.join(pybc_home, 'keys'),
        peerstore_filename=os.path.join(pybc_home, 'peers'),
        minify=None,
        loglevel='DEBUG',
        logfilepath=os.path.join(pybc_home, 'log'),
        stats_filename=None,
    )
    # optional sub-components, each gated by its own config flag
    if config.conf().getBool('services/blockchain/explorer/enabled'):
        pybc_service.start_block_explorer(
            config.conf().getInt('services/blockchain/explorer/port'),
            pybc_service.node())
    if config.conf().getBool('services/blockchain/wallet/enabled'):
        pybc_service.start_wallet(
            config.conf().getInt('services/blockchain/wallet/port'),
            pybc_service.node(), pybc_service.wallet())
    if config.conf().getBool('services/blockchain/miner/enabled'):
        # mining loops forever - run it off the reactor thread
        reactor.callFromThread(
            pybc_service.generate_block,
            json_data={},
            with_inputs=True,
            repeat=True,
        )
    events.add_subscriber(self._on_local_identity_modified, 'local-identity-modified')
    events.add_subscriber(self._on_blockchain_forward, 'blockchain-forward')
    events.add_subscriber(self._on_blockchain_sync, 'blockchain-sync')
    reactor.callLater(0, self._do_check_register_my_identity)
    return True
def shutdown():
    """
    Final application teardown: stop the shutdowner automat, shut down the
    events subsystem, clear all state machines, drop config callbacks and
    close log files.

    Returns 0 so the result can be used directly as a process exit code.
    """
    from logs import lg
    from main import config
    from system import bpio
    lg.out(2, 'bpmain.shutdown')
    from . import shutdowner
    shutdowner.A('reactor-stopped')
    from main import events
    events.shutdown()
    from automats import automat
    automat.objects().clear()
    if len(automat.index()) > 0:
        # some automats survived the clear - report them for debugging
        lg.warn('%d automats was not cleaned' % len(automat.index()))
        for a in automat.index().keys():
            lg.out(2, ' %r' % a)
    else:
        lg.out(
            2,
            'bpmain.shutdown automat.objects().clear() SUCCESS, no state machines left in memory'
        )
    config.conf().removeCallback('logs/debug-level')
    # NOTE(review): `threading` is not imported inside this function - assumes
    # a module-level import at the top of the file; confirm.
    lg.out(
        2, 'bpmain.shutdown currently %d threads running:' %
        len(threading.enumerate()))
    for t in threading.enumerate():
        lg.out(2, '    ' + str(t))
    lg.out(2, 'bpmain.shutdown finishing and closing log file, EXIT')
    automat.CloseLogFile()
    lg.close_log_file()
    if bpio.Windows() and bpio.isFrozen():
        # frozen Windows builds redirect stdout - undo that before exiting
        lg.stdout_stop_redirecting()
    return 0
def doStopListening(self, *args, **kwargs):
    """
    Action method.

    Unsubscribe from the router's online status and its active session
    state machine, wipe the persisted router info, reset all router
    related state and rebuild the local identity.
    """
    if online_status.isKnown(self.router_idurl):
        online_status.remove_online_status_listener_callback(
            idurl=self.router_idurl,
            callback_method=self._on_router_contact_status_connected,
        )
        online_status.remove_online_status_listener_callback(
            idurl=self.router_idurl,
            callback_method=self._on_router_contact_status_offline,
        )
    # look up the active router session automat by its stored index
    active_router_session_machine_index = self.router_connection_info.get(
        'index', None)
    if active_router_session_machine_index is not None:
        active_router_session_machine = automat.objects().get(
            active_router_session_machine_index, None)
        if active_router_session_machine is not None:
            active_router_session_machine.removeStateChangedCallback(
                self._on_router_session_disconnected)
            lg.info('removed callback from router active session: %r' %
                    active_router_session_machine)
        else:
            lg.err(
                'did not found active router session state machine with index %s' %
                active_router_session_machine_index)
    # if contact_status.isKnown(self.router_idurl):
    #     contact_status.A(self.router_idurl).removeStateChangedCallback(self._on_router_contact_status_connected)
    #     contact_status.A(self.router_idurl).removeStateChangedCallback(self._on_router_contact_status_offline)
    WriteMyOriginalIdentitySource('')
    config.conf().setString('services/proxy-transport/current-router', '')
    callback.remove_inbox_callback(self._on_inbox_packet_received)
    # drop every piece of cached router state
    self.router_identity = None
    self.router_idurl = None
    self.router_id = ''
    self.router_proto_host = None
    self.request_service_packet_id = []
    self.router_connection_info = None
    my_id.rebuildLocalIdentity()
    if _Debug:
        lg.out(_DebugLevel, 'proxy_receiver.doStopListening')
def doLoadRouterInfo(self, *args, **kwargs):
    """
    Action method.

    Restore the router IDURL from the persisted
    "services/proxy-transport/current-router" config value.
    """
    raw_value = config.conf().getString('services/proxy-transport/current-router').strip()
    try:
        # the value is stored as "idurl proto host" - IDURL comes first
        self.router_idurl = strng.to_bin(raw_value.split(' ')[0])
    except:
        lg.exc()
    if _Debug:
        lg.out(_DebugLevel, 'proxy_receiver.doLoadRouterInfo : %s' % self.router_idurl)
def doStartListening(self, arg):
    """
    Action method.

    Remember the active router's proto/host pair, cache its identity,
    persist the router info and the original identity source, then start
    receiving inbox packets and watching the router's contact status.
    """
    try:
        # normal path: the event argument carries connection info
        _, info = arg
        self.router_proto_host = (info.proto, info.host)
    except:
        try:
            # fallback: restore proto/host from the saved config value
            s = config.conf().getString(
                'services/proxy-transport/current-router').strip()
            _, router_proto, router_host = s.split(' ')
            self.router_proto_host = (router_proto, router_host)
        except:
            lg.exc()
    self.router_identity = identitycache.FromCache(self.router_idurl)
    config.conf().setString(
        'services/proxy-transport/current-router', '%s %s %s' %
        (self.router_idurl, self.router_proto_host[0],
         self.router_proto_host[1]))
    current_identity = my_id.getLocalIdentity().serialize()
    previous_identity = ReadMyOriginalIdentitySource()
    if previous_identity:
        # never overwrite an already-stored original identity
        lg.warn('my original identity is not empty, SKIP overwriting')
        lg.out(2, '\nPREVIOUS ORIGINAL IDENTITY:\n%s\n' % current_identity)
    else:
        WriteMyOriginalIdentitySource(current_identity)
        lg.warn('current identity was stored as my-original-identity')
    self.request_service_packet_id = []
    callback.insert_inbox_callback(0, self._on_inbox_packet_received)
    if contact_status.isKnown(self.router_idurl):
        # react when the router goes online/offline
        contact_status.A(self.router_idurl).addStateChangedCallback(
            self._on_router_contact_status_connected, newstate='CONNECTED')
        contact_status.A(self.router_idurl).addStateChangedCallback(
            self._on_router_contact_status_offline, newstate='OFFLINE')
    if _Debug:
        lg.out(
            2,
            'proxy_receiver.doStartListening !!!!!!! router: %s at %s://%s' %
            (self.router_idurl, self.router_proto_host[0],
             self.router_proto_host[1]))
def by_host():
    """
    Return the known identity servers as a dict:
    {host: (web_port, tcp_port)}. The result is cached in the module
    global _KnownServers after the first call.

    Keys are domain names or global IP addresses (not recommended) of the
    ID server. Values are ``Web port`` (reading) and ``TCP port``
    (writing) numbers.

    You can override those "genesis" nodes by configuring a list of your
    preferred identity servers in the program settings:

        api.config_set(
            "services/identity-propagate/known-servers",
            "myfirstserver.net:80:6661, secondmachine.net:8080:6662, thirdnode.gov.eu:80:16661",
        )

    This way you can create your own BitDust network, under your full control.
    """
    global _KnownServers
    # cached result from a previous call
    if _KnownServers is not None:
        return _KnownServers
    try:
        from main import config
        overridden_identity_servers_str = str(config.conf().getData('services/identity-propagate/known-servers'))
    except:
        overridden_identity_servers_str = ''
    if not overridden_identity_servers_str:
        _KnownServers = default_nodes()
        return _KnownServers
    overridden_identity_servers = {}
    # entries are comma separated "host:web_port:tcp_port" triples
    for id_server_str in overridden_identity_servers_str.split(','):
        if id_server_str.strip():
            try:
                id_server = id_server_str.strip().split(':')
                id_server_host = id_server[0].strip()
                id_server_web_port = int(id_server[1].strip())
                id_server_tcp_port = int(id_server[2].strip())
            except:
                # malformed entries are silently skipped on purpose
                continue
            overridden_identity_servers[id_server_host] = (id_server_web_port, id_server_tcp_port, )
    if overridden_identity_servers:
        from logs import lg
        lg.info('Identity servers was overridden in local settings: %s' % overridden_identity_servers)
        _KnownServers = overridden_identity_servers
        return _KnownServers
    # every configured entry failed to parse - fall back to defaults
    _KnownServers = default_nodes()
    return _KnownServers
def stop(self):
    """
    Tear down the backup subsystems: drop event subscriptions, inbox and
    p2p_connector callbacks, destroy the monitor and shut down the
    storage components.
    """
    from storage import backup_fs
    from storage import backup_monitor
    from storage import backup_control
    from transport import callback
    from p2p import p2p_connector
    from main import events
    from main.config import conf
    events.remove_subscriber(self._on_key_erased, 'key-erased')
    events.remove_subscriber(self._on_my_identity_rotated, 'my-identity-rotated')
    callback.remove_inbox_callback(self._on_inbox_packet_received)
    connector = p2p_connector.A()
    if connector:
        connector.removeStateChangedCallback(self._on_p2p_connector_state_changed)
    backup_monitor.Destroy()
    backup_fs.shutdown()
    backup_control.shutdown()
    conf().removeConfigNotifier(
        'services/backups/keep-local-copies-enabled')
    return True
def doSendMyIdentity(self, arg):
    """
    Action method.

    Send the current local identity to the router; when a saved
    "my-original-identity" exists, send that one as well.
    """
    if _Debug:
        lg.out(_DebugLevel, 'proxy_receiver.doSendMyIdentity to %s' % self.router_idurl)
    current_identity_xml = my_id.getLocalIdentity().serialize()
    self._do_send_identity_to_router(current_identity_xml, failed_event='fail-received')
    stored_identity_xml = config.conf().getData('services/proxy-transport/my-original-identity').strip()
    if stored_identity_xml:
        if _Debug:
            lg.out(_DebugLevel, ' also sending identity loaded from "my-original-identity" config')
        self._do_send_identity_to_router(stored_identity_xml, failed_event='fail-received')
def start(self):
    """
    Create and initialize the proxy network transport and subscribe to
    the related config options.

    Returns False when no underlying transports are available, otherwise
    a Deferred which fires when the transport reports its state.
    """
    from twisted.internet import reactor  # @UnresolvedImport
    from twisted.internet.defer import Deferred
    from logs import lg
    from transport.proxy import proxy_interface
    from transport import network_transport
    from transport import gateway
    from main.config import conf
    if not self._available_transports():
        lg.warn('no transports available')
        return False
    self._check_reset_original_identity()
    self.starting_deferred = Deferred()
    self.transport = network_transport.NetworkTransport('proxy', proxy_interface.GateInterface())
    self.transport.automat('init', (gateway.listener(), self._on_transport_state_changed))
    reactor.callLater(0, self.transport.automat, 'start')
    for option, handler in (
        ('services/proxy-transport/enabled', self._on_enabled_disabled),
        ('services/proxy-transport/sending-enabled', self._on_sending_enabled_disabled),
        ('services/proxy-transport/receiving-enabled', self._on_receiving_enabled_disabled),
    ):
        conf().addCallback(option, handler)
    return self.starting_deferred
def start(self): from twisted.internet import reactor # @UnresolvedImport from twisted.internet.defer import Deferred from logs import lg from transport.proxy import proxy_interface from transport import network_transport from transport import gateway from services import driver from main import events from main.config import conf if len(self._available_transports()) == 0: lg.warn('no transports available') return False events.add_subscriber(self._on_dht_layer_connected, 'dht-layer-connected') self._check_reset_original_identity() self.starting_deferred = Deferred() self.transport = network_transport.NetworkTransport( 'proxy', proxy_interface.GateInterface()) conf().addConfigNotifier('services/proxy-transport/enabled', self._on_enabled_disabled) conf().addConfigNotifier('services/proxy-transport/sending-enabled', self._on_sending_enabled_disabled) conf().addConfigNotifier('services/proxy-transport/receiving-enabled', self._on_receiving_enabled_disabled) if driver.is_on('service_entangled_dht'): self._do_join_proxy_routers_dht_layer() else: self.transport.automat( 'init', (gateway.listener(), self._on_transport_state_changed)) reactor.callLater(0, self.transport.automat, 'start') # @UndefinedVariable return self.starting_deferred
def state_changed(self, oldstate, newstate, event, *args, **kwargs):
    """
    Automat state transition hook: re-fires an 'instant' event when
    entering INCOMMING?, and starts/stops the periodic ID-server health
    check when entering/leaving CONNECTED.
    """
    if newstate == 'INCOMMING?' and event != 'instant':
        # trigger myself once more so the instant condition is evaluated
        self.automat('instant')
    if newstate == 'CONNECTED':
        check_interval = config.conf().getInt(
            'services/identity-propagate/health-check-interval-seconds')
        self.health_check_task = LoopingCall(self._do_id_server_health_check)
        self.health_check_task.start(check_interval, now=False)
    elif self.health_check_task:
        self.health_check_task.stop()
        self.health_check_task = None
def init():
    """
    Prepare the gateway: open the transport log, create the local
    listener proxy if it does not exist yet and read the packet-log flag.
    """
    global _LocalListener
    global _PacketLogFileEnabled
    if _Debug:
        lg.out(4, 'gateway.init')
    open_transport_log(settings.TransportLog())
    if not _LocalListener:
        _LocalListener = TransportGateLocalProxy()
    else:
        lg.warn('local listener already exist')
    _PacketLogFileEnabled = config.conf().getBool('logs/packet-enabled')
def doSaveMyName(self, arg):
    """
    Action method.

    Extract the chosen login (and optional preferred servers) from the
    event argument, resolve known/preferred ID servers and min/max server
    counts from the config, then persist the user name.
    """
    try:
        # dict-style argument
        login = arg['username']
    except:
        # tuple/list-style argument: (username, "srv1,srv2,...")
        login = arg[0]
        if len(arg) > 1:
            # NOTE(review): on Python 3 map() returns an iterator and the
            # .append() below would fail - this fragment assumes Python 2
            self.preferred_servers = map(lambda s: s.strip(), arg[1].split(','))
    if not self.known_servers:
        self.known_servers = known_servers.by_host()
    if not self.preferred_servers:
        try:
            from main import config
            for srv in str(config.conf().getData(
                    'services/identity-propagate/preferred-servers')
                    ).split(','):
                if srv.strip():
                    self.preferred_servers.append(srv.strip())
        except:
            pass
    # clamp configured values into the [MinimumIdentitySources, MaximumIdentitySources] range
    self.min_servers = max(
        settings.MinimumIdentitySources(),
        config.conf().getInt('services/identity-propagate/min-servers') or
        settings.MinimumIdentitySources())
    self.max_servers = min(
        settings.MaximumIdentitySources(),
        config.conf().getInt('services/identity-propagate/max-servers') or
        settings.MaximumIdentitySources())
    lg.out(4, 'id_registrator.doSaveMyName [%s]' % login)
    lg.out(4, '    known_servers=%s' % self.known_servers)
    lg.out(4, '    preferred_servers=%s' % self.preferred_servers)
    lg.out(4, '    min_servers=%s' % self.min_servers)
    lg.out(4, '    max_servers=%s' % self.max_servers)
    bpio.WriteFile(settings.UserNameFilename(), login)
def start(self):
    """
    Start the broadcasting service either as a routing node or as a
    passive listener, depending on the routing-enabled setting.

    Returns a Deferred which fires when the service finishes starting.
    """
    from twisted.internet.defer import Deferred
    from broadcast import broadcasters_finder
    from broadcast import broadcaster_node
    from broadcast import broadcast_listener
    from broadcast import broadcast_service
    from main.config import conf
    from main import settings
    self.starting_deferred = Deferred()
    broadcasters_finder.A('init')
    if settings.enableBroadcastRouting():
        # act as a routing node in the broadcasting tree
        broadcaster_node.A('init', broadcast_service.on_incoming_broadcast_message)
        broadcaster_node.A().addStateChangedCallback(self._on_broadcaster_node_switched)
    else:
        # passive listener only
        broadcast_listener.A('init', broadcast_service.on_incoming_broadcast_message)
        broadcast_listener.A().addStateChangedCallback(self._on_broadcast_listener_switched)
        broadcast_listener.A('connect', self.scope)
    conf().addConfigNotifier('services/broadcasting/routing-enabled',
                             self._on_broadcast_routing_enabled_disabled)
    return self.starting_deferred
def start(self):
    """
    Initialize the backup storage subsystems, wire the GUI notification
    callbacks (new or legacy web GUI), kick off the backup monitor and
    subscribe to config changes, p2p_connector state transitions and
    inbox packets.

    Always returns True.
    """
    from storage import backup_fs
    from storage import backup_control
    from storage import backup_matrix
    from storage import backup_monitor
    from main import settings
    from main.config import conf
    from transport import callback
    from p2p import p2p_connector
    backup_fs.init()
    backup_control.init()
    backup_matrix.init()
    if settings.NewWebGUI():
        from web import control
        backup_matrix.SetBackupStatusNotifyCallback(
            control.on_backup_stats)
        backup_matrix.SetLocalFilesNotifyCallback(
            control.on_read_local_files)
    else:
        # legacy web GUI uses differently-named callback entry points
        from web import webcontrol
        backup_matrix.SetBackupStatusNotifyCallback(
            webcontrol.OnBackupStats)
        backup_matrix.SetLocalFilesNotifyCallback(
            webcontrol.OnReadLocalFiles)
    backup_monitor.A('init')
    backup_monitor.A('restart')
    conf().addCallback('services/backups/keep-local-copies-enabled',
                       self._on_keep_local_copies_modified)
    conf().addCallback('services/backups/wait-suppliers-enabled',
                       self._on_wait_suppliers_modified)
    # re-check backups whenever the p2p connection is (re)established
    p2p_connector.A().addStateChangedCallback(
        self._on_p2p_connector_state_changed, 'INCOMMING?', 'CONNECTED')
    p2p_connector.A().addStateChangedCallback(
        self._on_p2p_connector_state_changed, 'MY_IDENTITY', 'CONNECTED')
    callback.append_inbox_callback(self._on_inbox_packet_received)
    return True
def doSaveMyName(self, *args, **kwargs):
    """
    Action method.

    Extract the chosen login and optional preferred servers from the
    event arguments, resolve known/preferred ID servers and min/max
    server counts from the config, then persist the user name.
    """
    if args:
        # dict-style positional argument
        login = args[0]['username']
        preferred_servers = args[0].get('preferred_servers', [])
    else:
        login = kwargs['username']
        preferred_servers = kwargs.get('preferred_servers', [])
    self.preferred_servers = [s.strip() for s in preferred_servers]
    if not self.known_servers:
        self.known_servers = known_servers.by_host()
    if not self.preferred_servers:
        # fall back to the comma-separated config option
        try:
            for srv in strng.to_text(config.conf().getData(
                    'services/identity-propagate/preferred-servers')
                    ).split(','):
                if srv.strip():
                    self.preferred_servers.append(srv.strip())
        except:
            pass
    # clamp configured values into the [MinimumIdentitySources, MaximumIdentitySources] range
    self.min_servers = max(
        settings.MinimumIdentitySources(),
        config.conf().getInt('services/identity-propagate/min-servers') or
        settings.MinimumIdentitySources())
    self.max_servers = min(
        settings.MaximumIdentitySources(),
        config.conf().getInt('services/identity-propagate/max-servers') or
        settings.MaximumIdentitySources())
    lg.out(4, 'id_registrator.doSaveMyName [%s]' % login)
    lg.out(4, '    known_servers=%s' % self.known_servers)
    lg.out(4, '    preferred_servers=%s' % self.preferred_servers)
    lg.out(4, '    min_servers=%s' % self.min_servers)
    lg.out(4, '    max_servers=%s' % self.max_servers)
    bpio.WriteTextFile(settings.UserNameFilename(), login)
def stop(self):
    """
    Unsubscribe from the proxy-transport config options and shut down the
    transport automat.

    Returns an already-fired Deferred with True.
    """
    from twisted.internet.defer import succeed
    from main.config import conf
    for option in (
        'services/proxy-transport/enabled',
        'services/proxy-transport/sending-enabled',
        'services/proxy-transport/receiving-enabled',
    ):
        conf().removeCallback(option)
    # detach the transport before shutting it down
    transport, self.transport = self.transport, None
    transport.automat('shutdown')
    return succeed(True)
def _on_my_keys_synchronize_failed(self, evt):
    """
    When key synchronization fails and the reset option is enabled, erase
    the remote ".keys" folder from the catalog and reconnect.
    """
    from logs import lg
    from main import config
    from interface import api
    from userid import global_id
    from userid import my_id
    # only act when the user explicitly allowed dropping unreliable copies
    if not config.conf().getBool(
            'services/keys-storage/reset-unreliable-backup-copies'):
        return
    keys_folder_global_id = global_id.MakeGlobalID(
        key_alias='master',
        customer=my_id.getGlobalID(),
        path='.keys',
    )
    lg.info('about to erase ".keys" folder in the catalog: %r' % keys_folder_global_id)
    result = api.file_delete(keys_folder_global_id)
    if result['status'] == 'OK':
        api.network_reconnect()
def on_incoming_message(request, info, status, error_message):
    """
    Message came in for us.

    Deserialize and decrypt the incoming PrivateMessage, drop duplicates
    seen recently, then dispatch the decoded JSON to the registered
    incoming-message callbacks. Unhandled messages may be acknowledged as
    "unread" when that option is enabled.

    Returns False when the packet cannot be processed, True otherwise.
    """
    global _IncomingMessageCallbacks
    if _Debug:
        lg.out(_DebugLevel, "message.on_incoming_message new PrivateMessage %r from %s" % (request.PacketID, request.OwnerID, ))
    private_message_object = PrivateMessage.deserialize(request.Payload)
    if private_message_object is None:
        lg.err("PrivateMessage deserialize failed, can not extract message from request payload of %d bytes" % len(request.Payload))
        return False
    # if request.PacketID.startswith('queue_'):
    #     queue_id, unique_id = packetid.SplitQueueMessagePacketID(request.PacketID)
    try:
        decrypted_message = private_message_object.decrypt()
        json_message = serialization.BytesToDict(
            decrypted_message,
            unpack_types=True,
            encoding='utf-8',
        )
        json_message = jsn.dict_keys_to_text(jsn.dict_values_to_text(json_message))
    except Exception as exc:
        lg.err('decrypt %r failed: %r' % (private_message_object, exc, ))
        return False
    # de-duplicate against the recent history of received packet IDs
    if request.PacketID in received_messages_ids():
        lg.warn("skip incoming message %s because found in recent history" % request.PacketID)
        return False
    received_messages_ids().append(request.PacketID)
    if len(received_messages_ids()) > 100:
        # trim/reset the history when it grows too large
        received_messages_ids(True)
    handled = False
    try:
        # first callback that returns truthy consumes the message
        for cb in _IncomingMessageCallbacks:
            handled = cb(request, private_message_object, json_message)
            if _Debug:
                lg.args(_DebugLevel, cb=cb, packet_id=request.PacketID, handled=handled)
            if handled:
                break
    except:
        lg.exc()
    if _Debug:
        lg.args(_DebugLevel, msg_len=len(decrypted_message), handled=handled)
    if handled:
        return True
    if config.conf().getBool('services/private-messages/acknowledge-unread-messages-enabled'):
        # nobody consumed the message - tell the sender it is unread
        p2p_service.SendAckNoRequest(request.OwnerID, request.PacketID, response='unread')
    return True