def _on_user_priv_key_shared(self, response):
    """
    Callback fired when the private group key was successfully delivered to the remote node.

    Logs the fact and pushes the 'private-key-shared' event into this state machine.
    """
    lg.info('private group key %s was sent to %s' % (self.group_key_id, self.remote_idurl))
    self.automat('private-key-shared', response)
    return None
def _do_restore_next_backup(self, backup_index):
    """
    Start restoring the backup at position `backup_index` within `self.selected_backups`.

    When the index runs past the end of the list all selected backups were processed and
    the machine receives 'extract-all-done' with the accumulated `self.extracted_messages`.
    Otherwise a temporary ".tar.gz" output file is created and a RestoreWorker is started;
    its Deferred routes success to `_on_restore_done` and failure to `_on_restore_failed`.
    """
    if _Debug:
        lg.args(_DebugLevel, backup_index=backup_index, selected_backups=len(self.selected_backups))
    if backup_index >= len(self.selected_backups):
        # every selected backup was already restored
        lg.info('all selected backups are processed')
        self.automat('extract-all-done', self.extracted_messages)
        return
    backup_id = self.selected_backups[backup_index]
    # sanitize backup_id characters so it can be used as a file name prefix
    outfd, outfilename = tmpfile.make(
        'restore',
        extension='.tar.gz',
        prefix=backup_id.replace('@', '_').replace('.', '_').replace('/', '_').replace(':', '_') + '_',
    )
    rw = restore_worker.RestoreWorker(backup_id, outfd, KeyID=self.group_key_id)
    rw.MyDeferred.addCallback(self._on_restore_done, backup_id, outfd, outfilename, backup_index)
    if _Debug:
        # extra errback for verbose tracing in debug mode
        rw.MyDeferred.addErrback(lg.errback, debug=_Debug, debug_level=_DebugLevel, method='archive_reader.doStartRestoreWorker')
    rw.MyDeferred.addErrback(self._on_restore_failed, backup_id, outfd, outfilename, backup_index)
    rw.automat('init')
def create_archive_folder(group_key_id, force_path_id=None):
    """
    Make sure a hidden ".archive/<key_alias>" folder exists in the remote catalog
    for the given group key.

    :param group_key_id: group key ID, split into alias and creator IDURL
    :param force_path_id: when not None, require the folder to have exactly this catalog path ID
    :return: the catalog path ID of the archive folder, or None on error / path ID mismatch
    """

    def _verified_path_id(path_id):
        # reject the folder if the caller insisted on a specific path ID that does not match
        if force_path_id is not None and force_path_id != path_id:
            lg.err('archive folder exists, but have different path ID in the catalog: %r' % path_id)
            return None
        return path_id

    group_key_alias, group_creator_idurl = my_keys.split_key_id(group_key_id)
    catalog_path = os.path.join('.archive', group_key_alias)
    archive_folder_catalog_path = global_id.MakeGlobalID(
        key_alias=group_key_alias,
        customer=group_creator_idurl.to_id(),
        path=catalog_path,
    )
    res = api.file_exists(archive_folder_catalog_path)
    if res['status'] != 'OK':
        lg.err('failed to check archive folder in the catalog: %r' % res)
        return None
    if res['result']['exist']:
        # folder already present in the catalog
        return _verified_path_id(res['result']['path_id'])
    res = api.file_create(archive_folder_catalog_path, as_folder=True, exist_ok=True, force_path_id=force_path_id)
    if res['status'] != 'OK':
        lg.err('failed to create archive folder in the catalog: %r' % res)
        return None
    if res['result']['created']:
        lg.info('created new archive folder in the catalog: %r' % res)
    else:
        lg.info('archive folder already exist in the catalog: %r' % res)
    return _verified_path_id(res['result']['path_id'])
def StopOverridingIdentity(idurl):
    """
    Remove the overridden identity source stored for `idurl`.

    Handles IDURL rotation first: if the given IDURL is not the latest known one and an
    override exists under the old (original) form, the override entry is re-keyed to the
    latest binary form before removal is attempted.

    :return: the previously stored overridden identity source (bytes), or None if there was none
    """
    global _OverriddenIdentities
    idurl = id_url.field(idurl)
    if not idurl.is_latest():
        if idurl.original() in _OverriddenIdentities:
            if idurl.to_bin() not in _OverriddenIdentities:
                # migrate the override entry from the rotated (old) key to the latest key
                _OverriddenIdentities[idurl.to_bin()] = _OverriddenIdentities.pop(idurl.original())
                lg.info('detected and processed idurl rotate for overridden identity : %r -> %r' % (idurl.original(), idurl.to_bin()))
    idurl = id_url.to_bin(idurl)
    # pop returns None when no override existed for this idurl
    result = _OverriddenIdentities.pop(idurl, None)
    if _Debug:
        lg.out(_DebugLevel, 'identitycache.StopOverridingIdentity removed overridden source for %s' % idurl)
        if result:
            lg.out(_DebugLevel, ' previous overridden identity was %d bytes' % len(result))
        lg.out(_DebugLevel, ' total number of overrides is %d' % len(_OverriddenIdentities))
    return result
def default_nodes():
    """
    List of DHT nodes currently maintained : (host, UDP port number)
    """
    from system import bpio
    from system import local_fs
    from lib import serialization
    from main import settings
    from logs import lg
    # the full networks description ships next to the executable
    networks_json = serialization.BytesToDict(
        local_fs.ReadBinaryFile(os.path.join(bpio.getExecutableDir(), 'networks.json')))
    my_network = local_fs.ReadTextFile(settings.NetworkFileName()).strip()
    # fall back to the "main" network when unset or unknown
    if not my_network or my_network not in networks_json:
        my_network = 'main'
    network_info = networks_json[my_network]
    dht_seeds = [(seed['host'], seed['udp_port']) for seed in network_info['dht-seeds']]
    lg.info('Active network is [%s] dht_seeds=%s' % (my_network, dht_seeds))
    return dht_seeds
def state_changed(self, oldstate, newstate, event, *args, **kwargs):
    """
    Method to catch the moment when `online_status()` state were changed.
    """
    if _Debug:
        lg.out(_DebugLevel - 2, '%s : [%s]->[%s]' % (self.name, oldstate, newstate))
    push_snapshot = False
    if newstate == 'CONNECTED':
        lg.info('remote node connected : %s' % self.idurl)
        events.send('node-connected', data=dict(
            global_id=self.idurl.to_id(),
            idurl=self.idurl,
            old_state=oldstate,
            new_state=newstate,
        ))
        push_snapshot = True
    if newstate == 'OFFLINE' and oldstate != 'AT_STARTUP':
        lg.info('remote node disconnected : %s' % self.idurl)
        events.send('node-disconnected', data=dict(
            global_id=self.idurl.to_id(),
            idurl=self.idurl,
            old_state=oldstate,
            new_state=newstate,
        ))
        push_snapshot = True
    if newstate == 'PING?' and oldstate != 'AT_STARTUP':
        push_snapshot = True
    # the three conditions above are mutually exclusive on newstate,
    # so at most one snapshot is pushed per transition - same as before
    if push_snapshot:
        listeners.push_snapshot('online_status', snap_id=self.idurl.to_bin(), data=self.to_json())
def _on_my_dht_relations_discovered(self, discovered_suppliers_list):
    from p2p import p2p_service
    from contacts import contactsdb
    from logs import lg
    # collect DHT-discovered suppliers which are no longer in my local suppliers list
    to_dismiss = set()
    for supplier_idurl in discovered_suppliers_list:
        if not supplier_idurl:
            continue
        if contactsdb.is_supplier(supplier_idurl):
            continue
        lg.warn('dht relation with %s is not valid anymore' % supplier_idurl)
        to_dismiss.add(supplier_idurl)
    # ask each stale supplier to cancel both related services
    for supplier_idurl in to_dismiss:
        p2p_service.SendCancelService(
            remote_idurl=supplier_idurl,
            service_name='service_supplier',
        )
        p2p_service.SendCancelService(
            remote_idurl=supplier_idurl,
            service_name='service_supplier_relations',
        )
    if to_dismiss:
        lg.info('found %d suppliers to be cleaned and sent CancelService() packets' % len(to_dismiss))
def _on_my_suppliers_all_hired(self, evt):
    from logs import lg
    from services import driver
    # once the full set of suppliers is hired, bring up data motion (if enabled and not running)
    if driver.is_enabled('service_data_motion') and not driver.is_started('service_data_motion'):
        lg.info('all my suppliers are hired, starting service_data_motion()')
        driver.start_single('service_data_motion')
def _do_check_sync_keys(self, result):
    """
    Ensure the remote ".keys" catalog folder exists, then kick off keys synchronization.

    :param result: a Deferred that is fired with errback here when the folder cannot be
        created, otherwise it is handed over to the `keys_synchronizer` state machine
        which is responsible for firing it.
    """
    from logs import lg
    from interface import api
    from storage import keys_synchronizer
    from userid import global_id
    from userid import my_id
    self.sync_keys_requested = False
    global_keys_folder_path = global_id.MakeGlobalID(
        key_alias='master', customer=my_id.getGlobalID(), path='.keys')
    res = api.file_exists(global_keys_folder_path)
    # folder is missing (or the existence check gave no usable answer) -> try to create it
    if res['status'] != 'OK' or not res['result'] or not res['result'].get('exist'):
        res = api.file_create(global_keys_folder_path, as_folder=True)
        if res['status'] != 'OK':
            lg.err('failed to create ".keys" folder "%s" in the catalog: %r' % (global_keys_folder_path, res))
            result.errback(Exception('failed to create keys folder "%s" in the catalog: %r' % (global_keys_folder_path, res)))
            return
        lg.info('created new remote folder ".keys" in the catalog: %r' % global_keys_folder_path)
    keys_synchronizer.A('sync', result)
def doFindNewSupplier(self, *args, **kwargs):
    """
    Action method.

    Pick the first family position that needs a new supplier - either an empty slot or a
    slot occupied by a supplier scheduled for dismissal - and start the supplier finder.
    Fires 'search-failed' when no such position exists.
    """
    if _Debug:
        lg.out(_DebugLevel, 'fire_hire.doFindNewSupplier')
    target_position = None
    for pos in range(settings.getSuppliersNumberDesired()):
        if pos in self.hire_list:
            # a hire is already in progress for that slot
            continue
        supplier_idurl = contactsdb.supplier(pos)
        if not supplier_idurl:
            lg.info('found empty supplier at position %d and going to find new supplier on that position' % pos)
            target_position = pos
            break
        if supplier_idurl in self.dismiss_list:
            lg.info('going to find new supplier on existing position %d to replace supplier %s' % (pos, supplier_idurl))
            target_position = pos
            break
    if target_position is None:
        lg.err('did not found position for new supplier')
        self.automat('search-failed')
        return
    self.hire_list.append(target_position)
    supplier_finder.A(
        'start',
        family_position=target_position,
        ecc_map=eccmap.Current().name,
        family_snapshot=contactsdb.suppliers(),
    )
def on_ping_success(response_tuple, idurl):
    """
    Record the moment the node at `idurl` replied with Ack to our ping.
    """
    global _LastUserPingTime
    now = time.time()
    _LastUserPingTime[idurl] = now
    lg.info('node %s replied with Ack : %s' % (idurl, response_tuple))
def doTestIdleDays(self, *args, **kwargs):
    """
    Action method.

    Scan all known customers and collect those with no recorded activity for more than
    the configured number of idle days. Fires 'found-idle-customers' with the list of
    such customers, or 'no-idle-customers' when none were found or the feature is off.
    """
    dead_customers = []
    customer_idle_days = config.conf().getInt('services/customer-patrol/customer-idle-days', 0)
    if not customer_idle_days:
        # feature disabled in settings
        self.automat('no-idle-customers')
        return
    # take the timestamp once so the idle check and the logged delta use the same value
    now = utime.get_sec1970()
    max_idle_seconds = customer_idle_days * 24 * 60 * 60
    for customer_idurl in contactsdb.customers():
        connected_time = ratings.connected_time(customer_idurl.to_bin())
        if connected_time is None:
            lg.warn('last connected_time for customer %r is unknown, rejecting customer' % customer_idurl)
            dead_customers.append(customer_idurl)
            continue
        if now - connected_time > max_idle_seconds:
            lg.warn('customer %r connected last time %r seconds ago, rejecting customer' % (
                customer_idurl,
                now - connected_time,
            ))
            dead_customers.append(customer_idurl)
    if dead_customers:
        lg.warn('found idle customers: %r' % dead_customers)
        self.automat('found-idle-customers', dead_customers)
    else:
        lg.info('all customers has some activity recently, no idle customers found')
        self.automat('no-idle-customers')
def audit_public_key(key_id, untrusted_idurl, timeout=10):
    """
    Be sure remote user stores given public key.

    I also need to store that public key locally in order to do such audit.
    I will send him a random string, he needs to encrypt it and send me back.
    I can compare his encrypted output with mine.

    Returns Deferred object.
    """
    if _Debug:
        lg.out(_DebugLevel, 'key_ring.audit_public_key testing %s from %s' % (key_id, untrusted_idurl))
    key_id = my_keys.latest_key_id(key_id)
    result = Deferred()
    # the remote user's identity must already be cached to encrypt for him
    recipient_id_obj = identitycache.FromCache(untrusted_idurl)
    if not recipient_id_obj:
        lg.warn('not found "%s" in identity cache' % untrusted_idurl)
        result.errback(Exception('not found "%s" in identity cache' % untrusted_idurl))
        return result
    key_alias, creator_idurl = my_keys.split_key_id(key_id)
    if not key_alias or not creator_idurl:
        lg.warn('wrong key_id')
        result.errback(Exception('wrong key_id'))
        return result
    if untrusted_idurl == creator_idurl and key_alias == 'master':
        # auditing the remote user's own master key: no local registration required
        lg.info('doing audit of master key (public part) of remote user')
    else:
        # any other key must be known locally, otherwise there is nothing to compare against
        if not my_keys.is_key_registered(key_id):
            lg.warn('unknown key: "%s"' % key_id)
            result.errback(Exception('unknown key: "%s"' % key_id))
            return result
    # the random sample the remote node must encrypt and send back
    public_test_sample = key.NewSessionKey(session_key_type=key.SessionKeyType())
    json_payload = {
        'key_id': key_id,
        'audit': {
            'public_sample': base64.b64encode(public_test_sample),
            'private_sample': '',
        }
    }
    raw_payload = serialization.DictToBytes(json_payload, values_to_text=True)
    block = encrypted.Block(
        BackupID=key_id,
        Data=raw_payload,
        SessionKey=key.NewSessionKey(session_key_type=key.SessionKeyType()),
        SessionKeyType=key.SessionKeyType(),
        # encrypt data using public key of recipient
        EncryptKey=lambda inp: recipient_id_obj.encrypt(inp),
    )
    encrypted_payload = block.Serialize()
    p2p_service.SendAuditKey(
        remote_idurl=recipient_id_obj.getIDURL(),
        encrypted_payload=encrypted_payload,
        packet_id=key_id,
        timeout=timeout,
        callbacks={
            # Ack carries the remote node's encrypted sample to compare against ours
            commands.Ack(): lambda response, info:
                _on_audit_public_key_response(response, info, key_id, untrusted_idurl, public_test_sample, result),
            commands.Fail(): lambda response, info: result.errback(Exception(response)),
            None: lambda pkt_out: result.errback(Exception('timeout')),  # timeout
        },
    )
    return result
def _on_transfer_key_response(response, info, key_id, result):
    """
    Handle the remote node's response (or timeout) after a key transfer attempt.

    :param response: incoming packet, or a falsy value on timeout
    :param info: transport info for the packet, or falsy on timeout
    :param key_id: the key that was transferred (used for logging)
    :param result: Deferred fired with callback on success / errback on failure
    :return: always None
    """
    if not response or not info:
        # no reply arrived in time
        if not result.called:
            result.errback(Exception('timeout'))
        if _Debug:
            lg.warn('transfer failed, response timeout')
        return None
    if response.Command == commands.Ack():
        if not result.called:
            result.callback(response)
        if _Debug:
            lg.info('key %s transfer success to %s' % (key_id, response.OwnerID))
        return None
    if response.Command == commands.Fail():
        err_msg = strng.to_text(response.Payload, errors='ignore')
        # idiom fix: membership test instead of .count() used as a boolean
        if 'key already registered' in err_msg:
            # it is okay to have "Fail()" response in that case
            if not result.called:
                result.callback(response)
            if _Debug:
                lg.warn('key %s already registered on %s' % (key_id, response.OwnerID))
            return None
    if not result.called:
        result.errback(Exception(response.Payload))
    if _Debug:
        lg.warn('key transfer failed: %s' % response.Payload)
    return None
def QueueSendFile(self, fileName, packetID, remoteID, ownerID, callOnAck=None, callOnFail=None):
    """
    Put an outgoing file into the sending queue of the given supplier,
    creating that queue on first use.

    Returns False immediately (and schedules `callOnFail`) when the file
    does not exist on disk.
    """
    remoteID = id_url.field(remoteID)
    ownerID = id_url.field(ownerID)
    if not os.path.exists(fileName):
        lg.err("%s not exist" % fileName)
        if callOnFail is not None:
            reactor.callLater(.01, callOnFail, remoteID, packetID, 'not exist')  # @UndefinedVariable
        return False
    if remoteID not in self.supplierQueues:
        self.supplierQueues[remoteID] = SupplierQueue(remoteID, self.creatorID)
        lg.info("made a new sending queue for %s" % nameurl.GetName(remoteID))
    return self.supplierQueues[remoteID].SupplierSendFile(fileName, packetID, ownerID, callOnAck, callOnFail)
def eraseLocalIdentity(do_backup=True):
    """
    Remove my local identity file from the disk.

    When `do_backup` is True a copy of the current identity XML is first written
    into a temp file under the metadata folder. Returns True when the file is gone
    (or never existed), False when removal failed or the path is not a regular file.
    """
    if do_backup and os.path.isfile(settings.LocalIdentityFilename()):
        current_identity_xmlsrc = local_fs.ReadBinaryFile(settings.LocalIdentityFilename())
        if current_identity_xmlsrc:
            # keep a backup copy before erasing
            fd, fname = tempfile.mkstemp(prefix='localidentity_', dir=settings.MetaDataDir())
            os.write(fd, current_identity_xmlsrc)
            os.close(fd)
            lg.info('created backup copy of my local identity in the file : %r' % fname)
    filename = bpio.portablePath(settings.LocalIdentityFilename())
    if not os.path.exists(filename):
        if _Debug:
            lg.out(_DebugLevel, "my_id.eraseLocalIdentity SKIP file %s not exist" % filename)
        return True
    if not os.path.isfile(filename):
        if _Debug:
            lg.out(_DebugLevel, "my_id.eraseLocalIdentity ERROR path %s is not a file" % filename)
        return False
    try:
        os.remove(filename)
    except:
        lg.exc()
        return False
    events.send('local-identity-erased', data=dict())
    if _Debug:
        lg.out(_DebugLevel, "my_id.eraseLocalIdentity file %s was deleted" % filename)
    return True
def QueueRequestFile(self, callOnReceived, creatorID, packetID, ownerID, remoteID):
    """
    Put a file request into the receiving queue of the given supplier,
    creating that queue on first use.

    For regular data packets (not the backup-index service files) the request is
    refused when the file already exists locally; `callOnReceived` is then scheduled
    with 'exist' and False is returned.
    """
    remoteID = id_url.field(remoteID)
    ownerID = id_url.field(ownerID)
    creatorID = id_url.field(creatorID)
    index_file_names = (
        settings.BackupInfoFileName(),
        settings.BackupInfoFileNameOld(),
        settings.BackupInfoEncryptedFileName(),
    )
    if packetID not in index_file_names:
        # make sure that we don't actually already have the file
        customer, pathID = packetid.SplitPacketID(packetID)
        filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID)
        if os.path.exists(filename):
            lg.warn("%s already exist " % filename)
            if callOnReceived:
                reactor.callLater(0, callOnReceived, packetID, 'exist')  # @UndefinedVariable
            return False
    if remoteID not in self.supplierQueues:
        # made a new queue for this man
        self.supplierQueues[remoteID] = SupplierQueue(remoteID, self.creatorID)
        lg.info("made a new receiving queue for %s" % nameurl.GetName(remoteID))
    return self.supplierQueues[remoteID].SupplierRequestFile(callOnReceived, creatorID, packetID, ownerID)
def doInitInterfaces(self, *args, **kwargs):
    """
    Action method.

    Bring up the external API interfaces according to the settings: generate the API auth
    secret on first run if that feature is enabled, then start the REST HTTP server and
    the WebSocket server. Always finishes by scheduling 'init-interfaces-done'.
    """
    if _Debug:
        lg.out(_DebugLevel, 'initializer.doInitInterfaces')
    # FTP server support is currently disabled:
    # if settings.enableFTPServer():
    #     try:
    #         from interface import ftp_server
    #         ftp_server.init()
    #     except:
    #         lg.exc()
    if settings.enableAPIAuthSecret():
        current_secret = local_fs.ReadTextFile(settings.APISecretFile())
        if not current_secret:
            # first run: create and persist a fresh auth secret
            new_secret = cipher.generate_secret_text(10)
            local_fs.WriteTextFile(settings.APISecretFile(), new_secret)
            lg.info('generated new API auth secret text and stored in %r' % settings.APISecretFile())
    if settings.enableRESTHTTPServer():
        try:
            from interface import api_rest_http_server
            api_rest_http_server.init(port=settings.getRESTHTTPServerPort())
        except:
            lg.exc()
    if settings.enableWebSocketServer():
        try:
            from interface import api_web_socket
            api_web_socket.init(port=settings.getWebSocketServerPort())
        except:
            lg.exc()
    reactor.callLater(0, self.automat, 'init-interfaces-done')  # @UndefinedVariable
def check_create(idurl):
    """
    Make sure a contact-status state machine instance exists for `idurl`;
    create one when it is not known yet.

    :return: always True
    """
    # idiom fix: direct dict membership test instead of `.keys()`
    if idurl not in _ContactsStatusDict:
        A(idurl)
        lg.info('contact %s is not found, made a new instance' % idurl)
    return True
def close_queue(queue_id):
    """
    Tear down the given message queue: cancel any pending (not yet fired) consumer
    notifications, unsubscribe all consumers from the queue and drop it from the
    registry of active queues.

    :raises Exception: when `queue_id` is malformed or unknown
    :return: always True
    """
    global _ActiveQueues
    if not valid_queue_id(queue_id):
        raise Exception('invalid queue id')
    if queue_id not in queue():
        raise Exception('queue not exist')
    if _Debug:
        lg.args(_DebugLevel, queue_id=queue_id)
    # iterate over a snapshot of message ids since cancel() callbacks may mutate the queue
    for message_id in list(queue(queue_id).keys()):
        if message_id not in queue(queue_id):
            # message was removed while we were cancelling others
            continue
        for consumer_id in list(queue(queue_id)[message_id].notifications.keys()):
            msg_obj = queue(queue_id).get(message_id)
            if msg_obj:
                callback_object = queue(queue_id)[message_id].notifications.get(consumer_id)
                if callback_object and not callback_object.called:
                    lg.info('canceling non-finished notification in the queue %s' % queue_id)
                    callback_object.cancel()
    # detach every consumer still subscribed to this queue
    for consumer_id in consumer().keys():
        if is_consumer_subscribed(consumer_id, queue_id):
            unsubscribe_consumer(consumer_id, queue_id)
    _ActiveQueues.pop(queue_id)
    return True
def start(self):
    """
    Start the entangled-DHT service: initialize the local DHT node with all known
    record layers and begin connecting to the seed nodes.

    :return: a Deferred fired by the connect callbacks, or an already-fired Deferred
        when `starting_deferred` was cleared before returning
    """
    from twisted.internet.defer import Deferred, succeed
    from logs import lg
    from dht import dht_records
    from dht import dht_service
    from dht import known_nodes
    from main import settings
    from main.config import conf
    # re-init the DHT node whenever the configured UDP port changes
    conf().addConfigNotifier('services/entangled-dht/udp-port', self._on_udp_port_modified)
    known_seeds = known_nodes.nodes()
    dht_layers = list(dht_records.LAYERS_REGISTRY.keys())
    dht_service.init(
        udp_port=settings.getDHTPort(),
        dht_dir_path=settings.ServiceDir('service_entangled_dht'),
        open_layers=dht_layers,
    )
    lg.info('DHT known seed nodes are : %r DHT layers are : %r' % (known_seeds, dht_layers, ))
    self.starting_deferred = Deferred()
    # connect to layer 0 first; _on_connected attaches the remaining layers
    d = dht_service.connect(
        seed_nodes=known_seeds,
        layer_id=0,
        attach=True,
    )
    d.addCallback(self._on_connected)
    d.addErrback(self._on_connect_failed)
    return self.starting_deferred or succeed(True)
def add_consumer(consumer_id):
    """
    Register a new consumer in the global registry.

    :raises Exception: when a consumer with that id already exists
    :return: always True
    """
    global _Consumers
    if consumer_id in consumer():
        raise Exception('consumer already exist')
    new_consumer = ConsumerInfo(consumer_id)
    _Consumers[consumer_id] = new_consumer
    lg.info('new consumer added: %s' % consumer_id)
    return True
def _do_detect_latest_revision(self, dht_info, my_info):
    """
    Compare my own family revision number with the one published in DHT
    and return whichever is the latest.
    """

    def _revision_of(info, warn_msg):
        # tolerate missing/invalid info dicts and treat them as revision 0
        try:
            return int(info['revision'])
        except:
            lg.warn(warn_msg)
            return 0

    my_revision = _revision_of(my_info, 'my own info is unknown or invalid, assume my revision is 0')
    dht_revision = _revision_of(dht_info, 'DHT info is unknown or invalid, assume DHT revision is 0')
    if my_revision <= dht_revision:
        return dht_revision
    # my_revision > dht_revision
    # TODO: SECURITY need to find a solution to prevent cheating here
    # another supplier could publish a record where he is only alone present and with a correct revision
    # that means he actually brutally dropped all other suppliers from the family
    lg.info('known DHT info for customer %s is more fresh, will rewrite DHT record' % self.customer_idurl)
    if my_revision > dht_revision + 1:
        lg.warn('switching revision too far, normally always increase by one on every change')
    return my_revision
def add_producer(producer_id):
    """
    Register a new producer in the global registry.

    :raises Exception: when a producer with that id already exists
    :return: always True
    """
    global _Producers
    if is_producer_exist(producer_id):
        raise Exception('producer already exist')
    new_producer = ProducerInfo(producer_id)
    _Producers[producer_id] = new_producer
    lg.info('new producer added: %s' % producer_id)
    return True
def __init__(self, ncpus):
    """
    Create a multiprocessing pool with `ncpus` workers plus a background thread
    that dispatches queued tasks into the pool.

    On Python 3.4+ the 'spawn' start method is forced (ignored if already set);
    on Windows the dedicated BitDustNode.exe from the venv is used as the
    multiprocessing executable so child processes can start correctly.
    """
    self._ncpus = ncpus
    if six.PY34:
        try:
            multiprocessing.set_start_method('spawn')
        except RuntimeError:
            # start method was already set elsewhere - that is fine
            pass
    multiprocessing.util.log_to_stderr(multiprocessing.util.SUBDEBUG)
    from system import bpio
    if bpio.Windows():
        from system import deploy
        deploy.init_base_dir()
        # NOTE(review): assumes the venv layout created by deploy - confirm path on fresh installs
        venv_python_path = os.path.join(deploy.current_base_dir(), 'venv', 'Scripts', 'BitDustNode.exe')
        lg.info('will use %s as multiprocessing executable' % venv_python_path)
        multiprocessing.set_executable(venv_python_path)
    self.processor = multiprocessing.Pool(ncpus)
    #: implement queue per Manager instance
    # self.queue = multiprocessing.Queue()
    self.tasks = OrderedDict({})
    self.task_id = 0
    # daemon thread feeding self.tasks into the pool
    self.thread = Thread(target=func_thread, args=(self.tasks, self.processor))
    self.thread.daemon = True
    self.thread.start()
    self._propagate_queue()
def finish_notification(consumer_id, queue_id, message_id, success):
    """
    Mark the pending notification of `consumer_id` about `message_id` in `queue_id`
    as finished, record the outcome in both the message and the consumer stats, and
    cancel the underlying Deferred if it has not fired yet.

    :param success: True counts as a successful delivery, False as a failed one
    :raises Exception: on unknown queue/message/consumer or when the notification
        was already finished or is not a Deferred
    :return: always True
    """
    if not valid_queue_id(queue_id):
        raise Exception('invalid queue id')
    if queue_id not in queue():
        raise Exception('queue not exist')
    if message_id not in queue(queue_id):
        raise Exception('message not exist')
    if consumer_id not in queue(queue_id)[message_id].notifications:
        raise Exception('not found pending notification for given consumer')
    defer_result = queue(queue_id)[message_id].notifications[consumer_id]
    if defer_result is None:
        # None is the marker for an already-finished notification
        raise Exception('notification already finished')
    if not isinstance(defer_result, Deferred):
        raise Exception('invalid notification type')
    # mark as finished but keep the key so repeated calls raise above
    queue(queue_id)[message_id].notifications[consumer_id] = None
    # queue(queue_id)[message_id].notifications.pop(consumer_id)
    if success:
        queue(queue_id)[message_id].success_notifications.append(consumer_id)
        consumer(consumer_id).success_notifications += 1
    else:
        queue(queue_id)[message_id].failed_notifications.append(consumer_id)
        consumer(consumer_id).failed_notifications += 1
    if not defer_result.called:
        lg.info('canceling non-finished notification in the queue %s' % queue_id)
        defer_result.cancel()
    del defer_result
    if _Debug:
        lg.args(_DebugLevel, consumer_id=consumer_id, queue_id=queue_id, message_id=message_id, success=success, notifications=len(queue(queue_id)[message_id].notifications))
    return True
def _on_restore_done(self, result, backup_id, outfd, tarfilename, backup_index):
    """
    Callback fired when the restore worker finished extracting one backup.

    Closes the output file descriptor, then fires either 'restore-failed'
    (after discarding the temp file) or 'restore-done' into the state machine.
    The original code tested `result` against 'done' twice; the branches are
    merged here into a single check with consistent returns.

    :param result: 'done' on success, otherwise an error description
    :return: always None
    """
    try:
        os.close(outfd)
    except:
        lg.exc()
    if result != 'done':
        lg.err('archive %r restore failed from %r with : %r' % (backup_id, tarfilename, result))
        # drop the partially written temp file
        tmpfile.throw_out(tarfilename, 'restore ' + result)
        self.automat('restore-failed', backup_id=backup_id, tarfilename=tarfilename)
        return None
    lg.info('archive %r restore success from %r' % (backup_id, tarfilename))
    self.automat('restore-done', backup_id=backup_id, tarfilename=tarfilename, backup_index=backup_index)
    return None
def register_customer_key(customer_public_key_id, customer_public_key):
    """
    Check/refresh/store customer public key locally.
    """
    if not customer_public_key_id or not customer_public_key:
        lg.warn('customer public key was not provided in the request')
        return False
    customer_public_key_id = my_keys.latest_key_id(customer_public_key_id)
    if my_keys.is_key_registered(customer_public_key_id):
        known_customer_public_key = my_keys.get_public_key_raw(customer_public_key_id)
        if known_customer_public_key == customer_public_key:
            # NOTE(review): even when the known key matches we still fall through and call
            # register_key() again below - confirm register_key() tolerates an existing key
            lg.info('customer public key %r already known and public key is matching' % customer_public_key_id)
        else:
            lg.warn('rewriting customer public key %r' % customer_public_key_id)
            my_keys.erase_key(customer_public_key_id)
    key_id, key_object = my_keys.read_key_info(customer_public_key)
    if not my_keys.register_key(key_id, key_object):
        lg.err('failed to register customer public key: %r' % customer_public_key_id)
        return False
    lg.info('new customer public key registered: %r' % customer_public_key_id)
    return True
def _on_my_suppliers_yet_not_hired(self, evt):
    from logs import lg
    from services import driver
    # suppliers set is incomplete: stop list-files service while it is still running
    if driver.is_enabled('service_list_files') and driver.is_started('service_list_files'):
        lg.info('my suppliers failed to hire, stopping service_list_files()')
        driver.stop_single('service_list_files')
def start(self):
    """
    Start the customer service: initialize the fire_hire state machine, subscribe to
    supplier/config/DHT events and begin hiring suppliers.

    :return: True immediately when all suppliers are already hired, otherwise a
        Deferred that fires once the hiring process completes (its errback only
        logs a warning)
    """
    from twisted.internet.defer import Deferred
    from logs import lg
    from main.config import conf
    from main import events
    from raid import eccmap
    from services import driver
    from customer import fire_hire
    self.starting_deferred = Deferred()
    # failures during startup are logged, not propagated
    self.starting_deferred.addErrback(
        lambda err: lg.warn('service %r was not started: %r' % (self.service_name, err.getErrorMessage() if err else 'unknown reason')))
    self.all_suppliers_hired_event_sent = False
    if driver.is_on('service_entangled_dht'):
        self._do_join_suppliers_dht_layer()
    eccmap.Update()
    fire_hire.A('init')
    # starting_deferred is fired from _on_fire_hire_ready when the machine reaches READY
    fire_hire.A().addStateChangedCallback(self._on_fire_hire_ready, None, 'READY')
    conf().addConfigNotifier('services/customer/suppliers-number', self._on_suppliers_number_modified)
    conf().addConfigNotifier('services/customer/needed-space', self._on_needed_space_modified)
    events.add_subscriber(self._on_supplier_modified, 'supplier-modified')
    events.add_subscriber(self._on_dht_layer_connected, event_id='dht-layer-connected')
    if fire_hire.IsAllHired():
        # nothing to hire: resolve synchronously
        self.starting_deferred.callback(True)
        self.starting_deferred = None
        lg.info('all my suppliers are already hired')
        return True
    fire_hire.A('restart')
    return self.starting_deferred