def Message(request):
    """
    Message came in for us so we:

    1) check that it is a correspondent
    2) decrypt message body
    3) save on local HDD
    4) call the GUI
    5) send an "Ack" back to sender
    """
    global _IncomingMessageCallbacks
    lg.out(6, "message.Message from " + str(request.OwnerID))
    # senderidentity = contactsdb.get_correspondent_identity(request.OwnerID)
    # if not senderidentity:
    #     lg.warn("had sender not in correspondents list " + request.OwnerID)
    #     # return
    #     contactsdb.add_correspondent(request.OwnerID, nameurl.GetName(request.OwnerID))
    #     contactsdb.save_correspondents()
    new_message = misc.StringToObject(request.Payload)
    if new_message is None:
        lg.warn("wrong Payload, can not extract message from request")
        return
    for old_id, old_message in inbox_history():
        if old_id == request.PacketID:
            lg.out(6, "message.Message SKIP, message %s found in history" % old_message)
            return
    inbox_history().append((request.PacketID, new_message))
    clear_message = new_message.ClearBody()
    # SaveMessage(clearmessage)
    from p2p import p2p_service
    p2p_service.SendAck(request)
    for cb in _IncomingMessageCallbacks:
        cb(request, clear_message)
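# Illustrative sketch (not part of the original module): Message() above passes every
# decrypted incoming message to each handler registered in _IncomingMessageCallbacks,
# calling it as cb(request, clear_message). A consumer (for example the GUI layer)
# could subscribe like this; the handler name is hypothetical.

def _example_register_incoming_handler():
    def _on_incoming_message(request, clear_message):
        lg.out(6, 'new message %s arrived from %s' % (request.PacketID, request.OwnerID))
    _IncomingMessageCallbacks.append(_on_incoming_message)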
def doStop(self, arg):
    """
    Action method.
    """
    if _Debug:
        lg.out(8, 'network_transport.doStop disconnecting %s transport' % (self.proto.upper()))
    self.interface.disconnect()
def RemoteFileReport(backupID, blockNum, supplierNum, dataORparity, result):
    """
    Writes info for a single piece of data into the "remote" matrix.

    May be called when you got an Ack packet from a remote supplier after
    you sent him some Data packet.
    """
    blockNum = int(blockNum)
    supplierNum = int(supplierNum)
    if supplierNum >= contactsdb.num_suppliers():
        # supplierNum indexes the per-supplier lists below, so it must be strictly
        # smaller than num_suppliers()
        lg.out(4, "backup_matrix.RemoteFileReport got too big supplier number, possible this is an old packet")
        return
    if backupID not in remote_files():
        remote_files()[backupID] = {}
        lg.out(8, "backup_matrix.RemoteFileReport new remote entry for %s created in the memory" % backupID)
    if blockNum not in remote_files()[backupID]:
        remote_files()[backupID][blockNum] = {
            "D": [0] * contactsdb.num_suppliers(),
            "P": [0] * contactsdb.num_suppliers(),
        }
    # save backed up block info into remote info structure, synchronize on hand info
    flag = 1 if result else 0
    if dataORparity == "Data":
        remote_files()[backupID][blockNum]["D"][supplierNum] = flag
    elif dataORparity == "Parity":
        remote_files()[backupID][blockNum]["P"][supplierNum] = flag
    else:
        lg.warn("incorrect dataORparity value: %s" % dataORparity)
    # if we know only 5 blocks stored on remote machine
    # but we have backed up 6th block - remember this
    remote_max_block_numbers()[backupID] = max(remote_max_block_numbers().get(backupID, -1), blockNum)
    # mark to repaint this backup in gui
    RepaintBackup(backupID)
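# Illustrative sketch (not part of the original module): shows how the "remote" matrix
# filled by RemoteFileReport() above can be read back. For each block the matrix keeps
# one 0/1 flag per supplier for the Data piece and one for the Parity piece.

def _example_read_remote_matrix(backupID, blockNum, supplierNum):
    block = remote_files().get(backupID, {}).get(blockNum, None)
    if block is None:
        # nothing was reported for this block yet
        return (0, 0)
    return (block["D"][supplierNum], block["P"][supplierNum])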
def SendMessage(remote_idurl, messagebody, packet_id=None):
    """
    Send a commands.Message() packet to a remote peer.
    """
    global _OutgoingMessageCallback
    if not packet_id:
        packet_id = packetid.UniqueID()
    remote_identity = identitycache.FromCache(remote_idurl)
    if remote_identity is None:
        d = identitycache.immediatelyCaching(remote_idurl, 20)
        d.addCallback(lambda src: SendMessage(remote_idurl, messagebody, packet_id))
        d.addErrback(lambda err: lg.warn('failed to retrieve ' + remote_idurl))
        return d
    Amessage = MessageClass(remote_identity, messagebody)
    Payload = misc.ObjectToString(Amessage)
    lg.out(6, "message.SendMessage to %s with %d bytes" % (remote_idurl, len(Payload)))
    outpacket = signed.Packet(
        commands.Message(),
        my_id.getLocalID(),
        my_id.getLocalID(),
        packet_id,
        Payload,
        remote_idurl)
    result = gateway.outbox(outpacket, wide=True)
    if _OutgoingMessageCallback:
        _OutgoingMessageCallback(result, messagebody, remote_identity, packet_id)
    return result
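# Illustrative usage sketch (not part of the original module): SendMessage() above returns
# the result of gateway.outbox() when the remote identity is already cached, or a Deferred
# that re-sends after caching completes. The error text is just an example.

def _example_send_message(remote_idurl):
    from twisted.internet.defer import Deferred
    result = SendMessage(remote_idurl, "hello")
    if isinstance(result, Deferred):
        result.addErrback(lambda err: lg.warn('message to %s was not sent: %s' % (remote_idurl, err)))
    return result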
def doStart(self, arg):
    """
    Action method.
    """
    options = {'idurl': my_id.getLocalID(), }
    id_contact = ''
    default_host = ''
    ident = my_id.getLocalIdentity()
    if ident:
        id_contact = ident.getContactsByProto().get(self.proto, '')
    if id_contact:
        assert id_contact.startswith(self.proto + '://')
        # cut the "<proto>://" prefix, str.strip() would also eat matching
        # characters from the beginning and end of the host itself
        id_contact = id_contact[len(self.proto + '://'):]
    if self.proto == 'tcp':
        if not id_contact:
            default_host = misc.readExternalIP() + ':' + str(settings.getTCPPort())
        options['host'] = id_contact or default_host
        options['tcp_port'] = settings.getTCPPort()
    elif self.proto == 'udp':
        if not id_contact:
            default_host = nameurl.GetName(my_id.getLocalID()) + '@' + platform.node()
        options['host'] = id_contact or default_host
        options['dht_port'] = settings.getDHTPort()
        options['udp_port'] = settings.getUDPPort()
    elif self.proto == 'proxy':
        pass
    if _Debug:
        lg.out(8, 'network_transport.doStart connecting %s transport : %s' % (self.proto.upper(), options))
    self.interface.connect(options)
def _find_random_node(self):
    if _Debug:
        lg.out(_DebugLevel + 10, 'stun_client._find_random_node')
    new_key = dht_service.random_key()
    d = dht_service.find_node(new_key)
    d.addCallback(self._some_nodes_found)
    d.addErrback(self._nodes_not_found)
def _some_nodes_found(self, nodes):
    if _Debug:
        lg.out(_DebugLevel + 10, 'stun_client._some_nodes_found : %d' % len(nodes))
    if len(nodes) > 0:
        self.automat('found-some-nodes', nodes)
    else:
        self.automat('dht-nodes-not-found')
def doAddStunServer(self, arg):
    """
    Action method.
    """
    if _Debug:
        lg.out(_DebugLevel + 10, 'stun_client.doAddStunServer %s' % str(arg))
    self.stun_servers.append(arg)
def doReportSuccess(self, arg):
    """
    Action method.
    """
    try:
        min_port = min(map(lambda addr: addr[1], self.stun_results.values()))
        max_port = max(map(lambda addr: addr[1], self.stun_results.values()))
        my_ip = self.stun_results.values()[0][0]
        if min_port == max_port:
            result = ('stun-success', 'non-symmetric', my_ip, min_port)
        else:
            result = ('stun-success', 'symmetric', my_ip, self.stun_results)
        self.my_address = (my_ip, min_port)
    except:
        lg.exc()
        result = ('stun-failed', None, None, [])
        self.my_address = None
    if self.my_address:
        bpio.WriteFile(settings.ExternalIPFilename(), self.my_address[0])
        bpio.WriteFile(settings.ExternalUDPPortFilename(), str(self.my_address[1]))
    if _Debug:
        lg.out(_DebugLevel, 'stun_client.doReportSuccess based on %d nodes: %s' % (
            len(self.stun_results), str(self.my_address)))
    if _Debug:
        lg.out(_DebugLevel + 10, '    %s' % str(result))
    for cb in self.callbacks:
        cb(result[0], result[1], result[2], result[3])
    self.callbacks = []
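# Illustrative sketch (not part of the original module): the same min/max port test that
# doReportSuccess() above performs. If every STUN server reported the same external port,
# the NAT keeps one stable mapping and is reported as 'non-symmetric'; different ports
# per server mean the mapping changes per destination, so it is treated as 'symmetric'.

def _example_classify_nat(stun_results):
    ports = [addr[1] for addr in stun_results.values()]
    return 'non-symmetric' if min(ports) == max(ports) else 'symmetric'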
def _failed_my_incoming(self, err, key, position):
    if _Debug:
        lg.out(_DebugLevel, 'udp_node._failed_my_incoming incoming empty: %s' % str(position))
    self.automat('dht-read-result', None)
def doShowGUI(self, arg):
    lg.out(2, "initializer.doShowGUI")
    if settings.NewWebGUI():
        from web import control
        d = control.init()
    else:
        from web import webcontrol
        d = webcontrol.init()
    try:
        from system.tray_icon import USE_TRAY_ICON
    except:
        USE_TRAY_ICON = False
        lg.exc()
    if USE_TRAY_ICON:
        from system import tray_icon
        if settings.NewWebGUI():
            # tray_icon.SetControlFunc(control.on_tray_icon_command)
            tray_icon.SetControlFunc(self._on_tray_icon_command)
        else:
            tray_icon.SetControlFunc(webcontrol.OnTrayIconCommand)
    if not settings.NewWebGUI():
        webcontrol.ready()
    if self.flagGUI or not self.is_installed:
        if settings.NewWebGUI():
            def _show_gui(wsgiport):
                reactor.callLater(0.1, control.show)
            d.addCallback(_show_gui)
            # reactor.callLater(0.1, control.show)
        else:
            d.addCallback(webcontrol.show)
def _send_new_identity(self):
    """
    Send created identity to the identity server to register it.

    TODO: need to close transport and gateway after that
    """
    lg.out(4, 'id_registrator._send_new_identity')
    from transport import gateway
    from transport import network_transport
    from transport.tcp import tcp_interface
    gateway.init()
    interface = tcp_interface.GateInterface()
    transport = network_transport.NetworkTransport('tcp', interface)
    transport.automat('init', gateway.listener())
    transport.automat('start')
    gateway.start()
    sendfilename = settings.LocalIdentityFilename() + '.new'
    dlist = []
    for idurl in self.new_identity.sources:
        self.free_idurls.remove(idurl)
        _, host, _, _ = nameurl.UrlParse(idurl)
        _, tcpport = known_servers.by_host().get(
            host, (settings.IdentityWebPort(), settings.IdentityServerPort()))
        srvhost = '%s:%d' % (host, tcpport)
        dlist.append(gateway.send_file_single(idurl, 'tcp', srvhost, sendfilename, 'Identity'))
    assert len(self.free_idurls) == 0
    return DeferredList(dlist)
def _got_my_address(self, value, key):
    if not isinstance(value, dict):
        lg.warn('can not read my address')
        self.automat('dht-write-failed')
        return
    try:
        addr = value[dht_service.key_to_hash(key)].strip('\n').strip()
    except:
        if _Debug:
            lg.out(4, 'udp_node._got_my_address ERROR wrong key in response: %r' % value)
        lg.exc()
        self.automat('dht-write-failed')
        return
    if addr != '%s:%d' % (self.my_address[0], self.my_address[1]):
        if _Debug:
            lg.out(4, 'udp_node._got_my_address ERROR value not fit: %r' % value)
        self.automat('dht-write-failed')
        return
    self.automat('dht-write-success')
def doRequestServers(self, arg):
    """
    Action method.
    """
    login = bpio.ReadTextFile(settings.UserNameFilename())

    def _cb(xmlsrc, idurl, host):
        lg.out(4, '        EXIST: %s' % idurl)
        self.registrations.remove(idurl)
        self.automat('id-exist', idurl)

    def _eb(err, idurl, host):
        lg.out(4, '        NOT EXIST: %s' % idurl)
        if self.preferred_server and self.preferred_server == host:
            self.free_idurls.insert(0, idurl)
        else:
            self.free_idurls.append(idurl)
        self.registrations.remove(idurl)
        self.automat('id-not-exist', idurl)

    for host in self.good_servers:
        webport, tcpport = known_servers.by_host().get(
            host, (settings.IdentityWebPort(), settings.IdentityServerPort()))
        if webport == 80:
            webport = ''
        idurl = nameurl.UrlMake('http', host, webport, login + '.xml')
        lg.out(4, '    %s' % idurl)
        d = net_misc.getPageTwisted(idurl, timeout=10)
        d.addCallback(_cb, idurl, host)
        d.addErrback(_eb, idurl, host)
        self.registrations.append(idurl)
    lg.out(4, 'id_registrator.doRequestServers login=%s registrations=%d' % (login, len(self.registrations)))
def doSaveMyIdentity(self, arg):
    """
    Action method.
    """
    lg.out(4, 'id_registrator.doSaveMyIdentity %s' % self.new_identity)
    my_id.setLocalIdentity(self.new_identity)
    my_id.saveLocalIdentity()
def _try():
    # NOTE: total_count is assumed to be a module-level counter initialized elsewhere;
    # without this declaration the "total_count += 1" below would raise UnboundLocalError.
    global total_count
    lg.out(0, '_try')
    appList = bpio.find_process([
        'bitdust.exe',
        'bpgui.exe',
        'bppipe.exe',
        'bptester.exe',
        'bitstarter.exe',
    ])
    lg.out(0, 'appList:' + str(appList))
    if len(appList) == 0:
        lg.out(0, 'finished')
        reactor.stop()
        do_uninstall()
        return 0
    total_count += 1
    lg.out(0, '%d' % total_count)
    if total_count > 10:
        lg.out(0, 'not responding')
        ret = kill()
        reactor.stop()
        if ret == 0:
            do_uninstall()
        return ret
    reactor.callLater(1, _try)
def doPingServers(self, arg):
    """
    Action method.
    """
    lg.out(4, 'id_registrator.doPingServers %d in list' % len(self.discovered_servers))

    def _cb(htmlsrc, id_server_host):
        lg.out(4, '        RESPONDED: %s' % id_server_host)
        if self.preferred_server and id_server_host == self.preferred_server:
            self.good_servers.insert(0, id_server_host)
        else:
            self.good_servers.append(id_server_host)
        self.discovered_servers.remove(id_server_host)
        self.automat('id-server-response', (id_server_host, htmlsrc))

    def _eb(err, id_server_host):
        lg.out(4, '        FAILED: %s' % id_server_host)
        self.discovered_servers.remove(id_server_host)
        self.automat('id-server-failed', (id_server_host, err))

    for host in self.discovered_servers:
        webport, tcpport = known_servers.by_host().get(
            host, (settings.IdentityWebPort(), settings.IdentityServerPort()))
        if webport == 80:
            webport = ''
        server_url = nameurl.UrlMake('http', host, webport, '')
        d = net_misc.getPageTwisted(server_url, timeout=10)
        d.addCallback(_cb, host)
        d.addErrback(_eb, host)
def _getPageFail(x, idurl, res):
    global _CachingTasks
    _CachingTasks.pop(idurl)
    res.errback(x)
    if _Debug:
        lg.out(14, '    [cache failed] %s' % idurl)
    return None
def kill():
    lg.out(0, 'kill')
    total_count = 0
    found = False
    while True:
        appList = bpio.find_process([
            'bitdust.exe',
            'bpmain.py',
            'bitdust.py',
            'regexp:^/usr/bin/python.*bitdust.*$',
            'bpgui.exe',
            'bpgui.py',
            'bppipe.exe',
            'bppipe.py',
            'bptester.exe',
            'bptester.py',
            'bitstarter.exe',
        ])
        if len(appList) > 0:
            found = True
        for pid in appList:
            lg.out(0, 'trying to stop pid %d' % pid)
            bpio.kill_process(pid)
        if len(appList) == 0:
            if found:
                lg.out(0, 'BitDust stopped\n')
            else:
                lg.out(0, 'BitDust was not started\n')
            return 0
        total_count += 1
        if total_count > 10:
            lg.out(0, 'some BitDust process found, but can not stop it\n')
            return 1
        time.sleep(1)
def connectionMade(self):
    if _Debug:
        lg.out(_DebugLevel, "tcp_connection.connectionMade %s:%d" % self.getTransportAddress())
    address = self.getAddress()
    name = "tcp_connection[%s:%d]" % (address[0], address[1])
    automat.Automat.__init__(self, name, "AT_STARTUP", _DebugLevel, _Debug)
    self.automat("connection-made")
def getPageFail(x, idurl):
    """
    This is called when a request for the identity has failed.
    """
    if _Debug:
        lg.out(6, "identitycache.getPageFail NETERROR in request to " + idurl)
    return x
def doStartListening(self, arg):
    """
    Action method.
    """
    try:
        _, info = arg
        self.router_proto_host = (info.proto, info.host)
    except:
        try:
            s = config.conf().getString('services/proxy-transport/current-router').strip()
            _, router_proto, router_host = s.split(' ')
            self.router_proto_host = (router_proto, router_host)
        except:
            lg.exc()
    self.router_identity = identitycache.FromCache(self.router_idurl)
    config.conf().setString('services/proxy-transport/current-router', '%s %s %s' % (
        self.router_idurl, self.router_proto_host[0], self.router_proto_host[1]))
    if ReadMyOriginalIdentitySource():
        lg.warn('my original identity is not empty')
    else:
        config.conf().setData('services/proxy-transport/my-original-identity',
                              my_id.getLocalIdentity().serialize())
    self.request_service_packet_id = []
    callback.insert_inbox_callback(0, self._on_inbox_packet_received)
    if _Debug:
        lg.out(2, 'proxy_receiver.doStartListening !!!!!!! router: %s at %s://%s' % (
            self.router_idurl, self.router_proto_host[0], self.router_proto_host[1]))
def next_time(self):
    lasttime = self.lasttime
    if lasttime == '':
        # let it be one year ago (we can schedule 1 month maximum) and one day
        lasttime = str(time.time() - 366 * 24 * 60 * 60)
    try:
        # turned off - return -1
        if self.type in ['none', 'disabled']:
            return -1
        # every N seconds
        elif self.type == 'continuously':
            return maths.shedule_continuously(lasttime, int(self.interval),)
        # every N hours, exactly when hour begins, minutes and seconds are 0
        elif self.type == 'hourly':
            return maths.shedule_next_hourly(lasttime, int(self.interval),)
        # every N days, at given time
        elif self.type == 'daily':
            return maths.shedule_next_daily(lasttime, self.interval, self.daytime)
        # every N weeks, at given time and selected week days
        elif self.type == 'weekly':
            week_days = self.details.split(' ')
            week_day_numbers = []
            week_day_names = list(calendar.day_name)
            for week_label in week_days:
                try:
                    i = week_day_names.index(week_label)
                except:
                    continue
                week_day_numbers.append(i)
            return maths.shedule_next_weekly(lasttime, self.interval, self.daytime, week_day_numbers)
        # monthly, at given time and day
        elif self.type == 'monthly':
            month_dates = self.details.split(' ')
            return maths.shedule_next_monthly(lasttime, self.interval, self.daytime, month_dates)
        # yearly, at given time and month, day, NOT DONE YET!
        elif self.type == 'yearly':
            months_labels = self.details.split(' ')
            months_numbers = []
            months_names = list(calendar.month_name)
            for month_label in months_labels:
                try:
                    i = months_names.index(month_label)
                except:
                    continue
                months_numbers.append(i)
            return maths.shedule_next_monthly(lasttime, self.interval, self.daytime, months_numbers)
        else:
            lg.out(1, 'schedule.next_time ERROR wrong schedule type: ' + self.type)
            return None
    except:
        lg.exc()
        return None
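# Illustrative sketch (not part of the original module): the weekday lookup that
# next_time() above performs for the 'weekly' schedule type. Unknown labels are skipped,
# so 'Monday Friday' becomes [0, 4] (calendar.day_name depends on the current locale).

def _example_week_day_numbers(details):
    week_day_names = list(calendar.day_name)
    numbers = []
    for label in details.split(' '):
        if label in week_day_names:
            numbers.append(week_day_names.index(label))
    return numbers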
def doReadKey(self, arg):
    # keyfn = arg['keyfilename']
    src = arg['keysrc']
    lg.out(2, 'installer.doReadKey length=%s' % len(src))
    # src = bpio.ReadBinaryFile(keyfn)
    if len(src) > 1024 * 10:
        self.doPrint(('file is too big for private key', 'red'))
        return
    try:
        lines = src.splitlines()
        idurl = lines[0].strip()
        keysrc = '\n'.join(lines[1:])
        if idurl != nameurl.FilenameUrl(nameurl.UrlFilename(idurl)):
            idurl = ''
            keysrc = src
    except:
        lg.exc()
        idurl = ''
        keysrc = src
    if self.state not in self.output:
        self.output[self.state] = {'data': [('', 'black')]}
    self.output[self.state] = {'data': [('', 'black')]}
    self.output[self.state]['idurl'] = idurl
    self.output[self.state]['keysrc'] = keysrc
    if 'RECOVER' not in self.output:
        self.output['RECOVER'] = {'data': [('', 'black')]}
    if keysrc and idurl:
        self.output['RECOVER']['data'].append(('private key and IDURL were loaded', 'green'))
    elif not idurl and keysrc:
        self.output['RECOVER']['data'].append(('private key was loaded, provide the correct IDURL now', 'blue'))
def start():
    """
    Starts all enabled services in the boot-up order and returns a Deferred
    which fires when all of them have been started.
    """
    global _StartingDeferred
    if _StartingDeferred:
        lg.warn('driver.start already called')
        return _StartingDeferred
    if _Debug:
        lg.out(_DebugLevel - 6, 'driver.start')
    dl = []
    for name in boot_up_order():
        svc = services().get(name, None)
        if not svc:
            raise ServiceNotFound(name)
        if not svc.enabled():
            continue
        if svc.state == 'ON':
            continue
        d = Deferred()
        dl.append(d)
        svc.automat('start', d)
    if len(dl) == 0:
        return succeed(1)
    _StartingDeferred = DeferredList(dl)
    _StartingDeferred.addCallback(on_started_all_services)
    return _StartingDeferred
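# Illustrative usage sketch (not part of the original module): start() above returns an
# already-fired Deferred when there is nothing to start, or the shared DeferredList
# otherwise, so callers can chain on the result either way.

def _example_start_all_services():
    d = start()
    d.addCallback(lambda result: lg.out(4, 'driver.start finished with %r' % (result, )))
    return d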
def doBlockPushAndRaid(self, arg):
    """
    Action method.
    """
    newblock = arg
    if self.terminating:
        self.automat('block-raid-done', (newblock.BlockNumber, None))
        lg.out(_DebugLevel, 'backup.doBlockPushAndRaid SKIP, terminating=True')
        return
    fileno, filename = tmpfile.make('raid')
    serializedblock = newblock.Serialize()
    blocklen = len(serializedblock)
    os.write(fileno, str(blocklen) + ":" + serializedblock)
    os.close(fileno)
    self.workBlocks[newblock.BlockNumber] = filename
    dt = time.time()
    outputpath = os.path.join(settings.getLocalBackupsDir(), self.backupID)
    task_params = (filename, self.eccmap.name, self.backupID, newblock.BlockNumber, outputpath)
    raid_worker.add_task('make', task_params,
                         lambda cmd, params, result: self._raidmakeCallback(params, result, dt),)
    self.automat('block-raid-started', newblock)
    del serializedblock
    if _Debug:
        lg.out(_DebugLevel, 'backup.doBlockPushAndRaid %s : start process data from %s to %s, %d' % (
            newblock.BlockNumber, filename, outputpath, id(self.terminating)))
def doDestroyMe(self, arg):
    self.currentBlockData.close()
    del self.currentBlockData
    self.destroy()
    collected = gc.collect()
    if _Debug:
        lg.out(_DebugLevel, 'backup.doDestroyMe [%s] collected %d objects' % (self.backupID, collected))
def Ack(newpacket, info):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.Ack %s from [%s] at %s://%s : %s" % (
            newpacket.PacketID, nameurl.GetName(newpacket.CreatorID),
            info.proto, info.host, newpacket.Payload))
def dbcur():
    global _DBConnection
    global _DBCursor
    if _DBCursor is None:
        lg.out(4, 'sqlio.dbcur created a new DB cursor')
        _DBCursor = _DBConnection.cursor()
    return _DBCursor
def doReadHello(self, arg):
    """
    Action method.
    """
    from transport.tcp import tcp_node
    try:
        command, payload = arg
        peeraddress, peeridurl = payload.split(" ")
        peerip, peerport = peeraddress.split(":")
        peerport = int(peerport)
        peeraddress = (peerip, peerport)
    except:
        return
    # self.peer_external_address = (self.peer_external_address[0], peerport)
    self.peer_external_address = peeraddress
    self.peer_idurl = peeridurl
    if self.peer_address != self.peer_external_address:
        tcp_node.opened_connections()[self.peer_address].remove(self)
        if len(tcp_node.opened_connections()[self.peer_address]) == 0:
            tcp_node.opened_connections().pop(self.peer_address)
        self.peer_address = self.peer_external_address
        if self.peer_address not in tcp_node.opened_connections():
            tcp_node.opened_connections()[self.peer_address] = []
        tcp_node.opened_connections()[self.peer_address].append(self)
        lg.out(6, "%s : external peer address changed to %s" % (self, self.peer_address))
def SendBroadcastMessage(outpacket):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.SendBroadcastMessage to %s" % outpacket.RemoteID)
    gateway.outbox(outpacket)
    return outpacket
def outReceived(self, inp):
    self.out += inp
    for line in inp.splitlines():
        if _Debug:
            lg.out(_DebugLevel, '[git:out]: %s' % strng.to_text(line))
def run_sync():
    lg.out(6, 'git_proc.run_sync')
    reactor.callLater(0, sync, sync_callback)
    reactor.callLater(0, loop)
def _on_enabled_disabled(self, path, value, oldvalue, result):
    from p2p import network_connector
    from logs import lg
    lg.out(2, 'service_tcp_transport._on_enabled_disabled : %s->%s : %s' % (
        oldvalue, value, path))
    network_connector.A('reconnect')
def shutdown():
    if _Debug:
        lg.out(_DebugLevel, 'p2p_service.shutdown')
def init():
    if _Debug:
        lg.out(_DebugLevel, 'p2p_service.init')
def RetrieveCoin(request, info):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.RetrieveCoin from %s : %s" % (
            nameurl.GetName(info.sender_idurl), request.Payload))
def CheckWholeBackup(BackupID):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.CheckWholeBackup with BackupID=" + BackupID)
def Fail(newpacket):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.Fail from [%s]: %s" % (newpacket.CreatorID, newpacket.Payload))
def Broadcast(request, info):
    if _Debug:
        lg.out(_DebugLevel, 'p2p_service.Broadcast %d bytes in [%s]' % (len(request.Payload), request.PacketID))
        lg.out(_DebugLevel, '    from remoteID=%s ownerID=%s creatorID=%s sender_idurl=%s' % (
            request.RemoteID, request.OwnerID, request.CreatorID, info.sender_idurl))
def outbox(outpacket):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.outbox [%s] to %s" % (
            outpacket.Command, nameurl.GetName(outpacket.RemoteID)))
    return True
def Correspondent(request):
    if _Debug:
        lg.out(_DebugLevel, 'p2p_service.Correspondent %d bytes in [%s]' % (len(request.Payload), request.PacketID))
        lg.out(_DebugLevel, '    from remoteID=%s ownerID=%s creatorID=%s' % (
            request.RemoteID, request.OwnerID, request.CreatorID))
def shutdown():
    if _Debug:
        lg.out(_DebugLevel, 'p2p_queue.shutdown')
    remove_event_handler(do_handle_event_packet)
    stop()
def Ack(newpacket, info):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.Ack %s from [%s] at %s://%s with %d bytes payload" % (
            newpacket.PacketID, nameurl.GetName(newpacket.CreatorID),
            info.proto, info.host, len(newpacket.Payload)))
def cancel(self):
    lg.out(6, 'tcp_stream.OutboxFile.cancel timeout=%d' % self.timeout)
    self.stop()
def start():
    if _Debug:
        lg.out(_DebugLevel, 'p2p_queue.start')
    reactor.callLater(0, process_queues)  # @UndefinedVariable
    return True
def init():
    lg.out(_DebugLevel, 'accounting.init')
def init():
    if _Debug:
        lg.out(_DebugLevel, 'p2p_queue.init')
    add_event_handler(do_handle_event_packet)
    start()
def proxy_errback(x):
    if _Debug:
        lg.out(6, 'tcp_interface.proxy_errback ERROR %s' % x)
    return None
def on_inbox_file_register_failed(self, err, file_id):
    if _Debug:
        lg.warn('failed to register, file_id=%s err:\n%s' % (str(file_id), str(err)))
        lg.out(_DebugLevel - 8, '    close session %s' % self.session)
    self.connection.automat('disconnect')
def shutdown():
    global _InitDone
    if _Debug:
        lg.out(_DebugLevel, 'ratings.shutdown')
    stop()
    _InitDone = False
def _node_failed(self, response, info):
    if _Debug:
        lg.out(_DebugLevel, 'p2p_service_seeker._node_failed %r %r' % (response, info))
    self.automat('service-denied')
def inbox(info):
    """
    1) The protocol modules write to temporary files and give us that filename
    2) We unserialize
    3) We check that it is for us
    4) We check that it is from one of our contacts
    5) We use signed.validate() to check the signature and that number fields are numbers
    6) Any other sanity checks we can do, and if anything is funny we toss out the packet
    7) Then change the filename to the PacketID that it should be and call the right
       function(s) for this new packet
       (encryptedblock, scrubber, remotetester, customerservice, ...)
       to dispatch it to the right place(s)
    """
    global _LastInboxPacketTime
    # if _DoingShutdown:
    #     if _Debug:
    #         lg.out(_DebugLevel, "gateway.inbox ignoring input since _DoingShutdown")
    #     return None
    if _Debug:
        lg.out(_DebugLevel, "gateway.inbox [%s]" % info.filename)
    if info.filename == "" or not os.path.exists(info.filename):
        lg.err("bad filename=" + info.filename)
        return None
    try:
        data = bpio.ReadBinaryFile(info.filename)
    except:
        lg.err("gateway.inbox ERROR reading file " + info.filename)
        return None
    if len(data) == 0:
        lg.err("gateway.inbox ERROR zero byte file from %s://%s" % (info.proto, info.host))
        return None
    if callback.run_finish_file_receiving_callbacks(info, data):
        lg.warn('incoming data of %d bytes was filtered out in file receiving callbacks' % len(data))
        return None
    try:
        newpacket = signed.Unserialize(data)
    except:
        lg.err("gateway.inbox ERROR during Unserialize data from %s://%s" % (info.proto, info.host))
        lg.exc()
        return None
    if newpacket is None:
        lg.warn("newpacket from %s://%s is None" % (info.proto, info.host))
        return None
    # newpacket.Valid() will be called later in the flow in packet_in.handle() method
    try:
        Command = newpacket.Command
        OwnerID = newpacket.OwnerID
        CreatorID = newpacket.CreatorID
        PacketID = newpacket.PacketID
        Date = newpacket.Date
        Payload = newpacket.Payload
        RemoteID = newpacket.RemoteID
        Signature = newpacket.Signature
        packet_sz = len(data)
    except:
        lg.err("gateway.inbox ERROR during Unserialize data from %s://%s" % (info.proto, info.host))
        lg.err("data length=" + str(len(data)))
        lg.exc()
        return None
    _LastInboxPacketTime = time.time()
    if _Debug:
        lg.out(_DebugLevel - 2, "gateway.inbox [%s] signed by %s|%s (for %s) from %s://%s" % (
            Command, nameurl.GetName(OwnerID), nameurl.GetName(CreatorID),
            nameurl.GetName(RemoteID), info.proto, info.host))
    if _PacketLogFileEnabled:
        lg.out(0, '        \033[1;49;92mINBOX %s(%s) %s %s for %s\033[0m' % (
            newpacket.Command, newpacket.PacketID,
            global_id.UrlToGlobalID(newpacket.OwnerID),
            global_id.UrlToGlobalID(newpacket.CreatorID),
            global_id.UrlToGlobalID(newpacket.RemoteID),
        ), log_name='packet', showtime=True)
    return newpacket
def _on_connect_failed(self, err):
    from logs import lg
    lg.out(self.debug_level, 'service_entangled_dht._on_connect_failed : %r' % err)
    return err
def _in(a, b, c, d):
    lg.out(2, 'INBOX %d : %r' % (globals()['num_in'], a))
    globals()['num_in'] += 1
    return False
def outbox(
        outpacket,
        wide=False,
        callbacks={},
        target=None,
        route=None,
        response_timeout=None,
        keep_alive=True,
):
    """
    Sends `outpacket` to the network.

    :param outpacket: an instance of ``signed.Packet``
    :param wide: set to True if you need to send the packet to all contacts of the Remote Identity
    :param callbacks: provide callback methods here to get the response,
        one callback per expected command,
        callback arguments are: (response_packet, info)
    :param target: if your recipient is not equal to outpacket.RemoteID
    :param route: dict with parameters, you can manage how to process this packet:
        'packet': <another packet to be sent>,
        'proto': <receiver proto>,
        'host': <receiver host>,
        'remoteid': <receiver idurl>,
        'description': <description of the packet>,
    :param response_timeout: None, or an integer to indicate how long to wait for an ack

    Returns:
        `None` if data was not sent and no filter was applied
        `Deferred` object if a filter was applied but sending was delayed
        `packet_out.PacketOut` object if the packet was sent
    """
    if _Debug:
        lg.out(_DebugLevel, "gateway.outbox [%s] signed by %s|%s to %s (%s), wide=%s" % (
            outpacket.Command,
            nameurl.GetName(outpacket.OwnerID),
            nameurl.GetName(outpacket.CreatorID),
            nameurl.GetName(outpacket.RemoteID),
            nameurl.GetName(target),
            wide,
        ))
    if _PacketLogFileEnabled:
        lg.out(0, '\033[1;49;96mOUTBOX %s(%s) %s %s to %s\033[0m' % (
            outpacket.Command, outpacket.PacketID,
            global_id.UrlToGlobalID(outpacket.OwnerID),
            global_id.UrlToGlobalID(outpacket.CreatorID),
            global_id.UrlToGlobalID(outpacket.RemoteID),
        ), log_name='packet', showtime=True)
    return callback.run_outbox_filter_callbacks(
        outpacket,
        wide=wide,
        callbacks=callbacks,
        target=target,
        route=route,
        response_timeout=response_timeout,
        keep_alive=keep_alive,
    )
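# Illustrative usage sketch (not part of the original module): per the docstring of
# outbox() above, callbacks are keyed by the expected response command and receive
# (response_packet, info). Using commands.Ack() as the key is an assumption here,
# following how commands are referenced elsewhere in this codebase.

def _example_outbox_with_ack(outpacket):
    def _on_ack(response_packet, info):
        if _Debug:
            lg.out(_DebugLevel, 'got Ack %s from %s://%s' % (
                response_packet.PacketID, info.proto, info.host))
    return outbox(outpacket, callbacks={commands.Ack(): _on_ack}, response_timeout=10)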
def rewrite_indexes(db_instance, source_db_instance):
    """
    Removes existing custom indexes from `db_instance` and copies the index files
    (together with their buckets and storages) from `source_db_instance`.
    """
    if _Debug:
        lg.out(_DebugLevel, 'coins_db.rewrite_indexes')
    source_location = os.path.join(source_db_instance.path, '_indexes')
    source_indexes = os.listdir(source_location)
    existing_location = os.path.join(db_instance.path, '_indexes')
    existing_indexes = os.listdir(existing_location)
    for existing_index_file in existing_indexes:
        if existing_index_file != '00id.py':
            index_name = existing_index_file[2:existing_index_file.index('.')]
            existing_index_path = os.path.join(existing_location, existing_index_file)
            os.remove(existing_index_path)
            if _Debug:
                lg.out(_DebugLevel, '        removed index at %s' % existing_index_path)
            buck_path = os.path.join(db_instance.path, index_name + '_buck')
            if os.path.isfile(buck_path):
                os.remove(buck_path)
                if _Debug:
                    lg.out(_DebugLevel, '        also bucket at %s' % buck_path)
            stor_path = os.path.join(db_instance.path, index_name + '_stor')
            if os.path.isfile(stor_path):
                os.remove(stor_path)
                if _Debug:
                    lg.out(_DebugLevel, '        also storage at %s' % stor_path)
    for source_index_file in source_indexes:
        if source_index_file != '00id.py':
            index_name = source_index_file[2:source_index_file.index('.')]
            destination_index_path = os.path.join(existing_location, source_index_file)
            source_index_path = os.path.join(source_location, source_index_file)
            if not bpio.AtomicWriteFile(destination_index_path, bpio.ReadTextFile(source_index_path)):
                lg.warn('failed writing index to %s' % destination_index_path)
                continue
            destination_buck_path = os.path.join(db_instance.path, index_name + '_buck')
            source_buck_path = os.path.join(source_db_instance.path, index_name + '_buck')
            if not bpio.AtomicWriteFile(destination_buck_path, bpio.ReadBinaryFile(source_buck_path)):
                lg.warn('failed writing index bucket to %s' % destination_buck_path)
                continue
            destination_stor_path = os.path.join(db_instance.path, index_name + '_stor')
            source_stor_path = os.path.join(source_db_instance.path, index_name + '_stor')
            if not bpio.AtomicWriteFile(destination_stor_path, bpio.ReadBinaryFile(source_stor_path)):
                lg.warn('failed writing index storage to %s' % destination_stor_path)
                continue
            if _Debug:
                lg.out(_DebugLevel, '        wrote index %s from %s' % (index_name, source_index_path))
def verify():
    """
    Verifies the contacts of my local identity against every known transport.
    Returns a Deferred which fires with (ordered_list, all_results).
    """
    ordered_list = list(transports().keys())
    ordered_list.sort(key=settings.getTransportPriority, reverse=True)
    if _Debug:
        lg.out(4, 'gateway.verify sorted list : %r' % ordered_list)
    my_id_obj = my_id.getLocalIdentity()
    resulted = Deferred()
    all_results = {}

    def _verify_transport(proto):
        if _Debug:
            lg.out(_DebugLevel - 2, '    verifying %s_transport' % proto)
        if not settings.transportIsEnabled(proto):
            if _Debug:
                lg.out(_DebugLevel - 2, '    %s_transport is disabled' % proto)
            return succeed(True)
        transp = transport(proto)
        if transp.state == 'OFFLINE':
            if _Debug:
                lg.out(_DebugLevel - 2, '    %s_transport state is OFFLINE' % proto)
            return succeed(True)
        if transp.state != 'LISTENING':
            if _Debug:
                lg.out(_DebugLevel - 2, '    %s_transport state is not LISTENING' % proto)
            return succeed(True)
        transp_result = transp.interface.verify_contacts(my_id_obj)
        if _Debug:
            lg.out(_DebugLevel - 2, '    %s result is %r' % (proto, transp_result))
        if isinstance(transp_result, bool) and transp_result:
            return succeed(True)
        if isinstance(transp_result, bool) and transp_result == False:
            return succeed(False)
        if isinstance(transp_result, Deferred):
            ret = Deferred()
            transp_result.addCallback(lambda result_value: ret.callback(result_value))
            return ret
        lg.warn('incorrect result returned from %s_interface.verify_contacts(): %r' % (proto, transp_result))
        return succeed(False)

    def _on_verified_one(t_result, proto):
        all_results[proto] = t_result
        if _Debug:
            lg.out(_DebugLevel - 2, '    verified %s transport, result=%r' % (proto, t_result))
        if len(all_results) == len(ordered_list):
            resulted.callback((ordered_list, all_results))

    for proto in ordered_list:
        d = _verify_transport(proto)
        d.addCallback(_on_verified_one, proto)
    return resulted
def Broadcast(request, info):
    if _Debug:
        lg.out(_DebugLevel, "p2p_service.Broadcast %r from %s" % (request, info.sender_idurl))
def doTestMyCapacity2(self, arg):
    """
    Here are some values.

    - donated_bytes : you set this in the config
    - spent_bytes : how much space is taken from you by other users right now
    - free_bytes = donated_bytes - spent_bytes : not yet allocated space
    - used_bytes : size of all files, which you store on your disk for your customers
    """
    current_customers = contactsdb.customers()
    removed_customers = []
    spent_bytes = 0
    donated_bytes = settings.getDonatedBytes()
    if os.path.isfile(settings.CustomersSpaceFile()):
        space_dict = bpio._read_dict(settings.CustomersSpaceFile(), {})
    else:
        space_dict = {'free': donated_bytes}
    used_dict = bpio._read_dict(settings.CustomersUsedSpaceFile(), {})
    lg.out(8, 'customers_rejector.doTestMyCapacity donated=%d' % donated_bytes)
    try:
        int(space_dict['free'])
        for idurl, customer_bytes in space_dict.items():
            if idurl != 'free':
                spent_bytes += int(customer_bytes)
    except:
        lg.exc()
        space_dict = {'free': donated_bytes}
        spent_bytes = 0
        removed_customers = list(current_customers)
        current_customers = []
        self.automat('space-overflow', (space_dict, spent_bytes, current_customers, removed_customers))
        return
    lg.out(8, '        spent=%d' % spent_bytes)
    if spent_bytes < donated_bytes:
        space_dict['free'] = donated_bytes - spent_bytes
        bpio._write_dict(settings.CustomersSpaceFile(), space_dict)
        lg.out(8, '        space is OK !!!!!!!!')
        self.automat('space-enough')
        return
    used_space_ratio_dict = {}
    for customer_pos in xrange(contactsdb.num_customers()):
        customer_idurl = contactsdb.customer(customer_pos)
        try:
            allocated_bytes = int(space_dict[customer_idurl])
        except:
            if customer_idurl in current_customers:
                current_customers.remove(customer_idurl)
                removed_customers.append(customer_idurl)
            else:
                lg.warn('%s not customers' % customer_idurl)
            lg.warn('%s allocated space unknown' % customer_idurl)
            continue
        if allocated_bytes <= 0:
            if customer_idurl in current_customers:
                current_customers.remove(customer_idurl)
                removed_customers.append(customer_idurl)
            else:
                lg.warn('%s not customers' % customer_idurl)
            lg.warn('%s allocated_bytes==0' % customer_idurl)
            continue
        try:
            files_size = int(used_dict.get(customer_idurl, 0))
            ratio = float(files_size) / float(allocated_bytes)
        except:
            if customer_idurl in current_customers:
                current_customers.remove(customer_idurl)
                removed_customers.append(customer_idurl)
            else:
                lg.warn('%s not customers' % customer_idurl)
            lg.warn('%s used_dict have wrong value' % customer_idurl)
            continue
        if ratio > 1.0:
            if customer_idurl in current_customers:
                current_customers.remove(customer_idurl)
                removed_customers.append(customer_idurl)
            else:
                lg.warn('%s not customers' % customer_idurl)
            spent_bytes -= allocated_bytes
            lg.warn('%s space overflow, where is bptester?' % customer_idurl)
            continue
        used_space_ratio_dict[customer_idurl] = ratio
    customers_sorted = sorted(current_customers, key=lambda i: used_space_ratio_dict[i],)
    while len(customers_sorted) > 0:
        customer_idurl = customers_sorted.pop()
        allocated_bytes = int(space_dict[customer_idurl])
        spent_bytes -= allocated_bytes
        space_dict.pop(customer_idurl)
        current_customers.remove(customer_idurl)
        removed_customers.append(customer_idurl)
        lg.out(8, '        customer %s REMOVED' % customer_idurl)
        if spent_bytes < donated_bytes:
            break
    space_dict['free'] = donated_bytes - spent_bytes
    lg.out(8, '        SPACE NOT ENOUGH !!!!!!!!!!')
    self.automat('space-overflow', (space_dict, spent_bytes, current_customers, removed_customers))
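# Illustrative sketch (not part of the original module), using the terms from the docstring
# of doTestMyCapacity2() above with made-up numbers: donated_bytes comes from the settings,
# spent_bytes is the sum of per-customer allocations, and whatever is left over is stored
# under the 'free' key of the space dictionary. The idurls below are hypothetical.

def _example_space_accounting():
    donated_bytes = 8 * 1024 * 1024 * 1024  # 8 GB donated in the settings
    space_dict = {
        'http://id.example.org/alice.xml': 2 * 1024 * 1024 * 1024,
        'http://id.example.org/bob.xml': 1 * 1024 * 1024 * 1024,
    }
    spent_bytes = sum(space_dict.values())
    space_dict['free'] = donated_bytes - spent_bytes  # 5 GB not yet allocated
    return space_dict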