def bm_command_getdata(self):
    items = self.decode_payload_content("l32s")
    # skip?
    if time.time() < self.skipUntil:
        return True
    # TODO: make this more asynchronous
    helper_random.randomshuffle(items)
    for i in map(str, items):
        if Dandelion().hasHash(i) and \
                self != Dandelion().objectChildStem(i):
            self.antiIntersectionDelay()
            logger.info(
                '%s asked for a stem object we didn\'t offer to it.',
                self.destination)
            break
        else:
            try:
                self.append_write_buf(
                    protocol.CreatePacket('object', Inventory()[i].payload))
            except KeyError:
                self.antiIntersectionDelay()
                logger.info(
                    '%s asked for an object we don\'t have.',
                    self.destination)
                break
    # I think that aborting after the first missing/stem object is more
    # secure when using random reordering, as the recipient won't know
    # exactly which objects we refuse to deliver.
    return True
def __init__(self, host=None, sock=None, announcing=False):
    super(BMProto, self).__init__(sock=sock)
    self.verackReceived = True
    self.verackSent = True
    # TODO: sort out streams
    self.streams = [1]
    self.fullyEstablished = True
    self.connectedAt = 0
    self.skipUntil = 0
    if sock is None:
        if host is None:
            host = ''
        self.create_socket(
            socket.AF_INET6 if ":" in host else socket.AF_INET,
            socket.SOCK_DGRAM)
        self.set_socket_reuse()
        logger.info("Binding UDP socket to %s:%i", host, self.port)
        self.socket.bind((host, self.port))
    else:
        self.socket = sock
        self.set_socket_reuse()
    self.listening = state.Peer(*self.socket.getsockname())
    self.destination = state.Peer(*self.socket.getsockname())
    ObjectTracker.__init__(self)
    self.connecting = False
    self.connected = True
    self.announcing = announcing
    self.set_state("bm_header", expectBytes=protocol.Header.size)
def run_task(task):
    # save current directory
    cwd = os.getcwd()
    # move to the 'loadcases' directory
    directory = os.path.join(cwd, constants.LOADCASES_DIR)
    if not os.path.exists(directory):
        os.makedirs(directory)
    os.chdir(directory)
    try:
        for lc in task.loadcases:
            if lc.is_filetransfer:
                do_file_transfer(lc.transfer_params['host'], lc.name, lc.scheme)
                logger.info('File transfer complete')
            # TODO: maybe preexecute should be a static method?
            solver = get_solver(lc.solver)
            solver.preexecute(lc)
    finally:
        # move back to the parent directory
        os.chdir(cwd)
    # set task id
    task.id = run_task.request.id
    task = localworker.run_task(task)
    return task
def _checkAndShareGetpubkeyWithPeers(data):
    if len(data) < 42:
        logger.info('getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    if len(data) > 200:
        logger.info('getpubkey is abnormally long. Sanity check failed. Ignoring object.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this getpubkey request. Ignoring it.')
        return
    objectType = 0
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def run(self):
    while True:
        objectType, data = queues.objectProcessorQueue.get()
        self.checkackdata(data)
        try:
            if objectType == 0:  # getpubkey
                self.processgetpubkey(data)
            elif objectType == 1:  # pubkey
                self.processpubkey(data)
            elif objectType == 2:  # msg
                self.processmsg(data)
            elif objectType == 3:  # broadcast
                self.processbroadcast(data)
            # is more of a command, not an object type. Is used to get
            # this thread past the queue.get() so that it will check
            # the shutdown variable.
            elif objectType == 'checkShutdownVariable':
                pass
            else:
                if isinstance(objectType, int):
                    logger.info(
                        'Don\'t know how to handle object type 0x%08X',
                        objectType)
                else:
                    logger.info(
                        'Don\'t know how to handle object type %s',
                        objectType)
        except helper_msgcoding.DecompressionSizeException as e:
            logger.error(
                'The object is too big after decompression (stopped'
                ' decompressing at %ib, your configured limit %ib).'
                ' Ignoring',
                e.size, BMConfigParser().safeGetInt("zlib", "maxsize"))
        except varintDecodeError as e:
            logger.debug(
                'There was a problem with a varint while processing an'
                ' object. Some details: %s', e)
        except Exception:
            logger.critical(
                'Critical error within objectProcessorThread: \n',
                exc_info=True)

        if state.shutdown:
            # Wait just a moment for most of the connections to close
            time.sleep(.5)
            numberOfObjectsThatWereInTheObjectProcessorQueue = 0
            with SqlBulkExecute() as sql:
                while queues.objectProcessorQueue.curSize > 0:
                    objectType, data = queues.objectProcessorQueue.get()
                    sql.execute(
                        'INSERT INTO objectprocessorqueue VALUES (?,?)',
                        objectType, data)
                    numberOfObjectsThatWereInTheObjectProcessorQueue += 1
            logger.debug(
                'Saved %s objects from the objectProcessorQueue to'
                ' disk. objectProcessorThread exiting.',
                numberOfObjectsThatWereInTheObjectProcessorQueue)
            state.shutdown = 2
            break
def AddPortMapping(
        self,
        externalPort,
        internalPort,
        internalClient,
        protocol,
        description,
        leaseDuration=0,
        enabled=1,
):  # pylint: disable=too-many-arguments
    """Add UPnP port mapping"""
    resp = self.soapRequest(
        self.upnp_schema + ':1', 'AddPortMapping', [
            ('NewRemoteHost', ''),
            ('NewExternalPort', str(externalPort)),
            ('NewProtocol', protocol),
            ('NewInternalPort', str(internalPort)),
            ('NewInternalClient', internalClient),
            ('NewEnabled', str(enabled)),
            ('NewPortMappingDescription', str(description)),
            ('NewLeaseDuration', str(leaseDuration))
        ])
    self.extPort = externalPort
    logger.info(
        "Successfully established UPnP mapping for %s:%i on external port %i",
        internalClient, internalPort, externalPort)
    return resp
def recinv(self, data):
    numberOfItemsInInv, lengthOfVarint = decodeVarint(data[:10])
    if numberOfItemsInInv > 50000:
        sys.stderr.write('Too many items in inv message!')
        return
    if len(data) < lengthOfVarint + (numberOfItemsInInv * 32):
        logger.info('inv message doesn\'t contain enough data. Ignoring.')
        return

    startTime = time.time()
    advertisedSet = set()
    for i in range(numberOfItemsInInv):
        advertisedSet.add(
            data[lengthOfVarint + (32 * i):32 + lengthOfVarint + (32 * i)])
    objectsNewToMe = advertisedSet
    for stream in self.streamNumber:
        objectsNewToMe -= Inventory().hashes_by_stream(stream)
    logger.info(
        'inv message lists %s objects. Of those %s are new to me.'
        ' It took %s seconds to figure that out.',
        numberOfItemsInInv, len(objectsNewToMe), time.time() - startTime)
    for item in objectsNewToMe:
        PendingDownload().add(item)
        # helps us keep from sending inv messages to peers that already
        # know about the objects listed therein
        self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware[item] = 0
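# The inv parser above leans on decodeVarint() for the item count. For
# reference, a minimal sketch of Bitmessage-style varint decoding (big-endian,
# Bitcoin-like prefixes). The real implementation lives in addresses.py and
# additionally raises varintDecodeError on truncated input; treat this as an
# illustrative assumption, not the canonical code.
from struct import unpack

def _sketch_decodeVarint(data):
    """Return (value, number of bytes consumed) for an encoded varint."""
    if not data:
        return 0, 0
    firstByte, = unpack('>B', data[0:1])
    if firstByte < 253:
        return firstByte, 1                     # value fits in the prefix byte
    if firstByte == 253:
        return unpack('>H', data[1:3])[0], 3    # 2-byte payload
    if firstByte == 254:
        return unpack('>I', data[1:5])[0], 5    # 4-byte payload
    return unpack('>Q', data[1:9])[0], 9        # 255 -> 8-byte payload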
def lookupAppdataFolder():
    APPNAME = "PyBitmessage"
    from os import path, environ
    if sys.platform == 'darwin':
        if "HOME" in environ:
            dataFolder = path.join(
                environ["HOME"], "Library/Application Support/", APPNAME) + '/'
        else:
            logger.critical(
                'Could not find home folder, please report this message'
                ' and your OS X version to the BitMessage Github.')
            sys.exit()
    elif 'win32' in sys.platform or 'win64' in sys.platform:
        dataFolder = path.join(environ['APPDATA'], APPNAME) + '\\'
    else:
        from shutil import move
        try:
            dataFolder = path.join(environ["XDG_CONFIG_HOME"], APPNAME)
        except KeyError:
            dataFolder = path.join(environ["HOME"], ".config", APPNAME)
        # Migrate existing data to the proper location
        # if this is an existing install
        try:
            logger.info("Moving data folder to %s", dataFolder)
            move(path.join(environ["HOME"], ".%s" % APPNAME), dataFolder)
        except IOError:
            pass
        dataFolder = dataFolder + '/'
    return dataFolder
def __init__(self):
    threading.Thread.__init__(self, name="Downloader")
    self.initStop()
    self.name = "Downloader"
    logger.info("init download thread")
    self.pending = {}
    self.lastCleaned = time.time()
def checkackdata(self, data):
    # Let's check whether this is a message acknowledgement bound for us.
    if len(data) < 32:
        return
    readPosition = 20  # bypass the nonce, time, and object type
    # chomp version number
    versionNumber, varIntLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varIntLength
    # chomp stream number
    streamNumber, varIntLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varIntLength
    if data[readPosition:] in shared.ackdataForWhichImWatching:
        logger.info('This object is an acknowledgement bound for me.')
        del shared.ackdataForWhichImWatching[data[readPosition:]]
        sqlExecute(
            'UPDATE sent SET status=?, lastactiontime=? WHERE ackdata=?',
            'ackreceived', int(time.time()), data[readPosition:])
        queues.UISignalQueue.put((
            'updateSentItemStatusByAckdata',
            (data[readPosition:],
             tr._translate(
                 "MainWindow",
                 'Acknowledgement of the message received %1').arg(
                     l10n.formatTimestamp()))))
    else:
        logger.debug('This object is not an acknowledgement bound for me.')
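# Offset arithmetic used by checkackdata() above, spelled out. The layout is
# implied by the parser itself (readPosition = 20 skips nonce + time + object
# type, then two varints precede the ack payload); the field names here are
# descriptive assumptions, not identifiers from the codebase:
#
#   bytes 0..7    POW nonce     (8 bytes, '>Q')
#   bytes 8..15   expiresTime   (8 bytes, '>Q')
#   bytes 16..19  objectType    (4 bytes, '>I')
#   bytes 20..    version varint, stream varint, then the object body
#                 (for an ack, the body is the watched ackdata)
def _sketch_ack_payload(data):
    readPosition = 20                                        # nonce + time + type
    _, varIntLength = decodeVarint(data[readPosition:readPosition + 10])
    readPosition += varIntLength                             # skip version
    _, varIntLength = decodeVarint(data[readPosition:readPosition + 10])
    readPosition += varIntLength                             # skip stream
    return data[readPosition:]                               # candidate ackdata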
def checkSensitiveFilePermissions(filename):
    if sys.platform == 'win32':
        # TODO: This might deserve extra checks by someone familiar with
        # Windows systems.
        return True
    elif sys.platform[:7] == 'freebsd':
        # FreeBSD file systems are the same as major Linux file systems
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
    else:
        try:
            # Skip known problems for non-Win32 filesystems
            # without POSIX permissions.
            fstype = subprocess.check_output(
                'stat -f -c "%%T" %s' % (filename),
                shell=True,
                stderr=subprocess.STDOUT)
            if 'fuseblk' in fstype:
                logger.info(
                    'Skipping file permissions check for %s.'
                    ' Filesystem fuseblk detected.', filename)
                return True
        except:
            # Swallow exception here, but we might run into trouble later!
            logger.error('Could not determine filesystem type. %s', filename)
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
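# A quick worked example of the mask test above: stat.S_IRWXG | stat.S_IRWXO
# is 0o077, i.e. all group and other permission bits, so the check passes
# only when none of those bits are set on the file.
import stat

_mask = stat.S_IRWXG | stat.S_IRWXO     # 0o077
assert 0o600 & _mask == 0               # rw------- : acceptable
assert 0o644 & _mask != 0               # rw-r--r-- : group/other can read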
def initCL():
    """Initialise OpenCL engine"""
    # pylint: disable=global-statement
    global ctx, queue, program, hash_dt, libAvailable
    if libAvailable is False:
        return
    del enabledGpus[:]
    del vendors[:]
    del gpus[:]
    ctx = False
    try:
        hash_dt = numpy.dtype([('target', numpy.uint64), ('v', numpy.str_, 73)])
        try:
            for platform in cl.get_platforms():
                gpus.extend(platform.get_devices(device_type=cl.device_type.GPU))
                if BMConfigParser().safeGet(
                        "bitmessagesettings", "opencl") == platform.vendor:
                    enabledGpus.extend(platform.get_devices(
                        device_type=cl.device_type.GPU))
                if platform.vendor not in vendors:
                    vendors.append(platform.vendor)
        except:
            pass
        if enabledGpus:
            ctx = cl.Context(devices=enabledGpus)
            queue = cl.CommandQueue(ctx)
            with open(os.path.join(
                    paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r') as f:
                fstr = f.read()
            program = cl.Program(ctx, fstr).build(options="")
            logger.info("Loaded OpenCL kernel")
        else:
            logger.info("No OpenCL GPUs found")
            del enabledGpus[:]
    except Exception:
        logger.error("OpenCL fail: ", exc_info=True)
        del enabledGpus[:]
def _checkAndShareGetpubkeyWithPeers(data):
    if len(data) < 42:
        logger.info(
            'getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug(
            'The streamNumber %i isn\'t one we are interested in.',
            streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this getpubkey request. Ignoring it.')
        return
    objectType = 0
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def queryHTTP(self, data):
    """Query the server via HTTP."""
    result = None
    try:
        self.con.putrequest("POST", "/")
        self.con.putheader("Connection", "Keep-Alive")
        self.con.putheader("User-Agent", "bitmessage")
        self.con.putheader("Host", self.host)
        self.con.putheader("Content-Type", "application/json")
        self.con.putheader("Content-Length", str(len(data)))
        self.con.putheader("Accept", "application/json")
        authstr = "%s:%s" % (self.user, self.password)
        self.con.putheader(
            "Authorization", "Basic %s" % base64.b64encode(authstr))
        self.con.endheaders()
        self.con.send(data)
        try:
            resp = self.con.getresponse()
            result = resp.read()
            if resp.status != 200:
                raise Exception(
                    "Namecoin returned status %i: %s"
                    % (resp.status, resp.reason))
        except:
            logger.info("HTTP receive error")
    except:
        logger.info("HTTP connection error")
    return result
def __init__(self, host=None, sock=None, announcing=False):
    super(BMProto, self).__init__(sock=sock)
    self.verackReceived = True
    self.verackSent = True
    # TODO sort out streams
    self.streams = [1]
    self.fullyEstablished = True
    self.connectedAt = 0
    self.skipUntil = 0
    if sock is None:
        if host is None:
            host = ''
        if ":" in host:
            self.create_socket(socket.AF_INET6, socket.SOCK_DGRAM)
        else:
            self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.set_socket_reuse()
        logger.info("Binding UDP socket to %s:%i", host, UDPSocket.port)
        self.socket.bind((host, UDPSocket.port))
        # BINDTODEVICE is only available on Linux and requires root
        # try:
        #     print "binding to %s" % (host)
        #     self.socket.setsockopt(
        #         socket.SOL_SOCKET, socket.SO_BINDTODEVICE, host)
        # except AttributeError:
    else:
        self.socket = sock
        self.set_socket_reuse()
    self.listening = state.Peer(
        self.socket.getsockname()[0], self.socket.getsockname()[1])
    self.destination = state.Peer(
        self.socket.getsockname()[0], self.socket.getsockname()[1])
    ObjectTracker.__init__(self)
    self.connecting = False
    self.connected = True
    self.announcing = announcing
    self.set_state("bm_header", expectBytes=protocol.Header.size)
def initCL():
    global ctx, queue, program, hash_dt, libAvailable
    if libAvailable is False:
        return
    del enabledGpus[:]
    del vendors[:]
    del gpus[:]
    ctx = False
    try:
        hash_dt = numpy.dtype([('target', numpy.uint64), ('v', numpy.str_, 73)])
        try:
            for platform in cl.get_platforms():
                gpus.extend(platform.get_devices(device_type=cl.device_type.GPU))
                if BMConfigParser().safeGet(
                        "bitmessagesettings", "opencl") == platform.vendor:
                    enabledGpus.extend(
                        platform.get_devices(device_type=cl.device_type.GPU))
                if platform.vendor not in vendors:
                    vendors.append(platform.vendor)
        except:
            pass
        if enabledGpus:
            ctx = cl.Context(devices=enabledGpus)
            queue = cl.CommandQueue(ctx)
            with open(os.path.join(
                    paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r') as f:
                fstr = f.read()
            program = cl.Program(ctx, fstr).build(options="")
            logger.info("Loaded OpenCL kernel")
        else:
            logger.info("No OpenCL GPUs found")
            del enabledGpus[:]
    except Exception as e:
        logger.error("OpenCL fail: ", exc_info=True)
        del enabledGpus[:]
def peerValidityChecks(self):
    if self.remoteProtocolVersion < 3:
        self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
            fatal=2, errorText="You are using an old protocol. Closing connection.")))
        logger.debug(
            'Closing connection to old protocol version %s node: %s',
            self.remoteProtocolVersion, self.peer)
        return False
    if self.timeOffset > 3600:
        self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
            fatal=2, errorText="Your time is too far in the future compared to mine. Closing connection.")))
        logger.info(
            "%s's time is too far in the future (%s seconds). Closing connection to it.",
            self.peer, self.timeOffset)
        shared.timeOffsetWrongCount += 1
        time.sleep(2)
        return False
    elif self.timeOffset < -3600:
        self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
            fatal=2, errorText="Your time is too far in the past compared to mine. Closing connection.")))
        logger.info(
            "%s's time is too far in the past (timeOffset %s seconds). Closing connection to it.",
            self.peer, self.timeOffset)
        shared.timeOffsetWrongCount += 1
        return False
    else:
        shared.timeOffsetWrongCount = 0
    if len(self.streamNumber) == 0:
        self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
            fatal=2, errorText="We don't have shared stream interests. Closing connection.")))
        logger.debug(
            'Closed connection to %s because there is no overlapping interest in streams.',
            self.peer)
        return False
    return True
def DeletePortMapping(self, externalPort, protocol):
    from debug import logger
    resp = self.soapRequest('WANIPConnection:1', 'DeletePortMapping', [
        ('NewExternalPort', str(externalPort)),
        ('NewProtocol', protocol),
    ])
    logger.info("Removed UPnP mapping on external port %i", externalPort)
    return resp
def try_add_known_node(stream, addr, port, method=''):
    try:
        socket.inet_aton(addr)
    except (TypeError, socket.error):
        return
    logger.info(
        'Adding %s to knownNodes based on %s DNS bootstrap method',
        addr, method)
    addKnownNode(stream, state.Peer(addr, port))
def init():
    """Initialise PoW"""
    # pylint: disable=global-statement
    global bitmsglib, bmpow
    openclpow.initCL()
    if sys.platform == "win32":
        if ctypes.sizeof(ctypes.c_voidp) == 4:
            bitmsglib = 'bitmsghash32.dll'
        else:
            bitmsglib = 'bitmsghash64.dll'
        try:
            # MSVS
            bso = ctypes.WinDLL(
                os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
            logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib)
            bmpow = bso.BitmessagePOW
            bmpow.restype = ctypes.c_ulonglong
            _doCPoW(2**63, "")
            logger.info("Successfully tested C PoW DLL (stdcall) %s", bitmsglib)
        except:
            logger.error("C PoW test fail.", exc_info=True)
            try:
                # MinGW
                bso = ctypes.CDLL(
                    os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
                logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib)
                bmpow = bso.BitmessagePOW
                bmpow.restype = ctypes.c_ulonglong
                _doCPoW(2**63, "")
                logger.info("Successfully tested C PoW DLL (cdecl) %s", bitmsglib)
            except:
                logger.error("C PoW test fail.", exc_info=True)
                bso = None
    else:
        try:
            bso = ctypes.CDLL(
                os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
        except OSError:
            import glob
            try:
                bso = ctypes.CDLL(glob.glob(os.path.join(
                    paths.codePath(), "bitmsghash", "bitmsghash*.so"
                ))[0])
            except (OSError, IndexError):
                bso = None
        except:
            bso = None
        else:
            logger.info("Loaded C PoW DLL %s", bitmsglib)
    if bso:
        try:
            bmpow = bso.BitmessagePOW
            bmpow.restype = ctypes.c_ulonglong
        except:
            bmpow = None
    else:
        bmpow = None
    if bmpow is None:
        buildCPoW()
def process_message(self, peer, mailfrom, rcpttos, data):
    # print 'Receiving message from:', peer
    p = re.compile(".*<([^>]+)>")
    if not hasattr(self.channel, "auth") or not self.channel.auth:
        logger.error("Missing or invalid auth")
        return
    try:
        self.msg_headers = Parser().parsestr(data)
    except:
        logger.error("Invalid headers")
        return
    try:
        sender, domain = p.sub(r'\1', mailfrom).split("@")
        if domain != SMTPDOMAIN:
            raise Exception("Bad domain %s", domain)
        if sender not in BMConfigParser().addresses():
            raise Exception("Nonexisting user %s", sender)
    except Exception as err:
        logger.debug("Bad envelope from %s: %s", mailfrom, repr(err))
        msg_from = self.decode_header("from")
        try:
            msg_from = p.sub(r'\1', self.decode_header("from")[0])
            sender, domain = msg_from.split("@")
            if domain != SMTPDOMAIN:
                raise Exception("Bad domain %s", domain)
            if sender not in BMConfigParser().addresses():
                raise Exception("Nonexisting user %s", sender)
        except Exception as err:
            logger.error("Bad headers from %s: %s", msg_from, repr(err))
            return
    try:
        msg_subject = self.decode_header('subject')[0]
    except:
        msg_subject = "Subject missing..."
    msg_tmp = email.message_from_string(data)
    body = u''
    for part in msg_tmp.walk():
        if part and part.get_content_type() == "text/plain":
            body += part.get_payload(decode=1).decode(
                part.get_content_charset('utf-8'), errors='replace')
    for to in rcpttos:
        try:
            rcpt, domain = p.sub(r'\1', to).split("@")
            if domain != SMTPDOMAIN:
                raise Exception("Bad domain %s", domain)
            logger.debug(
                "Sending %s to %s about %s", sender, rcpt, msg_subject)
            self.send(sender, rcpt, msg_subject, body)
            logger.info("Relayed %s to %s", sender, rcpt)
        except Exception as err:
            logger.error("Bad to %s: %s", to, repr(err))
            continue
    return
def powQueueSize():
    curWorkerQueue = queues.workerQueue.qsize()
    for thread in threading.enumerate():
        try:
            if thread.name == "singleWorker":
                curWorkerQueue += thread.busy
        except Exception as err:
            logger.info('Thread error %s', err)
    return curWorkerQueue
def DeletePortMapping(self, externalPort, protocol):
    from debug import logger
    resp = self.soapRequest(self.upnp_schema + ':1', 'DeletePortMapping', [
        ('NewRemoteHost', ''),
        ('NewExternalPort', str(externalPort)),
        ('NewProtocol', protocol),
    ])
    logger.info("Removed UPnP mapping on external port %i", externalPort)
    return resp
def bm_command_object(self):
    objectOffset = self.payloadOffset
    nonce, expiresTime, objectType, version, streamNumber = \
        self.decode_payload_content("QQIvv")
    self.object = BMObject(
        nonce, expiresTime, objectType, version, streamNumber,
        self.payload, self.payloadOffset)

    if len(self.payload) - self.payloadOffset > BMProto.maxObjectPayloadSize:
        logger.info(
            'The payload length of this object is too large (%d bytes).'
            ' Ignoring it.', len(self.payload) - self.payloadOffset)
        raise BMProtoExcessiveDataError()

    try:
        self.object.checkProofOfWorkSufficient()
        self.object.checkEOLSanity()
        self.object.checkAlreadyHave()
    except (BMObjectExpiredError, BMObjectAlreadyHaveError,
            BMObjectInsufficientPOWError) as e:
        BMProto.stopDownloadingObject(self.object.inventoryHash)
        raise e
    try:
        self.object.checkStream()
    except (BMObjectUnwantedStreamError,) as e:
        BMProto.stopDownloadingObject(
            self.object.inventoryHash,
            BMConfigParser().get("inventory", "acceptmismatch"))
        if not BMConfigParser().get("inventory", "acceptmismatch"):
            raise e

    try:
        self.object.checkObjectByType()
        objectProcessorQueue.put(
            (self.object.objectType, buffer(self.object.data)))
    except BMObjectInvalidError as e:
        BMProto.stopDownloadingObject(self.object.inventoryHash, True)
    else:
        try:
            del state.missingObjects[self.object.inventoryHash]
        except KeyError:
            pass

    if self.object.inventoryHash in Inventory() and \
            Dandelion().hasHash(self.object.inventoryHash):
        Dandelion().removeHash(self.object.inventoryHash, "cycle detection")

    Inventory()[self.object.inventoryHash] = (
        self.object.objectType, self.object.streamNumber,
        buffer(self.payload[objectOffset:]), self.object.expiresTime,
        buffer(self.object.tag))
    self.handleReceivedObject(
        self.object.streamNumber, self.object.inventoryHash)
    invQueue.put((
        self.object.streamNumber, self.object.inventoryHash,
        self.destination))
    return True
def init():
    global bitmsglib, bso, bmpow
    openclpow.initCL()
    if sys.platform == "win32":
        if ctypes.sizeof(ctypes.c_voidp) == 4:
            bitmsglib = 'bitmsghash32.dll'
        else:
            bitmsglib = 'bitmsghash64.dll'
        try:
            # MSVS
            bso = ctypes.WinDLL(
                os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
            logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib)
            bmpow = bso.BitmessagePOW
            bmpow.restype = ctypes.c_ulonglong
            _doCPoW(2**63, "")
            logger.info("Successfully tested C PoW DLL (stdcall) %s", bitmsglib)
        except:
            logger.error("C PoW test fail.", exc_info=True)
            try:
                # MinGW
                bso = ctypes.CDLL(
                    os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
                logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib)
                bmpow = bso.BitmessagePOW
                bmpow.restype = ctypes.c_ulonglong
                _doCPoW(2**63, "")
                logger.info("Successfully tested C PoW DLL (cdecl) %s", bitmsglib)
            except:
                logger.error("C PoW test fail.", exc_info=True)
                bso = None
    else:
        try:
            bso = ctypes.CDLL(
                os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
        except OSError:
            import glob
            try:
                bso = ctypes.CDLL(glob.glob(os.path.join(
                    paths.codePath(), "bitmsghash", "bitmsghash*.so"
                ))[0])
            except (OSError, IndexError):
                bso = None
        except:
            bso = None
        else:
            logger.info("Loaded C PoW DLL %s", bitmsglib)
    if bso:
        try:
            bmpow = bso.BitmessagePOW
            bmpow.restype = ctypes.c_ulonglong
        except:
            bmpow = None
    else:
        bmpow = None
    if bmpow is None:
        buildCPoW()
def checkEOLSanity(self):
    # EOL sanity check
    if self.expiresTime - int(time.time()) > BMObject.maxTTL:
        logger.info(
            'This object\'s End of Life time is too far in the future.'
            ' Ignoring it. Time is %i', self.expiresTime)
        # TODO: remove from download queue
        raise BMObjectExpiredError()

    if self.expiresTime - int(time.time()) < BMObject.minTTL:
        logger.info(
            'This object\'s End of Life time was too long ago.'
            ' Ignoring the object. Time is %i', self.expiresTime)
        # TODO: remove from download queue
        raise BMObjectExpiredError()
def connectionFullyEstablished(self):
    if self.connectionIsOrWasFullyEstablished:
        # there is no reason to run this function a second time
        return

    if not self.sslHandshake():
        return

    if not self.peerValidityChecks():
        time.sleep(2)
        self.sendDataThreadQueue.put((0, 'shutdown', 'no data'))
        self.checkTimeOffsetNotification()
        return

    self.connectionIsOrWasFullyEstablished = True
    shared.timeOffsetWrongCount = 0

    # Command the corresponding sendDataThread to set its own
    # connectionIsOrWasFullyEstablished variable to True also
    self.sendDataThreadQueue.put(
        (0, 'connectionIsOrWasFullyEstablished', (self.services, self.sslSock)))

    if not self.initiatedConnection:
        shared.clientHasReceivedIncomingConnections = True
        queues.UISignalQueue.put(('setStatusIcon', 'green'))
    # We'll send out a ping every 5 minutes to make sure the connection
    # stays alive if there has been no other traffic to send lately.
    self.sock.settimeout(600)
    queues.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
    logger.debug(
        'Connection fully established with %s\n'
        'The size of the connectedHostsList is now %s\n'
        'The length of sendDataQueues is now: %s\n'
        'broadcasting addr from within connectionFullyEstablished function.',
        self.peer, len(shared.connectedHostsList), len(state.sendDataQueues))

    if self.initiatedConnection:
        state.networkProtocolAvailability[
            protocol.networkType(self.peer.host)] = True

    # we need to send our own objects to this node
    PendingUpload().add()

    # Let all of our peers know about this new node.
    for stream in self.remoteStreams:
        dataToSend = (int(time.time()), stream, self.services,
                      self.peer.host, self.remoteNodeIncomingPort)
        protocol.broadcastToSendDataQueues(
            (stream, 'advertisepeer', dataToSend))

    self.sendaddr()  # This is one large addr message to this one peer.
    if len(shared.connectedHostsList) > BMConfigParser().safeGetInt(
            "bitmessagesettings", "maxtotalconnections", 200):
        logger.info('We are connected to too many people. Closing connection.')
        if self.initiatedConnection:
            self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
                fatal=2, errorText="Thank you for providing a listening node.")))
        else:
            self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
                fatal=2, errorText="Server full, please try again later.")))
        self.sendDataThreadQueue.put((0, 'shutdown', 'no data'))
        return
    self.sendBigInv()
def sendMessages(self, address):
    """
    This function is called by the possibleNewPubkey function when
    that function sees that we now have the necessary pubkey to send
    one or more messages.
    """
    logger.info('We have been awaiting the arrival of this pubkey.')
    sqlExecute(
        '''UPDATE sent SET status='doingmsgpow', retrynumber=0
           WHERE toaddress=?
           AND (status='awaitingpubkey' or status='doingpubkeypow')
           AND folder='sent' ''', address)
    shared.workerQueue.put(('sendmessage', ''))
def sendMessages(self, ripe):
    """
    This function is called by the possibleNewPubkey function when
    that function sees that we now have the necessary pubkey to send
    one or more messages.
    """
    logger.info('We have been awaiting the arrival of this pubkey.')
    sqlExecute(
        '''UPDATE sent SET status='doingmsgpow'
           WHERE toripe=?
           AND (status='awaitingpubkey' or status='doingpubkeypow')
           AND folder='sent' ''', ripe)
    shared.workerQueue.put(('sendmessage', ''))
def test(self):
    """
    Test the connection settings. This routine tries to query a
    "getinfo" command, and builds either an error message or a
    success message with some info from it.
    """
    try:
        if self.nmctype == "namecoind":
            try:
                vers = self.callRPC("getinfo", [])["version"]
            except RPCError:
                vers = self.callRPC("getnetworkinfo", [])["version"]

            v3 = vers % 100
            vers = vers / 100
            v2 = vers % 100
            vers = vers / 100
            v1 = vers
            if v3 == 0:
                versStr = "0.%d.%d" % (v1, v2)
            else:
                versStr = "0.%d.%d.%d" % (v1, v2, v3)
            message = ('success', tr._translate(
                "MainWindow",
                'Success! Namecoind version %1 running.').arg(
                    unicode(versStr)))
        elif self.nmctype == "nmcontrol":
            res = self.callRPC("data", ["status"])
            prefix = "Plugin data running"
            if ("reply" in res) and res["reply"][:len(prefix)] == prefix:
                return ('success', tr._translate(
                    "MainWindow", 'Success! NMControl is up and running.'))
            logger.error("Unexpected nmcontrol reply: %s", res)
            message = ('failed', tr._translate(
                "MainWindow", 'Couldn\'t understand NMControl.'))
        else:
            print "Unsupported Namecoin type"
            sys.exit(1)
        return message
    except Exception:
        logger.info("Namecoin connection test failure")
        return ('failed', tr._translate(
            "MainWindow", "The connection to namecoin failed."))
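# A worked example of the packed-integer version arithmetic in test() above.
# namecoind's getinfo reports the version as a decimal integer with two
# digits per component; the sample value below is illustrative, not taken
# from a real namecoind build. Integer division mirrors the Python 2 '/'
# used in the source.
vers = 30712                        # hypothetical packed version
v3 = vers % 100                     # 12
vers = vers // 100                  # 307
v2 = vers % 100                     # 7
vers = vers // 100                  # 3
v1 = vers                           # 3
assert (v1, v2, v3) == (3, 7, 12)   # rendered as "0.3.7.12"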
def sendMessages(self, address):
    """
    This function is called by the possibleNewPubkey function when
    that function sees that we now have the necessary pubkey to send
    one or more messages.
    """
    logger.info('We have been awaiting the arrival of this pubkey.')
    sqlExecute(
        "UPDATE sent SET status='doingmsgpow', retrynumber=0"
        " WHERE toaddress=?"
        " AND (status='awaitingpubkey' OR status='doingpubkeypow')"
        " AND folder='sent'", address)
    queues.workerQueue.put(('sendmessage', ''))
def recinv(self, data):
    # this counts duplicates separately because they take up memory
    totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers = 0
    if len(shared.numberOfObjectsThatWeHaveYetToGetPerPeer) > 0:
        for key, value in shared.numberOfObjectsThatWeHaveYetToGetPerPeer.items():
            totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers += value
        with shared.printLock:
            print 'number of keys(hosts) in shared.numberOfObjectsThatWeHaveYetToGetPerPeer:', len(shared.numberOfObjectsThatWeHaveYetToGetPerPeer)
            print 'totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers = ', totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers

    numberOfItemsInInv, lengthOfVarint = decodeVarint(data[:10])
    if numberOfItemsInInv > 50000:
        sys.stderr.write('Too many items in inv message!')
        return
    if len(data) < lengthOfVarint + (numberOfItemsInInv * 32):
        print 'inv message doesn\'t contain enough data. Ignoring.'
        return

    if numberOfItemsInInv == 1:
        # we'll just request this data from the person
        # who advertised the object.
        if totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers > 200000 and \
                len(self.objectsThatWeHaveYetToGetFromThisPeer) > 1000:
            # inv flooding attack mitigation
            with shared.printLock:
                print 'We already have', totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers, 'items yet to retrieve from peers and over 1000 from this node in particular. Ignoring this inv message.'
            return
        self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware[
            data[lengthOfVarint:32 + lengthOfVarint]] = 0
        shared.numberOfInventoryLookupsPerformed += 1
        if data[lengthOfVarint:32 + lengthOfVarint] in shared.inventory:
            with shared.printLock:
                print 'Inventory (in memory) has inventory item already.'
        elif shared.isInSqlInventory(data[lengthOfVarint:32 + lengthOfVarint]):
            print 'Inventory (SQL on disk) has inventory item already.'
        else:
            self.sendgetdata(data[lengthOfVarint:32 + lengthOfVarint])
    else:
        # There are many items listed in this inv message. Let us create a
        # 'set' of objects we are aware of and a set of objects in this inv
        # message so that we can diff one from the other cheaply.
        startTime = time.time()
        advertisedSet = set()
        for i in range(numberOfItemsInInv):
            advertisedSet.add(
                data[lengthOfVarint + (32 * i):32 + lengthOfVarint + (32 * i)])
        objectsNewToMe = advertisedSet - shared.inventorySets[self.streamNumber]
        logger.info(
            'inv message lists %s objects. Of those %s are new to me.'
            ' It took %s seconds to figure that out.',
            numberOfItemsInInv, len(objectsNewToMe), time.time() - startTime)
        for item in objectsNewToMe:
            if totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers > 200000 and \
                    len(self.objectsThatWeHaveYetToGetFromThisPeer) > 1000:
                # inv flooding attack mitigation
                with shared.printLock:
                    print 'We already have', totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers, 'items yet to retrieve from peers and over', len(self.objectsThatWeHaveYetToGetFromThisPeer), 'from this node in particular. Ignoring the rest of this inv message.'
                break
            # helps us keep from sending inv messages to peers that already
            # know about the objects listed therein
            self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware[item] = 0
            # upon finishing dealing with an incoming message, the
            # receiveDataThread will request a random object from the peer
            # out of this data structure. This way if we get multiple inv
            # messages from multiple peers which list mostly the same
            # objects, we will make getdata requests for different random
            # objects from the various peers.
            self.objectsThatWeHaveYetToGetFromThisPeer[item] = 0
    if len(self.objectsThatWeHaveYetToGetFromThisPeer) > 0:
        shared.numberOfObjectsThatWeHaveYetToGetPerPeer[
            self.peer] = len(self.objectsThatWeHaveYetToGetFromThisPeer)
def AddPortMapping(self, externalPort, internalPort, internalClient,
                   protocol, description, leaseDuration=0, enabled=1):
    from debug import logger
    resp = self.soapRequest('WANIPConnection:1', 'AddPortMapping', [
        ('NewExternalPort', str(externalPort)),
        ('NewProtocol', protocol),
        ('NewInternalPort', str(internalPort)),
        ('NewInternalClient', internalClient),
        ('NewEnabled', str(enabled)),
        ('NewPortMappingDescription', str(description)),
        ('NewLeaseDuration', str(leaseDuration))
    ])
    self.extPort = externalPort
    logger.info(
        "Successfully established UPnP mapping for %s:%i on external port %i",
        internalClient, internalPort, externalPort)
    return resp
def checkackdata(self, data):
    # Let's check whether this is a message acknowledgement bound for us.
    if len(data) < 32:
        return
    if data[-32:] in shared.ackdataForWhichImWatching:
        logger.info('This object is an acknowledgement bound for me.')
        del shared.ackdataForWhichImWatching[data[-32:]]
        sqlExecute(
            'UPDATE sent SET status=?, lastactiontime=? WHERE ackdata=?',
            'ackreceived', int(time.time()), data[-32:])
        queues.UISignalQueue.put((
            'updateSentItemStatusByAckdata',
            (data[-32:], tr._translate(
                "MainWindow",
                'Acknowledgement of the message received %1').arg(
                    l10n.formatTimestamp()))))
    else:
        logger.debug('This object is not an acknowledgement bound for me.')
def connectionFullyEstablished(self):
    if self.connectionIsOrWasFullyEstablished:
        # there is no reason to run this function a second time
        return
    self.connectionIsOrWasFullyEstablished = True

    self.sslSock = self.sock
    if ((self.services & shared.NODE_SSL == shared.NODE_SSL) and
            shared.haveSSL(not self.initiatedConnection)):
        logger.debug("Initialising TLS")
        self.sslSock = ssl.wrap_socket(
            self.sock,
            keyfile=os.path.join(shared.codePath(), 'sslkeys', 'key.pem'),
            certfile=os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'),
            server_side=not self.initiatedConnection,
            ssl_version=ssl.PROTOCOL_TLSv1,
            do_handshake_on_connect=False,
            ciphers='AECDH-AES256-SHA')
        if hasattr(self.sslSock, "context"):
            self.sslSock.context.set_ecdh_curve("secp256k1")
        while True:
            try:
                self.sslSock.do_handshake()
                break
            except ssl.SSLError as e:
                if e.errno == 2:
                    select.select([self.sslSock], [self.sslSock], [])
                else:
                    break
            except:
                break
    # Command the corresponding sendDataThread to set its own
    # connectionIsOrWasFullyEstablished variable to True also
    self.sendDataThreadQueue.put(
        (0, 'connectionIsOrWasFullyEstablished', (self.services, self.sslSock)))

    if not self.initiatedConnection:
        shared.clientHasReceivedIncomingConnections = True
        shared.UISignalQueue.put(('setStatusIcon', 'green'))
    # We'll send out a pong every 5 minutes to make sure the connection
    # stays alive if there has been no other traffic to send lately.
    self.sock.settimeout(600)
    shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
    logger.debug(
        'Connection fully established with %s\n'
        'The size of the connectedHostsList is now %s\n'
        'The length of sendDataQueues is now: %s\n'
        'broadcasting addr from within connectionFullyEstablished function.',
        self.peer, len(shared.connectedHostsList), len(shared.sendDataQueues))

    # Let all of our peers know about this new node.
    dataToSend = (int(time.time()), self.streamNumber, 1,
                  self.peer.host, self.remoteNodeIncomingPort)
    shared.broadcastToSendDataQueues(
        (self.streamNumber, 'advertisepeer', dataToSend))
    self.sendaddr()  # This is one large addr message to this one peer.
    if not self.initiatedConnection and len(shared.connectedHostsList) > 200:
        logger.info('We are connected to too many people. Closing connection.')
        self.sendDataThreadQueue.put((0, 'shutdown', 'no data'))
        return
    self.sendBigInv()
def ackDataHasAVaildHeader(self, ackData):
    if len(ackData) < shared.Header.size:
        logger.info(
            'The length of ackData is unreasonably short. Not sending ackData.')
        return False

    magic, command, payloadLength, checksum = shared.Header.unpack(
        ackData[:shared.Header.size])
    if magic != 0xE9BEB4D9:
        logger.info('Ackdata magic bytes were wrong. Not sending ackData.')
        return False
    payload = ackData[shared.Header.size:]
    if len(payload) != payloadLength:
        logger.info(
            'ackData payload length doesn\'t match the payload length'
            ' specified in the header. Not sending ackdata.')
        return False
    # ~1.6 MB, which is the maximum possible size of an inv message. The
    # largest message should be either an inv or a getdata message at
    # 1.6 MB in size. That doesn't mean that the object may be that big.
    # The shared.checkAndShareObjectWithPeers function will verify that it
    # is no larger than 2^18 bytes.
    if payloadLength > 1600100:
        return False
    # test the checksum in the message.
    if checksum != hashlib.sha512(payload).digest()[0:4]:
        logger.info('ackdata checksum wrong. Not sending ackdata.')
        return False
    command = command.rstrip('\x00')
    if command != 'object':
        return False
    return True
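# The header fields unpacked above follow the Bitmessage wire format: a
# 4-byte magic, a NUL-padded 12-byte command, a 4-byte payload length and the
# first 4 bytes of the payload's SHA-512 as checksum. A minimal sketch of
# building such a header; the struct.Struct('>L12sL4s') layout is inferred
# from the unpack call above, not quoted from the shared module.
import hashlib
import struct

_Header = struct.Struct('>L12sL4s')

def _sketch_create_packet(command, payload=''):
    return _Header.pack(
        0xE9BEB4D9,                              # magic
        command,                                 # '12s' pads with NULs
        len(payload),                            # payload length
        hashlib.sha512(payload).digest()[:4],    # checksum
    ) + payload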
def handleSubmission(id, lang, code):
    # Maximum time
    try:
        with tlim(5):
            logger.info(f'[PROCESSING] {multiprocessing.current_process().name}')
            # Get compiler
            c = Client.__compiler.getCompiler(lang)
            s = Submission(id, lang, code, c)
            s.start()
    except TimeoutException:
        return
    finally:
        pass
def peerValidityChecks(self):
    if self.remoteProtocolVersion < 3:
        self.append_write_buf(protocol.assembleErrorMessage(
            fatal=2,
            errorText="You are using an old protocol. Closing connection."))
        logger.debug(
            'Closing connection to old protocol version %s, node: %s',
            self.remoteProtocolVersion, self.destination)
        return False
    if self.timeOffset > BMProto.maxTimeOffset:
        self.append_write_buf(protocol.assembleErrorMessage(
            fatal=2,
            errorText="Your time is too far in the future compared to mine. Closing connection."))
        logger.info(
            "%s's time is too far in the future (%s seconds). Closing connection to it.",
            self.destination, self.timeOffset)
        shared.timeOffsetWrongCount += 1
        return False
    elif self.timeOffset < -BMProto.maxTimeOffset:
        self.append_write_buf(protocol.assembleErrorMessage(
            fatal=2,
            errorText="Your time is too far in the past compared to mine. Closing connection."))
        logger.info(
            "%s's time is too far in the past (timeOffset %s seconds). Closing connection to it.",
            self.destination, self.timeOffset)
        shared.timeOffsetWrongCount += 1
        return False
    else:
        shared.timeOffsetWrongCount = 0
    if not self.streams:
        self.append_write_buf(protocol.assembleErrorMessage(
            fatal=2,
            errorText="We don't have shared stream interests. Closing connection."))
        logger.debug(
            'Closed connection to %s because there is no overlapping interest in streams.',
            self.destination)
        return False
    if self.destination in network.connectionpool.BMConnectionPool().inboundConnections:
        try:
            if not protocol.checkSocksIP(self.destination.host):
                self.append_write_buf(protocol.assembleErrorMessage(
                    fatal=2,
                    errorText="Too many connections from your IP. Closing connection."))
                logger.debug(
                    'Closed connection to %s because we are already connected to that IP.',
                    self.destination)
                return False
        except:
            pass
    if network.connectionpool.BMConnectionPool().isAlreadyConnected(self.nonce):
        self.append_write_buf(protocol.assembleErrorMessage(
            fatal=2,
            errorText="I'm connected to myself. Closing connection."))
        logger.debug(
            "Closed connection to %s because I'm connected to myself.",
            self.destination)
        return False
    return True
def dns():
    # DNS bootstrap. This could be programmed to use the SOCKS proxy to do
    # the DNS lookup some day but for now we will just rely on the entries
    # in defaultKnownNodes.py. Hopefully either they are up to date or the
    # user has run Bitmessage recently without SOCKS turned on and received
    # good bootstrap nodes using that method.
    # TODO: Clarify the integrity of DNS data?
    if shared.config.get('bitmessagesettings', 'socksproxytype') == 'none':
        try:
            for item in socket.getaddrinfo('bootstrap8080.bitmessage.org', 80):
                logger.info(
                    'Adding %s to knownNodes based on DNS bootstrap method',
                    item[4][0])
                shared.knownNodes[1][shared.Peer(item[4][0], 8080)] = \
                    int(time.time())
        except:
            logger.error('bootstrap8080.bitmessage.org DNS bootstrapping failed.')
        try:
            for item in socket.getaddrinfo('bootstrap8444.bitmessage.org', 80):
                logger.info(
                    'Adding %s to knownNodes based on DNS bootstrap method',
                    item[4][0])
                shared.knownNodes[1][shared.Peer(item[4][0], 8444)] = \
                    int(time.time())
        except:
            logger.error('bootstrap8444.bitmessage.org DNS bootstrapping failed.')
    elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
        shared.knownNodes[1][shared.Peer('quzwelsuziwqgpt2.onion', 8444)] = \
            int(time.time())
        logger.debug("Adding quzwelsuziwqgpt2.onion:8444 to knownNodes.")
        for port in [8080, 8444]:
            logger.debug("Resolving %i through SOCKS...", port)
            address_family = socket.AF_INET
            sock = socks.socksocket(address_family, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.settimeout(20)
            proxytype = socks.PROXY_TYPE_SOCKS5
            sockshostname = shared.config.get('bitmessagesettings', 'sockshostname')
            socksport = shared.config.getint('bitmessagesettings', 'socksport')
            # Do domain name lookups through the proxy;
            # though this setting doesn't really matter
            # since we won't be doing any domain name lookups anyway.
            rdns = True
            if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
                socksusername = shared.config.get('bitmessagesettings', 'socksusername')
                sockspassword = shared.config.get('bitmessagesettings', 'sockspassword')
                sock.setproxy(proxytype, sockshostname, socksport, rdns,
                              socksusername, sockspassword)
            else:
                sock.setproxy(proxytype, sockshostname, socksport, rdns)
            ip = None  # so the check below is safe if resolving raised
            try:
                ip = sock.resolve("bootstrap" + str(port) + ".bitmessage.org")
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
            except:
                logger.error("SOCKS DNS resolving failed", exc_info=True)
            if ip is not None:
                logger.info(
                    'Adding %s to knownNodes based on SOCKS DNS bootstrap method',
                    ip)
                shared.knownNodes[1][shared.Peer(ip, port)] = time.time()
    else:
        logger.info(
            'DNS bootstrap skipped because the proxy type does not'
            ' support DNS resolution.')
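# Why item[4][0] above is the IP address: each socket.getaddrinfo() entry is
# a 5-tuple (family, socktype, proto, canonname, sockaddr) and, for IPv4,
# sockaddr is (host, port). A small illustrative probe ('localhost' stands in
# for a bootstrap host here):
import socket

for item in socket.getaddrinfo('localhost', 80):
    family, socktype, proto, canonname, sockaddr = item
    ip = sockaddr[0]    # what the bootstrap loop stores in knownNodes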
def checkackdata(self, data):
    # Let's check whether this is a message acknowledgement bound for us.
    if len(data) < 32:
        return
    # bypass nonce and time, retain object type/version/stream + body
    readPosition = 16
    if data[readPosition:] in shared.ackdataForWhichImWatching:
        logger.info('This object is an acknowledgement bound for me.')
        del shared.ackdataForWhichImWatching[data[readPosition:]]
        sqlExecute(
            'UPDATE sent SET status=?, lastactiontime=? WHERE ackdata=?',
            'ackreceived', int(time.time()), data[readPosition:])
        queues.UISignalQueue.put((
            'updateSentItemStatusByAckdata',
            (data[readPosition:], tr._translate(
                "MainWindow",
                'Acknowledgement of the message received %1').arg(
                    l10n.formatTimestamp()))))
    else:
        logger.debug('This object is not an acknowledgement bound for me.')
def recinv(self, data):
    numberOfItemsInInv, lengthOfVarint = decodeVarint(data[:10])
    if numberOfItemsInInv > 50000:
        sys.stderr.write('Too many items in inv message!')
        return
    if len(data) < lengthOfVarint + (numberOfItemsInInv * 32):
        logger.info('inv message doesn\'t contain enough data. Ignoring.')
        return

    startTime = time.time()
    advertisedSet = set()
    for i in range(numberOfItemsInInv):
        advertisedSet.add(
            data[lengthOfVarint + (32 * i):32 + lengthOfVarint + (32 * i)])
    objectsNewToMe = advertisedSet
    for stream in self.streamNumber:
        objectsNewToMe -= Inventory().hashes_by_stream(stream)
    logger.info(
        'inv message lists %s objects. Of those %s are new to me.'
        ' It took %s seconds to figure that out.',
        numberOfItemsInInv, len(objectsNewToMe), time.time() - startTime)
    for item in random.sample(objectsNewToMe, len(objectsNewToMe)):
        self.downloadQueue.put(item)
def initCL():
    global ctx, queue, program, gpus, hash_dt
    try:
        hash_dt = numpy.dtype([("target", numpy.uint64), ("v", numpy.str_, 73)])
        for platform in cl.get_platforms():
            gpus.extend(platform.get_devices(device_type=cl.device_type.GPU))
        if len(gpus) > 0:
            ctx = cl.Context(devices=gpus)
            queue = cl.CommandQueue(ctx)
            with open(os.path.join(
                    codePath(), "bitmsghash", "bitmsghash.cl"), "r") as f:
                fstr = f.read()
            program = cl.Program(ctx, fstr).build(options="")
            logger.info("Loaded OpenCL kernel")
        else:
            logger.info("No OpenCL GPUs found")
            ctx = False
    except Exception as e:
        logger.error("OpenCL fail: ", exc_info=True)
        ctx = False
def handle_read(self):
    try:
        # wait for write buffer flush
        if self.tlsStarted and not self.tlsDone and not self.write_buf:
            # logger.debug("%s:%i TLS handshaking (read)",
            #              self.destination.host, self.destination.port)
            self.tls_handshake()
        else:
            # logger.debug("%s:%i Not TLS handshaking (read)",
            #              self.destination.host, self.destination.port)
            return AdvancedDispatcher.handle_read(self)
    except AttributeError:
        return AdvancedDispatcher.handle_read(self)
    except ssl.SSLError as err:
        if err.errno == ssl.SSL_ERROR_WANT_READ:
            return
        elif err.errno in _DISCONNECTED_SSL:
            self.handle_close()
            return
        logger.info("SSL Error: %s", str(err))
        self.handle_close()
        return
def get(self, result_ids):
    """
    Wait and return result of tasks with ids specified
    in result_ids list
    """
    results = []
    for result_id in result_ids:
        result_task = None
        try:
            if not self._is_local_work:
                async_result = AsyncResult(result_id)
                result_task = async_result.get()
                # transfer files if needed
                for lc in result_task.loadcases:
                    try:
                        if lc.is_filetransfer:
                            logger.info('Begin filetransfer')
                            from transfer_util import do_file_transfer
                            # create directory for loadcase
                            directory = os.path.join(os.getcwd(), lc.name)
                            if not os.path.exists(directory):
                                os.makedirs(directory)
                            do_file_transfer(
                                result_task.result[lc.name]['host'],
                                directory,
                                result_task.result[lc.name]['file'])
                    except Exception as ex:
                        import traceback
                        traceback.print_exc()
            else:
                result_task = self._id_to_task.pop(result_id)
                # it's a new instance of Task, therefore
                # the id field needs to be reassigned
                result_task.id = result_id
        except Exception as e:
            # TODO: proper error handling; a failed task yields no result
            result_task = None
        results.append(result_task.result if result_task else None)
    result_dict = _list_to_dict(results)
    # if only one loadcase calculated, return value of the one loadcase
    # else return all dictionary
    # if len(result_dict) is 1:
    #     return result_dict[result_dict.keys()[0]]
    # else:
    return result_dict
def run(self):
    while True:
        objectType, data = queues.objectProcessorQueue.get()
        self.checkackdata(data)
        try:
            if objectType == 0:  # getpubkey
                self.processgetpubkey(data)
            elif objectType == 1:  # pubkey
                self.processpubkey(data)
            elif objectType == 2:  # msg
                self.processmsg(data)
            elif objectType == 3:  # broadcast
                self.processbroadcast(data)
            elif objectType == 'checkShutdownVariable':
                # is more of a command, not an object type. Is used to get
                # this thread past the queue.get() so that it will check
                # the shutdown variable.
                pass
            else:
                if isinstance(objectType, int):
                    logger.info(
                        'Don\'t know how to handle object type 0x%08X',
                        objectType)
                else:
                    logger.info(
                        'Don\'t know how to handle object type %s', objectType)
        except helper_msgcoding.DecompressionSizeException as e:
            logger.error(
                "The object is too big after decompression (stopped"
                " decompressing at %ib, your configured limit %ib). Ignoring",
                e.size, BMConfigParser().safeGetInt("zlib", "maxsize"))
        except varintDecodeError as e:
            logger.debug(
                "There was a problem with a varint while processing an"
                " object. Some details: %s", e)
        except Exception as e:
            logger.critical(
                "Critical error within objectProcessorThread: \n%s",
                traceback.format_exc())

        if state.shutdown:
            # Wait just a moment for most of the connections to close
            time.sleep(.5)
            numberOfObjectsThatWereInTheObjectProcessorQueue = 0
            with SqlBulkExecute() as sql:
                while queues.objectProcessorQueue.curSize > 0:
                    objectType, data = queues.objectProcessorQueue.get()
                    sql.execute(
                        '''INSERT INTO objectprocessorqueue VALUES (?,?)''',
                        objectType, data)
                    numberOfObjectsThatWereInTheObjectProcessorQueue += 1
            logger.debug(
                'Saved %s objects from the objectProcessorQueue to disk.'
                ' objectProcessorThread exiting.',
                numberOfObjectsThatWereInTheObjectProcessorQueue)
            state.shutdown = 2
            break
def bm_command_object(self):
    objectOffset = self.payloadOffset
    nonce, expiresTime, objectType, version, streamNumber = \
        self.decode_payload_content("QQIvv")
    self.object = BMObject(
        nonce, expiresTime, objectType, version, streamNumber,
        self.payload, self.payloadOffset)

    if len(self.payload) - self.payloadOffset > BMProto.maxObjectPayloadSize:
        logger.info(
            'The payload length of this object is too large (%s bytes).'
            ' Ignoring it.', len(self.payload) - self.payloadOffset)
        raise BMProtoExcessiveDataError()

    try:
        self.object.checkProofOfWorkSufficient()
        self.object.checkEOLSanity()
        self.object.checkAlreadyHave()
    except (BMObjectExpiredError, BMObjectAlreadyHaveError,
            BMObjectInsufficientPOWError) as e:
        BMProto.stopDownloadingObject(self.object.inventoryHash)
        raise e
    try:
        self.object.checkStream()
    except (BMObjectUnwantedStreamError,) as e:
        BMProto.stopDownloadingObject(
            self.object.inventoryHash,
            BMConfigParser().get("inventory", "acceptmismatch"))
        if not BMConfigParser().get("inventory", "acceptmismatch"):
            raise e

    try:
        self.object.checkObjectByType()
        objectProcessorQueue.put(
            (self.object.objectType, buffer(self.object.data)))
    except BMObjectInvalidError as e:
        BMProto.stopDownloadingObject(self.object.inventoryHash, True)
    else:
        try:
            del state.missingObjects[self.object.inventoryHash]
        except KeyError:
            pass

    Inventory()[self.object.inventoryHash] = (
        self.object.objectType, self.object.streamNumber,
        buffer(self.payload[objectOffset:]), self.object.expiresTime,
        buffer(self.object.tag))
    invQueue.put((
        self.object.streamNumber, self.object.inventoryHash,
        self.destination))
    return True
def bm_command_getdata(self):
    items = self.decode_payload_content("l32s")
    # skip?
    if time.time() < self.skipUntil:
        return True
    # TODO make this more asynchronous
    random.shuffle(items)
    for i in map(str, items):
        if i in Dandelion().hashMap and \
                self != Dandelion().hashMap[i]:
            self.antiIntersectionDelay()
            logger.info(
                '%s asked for a stem object we didn\'t offer to it.',
                self.destination)
            break
        else:
            try:
                self.append_write_buf(
                    protocol.CreatePacket('object', Inventory()[i].payload))
            except KeyError:
                self.antiIntersectionDelay()
                logger.info(
                    '%s asked for an object we don\'t have.',
                    self.destination)
                break
    # I think that aborting after the first missing/stem object is more
    # secure when using random reordering, as the recipient won't know
    # exactly which objects we refuse to deliver.
    return True
def checkAndShareObjectWithPeers(data):
    """
    This function is called after either receiving an object
    off of the wire or after receiving one as ackdata.
    Returns the length of time that we should reserve to process
    this message if we are receiving it off of the wire.
    """
    if len(data) > 2 ** 18:
        logger.info(
            'The payload length of this object is too large (%s bytes).'
            ' Ignoring it.', len(data))
        return 0
    # Let us check to make sure that the proof of work is sufficient.
    if not isProofOfWorkSufficient(data):
        logger.info('Proof of work is insufficient.')
        return 0

    endOfLifeTime, = unpack('>Q', data[8:16])
    # The TTL may not be larger than 28 days + 3 hours of wiggle room
    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800:
        logger.info(
            'This object\'s End of Life time is too far in the future.'
            ' Ignoring it. Time is %s', endOfLifeTime)
        return 0
    # The EOL time was more than an hour ago. That's too much.
    if endOfLifeTime - int(time.time()) < - 3600:
        logger.info(
            'This object\'s End of Life time was more than an hour ago.'
            ' Ignoring the object. Time is %s', endOfLifeTime)
        return 0
    intObjectType, = unpack('>I', data[16:20])
    try:
        if intObjectType == 0:
            _checkAndShareGetpubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 1:
            _checkAndSharePubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 2:
            _checkAndShareMsgWithPeers(data)
            return 0.6
        elif intObjectType == 3:
            _checkAndShareBroadcastWithPeers(data)
            return 0.6
        else:
            _checkAndShareUndefinedObjectWithPeers(data)
            return 0.6
    except varintDecodeError as e:
        logger.debug(
            "There was a problem with a varint while checking to see"
            " whether it was appropriate to share an object with peers."
            " Some details: %s", e)
    except Exception as e:
        logger.critical(
            'There was a problem while checking to see whether it was'
            ' appropriate to share an object with peers. This is'
            ' definitely a bug! \n%s', traceback.format_exc())
    return 0
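# For reference, the proof-of-work test that isProofOfWorkSufficient() above
# performs per the Bitmessage protocol: the trial value is the first 8 bytes
# of a double SHA-512 over (nonce || sha512(rest of payload)), and it must
# not exceed a target derived from the payload length and TTL. A minimal,
# standalone sketch; the network default difficulty constants of 1000 are
# assumed from the protocol spec, and this is not the module's own code.
import hashlib
import time
from struct import unpack

def _sketch_pow_sufficient(data, nonceTrialsPerByte=1000,
                           payloadLengthExtraBytes=1000):
    endOfLifeTime, = unpack('>Q', data[8:16])
    TTL = max(endOfLifeTime - int(time.time()), 300)
    initialHash = hashlib.sha512(data[8:]).digest()
    trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(
        data[:8] + initialHash).digest()).digest()[:8])
    length = len(data) + payloadLengthExtraBytes
    target = 2 ** 64 // (nonceTrialsPerByte * (length + TTL * length // 2 ** 16))
    return trialValue <= target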