Example #1
def _doFastPoW(target, initialHash):
    import shared
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    logger.debug('Creating POW pool with %s workers.' % (pool_size))
    pool = Pool(processes=pool_size)
    logger.debug('Created POW pool.')
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args = (i, initialHash, target, pool_size)))
    while True:
        if shared.shutdown:
            pool.terminate()
            while True:
                time.sleep(10) # Don't let this thread return here; it would return nothing and cause an exception in bitmessagemain.py
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join() #Wait for the workers to exit...
                return result[0], result[1]
        time.sleep(0.2)
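
The pool dispatch above relies on a _pool_worker helper that the excerpt doesn't show. A minimal sketch of such a worker, assuming the PyBitmessage convention that worker i starts at nonce i and strides by pool_size so the workers partition the nonce space (the real function also lowers the process priority, which is omitted here):

import hashlib
from struct import pack, unpack

def _pool_worker(nonce, initialHash, target, pool_size):
    # Each worker tests nonces nonce, nonce+pool_size, nonce+2*pool_size, ...
    trialValue = float('inf')
    while trialValue > target:
        nonce += pool_size
        trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(
            pack('>Q', nonce) + initialHash).digest()).digest()[0:8])
    return [trialValue, nonce]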
 def possibleNewPubkey(self, address):
     """
     We have inserted a pubkey into our pubkey table which we received from a
     pubkey, msg, or broadcast message. It might be one that we have been
     waiting for. Let's check.
     """
     
     # For address versions <= 3, we wait on a key with the correct address version,
     # stream number, and RIPE hash.
     status, addressVersion, streamNumber, ripe = decodeAddress(address)
     if addressVersion <= 3:
         if address in shared.neededPubkeys:
             del shared.neededPubkeys[address]
             self.sendMessages(address)
         else:
             logger.debug('We don\'t need this pub key. We didn\'t ask for it. For address: %s' % address)
     # For address versions >= 4, we wait on a pubkey with the correct tag.
     # Let us create the tag from the address and see if we were waiting
     # for it.
     elif addressVersion >= 4:
         tag = hashlib.sha512(hashlib.sha512(encodeVarint(
             addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()[32:]
         if tag in shared.neededPubkeys:
             del shared.neededPubkeys[tag]
             self.sendMessages(address)
 def _sleepForTimingAttackMitigation(self, sleepTime):
     # We don't need to do the timing attack mitigation if we are
     # only connected to the trusted peer because we can trust the
     # peer not to attack
     if sleepTime > 0 and doTimingAttackMitigation and shared.trustedPeer is None:
         logger.debug('Timing attack mitigation: Sleeping for ' + str(sleepTime) + ' seconds.')
         time.sleep(sleepTime)
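
The v4 tag computed in possibleNewPubkey above can be reproduced standalone. This sketch assumes PyBitmessage's big-endian varint encoding (addresses.encodeVarint) and shows that the tag is simply the second half of a double SHA-512 over the address version, stream number, and 20-byte RIPE hash:

import hashlib
from struct import pack

def encodeVarint(n):
    # Big-endian Bitcoin-style varint, as in PyBitmessage's addresses module
    if n < 0xfd:
        return pack('>B', n)
    if n <= 0xffff:
        return pack('>B', 0xfd) + pack('>H', n)
    if n <= 0xffffffff:
        return pack('>B', 0xfe) + pack('>I', n)
    return pack('>B', 0xff) + pack('>Q', n)

def computeV4Tag(addressVersion, streamNumber, ripe):
    doubleHash = hashlib.sha512(hashlib.sha512(
        encodeVarint(addressVersion) + encodeVarint(streamNumber) + ripe
    ).digest()).digest()
    return doubleHash[32:]  # bytes 32..63; the first 32 bytes serve as the decryption key for v4 pubkeys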
Example #4
def run(target, initialHash):
    if 'linux' in sys.platform:
        logger.debug('calling _doSafePoW as a TEMPORARY fix.')
        return _doSafePoW(target, initialHash)
        #return _doFastPoW(target, initialHash)
    else:
        return _doSafePoW(target, initialHash)
Example #5
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args = (i, initialHash, target, pool_size)))
    while True:
        if shutdown >= 1:
            pool.terminate()
            raise Exception("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join() #Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
    def recgetdata(self, data):
        numberOfRequestedInventoryItems, lengthOfVarint = decodeVarint(
            data[:10])
        if len(data) < lengthOfVarint + (32 * numberOfRequestedInventoryItems):
            logger.debug('getdata message does not contain enough data. Ignoring.')
            return
        for i in xrange(numberOfRequestedInventoryItems):
            hash = data[lengthOfVarint + (
                i * 32):32 + lengthOfVarint + (i * 32)]
            logger.debug('received getdata request for item:' + hash.encode('hex'))

            shared.numberOfInventoryLookupsPerformed += 1
            shared.inventoryLock.acquire()
            if hash in shared.inventory:
                objectType, streamNumber, payload, expiresTime, tag = shared.inventory[hash]
                shared.inventoryLock.release()
                self.sendObject(payload)
            else:
                shared.inventoryLock.release()
                queryreturn = sqlQuery(
                    '''select payload from inventory where hash=? and expirestime>=?''',
                    hash,
                    int(time.time()))
                if queryreturn != []:
                    for row in queryreturn:
                        payload, = row
                    self.sendObject(payload)
                else:
                    logger.warning('%s asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. We probably cleaned it out after advertising it but before they got around to asking for it.' % (self.peer,))
 def peerValidityChecks(self):
     if self.remoteProtocolVersion < 3:
         self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
             fatal=2, errorText="You are using an old protocol. Closing connection.")))
         logger.debug('Closing connection to old protocol version ' + str(self.remoteProtocolVersion) + ' node: ' + str(self.peer))
         return False
     if self.timeOffset > 3600:
         self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
             fatal=2, errorText="Your time is too far in the future compared to mine. Closing connection.")))
         logger.info("%s's time is too far in the future (%s seconds). Closing connection to it.", self.peer, self.timeOffset)
         shared.timeOffsetWrongCount += 1
         time.sleep(2)
         return False
     elif self.timeOffset < -3600:
         self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
             fatal=2, errorText="Your time is too far in the past compared to mine. Closing connection.")))
         logger.info("%s's time is too far in the past (timeOffset %s seconds). Closing connection to it.", self.peer, self.timeOffset)
         shared.timeOffsetWrongCount += 1
         return False
     else:
         shared.timeOffsetWrongCount = 0
     if len(self.streamNumber) == 0:
         self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
             fatal=2, errorText="We don't have shared stream interests. Closing connection.")))
         logger.debug('Closed connection to ' + str(self.peer) + ' because there is no overlapping interest in streams.')
         return False
     return True
    def run(self):
        while True:
            objectType, data = shared.objectProcessorQueue.get()

            try:
                if objectType == 0: # getpubkey
                    self.processgetpubkey(data)
                elif objectType == 1: #pubkey
                    self.processpubkey(data)
                elif objectType == 2: #msg
                    self.processmsg(data)
                elif objectType == 3: #broadcast
                    self.processbroadcast(data)
                elif objectType == 'checkShutdownVariable': # is more of a command, not an object type. Is used to get this thread past the queue.get() so that it will check the shutdown variable.
                    pass
                else:
                    logger.critical('Error! Bug! The class_objectProcessor was passed an object type it doesn\'t recognize: %s' % str(objectType))
            except varintDecodeError as e:
                logger.debug("There was a problem with a varint while processing an object. Some details: %s" % e)
            except Exception as e:
                logger.critical("Critical error within objectProcessorThread: \n%s" % traceback.format_exc())

            if shared.shutdown:
                time.sleep(.5) # Wait just a moment for most of the connections to close
                numberOfObjectsThatWereInTheObjectProcessorQueue = 0
                with SqlBulkExecute() as sql:
                    while shared.objectProcessorQueue.curSize > 0:
                        objectType, data = shared.objectProcessorQueue.get()
                        sql.execute('''INSERT INTO objectprocessorqueue VALUES (?,?)''',
                                   objectType,data)
                        numberOfObjectsThatWereInTheObjectProcessorQueue += 1
                logger.debug('Saved %s objects from the objectProcessorQueue to disk. objectProcessorThread exiting.' % str(numberOfObjectsThatWereInTheObjectProcessorQueue))
                shared.shutdown = 2
                break
Example #9
    def checkBroadcast(self):
        if len(self.data) < 180:
            logger.debug('The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.')
            raise BMObjectInvalidError()

        # this isn't supported anymore
        if self.version < 2:
            raise BMObjectInvalidError()
Example #10
def resendMsg(ackdata):
    logger.debug('It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.')
    sqlExecute(
        '''UPDATE sent SET status='msgqueued' WHERE ackdata=?''',
        ackdata)
    shared.workerQueue.put(('sendmessage', ''))
    shared.UISignalQueue.put((
        'updateStatusBar', 'Doing work necessary to again attempt to deliver a message...'))
Example #11
 def handle_connect(self):
     self.set_state("init")
     try:
         AdvancedDispatcher.handle_connect(self)
     except socket.error as e:
         if e.errno in asyncore._DISCONNECTED:
             logger.debug("%s:%i: Connection failed: %s", self.destination.host, self.destination.port, str(e))
             return
     self.state_init()
Example #12
def _doSafePoW(target, initialHash):
    logger.debug("Safe PoW start")
    nonce = 0
    trialValue = float('inf')
    while trialValue > target:
        nonce += 1
        trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
    logger.debug("Safe PoW done")
    return [trialValue, nonce]
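
As a usage sketch: the caller computes initialHash as the single SHA-512 of the object payload (without the nonce) and prepends the winning nonce afterward, which is the Bitmessage wire format. The payload and target below are illustrative placeholders, with the target kept deliberately easy:

import hashlib
from struct import pack

payload = '\x00' * 50        # object payload without the leading 8-byte nonce (illustrative)
target = 2 ** 48             # deliberately easy target for demonstration, not a network value
initialHash = hashlib.sha512(payload).digest()

trialValue, nonce = _doSafePoW(target, initialHash)
assert trialValue <= target
wirePayload = pack('>Q', nonce) + payload  # the nonce is prepended on the wire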
Example #13
def dns():
    # DNS bootstrap. This could be programmed to use the SOCKS proxy to do the
    # DNS lookup some day but for now we will just rely on the entries in
    # defaultKnownNodes.py. Hopefully either they are up to date or the user
    # has run Bitmessage recently without SOCKS turned on and received good
    # bootstrap nodes using that method.
    # TODO: Clarify the integrity of DNS data?
    if shared.config.get('bitmessagesettings', 'socksproxytype') == 'none':
        try:
            for item in socket.getaddrinfo('bootstrap8080.bitmessage.org', 80):
                logger.info('Adding ' + item[4][0] + ' to knownNodes based on DNS bootstrap method')
                shared.knownNodes[1][shared.Peer(item[4][0], 8080)] = int(time.time())
        except:
            logger.error('bootstrap8080.bitmessage.org DNS bootstrapping failed.')
        try:
            for item in socket.getaddrinfo('bootstrap8444.bitmessage.org', 80):
                logger.info('Adding ' + item[4][0] + ' to knownNodes based on DNS bootstrap method')
                shared.knownNodes[1][shared.Peer(item[4][0], 8444)] = int(time.time())
        except:
            logger.error('bootstrap8444.bitmessage.org DNS bootstrapping failed.')
    elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
        shared.knownNodes[1][shared.Peer('quzwelsuziwqgpt2.onion', 8444)] = int(time.time())
        logger.debug("Adding quzwelsuziwqgpt2.onion:8444 to knownNodes.")
        for port in [8080, 8444]:
            logger.debug("Resolving %i through SOCKS...", port)
            address_family = socket.AF_INET
            sock = socks.socksocket(address_family, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.settimeout(20)
            proxytype = socks.PROXY_TYPE_SOCKS5
            sockshostname = shared.config.get('bitmessagesettings', 'sockshostname')
            socksport = shared.config.getint('bitmessagesettings', 'socksport')

            # Do domain name lookups through the proxy;
            # though this setting doesn't really matter
            # since we won't be doing any domain name lookups anyway.
            rdns = True

            if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
                socksusername = shared.config.get('bitmessagesettings', 'socksusername')
                sockspassword = shared.config.get('bitmessagesettings', 'sockspassword')
                sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
            else:
                sock.setproxy(
                    proxytype, sockshostname, socksport, rdns)
            ip = None  # make sure `ip` is defined even if the SOCKS resolve below fails
            try:
                ip = sock.resolve("bootstrap" + str(port) + ".bitmessage.org")
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
            except:
                logger.error("SOCKS DNS resolving failed", exc_info=True)
            if ip is not None:
                logger.info('Adding ' + ip + ' to knownNodes based on SOCKS DNS bootstrap method')
                shared.knownNodes[1][shared.Peer(ip, port)] = time.time()
    else:
        logger.info('DNS bootstrap skipped because the proxy type does not support DNS resolution.')
Example #14
def _doCPoW(target, initialHash):
    h = initialHash
    m = target
    out_h = ctypes.pointer(ctypes.create_string_buffer(h, 64))
    out_m = ctypes.c_ulonglong(m)
    logger.debug("C PoW start")
    nonce = bmpow(out_h, out_m)
    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
    logger.debug("C PoW done")
    return [trialValue, nonce]
Example #15
    def process_message(self, peer, mailfrom, rcpttos, data):
#        print 'Receiving message from:', peer
        p = re.compile(".*<([^>]+)>")
        if not hasattr(self.channel, "auth") or not self.channel.auth:
            logger.error("Missing or invalid auth")
            return
        try:
            self.msg_headers = Parser().parsestr(data)
        except:
            logger.error("Invalid headers")
            return

        try:
            sender, domain = p.sub(r'\1', mailfrom).split("@")
            if domain != SMTPDOMAIN:
                raise Exception("Bad domain %s", domain)
            if sender not in BMConfigParser().addresses():
                raise Exception("Nonexisting user %s", sender)
        except Exception as err:
            logger.debug("Bad envelope from %s: %s", mailfrom, repr(err))
            msg_from = self.decode_header("from")
            try:
                msg_from = p.sub(r'\1', self.decode_header("from")[0])
                sender, domain = msg_from.split("@")
                if domain != SMTPDOMAIN:
                    raise Exception("Bad domain %s", domain)
                if sender not in BMConfigParser().addresses():
                    raise Exception("Nonexisting user %s", sender)
            except Exception as err:
                logger.error("Bad headers from %s: %s", msg_from, repr(err))
                return

        try:
            msg_subject = self.decode_header('subject')[0]
        except:
            msg_subject = "Subject missing..."

        msg_tmp = email.message_from_string(data)
        body = u''
        for part in msg_tmp.walk():
            if part and part.get_content_type() == "text/plain":
                body += part.get_payload(decode=1).decode(part.get_content_charset('utf-8'), errors='replace')

        for to in rcpttos:
            try:
                rcpt, domain = p.sub(r'\1', to).split("@")
                if domain != SMTPDOMAIN:
                    raise Exception("Bad domain %s", domain)
                logger.debug("Sending %s to %s about %s", sender, rcpt, msg_subject)
                self.send(sender, rcpt, msg_subject, body)
                logger.info("Relayed %s to %s", sender, rcpt) 
            except Exception as err:
                logger.error( "Bad to %s: %s", to, repr(err))
                continue
        return
Example #16
def _doSafePoW(target, initialHash):
    logger.debug("Safe PoW start")
    nonce = 0
    trialValue = float('inf')
    while trialValue > target and shutdown == 0:
        nonce += 1
        trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
    if shutdown != 0:
        raise Exception("Interrupted")
    logger.debug("Safe PoW done")
    return [trialValue, nonce]
Example #17
def kill_phi(block, phi):
    logger.debug("Killing phi: %s", phi)

    block.symtab.pop(phi.variable.renamed_name)

    for incoming_var in phi.incoming:
        # A single definition can reach a block multiple times,
        # remove all references
        refs = [ref for ref in incoming_var.cf_references
                        if ref.variable is not phi.variable]
        incoming_var.cf_references = refs
    def connectionFullyEstablished(self):
        if self.connectionIsOrWasFullyEstablished:
            # there is no reason to run this function a second time
            return

        if not self.sslHandshake():
            return

        if not self.peerValidityChecks():
            time.sleep(2)
            self.sendDataThreadQueue.put((0, 'shutdown','no data'))
            self.checkTimeOffsetNotification()
            return

        self.connectionIsOrWasFullyEstablished = True
        shared.timeOffsetWrongCount = 0

        # Command the corresponding sendDataThread to set its own connectionIsOrWasFullyEstablished variable to True also
        self.sendDataThreadQueue.put((0, 'connectionIsOrWasFullyEstablished', (self.services, self.sslSock)))

        if not self.initiatedConnection:
            shared.clientHasReceivedIncomingConnections = True
            queues.UISignalQueue.put(('setStatusIcon', 'green'))
        self.sock.settimeout(
            600)  # We'll send out a ping every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
        queues.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
        logger.debug('Connection fully established with ' + str(self.peer) + "\n" + \
            'The size of the connectedHostsList is now ' + str(len(shared.connectedHostsList)) + "\n" + \
            'The length of sendDataQueues is now: ' + str(len(state.sendDataQueues)) + "\n" + \
            'broadcasting addr from within connectionFullyEstablished function.')

        if self.initiatedConnection:
            state.networkProtocolAvailability[protocol.networkType(self.peer.host)] = True

        # we need to send our own objects to this node
        PendingUpload().add()

        # Let all of our peers know about this new node.
        for stream in self.remoteStreams:
            dataToSend = (int(time.time()), stream, self.services, self.peer.host, self.remoteNodeIncomingPort)
            protocol.broadcastToSendDataQueues((
                stream, 'advertisepeer', dataToSend))

        self.sendaddr()  # This is one large addr message to this one peer.
        if len(shared.connectedHostsList) > \
            BMConfigParser().safeGetInt("bitmessagesettings", "maxtotalconnections", 200):
            logger.info('We are connected to too many people. Closing connection.')
            if self.initiatedConnection:
                self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(fatal=2, errorText="Thank you for providing a listening node.")))
            else:
                self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(fatal=2, errorText="Server full, please try again later.")))
            self.sendDataThreadQueue.put((0, 'shutdown','no data'))
            return
        self.sendBigInv()
Example #19
 def handle_connect(self):
     try:
         AdvancedDispatcher.handle_connect(self)
     except socket.error as e:
         if e.errno in asyncore._DISCONNECTED:
             logger.debug("%s:%i: Connection failed: %s" % (self.destination.host, self.destination.port, str(e)))
             return
     self.nodeid = randomBytes(8)
     self.append_write_buf(protocol.assembleVersionMessage(self.destination.host, self.destination.port, \
             network.connectionpool.BMConnectionPool().streams, False, nodeid=self.nodeid))
     #print "%s:%i: Sending version"  % (self.destination.host, self.destination.port)
     self.connectedAt = time.time()
     receiveDataQueue.put(self.destination)
Example #20
def _doGPUPoW(target, initialHash):
    logger.debug("GPU PoW start")
    nonce = openclpow.do_opencl_pow(initialHash.encode("hex"), target)
    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
    #print "{} - value {} < {}".format(nonce, trialValue, target)
    if trialValue > target:
        deviceNames = ", ".join(gpu.name for gpu in openclpow.gpus)
        UISignalQueue.put(('updateStatusBar', tr.translateText("MainWindow",'Your GPU(s) did not calculate correctly, disabling OpenCL. Please report to the developers.')))
        logger.error("Your GPUs (%s) did not calculate correctly, disabling OpenCL. Please report to the developers.", deviceNames)
        openclpow.ctx = False
        raise Exception("GPU did not calculate correctly.")
    logger.debug("GPU PoW done")
    return [trialValue, nonce]
Example #21
    def sendVersionMessage(self):
        datatosend = protocol.assembleVersionMessage(
            self.peer.host, self.peer.port, state.streamsInWhichIAmParticipating, not self.initiatedConnection)  # the IP and port of the remote host, and my streamNumber.

        logger.debug('Sending version packet: ' + repr(datatosend))

        try:
            self.sendBytes(datatosend)
        except Exception as err:
            # if not 'Bad file descriptor' in err:
            logger.error('sock.sendall error: %s\n' % err)
            
        self.versionSent = 1
Example #22
 def antiIntersectionDelay(self, initial = False):
     # estimated time for a small object to propagate across the whole network
     delay = math.ceil(math.log(len(shared.knownNodes[self.streamNumber]) + 2, 20)) * (0.2 + objectHashHolder.size/2)
     # +2 is to avoid problems with log(0) and log(1)
     # 20 is avg connected nodes count
     # 0.2 is avg message transmission time
     now = time.time()
     if initial and now - delay < self.startTime:
         logger.debug("Initial sleeping for %.2fs", delay - (now - self.startTime))
         time.sleep(delay - (now - self.startTime))
     elif not initial:
         logger.debug("Sleeping due to missing object for %.2fs", delay)
         time.sleep(delay)
Example #23
    def sendSearchRouter(self):
        from debug import logger
        ssdpRequest = "M-SEARCH * HTTP/1.1\r\n" + \
                    "HOST: %s:%d\r\n" % (uPnPThread.SSDP_ADDR, uPnPThread.SSDP_PORT) + \
                    "MAN: \"ssdp:discover\"\r\n" + \
                    "MX: %d\r\n" % (uPnPThread.SSDP_MX, ) + \
                    "ST: %s\r\n" % (uPnPThread.SSDP_ST, ) + "\r\n"

        try:
            logger.debug("Sending UPnP query")
            self.sock.sendto(ssdpRequest, (uPnPThread.SSDP_ADDR, uPnPThread.SSDP_PORT))
        except:
            logger.exception("UPnP send query failed")
    def connectionFullyEstablished(self):
        if self.connectionIsOrWasFullyEstablished:
            # there is no reason to run this function a second time
            return
        self.connectionIsOrWasFullyEstablished = True

        self.sslSock = self.sock
        if ((self.services & shared.NODE_SSL == shared.NODE_SSL) and
            shared.haveSSL(not self.initiatedConnection)):
            logger.debug("Initialising TLS")
            self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(shared.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
            if hasattr(self.sslSock, "context"):
                self.sslSock.context.set_ecdh_curve("secp256k1")
            while True:
                try:
                    self.sslSock.do_handshake()
                    break
                except ssl.SSLError as e:
                    if e.errno == 2:
                        select.select([self.sslSock], [self.sslSock], [])
                    else:
                        break
                except:
                    break
        # Command the corresponding sendDataThread to set its own connectionIsOrWasFullyEstablished variable to True also
        self.sendDataThreadQueue.put((0, 'connectionIsOrWasFullyEstablished', (self.services, self.sslSock)))

        if not self.initiatedConnection:
            shared.clientHasReceivedIncomingConnections = True
            shared.UISignalQueue.put(('setStatusIcon', 'green'))
        self.sock.settimeout(
            600)  # We'll send out a ping every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
        shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
        logger.debug('Connection fully established with ' + str(self.peer) + "\n" + \
            'The size of the connectedHostsList is now ' + str(len(shared.connectedHostsList)) + "\n" + \
            'The length of sendDataQueues is now: ' + str(len(shared.sendDataQueues)) + "\n" + \
            'broadcasting addr from within connectionFullyEstablished function.')

        # Let all of our peers know about this new node.
        dataToSend = (int(time.time()), self.streamNumber, 1, self.peer.host, self.remoteNodeIncomingPort)
        shared.broadcastToSendDataQueues((
            self.streamNumber, 'advertisepeer', dataToSend))

        self.sendaddr()  # This is one large addr message to this one peer.
        if not self.initiatedConnection and len(shared.connectedHostsList) > 200:
            logger.info('We are connected to too many people. Closing connection.')

            self.sendDataThreadQueue.put((0, 'shutdown','no data'))
            return
        self.sendBigInv()
Example #25
def _checkAndSharePubkeyWithPeers(data):
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    if addressVersion >= 4:
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s', hexlify(tag))
    else:
        tag = ''

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this pubkey. Ignoring it.')
        return
    objectType = 1
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))


    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType,data))
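
Both this function and recgetdata above lean on decodeVarint. A minimal decoder consistent with the big-endian encoding PyBitmessage uses (the real addresses.decodeVarint additionally raises varintDecodeError for truncated or non-minimal encodings, which this sketch skips):

from struct import unpack

def decodeVarint(data):
    # Returns (value, number of bytes consumed)
    if not data:
        return (0, 0)
    firstByte, = unpack('>B', data[0:1])
    if firstByte < 0xfd:
        return (firstByte, 1)
    if firstByte == 0xfd:
        return (unpack('>H', data[1:3])[0], 3)
    if firstByte == 0xfe:
        return (unpack('>I', data[1:5])[0], 5)
    return (unpack('>Q', data[1:9])[0], 9)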
Example #26
    def __init__(self, ssdpResponse, address):
        import urllib2
        from xml.dom.minidom import parseString
        from urlparse import urlparse
        import pprint
        from debug import logger

        self.address = address

        row = ssdpResponse.split('\r\n')
        header = {}
        for i in range(1, len(row)):
            part = row[i].split(': ')
            if len(part) == 2:
                header[part[0].lower()] = part[1]
        try:
            self.routerPath = urlparse(header['location'])
            if not self.routerPath or not hasattr(self.routerPath, "hostname"):
                logger.error ("UPnP: no hostname: %s", header['location'])
        except KeyError:
            logger.error ("UPnP: missing location header")

        # get the profile xml file and read it into a variable
        directory = urllib2.urlopen(header['location']).read()

        # create a DOM object that represents the `directory` document
        dom = parseString(directory)

        self.name = dom.getElementsByTagName('friendlyName')[0].childNodes[0].data
        # find all 'serviceType' elements
        service_types = dom.getElementsByTagName('serviceType')

        for service in service_types:
            if service.childNodes[0].data.find('WANIPConnection') > 0:
                self.path = service.parentNode.getElementsByTagName('controlURL')[0].childNodes[0].data

        # get local IP
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            logger.debug("Connecting to %s:%i", self.address, self.routerPath.port)
            s.connect ((self.address, self.routerPath.port))
        except:
            pass
        self.localAddress = s.getsockname()[0]
        logger.debug("Local IP: %s", self.localAddress)
        try:
            s.shutdown(socket.SHUT_RDWR)
            s.close()
        except:
            pass
Example #27
def _checkAndShareBroadcastWithPeers(data):
    if len(data) < 180:
        logger.debug('The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion >= 2:
        streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += streamNumberLength
        if streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
            return
    if broadcastVersion >= 3:
        tag = data[readPosition:readPosition+32]
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this broadcast object. Ignoring.')
        return
    # It is valid. Let's let our peers know about it.
    objectType = 3
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType,data))
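
The calculateInventoryHash call in both functions is, in PyBitmessage, just the first half of a double SHA-512 of the object data; an equivalent sketch:

import hashlib

def calculateInventoryHash(data):
    # First 32 bytes of SHA-512(SHA-512(data)) form the inventory vector
    return hashlib.sha512(hashlib.sha512(data).digest()).digest()[:32]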
Example #28
def resendPubkeyRequest(address):
    logger.debug('It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.')
    try:
        # We need to take this entry out of the shared.neededPubkeys structure
        # because the shared.workerQueue checks to see whether the entry is
        # already present and will not do the POW and send the message,
        # assuming that it has already done it recently.
        del shared.neededPubkeys[address]
    except:
        pass

    shared.UISignalQueue.put((
         'updateStatusBar', 'Doing work necessary to again attempt to request a public key...'))
    sqlExecute(
        '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
        address)
    shared.workerQueue.put(('sendmessage', ''))
Example #29
 def antiIntersectionDelay(self, initial = False):
     # estimated time for a small object to propagate across the whole network
     delay = math.ceil(math.log(max(len(knownnodes.knownNodes[x]) for x in knownnodes.knownNodes) + 2, 20)) * (0.2 + invQueue.queueCount/2.0)
     # take the stream with maximum amount of nodes
     # +2 is to avoid problems with log(0) and log(1)
     # 20 is avg connected nodes count
     # 0.2 is avg message transmission time
     if delay > 0:
         if initial:
             self.skipUntil = self.connectedAt + delay
             if self.skipUntil > time.time():
                 logger.debug("Initial skipping processing getdata for %.2fs", self.skipUntil - time.time())
         else:
             logger.debug("Skipping processing getdata due to missing object for %.2fs", delay)
             self.skipUntil = time.time() + delay
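
To make the delay formula concrete, a worked example with hypothetical numbers: with 1,000 known nodes in the busiest stream and 10 queued inv items, log base 20 of 1002 is roughly 2.31, the ceiling is 3, and the delay comes out to 3 * (0.2 + 10 / 2.0) = 15.6 seconds:

import math

knownNodeCount = 1000  # hypothetical size of the busiest stream
queueCount = 10        # hypothetical invQueue backlog
delay = math.ceil(math.log(knownNodeCount + 2, 20)) * (0.2 + queueCount / 2.0)
print(delay)  # 15.6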
Example #30
 def __init__(self):
     threading.Thread.__init__(self, name="objectProcessor")
     """
     It may be the case that the last time Bitmessage was running, the user
     closed it before it finished processing everything in the
     objectProcessorQueue. Assuming that Bitmessage wasn't closed forcefully,
     it should have saved the data in the queue into the objectprocessorqueue 
     table. Let's pull it out.
     """
     queryreturn = sqlQuery("""SELECT objecttype, data FROM objectprocessorqueue""")
     for row in queryreturn:
         objectType, data = row
         shared.objectProcessorQueue.put((objectType, data))
     sqlExecute("""DELETE FROM objectprocessorqueue""")
     logger.debug("Loaded %s objects from disk into the objectProcessorQueue." % str(len(queryreturn)))
Example #31
 def state_bm_header(self):
     self.magic, self.command, self.payloadLength, self.checksum = protocol.Header.unpack(
         self.read_buf[:protocol.Header.size])
     self.command = self.command.rstrip('\x00')
     if self.magic != 0xE9BEB4D9:
         # skip 1 byte in order to sync
         self.set_state("bm_header", length=1)
         self.bm_proto_reset()
         logger.debug("Bad magic")
         self.handle_close("Bad magic")
         return False
     if self.payloadLength > BMProto.maxMessageSize:
         self.invalid = True
     self.set_state("bm_command",
                    length=protocol.Header.size,
                    expectBytes=self.payloadLength)
     return True
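
state_bm_header above unpacks a fixed 24-byte header. In PyBitmessage, protocol.Header packs a 4-byte magic, a NUL-padded 12-byte command, a 4-byte payload length, and a 4-byte checksum (the first 4 bytes of the payload's SHA-512). A sketch of the layout and of assembling a packet; treat CreatePacket here as an illustrative stand-in for the library helper of the same name:

import hashlib
from struct import Struct

Header = Struct('>L12sL4s')  # magic, command, payload length, checksum; Header.size == 24

def CreatePacket(command, payload=''):
    checksum = hashlib.sha512(payload).digest()[0:4]
    return Header.pack(0xE9BEB4D9, command, len(payload), checksum) + payload

magic, command, payloadLength, checksum = Header.unpack(CreatePacket('verack')[:Header.size])
assert magic == 0xE9BEB4D9 and command.rstrip('\x00') == 'verack'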
Example #32
    def sendSearchRouter(self):
        from debug import logger
        SSDP_ADDR = "239.255.255.250"
        SSDP_PORT = 1900
        SSDP_MX = 2
        SSDP_ST = "urn:schemas-upnp-org:device:InternetGatewayDevice:1"
        ssdpRequest = "M-SEARCH * HTTP/1.1\r\n" + \
                    "HOST: %s:%d\r\n" % (SSDP_ADDR, SSDP_PORT) + \
                    "MAN: \"ssdp:discover\"\r\n" + \
                    "MX: %d\r\n" % (SSDP_MX, ) + \
                    "ST: %s\r\n" % (SSDP_ST, ) + "\r\n"

        try:
            logger.debug("Sending UPnP query")
            self.sock.sendto(ssdpRequest, (SSDP_ADDR, SSDP_PORT))
        except:
            logger.exception("UPnP send query failed")
 def smtp_AUTH(self, arg):
     if not arg or arg[0:5] not in ["PLAIN"]:
         self.push('501 Syntax: AUTH PLAIN')
         return
     authstring = arg[6:]
     try:
         decoded = base64.b64decode(authstring)
         correctauth = "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdusername", "") + \
                 "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdpassword", "")
         logger.debug("authstring: %s / %s", correctauth, decoded)
         if correctauth == decoded:
             self.auth = True
             self.push('235 2.7.0 Authentication successful')
         else:
             raise Exception("Auth fail")
     except:
         self.push('501 Authentication fail')
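
For testing smtp_AUTH, the client-side argument can be produced as follows. SASL PLAIN is the base64 of authzid, authcid, and password joined by NUL bytes; the authzid is left empty so the decoded string matches the server's correctauth comparison. The credentials are placeholders:

import base64

username = 'smtpduser'      # placeholder; must match bitmessagesettings/smtpdusername
password = 'smtpdpassword'  # placeholder; must match bitmessagesettings/smtpdpassword
authstring = base64.b64encode('\x00' + username + '\x00' + password)
arg = 'PLAIN ' + authstring  # what smtp_AUTH receives; arg[6:] is the base64 blob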
Example #34
    def run(self):
        while True:
            objectType, data = queues.objectProcessorQueue.get()

            try:
                if objectType == 0:  # getpubkey
                    self.processgetpubkey(data)
                elif objectType == 1:  #pubkey
                    self.processpubkey(data)
                elif objectType == 2:  #msg
                    self.processmsg(data)
                elif objectType == 3:  #broadcast
                    self.processbroadcast(data)
                elif objectType == 'checkShutdownVariable':  # is more of a command, not an object type. Is used to get this thread past the queue.get() so that it will check the shutdown variable.
                    pass
                else:
                    logger.critical(
                        'Error! Bug! The class_objectProcessor was passed an object type it doesn\'t recognize: %s'
                        % str(objectType))
            except varintDecodeError as e:
                logger.debug(
                    "There was a problem with a varint while processing an object. Some details: %s"
                    % e)
            except Exception as e:
                logger.critical(
                    "Critical error within objectProcessorThread: \n%s" %
                    traceback.format_exc())

            if state.shutdown:
                time.sleep(
                    .5
                )  # Wait just a moment for most of the connections to close
                numberOfObjectsThatWereInTheObjectProcessorQueue = 0
                with SqlBulkExecute() as sql:
                    while queues.objectProcessorQueue.curSize > 0:
                        objectType, data = queues.objectProcessorQueue.get()
                        sql.execute(
                            '''INSERT INTO objectprocessorqueue VALUES (?,?)''',
                            objectType, data)
                        numberOfObjectsThatWereInTheObjectProcessorQueue += 1
                logger.debug(
                    'Saved %s objects from the objectProcessorQueue to disk. objectProcessorThread exiting.'
                    % str(numberOfObjectsThatWereInTheObjectProcessorQueue))
                state.shutdown = 2
                break
 def __init__(self):
     threading.Thread.__init__(self, name="objectProcessor")
     """
     It may be the case that the last time Bitmessage was running, the user
     closed it before it finished processing everything in the
     objectProcessorQueue. Assuming that Bitmessage wasn't closed forcefully,
     it should have saved the data in the queue into the objectprocessorqueue 
     table. Let's pull it out.
     """
     queryreturn = sqlQuery(
         '''SELECT objecttype, data FROM objectprocessorqueue''')
     for row in queryreturn:
         objectType, data = row
         queues.objectProcessorQueue.put((objectType, data))
     sqlExecute('''DELETE FROM objectprocessorqueue''')
     logger.debug(
         'Loaded %s objects from disk into the objectProcessorQueue.' %
         str(len(queryreturn)))
Example #36
 def __init__(self):
     # It may be the case that the last time Bitmessage was running,
     # the user closed it before it finished processing everything in the
     # objectProcessorQueue. Assuming that Bitmessage wasn't closed
     # forcefully, it should have saved the data in the queue into the
     # objectprocessorqueue table. Let's pull it out.
     threading.Thread.__init__(self, name="objectProcessor")
     queryreturn = sqlQuery(
         '''SELECT objecttype, data FROM objectprocessorqueue''')
     for row in queryreturn:
         objectType, data = row
         queues.objectProcessorQueue.put((objectType, data))
     sqlExecute('''DELETE FROM objectprocessorqueue''')
     logger.debug(
         'Loaded %s objects from disk into the objectProcessorQueue.',
         len(queryreturn))
     self._ack_obj = bmproto.BMStringParser()
     self.successfullyDecryptMessageTimings = []
Example #37
 def run(self):
     while not self._stopped:
         requested = 0
         # Choose downloading peers randomly
         connections = [
             x for x in BMConnectionPool().inboundConnections.values() +
             BMConnectionPool().outboundConnections.values()
             if x.fullyEstablished
         ]
         random.shuffle(connections)
         try:
             requestChunk = max(
                 int(
                     min(DownloadThread.maxRequestChunk,
                         len(missingObjects)) / len(connections)), 1)
         except ZeroDivisionError:
             requestChunk = 1
         for i in connections:
             now = time.time()
             try:
                 request = i.objectsNewToMe.randomKeys(requestChunk)
             except KeyError:
                 continue
             payload = bytearray()
             payload.extend(addresses.encodeVarint(len(request)))
             for chunk in request:
                 if chunk in Inventory() and not Dandelion().hasHash(chunk):
                     try:
                         del i.objectsNewToMe[chunk]
                     except KeyError:
                         pass
                     continue
                 payload.extend(chunk)
                 missingObjects[chunk] = now
             if not payload:
                 continue
             i.append_write_buf(protocol.CreatePacket('getdata', payload))
             logger.debug("%s:%i Requesting %i objects", i.destination.host,
                          i.destination.port, len(request))
             requested += len(request)
         if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
             self.cleanPending()
         if not requested:
             self.stop.wait(1)
Example #38
 def run(self):
     while not self._stopped:
         uploaded = 0
         # Choose uploading peers randomly
         connections = [x for x in BMConnectionPool().inboundConnections.values() +
                        BMConnectionPool().outboundConnections.values() if x.fullyEstablished]
         helper_random.randomshuffle(connections)
         for i in connections:
             now = time.time()
             # avoid unnecessary delay
             if i.skipUntil >= now:
                 continue
             if len(i.write_buf) > UploadThread.maxBufSize:
                 continue
             try:
                 request = i.pendingUpload.randomKeys(RandomTrackingDict.maxPending)
             except KeyError:
                 continue
             payload = bytearray()
             chunk_count = 0
             for chunk in request:
                 del i.pendingUpload[chunk]
                 if Dandelion().hasHash(chunk) and \
                    i != Dandelion().objectChildStem(chunk):
                     i.antiIntersectionDelay()
                     logger.info('%s asked for a stem object we didn\'t offer to it.',
                                 i.destination)
                     break
                 try:
                     payload.extend(protocol.CreatePacket('object',
                                                          Inventory()[chunk].payload))
                     chunk_count += 1
                 except KeyError:
                     i.antiIntersectionDelay()
                     logger.info('%s asked for an object we don\'t have.', i.destination)
                     break
             if not chunk_count:
                 continue
             i.append_write_buf(payload)
             logger.debug("%s:%i Uploading %i objects",
                          i.destination.host, i.destination.port, chunk_count)
             uploaded += chunk_count
         if not uploaded:
             self.stop.wait(1)
Example #39
def checkAndShareObjectWithPeers(data):
    """
    This function is called after either receiving an object off of the wire
    or after receiving one as ackdata. 
    Returns the length of time that we should reserve to process this message
    if we are receiving it off of the wire.
    """
    if len(data) > 2 ** 18:
        logger.info('The payload length of this object is too large (%s bytes). Ignoring it.', len(data))
        return 0
    # Let us check to make sure that the proof of work is sufficient.
    if not isProofOfWorkSufficient(data):
        logger.info('Proof of work is insufficient.')
        return 0
    
    endOfLifeTime, = unpack('>Q', data[8:16])
    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800: # The TTL may not be larger than 28 days + 3 hours of wiggle room
        logger.info('This object\'s End of Life time is too far in the future. Ignoring it. Time is %s', endOfLifeTime)
        return 0
    if endOfLifeTime - int(time.time()) < - 3600: # The EOL time was more than an hour ago. That's too much.
        logger.info('This object\'s End of Life time was more than an hour ago. Ignoring the object. Time is %s', endOfLifeTime)
        return 0
    intObjectType, = unpack('>I', data[16:20])
    try:
        if intObjectType == 0:
            _checkAndShareGetpubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 1:
            _checkAndSharePubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 2:
            _checkAndShareMsgWithPeers(data)
            return 0.6
        elif intObjectType == 3:
            _checkAndShareBroadcastWithPeers(data)
            return 0.6
        else:
            _checkAndShareUndefinedObjectWithPeers(data)
            return 0.6
    except varintDecodeError as e:
        logger.debug("There was a problem with a varint while checking to see whether it was appropriate to share an object with peers. Some details: %s", e)
    except Exception as e:
        logger.critical('There was a problem while checking to see whether it was appropriate to share an object with peers. This is definitely a bug! \n%s', traceback.format_exc())
    return 0
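
The isProofOfWorkSufficient check invoked above follows the published Bitmessage difficulty formula. A sketch under the network defaults of 1000 nonce trials per byte and 1000 extra payload bytes; this illustrates the formula rather than reproducing the exact library function:

import hashlib
import time
from struct import unpack

def isProofOfWorkSufficient(data, nonceTrialsPerByte=1000, payloadLengthExtraBytes=1000):
    # data starts with the 8-byte nonce, followed by the 8-byte expiry time
    endOfLifeTime, = unpack('>Q', data[8:16])
    TTL = max(int(endOfLifeTime - time.time()), 300)
    POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(
        data[:8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
    return POW <= 2 ** 64 / (nonceTrialsPerByte * (
        len(data) + payloadLengthExtraBytes +
        ((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16))))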
Example #40
    def run(self):
        while not self._stopped:
            requested = 0
            # Choose downloading peers randomly
            connections = BMConnectionPool().inboundConnections.values(
            ) + BMConnectionPool().outboundConnections.values()
            random.shuffle(connections)
            for i in connections:
                now = time.time()
                timedOut = now - DownloadThread.requestTimeout
                # this may take a while, but it needs consistency, so I think it's better to lock a bigger chunk
                with i.objectsNewToMeLock:
                    downloadPending = len(
                        list((k for k, v in i.objectsNewToMe.iteritems()
                              if k in self.pending
                              and self.pending[k] > timedOut)))
                    if downloadPending >= DownloadThread.maxPending:
                        continue
                    # keys with True values in the dict
                    request = list(
                        (k for k, v in i.objectsNewToMe.iteritems()
                         if k not in self.pending or self.pending[k] < timedOut
                         ))
                    if not request:
                        continue
                    if len(request
                           ) > DownloadThread.requestChunk - downloadPending:
                        request = request[:DownloadThread.requestChunk -
                                          downloadPending]
                    # mark them as pending
                    for k in request:
                        i.objectsNewToMe[k] = False
                        self.pending[k] = now

                payload = addresses.encodeVarint(
                    len(request)) + ''.join(request)
                i.append_write_buf(protocol.CreatePacket('getdata', payload))
                logger.debug("%s:%i Requesting %i objects", i.destination.host,
                             i.destination.port, len(request))
                requested += len(request)
            if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
                self.cleanPending()
            if not requested:
                self.stop.wait(1)
Example #41
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores

    # temporarily disable handlers
    int_handler = signal.getsignal(signal.SIGINT)
    term_handler = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(
            pool.apply_async(_pool_worker,
                             args=(i, initialHash, target, pool_size)))

    # re-enable handlers
    signal.signal(signal.SIGINT, int_handler)
    signal.signal(signal.SIGTERM, term_handler)

    while True:
        if shutdown >= 1:
            pool.terminate()
            raise Exception("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  #Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
Example #42
    def tls_handshake(self):
        # wait for flush
        if self.write_buf:
            return False
        # Perform the handshake.
        try:
            #print "handshaking (internal)"
            self.sslSocket.do_handshake()
        except ssl.SSLError as err:
            #print "%s:%i: handshake fail" % (self.destination.host, self.destination.port)
            self.want_read = self.want_write = False
            if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                #print "want read"
                self.want_read = True
            if err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                #print "want write"
                self.want_write = True
            if not (self.want_write or self.want_read):
                raise
        except socket.error as err:
            if err.errno in asyncore._DISCONNECTED:
                self.handle_close()
            else:
                raise
        else:
            if sys.version_info >= (2, 7, 9):
                self.tlsVersion = self.sslSocket.version()
                logger.debug(
                    "%s:%i: TLS handshake success, TLS protocol version: %s",
                    self.destination.host, self.destination.port,
                    self.sslSocket.version())
            else:
                self.tlsVersion = "TLSv1"
                logger.debug("%s:%i: TLS handshake success",
                             self.destination.host, self.destination.port)
            # The handshake has completed, so remove this channel and...
            self.del_channel()
            self.set_socket(self.sslSocket)
            self.tlsDone = True

            self.bm_proto_reset()
            self.set_state("connection_fully_established")
            receiveDataQueue.put(self.destination)
        return False
Example #43
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = BMConfigParser().getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores

    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(
            pool.apply_async(_pool_worker,
                             args=(i, initialHash, target, pool_size)))

    while True:
        if state.shutdown > 0:
            try:
                pool.terminate()
                pool.join()
            except:
                pass
            raise StopIteration("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                try:
                    result[i].successful()
                except AssertionError:
                    pool.terminate()
                    pool.join()
                    raise StopIteration("Interrupted")
                result = result[i].get()
                pool.terminate()
                pool.join()
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
def resendPubkeyRequest(address):
    logger.debug(
        'It has been a long time and we haven\'t heard a response to our'
        ' getpubkey request. Sending again.')
    try:
        # We need to take this entry out of the neededPubkeys structure
        # because the queues.workerQueue checks to see whether the entry
        # is already present and will not do the POW and send the message
        # because it assumes that it has already done it recently.
        del state.neededPubkeys[address]
    except:
        pass

    queues.UISignalQueue.put(
        ('updateStatusBar',
         'Doing work necessary to again attempt to request a public key...'))
    sqlExecute('''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
               address)
    queues.workerQueue.put(('sendmessage', ''))
Example #45
def reloadMyAddressHashes():
    """Reload keys for user's addresses from the config file"""
    logger.debug('reloading keys from keys.dat file')
    myECCryptorObjects.clear()
    myAddressesByHash.clear()
    myAddressesByTag.clear()
    # myPrivateKeys.clear()

    keyfileSecure = checkSensitiveFilePermissions(
        os.path.join(state.appdata, 'keys.dat'))
    hasEnabledKeys = False
    for addressInKeysFile in BMConfigParser().addresses():
        isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled')
        if isEnabled:
            hasEnabledKeys = True
            # status
            addressVersionNumber, streamNumber, hashobj = decodeAddress(
                addressInKeysFile)[1:]
            if addressVersionNumber in (2, 3, 4):
                # Returns a simple 32 bytes of information encoded
                # in 64 Hex characters, or null if there was an error.
                privEncryptionKey = hexlify(
                    decodeWalletImportFormat(BMConfigParser().get(
                        addressInKeysFile, 'privencryptionkey')))
                # It is 32 bytes encoded as 64 hex characters
                if len(privEncryptionKey) == 64:
                    myECCryptorObjects[hashobj] = \
                        highlevelcrypto.makeCryptor(privEncryptionKey)
                    myAddressesByHash[hashobj] = addressInKeysFile
                    tag = hashlib.sha512(
                        hashlib.sha512(
                            encodeVarint(addressVersionNumber) +
                            encodeVarint(streamNumber) +
                            hashobj).digest()).digest()[32:]
                    myAddressesByTag[tag] = addressInKeysFile
            else:
                logger.error('Error in reloadMyAddressHashes: Can\'t handle'
                             ' address versions other than 2, 3, or 4.\n')

    if not keyfileSecure:
        fixSensitiveFilePermissions(os.path.join(state.appdata, 'keys.dat'),
                                    hasEnabledKeys)
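
decodeWalletImportFormat above is not shown in this excerpt. A self-contained sketch of the standard WIF decoding it performs (base58 decode, double-SHA256 checksum verification, then stripping the 0x80 version prefix); leading '1' characters, which encode leading zero bytes in general base58 data, are ignored here since Bitmessage WIF strings never need them:

import hashlib

B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def decodeWalletImportFormat(wif):
    # Base58 string -> big integer -> raw bytes
    n = 0
    for char in wif:
        n = n * 58 + B58.index(char)
    raw = ''
    while n > 0:
        raw = chr(n % 256) + raw
        n //= 256
    payload, checksum = raw[:-4], raw[-4:]
    if hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] != checksum:
        raise ValueError('bad WIF checksum')
    if payload[0] != '\x80':
        raise ValueError('unexpected WIF version byte')
    return payload[1:]  # the raw 32-byte private key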
Example #46
 def antiIntersectionDelay(self, initial=False):
     # Estimated time for a small object to propagate across the whole
     # network. We take the stream with the largest number of nodes;
     # +2 avoids problems with log(0) and log(1), 20 is the average
     # connected-node count, and 0.2 is the average message
     # transmission time.
     delay = math.ceil(
         math.log(
             max(
                 len(knownnodes.knownNodes[x])
                 for x in knownnodes.knownNodes) + 2,
             20)) * (0.2 + objectHashHolder.size / 2)
     now = time.time()
     if initial and now - delay < self.startTime:
         logger.debug("Initial sleeping for %.2fs",
                      delay - (now - self.startTime))
         time.sleep(delay - (now - self.startTime))
     elif not initial:
         logger.debug("Sleeping due to missing object for %.2fs", delay)
         time.sleep(delay)
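As a rough worked example of the formula above (the node count and holder size are made-up values, not taken from the source):

import math

largestStream = 998  # hypothetical known-node count of the busiest stream
holderSize = 10      # hypothetical objectHashHolder.size

# ceil(log_20(998 + 2)) = ceil(2.31) = 3 propagation "hops"
hops = math.ceil(math.log(largestStream + 2, 20))
delay = hops * (0.2 + holderSize / 2.0)  # 3 * 5.2 = 15.6 seconds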
Exemplo n.º 47
0
 def handle_connect(self):
     """Callback for TCP connection being established."""
     try:
         AdvancedDispatcher.handle_connect(self)
     except socket.error as e:
         if e.errno in asyncore._DISCONNECTED:  # pylint: disable=protected-access
             logger.debug("%s:%i: Connection failed: %s",
                          self.destination.host, self.destination.port,
                          str(e))
             return
     self.nodeid = randomBytes(8)
     self.append_write_buf(
         protocol.assembleVersionMessage(
             self.destination.host,
             self.destination.port,
             network.connectionpool.BMConnectionPool().streams,
             False,
             nodeid=self.nodeid))
     self.connectedAt = time.time()
     receiveDataQueue.put(self.destination)
Exemplo n.º 48
0
    def run(self):
        while True:
            objectType, data = queues.objectProcessorQueue.get()

            self.checkackdata(data)

            try:
                if objectType == 0:  # getpubkey
                    self.processgetpubkey(data)
                elif objectType == 1:  # pubkey
                    self.processpubkey(data)
                elif objectType == 2:  # msg
                    self.processmsg(data)
                elif objectType == 3:  # broadcast
                    self.processbroadcast(data)
                elif objectType == 'checkShutdownVariable':
                    # Not an object type but a command: gets this thread
                    # past queue.get() so that it checks the shutdown
                    # variable.
                    pass
                else:
                    if isinstance(objectType, int):
                        logger.info(
                            'Don\'t know how to handle object type 0x%08X',
                            objectType)
                    else:
                        logger.info(
                            'Don\'t know how to handle object type %s',
                            objectType)
            except helper_msgcoding.DecompressionSizeException as e:
                logger.error(
                    'The object is too big after decompression (stopped'
                    ' decompressing at %i bytes, your configured limit:'
                    ' %i bytes). Ignoring',
                    e.size, BMConfigParser().safeGetInt('zlib', 'maxsize'))
            except varintDecodeError as e:
                logger.debug(
                    'There was a problem with a varint while processing'
                    ' an object. Some details: %s', e)
            except Exception:
                logger.critical(
                    'Critical error within objectProcessorThread: \n%s',
                    traceback.format_exc())

            if state.shutdown:
                # Wait just a moment for most of the connections to close.
                time.sleep(.5)
                numberOfObjectsThatWereInTheObjectProcessorQueue = 0
                with SqlBulkExecute() as sql:
                    while queues.objectProcessorQueue.curSize > 0:
                        objectType, data = queues.objectProcessorQueue.get()
                        sql.execute(
                            '''INSERT INTO objectprocessorqueue VALUES (?,?)''',
                            objectType, data)
                        numberOfObjectsThatWereInTheObjectProcessorQueue += 1
                logger.debug(
                    'Saved %i objects from the objectProcessorQueue to disk.'
                    ' objectProcessorThread exiting.',
                    numberOfObjectsThatWereInTheObjectProcessorQueue)
                state.shutdown = 2
                break
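The elif chain above could equivalently be written as a dispatch table; a sketch of that alternative inside run() (same handler names as in the code above):

# Inside run(), after the queue.get():
handlers = {
    0: self.processgetpubkey,   # getpubkey
    1: self.processpubkey,      # pubkey
    2: self.processmsg,         # msg
    3: self.processbroadcast,   # broadcast
}
handler = handlers.get(objectType)
if handler is not None:
    handler(data)
elif objectType != 'checkShutdownVariable':
    logger.info('Don\'t know how to handle object type %r', objectType)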
Exemplo n.º 49
0
    def createPortMapping(self, router):
        from debug import logger

        for i in range(50):
            try:
                routerIP, = unpack('>I', socket.inet_aton(router.address))
                localIP = router.localAddress
                if i == 0:
                    extPort = self.localPort # try same port first
                elif i == 1 and self.extPort:
                    extPort = self.extPort # try external port from last time next
                else:
                    extPort = randint(32767, 65535)
                logger.debug("Requesting UPnP mapping for %s:%i on external port %i", localIP, self.localPort,  extPort)
                router.AddPortMapping(extPort, self.localPort, localIP, 'TCP', 'BitMessage')
                shared.extPort = extPort
                self.extPort = extPort
                shared.config.set('bitmessagesettings', 'extport', str(extPort))
                break
            except UPnPError:
                logger.debug("UPnP error: ", exc_info=True)
Exemplo n.º 50
0
 def _checkIPv4Address(self, host, hostStandardFormat):
     if host[0] == '\x7F': # 127/8
         logger.debug('Ignoring IP address in loopback range: ' + hostStandardFormat)
         return False
     if host[0] == '\x0A': # 10/8
         logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
         return False
     if host[0:2] == '\xC0\xA8': # 192.168/16
         logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
         return False
     if host[0:2] >= '\xAC\x10' and host[0:2] < '\xAC\x20': # 172.16/12
         logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
         return False
     return hostStandardFormat
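The raw byte-string comparisons above implement standard loopback and RFC 1918 private-range checks. On Python 3, the same test could be written with the stdlib ipaddress module (a sketch, not the project's code):

import ipaddress

def isRoutableIPv4(hostStandardFormat):
    # hostStandardFormat is a dotted-quad string such as '10.1.2.3'.
    addr = ipaddress.ip_address(hostStandardFormat)
    # is_private covers 10/8, 172.16/12 and 192.168/16;
    # is_loopback covers 127/8.
    return not (addr.is_private or addr.is_loopback)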
Exemplo n.º 51
0
def _checkAndShareGetpubkeyWithPeers(data):
    if len(data) < 42:
        logger.info(
            'getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %i isn\'t one we are interested in.',
                     streamNumber)
        return
    readPosition += streamNumberLength

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this getpubkey request. Ignoring it.')
        return

    objectType = 0
    Inventory()[inventoryHash] = (objectType, streamNumber, data, embeddedTime,
                                  '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
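All three _checkAndShare... helpers skip the same 20-byte object header (8-byte nonce, 8-byte timestamp, 4-byte object type) before reading varints. A small sketch of that layout (the function name is descriptive, not from the source):

from struct import unpack

def parseObjectHeader(data):
    # Shared 20-byte object header: nonce (8 bytes) | time (8 bytes) |
    # objectType (4 bytes), followed by varint-encoded fields.
    nonce, = unpack('>Q', data[0:8])
    embeddedTime, = unpack('>Q', data[8:16])
    objectType, = unpack('>I', data[16:20])
    return nonce, embeddedTime, objectType, 20  # 20 == next readPosition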
Exemplo n.º 52
0
def _checkAndShareMsgWithPeers(data):
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %i isn\'t one we are interested in.',
                     streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this msg message. Ignoring.')
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = 2
    Inventory()[inventoryHash] = (objectType, streamNumber, data, embeddedTime,
                                  '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

    # Now let's enqueue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
Exemplo n.º 53
0
def _checkAndShareUndefinedObjectWithPeers(data):
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug(
            'The streamNumber %i isn\'t one we are interested in.',
            streamNumber
        )
        return

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this undefined object. Ignoring.')
        return
    objectType, = unpack('>I', data[16:20])
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))
Exemplo n.º 54
0
 def setup(self, sock, HOST, PORT, streamNumber,
           someObjectsOfWhichThisRemoteNodeIsAlreadyAware):
     self.sock = sock
     self.peer = shared.Peer(HOST, PORT)
     self.name = "sendData-" + self.peer.host.replace(
         ":", ".")  # log parser field separator
     self.streamNumber = streamNumber
     self.services = 0
     self.initiatedConnection = False
     # This must be set using the setRemoteProtocolVersion command,
     # which is sent through the self.sendDataThreadQueue queue.
     self.remoteProtocolVersion = -1
     # If this value increases beyond five minutes ago, we'll send a
     # pong message to keep the connection alive.
     self.lastTimeISentData = int(time.time())
     self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware = someObjectsOfWhichThisRemoteNodeIsAlreadyAware
     if self.streamNumber == -1:  # This was an incoming connection.
         self.initiatedConnection = False
     else:
         self.initiatedConnection = True
     logger.debug('The streamNumber of this sendDataThread (ID: %s)'
                  ' at setup() is %s', id(self), self.streamNumber)
Exemplo n.º 55
0
def _doGPUPoW(target, initialHash):
    logger.debug("GPU PoW start")
    nonce = openclpow.do_opencl_pow(initialHash.encode("hex"), target)
    trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(
        pack('>Q', nonce) + initialHash).digest()).digest()[0:8])
    if trialValue > target:
        deviceNames = ", ".join(gpu.name for gpu in openclpow.enabledGpus)
        queues.UISignalQueue.put((
            'updateStatusBar', (
                tr._translate(
                    "MainWindow",
                    'Your GPU(s) did not calculate correctly, disabling OpenCL. Please report to the developers.'
                ),
                1)))
        logger.error(
            "Your GPUs (%s) did not calculate correctly, disabling OpenCL. Please report to the developers.",
            deviceNames)
        openclpow.enabledGpus = []
        raise Exception("GPU did not calculate correctly.")
    if state.shutdown != 0:
        raise StopIteration("Interrupted")
    logger.debug("GPU PoW done")
    return [trialValue, nonce]
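The trial-value check above is the standard Bitmessage PoW test: the first 8 bytes of a double SHA-512 over nonce || initialHash, read big-endian, must not exceed the target. A standalone verifier of the same rule (a sketch; the argument values a caller passes in are up to them):

import hashlib
from struct import pack, unpack

def powIsValid(nonce, initialHash, target):
    # First 8 bytes of SHA512(SHA512(nonce || initialHash)), read as a
    # big-endian integer, must not exceed the target.
    digest = hashlib.sha512(hashlib.sha512(
        pack('>Q', nonce) + initialHash).digest()).digest()
    trialValue, = unpack('>Q', digest[0:8])
    return trialValue <= target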
Exemplo n.º 56
0
    def bm_command_addr(self):
        addresses = self._decode_addr()
        # Only allow peer discovery from private IPs, in order to avoid
        # attacks from random IPs on the internet.
        if not self.local:
            return True
        remoteport = False
        for i in addresses:
            seenTime, stream, services, ip, port = i
            decodedIP = protocol.checkIPAddress(str(ip))
            if stream not in state.streamsInWhichIAmParticipating:
                continue
            if seenTime < time.time() - BMProto.maxTimeOffset or \
                    seenTime > time.time() + BMProto.maxTimeOffset:
                continue
            if decodedIP is False:
                # If the address isn't local, interpret it as the host's
                # own announcement.
                remoteport = port
        if remoteport is False:
            return True
        logger.debug(
            "received peer discovery from %s:%i (port %i):",
            self.destination.host, self.destination.port, remoteport)
        # self.local is known to be True here (checked above).
        state.discoveredPeers[state.Peer(self.destination.host, remoteport)] = \
            time.time()
        return True
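Each record decoded above is a (seenTime, stream, services, ip, port) tuple, and entries outside the allowed clock skew are skipped. A condensed sketch of that freshness test (the 3600-second window is an illustrative stand-in for BMProto.maxTimeOffset):

import time

MAX_TIME_OFFSET = 3600  # illustrative stand-in for BMProto.maxTimeOffset

def isFreshAddrRecord(seenTime):
    # Accept addr records whose timestamp lies within +/- the allowed
    # clock skew of our own clock.
    return abs(seenTime - time.time()) <= MAX_TIME_OFFSET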
    def checkackdata(self, data):
        # Let's check whether this is a message acknowledgement bound for us.
        if len(data) < 32:
            return

        # bypass nonce and time, retain object type/version/stream + body
        readPosition = 16

        if data[readPosition:] in shared.ackdataForWhichImWatching:
            logger.info('This object is an acknowledgement bound for me.')
            del shared.ackdataForWhichImWatching[data[readPosition:]]
            sqlExecute(
                'UPDATE sent SET status=?, lastactiontime=? WHERE ackdata=?',
                'ackreceived', int(time.time()), data[readPosition:])
            queues.UISignalQueue.put(
                ('updateSentItemStatusByAckdata',
                 (data[readPosition:],
                  tr._translate(
                      "MainWindow",
                      'Acknowledgement of the message received %1').arg(
                          l10n.formatTimestamp()))))
        else:
            logger.debug('This object is not an acknowledgement bound for me.')
Exemplo n.º 58
0
def readKnownNodes():
    try:
        with open(state.appdata + 'knownnodes.dat', 'rb') as source:
            with knownNodesLock:
                try:
                    json_deserialize_knownnodes(source)
                except ValueError:
                    source.seek(0)
                    pickle_deserialize_old_knownnodes(source)
    except (IOError, OSError, KeyError, EOFError):
        logger.debug('Failed to read nodes from knownnodes.dat', exc_info=True)
        createDefaultKnownNodes()

    config = BMConfigParser()

    # your own onion address, if setup
    onionhostname = config.safeGet('bitmessagesettings', 'onionhostname')
    if onionhostname and ".onion" in onionhostname:
        onionport = config.safeGetInt('bitmessagesettings', 'onionport')
        if onionport:
            self_peer = state.Peer(onionhostname, onionport)
            addKnownNode(1, self_peer, is_self=True)
            state.ownAddresses[self_peer] = True
Exemplo n.º 59
0
    def peerValidityChecks(self):
        if self.remoteProtocolVersion < 3:
            self.append_write_buf(protocol.assembleErrorMessage(
                fatal=2,
                errorText="You are using an old protocol. Closing connection."))
            logger.debug(
                'Closing connection to old protocol version %s, node: %s',
                str(self.remoteProtocolVersion), str(self.destination))
            return False
        if self.timeOffset > BMProto.maxTimeOffset:
            self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                errorText="Your time is too far in the future compared to mine. Closing connection."))
            logger.info("%s's time is too far in the future (%s seconds). Closing connection to it.",
                self.destination, self.timeOffset)
            shared.timeOffsetWrongCount += 1
            return False
        elif self.timeOffset < -BMProto.maxTimeOffset:
            self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                errorText="Your time is too far in the past compared to mine. Closing connection."))
            logger.info("%s's time is too far in the past (timeOffset %s seconds). Closing connection to it.",
                self.destination, self.timeOffset)
            shared.timeOffsetWrongCount += 1
            return False
        else:
            shared.timeOffsetWrongCount = 0
        if not self.streams:
            self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                errorText="We don't have shared stream interests. Closing connection."))
            logger.debug(
                'Closed connection to %s because there is no overlapping'
                ' interest in streams.', str(self.destination))
            return False
        if self.destination in network.connectionpool.BMConnectionPool().inboundConnections:
            try:
                if not protocol.checkSocksIP(self.destination.host):
                    self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                        errorText="Too many connections from your IP. Closing connection."))
                    logger.debug(
                        'Closed connection to %s because we are already'
                        ' connected to that IP.', str(self.destination))
                    return False
            except Exception:
                pass
        if network.connectionpool.BMConnectionPool().isAlreadyConnected(self.nonce):
            self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                errorText="I'm connected to myself. Closing connection."))
            logger.debug ("Closed connection to %s because I'm connected to myself.",
                str(self.destination))
            return False

        return True
Exemplo n.º 60
0
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(
            pool.apply_async(_pool_worker,
                             args=(i, initialHash, target, pool_size)))
    while True:
        if shutdown >= 1:
            pool.terminate()
            # Don't let this thread return here; it would return nothing
            # and cause an exception in bitmessagemain.py.
            while True:
                time.sleep(10)
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
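_pool_worker itself is not shown in these snippets; a plausible sketch of how such a worker partitions the nonce space (a guess at the pattern under the hashing rule above, not the project's actual implementation):

import hashlib
from struct import pack, unpack

def _pool_worker_sketch(i, initialHash, target, pool_size):
    # Hypothetical striding: worker i tries nonces i, i + pool_size,
    # i + 2 * pool_size, ... so the workers never duplicate effort.
    nonce = i
    trialValue = float('inf')
    while trialValue > target:
        nonce += pool_size
        trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(
            pack('>Q', nonce) + initialHash).digest()).digest()[0:8])
    return [trialValue, nonce]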