Example #1
    def publishKeys(self, allKeys=0):
        """Publish server descriptors to the directory server.  Ordinarily,
           only unpublished descriptors are sent.  If allKeys is true,
           all descriptors are sent."""
        keySets = [ks for _, _, ks in self.keySets]
        if allKeys:
            LOG.info("Republishing all known keys to directory server")
        else:
            keySets = [ks for ks in keySets if not ks.isPublished()]
            if not keySets:
                LOG.trace("publishKeys: no unpublished keys found")
                return
            LOG.info("Publishing %s keys to directory server...", len(keySets))

        rejected = 0
        for ks in keySets:
            status = ks.publish(DIRECTORY_UPLOAD_URL)
            if status == 'error':
                LOG.error("Error publishing a key; giving up")
                return 0
            elif status == 'reject':
                rejected += 1
            else:
                assert status == 'accept'
        if rejected == 0:
            LOG.info("All keys published successfully.")
            return 1
        else:
            LOG.info("%s/%s keys were rejected.", rejected, len(keySets))
            return 0
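
The accept/reject/error handling above reduces to a small tally. Here is a standalone sketch of just that rule; the helper name tallyPublishResults and the example status list are illustrative, not part of Mixminion:

def tallyPublishResults(statuses):
    # Mirrors publishKeys above: any 'error' aborts with failure (0);
    # otherwise the result is success (1) only if nothing was rejected.
    rejected = 0
    for status in statuses:
        if status == 'error':
            return 0
        elif status == 'reject':
            rejected += 1
    return 1 if rejected == 0 else 0

# e.g. tallyPublishResults(['accept', 'reject', 'accept']) == 0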
Example #2
def sendPackets(routing, packetList, timeout=300, callback=None):
    """Sends a list of packets to a server.  Raise MixProtocolError on
       failure.

       routing -- an instance of mixminion.Packet.IPV4Info or
                  mixminion.Packet.MMTPHostInfo.
                  If routing.keyinfo == '\000'*20, we ignore the server's
                  keyid.
       packetList -- a list of 32KB packets and control strings.  Control
           strings must be one of "JUNK" to send a 32KB padding chunk,
           or "RENEGOTIATE" to renegotiate the connection key.
       timeout -- None, or a number of seconds to wait for data
           on the connection before raising TimeoutError.
       callback -- None, or a function to call with an index into packetList
           after each successful packet delivery.
    """
    # Find out where we're connecting to.
    serverName = mixminion.ServerInfo.displayServerByRouting(routing)
    if isinstance(routing, IPV4Info):
        family, addr = socket.AF_INET, routing.ip
    else:
        assert isinstance(routing, MMTPHostInfo)
        LOG.trace("Looking up %s...",routing.hostname)
        family, addr, _ = mixminion.NetUtils.getIP(routing.hostname)
        if family == "NOENT":
            raise MixProtocolError("Couldn't resolve hostname %s: %s" % (
                                   routing.hostname, addr))

    # Create an MMTPClientConnection
    try:
        con = MMTPClientConnection(
            family, addr, routing.port, routing.keyinfo, serverName=serverName)
    except socket.error, e:
        raise MixProtocolError(str(e))
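
Per the docstring, packetList mixes real 32KB packets with the control strings "JUNK" and "RENEGOTIATE". A hedged sketch of padding a batch out to a fixed size with junk; the 32KB constant follows the docstring, and the helper name is an assumption:

PACKET_LEN = 32 * 1024   # "32KB packets", per the docstring above

def padWithJunk(packets, batchSize):
    # Append "JUNK" control strings so every batch has the same length,
    # which is the stated purpose of the JUNK control string.
    assert all(len(p) == PACKET_LEN for p in packets)
    return packets + ["JUNK"] * max(0, batchSize - len(packets))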
Example #3
    def deliveryFailed(self, handle, retriable=0, now=None):
        assert self.retrySchedule is not None
        if now is None:
            now = time.time()
        self._lock.acquire()
        try:
            try:
                mState = self.store.getMetadata(handle)
            except KeyError:
                mState = None
            except CorruptedFile:
                mState = None

            if mState is None:
                # This should never happen
                LOG.error_exc(sys.exc_info(),
                              "Handle %s had no state; removing", handle)
                self.removeMessage(handle)
                return
            elif not mState.isPending():
                LOG.error("Handle %s was not pending", handle)
                return

            last = mState.pending
            mState.setNonPending()
            if not retriable:
                LOG.trace("     (Giving up on %s)", handle)
                self.removeMessage(handle)

            aState = self._getAddressState(mState.address, now)
            aState.failed(attempt=last,now=now)
            aState.setNextAttempt(self.retrySchedule,now=now)
            self.addressStateDB[str(aState.address)] = aState # flush to db.
        finally:
            self._lock.release()
Example #4
    def publishKeys(self, allKeys=0):
        """Publish server descriptors to the directory server.  Ordinarily,
           only unpublished descriptors are sent.  If allKeys is true,
           all descriptors are sent."""
        keySets = [ ks for _, _, ks in self.keySets ]
        if allKeys:
            LOG.info("Republishing all known keys to directory server")
        else:
            keySets = [ ks for ks in keySets if not ks.isPublished() ]
            if not keySets:
                LOG.trace("publishKeys: no unpublished keys found")
                return
            LOG.info("Publishing %s keys to directory server...",len(keySets))

        rejected = 0
        for ks in keySets:
            status = ks.publish(DIRECTORY_UPLOAD_URL)
            if status == 'error':
                LOG.error("Error publishing a key; giving up")
                return 0
            elif status == 'reject':
                rejected += 1
            else:
                assert status == 'accept'
        if rejected == 0:
            LOG.info("All keys published successfully.")
            return 1
        else:
            LOG.info("%s/%s keys were rejected." , rejected, len(keySets))
            return 0
Example #5
    def getSigners(self):
        #DOCDOC -- returns members of self.dirInfo.voters with valid signatures.
        if self.signers is not None:
            return self.signers

        sigs = {}
        self.signers = []
        for s in self.signatures:
            sigs[s.getKeyFingerprint()] = s
        for digest, url in self.dirInfo.voters:
            try:
                s = sigs[digest]
            except KeyError:
                #XXXX008 log something.
                continue
            if s.checkSignature():
                LOG.trace("Found valid signature from %s at %s",
                          digest, url)
                self.signers.append((digest, url))
            else:
                LOG.trace("Signature claiming to be from %s was not valid",
                          digest)
                continue

        return self.signers
Example #6
    def deliveryFailed(self, handle, retriable=0, now=None):
        assert self.retrySchedule is not None
        if now is None:
            now = time.time()
        self._lock.acquire()
        try:
            try:
                mState = self.store.getMetadata(handle)
            except KeyError:
                mState = None
            except CorruptedFile:
                mState = None

            if mState is None:
                # This should never happen
                LOG.error_exc(sys.exc_info(),
                              "Handle %s had no state; removing", handle)
                self.removeMessage(handle)
                return
            elif not mState.isPending():
                LOG.error("Handle %s was not pending", handle)
                return

            last = mState.pending
            mState.setNonPending()
            if not retriable:
                LOG.trace("     (Giving up on %s)", handle)
                self.removeMessage(handle)

            aState = self._getAddressState(mState.address, now)
            aState.failed(attempt=last, now=now)
            aState.setNextAttempt(self.retrySchedule, now=now)
            self.addressStateDB[str(aState.address)] = aState  # flush to db.
        finally:
            self._lock.release()
Example #7
    def lookup(self,name,cb):
        """Look up the name 'name', and pass the result to the callback
           function 'cb' when we're done.  The result will be of the
           same form as the return value of NetUtils.getIP: either
           (Family, Address, Time) or ('NOENT', Reason, Time).

           Note: The callback may be invoked from a different thread.  Either
           this thread or a DNS thread will block until the callback finishes,
           so it shouldn't be especially time-consuming.
        """
        # Check for a static IP first; no need to resolve that.
        v = mixminion.NetUtils.nameIsStaticIP(name)
        if v is not None:
            cb(name,v)
            return

        try:
            self.lock.acquire()
            v = self.cache.get(name)
            # If we don't have a cached answer, add cb to self.callbacks
            if v is None or v is PENDING:
                self.callbacks.setdefault(name, []).append(cb)
            # If we aren't looking up the answer, start looking it up.
            if v is None:
                LOG.trace("DNS cache starting lookup of %r", name)
                self._beginLookup(name)
        finally:
            self.lock.release()
        # If we _did_ have an answer, invoke the callback now.
        if v is not None and v is not PENDING:
            LOG.trace("DNS cache returning cached value %s for %r",
                      v,name)
            cb(name,v)
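
The locking and PENDING bookkeeping above can be hard to follow. This is a minimal, single-threaded sketch of the same cache-or-callback pattern; TinyDNSCache and its resolve argument are illustrative stand-ins, not the real class:

PENDING = object()   # sentinel: "a lookup for this name is already running"

class TinyDNSCache:
    def __init__(self):
        self.cache = {}
        self.callbacks = {}

    def lookup(self, name, cb, resolve):
        v = self.cache.get(name)
        if v is None or v is PENDING:
            # No answer yet: remember the callback for later.
            self.callbacks.setdefault(name, []).append(cb)
        if v is None:
            # Nobody is looking this name up yet: start now.
            self.cache[name] = PENDING
            resolve(name)            # should eventually call self.answer()
        if v is not None and v is not PENDING:
            cb(name, v)              # cached answer: invoke immediately

    def answer(self, name, value):
        # Called when the lookup finishes; flush all waiting callbacks.
        self.cache[name] = value
        for cb in self.callbacks.pop(name, []):
            cb(name, value)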
Example #8
    def deliverySucceeded(self, handle, now=None):
        """Removes a message from the outgoing queue.  This method
           should be invoked after the corresponding message has been
           successfully delivered.
        """
        assert self.retrySchedule is not None

        LOG.trace("DeliveryQueue got successful delivery for %s from %s", handle, self.qname)
        self.removeMessage(handle)
Example #9
    def deliverySucceeded(self, handle, now=None):
        """Removes a message from the outgoing queue.  This method
           should be invoked after the corresponding message has been
           successfully delivered.
        """
        assert self.retrySchedule is not None

        LOG.trace("DeliveryQueue got successful delivery for %s from %s",
                  handle, self.qname)
        self.removeMessage(handle)
Example #10
    def queueDeliveryMessage(self, msg, address=None, now=None):
        """Schedule a message for delivery.
             msg -- the message.  This can be any pickleable object.
        """
        assert self.retrySchedule is not None
        try:
            self._lock.acquire()
            ds = _DeliveryState(now, None, address)
            ds.setNextAttempt(self.retrySchedule, now)
            handle = self.store.queueObjectAndMetadata(msg, ds)
            LOG.trace("DeliveryQueue got message %s for %s", handle, self.qname)
        finally:
            self._lock.release()

        return handle
Example #11
    def queueDeliveryMessage(self, msg, address=None, now=None):
        """Schedule a message for delivery.
             msg -- the message.  This can be any pickleable object.
        """
        assert self.retrySchedule is not None
        try:
            self._lock.acquire()
            ds = _DeliveryState(now, None, address)
            ds.setNextAttempt(self.retrySchedule, now)
            handle = self.store.queueObjectAndMetadata(msg, ds)
            LOG.trace("DeliveryQueue got message %s for %s", handle,
                      self.qname)
        finally:
            self._lock.release()

        return handle
Example #12
    def deliverySucceeded(self, handle, now=None):
        assert self.retrySchedule is not None
        self._lock.acquire()
        try:
            LOG.trace("PerAddressDeliveryQueue got successful delivery for %s from %s", handle, self.qname)
            try:
                mState = self.store.getMetadata(handle)
            except CorruptedFile:
                mState = None
            if mState:
                aState = self._getAddressState(mState.address, now)
                aState.succeeded(now=now)
                aState.setNextAttempt(self.retrySchedule, now)
                self.addressStateDB[str(mState.address)] = aState

            self.removeMessage(handle)
        finally:
            self._lock.release()
Example #13
def getIP(name, preferIP4=PREFER_INET4):
    """Resolve the hostname 'name' and return the 'best' answer.  An
       answer is either a 3-tuple as returned by getIPs, or a 3-tuple of
       ('NOENT', reason, Time) if no answers were found.

       If both IPv4 and IPv6 addresses are found, return an IPv4 address
       iff preferIP4 is true.

       If this host does not support IPv6, never return an IPv6 address;
       return a ('NOENT', reason, Time) tuple if only IPv6 addresses are
       found.
    """
    _,haveIP6 = getProtocolSupport()
    try:
        r = getIPs(name)
        inet4 = [ addr for addr in r if addr[0] == AF_INET ]
        inet6 = [ addr for addr in r if addr[0] == AF_INET6 ]
        if not (inet4 or inet6):
            LOG.warn("getIP returned no inet addresses for %r",name)
            return ("NOENT", "No inet addresses returned", time.time())
        if inet6 and not inet4 and not haveIP6:
            return ("NOENT",
                 "All addresses were IPv6, and this host has no IPv6 support",
                 time.time())
        best4=best6=None
        if inet4: best4=inet4[0]
        if inet6: best6=inet6[0]
        if preferIP4:
            res = best4 or best6
        else:
            res = best6 or best4
        assert res
        assert res[0] in (AF_INET, AF_INET6)
        assert nameIsStaticIP(res[1])
        protoname = (res[0] == AF_INET) and "inet" or "inet6"
        LOG.trace("Result for getIP(%r): %s:%s (%d others dropped)",
                  name,protoname,res[1],len(r)-1)
        return res
    except socket.error, e:
        LOG.trace("Result for getIP(%r): error:%r",name,e)
        if len(e.args) == 2:
            return ("NOENT", str(e[1]), time.time())
        else:
            return ("NOENT", str(e), time.time())
Example #14
def getIP(name, preferIP4=PREFER_INET4):
    """Resolve the hostname 'name' and return the 'best' answer.  An
       answer is either a 3-tuple as returned by getIPs, or a 3-tuple of
       ('NOENT', reason, Time) if no answers were found.

       If both IPv4 and IPv6 addresses are found, return an IPv4 address
       iff preferIP4 is true.

       If this host does not support IPv6, never return an IPv6 address;
       return a ('NOENT', reason, Time) tuple if only IPv6 addresses are
       found.
    """
    _,haveIP6 = getProtocolSupport()
    try:
        r = getIPs(name)
        inet4 = [ addr for addr in r if addr[0] == AF_INET ]
        inet6 = [ addr for addr in r if addr[0] == AF_INET6 ]
        if not (inet4 or inet6):
            LOG.warn("getIP returned no inet addresses for %r",name)
            return ("NOENT", "No inet addresses returned", time.time())
        if inet6 and not inet4 and not haveIP6:
            return ("NOENT",
                 "All addresses were IPv6, and this host has no IPv6 support",
                 time.time())
        best4=best6=None
        if inet4: best4=inet4[0]
        if inet6: best6=inet6[0]
        if preferIP4:
            res = best4 or best6
        else:
            res = best6 or best4
        assert res
        assert res[0] in (AF_INET, AF_INET6)
        assert nameIsStaticIP(res[1])
        protoname = (res[0] == AF_INET) and "inet" or "inet6"
        LOG.trace("Result for getIP(%r): %s:%s (%d others dropped)",
                  name,protoname,res[1],len(r)-1)
        return res
    except socket.error, e:
        LOG.trace("Result for getIP(%r): error:%r",name,e)
        if len(e.args) == 2:
            return ("NOENT", str(e[1]), time.time())
        else:
            return ("NOENT", str(e), time.time())
Example #15
    def _updateRWState(self):
        """Helper: if we have any queued packets that haven't been sent yet,
           and we aren't waiting for WRITEAHEAD acks, and we're connected,
           start sending the pending packets.
        """
        if not self._isConnected: return

        while self.nPacketsSent < self.nPacketsAcked + self.WRITEAHEAD:
            if not self.packets:
                break
            LOG.trace("Queueing new packet for %s",self.address)
            self._startSendingNextPacket()

        if self.nPacketsAcked == self.nPacketsSent:
            LOG.debug("Successfully relayed all packets to %s",self.address)
            self.allPacketsSent()
            self._isConnected = 0
            self._isAlive = 0
            self.startShutdown()
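
The while condition above implements a fixed write-ahead window. The invariant in isolation; the WRITEAHEAD value here is purely illustrative:

WRITEAHEAD = 6   # illustrative; the real value is an attribute of the connection

def mayQueueAnother(nPacketsSent, nPacketsAcked):
    # At most WRITEAHEAD packets may be sent but not yet acknowledged.
    return nPacketsSent < nPacketsAcked + WRITEAHEAD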
Example #16
def getHashLog(filename, keyid):
    """Given a filename and keyid, return a HashLog object with that fname
       and ID, opening a new one if necessary.  This function is needed to
       implement key rotation: we want to assemble a list of current
       hashlogs, but we can't open the same HashLog database twice at once."""
    try:
        _HASHLOG_DICT_LOCK.acquire()
        try:
            keyid_orig, hl = _OPEN_HASHLOGS[filename]
            if keyid != keyid_orig:
                raise MixFatalError("KeyID changed for hashlog %s"%filename)
            LOG.trace("getHashLog() returning open hashlog at %s",filename)
        except KeyError:
            LOG.trace("getHashLog() opening hashlog at %s",filename)
            hl = HashLog(filename, keyid)
            _OPEN_HASHLOGS[filename] = (keyid, hl)
        return hl
    finally:
        _HASHLOG_DICT_LOCK.release()
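
getHashLog is an instance of a generic "open each file at most once" registry. A self-contained sketch of that pattern, with locking omitted for brevity; openOnce and its factory argument are illustrative names:

_OPEN = {}

def openOnce(path, factory):
    # Return the already-open object for 'path' if there is one; otherwise
    # create it with 'factory' and remember it, so the same file is never
    # opened twice at the same time.
    try:
        return _OPEN[path]
    except KeyError:
        obj = factory(path)
        _OPEN[path] = obj
        return obj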
Example #17
    def _updateRWState(self):
        """Helper: if we have any queued packets that haven't been sent yet,
           and we aren't waiting for WRITEAHEAD acks, and we're connected,
           start sending the pending packets.
        """
        if not self._isConnected: return

        while self.nPacketsSent < self.nPacketsAcked + self.WRITEAHEAD:
            if not self.packets:
                break
            LOG.trace("Queueing new packet for %s", self.address)
            self._startSendingNextPacket()

        if self.nPacketsAcked == self.nPacketsSent:
            LOG.debug("Successfully relayed all packets to %s", self.address)
            self.allPacketsSent()
            self._isConnected = 0
            self._isAlive = 0
            self.startShutdown()
Example #18
def getHashLog(filename, keyid):
    """Given a filename and keyid, return a HashLog object with that fname
       and ID, opening a new one if necessary.  This function is needed to
       implement key rotation: we want to assemble a list of current
       hashlogs, but we can't open the same HashLog database twice at once."""
    try:
        _HASHLOG_DICT_LOCK.acquire()
        try:
            keyid_orig, hl = _OPEN_HASHLOGS[filename]
            if keyid != keyid_orig:
                raise MixFatalError("KeyID changed for hashlog %s" % filename)
            LOG.trace("getHashLog() returning open hashlog at %s", filename)
        except KeyError:
            LOG.trace("getHashLog() opening hashlog at %s", filename)
            hl = HashLog(filename, keyid)
            _OPEN_HASHLOGS[filename] = (keyid, hl)
        return hl
    finally:
        _HASHLOG_DICT_LOCK.release()
Example #19
 def __doWrite(self, cap):
     "Helper function: write as much data from self.outbuf as we can."
     self.__writeBlockedOnRead = 0
     while self.outbuf and cap > 0:
         if self.__blockedWriteLen:
             # If the last write blocked, we must retry the exact same
             # length, or else OpenSSL will give an error.
             span = self.__blockedWriteLen
         else:
             # Otherwise, we try to write as much of the first string on
             # the output buffer as our bandwidth cap will allow.
             span = min(len(self.outbuf[0]), cap)
         try:
             n = self.tls.write(self.outbuf[0][:span])
         except _ml.TLSWantRead:
             self.__blockedWriteLen = span
             self.__writeBlockedOnRead = 1
             self.wantWrite = 0
             self.wantRead = 1
             return cap
         except _ml.TLSWantWrite:
             self.__blockedWriteLen = span
             self.wantWrite = 1
             return cap
         else:
             # We wrote some data: remove it from the buffer.
             assert n >= 0
             self.__blockedWriteLen = 0
             LOG.trace("Wrote %s bytes to %s", n, self.address)
             if n == len(self.outbuf[0]):
                 del self.outbuf[0]
             else:
                 self.outbuf[0] = self.outbuf[0][n:]
             self.outbuflen -= n
             cap -= n
             self.onWrite(n)
     if not self.outbuf:
         # There's no more data to write.  We only want write events now if
         # read is blocking on write.
         self.wantWrite = self.__readBlockedOnWrite
         self.doneWriting()
     return cap
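
The length-selection rule in the loop above matters because OpenSSL requires a blocked write to be retried with exactly the same buffer length. Stated on its own; chooseWriteSpan is an illustrative name:

def chooseWriteSpan(firstChunk, cap, blockedWriteLen):
    # If the previous write blocked, retry the identical length; otherwise
    # write as much of the first buffered chunk as the bandwidth cap allows.
    if blockedWriteLen:
        return blockedWriteLen
    return min(len(firstChunk), cap)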
Example #20
 def __doWrite(self, cap):
     "Helper function: write as much data from self.outbuf as we can."
     self.__writeBlockedOnRead = 0
     while self.outbuf and cap > 0:
         if self.__blockedWriteLen:
             # If the last write blocked, we must retry the exact same
             # length, or else OpenSSL will give an error.
             span = self.__blockedWriteLen
         else:
             # Otherwise, we try to write as much of the first string on
             # the output buffer as our bandwidth cap will allow.
             span = min(len(self.outbuf[0]),cap)
         try:
             n = self.tls.write(self.outbuf[0][:span])
         except _ml.TLSWantRead:
             self.__blockedWriteLen = span
             self.__writeBlockedOnRead = 1
             self.wantWrite = 0
             self.wantRead = 1
             return cap
         except _ml.TLSWantWrite:
             self.__blockedWriteLen = span
             self.wantWrite = 1
             return cap
         else:
             # We wrote some data: remove it from the buffer.
             assert n >= 0
             self.__blockedWriteLen = 0
             LOG.trace("Wrote %s bytes to %s", n, self.address)
             if n == len(self.outbuf[0]):
                 del self.outbuf[0]
             else:
                 self.outbuf[0] = self.outbuf[0][n:]
             self.outbuflen -= n
             cap -= n
             self.onWrite(n)
     if not self.outbuf:
         # There's no more data to write.  We only want write events now if
         # read is blocking on write.
         self.wantWrite = self.__readBlockedOnWrite
         self.doneWriting()
     return cap
Example #21
    def deliverySucceeded(self, handle, now=None):
        assert self.retrySchedule is not None
        self._lock.acquire()
        try:
            LOG.trace(
                "PerAddressDeliveryQueue got successful delivery for %s from %s",
                handle, self.qname)
            try:
                mState = self.store.getMetadata(handle)
            except CorruptedFile:
                mState = None
            if mState:
                aState = self._getAddressState(mState.address, now)
                aState.succeeded(now=now)
                aState.setNextAttempt(self.retrySchedule, now)
                self.addressStateDB[str(mState.address)] = aState

            self.removeMessage(handle)
        finally:
            self._lock.release()
Example #22
    def __shutdownFn(self, r, w, cap):
        """state function: TLS shutdonw"""
        while 1:
            if self.__awaitingShutdown:
                # We've already sent a 'shutdown' once.  Read until we
                # get another shutdown, or until we get enough data to
                # give up.
                s = "x"
                while s != 0:
                    #XXXX007 respect cap.
                    s = self.tls.read(_READLEN)  # might raise TLSWant*
                    if s == 0:
                        LOG.debug("Read returned 0; shutdown to %s done",
                                  self.address)
                    else:
                        self.__bytesReadOnShutdown += len(s)
                        if self.__bytesReadOnShutdown > 128:
                            self.__readTooMuch()
                            return 0

            done = self.tls.shutdown()

            if not done and self.__awaitingShutdown:
                # This should never actually happen, but let's cover the
                # possibility.
                LOG.error("Shutdown returned zero twice from %s--bailing",
                          self.address)
                done = 1
            if done:
                LOG.debug("Got a completed shutdown from %s", self.address)
                self.shutdownFinished()
                raise _Closing()
            else:
                LOG.trace("Shutdown returned zero -- entering read mode.")
                self.__awaitingShutdown = 1
                self.__bytesReadOnShutdown = 0
                self.wantRead = 1
                return 1

        raise AssertionError()  # unreached; appease pychecker
Example #23
    def __shutdownFn(self, r, w, cap):
        """state function: TLS shutdonw"""
        while 1:
            if self.__awaitingShutdown:
                # We've already sent a 'shutdown' once.  Read until we
                # get another shutdown, or until we get enough data to
                # give up.
                s = "x"
                while s != 0:
                    #XXXX007 respect cap.
                    s = self.tls.read(_READLEN) # might raise TLSWant*
                    if s == 0:
                        LOG.debug("Read returned 0; shutdown to %s done",
                                  self.address)
                    else:
                        self.__bytesReadOnShutdown += len(s)
                        if self.__bytesReadOnShutdown > 128:
                            self.__readTooMuch()
                            return 0

            done = self.tls.shutdown()

            if not done and self.__awaitingShutdown:
                # This should never actually happen, but let's cover the
                # possibility.
                LOG.error("Shutdown returned zero twice from %s--bailing",
                          self.address)
                done = 1
            if done:
                LOG.debug("Got a completed shutdown from %s", self.address)
                self.shutdownFinished()
                raise _Closing()
            else:
                LOG.trace("Shutdown returned zero -- entering read mode.")
                self.__awaitingShutdown = 1
                self.__bytesReadOnShutdown = 0
                self.wantRead = 1
                return 1

        raise AssertionError() # unreached; appease pychecker
Example #24
    def sendReadyMessages(self, now=None):
        """Sends all messages which are not already being sent, and which
           are scheduled to be sent."""
        assert self.retrySchedule is not None
        self._repOK()
        if now is None:
            now = time.time()
        LOG.trace("DeliveryQueue checking for deliverable messages in %s",
                  self.qname)
        try:
            self._lock.acquire()
            messages = []
            for h in self.store._metadata_cache.keys():
                try:
                    state = self.store.getMetadata(h)
                except CorruptedFile:
                    continue
                if state.isPending():
                    #LOG.trace("     [%s] is pending delivery", h)
                    continue
                elif state.isRemovable():
                    #LOG.trace("     [%s] is expired", h)
                    self.removeMessage(h)
                elif state.nextAttempt <= now:
                    #LOG.trace("     [%s] is ready for delivery", h)
                    if state is None:
                        addr = None
                    else:
                        addr = state.address
                    messages.append(PendingMessage(h,self,addr))
                    state.setPending(now)
                else:
                    #LOG.trace("     [%s] is not yet ready for redelivery", h)
                    continue
        finally:
            self._lock.release()

        self._deliverMessages(messages)
        self._repOK()
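
The per-handle branch above sorts each queued message into one of four outcomes. A compact restatement using a tiny stand-in for the metadata object; both names below are illustrative:

class FakeState:
    # Stand-in for the metadata objects used above, for illustration only.
    def __init__(self, pending=0, removable=0, nextAttempt=0):
        self.pending = pending
        self.removable = removable
        self.nextAttempt = nextAttempt
    def isPending(self):   return self.pending
    def isRemovable(self): return self.removable

def triage(state, now):
    if state.isPending():        return "already being delivered; skip"
    if state.isRemovable():      return "expired; remove"
    if state.nextAttempt <= now: return "ready; hand to a PendingMessage"
    return "not yet due; leave queued"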
Example #25
    def sendReadyMessages(self, now=None):
        """Sends all messages which are not already being sent, and which
           are scheduled to be sent."""
        assert self.retrySchedule is not None
        self._repOK()
        if now is None:
            now = time.time()
        LOG.trace("DeliveryQueue checking for deliverable messages in %s",
                  self.qname)
        try:
            self._lock.acquire()
            messages = []
            for h in self.store._metadata_cache.keys():
                try:
                    state = self.store.getMetadata(h)
                except CorruptedFile:
                    continue
                if state.isPending():
                    #LOG.trace("     [%s] is pending delivery", h)
                    continue
                elif state.isRemovable():
                    #LOG.trace("     [%s] is expired", h)
                    self.removeMessage(h)
                elif state.nextAttempt <= now:
                    #LOG.trace("     [%s] is ready for delivery", h)
                    if state is None:
                        addr = None
                    else:
                        addr = state.address
                    messages.append(PendingMessage(h, self, addr))
                    state.setPending(now)
                else:
                    #LOG.trace("     [%s] is not yet ready for redelivery", h)
                    continue
        finally:
            self._lock.release()

        self._deliverMessages(messages)
        self._repOK()
Example #26
 def __doRead(self, cap):
     "Helper function: read as much data as we can."
     self.__readBlockedOnWrite = 0
     # Keep reading until we decide to stop or run out of bandwidth
     #    (or break because we [1] need to wait for network events or
     #     [2] we get a shutdown.)
     while self.__reading and cap > 0:
         try:
             s = self.tls.read(min(_READLEN, cap))
             if s == 0:
                 # The other side sent us a shutdown; we'll shutdown too.
                 self.receivedShutdown()
                 LOG.trace(
                     "read returned 0: shutting down connection to %s",
                     self.address)
                 self.startShutdown()
                 break
             else:
                 # We got some data; add it to the inbuf.
                 LOG.trace("Read got %s bytes from %s", len(s),
                           self.address)
                 self.inbuf.append(s)
                 self.inbuflen += len(s)
                 cap -= len(s)
                 if (not self.tls.pending()) and cap > 0:
                     # Only call onRead when we've got all the pending
                     # data from self.tls, or we've just run out of
                     # allocated bandwidth.
                     self.onRead()
         except _ml.TLSWantRead:
             self.wantRead = 1
             break
         except _ml.TLSWantWrite:
             self.wantRead = 0
             self.wantWrite = 1
             self.__readBlockedOnWrite = 1
             break
     return cap
Example #27
    def deliveryFailed(self, handle, retriable=0, now=None):
        """Removes a message from the outgoing queue, or requeues it
           for delivery at a later time.  This method should be
           invoked after the corresponding message has been
           unsuccessfully delivered."""
        assert self.retrySchedule is not None
        LOG.trace("DeliveryQueue failed to deliver %s from %s",
                  handle, self.qname)
        try:
            self._lock.acquire()
            try:
                ds = self.store.getMetadata(handle)
            except KeyError:
                ds = None
            except CorruptedFile:
                return

            if ds is None:
                # This should never happen
                LOG.error_exc(sys.exc_info(),
                              "Handle %s had no state", handle)
                ds = _DeliveryState(now)
                ds.setNextAttempt(self.retrySchedule, now)
                self.store.setMetadata(handle, ds)
                return

            if not ds.isPending():
                LOG.error("Handle %s was not pending", handle)
                return

            last = ds.pending
            ds.setNonPending()

            if retriable:
                # If we can retry the message, update the deliveryState
                # with the most recent attempt, and see if there's another
                # attempt in the future.
                ds.setLastAttempt(last)
                ds.setNextAttempt(self.retrySchedule, now)
                if ds.nextAttempt is not None:
                    # There is another scheduled delivery attempt.  Remember
                    # it, mark the message sendable again, and save our state.
                    LOG.trace("     (We'll try %s again at %s)", handle,
                              formatTime(ds.nextAttempt, 1))

                    self.store.setMetadata(handle, ds)
                    return
                else:
                    assert ds.isRemovable()
                # Otherwise, fallthrough.

            # If we reach this point, the message is undeliverable, either
            # because 'retriable' is false, or because we've run out of
            # retries.
            LOG.trace("     (Giving up on %s)", handle)
            self.removeMessage(handle)
        finally:
            self._lock.release()
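
The retriable branch above leans on the retry schedule to decide whether another attempt exists. The idea in isolation, assuming the schedule is simply a list of delays; this is a restatement for illustration, not the real _DeliveryState.setNextAttempt:

def nextAttemptAfter(lastAttempt, attemptsSoFar, retrySchedule):
    # Each failed delivery pushes the next attempt further into the future;
    # once the schedule is exhausted, None means "give up and remove".
    if attemptsSoFar >= len(retrySchedule):
        return None
    return lastAttempt + retrySchedule[attemptsSoFar]

# e.g. nextAttemptAfter(1000, 2, [60, 300, 3600]) == 4600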
Example #28
def deleteHashLog(filename):
    """Remove all files associated with a hashlog."""
    try:
        _HASHLOG_DICT_LOCK.acquire()
        try:
            _, hl = _OPEN_HASHLOGS[filename]
            LOG.trace("deleteHashLog() removing open hashlog at %s",filename)
            hl.close()
        except KeyError:
            LOG.trace("deleteHashLog() removing closed hashlog at %s",filename)
            pass
        remove = []
        parent,name = os.path.split(filename)
        prefix1 = name+"."
        prefix2 = name+"."
        if os.path.exists(parent):
            for fn in os.listdir(parent):
                if fn.startswith(prefix1) or fn.startswith(prefix2):
                    remove.append(os.path.join(parent, fn))
        remove = [f for f in remove if os.path.exists(f)]
        secureDelete(remove, blocking=1)
    finally:
        _HASHLOG_DICT_LOCK.release()
Example #29
def buildForwardPacket(payload, exitType, exitInfo, path1, path2, paddingPRNG=None, suppressTag=0):
    """Construct a forward message.
            payload: The payload to deliver.  Must be exactly 28K.  If the
                  payload is None, 28K of random data is sent.
            exitType: The routing type for the final node. (2 bytes, >=0x100)
            exitInfo: The routing info for the final node, not including tag.
            path1: Sequence of ServerInfo objects for the first leg of the path
            path2: Sequence of ServerInfo objects for the 2nd leg of the path
            paddingPRNG: random number generator used to generate padding.
                  If None, a new PRNG is initialized.
            suppressTag: if true, do not include a decoding handle in the
                  routingInfo for this packet.

        Neither path1 nor path2 may be empty.  If one is, MixError is raised.
    """
    if paddingPRNG is None:
        paddingPRNG = Crypto.getCommonPRNG()
    if not path1:
        raise MixError("First leg of path is empty")
    if not path2:
        raise MixError("Second leg of path is empty")

    assert len(payload) == PAYLOAD_LEN

    LOG.trace(
        "  Building packet with path %s:%s; delivering to %04x:%r",
        ",".join([s.getNickname() for s in path1]),
        ",".join([s.getNickname() for s in path2]),
        exitType,
        exitInfo,
    )

    # Choose a random decoding tag.
    if not suppressTag:
        tag = _getRandomTag(paddingPRNG)
        exitInfo = tag + exitInfo
    return _buildPacket(payload, exitType, exitInfo, path1, path2, paddingPRNG, suppressTag=suppressTag)
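
The final step above prepends a random decoding tag to the exit routing info unless suppressTag is set. A hedged standalone sketch using os.urandom; the 20-byte tag length is an assumption for illustration:

import os

TAG_LEN = 20   # illustrative; the real constant lives in mixminion.Packet

def addDecodingTag(exitInfo, suppressTag=0):
    # Prepend a random tag so the exit node can later recognize and decode
    # the packet; skipped entirely when tags are suppressed.
    if suppressTag:
        return exitInfo
    return os.urandom(TAG_LEN) + exitInfo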
Example #30
def sendPackets(routing, packetList, timeout=300, callback=None):
    """Sends a list of packets to a server.  Raise MixProtocolError on
       failure.

       routing -- an instance of mixminion.Packet.IPV4Info or
                  mixminion.Packet.MMTPHostInfo.
                  If routing.keyinfo == '\000'*20, we ignore the server's
                  keyid.
       packetList -- a list of 32KB packets and control strings.  Control
           strings must be one of "JUNK" to send a 32KB padding chunk,
           or "RENEGOTIATE" to renegotiate the connection key.
       timeout -- None, or a number of seconds to wait for data
           on the connection before raising TimeoutError.
       callback -- None, or a function to call with an index into packetList
           after each successful packet delivery.
    """
    # Find out where we're connecting to.
    serverName = mixminion.ServerInfo.displayServerByRouting(routing)
    if isinstance(routing, IPV4Info):
        family, addr = socket.AF_INET, routing.ip
    else:
        assert isinstance(routing, MMTPHostInfo)
        LOG.trace("Looking up %s...", routing.hostname)
        family, addr, _ = mixminion.NetUtils.getIP(routing.hostname)
        if family == "NOENT":
            raise MixProtocolError("Couldn't resolve hostname %s: %s" %
                                   (routing.hostname, addr))

    # Create an MMTPClientConnection
    try:
        con = MMTPClientConnection(family,
                                   addr,
                                   routing.port,
                                   routing.keyinfo,
                                   serverName=serverName)
    except socket.error, e:
        raise MixProtocolError(str(e))
Example #31
    def deliveryFailed(self, handle, retriable=0, now=None):
        """Removes a message from the outgoing queue, or requeues it
           for delivery at a later time.  This method should be
           invoked after the corresponding message has been
           unsuccessfully delivered."""
        assert self.retrySchedule is not None
        LOG.trace("DeliveryQueue failed to deliver %s from %s", handle,
                  self.qname)
        try:
            self._lock.acquire()
            try:
                ds = self.store.getMetadata(handle)
            except KeyError:
                ds = None
            except CorruptedFile:
                return

            if ds is None:
                # This should never happen
                LOG.error_exc(sys.exc_info(), "Handle %s had no state", handle)
                ds = _DeliveryState(now)
                ds.setNextAttempt(self.retrySchedule, now)
                self.store.setMetadata(handle, ds)
                return

            if not ds.isPending():
                LOG.error("Handle %s was not pending", handle)
                return

            last = ds.pending
            ds.setNonPending()

            if retriable:
                # If we can retry the message, update the deliveryState
                # with the most recent attempt, and see if there's another
                # attempt in the future.
                ds.setLastAttempt(last)
                ds.setNextAttempt(self.retrySchedule, now)
                if ds.nextAttempt is not None:
                    # There is another scheduled delivery attempt.  Remember
                    # it, mark the message sendable again, and save our state.
                    LOG.trace("     (We'll try %s again at %s)", handle,
                              formatTime(ds.nextAttempt, 1))

                    self.store.setMetadata(handle, ds)
                    return
                else:
                    assert ds.isRemovable()
                # Otherwise, fallthrough.

            # If we reach this point, the message is undeliverable, either
            # because 'retriable' is false, or because we've run out of
            # retries.
            LOG.trace("     (Giving up on %s)", handle)
            self.removeMessage(handle)
        finally:
            self._lock.release()
Example #32
def buildForwardPacket(payload, exitType, exitInfo, path1, path2,
                       paddingPRNG=None, suppressTag=0):
    """Construct a forward message.
            payload: The payload to deliver.  Must be exactly 28K.  If the
                  payload is None, 28K of random data is sent.
            exitType: The routing type for the final node. (2 bytes, >=0x100)
            exitInfo: The routing info for the final node, not including tag.
            path1: Sequence of ServerInfo objects for the first leg of the path
            path2: Sequence of ServerInfo objects for the 2nd leg of the path
            paddingPRNG: random number generator used to generate padding.
                  If None, a new PRNG is initialized.
            suppressTag: if true, do not include a decoding handle in the
                  routingInfo for this packet.

        Neither path1 nor path2 may be empty.  If one is, MixError is raised.
    """
    if paddingPRNG is None:
        paddingPRNG = Crypto.getCommonPRNG()
    if not path1:
        raise MixError("First leg of path is empty")
    if not path2:
        raise MixError("Second leg of path is empty")

    assert len(payload) == PAYLOAD_LEN

    LOG.trace("  Building packet with path %s:%s; delivering to %04x:%r",
                   ",".join([s.getNickname() for s in path1]),
                   ",".join([s.getNickname() for s in path2]),
                   exitType, exitInfo)

    # Choose a random decoding tag.
    if not suppressTag:
        tag = _getRandomTag(paddingPRNG)
        exitInfo = tag + exitInfo
    return _buildPacket(payload, exitType, exitInfo, path1, path2,
                        paddingPRNG,suppressTag=suppressTag)
Example #33
    def getSigners(self):
        #DOCDOC -- returns members of self.dirInfo.voters with valid signatures.
        if self.signers is not None:
            return self.signers

        sigs = {}
        self.signers = []
        for s in self.signatures:
            sigs[s.getKeyFingerprint()] = s
        for digest, url in self.dirInfo.voters:
            try:
                s = sigs[digest]
            except KeyError:
                #XXXX008 log something.
                continue
            if s.checkSignature():
                LOG.trace("Found valid signature from %s at %s", digest, url)
                self.signers.append((digest, url))
            else:
                LOG.trace("Signature claiming to be from %s was not valid",
                          digest)
                continue

        return self.signers
Example #34
 def __doRead(self, cap):
     "Helper function: read as much data as we can."
     self.__readBlockedOnWrite = 0
     # Keep reading until we decide to stop or run out of bandwidth
     #    (or break because we [1] need to wait for network events or
     #     [2] we get a shutdown.)
     while self.__reading and cap > 0:
         try:
             s = self.tls.read(min(_READLEN,cap))
             if s == 0:
                 # The other side sent us a shutdown; we'll shutdown too.
                 self.receivedShutdown()
                 LOG.trace("read returned 0: shutting down connection to %s"
                           , self.address)
                 self.startShutdown()
                 break
             else:
                 # We got some data; add it to the inbuf.
                 LOG.trace("Read got %s bytes from %s",len(s), self.address)
                 self.inbuf.append(s)
                 self.inbuflen += len(s)
                 cap -= len(s)
                 if (not self.tls.pending()) and cap > 0:
                     # Only call onRead when we've got all the pending
                     # data from self.tls, or we've just run out of
                     # allocated bandwidth.
                     self.onRead()
         except _ml.TLSWantRead:
             self.wantRead = 1
             break
         except _ml.TLSWantWrite:
             self.wantRead = 0
             self.wantWrite = 1
             self.__readBlockedOnWrite = 1
             break
     return cap
Example #35
def deleteHashLog(filename):
    """Remove all files associated with a hashlog."""
    try:
        _HASHLOG_DICT_LOCK.acquire()
        try:
            _, hl = _OPEN_HASHLOGS[filename]
            LOG.trace("deleteHashLog() removing open hashlog at %s", filename)
            hl.close()
        except KeyError:
            LOG.trace("deleteHashLog() removing closed hashlog at %s",
                      filename)
            pass
        remove = []
        parent, name = os.path.split(filename)
        prefix1 = name + "."
        prefix2 = name + "."
        if os.path.exists(parent):
            for fn in os.listdir(parent):
                if fn.startswith(prefix1) or fn.startswith(prefix2):
                    remove.append(os.path.join(parent, fn))
        remove = [f for f in remove if os.path.exists(f)]
        secureDelete(remove, blocking=1)
    finally:
        _HASHLOG_DICT_LOCK.release()
Example #36
class PeerCertificateCache:
    """A PeerCertificateCache validates certificate chains from MMTP servers,
       and remembers which chains we've already seen and validated."""

    ## Fields
    # cache: A map from peer (temporary) KeyID's to a (signing) KeyID.
    def __init__(self):
        self.cache = {}

    def check(self, tls, targetKeyID, serverName):
        """Check whether the certificate chain on the TLS connection 'tls'
           is valid, current, and matches the keyID 'targetKeyID'.  If so,
           return.  If not, raise MixProtocolBadAuth.  Display all messages
           using the server 'serverName'.
        """

        # First, make sure the certificate is neither premature nor expired.
        try:
            tls.check_cert_alive()
        except _ml.TLSError, e:
            s = str(e)
            skewed = 0
            notBefore, notAfter = tls.get_cert_lifetime()
            # XXXX 'stringContains' is not the best possible check here...
            if stringContains(s, "expired"):
                s += " [expired at %s]" % notAfter
                skewed = 1
            elif stringContains(s, "not yet valid"):
                s += " [not valid until %s]" % notBefore
                skewed = 1
            if skewed:
                s += " (One of you may have a skewed clock or wrong time zone)"
            raise MixProtocolBadAuth("Invalid certificate from %s: %s " %
                                     (serverName, s))

        # If we don't care whom we're talking to, we don't need to check
        # them out.
        if targetKeyID is None:
            return

        # Get the KeyID for the peer (temporary) key.
        hashed_peer_pk = sha1(tls.get_peer_cert_pk().encode_key(public=1))

        # Before 0.0.4alpha, a server's keyID was a hash of its current
        # TLS public key.  In 0.0.4alpha, we allowed this for backward
        # compatibility.  As of 0.0.4alpha2, since we've dropped backward
        # compatibility with earlier packet formats, we drop certificate
        # compatibility as well.
        if targetKeyID == hashed_peer_pk:
            raise MixProtocolBadAuth(
                "Pre-0.0.4 (non-rotatable) certificate from %s" % serverName)

        try:
            if targetKeyID == self.cache[hashed_peer_pk]:
                # We recognize the key, and have already seen it to be
                # signed by the target identity.
                LOG.trace("Got a cached certificate from %s", serverName)
                return  # All is well.
            else:
                # We recognize the key, but some other identity signed it.
                raise MixProtocolBadAuth(
                    "Mismatch between expected and actual key ID")
        except KeyError:
            pass

        # We haven't found an identity for this pk yet.  Try to check the
        # signature on it.
        try:
            identity = tls.verify_cert_and_get_identity_pk()
        except _ml.TLSError, e:
            raise MixProtocolBadAuth("Invalid KeyID (allegedly) from %s: %s" %
                                     serverName)
Example #37
                return  # All is well.
            else:
                # We recognize the key, but some other identity signed it.
                raise MixProtocolBadAuth(
                    "Mismatch between expected and actual key ID")
        except KeyError:
            pass

        # We haven't found an identity for this pk yet.  Try to check the
        # signature on it.
        try:
            identity = tls.verify_cert_and_get_identity_pk()
        except _ml.TLSError, e:
            raise MixProtocolBadAuth("Invalid KeyID (allegedly) from %s: %s" %
                                     serverName)

        # Okay, remember who has signed this certificate.
        hashed_identity = sha1(identity.encode_key(public=1))
        LOG.trace("Remembering valid certificate for %s", serverName)
        self.cache[hashed_peer_pk] = hashed_identity

        # Note: we don't need to worry about two identities signing the
        # same certificate.  While this *is* possible to do, it's useless:
        # You could get someone else's certificate and sign it, but you
        # couldn't start up a TLS connection with that certificate without
        # stealing their private key too.

        # Was the signer the right person?
        if hashed_identity != targetKeyID:
            raise MixProtocolBadAuth("Invalid KeyID for %s" % serverName)
Example #38
                return # All is well.
            else:
                # We recognize the key, but some other identity signed it.
                raise MixProtocolBadAuth(
                    "Mismatch between expected and actual key ID")
        except KeyError:
            pass

        # We haven't found an identity for this pk yet.  Try to check the
        # signature on it.
        try:
            identity = tls.verify_cert_and_get_identity_pk()
        except _ml.TLSError, e:
            raise MixProtocolBadAuth("Invalid KeyID (allegedly) from %s: %s"
                                   %serverName)

        # Okay, remember who has signed this certificate.
        hashed_identity = sha1(identity.encode_key(public=1))
        LOG.trace("Remembering valid certificate for %s", serverName)
        self.cache[hashed_peer_pk] = hashed_identity

        # Note: we don't need to worry about two identities signing the
        # same certificate.  While this *is* possible to do, it's useless:
        # You could get someone else's certificate and sign it, but you
        # couldn't start up a TLS connection with that certificate without
        # stealing their private key too.

        # Was the signer the right person?
        if hashed_identity != targetKeyID:
            raise MixProtocolBadAuth("Invalid KeyID for %s" % serverName)
Example #39
class ServerKeyring:
    """A ServerKeyring remembers current and future keys, descriptors, and
       hash logs for a mixminion server.  It keeps track of key rotation
       schedules, and generates new keys as needed.
       """

    ## Fields:
    # homeDir: server home directory
    # keyDir: server key directory
    # keyOverlap: How long after a new key begins do we accept the old one?
    # keySets: sorted list of (start, end, keyset)
    # nextUpdate: time_t when a new key should be added, or a current key
    #      should be removed, or "None" for uncalculated.
    # keyRange: tuple of (firstKey, lastKey) to represent which key names
    #      have keys on disk.
    # currentKeys: None, if we haven't checked for currently live keys, or
    #      a list of currently live ServerKeyset objects.
    # dhFile: pathname to file holding Diffie-Hellman parameters.
    # _lock: A lock to prevent concurrent key generation or rotation.

    def __init__(self, config):
        "Create a ServerKeyring from a config object"
        self._lock = threading.RLock()
        self.configure(config)

    def configure(self, config):
        "Set up a ServerKeyring from a config object"
        self.config = config
        self.homeDir = config.getBaseDir()
        self.keyDir = config.getKeyDir()
        self.hashDir = os.path.join(config.getWorkDir(), 'hashlogs')
        self.dhFile = os.path.join(config.getWorkDir(), 'tls', 'dhparam')
        self.certFile = os.path.join(config.getWorkDir(), "cert_chain")
        self.keyOverlap = config['Server']['PublicKeyOverlap'].getSeconds()
        self.nickname = config['Server']['Nickname']  #DOCDOC
        self.nextUpdate = None
        self.currentKeys = None
        self._tlsContext = None  #DOCDOC
        self._tlsContextExpires = -1  #DOCDOC
        self.pingerSeed = None
        self.checkKeys()

    def checkKeys(self):
        """Internal method: read information about all this server's
           currently-prepared keys from disk.

           May raise ConfigError if any of the server descriptors on disk
           are invalid.
           """
        self.keySets = []
        badKeySets = []
        firstKey = sys.maxint
        lastKey = 0

        LOG.debug("Scanning server keystore at %s", self.keyDir)

        if not os.path.exists(self.keyDir):
            LOG.info("Creating server keystore at %s", self.keyDir)
            createPrivateDir(self.keyDir)

        # Iterate over the entries in HOME/keys
        for dirname in os.listdir(self.keyDir):
            # Skip any that aren't directories named "key_INT"
            if not os.path.isdir(os.path.join(self.keyDir, dirname)):
                continue
            if not dirname.startswith('key_'):
                LOG.warn("Unexpected directory %s under %s", dirname,
                         self.keyDir)
                continue
            keysetname = dirname[4:]
            try:
                setNum = int(keysetname)
                # keep track of the first and last used key number
                if setNum < firstKey: firstKey = setNum
                if setNum > lastKey: lastKey = setNum
            except ValueError:
                LOG.warn("Unexpected directory %s under %s", dirname,
                         self.keyDir)
                continue

            # Find the server descriptor...
            keyset = ServerKeyset(self.keyDir, keysetname, self.hashDir)
            ok = 1
            try:
                keyset.checkKeys()
            except MixError, e:
                LOG.warn("Error checking private keys in keyset %s: %s",
                         keysetname, str(e))
                ok = 0

            try:
                if ok:
                    keyset.getServerDescriptor()
            except (ConfigError, IOError), e:
                LOG.warn("Key set %s has invalid/missing descriptor: %s",
                         keysetname, str(e))
                ok = 0

            if ok:
                t1, t2 = keyset.getLiveness()
                self.keySets.append((t1, t2, keyset))

                LOG.trace("Found key %s (valid from %s to %s)", dirname,
                          formatDate(t1), formatDate(t2))
            else:
                badKeySets.append(keyset)