def __readTooMuch(self):
    """Helper invoked when a connection that is being shut down
       delivers more trailing data than we are willing to tolerate:
       log the protocol violation, signal a TLS error, and abort."""
    LOG.error("Read over 128 bytes of unexpected data from closing "
              "connection to %s", self.address)
    self.onTLSError()
    raise _Closing()
def deliveryFailed(self, handle, retriable=0, now=None):
    """Record a failed delivery attempt for the message 'handle'.

       If 'retriable' is false, the message is removed for good; in
       either case the failure is charged to the destination address,
       whose retry schedule is then advanced and flushed to disk."""
    assert self.retrySchedule is not None
    if now is None:
        now = time.time()
    self._lock.acquire()
    try:
        try:
            state = self.store.getMetadata(handle)
        except (KeyError, CorruptedFile):
            # Missing and damaged metadata are handled identically below.
            state = None
        if state is None:
            # This should never happen: every queued message has state.
            LOG.error_exc(sys.exc_info(),
                          "Handle %s had no state; removing", handle)
            self.removeMessage(handle)
            return
        if not state.isPending():
            LOG.error("Handle %s was not pending", handle)
            return

        lastAttempt = state.pending
        state.setNonPending()
        if not retriable:
            LOG.trace(" (Giving up on %s)", handle)
            self.removeMessage(handle)

        # Charge the failure to the destination address and schedule
        # the next attempt for that address.
        addrState = self._getAddressState(state.address, now)
        addrState.failed(attempt=lastAttempt, now=now)
        addrState.setNextAttempt(self.retrySchedule, now=now)
        self.addressStateDB[str(addrState.address)] = addrState # flush to db.
    finally:
        self._lock.release()
def publishKeys(self, allKeys=0):
    """Publish server descriptors to the directory server.

       Ordinarily, only unpublished descriptors are sent.  If allKeys
       is true, all descriptors are sent.  Returns 1 when every
       descriptor is accepted, 0 on any rejection or error, and None
       when there is nothing to publish."""
    toPublish = [ks for _, _, ks in self.keySets]
    if allKeys:
        LOG.info("Republishing all known keys to directory server")
    else:
        toPublish = [ks for ks in toPublish if not ks.isPublished()]
    if not toPublish:
        LOG.trace("publishKeys: no unpublished keys found")
        return
    LOG.info("Publishing %s keys to directory server...", len(toPublish))
    nRejected = 0
    for ks in toPublish:
        outcome = ks.publish(DIRECTORY_UPLOAD_URL)
        if outcome == 'error':
            # A hard failure: stop immediately rather than hammer the
            # directory with the remaining descriptors.
            LOG.error("Error publishing a key; giving up")
            return 0
        elif outcome == 'reject':
            nRejected += 1
        else:
            assert outcome == 'accept'
    if nRejected:
        LOG.info("%s/%s keys were rejected." , nRejected, len(toPublish))
        return 0
    LOG.info("All keys published successfully.")
    return 1
def deliveryFailed(self, handle, retriable=0, now=None):
    """Record a failed delivery attempt for the message 'handle'.

       If 'retriable' is false, the message is removed for good; in
       either case the failure is charged to the destination address,
       whose retry schedule is then advanced and flushed to disk."""
    assert self.retrySchedule is not None
    if now is None:
        now = time.time()
    self._lock.acquire()
    try:
        try:
            state = self.store.getMetadata(handle)
        except (KeyError, CorruptedFile):
            # Missing and damaged metadata are handled identically below.
            state = None
        if state is None:
            # This should never happen: every queued message has state.
            LOG.error_exc(sys.exc_info(),
                          "Handle %s had no state; removing", handle)
            self.removeMessage(handle)
            return
        if not state.isPending():
            LOG.error("Handle %s was not pending", handle)
            return

        lastAttempt = state.pending
        state.setNonPending()
        if not retriable:
            LOG.trace(" (Giving up on %s)", handle)
            self.removeMessage(handle)

        # Charge the failure to the destination address and schedule
        # the next attempt for that address.
        addrState = self._getAddressState(state.address, now)
        addrState.failed(attempt=lastAttempt, now=now)
        addrState.setNextAttempt(self.retrySchedule, now=now)
        self.addressStateDB[str(addrState.address)] = addrState # flush to db.
    finally:
        self._lock.release()
def publishKeys(self, allKeys=0):
    """Publish server descriptors to the directory server.

       Ordinarily, only unpublished descriptors are sent.  If allKeys
       is true, all descriptors are sent.  Returns 1 when every
       descriptor is accepted, 0 on any rejection or error, and None
       when there is nothing to publish."""
    toPublish = [ks for _, _, ks in self.keySets]
    if allKeys:
        LOG.info("Republishing all known keys to directory server")
    else:
        toPublish = [ks for ks in toPublish if not ks.isPublished()]
    if not toPublish:
        LOG.trace("publishKeys: no unpublished keys found")
        return
    LOG.info("Publishing %s keys to directory server...", len(toPublish))
    nRejected = 0
    for ks in toPublish:
        outcome = ks.publish(DIRECTORY_UPLOAD_URL)
        if outcome == 'error':
            # A hard failure: stop immediately rather than hammer the
            # directory with the remaining descriptors.
            LOG.error("Error publishing a key; giving up")
            return 0
        elif outcome == 'reject':
            nRejected += 1
        else:
            assert outcome == 'accept'
    if nRejected:
        LOG.info("%s/%s keys were rejected.", nRejected, len(toPublish))
        return 0
    LOG.info("All keys published successfully.")
    return 1
def __readTooMuch(self):
    """Helper invoked when a connection that is being shut down
       delivers more trailing data than we are willing to tolerate:
       log the protocol violation, signal a TLS error, and abort."""
    LOG.error("Read over 128 bytes of unexpected data from closing "
              "connection to %s", self.address)
    self.onTLSError()
    raise _Closing()
def configure_trng(config):
    """Initialize the true entropy source from a given Config object.
       If none is provided, tries some sane defaults.

       Side effects: sets the module globals _TRNG_FILENAME and
       _theTrueRNG.  Raises MixFatalError if no usable entropy source
       can be found and none was configured previously."""
    global _TRNG_FILENAME
    global _theTrueRNG

    if sys.platform == 'win32':
        # We have two entropy sources on windows: openssl's built-in
        # entropy generator that takes data from the screen, and
        # Windows's CryptGenRandom function.  Because the former is
        # insecure, and the latter is closed-source, we xor them.
        _ml.win32_openssl_seed()
        _ml.openssl_seed(_ml.win32_get_random_bytes(32))
        _theTrueRNG = _XorRNG(_OpensslRNG(), _WinTrueRNG())
        return

    if config is not None:
        requestedFile = config['Host'].get('EntropySource')
    else:
        requestedFile = None

    # Build a list of candidates: the explicitly requested file first,
    # then the platform defaults.
    defaults = PLATFORM_TRNG_DEFAULTS.get(sys.platform,
                                          PLATFORM_TRNG_DEFAULTS['***'])
    files = [requestedFile] + defaults

    # Now find the first of our candidates that exists and is a character
    # device.
    randFile = None
    for filename in files:
        if filename is None:
            continue
        # Only complain about candidates the operator asked for by name.
        verbose = (filename == requestedFile)
        if not os.path.exists(filename):
            if verbose:
                LOG.warn("No such file as %s", filename)
        else:
            st = os.stat(filename)
            # Fix: use S_ISCHR for an exact file-type test.  The old
            # check "mode & S_IFCHR" was wrong, because file types are
            # a multi-bit field (S_IFMT): e.g. S_IFBLK (0o060000)
            # includes the S_IFCHR bit, so a block device passed.
            if not stat.S_ISCHR(st[stat.ST_MODE]):
                if verbose:
                    LOG.error("Entropy source %s isn't a character device",
                              filename)
            else:
                randFile = filename
                break

    if randFile is None and _TRNG_FILENAME is None:
        LOG.fatal("No entropy source available: Tried all of %s", files)
        raise MixFatalError("No entropy source available")
    elif randFile is None:
        LOG.warn("Falling back to previous entropy source %s",
                 _TRNG_FILENAME)
    else:
        LOG.info("Setting entropy source to %r", randFile)
        _TRNG_FILENAME = randFile
        _theTrueRNG = _TrueRNG(1024)
def deliveryFailed(self, handle, retriable=0, now=None):
    """Removes a message from the outgoing queue, or requeues it for
       delivery at a later time.  This method should be invoked after
       the corresponding message has been unsuccessfully delivered.

       handle -- the handle of the failed message.
       retriable -- if true, consult the retry schedule for another
           attempt; otherwise give up on the message.
       now -- current time in seconds; defaults to time.time()."""
    assert self.retrySchedule is not None
    # Fix: honor the documented 'now=None' default.  Previously a call
    # without 'now' passed None through to _DeliveryState/setNextAttempt;
    # the sibling deliveryFailed implementation already normalizes it.
    if now is None:
        now = time.time()
    LOG.trace("DeliveryQueue failed to deliver %s from %s",
              handle, self.qname)
    try:
        self._lock.acquire()
        try:
            ds = self.store.getMetadata(handle)
        except KeyError:
            ds = None
        except CorruptedFile:
            # Damaged metadata: nothing sensible to update; bail out.
            return
        if ds is None:
            # This should never happen: rebuild a fresh delivery state
            # so the message at least gets a retry schedule.
            LOG.error_exc(sys.exc_info(), "Handle %s had no state", handle)
            ds = _DeliveryState(now)
            ds.setNextAttempt(self.retrySchedule, now)
            self.store.setMetadata(handle, ds)
            return
        if not ds.isPending():
            LOG.error("Handle %s was not pending", handle)
            return

        last = ds.pending
        ds.setNonPending()

        if retriable:
            # If we can retry the message, update the deliveryState
            # with the most recent attempt, and see if there's another
            # attempt in the future.
            ds.setLastAttempt(last)
            ds.setNextAttempt(self.retrySchedule, now)
            if ds.nextAttempt is not None:
                # There is another scheduled delivery attempt.  Remember
                # it, mark the message sendable again, and save our state.
                LOG.trace(" (We'll try %s again at %s)", handle,
                          formatTime(ds.nextAttempt, 1))
                self.store.setMetadata(handle, ds)
                return
            # The schedule is exhausted; the message must be removable.
            assert ds.isRemovable()
        # If we reach this point, the message is undeliverable, either
        # because 'retriable' is false, or because we've run out of
        # retries.
        LOG.trace(" (Giving up on %s)", handle)
        self.removeMessage(handle)
    finally:
        self._lock.release()
def configure_trng(config):
    """Initialize the true entropy source from a given Config object.
       If none is provided, tries some sane defaults.

       Side effects: sets the module globals _TRNG_FILENAME and
       _theTrueRNG.  Raises MixFatalError if no usable entropy source
       can be found and none was configured previously."""
    global _TRNG_FILENAME
    global _theTrueRNG

    if sys.platform == 'win32':
        # We have two entropy sources on windows: openssl's built-in
        # entropy generator that takes data from the screen, and
        # Windows's CryptGenRandom function.  Because the former is
        # insecure, and the latter is closed-source, we xor them.
        _ml.win32_openssl_seed()
        _ml.openssl_seed(_ml.win32_get_random_bytes(32))
        _theTrueRNG = _XorRNG(_OpensslRNG(), _WinTrueRNG())
        return

    if config is not None:
        requestedFile = config['Host'].get('EntropySource')
    else:
        requestedFile = None

    # Build a list of candidates: the explicitly requested file first,
    # then the platform defaults.
    defaults = PLATFORM_TRNG_DEFAULTS.get(sys.platform,
                                          PLATFORM_TRNG_DEFAULTS['***'])
    files = [requestedFile] + defaults

    # Now find the first of our candidates that exists and is a character
    # device.
    randFile = None
    for filename in files:
        if filename is None:
            continue
        # Only complain about candidates the operator asked for by name.
        verbose = (filename == requestedFile)
        if not os.path.exists(filename):
            if verbose:
                LOG.warn("No such file as %s", filename)
        else:
            st = os.stat(filename)
            # Fix: use S_ISCHR for an exact file-type test.  The old
            # check "mode & S_IFCHR" was wrong, because file types are
            # a multi-bit field (S_IFMT): e.g. S_IFBLK (0o060000)
            # includes the S_IFCHR bit, so a block device passed.
            if not stat.S_ISCHR(st[stat.ST_MODE]):
                if verbose:
                    LOG.error("Entropy source %s isn't a character device",
                              filename)
            else:
                randFile = filename
                break

    if randFile is None and _TRNG_FILENAME is None:
        LOG.fatal("No entropy source available: Tried all of %s", files)
        raise MixFatalError("No entropy source available")
    elif randFile is None:
        LOG.warn("Falling back to previous entropy source %s",
                 _TRNG_FILENAME)
    else:
        LOG.info("Setting entropy source to %r", randFile)
        _TRNG_FILENAME = randFile
        _theTrueRNG = _TrueRNG(1024)
def _sendPackets(self, family, ip, port, keyID, deliverable, serverName):
    """Begin sending a set of packets to a given server.

       family -- address family for the connection.
       ip, port, keyID -- the target server's address and identity
           key fingerprint; together they index self.clientConByAddr.
       'deliverable' is a list of objects obeying the DeliverableMessage
       interface.
       serverName -- human-readable server name, used for logging.
    """
    try:
        # Is there an existing connection open to the right server?
        con = self.clientConByAddr[(ip, port, keyID)]
    except KeyError:
        pass
    else:
        # No exception: There is an existing connection.  But is that
        # connection currently sending packets?
        if con.isActive():
            # Yes: piggyback our packets on it instead of opening a
            # second connection to the same server.
            LOG.debug("Queueing %s packets on open connection to %s",
                      len(deliverable), con.address)
            for d in deliverable:
                con.addPacket(d)
            return

    if len(self.clientConByAddr) >= self.maxClientConnections:
        # Too many outgoing connections already open; park these packets
        # so they can be sent when a slot frees up.
        LOG.debug("We already have %s open client connections; delaying %s packets for %s",
                  len(self.clientConByAddr), len(deliverable), serverName)
        self.pendingPackets.append((family, ip, port, keyID, deliverable,
                                    serverName))
        return

    try:
        # There isn't any connection to the right server. Open one...
        addr = (ip, port, keyID)
        # Bind addr/self as lambda defaults so the callback captures the
        # current values rather than late-bound closure variables.
        finished = lambda addr=addr, self=self: self.__clientFinished(addr)
        con = _ClientCon(family, ip, port, keyID,
                         serverName=serverName,
                         context=self.clientContext,
                         certCache=self.certificateCache)
        nickname = mixminion.ServerInfo.getNicknameByKeyID(keyID)
        if nickname is not None:
            # If we recognize this server, then we'll want to tell
            # the ping log what happens to our connection attempt.
            con.configurePingLog(self.pingLog, keyID)
        #con.allPacketsSent = finished #XXXX007 wrong!
        con.onClosed = finished
    except (socket.error, MixProtocolError), e:
        LOG.error("Unexpected socket error connecting to %s: %s",
                  serverName, e)
        EventStats.log.failedConnect() #FFFF addr
        # Best effort: notify each message of a (retriable) failure if
        # it supports the DeliverableMessage interface.
        for m in deliverable:
            try:
                m.failed(1)
            except AttributeError:
                pass
def getPacket(self, handle):
    """Given a handle, return a 3-tuple of the corresponding
       32K packet, {IPV4/Host}Info, and time of first queueing.  (The
       time is rounded down to the closest midnight GMT.)  Returns
       None if the stored object is not in the expected format.  May
       raise CorruptedFile."""
    stored = self.store.getObject(handle)
    try:
        magic, packet, routing, when = stored
    except (ValueError, TypeError):
        # Not a 4-element sequence; treat as an unrecognized format.
        magic = None
    if magic == "PACKET-0":
        return packet, routing, when
    LOG.error("Unrecognized packet format for %s", handle)
    return None
def getPacket(self, handle):
    """Given a handle, return a 3-tuple of the corresponding
       32K packet, {IPV4/Host}Info, and time of first queueing.  (The
       time is rounded down to the closest midnight GMT.)  Returns
       None if the stored object is not in the expected format.  May
       raise CorruptedFile."""
    stored = self.store.getObject(handle)
    try:
        magic, packet, routing, when = stored
    except (ValueError, TypeError):
        # Not a 4-element sequence; treat as an unrecognized format.
        magic = None
    if magic == "PACKET-0":
        return packet, routing, when
    LOG.error("Unrecognized packet format for %s", handle)
    return None
def _sendPackets(self, family, ip, port, keyID, deliverable, serverName):
    """Begin sending a set of packets to a given server.

       family -- address family for the connection.
       ip, port, keyID -- the target server's address and identity
           key fingerprint; together they index self.clientConByAddr.
       'deliverable' is a list of objects obeying the DeliverableMessage
       interface.
       serverName -- human-readable server name, used for logging.
    """
    try:
        # Is there an existing connection open to the right server?
        con = self.clientConByAddr[(ip,port,keyID)]
    except KeyError:
        pass
    else:
        # No exception: There is an existing connection.  But is that
        # connection currently sending packets?
        if con.isActive():
            # Yes: piggyback our packets on it instead of opening a
            # second connection to the same server.
            LOG.debug("Queueing %s packets on open connection to %s",
                      len(deliverable), con.address)
            for d in deliverable:
                con.addPacket(d)
            return

    if len(self.clientConByAddr) >= self.maxClientConnections:
        # Too many outgoing connections already open; park these packets
        # so they can be sent when a slot frees up.
        LOG.debug("We already have %s open client connections; delaying %s packets for %s",
                  len(self.clientConByAddr), len(deliverable), serverName)
        self.pendingPackets.append((family,ip,port,keyID,deliverable,serverName))
        return

    try:
        # There isn't any connection to the right server. Open one...
        addr = (ip, port, keyID)
        # Bind addr/self as lambda defaults so the callback captures the
        # current values rather than late-bound closure variables.
        finished = lambda addr=addr, self=self: self.__clientFinished(addr)
        con = _ClientCon(family, ip, port, keyID,
                         serverName=serverName,
                         context=self.clientContext,
                         certCache=self.certificateCache)
        nickname = mixminion.ServerInfo.getNicknameByKeyID(keyID)
        if nickname is not None:
            # If we recognize this server, then we'll want to tell
            # the ping log what happens to our connection attempt.
            con.configurePingLog(self.pingLog, keyID)
        #con.allPacketsSent = finished #XXXX007 wrong!
        con.onClosed = finished
    except (socket.error, MixProtocolError), e:
        LOG.error("Unexpected socket error connecting to %s: %s",
                  serverName, e)
        EventStats.log.failedConnect() #FFFF addr
        # Best effort: notify each message of a (retriable) failure if
        # it supports the DeliverableMessage interface.
        for m in deliverable:
            try:
                m.failed(1)
            except AttributeError:
                pass
def getObject(self, handle): """Given a message handle, read and unpickle the contents of the corresponding message. In rare error cases, raises CorruptedFile. """ try: self._lock.acquire() f = open(os.path.join(self.dir, "msg_" + handle), 'rb') try: res = cPickle.load(f) f.close() return res except (cPickle.UnpicklingError, EOFError, IOError), e: LOG.error("Found damaged object %s in filestore %s: %s", handle, self.dir, str(e)) self._preserveCorrupted(handle) raise CorruptedFile() finally: self._lock.release()
def getObject(self, handle): """Given a message handle, read and unpickle the contents of the corresponding message. In rare error cases, raises CorruptedFile. """ try: self._lock.acquire() f = open(os.path.join(self.dir, "msg_"+handle), 'rb') try: res = cPickle.load(f) f.close() return res except (cPickle.UnpicklingError, EOFError, IOError), e: LOG.error("Found damaged object %s in filestore %s: %s", handle, self.dir, str(e)) self._preserveCorrupted(handle) raise CorruptedFile() finally: self._lock.release()
def publish(self, url): """Try to publish this descriptor to a given directory URL. Returns 'accept' if the publication was successful, 'reject' if the server refused to accept the descriptor, and 'error' if publication failed for some other reason.""" fname = self.getDescriptorFileName() descriptor = readFile(fname) fields = urllib.urlencode({"desc" : descriptor}) f = None try: try: f = urllib2.urlopen(url, fields) info = f.info() reply = f.read() except IOError, e: LOG.error("Error while publishing server descriptor: %s",e) return 'error' except: LOG.error_exc(sys.exc_info(), "Error publishing server descriptor") return 'error'
def publish(self, url): """Try to publish this descriptor to a given directory URL. Returns 'accept' if the publication was successful, 'reject' if the server refused to accept the descriptor, and 'error' if publication failed for some other reason.""" fname = self.getDescriptorFileName() descriptor = readFile(fname) fields = urllib.urlencode({"desc": descriptor}) f = None try: try: f = urllib2.urlopen(url, fields) info = f.info() reply = f.read() except IOError, e: LOG.error("Error while publishing server descriptor: %s", e) return 'error' except: LOG.error_exc(sys.exc_info(), "Error publishing server descriptor") return 'error'
def __shutdownFn(self, r, w, cap):
    """State function: TLS shutdown.

       Drives the two-step TLS close-notify handshake.  Once we have
       sent our own shutdown, we keep reading until the peer answers
       with its close-notify (read returns 0), giving up if more than
       128 bytes of trailing data arrive.  Returns 1 to stay in this
       state, 0 after aborting via __readTooMuch, and raises _Closing
       when the shutdown handshake completes.
       ('r', 'w', 'cap' follow the common state-function signature;
       the bandwidth cap is not yet enforced here -- see XXXX007.)
    """
    while 1:
        if self.__awaitingShutdown:
            # We've already sent a 'shutdown' once.  Read until we
            # get another shutdown, or until we get enough data to
            # give up.
            s = "x"  # dummy non-zero value to enter the loop
            while s != 0: #XXXX007 respect cap.
                s = self.tls.read(_READLEN) # might raise TLSWant*
                if s == 0:
                    # Peer's close-notify has arrived.
                    LOG.debug("Read returned 0; shutdown to %s done",
                              self.address)
                else:
                    self.__bytesReadOnShutdown += len(s)
                    if self.__bytesReadOnShutdown > 128:
                        self.__readTooMuch()
                        return 0

        done = self.tls.shutdown()
        if not done and self.__awaitingShutdown:
            # This should never actually happen, but let's cover the
            # possibility.
            LOG.error("Shutdown returned zero twice from %s--bailing",
                      self.address)
            done = 1
        if done:
            LOG.debug("Got a completed shutdown from %s", self.address)
            self.shutdownFinished()
            raise _Closing()
        else:
            LOG.trace("Shutdown returned zero -- entering read mode.")
            self.__awaitingShutdown = 1
            self.__bytesReadOnShutdown = 0
            self.wantRead = 1
            return 1

    raise AssertionError() # unreached; appease pychecker
def _changeState(self, handle, s1, s2): """Helper method: changes the state of message 'handle' from 's1' to 's2', and changes the internal count.""" try: self._lock.acquire() try: replaceFile(os.path.join(self.dir, s1 + "_" + handle), os.path.join(self.dir, s2 + "_" + handle)) except OSError, e: contents = os.listdir(self.dir) LOG.error("Error while trying to change %s from %s to %s: %s", handle, s1, s2, e) LOG.error("Directory %s contains: %s", self.dir, contents) self.count(1) return if self.n_entries < 0: return if s1 == 'msg' and s2 != 'msg': self.n_entries -= 1 elif s1 != 'msg' and s2 == 'msg': self.n_entries += 1
def _changeState(self, handle, s1, s2): """Helper method: changes the state of message 'handle' from 's1' to 's2', and changes the internal count.""" try: self._lock.acquire() try: replaceFile(os.path.join(self.dir, s1+"_"+handle), os.path.join(self.dir, s2+"_"+handle)) except OSError, e: contents = os.listdir(self.dir) LOG.error("Error while trying to change %s from %s to %s: %s", handle, s1, s2, e) LOG.error("Directory %s contains: %s", self.dir, contents) self.count(1) return if self.n_entries < 0: return if s1 == 'msg' and s2 != 'msg': self.n_entries -= 1 elif s1 != 'msg' and s2 == 'msg': self.n_entries += 1
def getMetadata(self, handle): """Return the metadata associated with a given handle. If the metadata is damaged, may raise CorruptedFile.""" fname = os.path.join(self.dir, "meta_" + handle) if not os.path.exists(fname): raise KeyError(handle) try: self._lock.acquire() try: return self._metadata_cache[handle] except KeyError: pass f = open(fname, 'rb') try: res = cPickle.load(f) except cPickle.UnpicklingError, e: LOG.error("Found damaged metadata for %s in filestore %s: %s", handle, self.dir, str(e)) self._preserveCorrupted(handle) raise CorruptedFile() f.close() self._metadata_cache[handle] = res return res
def getMetadata(self, handle): """Return the metadata associated with a given handle. If the metadata is damaged, may raise CorruptedFile.""" fname = os.path.join(self.dir, "meta_"+handle) if not os.path.exists(fname): raise KeyError(handle) try: self._lock.acquire() try: return self._metadata_cache[handle] except KeyError: pass f = open(fname, 'rb') try: res = cPickle.load(f) except cPickle.UnpicklingError, e: LOG.error("Found damaged metadata for %s in filestore %s: %s", handle, self.dir, str(e)) self._preserveCorrupted(handle) raise CorruptedFile() f.close() self._metadata_cache[handle] = res return res
def publish(self, url): """Try to publish this descriptor to a given directory URL. Returns 'accept' if the publication was successful, 'reject' if the server refused to accept the descriptor, and 'error' if publication failed for some other reason.""" fname = self.getDescriptorFileName() descriptor = readFile(fname) fields = urllib.urlencode({"desc" : descriptor}) f = None try: try: ############################################# # some python versions verify certificates # anemone.mooo.com uses a self-signed cert # this workaround is not a problem because # the directory information is already signed # (although as Zax says, it is certainly a # kludge ;) if sys.version_info >= (2,7,9): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE f = urllib2.urlopen(url, fields, context=ctx) else: f = urllib2.urlopen(url, fields) ############################################# #f = urllib2.urlopen(url, fields) info = f.info() reply = f.read() except IOError, e: LOG.error("Error while publishing server descriptor: %s",e) return 'error' except: LOG.error_exc(sys.exc_info(), "Error publishing server descriptor") return 'error'
def generateServerDescriptorAndKeys(config, identityKey, keydir, keyname, hashdir, validAt=None, now=None, useServerKeys=0, validUntil=None): """Generate and sign a new server descriptor, and generate all the keys to go with it. config -- Our ServerConfig object. identityKey -- This server's private identity key keydir -- The root directory for storing key sets. keyname -- The name of this new key set within keydir hashdir -- The root directory for storing hash logs. validAt -- The starting time (in seconds) for this key's lifetime. useServerKeys -- If true, try to read an existing keyset from (keydir,keyname,hashdir) rather than generating a fresh one. validUntil -- Time at which the generated descriptor should expire. """ if useServerKeys: serverKeys = ServerKeyset(keydir, keyname, hashdir) serverKeys.load() packetKey = serverKeys.packetKey else: # First, we generate both of our short-term keys... packetKey = mixminion.Crypto.pk_generate(PACKET_KEY_BYTES*8) # ...and save them to disk, setting up our directory structure while # we're at it. serverKeys = ServerKeyset(keydir, keyname, hashdir) serverKeys.packetKey = packetKey serverKeys.save() # FFFF unused # allowIncoming = config['Incoming/MMTP'].get('Enabled', 0) # Now, we pull all the information we need from our configuration. nickname = config['Server']['Nickname'] contact = config['Server']['Contact-Email'] fingerprint = config['Server']['Contact-Fingerprint'] comments = config['Server']['Comments'] if not now: now = time.time() if not validAt: validAt = now insecurities = config.getInsecurities() if insecurities: secure = "no" else: secure = "yes" # Calculate descriptor and X509 certificate lifetimes. # (Round validAt to previous midnight.) 
validAt = mixminion.Common.previousMidnight(validAt+30) if not validUntil: keyLifetime = config['Server']['PublicKeyLifetime'].getSeconds() validUntil = previousMidnight(validAt + keyLifetime + 30) mmtpProtocolsIn = mixminion.server.MMTPServer.MMTPServerConnection \ .PROTOCOL_VERSIONS[:] mmtpProtocolsOut = mixminion.server.MMTPServer.MMTPClientConnection \ .PROTOCOL_VERSIONS[:] mmtpProtocolsIn.sort() mmtpProtocolsOut.sort() mmtpProtocolsIn = ",".join(mmtpProtocolsIn) mmtpProtocolsOut = ",".join(mmtpProtocolsOut) #XXXX009 remove: hasn't been checked since 007 or used since 005. identityKeyID = formatBase64( mixminion.Crypto.sha1( mixminion.Crypto.pk_encode_public_key(identityKey))) fields = { # XXXX009 remove: hasn't been checked since 007. "IP": config['Incoming/MMTP'].get('IP', "0.0.0.0"), "Hostname": config['Incoming/MMTP'].get('Hostname', None), "Port": config['Incoming/MMTP'].get('Port', 0), "Nickname": nickname, "Identity": formatBase64(mixminion.Crypto.pk_encode_public_key(identityKey)), "Published": formatTime(now), "ValidAfter": formatDate(validAt), "ValidUntil": formatDate(validUntil), "PacketKey": formatBase64(mixminion.Crypto.pk_encode_public_key(packetKey)), "KeyID": identityKeyID, "MMTPProtocolsIn" : mmtpProtocolsIn, "MMTPProtocolsOut" : mmtpProtocolsOut, "PacketVersion" : mixminion.Packet.PACKET_VERSION, "mm_version" : mixminion.__version__, "Secure" : secure, "Contact" : contact, } # If we don't know our IP address, try to guess if fields['IP'] == '0.0.0.0': #XXXX008 remove; not needed since 005. try: fields['IP'] = _guessLocalIP() LOG.warn("No IP configured; guessing %s",fields['IP']) except IPGuessError, e: LOG.error("Can't guess IP: %s", str(e)) raise UIError("Can't guess IP: %s" % str(e))
def generateServerDescriptorAndKeys(config, identityKey, keydir, keyname, hashdir, validAt=None, now=None, useServerKeys=0, validUntil=None): """Generate and sign a new server descriptor, and generate all the keys to go with it. config -- Our ServerConfig object. identityKey -- This server's private identity key keydir -- The root directory for storing key sets. keyname -- The name of this new key set within keydir hashdir -- The root directory for storing hash logs. validAt -- The starting time (in seconds) for this key's lifetime. useServerKeys -- If true, try to read an existing keyset from (keydir,keyname,hashdir) rather than generating a fresh one. validUntil -- Time at which the generated descriptor should expire. """ if useServerKeys: serverKeys = ServerKeyset(keydir, keyname, hashdir) serverKeys.load() packetKey = serverKeys.packetKey else: # First, we generate both of our short-term keys... packetKey = mixminion.Crypto.pk_generate(PACKET_KEY_BYTES * 8) # ...and save them to disk, setting up our directory structure while # we're at it. serverKeys = ServerKeyset(keydir, keyname, hashdir) serverKeys.packetKey = packetKey serverKeys.save() # FFFF unused # allowIncoming = config['Incoming/MMTP'].get('Enabled', 0) # Now, we pull all the information we need from our configuration. nickname = config['Server']['Nickname'] contact = config['Server']['Contact-Email'] fingerprint = config['Server']['Contact-Fingerprint'] comments = config['Server']['Comments'] if not now: now = time.time() if not validAt: validAt = now insecurities = config.getInsecurities() if insecurities: secure = "no" else: secure = "yes" # Calculate descriptor and X509 certificate lifetimes. # (Round validAt to previous midnight.) 
validAt = mixminion.Common.previousMidnight(validAt + 30) if not validUntil: keyLifetime = config['Server']['PublicKeyLifetime'].getSeconds() validUntil = previousMidnight(validAt + keyLifetime + 30) mmtpProtocolsIn = mixminion.server.MMTPServer.MMTPServerConnection \ .PROTOCOL_VERSIONS[:] mmtpProtocolsOut = mixminion.server.MMTPServer.MMTPClientConnection \ .PROTOCOL_VERSIONS[:] mmtpProtocolsIn.sort() mmtpProtocolsOut.sort() mmtpProtocolsIn = ",".join(mmtpProtocolsIn) mmtpProtocolsOut = ",".join(mmtpProtocolsOut) #XXXX009 remove: hasn't been checked since 007 or used since 005. identityKeyID = formatBase64( mixminion.Crypto.sha1( mixminion.Crypto.pk_encode_public_key(identityKey))) fields = { # XXXX009 remove: hasn't been checked since 007. "IP": config['Incoming/MMTP'].get('IP', "0.0.0.0"), "Hostname": config['Incoming/MMTP'].get('Hostname', None), "Port": config['Incoming/MMTP'].get('Port', 0), "Nickname": nickname, "Identity": formatBase64(mixminion.Crypto.pk_encode_public_key(identityKey)), "Published": formatTime(now), "ValidAfter": formatDate(validAt), "ValidUntil": formatDate(validUntil), "PacketKey": formatBase64(mixminion.Crypto.pk_encode_public_key(packetKey)), "KeyID": identityKeyID, "MMTPProtocolsIn": mmtpProtocolsIn, "MMTPProtocolsOut": mmtpProtocolsOut, "PacketVersion": mixminion.Packet.PACKET_VERSION, "mm_version": mixminion.__version__, "Secure": secure, "Contact": contact, } # If we don't know our IP address, try to guess if fields['IP'] == '0.0.0.0': #XXXX008 remove; not needed since 005. try: fields['IP'] = _guessLocalIP() LOG.warn("No IP configured; guessing %s", fields['IP']) except IPGuessError, e: LOG.error("Can't guess IP: %s", str(e)) raise UIError("Can't guess IP: %s" % str(e))
f = urllib2.urlopen(url, fields) info = f.info() reply = f.read() except IOError, e: LOG.error("Error while publishing server descriptor: %s", e) return 'error' except: LOG.error_exc(sys.exc_info(), "Error publishing server descriptor") return 'error' finally: if f is not None: f.close() if info.get('Content-Type') != 'text/plain': LOG.error("Bad content type %s from directory" % info.get('Content-Type')) return 'error' m = DIRECTORY_RESPONSE_RE.search(reply) if not m: LOG.error("Didn't understand reply from directory: %s", reply) return 'error' ok = int(m.group(1)) msg = m.group(2) if not ok: LOG.error("Directory rejected descriptor: %r", msg) return 'reject' LOG.info("Directory accepted descriptor: %r", msg) self.markAsPublished() return 'accept'
f = urllib2.urlopen(url, fields) info = f.info() reply = f.read() except IOError, e: LOG.error("Error while publishing server descriptor: %s",e) return 'error' except: LOG.error_exc(sys.exc_info(), "Error publishing server descriptor") return 'error' finally: if f is not None: f.close() if info.get('Content-Type') != 'text/plain': LOG.error("Bad content type %s from directory"%info.get( 'Content-Type')) return 'error' m = DIRECTORY_RESPONSE_RE.search(reply) if not m: LOG.error("Didn't understand reply from directory: %s", reply) return 'error' ok = int(m.group(1)) msg = m.group(2) if not ok: LOG.error("Directory rejected descriptor: %r", msg) return 'reject' LOG.info("Directory accepted descriptor: %r", msg) self.markAsPublished() return 'accept'