class ServerInbox:
    """A ServerInbox holds server descriptors received from the outside
       world that are not yet ready to be included in the directory.
    """
    ## Fields:
    # newQueue: IncomingQueue object to hold descriptors for previously
    #    unknown servers.
    # updateQueue: IncomingQueue object to hold descriptors for currently
    #    known servers.
    def __init__(self, base, idCache):
        """Initialize a ServerInbox to store its files in 'base', and
           check server descriptors against the IDCache 'idCache'."""
        # Both queues share the same "reject" directory for bad descriptors.
        self.newQueue = IncomingQueue(os.path.join(base, "new"),
                                      os.path.join(base, "reject"))
        self.updateQueue = IncomingQueue(os.path.join(base, "updates"),
                                         os.path.join(base, "reject"))
        self.idCache = idCache

    def receiveServer(self, text, source):
        """Process a new server descriptor and store it for later action.
           (To be run by the CGI user.)

           If the server will be automatically inserted, return true.
           If the server will be inserted (given administrator
              intervention), raise ServerQueuedException.
           If there is a problem, log it, and raise UIError.

           text -- a string containing a new server descriptor.
           source -- a (human readable) string describing the source
               of the descriptor, used in error messages.
        """
        # Parse and validate the descriptor before anything else; a
        # malformed descriptor is rejected outright.
        try:
            server = ServerInfo(string=text,assumeValid=0)
        except MixError, e:
            LOG.warn("Rejected invalid server from %s: %s", source,e)
            raise UIError("Server descriptor was not valid: %s"%e)

        nickname = server.getNickname()

        # A descriptor whose nickname matches a known server but whose
        # identity key differs is recorded for audit, then refused.
        try:
            known = self.idCache.containsServer(server)
        except MismatchedID:
            LOG.warn("Rejected server with mismatched identity from %s",
                     source)
            self.updateQueue.queueRejectedServer(text,server)
            raise UIError(("I already know a server named "
                           "%s with a different key.")%nickname)

        if not known:
            # Previously unknown server: hold for manual approval.
            LOG.info("Received previously unknown server %s from %s",
                     nickname, source)
            self.newQueue.queueIncomingServer(text,server)
            raise ServerQueuedException(
                "Server queued pending manual checking")
        else:
            # Update for a known server: queue for automatic insertion.
            LOG.info("Received update for server %s from %s",
                     nickname, source)
            self.updateQueue.queueIncomingServer(text,server)
            return 1
def regenerateDescriptors(self):
    """Rebuild the server descriptor for every keyset in this keyring.

       Only the descriptors are regenerated; all existing keys are
       left untouched.
    """
    LOG.info("Regenerating server descriptors; keeping old keys.")
    idKey = self.getIdentityKey()
    # self.keySets entries are (validAfter, validUntil, keyset) triples;
    # only the keyset itself is needed here.
    for entry in self.keySets:
        keyset = entry[2]
        keyset.regenerateServerDescriptor(self.config, idKey)
def _validateZlib():
    """Internal function: Make sure that zlib is a recognized version, and
       that it compresses things as expected.  (This check is important,
       because using a zlib version that compressed differently from
       zlib1.1.4 would make senders partitionable by payload compression.)

       Sets the module-level _ZLIB_LIBRARY_OK flag: 1 if the library is
       known-good, 0.5 if unverified, 0 if it produced wrong output.
       Raises MixFatalError for unsupported or misbehaving versions.
    """
    global _ZLIB_LIBRARY_OK
    ver = getattr(zlib, "ZLIB_VERSION", None)
    if ver and ver < "1.1.2":
        raise MixFatalError("Zlib version %s is not supported"%ver)

    # Tentatively mark the library as "probably OK" while checking further.
    _ZLIB_LIBRARY_OK = 0.5
    if ver in ("1.1.2", "1.1.3", "1.1.4", "1.2.0", "1.2.0.1", "1.2.0.2",
               "1.2.0.3", "1.2.0.4", "1.2.0.5", "1.2.0.6", "1.2.0.7",
               "1.2.0.8", "1.2.1", "1.2.1.1", "1.2.1.2", "1.2.2", "1.2.2.2",
               "1.2.3", "1.2.7", "1.2.8"):
        # Known-compatible release: accept it outright.
        _ZLIB_LIBRARY_OK = 1
        return

    LOG.info("Unrecognized zlib version: %r. Spot-checking output", ver)

    # This test is inadequate, but it _might_ catch future incompatible
    # changes.
    _ZLIB_LIBRARY_OK = 0.5
    # Reference output: what zlib 1.1.4 produces for "aZbAAcdefg"*1000.
    good = '\x78\xda\xed\xc6A\x11\x00 \x08\x00\xb0l\xd4\xf0\x87\x02\xf6o'+\
           '`\x0e\xef\xb6\xd7r\xed\x88S=7\xcd\xcc\xcc\xcc\xcc\xcc\xcc'+\
           '\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xbe\xdd\x03'+\
           'q\x8d\n\x93'
    if compressData("aZbAAcdefg"*1000) == good:
        _ZLIB_LIBRARY_OK = 1
    else:
        _ZLIB_LIBRARY_OK = 0
        # Fix: message previously read "exected" instead of "expected".
        raise MixFatalError("Zlib output not as expected.")
def regenerateDescriptors(self):
    """Regenerate all server descriptors for all keysets in this
       keyring, but keep all old keys intact."""
    LOG.info("Regenerating server descriptors; keeping old keys.")
    identityKey = self.getIdentityKey()
    # Each keySets entry is a (validAfter, validUntil, keyset) triple.
    for _,_,ks in self.keySets:
        ks.regenerateServerDescriptor(self.config, identityKey)
def _validateZlib():
    """Internal function: Make sure that zlib is a recognized version, and
       that it compresses things as expected.  (This check is important,
       because using a zlib version that compressed differently from
       zlib1.1.4 would make senders partitionable by payload compression.)

       Sets the module-level _ZLIB_LIBRARY_OK flag: 1 if the library is
       known-good, 0.5 if unverified, 0 if it produced wrong output.
       Raises MixFatalError for unsupported or misbehaving versions.
    """
    global _ZLIB_LIBRARY_OK
    ver = getattr(zlib, "ZLIB_VERSION", None)
    if ver and ver < "1.1.2":
        raise MixFatalError("Zlib version %s is not supported" % ver)

    # Tentatively mark the library as "probably OK" while checking further.
    _ZLIB_LIBRARY_OK = 0.5
    if ver in ("1.1.2", "1.1.3", "1.1.4", "1.2.0", "1.2.0.1", "1.2.0.2",
               "1.2.0.3", "1.2.0.4", "1.2.0.5", "1.2.0.6", "1.2.0.7",
               "1.2.0.8", "1.2.1", "1.2.1.1", "1.2.1.2", "1.2.2", "1.2.2.2",
               "1.2.3"):
        # Known-compatible release: accept it outright.
        _ZLIB_LIBRARY_OK = 1
        return

    LOG.info("Unrecognized zlib version: %r. Spot-checking output", ver)

    # This test is inadequate, but it _might_ catch future incompatible
    # changes.
    _ZLIB_LIBRARY_OK = 0.5
    # Reference output: what zlib 1.1.4 produces for "aZbAAcdefg"*1000.
    good = '\x78\xda\xed\xc6A\x11\x00 \x08\x00\xb0l\xd4\xf0\x87\x02\xf6o'+\
           '`\x0e\xef\xb6\xd7r\xed\x88S=7\xcd\xcc\xcc\xcc\xcc\xcc\xcc'+\
           '\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xbe\xdd\x03'+\
           'q\x8d\n\x93'
    if compressData("aZbAAcdefg" * 1000) == good:
        _ZLIB_LIBRARY_OK = 1
    else:
        _ZLIB_LIBRARY_OK = 0
        # Fix: message previously read "exected" instead of "expected".
        raise MixFatalError("Zlib output not as expected.")
def removeDeadKeys(self, now=None):
    """Delete every keyset that has expired as of 'now'."""
    # Re-scan the keystore before deciding what is dead...
    self.checkKeys()
    # ...delete each dead keyset, logging the reason it was dead...
    for msg, ks in self.getDeadKeys(now):
        LOG.info(msg)
        ks.delete()
    # ...and re-scan again so our view matches the disk.
    self.checkKeys()
def configure_trng(config):
    """Initialize the true entropy source from a given Config object.
       If none is provided, tries some sane defaults."""
    global _TRNG_FILENAME
    global _theTrueRNG

    if sys.platform == 'win32':
        # We have two entropy sources on windows: openssl's built-in
        # entropy generator that takes data from the screen, and
        # Windows's CryptGenRandom function.  Because the former is
        # insecure, and the latter is closed-source, we xor them.
        _ml.win32_openssl_seed()
        _ml.openssl_seed(_ml.win32_get_random_bytes(32))
        _theTrueRNG = _XorRNG(_OpensslRNG(), _WinTrueRNG())
        return

    # On other platforms, look for a character-device entropy source
    # (e.g. /dev/urandom), preferring an explicitly configured file.
    if config is not None:
        requestedFile = config['Host'].get('EntropySource')
    else:
        requestedFile = None

    # Build a list of candidates
    defaults = PLATFORM_TRNG_DEFAULTS.get(sys.platform,
                                          PLATFORM_TRNG_DEFAULTS['***'])
    files = [ requestedFile ] + defaults

    # Now find the first of our candidates that exists and is a character
    # device.
    randFile = None
    for filename in files:
        if filename is None:
            continue

        # Only complain about candidates the user explicitly requested.
        verbose = (filename == requestedFile)
        if not os.path.exists(filename):
            if verbose:
                LOG.warn("No such file as %s", filename)
        else:
            st = os.stat(filename)
            if not (st[stat.ST_MODE] & stat.S_IFCHR):
                if verbose:
                    LOG.error("Entropy source %s isn't a character device",
                              filename)
            else:
                randFile = filename
                break

    if randFile is None and _TRNG_FILENAME is None:
        # No candidate worked and we have no previous source: fatal.
        LOG.fatal("No entropy source available: Tried all of %s", files)
        raise MixFatalError("No entropy source available")
    elif randFile is None:
        LOG.warn("Falling back to previous entropy source %s",
                 _TRNG_FILENAME)
    else:
        LOG.info("Setting entropy source to %r", randFile)
        _TRNG_FILENAME = randFile
    _theTrueRNG = _TrueRNG(1024)
def configure_trng(config):
    """Initialize the true entropy source from a given Config object.
       If none is provided, tries some sane defaults."""
    global _TRNG_FILENAME
    global _theTrueRNG

    if sys.platform == 'win32':
        # We have two entropy sources on windows: openssl's built-in
        # entropy generator that takes data from the screen, and
        # Windows's CryptGenRandom function.  Because the former is
        # insecure, and the latter is closed-source, we xor them.
        _ml.win32_openssl_seed()
        _ml.openssl_seed(_ml.win32_get_random_bytes(32))
        _theTrueRNG = _XorRNG(_OpensslRNG(), _WinTrueRNG())
        return

    # On other platforms, look for a character-device entropy source
    # (e.g. /dev/urandom), preferring an explicitly configured file.
    if config is not None:
        requestedFile = config['Host'].get('EntropySource')
    else:
        requestedFile = None

    # Build a list of candidates
    defaults = PLATFORM_TRNG_DEFAULTS.get(sys.platform,
                                          PLATFORM_TRNG_DEFAULTS['***'])
    files = [requestedFile] + defaults

    # Now find the first of our candidates that exists and is a character
    # device.
    randFile = None
    for filename in files:
        if filename is None:
            continue

        # Only complain about candidates the user explicitly requested.
        verbose = (filename == requestedFile)
        if not os.path.exists(filename):
            if verbose:
                LOG.warn("No such file as %s", filename)
        else:
            st = os.stat(filename)
            if not (st[stat.ST_MODE] & stat.S_IFCHR):
                if verbose:
                    LOG.error("Entropy source %s isn't a character device",
                              filename)
            else:
                randFile = filename
                break

    if randFile is None and _TRNG_FILENAME is None:
        # No candidate worked and we have no previous source: fatal.
        LOG.fatal("No entropy source available: Tried all of %s", files)
        raise MixFatalError("No entropy source available")
    elif randFile is None:
        LOG.warn("Falling back to previous entropy source %s",
                 _TRNG_FILENAME)
    else:
        LOG.info("Setting entropy source to %r", randFile)
        _TRNG_FILENAME = randFile
    _theTrueRNG = _TrueRNG(1024)
def __loadModules(self, section, sectionEntries):
    """Callback from the [Server] section of a config file.

       Parses the module options, and adds new sections to the syntax
       accordingly.
    """
    mgr = self.moduleManager
    mgr.setPath(section.get('ModulePath'))
    # Load every external module named in the section, then fold the
    # modules' configuration syntax into our own.
    for moduleName in section.get('Module', []):
        LOG.info("Loading module %s", moduleName)
        mgr.loadExtModule(moduleName)
    self._syntax.update(mgr.getConfigSyntax())
def checkKeys(self):
    """Internal method: read information about all this server's
       currently-prepared keys from disk.

       May raise ConfigError if any of the server descriptors on disk
       are invalid.
    """
    self.keySets = []
    # NOTE(review): badKeySets is not used in the visible portion of this
    # function; presumably populated further down — confirm against the
    # full source.
    badKeySets = []
    # firstKey/lastKey track the numeric range of key_NNNN directories.
    firstKey = sys.maxint
    lastKey = 0

    LOG.debug("Scanning server keystore at %s", self.keyDir)

    if not os.path.exists(self.keyDir):
        LOG.info("Creating server keystore at %s", self.keyDir)
        createPrivateDir(self.keyDir)

    # Iterate over the entries in HOME/keys
    for dirname in os.listdir(self.keyDir):
        # Skip any that aren't directories named "key_INT"
        if not os.path.isdir(os.path.join(self.keyDir, dirname)):
            continue
        if not dirname.startswith('key_'):
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue
        keysetname = dirname[4:]
        try:
            setNum = int(keysetname)
            # keep track of the first and last used key number
            if setNum < firstKey:
                firstKey = setNum
            if setNum > lastKey:
                lastKey = setNum
        except ValueError:
            # Directory name after "key_" wasn't an integer.
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue

        # Find the server descriptor...
        keyset = ServerKeyset(self.keyDir, keysetname, self.hashDir)
        ok = 1
        try:
            keyset.checkKeys()
        except MixError, e:
            LOG.warn("Error checking private keys in keyset %s: %s",
                     keysetname, str(e))
            ok = 0

        # Only try to load the descriptor if the keys themselves were OK.
        try:
            if ok:
                keyset.getServerDescriptor()
        except (ConfigError, IOError), e:
            LOG.warn("Key set %s has invalid/missing descriptor: %s",
                     keysetname, str(e))
            ok = 0
def checkKeys(self):
    """Internal method: read information about all this server's
       currently-prepared keys from disk.

       May raise ConfigError if any of the server descriptors on disk
       are invalid.
    """
    self.keySets = []
    # NOTE(review): badKeySets is not used in the visible portion of this
    # function; presumably populated further down — confirm against the
    # full source.
    badKeySets = []
    # firstKey/lastKey track the numeric range of key_NNNN directories.
    firstKey = sys.maxint
    lastKey = 0

    LOG.debug("Scanning server keystore at %s", self.keyDir)

    if not os.path.exists(self.keyDir):
        LOG.info("Creating server keystore at %s", self.keyDir)
        createPrivateDir(self.keyDir)

    # Iterate over the entries in HOME/keys
    for dirname in os.listdir(self.keyDir):
        # Skip any that aren't directories named "key_INT"
        if not os.path.isdir(os.path.join(self.keyDir,dirname)):
            continue
        if not dirname.startswith('key_'):
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue
        keysetname = dirname[4:]
        try:
            setNum = int(keysetname)
            # keep track of the first and last used key number
            if setNum < firstKey:
                firstKey = setNum
            if setNum > lastKey:
                lastKey = setNum
        except ValueError:
            # Directory name after "key_" wasn't an integer.
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue

        # Find the server descriptor...
        keyset = ServerKeyset(self.keyDir, keysetname, self.hashDir)
        ok = 1
        try:
            keyset.checkKeys()
        except MixError, e:
            LOG.warn("Error checking private keys in keyset %s: %s",
                     keysetname, str(e))
            ok = 0

        # Only try to load the descriptor if the keys themselves were OK.
        try:
            if ok:
                keyset.getServerDescriptor()
        except (ConfigError, IOError), e:
            LOG.warn("Key set %s has invalid/missing descriptor: %s",
                     keysetname, str(e))
            ok = 0
def run(self):
    """Internal: main body of processing thread."""
    getJob = self.mqueue.get
    try:
        # Pull callables off the queue forever and invoke them.
        while 1:
            job = getJob()
            job()
    except ProcessingThread._Shutdown:
        # A _Shutdown job was queued; exit cleanly.
        LOG.info("Shutting down %s", self.threadName)
        return
    except:
        # Any other exception kills the thread; log it with traceback.
        LOG.error_exc(sys.exc_info(),
                      "Exception in %s; shutting down thread.",
                      self.threadName)
def run(self):
    """Internal: main body of processing thread."""
    try:
        # Each queued item is a callable "job"; run them until told to stop.
        while 1:
            job = self.mqueue.get()
            job()
    except ProcessingThread._Shutdown:
        # A _Shutdown job was queued; exit cleanly.
        LOG.info("Shutting down %s",self.threadName)
        return
    except:
        # Any other exception kills the thread; log it with traceback.
        LOG.error_exc(sys.exc_info(),
                      "Exception in %s; shutting down thread.",
                      self.threadName)
def removeIdentityKey(self):
    """Remove this server's identity key."""
    fn = os.path.join(self.keyDir, "identity.key")
    if not os.path.exists(fn):
        LOG.info("No identity key to remove.")
    else:
        # Give the operator a chance to abort before destroying the key.
        LOG.warn("Removing identity key in 10 seconds")
        time.sleep(10)
        LOG.warn("Removing identity key")
        secureDelete([fn], blocking=1)

    # Also remove the DH parameters file if present.
    if os.path.exists(self.dhFile):
        LOG.info("Removing diffie-helman parameters file")
        secureDelete([self.dhFile], blocking=1)
def publishKeys(self, allKeys=0):
    """Publish server descriptors to the directory server.  Ordinarily,
       only unpublished descriptors are sent.  If allKeys is true, all
       descriptors are sent.

       Returns 1 if all descriptors were accepted, 0 otherwise.
    """
    keySets = [ ks for _, _, ks in self.keySets ]
    if allKeys:
        LOG.info("Republishing all known keys to directory server")
    else:
        # Skip keysets that the directory has already accepted.
        keySets = [ ks for ks in keySets if not ks.isPublished() ]
    if not keySets:
        LOG.trace("publishKeys: no unpublished keys found")
        return
    LOG.info("Publishing %s keys to directory server...",len(keySets))
    rejected = 0
    for ks in keySets:
        status = ks.publish(DIRECTORY_UPLOAD_URL)
        if status == 'error':
            # Hard failure (e.g. could not reach the server): stop trying.
            LOG.error("Error publishing a key; giving up")
            return 0
        elif status == 'reject':
            rejected += 1
        else:
            assert status == 'accept'
    if rejected == 0:
        LOG.info("All keys published successfully.")
        return 1
    else:
        LOG.info("%s/%s keys were rejected." , rejected, len(keySets))
        return 0
def regenerateServerDescriptor(self, config, identityKey):
    """Regenerate the server descriptor for this keyset, keeping the
       original keys."""
    self.load()
    # The new descriptor has not been uploaded yet.
    self.markAsUnpublished()
    # Preserve the keyset's original validity window.
    validAt,validUntil = self.getLiveness()
    LOG.info("Regenerating descriptor for keyset %s (%s--%s)",
             self.keyname, formatTime(validAt,1),
             formatTime(validUntil,1))
    generateServerDescriptorAndKeys(config, identityKey,
                                    self.keyroot, self.keyname,
                                    self.hashroot,
                                    validAt=validAt,
                                    validUntil=validUntil,
                                    useServerKeys=1)
    # Invalidate cached descriptor state so it is re-read on next use.
    self.serverinfo = self.validAfter = self.validUntil = None
def publishKeys(self, allKeys=0):
    """Publish server descriptors to the directory server.  Ordinarily,
       only unpublished descriptors are sent.  If allKeys is true, all
       descriptors are sent.

       Returns 1 if all descriptors were accepted, 0 otherwise.
    """
    keySets = [ks for _, _, ks in self.keySets]
    if allKeys:
        LOG.info("Republishing all known keys to directory server")
    else:
        # Skip keysets that the directory has already accepted.
        keySets = [ks for ks in keySets if not ks.isPublished()]
    if not keySets:
        LOG.trace("publishKeys: no unpublished keys found")
        return
    LOG.info("Publishing %s keys to directory server...", len(keySets))
    rejected = 0
    for ks in keySets:
        status = ks.publish(DIRECTORY_UPLOAD_URL)
        if status == 'error':
            # Hard failure (e.g. could not reach the server): stop trying.
            LOG.error("Error publishing a key; giving up")
            return 0
        elif status == 'reject':
            rejected += 1
        else:
            assert status == 'accept'
    if rejected == 0:
        LOG.info("All keys published successfully.")
        return 1
    else:
        LOG.info("%s/%s keys were rejected.", rejected, len(keySets))
        return 0
class ListenConnection(Connection): """A ListenConnection listens on a given port/ip combination, and calls a 'connectionFactory' method whenever a new connection is made to that port.""" ## Fields: # ip: IP to listen on. # port: port to listen on. # sock: socket to bind. # connectionFactory: a function that takes as input a socket from a # newly received connection, and returns a Connection object to # register with the async server. def __init__(self, family, ip, port, backlog, connectionFactory): """Create a new ListenConnection""" self.ip = ip self.port = port self.sock = socket.socket(family, socket.SOCK_STREAM) self.sock.setblocking(0) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: self.sock.bind((self.ip, self.port)) except socket.error, (err, msg): extra = "" code = errno.errorcode.get(err) if code in ["EADDRNOTAVAIL", "WSAEADDRNOTAVAIL"]: extra = " (Is that really your IP address?)" elif code == "EACCES": extra = " (Remember, only root can bind low ports)" raise UIError("Error while trying to bind to %s:%s: %s%s" % (self.ip, self.port, msg, extra)) self.sock.listen(backlog) self.connectionFactory = connectionFactory self.isOpen = 1 LOG.info("Listening at %s on port %s (fd %s)", ip, port, self.sock.fileno())
def _getDHFile(self):
    """Return the filename for the diffie-helman parameters for the
       server.  Creates the file if it doesn't yet exist."""
    dhdir = os.path.split(self.dhFile)[0]
    createPrivateDir(dhdir)
    if not os.path.exists(self.dhFile):
        # ???? This is only using 512-bit Diffie-Hellman!  That isn't
        # ???? remotely enough.
        LOG.info("Generating Diffie-Helman parameters for TLS...")
        mixminion._minionlib.generate_dh_parameters(self.dhFile, verbose=0)
        LOG.info("...done")
    else:
        LOG.debug("Using existing Diffie-Helman parameter from %s",
                  self.dhFile)

    return self.dhFile
def createKeys(self, num=1, startAt=None): """Generate 'num' public keys for this server. If startAt is provided, make the first key become valid at 'startAt'. Otherwise, make the first key become valid right after the last key we currently have expires. If we have no keys now, make the first key start now.""" # FFFF Use this. #password = None if startAt is None: if self.keySets: startAt = self.keySets[-1][1]+60 if startAt < time.time(): startAt = time.time()+60 else: startAt = time.time()+60 startAt = previousMidnight(startAt) firstKey, lastKey = self.keyRange for _ in xrange(num): if firstKey == sys.maxint: keynum = firstKey = lastKey = 1 elif firstKey > 1: firstKey -= 1 keynum = firstKey else: lastKey += 1 keynum = lastKey keyname = "%04d" % keynum lifetime = self.config['Server']['PublicKeyLifetime'].getSeconds() nextStart = startAt + lifetime LOG.info("Generating key %s to run from %s through %s (GMT)", keyname, formatDate(startAt), formatDate(nextStart-3600)) generateServerDescriptorAndKeys(config=self.config, identityKey=self.getIdentityKey(), keyname=keyname, keydir=self.keyDir, hashdir=self.hashDir, validAt=startAt) startAt = nextStart self.checkKeys()
def createKeys(self, num=1, startAt=None): """Generate 'num' public keys for this server. If startAt is provided, make the first key become valid at 'startAt'. Otherwise, make the first key become valid right after the last key we currently have expires. If we have no keys now, make the first key start now.""" # FFFF Use this. #password = None if startAt is None: if self.keySets: startAt = self.keySets[-1][1] + 60 if startAt < time.time(): startAt = time.time() + 60 else: startAt = time.time() + 60 startAt = previousMidnight(startAt) firstKey, lastKey = self.keyRange for _ in xrange(num): if firstKey == sys.maxint: keynum = firstKey = lastKey = 1 elif firstKey > 1: firstKey -= 1 keynum = firstKey else: lastKey += 1 keynum = lastKey keyname = "%04d" % keynum lifetime = self.config['Server']['PublicKeyLifetime'].getSeconds() nextStart = startAt + lifetime LOG.info("Generating key %s to run from %s through %s (GMT)", keyname, formatDate(startAt), formatDate(nextStart - 3600)) generateServerDescriptorAndKeys(config=self.config, identityKey=self.getIdentityKey(), keyname=keyname, keydir=self.keyDir, hashdir=self.hashDir, validAt=startAt) startAt = nextStart self.checkKeys()
def getNextKeygen(self):
    """Return the time (in seconds) when we should next generate keys.

       If -1 is returned, keygen should occur immediately.
    """
    if not self.keySets:
        return -1

    # The last key we currently hold stops being valid at this time.
    lastExpiry = self.keySets[-1][1]
    # We want to have keys in the directory valid for
    # PREPUBLICATION_INTERVAL seconds after that, and we assume that
    # a key takes up to PUBLICATION_LATENCY seconds to make it into the
    # directory.
    keygenTime = lastExpiry - PUBLICATION_LATENCY - PREPUBLICATION_INTERVAL

    LOG.info("Last expiry at %s; next keygen at %s",
             formatTime(lastExpiry, 1), formatTime(keygenTime, 1))
    return keygenTime
def getNextKeygen(self):
    """Return the time (in seconds) when we should next generate keys.

       If -1 is returned, keygen should occur immediately.
    """
    if not self.keySets:
        return -1

    # Our last current key expires at 'lastExpiry'.
    lastExpiry = self.keySets[-1][1]
    # We want to have keys in the directory valid for
    # PREPUBLICATION_INTERVAL seconds after that, and we assume that
    # a key takes up to PUBLICATION_LATENCY seconds to make it into the
    # directory.
    nextKeygen = lastExpiry - PUBLICATION_LATENCY - PREPUBLICATION_INTERVAL

    LOG.info("Last expiry at %s; next keygen at %s",
             formatTime(lastExpiry, 1), formatTime(nextKeygen, 1))
    return nextKeygen
def regenerateServerDescriptor(self, config, identityKey):
    """Regenerate the server descriptor for this keyset, keeping the
       original keys."""
    self.load()
    # The new descriptor has not been uploaded yet.
    self.markAsUnpublished()
    # Preserve the keyset's original validity window.
    validAt, validUntil = self.getLiveness()
    LOG.info("Regenerating descriptor for keyset %s (%s--%s)",
             self.keyname, formatTime(validAt, 1),
             formatTime(validUntil, 1))
    generateServerDescriptorAndKeys(config, identityKey,
                                    self.keyroot, self.keyname,
                                    self.hashroot,
                                    validAt=validAt,
                                    validUntil=validUntil,
                                    useServerKeys=1)
    # Invalidate cached descriptor state so it is re-read on next use.
    self.serverinfo = self.validAfter = self.validUntil = None
def learnServerID(self, server):
    """Mark the ID for a server descriptor as the canonical identity
       key associated with that server's nickname."""
    try:
        self._lock()
        ident = server.getIdentity()
        nickname = server.getNickname()
        # containsServer raises MismatchedID when the nickname is known
        # with a *different* key — that is a fatal inconsistency.
        try:
            if self.idCache.containsServer(server):
                LOG.warn("Server %s already known", nickname)
        except mixminion.directory.MismatchedID:
            raise MixFatalError("Mismatched ID for server %s" % nickname)

        LOG.info("Learning identity for new server %s", nickname)
        self.idCache.insertServer(server)
        # Persist the (nickname, public key) binding to its own file,
        # timestamped to keep filenames unique.
        writePickled(os.path.join(self.serverIDDir,
                                  nickname+"-"+formatFnameTime()),
                     ("V0", (nickname, pk_encode_public_key(ident))))
        self.idCache.save()
    finally:
        self._unlock()
def getIdentityKey(self):
    """Return this server's identity key.  Generate one if it doesn't
       exist."""
    password = None # FFFF Use this, somehow.
    fn = os.path.join(self.keyDir, "identity.key")
    bits = self.config['Server']['IdentityKeyBits']
    if os.path.exists(fn):
        checkPrivateFile(fn)
        key = mixminion.Crypto.pk_PEM_load(fn, password)
        # Warn (but still use the stored key) if its size no longer
        # matches the configured IdentityKeyBits.
        keylen = key.get_modulus_bytes()*8
        if keylen != bits:
            LOG.warn(
                "Stored identity key has %s bits, but you asked for %s.",
                keylen, bits)
    else:
        LOG.info("Generating identity key. (This may take a while.)")
        key = mixminion.Crypto.pk_generate(bits)
        mixminion.Crypto.pk_PEM_save(key, fn, password)
        LOG.info("Generated %s-bit identity key.", bits)

    return key
def getIdentityKey(self):
    """Return this server's identity key.  Generate one if it doesn't
       exist."""
    password = None # FFFF Use this, somehow.
    fn = os.path.join(self.keyDir, "identity.key")
    bits = self.config['Server']['IdentityKeyBits']
    if os.path.exists(fn):
        checkPrivateFile(fn)
        key = mixminion.Crypto.pk_PEM_load(fn, password)
        # Warn (but still use the stored key) if its size no longer
        # matches the configured IdentityKeyBits.
        keylen = key.get_modulus_bytes() * 8
        if keylen != bits:
            LOG.warn(
                "Stored identity key has %s bits, but you asked for %s.",
                keylen, bits)
    else:
        LOG.info("Generating identity key. (This may take a while.)")
        key = mixminion.Crypto.pk_generate(bits)
        mixminion.Crypto.pk_PEM_save(key, fn, password)
        LOG.info("Generated %s-bit identity key.", bits)

    return key
def clean(self, now=None): """Remove all expired or superseded servers from the active directory. """ # This algorithm is inefficient: O(N_descs * N_descs_per_nickname). # We're just going to ignore that. if now is None: now = time.time() try: self._lock() removed = {} # Map from filename->whyRemoved # Find all superseded servers for servers in self.serversByNickname.values(): servers = [ (self.servers[fn]['Server']['Published'], fn, self.servers[fn]) for fn in servers ] servers.sort() fns = [ fn for _, fn, _ in servers] servers = [ s for _, _, s in servers ] for idx in range(len(servers)): if servers[idx].isSupersededBy(servers[idx+1:]): removed[fns[idx]] = "superseded" # Find all expired servers. for fn, s in self.servers.items(): if removed.has_key(fn): continue if s.isExpiredAt(now-6000): # The descriptor is expired. removed[fn] = "expired" # Now, do the actual removing. for fn, why in removed.items(): LOG.info("Removing %s descriptor %s", why, fn) _moveServer(self.serverDir, self.archiveDir, fn) del self.servers[fn] self.__buildNicknameMap() finally: self._unlock()
def learnServerID(self, server):
    """Mark the ID for a server descriptor as the canonical identity
       key associated with that server's nickname."""
    try:
        self._lock()
        ident = server.getIdentity()
        nickname = server.getNickname()
        # containsServer raises MismatchedID when the nickname is known
        # with a *different* key — that is a fatal inconsistency.
        try:
            if self.idCache.containsServer(server):
                LOG.warn("Server %s already known", nickname)
        except mixminion.directory.MismatchedID:
            raise MixFatalError("Mismatched ID for server %s" % nickname)

        LOG.info("Learning identity for new server %s", nickname)
        self.idCache.insertServer(server)
        # Persist the (nickname, public key) binding to its own file,
        # timestamped to keep filenames unique.
        writePickled(
            os.path.join(self.serverIDDir,
                         nickname + "-" + formatFnameTime()),
            ("V0", (nickname, pk_encode_public_key(ident))))
        self.idCache.save()
    finally:
        self._unlock()
def updateKeys(self, packetHandler, statusFile=None, when=None):
    """Update the keys stored in a PacketHandler, MMTPServer object, so
       that they contain the currently correct keys.  Also removes any
       dead keys.

       This function is idempotent.
    """
    self.checkKeys()
    deadKeys = self.getDeadKeys(when)
    self.currentKeys = keys = self.getServerKeysets(when)
    keyNames = [k.keyname for k in keys]
    deadKeyNames = [k.keyname for msg, k in deadKeys]
    LOG.info("Updating keys: %s currently valid (%s); %s expired (%s)",
             len(keys), " ".join(keyNames),
             len(deadKeys), " ".join(deadKeyNames))
    if packetHandler is not None:
        # Hand the packet handler the private keys and replay hash logs
        # for every currently valid keyset.
        packetKeys = []
        hashLogs = []
        for k in keys:
            packetKeys.append(k.getPacketKey())
            hashLogs.append(k.getHashLog())
        packetHandler.setKeys(packetKeys, hashLogs)
    if statusFile:
        # Record the descriptor filenames of the live keysets.
        writeFile(
            statusFile,
            "".join(["%s\n" % k.getDescriptorFileName() for k in keys]),
            0644)
    # Delete dead keysets, then re-scan if anything was removed.
    for msg, ks in deadKeys:
        LOG.info(msg)
        ks.delete()
    if deadKeys:
        self.checkKeys()
    # Force the next-rotation time to be recomputed.
    self.nextUpdate = None
    self.getNextKeyRotation(keys)
def clean(self, now=None): """Remove all expired or superseded servers from the active directory. """ # This algorithm is inefficient: O(N_descs * N_descs_per_nickname). # We're just going to ignore that. if now is None: now = time.time() try: self._lock() removed = {} # Map from filename->whyRemoved # Find all superseded servers for servers in self.serversByNickname.values(): servers = [(self.servers[fn]['Server']['Published'], fn, self.servers[fn]) for fn in servers] servers.sort() fns = [fn for _, fn, _ in servers] servers = [s for _, _, s in servers] for idx in range(len(servers)): if servers[idx].isSupersededBy(servers[idx + 1:]): removed[fns[idx]] = "superseded" # Find all expired servers. for fn, s in self.servers.items(): if removed.has_key(fn): continue if s.isExpiredAt(now - 6000): # The descriptor is expired. removed[fn] = "expired" # Now, do the actual removing. for fn, why in removed.items(): LOG.info("Removing %s descriptor %s", why, fn) _moveServer(self.serverDir, self.archiveDir, fn) del self.servers[fn] self.__buildNicknameMap() finally: self._unlock()
def updateKeys(self, packetHandler, statusFile=None,when=None):
    """Update the keys stored in a PacketHandler, MMTPServer object, so
       that they contain the currently correct keys.  Also removes any
       dead keys.

       This function is idempotent.
    """
    self.checkKeys()
    deadKeys = self.getDeadKeys(when)
    self.currentKeys = keys = self.getServerKeysets(when)
    keyNames = [k.keyname for k in keys]
    deadKeyNames = [k.keyname for msg, k in deadKeys]
    LOG.info("Updating keys: %s currently valid (%s); %s expired (%s)",
             len(keys), " ".join(keyNames),
             len(deadKeys), " ".join(deadKeyNames))
    if packetHandler is not None:
        # Hand the packet handler the private keys and replay hash logs
        # for every currently valid keyset.
        packetKeys = []
        hashLogs = []
        for k in keys:
            packetKeys.append(k.getPacketKey())
            hashLogs.append(k.getHashLog())
        packetHandler.setKeys(packetKeys, hashLogs)
    if statusFile:
        # Record the descriptor filenames of the live keysets.
        writeFile(statusFile,
                  "".join(["%s\n"%k.getDescriptorFileName() for k in keys]),
                  0644)
    # Delete dead keysets, then re-scan if anything was removed.
    for msg, ks in deadKeys:
        LOG.info(msg)
        ks.delete()
    if deadKeys:
        self.checkKeys()
    # Force the next-rotation time to be recomputed.
    self.nextUpdate = None
    self.getNextKeyRotation(keys)
def rescan(self): self._statusDB.close() os.path.unlink(self._dbLoc) self.clean() self._statusDB = mixminion.Filestore.WritethroughDict( self._dbLoc, "server cache") for key in os.listdir(self._loc): fn = os.path.join(self._loc, key) try: #XXXX digest-cache server = ServerInfo(fname=fn) except (OSError, MixError, ConfigError), e: LOG.warn("Deleting invalid server %s: %s", key, e) os.unlink(fn) server = None if server is None: continue k2 = self._getKey(server.getDigest()) if k2 != key: LOG.info("Renaming server in %s to correct file %s", key, k2) os.rename(fn, os.path.join(self._loc, k2)) key = k2 self._updateCache(key, server)
def rescan(self): self._statusDB.close() os.path.unlink(self._dbLoc) self.clean() self._statusDB = mixminion.Filestore.WritethroughDict( self._dbLoc, "server cache") for key in os.listdir(self._loc): fn = os.path.join(self._loc, key) try: #XXXX digest-cache server = ServerInfo(fname=fn) except (OSError, MixError, ConfigError), e: LOG.warn("Deleting invalid server %s: %s", key, e) os.unlink(fn) server = None if server is None: continue k2 = self._getKey(server.getDigest()) if k2 != key: LOG.info("Renaming server in %s to correct file %s",key,k2) os.rename(fn, os.path.join(self._loc, k2)) key = k2 self._updateCache(key, server)
def createKeysAsNeeded(self, now=None):
    """Generate new keys and descriptors as needed, so that the next
       PUBLICATION_LATENCY+PREPUBLICATION_INTERVAL seconds are covered."""
    if now is None:
        now = time.time()

    # Nothing to do if the next scheduled keygen is still in the future.
    if self.getNextKeygen() > now - 10: # 10 seconds of leeway
        return

    # Coverage starts where our last key ends (but never in the past).
    if self.keySets:
        lastExpiry = self.keySets[-1][1]
        if lastExpiry < now:
            lastExpiry = now
    else:
        lastExpiry = now

    needToCoverUntil = now + PUBLICATION_LATENCY + PREPUBLICATION_INTERVAL
    timeToCover = needToCoverUntil - lastExpiry

    lifetime = self.config['Server']['PublicKeyLifetime'].getSeconds()
    # Round up so the whole interval is covered.
    nKeys = int(ceilDiv(timeToCover, lifetime))

    LOG.info("Creating %s keys", nKeys)
    self.createKeys(num=nKeys)
def createKeysAsNeeded(self,now=None):
    """Generate new keys and descriptors as needed, so that the next
       PUBLICATION_LATENCY+PREPUBLICATION_INTERVAL seconds are covered."""
    if now is None:
        now = time.time()

    # Nothing to do if the next scheduled keygen is still in the future.
    if self.getNextKeygen() > now-10: # 10 seconds of leeway
        return

    # Coverage starts where our last key ends (but never in the past).
    if self.keySets:
        lastExpiry = self.keySets[-1][1]
        if lastExpiry < now:
            lastExpiry = now
    else:
        lastExpiry = now

    needToCoverUntil = now+PUBLICATION_LATENCY+PREPUBLICATION_INTERVAL
    timeToCover = needToCoverUntil-lastExpiry

    lifetime = self.config['Server']['PublicKeyLifetime'].getSeconds()
    # Round up so the whole interval is covered.
    nKeys = int(ceilDiv(timeToCover, lifetime))

    LOG.info("Creating %s keys", nKeys)
    self.createKeys(num=nKeys)
def expungeServersByNickname(self, nickname):
    """Forcibly remove all servers named <nickname>"""
    try:
        self._lock()
        LOG.info("Removing all servers named %s", nickname)
        # Nicknames are indexed case-insensitively.
        key = nickname.lower()
        if not self.serversByNickname.has_key(key):
            LOG.info(" (No such servers exist)")
            return
        victims = self.serversByNickname[key]
        for filename in victims:
            LOG.info(" Removing %s", filename)
            # Archive the descriptor file, then forget it.
            _moveServer(self.serverDir, self.archiveDir, filename)
            del self.servers[filename]
        del self.serversByNickname[key]
        LOG.info(" (%s servers removed)", len(victims))
    finally:
        self._unlock()
def configureLog(config):
    """Given a configuration file, set up the log.  May replace the log
       global variable.
    """
    global log
    if config['Server']['LogStats']:
        LOG.info("Enabling statistics logging")
        statsfile = config.getStatsFile()
        if not os.path.exists(os.path.split(statsfile)[0]):
            # create parent if needed.
            os.makedirs(os.path.split(statsfile)[0], 0700)
        # Stats accumulate in a work file, then rotate into statsfile.
        workfile = os.path.join(config.getWorkDir(), "stats.tmp")
        log = EventLog(
            workfile, statsfile,
            config['Server']['StatsInterval'].getSeconds())
        # Imported lazily so stats support is only pulled in when enabled.
        import mixminion.MMTPClient
        mixminion.MMTPClient.useEventStats()
        LOG.info("Statistics logging enabled")
    else:
        # Stats disabled: install a do-nothing event log.
        log = NilEventLog()
        LOG.info("Statistics logging disabled")
def configureLog(config): """Given a configuration file, set up the log. May replace the log global variable. """ global log if config['Server']['LogStats']: LOG.info("Enabling statistics logging") statsfile = config.getStatsFile() if not os.path.exists(os.path.split(statsfile)[0]): # create parent if needed. os.makedirs(os.path.split(statsfile)[0], 0700) workfile = os.path.join(config.getWorkDir(), "stats.tmp") log = EventLog(workfile, statsfile, config['Server']['StatsInterval'].getSeconds()) import mixminion.MMTPClient mixminion.MMTPClient.useEventStats() LOG.info("Statistics logging enabled") else: log = NilEventLog() LOG.info("Statistics logging disabled")
def getNextKeyRotation(self, curKeys=None):
    """Calculate the next time at which we should change the set of live
       keys."""
    if self.nextUpdate is None:
        if curKeys is None:
            curKeys = self.currentKeys
            if curKeys is None:
                curKeys = self.getServerKeysets()
        # Build the schedule: each current keyset is removed keyOverlap
        # seconds after its stated expiry; each non-current keyset is
        # added when it becomes valid.
        events = []
        liveNames = {}
        for keyset in curKeys:
            validAfter, validUntil = keyset.getLiveness()
            events.append((validUntil + self.keyOverlap, "RM"))
            liveNames[keyset.keyname] = 1
        for validAfter, validUntil, keyset in self.keySets:
            if keyset.keyname not in liveNames:
                events.append((validAfter, "ADD"))
        if not events:
            LOG.info("No future key rotation events.")
            self.nextUpdate = sys.maxint
            return self.nextUpdate
        # The earliest event determines when we next rotate.
        self.nextUpdate, eventType = min(events)
        if eventType == "RM":
            LOG.info("Next key event: old key is removed at %s",
                     formatTime(self.nextUpdate, 1))
        else:
            assert eventType == "ADD"
            LOG.info("Next key event: new key becomes valid at %s",
                     formatTime(self.nextUpdate, 1))
    return self.nextUpdate
def getNextKeyRotation(self, curKeys=None):
    """Return the next time at which the set of live keys must change,
       computing (and caching it in self.nextUpdate) if necessary."""
    if self.nextUpdate is not None:
        return self.nextUpdate
    if curKeys is None:
        if self.currentKeys is None:
            curKeys = self.getServerKeysets()
        else:
            curKeys = self.currentKeys
    events = []
    currentNames = {}
    # Every live keyset is dropped keyOverlap seconds past its expiry.
    for ks in curKeys:
        start, end = ks.getLiveness()
        events.append((end + self.keyOverlap, "RM"))
        currentNames[ks.keyname] = 1
    # Every pending keyset is added the moment it becomes valid.
    for start, end, ks in self.keySets:
        if ks.keyname not in currentNames:
            events.append((start, "ADD"))
    events.sort()
    if not events:
        LOG.info("No future key rotation events.")
        self.nextUpdate = sys.maxint
    else:
        # The earliest event wins.
        self.nextUpdate, what = events[0]
        if what == "RM":
            LOG.info("Next key event: old key is removed at %s",
                     formatTime(self.nextUpdate, 1))
        else:
            assert what == "ADD"
            LOG.info("Next key event: new key becomes valid at %s",
                     formatTime(self.nextUpdate, 1))
    return self.nextUpdate
class ServerInbox: """A ServerInbox holds server descriptors received from the outside world that are not yet ready to be included in the directory. """ ## Fields: # store: A ServerStore to hold server files. Must be readable/writeable by # directory server user and CGI user. # voteFile: A VoteFile obejct. Must be readable by CGI user. def __init__(self, store, voteFile): """Create a new ServerInbox.""" self.store = store self.voteFile = voteFile def receiveServer(self, text, source, now=None): """Process a new server descriptor and store it for later action. (To be run by the CGI user.) If the server will be automatically inserted, return true. If the server will be inserted (given administrator intervention), raise ServerQueuedException. If there is a problem, log it, and raise UIError. text -- a string containing a new server descriptor. source -- a (human readable) string describing the source of the descriptor, used in error messages. """ if now is None: now = time.time() try: #XXXX digest cache?? 
server = ServerInfo(string=text, assumeValid=0, _keepContents=1) except MixError, e: LOG.warn("Rejected invalid server from %s: %s", source, e) raise UIError("Server descriptor was not valid: %s" % e) status = self.voteFile.getServerStatus(server) if status == "mismatch": LOG.warn("Rejected server with mismatched identity for %r from %s", nickname, source) self.store.addServer(server) raise UIError(("I already know a server named " "%s with a different key.") % server.getNickname()) elif status == "ignore": LOG.warn("Rejected descriptor for ignored server %r from %s", nickname, source) return if server.isExpiredAt(time.time()): LOG.warn("Rejecting expired descriptor from %s", source) raise UIError("That descriptor is already expired; your clock" " is probably skewed.") if status in ("yes", "no", "abstain"): LOG.info("Received update for server %r from %s (vote=%s)", server.getNickname(), source, status) self.store.addServer(server) return 1 else: assert status == "unknown" LOG.info("Received previously unknown server %s from %s", nickname, source) self.store.addServer(server) raise ServerQueuedException( "Server queued pending manual checking")
if info.get('Content-Type') != 'text/plain': LOG.error("Bad content type %s from directory"%info.get( 'Content-Type')) return 'error' m = DIRECTORY_RESPONSE_RE.search(reply) if not m: LOG.error("Didn't understand reply from directory: %s", reply) return 'error' ok = int(m.group(1)) msg = m.group(2) if not ok: LOG.error("Directory rejected descriptor: %r", msg) return 'reject' LOG.info("Directory accepted descriptor: %r", msg) self.markAsPublished() return 'accept' # Matches the reply a directory server gives. DIRECTORY_RESPONSE_RE = re.compile(r'^Status: (0|1)[ \t]*\nMessage: (.*)$', re.M) class _WarnWrapper: """Helper for 'checkDescriptorConsistency' to keep its implementation short. Counts the number of times it's invoked, and delegates to LOG.warn if silence is false.""" def __init__(self, silence, isPublished): self.silence = silence self.errors = 0 self.called = 0
if info.get('Content-Type') != 'text/plain': LOG.error("Bad content type %s from directory" % info.get('Content-Type')) return 'error' m = DIRECTORY_RESPONSE_RE.search(reply) if not m: LOG.error("Didn't understand reply from directory: %s", reply) return 'error' ok = int(m.group(1)) msg = m.group(2) if not ok: LOG.error("Directory rejected descriptor: %r", msg) return 'reject' LOG.info("Directory accepted descriptor: %r", msg) self.markAsPublished() return 'accept' # Matches the reply a directory server gives. DIRECTORY_RESPONSE_RE = re.compile(r'^Status: (0|1)[ \t]*\nMessage: (.*)$', re.M) class _WarnWrapper: """Helper for 'checkDescriptorConsistency' to keep its implementation short. Counts the number of times it's invoked, and delegates to LOG.warn if silence is false.""" def __init__(self, silence, isPublished): self.silence = silence
def decodePayload(payload, tag, key=None, userKeys=(), retNym=None):
    """Given a 28K payload and a 20-byte decoding tag, attempt to decode
       the original message.  Returns either a SingletonPayload instance,
       a FragmentPayload instance, or None.

       key: an RSA key to decode encrypted forward messages, or None
       userKeys: a sequence of (name,key) tuples maping identity names
           to SURB keys.  For backward compatibility, 'userKeys' may also
           be None (no SURBs known), a dict (from name to key), or a
           single key (implied identity is "").
       retNym: If present, and if the payload was a reply, we call
           retNym.append(pseudonym).  (For the default SURB identity, we
           append the empty string.)

       If we can successfully decrypt the payload, we return it.  If we
       might be able to decrypt the payload given more/different keys, we
       return None.  If the payload is corrupt, we raise MixError.
    """
    # Normalize the legacy spellings of 'userKeys' to a (name, key) list.
    if userKeys is None:
        userKeys = []
    elif type(userKeys) is types.DictType:
        userKeys = userKeys.items()
    elif type(userKeys) is types.StringType:
        userKeys = [("", userKeys)]

    if len(payload) != PAYLOAD_LEN:
        raise MixError("Wrong payload length")
    if len(tag) not in (0, TAG_LEN):
        raise MixError("Wrong tag length: %s" % len(tag))

    # A payload that already carries a valid checksum is a forward message.
    if _checkPayload(payload):
        return parsePayload(payload)

    # Without a tag we have no way to try the reply or RSA cases.
    if not tag:
        return None

    # If H(tag|userKey|"Validate") ends with 0, then the message _might_
    # be a reply message using H(tag|userKey|"Generate") as the seed for
    # its master secrets.  (There's a 1-in-256 chance that it isn't.)
    for identity, surbKey in userKeys:
        if Crypto.sha1(tag + surbKey + "Validate")[-1] != "\x00":
            continue
        try:
            decoded = _decodeStatelessReplyPayload(payload, tag, surbKey)
        except MixError:
            continue
        if identity:
            LOG.info("Decoded reply message to identity %r", identity)
        if retNym is not None:
            retNym.append(identity)
        return decoded

    # Last resort: it may be an encrypted forward message for our RSA key.
    if key is None:
        return None
    return _decodeEncryptedForwardPayload(payload, tag, key)
def shutdown(self, flush=1):
    """Ask this thread to exit once its current job is finished.

       flush -- if true, discard all jobs still waiting in the queue."""
    LOG.info("Telling %s to shut down.", self.threadName)
    if flush:
        self.mqueue.clear()
    # The sentinel object makes the worker loop terminate.
    self.mqueue.put(ProcessingThread._Shutdown())
def decodePayload(payload, tag, key=None, userKeys=(), retNym=None):
    """Given a 28K payload and a 20-byte decoding tag, attempt to decode
       the original message.  Returns either a SingletonPayload instance,
       a FragmentPayload instance, or None.

       key: an RSA key to decode encrypted forward messages, or None
       userKeys: a sequence of (name,key) tuples maping identity names
           to SURB keys.  For backward compatibility, 'userKeys' may also
           be None (no SURBs known), a dict (from name to key), or a
           single key (implied identity is "").
       retNym: If present, and if the payload was a reply, we call
           retNym.append(pseudonym).  (For the default SURB identity, we
           append the empty string.)

       If we can successfully decrypt the payload, we return it.  If we
       might be able to decrypt the payload given more/different keys, we
       return None.  If the payload is corrupt, we raise MixError.
    """
    # Accept the backward-compatible forms of 'userKeys'.
    if userKeys is None:
        userKeys = []
    elif type(userKeys) is types.StringType:
        userKeys = [("", userKeys)]
    elif type(userKeys) is types.DictType:
        userKeys = userKeys.items()

    if len(payload) != PAYLOAD_LEN:
        raise MixError("Wrong payload length")
    if len(tag) not in (0, TAG_LEN):
        raise MixError("Wrong tag length: %s" % len(tag))

    if _checkPayload(payload):
        # An intact checksum means a plaintext forward message.
        return parsePayload(payload)
    if not tag:
        return None

    # If H(tag|userKey|"Validate") ends with 0, then the message _might_
    # be a reply message using H(tag|userKey|"Generate") as the seed for
    # its master secrets.  (There's a 1-in-256 chance that it isn't.)
    for name, userKey in userKeys:
        digest = Crypto.sha1(tag + userKey + "Validate")
        if digest[-1] == '\x00':
            try:
                result = _decodeStatelessReplyPayload(payload, tag, userKey)
                if name:
                    LOG.info("Decoded reply message to identity %r", name)
                if retNym is not None:
                    retNym.append(name)
                return result
            except MixError:
                pass

    # If none of the above worked, try decrypting the start of tag+payload
    # with our RSA key.
    if key is not None:
        maybe = _decodeEncryptedForwardPayload(payload, tag, key)
        if maybe is not None:
            return maybe
    return None
def shutdown(self, flush=1):
    """Tell this thread to stop once the job it is running completes.

       flush -- if true, drop every job still queued before stopping."""
    LOG.info("Telling %s to shut down.", self.threadName)
    if flush:
        self.mqueue.clear()
    # Enqueue the shutdown sentinel; the worker exits when it sees it.
    self.mqueue.put(ProcessingThread._Shutdown())
def shutdown(self):
    """Stop listening: mark this connection closed and release its socket."""
    # Log the fd before closing; fileno() is invalid afterwards.
    LOG.debug("Closing listener connection (fd %s)", self.sock.fileno())
    self.isOpen = 0
    self.sock.close()
    LOG.info("Server connection closed")
for src, val in directories: LOG.debug("Checking vote directory from %s",src) val = str(val) try: directory = mixminion.ServerInfo.SignedDirectory(string=val, validatedDigests=validatedDigests, _keepServerContents=1) except ConfigError,e: LOG.warn("Rejecting malformed vote directory from %s: %s",src,e) continue try: checkVoteDirectory(voters, validAfter, directory) except BadVote, e: LOG.warn("Rejecting vote directory from %s: %s", src, e) continue LOG.info("Accepting vote directory from %s",src) # Remember server descs minimally to save room. sig = directory.getSignatures()[0] fp = pk_fingerprint(sig['Signed-Directory']['Directory-Identity']) serversByDir[fp] = [] for s in directory.getAllServers(): d = s.getDigest() serversByDir[fp].append(d) if not serverMap.has_key(d): serverMap[d] = s del directory.servers[:] # Save RAM if goodDirectories.has_key(fp): LOG.warn("Multiple directories with fingerprint %s; ignoring one from %s", fp, goodDirectories[fp][0])