def prevalidate(self, contents):
    """Check parsed config sections before full validation.

       Raises ConfigError if a 'Server' section carries an unsupported
       'Descriptor-Version'.  Any other section whose version entry does
       not match self.expected_versions is dropped with a warning.
       Returns the surviving list of (name, entries) pairs.
    """
    # Refuse outright if the descriptor itself is a version we can't parse.
    for name, ents in contents:
        if name == 'Server':
            for k, v, _ in ents:
                if k == 'Descriptor-Version' and v.strip() != '0.2':
                    raise ConfigError("Unrecognized descriptor version: %s"
                                      % v.strip())
    # Remove any sections with unrecognized versions.
    revisedContents = []
    for name, ents in contents:
        expected = self.expected_versions.get(name)
        if not expected:
            revisedContents.append((name, ents))
            continue
        versionkey, versionval = expected
        # FIX: the original reused 'v' as the loop variable here, shadowing
        # the expected-version tuple fetched above; renamed for clarity.
        for k, val, _ in ents:
            if k == versionkey and val.strip() != versionval:
                LOG.warn("Skipping %s section with unrecognized version %s",
                         name, val.strip())
                break
        else:
            revisedContents.append((name, ents))
    return revisedContents
def prevalidate(self, contents):
    """Check parsed config sections before full validation.

       Raises ConfigError if a 'Server' section carries an unsupported
       'Descriptor-Version'.  Any other section whose version entry does
       not match self.expected_versions is dropped with a warning.
       Returns the surviving list of (name, entries) pairs.
    """
    # Refuse outright if the descriptor itself is a version we can't parse.
    for name, ents in contents:
        if name == 'Server':
            for k, v, _ in ents:
                if k == 'Descriptor-Version' and v.strip() != '0.2':
                    raise ConfigError("Unrecognized descriptor version: %s"
                                      % v.strip())
    # Remove any sections with unrecognized versions.
    revisedContents = []
    for name, ents in contents:
        expected = self.expected_versions.get(name)
        if not expected:
            revisedContents.append((name, ents))
            continue
        versionkey, versionval = expected
        # FIX: the original reused 'v' as the loop variable here, shadowing
        # the expected-version tuple fetched above; renamed for clarity.
        for k, val, _ in ents:
            if k == versionkey and val.strip() != versionval:
                LOG.warn("Skipping %s section with unrecognized version %s",
                         name, val.strip())
                break
        else:
            revisedContents.append((name, ents))
    return revisedContents
def checkDescriptorConsistency(self, regen=1):
    """Check whether the server descriptors in this keyring are
       consistent with the server's configuration.  If 'regen' is
       true, inconsistent descriptors are regenerated."""
    identity = None
    state = []
    # First pass: quietly collect every keyset that is not 'good'.
    for _, _, ks in self.keySets:
        ok = ks.checkConsistency(self.config, 0)
        if ok == 'good':
            continue
        state.append((ok, ks))
    if not state:
        return

    LOG.warn("Some generated keysets do not match "
             "current configuration...")

    # Second pass: re-run the check on each problem keyset (the second
    # argument presumably makes checkConsistency report details -- confirm),
    # and regenerate 'bad' descriptors when requested.
    for ok, ks in state:
        va, vu = ks.getLiveness()
        LOG.warn("Keyset %s (%s--%s):", ks.keyname,
                 formatTime(va, 1), formatTime(vu, 1))
        ks.checkConsistency(self.config, 1)
        if regen and ok == 'bad':
            if not identity:
                # Load the identity key lazily; it's only needed here.
                identity = self.getIdentityKey()
            ks.regenerateServerDescriptor(self.config, identity)
def getInbufLine(self, maxBytes=None, terminator="\r\n", clear=0,
                 allowExtra=0):
    """Return the first prefix of the current inbuf that ends with the
       'terminator' string.  Returns the string on success, None if no
       such string is found, and -1 on error.

       Errors occur when: there are 'maxBytes' bytes available but the
       terminator is not found; or when 'allowExtra' is false and there
       is data on the input buffer following the terminator."""
    data = self.getInbuf(maxBytes)
    pos = data.find(terminator)
    if pos < 0:
        # No terminator yet.  If we already pulled the full byte budget,
        # the peer is sending an over-long line: treat it as an error.
        if len(data) == maxBytes:
            LOG.warn("Too much data without EOL from %s", self.address)
            return -1
        return None
    end = pos + len(terminator)
    if not allowExtra and end < self.inbuflen:
        # There are bytes after the terminator, which the caller forbade.
        LOG.warn("Trailing data after EOL from %s", self.address)
        return -1
    return self.getInbuf(end, clear=clear)
def readProtocol(self):
    """Read and answer the peer's MMTP protocol announcement line.

       Picks the first version from self.PROTOCOL_VERSIONS that the peer
       also offers and replies with 'MMTP <version>'; shuts the
       connection down on any error."""
    s = self.getInbufLine(4096, clear=1)
    if s is None:
        # No complete line yet; wait for more data.
        return
    elif s == -1:
        # Line too long or trailing garbage: give up.
        self.startShutdown()
        #failed
        return

    # We have the whole announcement; stop reading until our reply is sent.
    self.stopReading()

    m = PROTOCOL_RE.match(s)
    if not m:
        LOG.warn("Bad MMTP protocol string format from %s", self.address)
        #failed
        self.startShutdown()
        return
    # The match group is a comma-separated list of offered versions.
    protocols = m.group(1).split(",")
    # Choose the first of OUR versions the peer supports.
    for p in self.PROTOCOL_VERSIONS:
        if p in protocols:
            self.protocol = p
            self.onWrite = self.protocolWritten
            self.beginWriting("MMTP %s\r\n" % p)
            return
    LOG.warn("No common protocols with %s", self.address)
    #failed
    self.startShutdown()
def addChunk(self, h, fm):
    """Register a chunk with handle h and FragmentMetadata fm.  If the
       chunk is inconsistent with other fragments of this message,
       raise MismatchedFragment."""
    assert fm.isChunk
    assert fm.messageid == self.messageid
    # The chunk must agree with everything already known for this message.
    if fm.size != self.params.length:
        raise MismatchedFragment("Mismatched message length")
    if fm.overhead != self.overhead:
        raise MismatchedFragment("Mismatched packet overhead")
    if self.chunks.has_key(fm.chunkNum):
        raise MismatchedFragment("Duplicate chunks")
    if fm.nym != self.nym:
        raise MismatchedFragment(
            "Fragments received for differing identities")

    # Track the earliest insertion date over all pieces of the message.
    if self.inserted > fm.insertedDate:
        self.inserted = fm.insertedDate

    self.chunks[fm.chunkNum] = (h, fm)

    # Having the whole chunk makes its individual fragments redundant.
    if self.fragmentsByChunk[fm.chunkNum]:
        LOG.warn("Found a chunk with unneeded fragments for message %r",
                 self.messageid)
    if self.readyChunks.get(fm.chunkNum):
        del self.readyChunks[fm.chunkNum]
def addChunk(self, h, fm):
    """Register a chunk with handle h and FragmentMetadata fm.  If the
       chunk is inconsistent with other fragments of this message,
       raise MismatchedFragment."""
    assert fm.isChunk
    assert fm.messageid == self.messageid
    # The chunk must agree with everything already known for this message.
    if fm.size != self.params.length:
        raise MismatchedFragment("Mismatched message length")
    if fm.overhead != self.overhead:
        raise MismatchedFragment("Mismatched packet overhead")
    if self.chunks.has_key(fm.chunkNum):
        raise MismatchedFragment("Duplicate chunks")
    if fm.nym != self.nym:
        raise MismatchedFragment(
            "Fragments received for differing identities")

    # Track the earliest insertion date over all pieces of the message.
    if self.inserted > fm.insertedDate:
        self.inserted = fm.insertedDate

    self.chunks[fm.chunkNum] = (h, fm)

    # Having the whole chunk makes its individual fragments redundant.
    if self.fragmentsByChunk[fm.chunkNum]:
        LOG.warn("Found a chunk with unneeded fragments for message %r",
                 self.messageid)
    if self.readyChunks.get(fm.chunkNum):
        del self.readyChunks[fm.chunkNum]
def checkDescriptorConsistency(self, regen=1):
    """Check whether the server descriptors in this keyring are
       consistent with the server's configuration.  If 'regen' is
       true, inconsistent descriptors are regenerated."""
    identity = None
    state = []
    # First pass: quietly collect every keyset that is not 'good'.
    for _, _, ks in self.keySets:
        ok = ks.checkConsistency(self.config, 0)
        if ok == 'good':
            continue
        state.append((ok, ks))
    if not state:
        return

    LOG.warn("Some generated keysets do not match "
             "current configuration...")

    # Second pass: re-run the check on each problem keyset (the second
    # argument presumably makes checkConsistency report details -- confirm),
    # and regenerate 'bad' descriptors when requested.
    for ok, ks in state:
        va, vu = ks.getLiveness()
        LOG.warn("Keyset %s (%s--%s):", ks.keyname,
                 formatTime(va, 1), formatTime(vu, 1))
        ks.checkConsistency(self.config, 1)
        if regen and ok == 'bad':
            if not identity:
                # Load the identity key lazily; it's only needed here.
                identity = self.getIdentityKey()
            ks.regenerateServerDescriptor(self.config, identity)
def readProtocol(self):
    """Read and answer the peer's MMTP protocol announcement line.

       Picks the first version from self.PROTOCOL_VERSIONS that the peer
       also offers and replies with 'MMTP <version>'; shuts the
       connection down on any error."""
    s = self.getInbufLine(4096, clear=1)
    if s is None:
        # No complete line yet; wait for more data.
        return
    elif s == -1:
        # Line too long or trailing garbage: give up.
        self.startShutdown()
        #failed
        return

    # We have the whole announcement; stop reading until our reply is sent.
    self.stopReading()

    m = PROTOCOL_RE.match(s)
    if not m:
        LOG.warn("Bad MMTP protocol string format from %s", self.address)
        #failed
        self.startShutdown()
        return
    # The match group is a comma-separated list of offered versions.
    protocols = m.group(1).split(",")
    # Choose the first of OUR versions the peer supports.
    for p in self.PROTOCOL_VERSIONS:
        if p in protocols:
            self.protocol = p
            self.onWrite = self.protocolWritten
            self.beginWriting("MMTP %s\r\n" % p)
            return
    LOG.warn("No common protocols with %s", self.address)
    #failed
    self.startShutdown()
def getHandlesByDestAndAge(self, destList, directory, notAfter=None,
                           warnUnused=1):
    """Return a list of handles for all messages queued for servers in
       a given list before a given date.

       destList -- A list of hostnames, ips, keyids, or nicknames
           for servers whose messages should be included in the result.
       directory -- An instance of ClientDirectory used to resolve
           nicknames.  This may be None if no nicknames are included.
       notAfter -- If provided, a time such that no messages queued
           later should be included
       warnUnused -- If true, we log a message for every element in
           destList that has no matching messages in the queue.
    """
    destSet = {}
    reverse = {}
    # Resolve nicknames to keyids when possible; 'reverse' remembers the
    # name the caller used for each keyid, for reporting below.
    for d in destList:
        if directory:
            keyid = directory.getKeyIDByNickname(d)
            if keyid:
                destSet[keyid] = 1
                reverse[keyid] = d
                continue
        destSet[d] = 1

    self.loadMetadata()
    result = []
    foundAny = {}    # destList aliases with at least one queued packet
    foundMatch = {}  # destList aliases with at least one old-enough packet
    for h in self.store.getAllMessages():
        _, r, when = self.store.getMetadata(h)
        # Match the routing info against any alias the caller might know.
        if (destSet.has_key(r.keyinfo) or
            (hasattr(r, 'hostname') and destSet.has_key(r.hostname)) or
            (hasattr(r, 'ip') and destSet.has_key(r.ip))):
            # Every alias this routing info answers to (None entries are
            # harmless: they never collide with real destList members).
            keys = [ getattr(r, 'hostname', None),
                     getattr(r, 'ip', None),
                     reverse.get(r.keyinfo, None),
                     r.keyinfo ]
            for k in keys:
                foundAny[k] = 1
            if notAfter and when > notAfter:
                # Queued too recently: note that we saw it, but skip it.
                continue
            for k in keys:
                foundMatch[k] = 1
            result.append(h)

    if warnUnused:
        for d in destList:
            if foundMatch.get(d):
                continue
            elif foundAny.get(d):
                LOG.warn("No expired packets found for %r", d)
            else:
                LOG.warn("No pending packets found for %r", d)
    return result
def __call__(self, *args):
    """Record one warning event and forward it to LOG.warn unless this
       recorder is silenced.  For unpublished descriptors, the word
       'published' in the format string is rewritten first."""
    self.called = 1
    self.errors = self.errors + 1
    if not self.published:
        rewritten = list(args)
        rewritten[0] = rewritten[0].replace(
            "published", "in unpublished descriptor")
        args = rewritten
    if not self.silence:
        LOG.warn(*args)
def __clientFinished(self, addr):
    """Called when a client connection runs out of packets to send, or
       halts: drop it from the address map."""
    if self.clientConByAddr.has_key(addr):
        del self.clientConByAddr[addr]
    else:
        LOG.warn("Didn't find client connection to %s in address map",
                 addr)
def getHeaders(self):
    """Return a dict containing the headers for this message."""
    if self.type is None:
        # Not yet decoded: do that first so self.headers is populated.
        self.decode()
    headers = self.headers
    if headers is None:
        LOG.warn("getHeaders found no decoded headers")
        return {}
    return headers
def process(self, r, w, x, cap): #XXXX007 do something with x try: con, addr = self.sock.accept() LOG.debug("Accepted connection from %s", addr) self.connectionFactory(con) except socket.error, e: LOG.warn("Socket error while accepting connection: %s", e)
def configure_trng(config):
    """Initialize the true entropy source from a given Config object.  If
       none is provided, tries some sane defaults."""
    global _TRNG_FILENAME
    global _theTrueRNG

    if sys.platform == 'win32':
        # We have two entropy sources on windows: openssl's built-in
        # entropy generator that takes data from the screen, and
        # Windows's CryptGenRandom function.  Because the former is
        # insecure, and the latter is closed-source, we xor them.
        _ml.win32_openssl_seed()
        _ml.openssl_seed(_ml.win32_get_random_bytes(32))
        _theTrueRNG = _XorRNG(_OpensslRNG(), _WinTrueRNG())
        return

    if config is not None:
        requestedFile = config['Host'].get('EntropySource')
    else:
        requestedFile = None

    # Build a list of candidates: the user's request first, then the
    # platform defaults ('***' is the catch-all key).
    defaults = PLATFORM_TRNG_DEFAULTS.get(sys.platform,
                                          PLATFORM_TRNG_DEFAULTS['***'])
    files = [ requestedFile ] + defaults

    # Now find the first of our candidates that exists and is a character
    # device.
    randFile = None
    for filename in files:
        if filename is None:
            continue
        # Only complain about the file the user explicitly asked for.
        verbose = (filename == requestedFile)
        if not os.path.exists(filename):
            if verbose:
                LOG.warn("No such file as %s", filename)
        else:
            st = os.stat(filename)
            if not (st[stat.ST_MODE] & stat.S_IFCHR):
                if verbose:
                    LOG.error("Entropy source %s isn't a character device",
                              filename)
            else:
                randFile = filename
                break

    if randFile is None and _TRNG_FILENAME is None:
        # Nothing usable now, and nothing remembered from before: fatal.
        LOG.fatal("No entropy source available: Tried all of %s", files)
        raise MixFatalError("No entropy source available")
    elif randFile is None:
        LOG.warn("Falling back to previous entropy source %s",
                 _TRNG_FILENAME)
    else:
        LOG.info("Setting entropy source to %r", randFile)
        _TRNG_FILENAME = randFile
    _theTrueRNG = _TrueRNG(1024)
def encodeMessage(message, overhead, uncompressedFragmentPrefix="",
                  paddingPRNG=None):
    """Given a message, compress it, fragment it into individual payloads,
       and add extra fields (size, hash, etc) as appropriate.  Return a list
       of strings, each of which is a message payload suitable for use in
       build*Message.

       message: the initial message
       overhead: number of bytes to omit from each payload, given the
           type of the message encoding.  (0 or ENC_FWD_OVERHEAD)
       uncompressedFragmentPrefix: If we fragment the message, we add
           this string to the message after compression but before
           whitening and fragmentation.
       paddingPRNG: generator for padding.

       Note: If multiple strings are returned, be sure to shuffle them
       before transmitting them to the network.
    """
    assert overhead in (0, ENC_FWD_OVERHEAD)
    if paddingPRNG is None:
        paddingPRNG = Crypto.getCommonPRNG()

    origLength = len(message)
    payload = compressData(message)
    length = len(payload)

    if length > 1024 and length*20 <= origLength:
        # Compression ratio over 20:1 -- matches the warning text below.
        LOG.warn("Message is very compressible and will look like a zlib bomb")

    paddingLen = PAYLOAD_LEN - SINGLETON_PAYLOAD_OVERHEAD - overhead - length

    # If the compressed payload fits in 28K, we're set.
    if paddingLen >= 0:
        # We pad the payload, and construct a new SingletonPayload,
        # including this payload's size and checksum.
        payload += paddingPRNG.getBytes(paddingLen)
        p = SingletonPayload(length, None, payload)
        p.computeHash()
        return [ p.pack() ]

    # Okay, we need to fragment the message.  First, add the prefix if needed.
    if uncompressedFragmentPrefix:
        payload = uncompressedFragmentPrefix+payload
    # Now generate a message ID
    messageid = Crypto.getCommonPRNG().getBytes(FRAGMENT_MESSAGEID_LEN)
    # Figure out how many chunks to divide it into...
    p = mixminion.Fragments.FragmentationParams(len(payload), overhead)
    # ... fragment the payload into chunks...
    rawFragments = p.getFragments(payload)
    fragments = []
    # ... and annotate each chunk with appropriate payload header info.
    for i in xrange(len(rawFragments)):
        pyld = FragmentPayload(i, None, messageid, p.length, rawFragments[i])
        pyld.computeHash()
        fragments.append(pyld.pack())
        # Drop the reference to each raw chunk as soon as it's packed.
        rawFragments[i] = None
    return fragments
def getBaseDir(self):
    """Return the base directory for this configuration."""
    base = self["Server"]["BaseDir"]
    if base is None:
        # Fall back to the older Homedir setting.
        base = self["Server"]["Homedir"]
    if base is None:
        LOG.warn("Defaulting base directory to /var/spool/minion; this will change.")
        base = "/var/spool/minion"
    return base
def configure_trng(config):
    """Initialize the true entropy source from a given Config object.  If
       none is provided, tries some sane defaults."""
    global _TRNG_FILENAME
    global _theTrueRNG

    if sys.platform == 'win32':
        # We have two entropy sources on windows: openssl's built-in
        # entropy generator that takes data from the screen, and
        # Windows's CryptGenRandom function.  Because the former is
        # insecure, and the latter is closed-source, we xor them.
        _ml.win32_openssl_seed()
        _ml.openssl_seed(_ml.win32_get_random_bytes(32))
        _theTrueRNG = _XorRNG(_OpensslRNG(), _WinTrueRNG())
        return

    if config is not None:
        requestedFile = config['Host'].get('EntropySource')
    else:
        requestedFile = None

    # Build a list of candidates: the user's request first, then the
    # platform defaults ('***' is the catch-all key).
    defaults = PLATFORM_TRNG_DEFAULTS.get(sys.platform,
                                          PLATFORM_TRNG_DEFAULTS['***'])
    files = [requestedFile] + defaults

    # Now find the first of our candidates that exists and is a character
    # device.
    randFile = None
    for filename in files:
        if filename is None:
            continue
        # Only complain about the file the user explicitly asked for.
        verbose = (filename == requestedFile)
        if not os.path.exists(filename):
            if verbose:
                LOG.warn("No such file as %s", filename)
        else:
            st = os.stat(filename)
            if not (st[stat.ST_MODE] & stat.S_IFCHR):
                if verbose:
                    LOG.error("Entropy source %s isn't a character device",
                              filename)
            else:
                randFile = filename
                break

    if randFile is None and _TRNG_FILENAME is None:
        # Nothing usable now, and nothing remembered from before: fatal.
        LOG.fatal("No entropy source available: Tried all of %s", files)
        raise MixFatalError("No entropy source available")
    elif randFile is None:
        LOG.warn("Falling back to previous entropy source %s",
                 _TRNG_FILENAME)
    else:
        LOG.info("Setting entropy source to %r", randFile)
        _TRNG_FILENAME = randFile
    _theTrueRNG = _TrueRNG(1024)
def onConnected(self): LOG.debug("Completed MMTP client connection to %s", self.address) # Is the certificate correct? try: self.certCache.check(self.tls, self.targetKeyID, self.address) except MixProtocolBadAuth, e: LOG.warn("Certificate error: %s. Shutting down connection.", e) self._failPendingPackets() self.startShutdown() return
def rebuildIDCache(self):
    """Repopulate the nickname->ID cache from the pickled files stored
       under self.serverIDDir."""
    for entry in os.listdir(self.serverIDDir):
        path = os.path.join(self.serverIDDir, entry)
        version, payload = readPickled(path)
        if version != "V0":
            # Unknown on-disk format; skip rather than guess.
            LOG.warn("Weird file version %s on %s", version, path)
            continue
        nickname, ident = payload
        self.idCache.insertID(nickname, mixminion.Crypto.sha1(ident))
def onConnected(self): LOG.debug("Completed MMTP client connection to %s",self.address) # Is the certificate correct? try: self.certCache.check(self.tls, self.targetKeyID, self.address) except MixProtocolBadAuth, e: LOG.warn("Certificate error: %s. Shutting down connection.", e) self._failPendingPackets() self.startShutdown() return
def rebuildIDCache(self):
    """Repopulate the nickname->ID cache from the pickled files stored
       under self.serverIDDir."""
    for entry in os.listdir(self.serverIDDir):
        path = os.path.join(self.serverIDDir, entry)
        version, payload = readPickled(path)
        if version != "V0":
            # Unknown on-disk format; skip rather than guess.
            LOG.warn("Weird file version %s on %s", version, path)
            continue
        nickname, ident = payload
        self.idCache.insertID(nickname, mixminion.Crypto.sha1(ident))
def tryTimeout(self, cutoff):
    """Close self.sock if the last activity on this connection was
       before 'cutoff'.  Returns true iff the connection is timed out.
    """
    if self.lastActivity > cutoff:
        return 0
    # Idle past the deadline: report, notify, and tear the connection down.
    LOG.warn("Connection to %s timed out: %.2f seconds without activity",
             self.address, time.time()-self.lastActivity)
    self.onTimeout()
    self.__close()
    return 1
def getBaseDir(self):
    """Return the base directory for this configuration."""
    base = self["Server"]["BaseDir"]
    if base is None:
        # Fall back to the older Homedir setting.
        base = self["Server"]["Homedir"]
    if base is None:
        LOG.warn("Defaulting base directory to /var/spool/minion; this will change.")
        base = "/var/spool/minion"
    return base
def getHandlesByDestAndAge(self, destList, directory, notAfter=None,
                           warnUnused=1):
    """Return a list of handles for all messages queued for servers in
       a given list before a given date.

       destList -- A list of hostnames, ips, keyids, or nicknames
           for servers whose messages should be included in the result.
       directory -- An instance of ClientDirectory used to resolve
           nicknames.  This may be None if no nicknames are included.
       notAfter -- If provided, a time such that no messages queued
           later should be included
       warnUnused -- If true, we log a message for every element in
           destList that has no matching messages in the queue.
    """
    destSet = {}
    reverse = {}
    # Resolve nicknames to keyids when possible; 'reverse' remembers the
    # name the caller used for each keyid, for reporting below.
    for d in destList:
        if directory:
            keyid = directory.getKeyIDByNickname(d)
            if keyid:
                destSet[keyid] = 1
                reverse[keyid] = d
                continue
        destSet[d] = 1

    self.loadMetadata()
    result = []
    foundAny = {}    # destList aliases with at least one queued packet
    foundMatch = {}  # destList aliases with at least one old-enough packet
    for h in self.store.getAllMessages():
        _, r, when = self.store.getMetadata(h)
        # Match the routing info against any alias the caller might know.
        if (destSet.has_key(r.keyinfo) or
            (hasattr(r, 'hostname') and destSet.has_key(r.hostname)) or
            (hasattr(r, 'ip') and destSet.has_key(r.ip))):
            # Every alias this routing info answers to (None entries are
            # harmless: they never collide with real destList members).
            keys = [ getattr(r, 'hostname', None),
                     getattr(r, 'ip', None),
                     reverse.get(r.keyinfo, None),
                     r.keyinfo ]
            for k in keys:
                foundAny[k]=1
            if notAfter and when > notAfter:
                # Queued too recently: note that we saw it, but skip it.
                continue
            for k in keys:
                foundMatch[k]=1
            result.append(h)

    if warnUnused:
        for d in destList:
            if foundMatch.get(d):
                continue
            elif foundAny.get(d):
                LOG.warn("No expired packets found for %r", d)
            else:
                LOG.warn("No pending packets found for %r", d)
    return result
def onDataRead(self):
    """Process every complete fixed-size message in the input buffer:
       check its digest and queue the appropriate acknowledgment."""
    while self.inbuflen >= self.MESSAGE_LEN:
        data = self.getInbuf(self.MESSAGE_LEN, clear=1)
        # Wire format: control string, packet body, digest.
        control = data[:SEND_CONTROL_LEN]
        pkt = data[SEND_CONTROL_LEN:-DIGEST_LEN]
        digest = data[-DIGEST_LEN:]

        if control == JUNK_CONTROL:
            # Link padding: acknowledge but don't deliver.
            expectedDigest = sha1(pkt+"JUNK")
            replyDigest = sha1(pkt+"RECEIVED JUNK")
            replyControl = RECEIVED_CONTROL
            isJunk = 1
        elif control == SEND_CONTROL:
            expectedDigest = sha1(pkt+"SEND")
            if self.rejectPackets:
                replyDigest = sha1(pkt+"REJECTED")
                replyControl = REJECTED_CONTROL
            else:
                replyDigest = sha1(pkt+"RECEIVED")
                replyControl = RECEIVED_CONTROL
            isJunk = 0
        else:
            LOG.warn("Unrecognized command (%r) from %s. Closing connection.",
                     control, self.address)
            #failed
            self.startShutdown()
            return
        if expectedDigest != digest:
            LOG.warn("Invalid checksum from %s. Closing connection.",
                     self.address)
            #failed
            self.startShutdown()
            return
        else:
            if isJunk:
                LOG.debug("Link padding received from %s; Checksum valid.",
                          self.address)
            else:
                LOG.debug("Packet received from %s; Checksum valid.",
                          self.address)
            # Make sure we process the packet before we queue the ack.
            if isJunk:
                self.junkCallback()
            elif self.rejectPackets:
                self.rejectCallback()
            else:
                self.packetConsumer(pkt)
            # Queue the ack.
            self.beginWriting(replyControl+replyDigest)
def rescan(self): """Check all fragment metadata objects on disk, and reconstruct our internal view of message states. """ # Delete all internal state; reload FragmentMetadatas from disk. self.store.loadAllMetadata(lambda: None) meta = self.store._metadata_cache self.states = {} badMessageIDs = {} # map from bad messageID to 1 unneededHandles = [] # list of handles that aren't needed. for h, fm in meta.items(): if not fm: LOG.debug("Removing fragment %s with missing metadata", h) self.store.removeMessage(h) continue try: mid = fm.messageid if badMessageIDs.has_key(mid): # We've already decided to reject fragments with this ID. pass else: # All is well; try to register the fragment/chunk. If it's # redundant or inconsistent, raise an exception. state = self._getState(fm) if fm.isChunk: state.addChunk(h, fm) else: state.addFragment(h, fm) except MismatchedFragment: # Mark the message ID for this fragment as inconsistent. badMessageIDs[mid] = 1 except UnneededFragment: LOG.warn("Found redundant fragment %s in pool", h) # Remember that this message is unneeded. unneededHandles.append(h) # Check for fragments superseded by chunks -- those are unneeded too. for s in self.states.values(): unneededHandles.extend(s.getUnneededFragmentHandles()) # Delete unneeded fragments. for h in unneededHandles: try: fm = meta[h] except KeyError: continue LOG.debug("Removing unneeded fragment %s from message ID %r", fm.idx, fm.messageid) self.store.removeMessage(h) # Now nuke inconsistent messages. self._deleteMessageIDs(badMessageIDs, "REJECTED")
def tryTimeout(self, cutoff):
    """Close self.sock if the last activity on this connection was
       before 'cutoff'.  Returns true iff the connection is timed out.
    """
    if self.lastActivity > cutoff:
        return 0
    # Idle past the deadline: report, notify, and tear the connection down.
    LOG.warn("Connection to %s timed out: %.2f seconds without activity",
             self.address, time.time() - self.lastActivity)
    self.onTimeout()
    self.__close()
    return 1
def onDataRead(self):
    """Process every complete fixed-size message in the input buffer:
       check its digest and queue the appropriate acknowledgment."""
    while self.inbuflen >= self.MESSAGE_LEN:
        data = self.getInbuf(self.MESSAGE_LEN, clear=1)
        # Wire format: control string, packet body, digest.
        control = data[:SEND_CONTROL_LEN]
        pkt = data[SEND_CONTROL_LEN:-DIGEST_LEN]
        digest = data[-DIGEST_LEN:]

        if control == JUNK_CONTROL:
            # Link padding: acknowledge but don't deliver.
            expectedDigest = sha1(pkt+"JUNK")
            replyDigest = sha1(pkt+"RECEIVED JUNK")
            replyControl = RECEIVED_CONTROL
            isJunk = 1
        elif control == SEND_CONTROL:
            expectedDigest = sha1(pkt+"SEND")
            if self.rejectPackets:
                replyDigest = sha1(pkt+"REJECTED")
                replyControl = REJECTED_CONTROL
            else:
                replyDigest = sha1(pkt+"RECEIVED")
                replyControl = RECEIVED_CONTROL
            isJunk = 0
        else:
            LOG.warn("Unrecognized command (%r) from %s. Closing connection.",
                     control, self.address)
            #failed
            self.startShutdown()
            return
        if expectedDigest != digest:
            LOG.warn("Invalid checksum from %s. Closing connection.",
                     self.address)
            #failed
            self.startShutdown()
            return
        else:
            if isJunk:
                LOG.debug("Link padding received from %s; Checksum valid.",
                          self.address)
            else:
                LOG.debug("Packet received from %s; Checksum valid.",
                          self.address)
            # Make sure we process the packet before we queue the ack.
            if isJunk:
                self.junkCallback()
            elif self.rejectPackets:
                self.rejectCallback()
            else:
                self.packetConsumer(pkt)
            # Queue the ack.
            self.beginWriting(replyControl+replyDigest)
def _checkHostnameIsLocal(name):
    """Warn if 'name' resolves to a loopback/reserved address.  Names are
       remembered in _KNOWN_LOCAL_HOSTNAMES so each is checked only once."""
    if _KNOWN_LOCAL_HOSTNAMES.has_key(name):
        return
    for family, addr, _ in mixminion.NetUtils.getIPs(name):
        if family == mixminion.NetUtils.AF_INET:
            isReserved = addr.startswith("127.") or addr.startswith("0.")
        else:
            # Non-IPv4 result: flag the unspecified and loopback addresses.
            isReserved = addr in ("::", "::1")
        if isReserved:
            LOG.warn("Hostname %r resolves to reserved address %s",
                     name, addr)
    _KNOWN_LOCAL_HOSTNAMES[name] = 1
def __close(self, gotClose=0):
    """helper: close the underlying socket without cleaning up the TLS
       connection."""
    if gotClose:
        # The peer closed on us: report which phase we were in.
        if self.__stateFn == self.__connectFn:
            LOG.warn("Couldn't connect to %s", self.address)
        else:
            LOG.warn("Unexpectedly closed connection to %s", self.address)
        self.onTLSError()
    self.sock.close()
    self.sock = None
    self.tls = None
    self.__stateFn = self.__closedFn
    self.onClosed()
def _checkHostnameIsLocal(name):
    """Warn if 'name' resolves to a loopback/reserved address.  Names are
       remembered in _KNOWN_LOCAL_HOSTNAMES so each is checked only once."""
    if _KNOWN_LOCAL_HOSTNAMES.has_key(name):
        return
    for family, addr, _ in mixminion.NetUtils.getIPs(name):
        if family == mixminion.NetUtils.AF_INET:
            isReserved = addr.startswith("127.") or addr.startswith("0.")
        else:
            # Non-IPv4 result: flag the unspecified and loopback addresses.
            isReserved = addr in ("::", "::1")
        if isReserved:
            LOG.warn("Hostname %r resolves to reserved address %s",
                     name, addr)
    _KNOWN_LOCAL_HOSTNAMES[name] = 1
def removeIdentityKey(self):
    """Remove this server's identity key."""
    fn = os.path.join(self.keyDir, "identity.key")
    if os.path.exists(fn):
        # Give the operator a chance to hit ^C before the key is gone.
        LOG.warn("Removing identity key in 10 seconds")
        time.sleep(10)
        LOG.warn("Removing identity key")
        secureDelete([fn], blocking=1)
    else:
        LOG.info("No identity key to remove.")

    if os.path.exists(self.dhFile):
        LOG.info("Removing diffie-helman parameters file")
        secureDelete([self.dhFile], blocking=1)
def __close(self, gotClose=0):
    """helper: close the underlying socket without cleaning up the TLS
       connection."""
    if gotClose:
        # The peer closed on us: report which phase we were in.
        if self.__stateFn == self.__connectFn:
            LOG.warn("Couldn't connect to %s", self.address)
        else:
            LOG.warn("Unexpectedly closed connection to %s", self.address)
        self.onTLSError()
    self.sock.close()
    self.sock = None
    self.tls = None
    self.__stateFn = self.__closedFn
    self.onClosed()
def _buildReplyBlockImpl(path, exitType, exitInfo, expiryTime=0,
                         secretPRNG=None, tag=None):
    """Helper function: makes a reply block, given a tag and a PRNG to
       generate secrets.  Returns a 3-tuple containing (1) a
       newly-constructed reply block, (2) a list of secrets used to
       make it, (3) a tag.

       path: A list of ServerInfo
       exitType: Routing type to use for the final node
       exitInfo: Routing info for the final node, not including tag.
       expiryTime: The time at which this block should expire.
       secretPRNG: A PRNG to use for generating secrets.  If not
           provided, uses an AES counter-mode stream seeded from our
           entropy source.  Note: the secrets are generated so that
           they will be used to encrypt the message in reverse order.
       tag: If provided, a 159-bit tag.  If not provided, a new one
           is generated.
       """
    if secretPRNG is None:
        secretPRNG = Crypto.getCommonPRNG()
    if expiryTime is None:
        # XXXX This is dangerous, and should go away; the user should
        # XXXX *always* specify an expiry time.
        LOG.warn("Inferring expiry time for reply block")
        expiryTime = min([s.getValidUntil() for s in path])

    checkPathLength(None, path, exitType, exitInfo, explicitSwap=0)

    LOG.debug("Building reply block for path %s",
              [s.getNickname() for s in path])
    LOG.debug(" Delivering to %04x:%r", exitType, exitInfo)

    # The message is encrypted first by the end-to-end key, then by
    # each of the path keys in order. We need to reverse these steps, so we
    # generate the path keys back-to-front, followed by the end-to-end key.
    secrets = [ secretPRNG.getBytes(SECRET_LEN) for _ in range(len(path)+1) ]
    headerSecrets = secrets[:-1]
    headerSecrets.reverse()
    sharedKey = secrets[-1]

    # (This will go away when we deprecate 'stateful' reply blocks
    if tag is None:
        tag = _getRandomTag(secretPRNG)

    # The tag rides in front of the exit info inside the header.
    header = _buildHeader(path, headerSecrets, exitType, tag+exitInfo,
                          paddingPRNG=Crypto.getCommonPRNG())

    return ReplyBlock(header, expiryTime,
                      SWAP_FWD_HOST_TYPE,
                      path[0].getMMTPHostInfo().pack(), sharedKey), secrets, tag
def getCurrentDescriptor(self, now=None):
    """Return the descriptor of the keyset that is live at time 'now'
       (defaulting to the present).  If no keyset is live, warn and fall
       back to the last keyset's descriptor."""
    self._lock.acquire()
    if now is None:
        now = time.time()
    try:
        for keyset in self.getServerKeysets():
            start, end = keyset.getLiveness()
            if start <= now <= end:
                return keyset.getServerDescriptor()
        LOG.warn("getCurrentDescriptor: no live keysets??")
        return self.getServerKeysets()[-1].getServerDescriptor()
    finally:
        self._lock.release()
def getCurrentDescriptor(self, now=None):
    """Return the descriptor of the keyset that is live at time 'now'
       (defaulting to the present).  If no keyset is live, warn and fall
       back to the last keyset's descriptor."""
    self._lock.acquire()
    if now is None:
        now = time.time()
    try:
        for keyset in self.getServerKeysets():
            start, end = keyset.getLiveness()
            if start <= now <= end:
                return keyset.getServerDescriptor()
        LOG.warn("getCurrentDescriptor: no live keysets??")
        return self.getServerKeysets()[-1].getServerDescriptor()
    finally:
        self._lock.release()
class ServerInbox:
    """A ServerInbox holds server descriptors received from the outside
       world that are not yet ready to be included in the directory.
       """
    ## Fields:
    # newQueue: IncomingQueue object to hold descriptors for previously
    #       unknown servers.
    # updateQueue: IncomingQueue object to hold descriptors for currently
    #       known servers.
    # idCache: cache mapping nicknames to identity digests.
    def __init__(self, base, idCache):
        """Initialize a ServerInbox to store its files in 'base', and
           check server descriptors against the IDCache 'idCache'."""
        # Both queues share a single 'reject' directory under 'base'.
        self.newQueue = IncomingQueue(os.path.join(base, "new"),
                                      os.path.join(base, "reject"))
        self.updateQueue = IncomingQueue(os.path.join(base, "updates"),
                                         os.path.join(base, "reject"))
        self.idCache = idCache

    def receiveServer(self, text, source):
        """Process a new server descriptor and store it for later
           action.  (To be run by the CGI user.)

           If the server will be automatically inserted, return true.
           If the server will be inserted (given administrator
           intervention), raise ServerQueuedException.  If there is a
           problem, log it, and raise UIError.

           text -- a string containing a new server descriptor.
           source -- a (human readable) string describing the source
               of the descriptor, used in error messages.
           """
        try:
            server = ServerInfo(string=text, assumeValid=0)
        except MixError, e:
            # Malformed descriptor: reject outright.
            LOG.warn("Rejected invalid server from %s: %s", source, e)
            raise UIError("Server descriptor was not valid: %s"%e)

        nickname = server.getNickname()

        try:
            known = self.idCache.containsServer(server)
        except MismatchedID:
            # The nickname is already bound to a different identity key.
            LOG.warn("Rejected server with mismatched identity from %s",
                     source)
            self.updateQueue.queueRejectedServer(text, server)
            raise UIError(("I already know a server named "
                           "%s with a different key.")%nickname)

        if not known:
            # New nickname: hold it for manual approval.
            LOG.info("Received previously unknown server %s from %s",
                     nickname, source)
            self.newQueue.queueIncomingServer(text, server)
            raise ServerQueuedException(
                "Server queued pending manual checking")
        else:
            # Update for a known server: queue for automatic insertion.
            LOG.info("Received update for server %s from %s",
                     nickname, source)
            self.updateQueue.queueIncomingServer(text, server)
            return 1
def acceptNewServer(self, serverList, nickname):
    """Move the descriptors for a new server with a given nickname
       into the directory.  (To be run by a the directory user.)

       If the nickname is of the format name:FINGERPRINT, then only
       insert servers with the nickname/fingerprint pair.
    """
    if ':' in nickname:
        nickname, fingerprint = nickname.split(":")
    else:
        fingerprint = None

    lcnickname = nickname.lower()
    incoming = self.newQueue.readPendingServers()
    # Do we have any pending servers of the desired name?
    incoming = [ (fname, server, text, fp)
                 for fname, server, text, fp in incoming
                 if server.getNickname().lower() == lcnickname ]
    if not incoming:
        raise UIError("No incoming servers named %s"%nickname)

    if not fingerprint:
        # No fingerprint specified: all pending servers with this
        # nickname must share a single KeyID.
        fps = [fp for f, s, t, fp in incoming]
        for f in fps:
            if f != fps[0]:
                raise UIError("Multiple KeyIDs for servers named %s"%
                              nickname)
        reject = []
    else:
        # Fingerprint given: keep matches, reject the rest.
        reject = [ (f, s, t, fp) for f, s, t, fp in incoming
                   if fp != fingerprint ]
        incoming = [ (f, s, t, fp) for f, s, t, fp in incoming
                     if fp == fingerprint ]
        if not incoming:
            raise UIError("No servers named %s with matching KeyID"%
                          nickname)
        if reject:
            LOG.warn("Rejecting %s servers named %s with unmatched KeyIDs",
                     len(reject), nickname)

    try:
        serverList._lock()
        # Bind the nickname to the identity before accepting descriptors.
        serverList.learnServerID(incoming[0][1])
        self._doAccept(serverList, self.newQueue, incoming, reject,
                       knownOnly=1)
    finally:
        serverList._unlock()
def loadAllMetadata(self, newDataFn):
    """For all objects in the store, load their metadata into the
       internal cache.  If any object is missing its metadata, create
       metadata for it by invoking newDataFn(handle)."""
    try:
        self._lock.acquire()
        self._metadata_cache = {}
        for h in self.getAllMessages():
            try:
                self.getMetadata(h)
            except KeyError:
                # No metadata on disk: synthesize it via the callback.
                LOG.warn("Missing metadata for file %s",h)
                self.setMetadata(h, newDataFn(h))
            except CorruptedFile:
                # Unreadable entry: skip it and leave it out of the cache.
                continue
    finally:
        self._lock.release()
def cleanMetadata(self, secureDeleteFn=None):
    """Find all orphaned metadata files and remove them.

       secureDeleteFn -- if provided, called with the list of paths to
           remove; otherwise the module-level secureDelete is used.
    """
    hSet = {}
    for h in self.getAllMessages():
        hSet[h] = 1
    rmv = []
    for h in [ fn[5:] for fn in os.listdir(self.dir)
               if fn.startswith("meta_") ]:
        if not hSet.get(h):
            # BUG FIX: build a full path under self.dir; the original
            # appended the bare filename, which only resolves correctly
            # when the process cwd happens to be self.dir.
            rmv.append(os.path.join(self.dir, "meta_"+h))
    if rmv:
        LOG.warn("Removing %s orphaned metadata files from %s",
                 len(rmv), self.dir)
        if secureDeleteFn:
            secureDeleteFn(rmv)
        else:
            secureDelete(rmv, blocking=1)
def loadAllMetadata(self, newDataFn):
    """For all objects in the store, load their metadata into the
       internal cache.  If any object is missing its metadata, create
       metadata for it by invoking newDataFn(handle)."""
    try:
        self._lock.acquire()
        self._metadata_cache = {}
        for h in self.getAllMessages():
            try:
                self.getMetadata(h)
            except KeyError:
                # No metadata on disk: synthesize it via the callback.
                LOG.warn("Missing metadata for file %s", h)
                self.setMetadata(h, newDataFn(h))
            except CorruptedFile:
                # Unreadable entry: skip it and leave it out of the cache.
                continue
    finally:
        self._lock.release()
def generateVoteDirectory(identity, servers, goodServerNames,
                          voters, validAfter, clientVersions, serverVersions,
                          validatedDigests=None):
    """Build a vote directory from 'servers'.

       servers -- a sequence of ServerInfo objects and/or raw descriptor
          strings; malformed entries are logged and dropped.
       validatedDigests -- passed through to ServerInfo to skip
          re-validating already-checked digests.

       NOTE(review): only the descriptor-validation prologue is visible in
       this chunk; the rest of the function lies outside this view.
    """
    valid = []
    for server in servers:
        try:
            if isinstance(server, mixminion.ServerInfo.ServerInfo):
                # Already-parsed descriptors must still carry their
                # original signed contents.
                assert server._originalContents
                s = server
            else:
                # Re-parse from text, keeping the contents for later use.
                s = mixminion.ServerInfo.ServerInfo(
                    string=str(server), validatedDigests=validatedDigests,
                    _keepContents=1)
        except ConfigError, e:
            LOG.warn("Rejecting malformed serverinfo: %s",e)
        else:
            valid.append(s)
def checkKeys(self):
    """Internal method: read information about all this server's
       currently-prepared keys from disk.

       May raise ConfigError if any of the server descriptors on disk
       are invalid.

       NOTE(review): the tail of this method is outside this chunk;
       'badKeySets' is initialized here but used further down.
    """
    self.keySets = []
    badKeySets = []
    # Track the lowest and highest key-set numbers seen so far.
    firstKey = sys.maxint
    lastKey = 0
    LOG.debug("Scanning server keystore at %s", self.keyDir)
    if not os.path.exists(self.keyDir):
        LOG.info("Creating server keystore at %s", self.keyDir)
        createPrivateDir(self.keyDir)
    # Iterate over the entries in HOME/keys.
    for dirname in os.listdir(self.keyDir):
        # Skip any that aren't directories named "key_INT".
        if not os.path.isdir(os.path.join(self.keyDir,dirname)):
            continue
        if not dirname.startswith('key_'):
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue
        keysetname = dirname[4:]
        try:
            setNum = int(keysetname)
            # Keep track of the first and last used key numbers.
            if setNum < firstKey: firstKey = setNum
            if setNum > lastKey: lastKey = setNum
        except ValueError:
            # Directory name after "key_" was not an integer.
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue
        # Load the key set and check it; 'ok' records whether both the
        # private keys and the descriptor are usable.
        keyset = ServerKeyset(self.keyDir, keysetname, self.hashDir)
        ok = 1
        try:
            keyset.checkKeys()
        except MixError, e:
            LOG.warn("Error checking private keys in keyset %s: %s",
                     keysetname, str(e))
            ok = 0
        try:
            if ok:
                keyset.getServerDescriptor()
        except (ConfigError, IOError), e:
            LOG.warn("Key set %s has invalid/missing descriptor: %s",
                     keysetname, str(e))
            ok = 0
def checkKeys(self):
    """Internal method: read information about all this server's
       currently-prepared keys from disk.

       May raise ConfigError if any of the server descriptors on disk
       are invalid.

       NOTE(review): this view ends mid-method; 'badKeySets' is set up
       here but consumed past the visible end.
    """
    self.keySets = []
    badKeySets = []
    # Lowest/highest key-set numbers encountered while scanning.
    firstKey = sys.maxint
    lastKey = 0
    LOG.debug("Scanning server keystore at %s", self.keyDir)
    if not os.path.exists(self.keyDir):
        LOG.info("Creating server keystore at %s", self.keyDir)
        createPrivateDir(self.keyDir)
    # Iterate over the entries in HOME/keys.
    for dirname in os.listdir(self.keyDir):
        # Skip any that aren't directories named "key_INT".
        if not os.path.isdir(os.path.join(self.keyDir, dirname)):
            continue
        if not dirname.startswith('key_'):
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue
        keysetname = dirname[4:]
        try:
            setNum = int(keysetname)
            # Keep track of the first and last used key numbers.
            if setNum < firstKey: firstKey = setNum
            if setNum > lastKey: lastKey = setNum
        except ValueError:
            # Suffix after "key_" was not an integer: not a key set.
            LOG.warn("Unexpected directory %s under %s",
                     dirname, self.keyDir)
            continue
        # Check the key set's private keys, then its server descriptor;
        # 'ok' stays 1 only if both checks pass.
        keyset = ServerKeyset(self.keyDir, keysetname, self.hashDir)
        ok = 1
        try:
            keyset.checkKeys()
        except MixError, e:
            LOG.warn("Error checking private keys in keyset %s: %s",
                     keysetname, str(e))
            ok = 0
        try:
            if ok:
                keyset.getServerDescriptor()
        except (ConfigError, IOError), e:
            LOG.warn("Key set %s has invalid/missing descriptor: %s",
                     keysetname, str(e))
            ok = 0
def cleanMetadata(self, secureDeleteFn=None):
    """Delete every metadata file whose corresponding object is gone.

       secureDeleteFn -- optional deletion hook; when None, the module's
          secureDelete is used with blocking=1.
    """
    # Map each live handle to 1 so the membership tests below are cheap.
    known = {}
    for h in self.getAllMessages():
        known[h] = 1
    # "meta_" files whose trailing handle has no live object are stale.
    stale = [ "meta_" + fn[5:]
              for fn in os.listdir(self.dir)
              if fn.startswith("meta_") and not known.get(fn[5:]) ]
    if stale:
        LOG.warn("Removing %s orphaned metadata files from %s",
                 len(stale), self.dir)
        if secureDeleteFn:
            secureDeleteFn(stale)
        else:
            secureDelete(stale, blocking=1)
def getIP(name, preferIP4=PREFER_INET4): """Resolve the hostname 'name' and return the 'best' answer. An answer is either a 3-tuple as returned by getIPs, or a 3-tuple of ('NOENT', reason, Time) if no answers were found. If both IPv4 and IPv6 addresses are found, return an IPv4 address iff preferIPv4 is true. If this host does not support IPv6, never return an IPv6 address; return a ('NOENT', reason, Time) tuple if only ipv6 addresses are found. """ _,haveIP6 = getProtocolSupport() try: r = getIPs(name) inet4 = [ addr for addr in r if addr[0] == AF_INET ] inet6 = [ addr for addr in r if addr[0] == AF_INET6 ] if not (inet4 or inet6): LOG.warn("getIP returned no inet addresses for %r",name) return ("NOENT", "No inet addresses returned", time.time()) if inet6 and not inet4 and not haveIP6: return ("NOENT", "All addresses were IPv6, and this host has no IPv6 support", time.time()) best4=best6=None if inet4: best4=inet4[0] if inet6: best6=inet6[0] if preferIP4: res = best4 or best6 else: res = best6 or best4 assert res assert res[0] in (AF_INET, AF_INET6) assert nameIsStaticIP(res[1]) protoname = (res[0] == AF_INET) and "inet" or "inet6" LOG.trace("Result for getIP(%r): %s:%s (%d others dropped)", name,protoname,res[1],len(r)-1) return res except socket.error, e: LOG.trace("Result for getIP(%r): error:%r",name,e) if len(e.args) == 2: return ("NOENT", str(e[1]), time.time()) else: return ("NOENT", str(e), time.time())
def receiveServer(self, text, source):
    """Process a new server descriptor and store it for later action.
       (To be run by the CGI user.)

       If the server will be automatically inserted, return true.
       If the server will be inserted (given administrator intervention),
       raise ServerQueuedException.  If there is a problem, log it, and
       raise UIError.

       text -- a string containing a new server descriptor.
       source -- a (human readable) string describing the source
           of the descriptor, used in error messages.

       NOTE(review): only the validation prologue is visible here; the
       remainder of the method lies outside this chunk.
    """
    try:
        # Parse without assuming validity so bad input is rejected here.
        server = ServerInfo(string=text, assumeValid=0)
    except MixError, e:
        LOG.warn("Rejected invalid server from %s: %s", source, e)
        raise UIError("Server descriptor was not valid: %s"%e)
def readPendingServers(self):
    """Scan all of the servers waiting in the incoming directory.  If
       any are bad, remove them.  Return a list of (filename, ServerInfo,
       server descriptor, ID Fingerprint) tuples for all the servers in
       the directory.

       NOTE(review): a trailing "return res" is expected but is not
       visible in this chunk.
    """
    res = []
    for fname in os.listdir(self.incomingDir):
        path = os.path.join(self.incomingDir, fname)
        try:
            # _readServer yields the raw descriptor text plus its
            # parsed ServerInfo.
            text, server = _readServer(path)
        except MixError, e:
            # Malformed descriptor: delete it and keep scanning.
            os.unlink(path)
            LOG.warn(
               "Removed a bad server descriptor %s from incoming dir: %s",
               fname, e)
            continue
        fp = formatBase64(getIDFingerprint(server))
        res.append((fname, server, text, fp))