def processEvents(self):
    """
    Process any packets to receive and call callbacks such as onData,
    onInterest or onTimeout. This returns immediately if there is no data to
    receive. This blocks while calling the callbacks. You should repeatedly
    call this from an event loop, with calls to sleep as needed so that the
    loop doesn't use 100% of the CPU. Since processEvents modifies the pending
    interest table, your application should make sure that it calls
    processEvents in the same thread as expressInterest (which also modifies
    the pending interest table).

    :raises: This may raise an exception for reading data or in the callback
      for processing the data. If you call this from a main event loop, you
      may want to catch and log/disregard all exceptions.
    """
    self._transport.processEvents()

    # Check for PIT entry timeouts. Go backwards through the list so we can
    # erase entries.
    nowMilliseconds = Common.getNowMilliseconds()
    i = len(self._pendingInterestTable) - 1
    while i >= 0:
        if self._pendingInterestTable[i].isTimedOut(nowMilliseconds):
            # Save the PendingInterest and remove it from the PIT. Then
            # call the callback.
            pendingInterest = self._pendingInterestTable[i]
            self._pendingInterestTable.pop(i)
            pendingInterest.callTimeout()

            # Refresh now since the timeout callback might have delayed.
            nowMilliseconds = Common.getNowMilliseconds()
        i -= 1
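# Example (not part of the library): a minimal consumer loop that drives
# processEvents as the docstring above describes. The prefix name and the
# 10 ms sleep are illustrative assumptions, not values required by the API.
import time
from pyndn import Face, Name

def onData(interest, data):
    print("Got data: " + data.getName().toUri())

def onTimeout(interest):
    print("Timeout for: " + interest.getName().toUri())

face = Face()  # Connects to a local forwarder by default.
face.expressInterest(Name("/example/prefix"), onData, onTimeout)

# Repeatedly process events, sleeping briefly so we don't use 100% of the CPU.
while True:
    face.processEvents()
    time.sleep(0.01)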
def __str__(self):
    s = "Certificate name:\n"
    s += " " + self.getName().toUri() + "\n"
    s += "Validity:\n"

    dateFormat = "%Y%m%dT%H%M%S"
    notBeforeStr = Common.datetimeFromTimestamp(
      self.getNotBefore()).strftime(dateFormat)
    notAfterStr = Common.datetimeFromTimestamp(
      self.getNotAfter()).strftime(dateFormat)

    s += " NotBefore: " + notBeforeStr + "\n"
    s += " NotAfter: " + notAfterStr + "\n"

    for sd in self._subjectDescriptionList:
        s += "Subject Description:\n"
        s += " " + str(sd.getOid()) + ": " + sd.getValue().toRawStr() + "\n"

    s += "Public key bits:\n"
    s += Common.base64Encode(self.getPublicKeyDer().toBytes(), True)

    if len(self._extensionList) > 0:
        s += "Extensions:\n"
        for ext in self._extensionList:
            s += " OID: " + ext.getOid() + "\n"
            s += " Is critical: " + ('Y' if ext.isCritical() else 'N') + "\n"
            # Use Blob.toHex so this works on both Python 2 and Python 3.
            s += " Value: " + ext.getValue().toHex() + "\n"

    return s
def generateKeyPair(self, keyName, params):
    """
    Generate a pair of asymmetric keys.

    :param Name keyName: The name of the key pair.
    :param KeyParams params: The parameters of the key.
    """
    if self.doesKeyExist(keyName, KeyClass.PUBLIC):
        raise SecurityException("Public key already exists")
    if self.doesKeyExist(keyName, KeyClass.PRIVATE):
        raise SecurityException("Private key already exists")

    try:
        privateKey = TpmPrivateKey.generatePrivateKey(params)
        privateKeyDer = privateKey.toPkcs8().toBytes()
        publicKeyDer = privateKey.derivePublicKey().toBytes()
    except Exception as ex:
        raise SecurityException("Error in generatePrivateKey: " + str(ex))

    keyUri = keyName.toUri()
    keyFilePathNoExtension = self.maintainMapping(keyUri)
    publicKeyFilePath = keyFilePathNoExtension + ".pub"
    privateKeyFilePath = keyFilePathNoExtension + ".pri"

    with open(publicKeyFilePath, 'w') as keyFile:
        keyFile.write(Common.base64Encode(publicKeyDer, True))
    with open(privateKeyFilePath, 'w') as keyFile:
        keyFile.write(Common.base64Encode(privateKeyDer, True))

    os.chmod(publicKeyFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
    os.chmod(privateKeyFilePath, stat.S_IRUSR)
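# Example (assumed usage): calling generateKeyPair on a file-based private key
# storage. The class name FilePrivateKeyStorage, its import path, and the key
# name below are assumptions for illustration; adjust them to the actual code.
from pyndn import Name
from pyndn.security.key_params import RsaKeyParams
from pyndn.security.identity import FilePrivateKeyStorage

storage = FilePrivateKeyStorage()
keyName = Name("/example/identity/ksk-1")  # Hypothetical key name.
# Writes base64 .pub and .pri files and restricts their permissions as above.
storage.generateKeyPair(keyName, RsaKeyParams(2048))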
def testDifference(self):
    size = 10
    ownIblt = InvertibleBloomLookupTable(size)
    receivedIblt = InvertibleBloomLookupTable(ownIblt)

    diff = ownIblt.difference(receivedIblt)

    # Non-empty positive means we have some elements that the other doesn't.
    positive = set()
    negative = set()
    self.assertTrue(diff.listEntries(positive, negative))
    self.assertEqual(0, len(positive))
    self.assertEqual(0, len(negative))

    prefix = Name("/test/memphis").appendNumber(1).toUri()
    newHash = Common.murmurHash3Blob(11, prefix)
    ownIblt.insert(newHash)

    diff = ownIblt.difference(receivedIblt)
    self.assertTrue(diff.listEntries(positive, negative))
    self.assertEqual(1, len(positive))
    self.assertEqual(0, len(negative))

    prefix = Name("/test/csu").appendNumber(1).toUri()
    newHash = Common.murmurHash3Blob(11, prefix)
    receivedIblt.insert(newHash)

    diff = ownIblt.difference(receivedIblt)
    self.assertTrue(diff.listEntries(positive, negative))
    self.assertEqual(1, len(positive))
    self.assertEqual(1, len(negative))
def testHigherSequence(self):
    # This is the case where we can't recognize if the received IBF has a
    # higher sequence number. This is relevant to the full sync case.
    size = 10
    ownIblt = InvertibleBloomLookupTable(size)
    receivedIblt = InvertibleBloomLookupTable(size)

    prefix = Name("/test/memphis").appendNumber(3).toUri()
    hash1 = Common.murmurHash3Blob(11, prefix)
    ownIblt.insert(hash1)

    prefix2 = Name("/test/memphis").appendNumber(4).toUri()
    hash2 = Common.murmurHash3Blob(11, prefix2)
    receivedIblt.insert(hash2)

    diff = ownIblt.difference(receivedIblt)
    positive = set()
    negative = set()

    self.assertTrue(diff.listEntries(positive, negative))
    self.assertEqual(1, len(positive))
    self.assertTrue(min(positive) == hash1)

    self.assertEqual(1, len(negative))
    self.assertTrue(min(negative) == hash2)
def testDifferenceBwOversizedIblts(self):
    # Insert 50 elements into an IBLT of size 10. Then check that we can
    # still list the difference even though we can't list the IBLT itself.
    size = 10
    ownIblt = InvertibleBloomLookupTable(size)
    for i in range(50):
        prefix = Name("/test/memphis" + str(i)).appendNumber(1).toUri()
        newHash = Common.murmurHash3Blob(11, prefix)
        ownIblt.insert(newHash)

    receivedIblt = InvertibleBloomLookupTable(ownIblt)

    prefix = Name("/test/ucla").appendNumber(1).toUri()
    newHash = Common.murmurHash3Blob(11, prefix)
    ownIblt.insert(newHash)

    diff = ownIblt.difference(receivedIblt)

    positive = set()
    negative = set()
    self.assertTrue(diff.listEntries(positive, negative))
    self.assertEqual(1, len(positive))
    self.assertEqual(newHash, min(positive))
    self.assertEqual(0, len(negative))

    self.assertTrue(not ownIblt.listEntries(positive, negative))
    self.assertTrue(not receivedIblt.listEntries(positive, negative))
def test_self_signed_cert_validity(self):
    certificate = (self._fixture.addIdentity(
      Name("/Security/V2/TestKeyChain/SelfSignedCertValidity"))
      .getDefaultKey().getDefaultCertificate())
    self.assertTrue(certificate.isValid())
    # Check 10 years from now.
    self.assertTrue(certificate.isValid(
      Common.getNowMilliseconds() + 10 * 365 * 24 * 3600 * 1000.0))
    # Check that notAfter is later than 10 years from now.
    self.assertTrue(certificate.getValidityPeriod().getNotAfter() >
      Common.getNowMilliseconds() + 10 * 365 * 24 * 3600 * 1000.0)
def test_overwrite(self):
    fixture = self.fixture
    pibImpl = PibMemory()

    try:
        PibKeyImpl(fixture.id1Key1Name, pibImpl)
        self.fail("Did not throw the expected exception")
    except Pib.Error:
        pass
    else:
        self.fail("Did not throw the expected exception")

    PibKeyImpl(fixture.id1Key1Name, fixture.id1Key1.buf(), pibImpl)
    key1 = PibKeyImpl(fixture.id1Key1Name, pibImpl)

    # Overwriting the key should work.
    PibKeyImpl(fixture.id1Key1Name, fixture.id1Key2.buf(), pibImpl)
    key2 = PibKeyImpl(fixture.id1Key1Name, pibImpl)

    # key1 should have cached the original public key.
    self.assertTrue(not key1.getPublicKey().equals(key2.getPublicKey()))
    self.assertTrue(key2.getPublicKey().equals(fixture.id1Key2))

    key1.addCertificate(fixture.id1Key1Cert1)
    # Use the wire encoding to check equivalence.
    self.assertTrue(
      key1.getCertificate(fixture.id1Key1Cert1.getName()).wireEncode().equals(
        fixture.id1Key1Cert1.wireEncode()))

    otherCert = CertificateV2(fixture.id1Key1Cert1)
    otherCert.getSignature().getValidityPeriod().setPeriod(
      Common.getNowMilliseconds(), Common.getNowMilliseconds() + 1000)
    # Don't bother resigning so we don't have to load a private key.

    self.assertTrue(fixture.id1Key1Cert1.getName().equals(otherCert.getName()))
    self.assertTrue(otherCert.getContent().equals(
      fixture.id1Key1Cert1.getContent()))
    self.assertFalse(otherCert.wireEncode().equals(
      fixture.id1Key1Cert1.wireEncode()))

    key1.addCertificate(otherCert)

    self.assertTrue(
      key1.getCertificate(fixture.id1Key1Cert1.getName()).wireEncode().equals(
        otherCert.wireEncode()))
def toUri(self):
    """
    Return a string representation of the exclude values.

    :return: The string representation.
    :rtype: string
    """
    if len(self._entries) == 0:
        return ""

    result = BytesIO()
    didFirst = False
    for entry in self._entries:
        if didFirst:
            # write is required to take a byte buffer.
            result.write(Exclude._comma)
        if entry.getType() == Exclude.ANY:
            # write is required to take a byte buffer.
            result.write(Exclude._star)
        else:
            entry.getComponent().toEscapedString(result)
        didFirst = True

    return Common.getBytesIOString(result)
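# Example (assumed usage): building an Exclude and printing its URI form. The
# components are arbitrary; the expected output follows from the comma and "*"
# separators used by toUri above.
from pyndn import Exclude, Name

exclude = Exclude()
exclude.appendComponent(Name("/a").get(0))  # Exclude the single component "a".
exclude.appendAny()                         # Then exclude any greater component.
print(exclude.toUri())                      # Expected to print "a,*".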
def toHex(self, result = None):
    """
    Return the hex representation of the bytes in array.

    :param BytesIO result: (optional) The BytesIO stream to write to. If
      omitted, return a str with the result.
    :return: The hex string (only if result is omitted).
    :rtype: str
    """
    if result == None:
        if self._array == None:
            return ""

        result = BytesIO()
        self.toHex(result)
        return Common.getBytesIOString(result)

    if self._array == None:
        return
    array = self.buf()

    hexBuffer = bytearray(2)
    for i in range(len(array)):
        # Get the hex string and transfer to hexBuffer for writing.
        hex = "%02x" % array[i]
        hexBuffer[0] = ord(hex[0])
        hexBuffer[1] = ord(hex[1])
        result.write(hexBuffer)
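# Example: toHex on a Blob with arbitrary byte values.
from pyndn.util import Blob

blob = Blob(bytearray([0x01, 0xab, 0xff]))
print(blob.toHex())  # Prints "01abff".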
def test_expired_certificate(self):
    # Copy the default certificate.
    expiredCertificate = Data(
      self._fixture._subIdentity.getDefaultKey().getDefaultCertificate())
    info = SigningInfo(self._fixture._identity)
    # Validity period from 2 hours ago to 1 hour ago.
    now = Common.getNowMilliseconds()
    info.setValidityPeriod(
      ValidityPeriod(now - 2 * 3600 * 1000, now - 3600 * 1000.0))
    self._fixture._keyChain.sign(expiredCertificate, info)
    try:
        CertificateV2(expiredCertificate).wireEncode()
    except Exception as ex:
        self.fail("Unexpected exception: " + str(ex))

    originalProcessInterest = self._fixture._face._processInterest
    def processInterest(interest, onData, onTimeout, onNetworkNack):
        if interest.getName().isPrefixOf(expiredCertificate.getName()):
            onData(interest, expiredCertificate)
        else:
            originalProcessInterest.processInterest(
              interest, onData, onTimeout, onNetworkNack)
    self._fixture._face._processInterest = processInterest

    data = Data(Name("/Security/V2/ValidatorFixture/Sub1/Sub2/Data"))
    self._fixture._keyChain.sign(data, SigningInfo(self._fixture._subIdentity))

    self.validateExpectFailure(data, "Signed by an expired certificate")
    self.assertEqual(1, len(self._fixture._face._sentInterests))
def __init__(self, data):
    super(MemoryContentCache._StaleTimeContent, self).__init__(data)

    # Set up _staleTimeMilliseconds, which is the time when the content
    # becomes stale in milliseconds according to Common.getNowMilliseconds().
    self._staleTimeMilliseconds = (Common.getNowMilliseconds() +
      data.getMetaInfo().getFreshnessPeriod())
def addSubCertificate(self, subIdentityName, issuer, params = None):
    """
    Issue a certificate for subIdentityName signed by issuer. If the identity
    does not exist, it is created. A new key is generated as the default key
    for the identity. A default certificate for the key is signed by the
    issuer using its default certificate.
    """
    if params == None:
        params = KeyChain.getDefaultKeyParams()

    subIdentity = self.addIdentity(subIdentityName, params)

    request = subIdentity.getDefaultKey().getDefaultCertificate()

    request.setName(request.getKeyName().append("parent").appendVersion(1))
    certificateParams = SigningInfo(issuer)
    # Validity period of 20 years.
    now = Common.getNowMilliseconds()
    certificateParams.setValidityPeriod(
      ValidityPeriod(now, now + 20 * 365 * 24 * 3600 * 1000.0))
    # Skip the AdditionalDescription.

    self._keyChain.sign(request, certificateParams)
    self._keyChain.setDefaultCertificate(subIdentity.getDefaultKey(), request)

    return subIdentity
def _generateCertificateForKey(self, keyName):
    # Let any raised SecurityExceptions bubble up.
    publicKeyBits = self._identityStorage.getKey(keyName)
    publicKey = PublicKey(publicKeyBits)

    timestamp = Common.getNowMilliseconds()

    # TODO: Specify where the 'KEY' component is inserted
    # to delegate responsibility for cert delivery.
    # cf: http://redmine.named-data.net/issues/1659
    certificateName = keyName.getPrefix(-1).append('KEY').append(
      keyName.get(-1))
    certificateName.append("ID-CERT").appendVersion(int(timestamp))

    certificate = IdentityCertificate()
    certificate.setName(certificateName)

    certificate.setNotBefore(timestamp)
    certificate.setNotAfter(timestamp + 2 * 365 * 24 * 3600 * 1000)  # About 2 years.

    certificate.setPublicKeyInfo(publicKey)

    # ndnsec likes to put the key name in a subject description.
    sd = CertificateSubjectDescription("2.5.4.41", keyName.toUri())
    certificate.addSubjectDescription(sd)

    certificate.encode()

    return certificate
def satisfyInterests(self, data):
    """
    Remove timed-out Interests, then for each pending Interest that the Data
    packet matches, send the Data packet through the face and remove the
    pending Interest.

    :param Data data: The Data packet to send if it satisfies an Interest.
    """
    # Go backwards through the list so we can erase entries.
    nowMilliseconds = Common.getNowMilliseconds()
    for i in range(len(self._table) - 1, -1, -1):
        pendingInterest = self._table[i]
        if pendingInterest.isTimedOut(nowMilliseconds):
            self._table.pop(i)
            continue

        # TODO: Use matchesData to match selectors?
        if pendingInterest.getInterest().matchesName(data.getName()):
            try:
                # Send to the same face from the original call to the
                # OnInterest callback. wireEncode returns the cached encoding
                # if available.
                pendingInterest.getFace().send(data.wireEncode())
            except:
                logging.exception("Error calling Face.send in satisfyInterests")

            # The pending interest is satisfied, so remove it.
            self._table.pop(i)
def __init__(self, array=None, copy=True):
    if array == None:
        self._array = None
    elif isinstance(array, Blob):
        # Use the existing _array. Don't need to check for copy.
        self._array = array._array
    else:
        array = Common.stringToUtf8Array(array)

        if copy:
            # We are copying, so just make another bytearray.
            # We always use a memoryview so that slicing is efficient.
            if type(array) is _memoryviewWrapper:
                # Use the underlying memoryview directly. (When we only
                # support Python 3.3 or later, this check is not necessary.)
                self._array = memoryview(bytearray(array._view))
            else:
                self._array = memoryview(bytearray(array))
        else:
            if type(array) is bytearray:
                # We always use a memoryview so that slicing is efficient.
                self._array = memoryview(array)
            else:
                # Can't take a memoryview, so use as-is.
                self._array = array

        if not _memoryviewUsesInt and type(self._array) is memoryview:
            # memoryview elements are not int (Python versions before 3.3)
            # so we need a wrapper which will return int elements.
            self._array = _memoryviewWrapper(self._array)
def run(self, generator):
    now = Common.getNowMilliseconds()
    real_delay = now - self.start_time
    if real_delay <= self.interval:
        return self
    else:
        return generator.send(real_delay)
def __init__(self, arg1 = None, arg2 = None):
    if arg1 == None or Common.typeIsString(arg1):
        filePath = ""
        if arg1 == None and arg2 == None:
            # Check if we can connect using UnixSocket.
            tryFilePath = "/var/run/nfd.sock"
            # Use listdir because isfile doesn't see socket file types.
            if (os.path.basename(tryFilePath) in
                  os.listdir(os.path.dirname(tryFilePath))):
                filePath = tryFilePath
            else:
                tryFilePath = "/tmp/.ndnd.sock"
                if (os.path.basename(tryFilePath) in
                      os.listdir(os.path.dirname(tryFilePath))):
                    filePath = tryFilePath

        if filePath == "":
            transport = TcpTransport()
            host = arg1 if arg1 != None else "localhost"
            connectionInfo = TcpTransport.ConnectionInfo(
              host, arg2 if type(arg2) is int else 6363)
        else:
            transport = UnixTransport()
            connectionInfo = UnixTransport.ConnectionInfo(filePath)
    else:
        transport = arg1
        connectionInfo = arg2

    self._node = Node(transport, connectionInfo)
    self._commandKeyChain = None
    self._commandCertificateName = Name()
def refresh(self):
    """
    Request a certificate refresh.
    """
    now = Common.getNowMilliseconds()
    if self._expireTime > now:
        return

    self._expireTime = now + self._refreshPeriod
    logging.getLogger(__name__).info(
      "Reloading the dynamic trust anchor group")

    # Save a copy of _anchorNames.
    oldAnchorNames = set(self._anchorNames)

    if not self._isDirectory:
        self._loadCertificate(self._path, oldAnchorNames)
    else:
        try:
            allFiles = [f for f in os.listdir(self._path)
              if os.path.isfile(os.path.join(self._path, f))]
        except:
            raise RuntimeError("Cannot list files in directory " + self._path)

        for f in allFiles:
            self._loadCertificate(os.path.join(self._path, f), oldAnchorNames)

    # Remove old certificates.
    for name in oldAnchorNames:
        self._anchorNames.remove(name)
        self._certificates.remove(name)
def addCertificate(self, key, issuerId):
    """
    Add a self-signed certificate made from the key and issuer ID.

    :param PibKey key: The key for the certificate.
    :param str issuerId: The issuer ID name component for the certificate name.
    :return: The new certificate.
    :rtype: CertificateV2
    """
    certificateName = Name(key.getName())
    certificateName.append(issuerId).appendVersion(3)
    certificate = CertificateV2()
    certificate.setName(certificateName)

    # Set the MetaInfo.
    certificate.getMetaInfo().setType(ContentType.KEY)
    # One hour.
    certificate.getMetaInfo().setFreshnessPeriod(3600 * 1000.0)

    # Set the content.
    certificate.setContent(key.getPublicKey())

    params = SigningInfo(key)
    # Validity period of 10 days.
    now = Common.getNowMilliseconds()
    params.setValidityPeriod(
      ValidityPeriod(now, now + 10 * 24 * 3600 * 1000.0))

    self._keyChain.sign(certificate, params)
    return certificate
def __init__(self, array = None, copy = True):
    self._hash = None
    if array == None:
        self._array = None
    elif isinstance(array, Blob):
        # Use the existing _array. Don't need to check for copy.
        self._array = array._array
    else:
        array = Common.stringToUtf8Array(array)

        if copy:
            # We are copying, so just make another bytearray.
            # We always use a memoryview so that slicing is efficient.
            if type(array) is _memoryviewWrapper:
                # Use the underlying memoryview directly. (When we only
                # support Python 3.3 or later, this check is not necessary.)
                self._array = memoryview(bytearray(array._view))
            else:
                self._array = memoryview(bytearray(array))
        else:
            if type(array) is bytearray:
                # We always use a memoryview so that slicing is efficient.
                self._array = memoryview(array)
            else:
                # Can't take a memoryview, so use as-is.
                self._array = array

        if not _memoryviewUsesInt and type(self._array) is memoryview:
            # memoryview elements are not int (Python versions before 3.3)
            # so we need a wrapper which will return int elements.
            self._array = _memoryviewWrapper(self._array)
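# Example: the two construction modes handled above. Copying (the default)
# makes the Blob independent of the source buffer; copy=False wraps the
# existing bytearray, and the caller promises not to modify it afterwards.
from pyndn.util import Blob

copied = Blob(bytearray([1, 2, 3]))

raw = bytearray([4, 5, 6])
wrapped = Blob(raw, False)

print(copied.toHex())   # Prints "010203".
print(wrapped.size())   # Prints 3.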
def addDirectory(self, directoryName, refreshPeriod):
    allFiles = [f for f in os.listdir(directoryName)
      if os.path.isfile(os.path.join(directoryName, f))]
    certificateNames = []
    for f in allFiles:
        if self._isSecurityV1:
            try:
                fullPath = os.path.join(directoryName, f)
                cert = self.loadIdentityCertificateFromFile(fullPath)
            except Exception:
                pass  # Allow files that are not certificates.
            else:
                # Cut off the timestamp so it matches KeyLocator Name format.
                certUri = cert.getName()[:-1].toUri()
                self._certificateCache.insertCertificate(cert)
                certificateNames.append(certUri)
        else:
            try:
                fullPath = os.path.join(directoryName, f)
                cert = self.loadCertificateV2FromFile(fullPath)
            except Exception:
                pass  # Allow files that are not certificates.
            else:
                # Get the key name since this is in the KeyLocator.
                certUri = CertificateV2.extractKeyNameFromCertName(
                  cert.getName()).toUri()
                self._certificateCacheV2.insert(cert)
                certificateNames.append(certUri)

    self._refreshDirectories[directoryName] = {
      'certificates': certificateNames,
      'nextRefresh': Common.getNowMilliseconds() + refreshPeriod,
      'refreshPeriod': refreshPeriod }
def _contentCacheAdd(self, data):
    """
    Add the data packet to the _contentCache. Remove timed-out entries from
    _pendingInterestTable. If the data packet satisfies any pending interest,
    then send the data packet to the pending interest's transport and remove
    from the _pendingInterestTable.

    :param Data data: The data packet to add.
    """
    self._contentCache.add(data)

    # Remove timed-out interests and check if the data packet matches any
    # pending interest.
    # Go backwards through the list so we can erase entries.
    nowMilliseconds = Common.getNowMilliseconds()
    for i in range(len(self._pendingInterestTable) - 1, -1, -1):
        pendingInterest = self._pendingInterestTable[i]
        if pendingInterest.isTimedOut(nowMilliseconds):
            self._pendingInterestTable.pop(i)
            continue

        if pendingInterest.getInterest().matchesName(data.getName()):
            try:
                # Send to the same transport from the original call to
                # onInterest. wireEncode returns the cached encoding if
                # available.
                pendingInterest.getTransport().send(data.wireEncode().toBuffer())
            except Exception as ex:
                logging.getLogger(__name__).error(
                  "Error in transport.send: %s", str(ex))
                return

            # The pending interest is satisfied, so remove it.
            self._pendingInterestTable.pop(i)
def _refresh(self):
    """
    Remove all outdated certificate entries.
    """
    # _nowOffsetMilliseconds is only used for testing.
    now = Common.getNowMilliseconds() + self._nowOffsetMilliseconds
    if now < self._nextRefreshTime:
        return

    # We recompute _nextRefreshTime.
    nextRefreshTime = sys.float_info.max
    # Go backwards through the list so we can erase entries.
    i = len(self._certificatesByNameKeys) - 1
    while i >= 0:
        entry = self._certificatesByName[self._certificatesByNameKeys[i]]

        if entry._removalTime <= now:
            del self._certificatesByName[self._certificatesByNameKeys[i]]
            self._certificatesByNameKeys.pop(i)
        else:
            nextRefreshTime = min(nextRefreshTime, entry._removalTime)

        i -= 1

    self._nextRefreshTime = nextRefreshTime
def processInterest(interest, onData, onTimeout, onNetworkNack):
    try:
        # Create another key for the same identity and sign it properly.
        parentKey = self._fixture._keyChain.createKey(
          self._fixture._subIdentity)
        requestedKey = self._fixture._subIdentity.getKey(interest.getName())

        # Copy the Name.
        certificateName = Name(requestedKey.getName())
        certificateName.append("looper").appendVersion(1)
        certificate = CertificateV2()
        certificate.setName(certificateName)

        # Set the MetaInfo.
        certificate.getMetaInfo().setType(ContentType.KEY)
        # Set the freshness period to one hour.
        certificate.getMetaInfo().setFreshnessPeriod(3600 * 1000.0)

        # Set the content.
        certificate.setContent(requestedKey.getPublicKey())

        # Set SigningInfo.
        params = SigningInfo(parentKey)
        # Validity period from 10 days before to 10 days after now.
        now = Common.getNowMilliseconds()
        params.setValidityPeriod(ValidityPeriod(
          now - 10 * 24 * 3600 * 1000.0, now + 10 * 24 * 3600 * 1000.0))

        self._fixture._keyChain.sign(certificate, params)
        onData(interest, certificate)
    except Exception as ex:
        self.fail("Error in InfiniteCertificateChain: " + repr(ex))
def getNewKeyName(self, identityName, useKsk):
    """
    Generate a name for a new key belonging to the identity.

    :param Name identityName: The identity name.
    :param bool useKsk: If True, generate a KSK name, otherwise a DSK name.
    :return: The generated key name.
    :rtype: Name
    """
    timestamp = math.floor(Common.getNowMilliseconds() / 1000.0)
    while timestamp <= self._lastTimestamp:
        # Make the timestamp unique.
        timestamp += 1
    self._lastTimestamp = timestamp

    nowString = repr(timestamp).replace(".0", "")
    if useKsk:
        keyIdStr = "ksk-" + nowString
    else:
        keyIdStr = "dsk-" + nowString

    keyName = Name(identityName).append(keyIdStr)

    if self.doesKeyExist(keyName):
        raise SecurityException("Key name already exists")

    return keyName
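# Worked example of the key ID string produced above (the timestamp is made
# up). repr() plus the ".0" replacement gives the same digits on Python 2
# (where math.floor returns a float) and Python 3 (where it returns an int).
import math

timestamp = math.floor(1577836800123.0 / 1000.0)  # 1577836800 (1577836800.0 on Python 2)
nowString = repr(timestamp).replace(".0", "")     # "1577836800" either way
print("ksk-" + nowString)                         # Prints "ksk-1577836800"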
def processEvents(self):
    """
    Process any packets to receive and call callbacks such as onData,
    onInterest or onTimeout. This returns immediately if there is no data to
    receive. This blocks while calling the callbacks. You should repeatedly
    call this from an event loop, with calls to sleep as needed so that the
    loop doesn't use 100% of the CPU. Since processEvents modifies the pending
    interest table, your application should make sure that it calls
    processEvents in the same thread as expressInterest (which also modifies
    the pending interest table).

    :raises: This may raise an exception for reading data or in the callback
      for processing the data. If you call this from a main event loop, you
      may want to catch and log/disregard all exceptions.
    """
    self._transport.processEvents()

    # Check for delayed calls. Since callLater does a sorted insert into
    # _delayedCallTable, the check for timeouts is quick and does not require
    # searching the entire table. If callLater is overridden to use a
    # different mechanism, then processEvents is not needed to check for
    # delayed calls.
    now = Common.getNowMilliseconds()
    # _delayedCallTable is sorted on _callTime, so we only need to process
    # the timed-out entries at the front, then quit.
    while (len(self._delayedCallTable) > 0 and
           self._delayedCallTable[0].getCallTime() <= now):
        delayedCall = self._delayedCallTable[0]
        del self._delayedCallTable[0]
        delayedCall.callCallback()
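# Example (assumed usage): scheduling a delayed call that this processEvents
# loop fires. Face.callLater and the 500 ms delay are assumptions for
# illustration; the sketch simply polls for about one second.
import time
from pyndn import Face

face = Face()

def sayHello():
    print("Called roughly 500 ms later")

face.callLater(500.0, sayHello)

startTime = time.time()
while time.time() - startTime < 1.0:
    face.processEvents()
    time.sleep(0.01)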
def _interestTimestampIsFresh(self, keyName, timestamp, failureReason):
    """
    Determine whether the timestamp from the interest is newer than the last
    use of this key, or within the grace interval on first use.

    :param Name keyName: The name of the public key used to sign the interest.
    :param int timestamp: The timestamp extracted from the interest name.
    :param Array<str> failureReason: If verification fails, set
      failureReason[0] to the failure reason string.
    :return: True if the timestamp is fresh, otherwise False.
    :rtype: bool
    """
    try:
        lastTimestamp = self._keyTimestamps[keyName.toUri()]
    except KeyError:
        now = Common.getNowMilliseconds()
        notBefore = now - self._keyGraceInterval
        notAfter = now + self._keyGraceInterval

        if not (timestamp > notBefore and timestamp < notAfter):
            failureReason[0] = (
              "The command interest timestamp is not within the first use grace period of " +
              str(self._keyGraceInterval) + " milliseconds.")
            return False
        else:
            return True
    else:
        if timestamp <= lastTimestamp:
            failureReason[0] = (
              "The command interest timestamp is not newer than the previous timestamp")
            return False
        else:
            return True
def main():
    # The default Face will connect using a Unix socket, or to "localhost".
    face = Face()

    # Use the system default key chain and certificate name to sign.
    keyChain = KeyChain()
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

    publishIntervalMs = 1000.0
    stream = Namespace("/ndn/eb/stream/run/28/annotations", keyChain)
    handler = GeneralizedObjectStreamHandler(stream)

    dump("Register prefix", stream.name)
    # Set the face and register to receive Interests.
    stream.setFace(face,
      lambda prefixName: dump("Register failed for prefix", prefixName))

    # Loop, producing a new object every publishIntervalMs milliseconds (and
    # also calling processEvents()).
    previousPublishMs = 0
    while True:
        now = Common.getNowMilliseconds()
        if now >= previousPublishMs + publishIntervalMs:
            dump("Preparing data for sequence",
              handler.getProducedSequenceNumber() + 1)
            handler.addObject(
              Blob("Payload " + str(handler.getProducedSequenceNumber() + 1)),
              "application/json")

            previousPublishMs = now

        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the
        # CPU.
        time.sleep(0.01)
def refreshAnchors(self):
    refreshTime = Common.getNowMilliseconds()
    for directory, info in self._refreshDirectories.items():
        nextRefreshTime = info['nextRefresh']
        if nextRefreshTime <= refreshTime:
            certificateList = info['certificates'][:]
            # Delete the certificates associated with this directory if
            # possible, then re-import.
            # IdentityStorage subclasses may not support deletion.
            # Should we be deleting?
            for c in certificateList:
                try:
                    if self._isSecurityV1:
                        self._certificateCache.deleteCertificate(Name(c))
                    else:
                        # The name in the CertificateCacheV2 is the full
                        # certificate name, but the name in the
                        # certificateList is just the key name, so find the
                        # certificate based on the prefix first.
                        foundCertificate = self._certificateCacheV2.find(
                          Name(c))
                        if foundCertificate != None:
                            self._certificateCacheV2.deleteCertificate(
                              foundCertificate.getName())
                except KeyError:
                    # Was already removed? Not supported?
                    pass

            self.addDirectory(directory, info['refreshPeriod'])
def generateCertificateForKey(self, keyName):
    # Let any raised SecurityExceptions bubble up.
    publicKeyBits = self._identityStorage.getKey(keyName)
    publicKeyType = self._identityStorage.getKeyType(keyName)
    publicKey = PublicKey(publicKeyType, publicKeyBits)

    timestamp = Common.getNowMilliseconds()

    # TODO: Specify where the 'KEY' component is inserted
    # to delegate responsibility for cert delivery.
    certificateName = keyName.getPrefix(-1).append('KEY').append(
      keyName.get(-1))
    # Convert the float timestamp to an integer for packing.
    certificateName.append("ID-CERT").append(
      Name.Component(struct.pack(">Q", int(timestamp))))

    certificate = IdentityCertificate(certificateName)
    certificate.setNotBefore(timestamp)
    certificate.setNotAfter(timestamp + 30 * 86400 * 1000)  # About a month.

    certificate.setPublicKeyInfo(publicKey)

    # ndnsec likes to put the key name in a subject description.
    sd = CertificateSubjectDescription("2.5.4.41", keyName.toUri())
    certificate.addSubjectDescription(sd)

    certificate.encode()

    return certificate
def _updateTimestampForKey(self, keyName, timestamp):
    """
    Trim the table size down if necessary, and insert/update the latest
    interest signing timestamp for the key. Any key which has not been used
    within the TTL period is purged. If the table is still too large, the
    oldest key is purged.

    :param Name keyName: The name of the public key used to sign the interest.
    :param int timestamp: The timestamp extracted from the interest name.
    """
    self._keyTimestamps[keyName.toUri()] = timestamp

    if len(self._keyTimestamps) >= self._maxTrackedKeys:
        now = Common.getNowMilliseconds()
        oldestTimestamp = now
        oldestKey = None
        # Copy the keys since entries may be deleted while iterating.
        trackedKeys = list(self._keyTimestamps.keys())
        for keyUri in trackedKeys:
            ts = self._keyTimestamps[keyUri]
            if now - ts > self._keyTimestampTtl:
                del self._keyTimestamps[keyUri]
            elif ts < oldestTimestamp:
                oldestTimestamp = ts
                oldestKey = keyUri

        if len(self._keyTimestamps) > self._maxTrackedKeys:
            # Have not removed enough, so remove the oldest key.
            del self._keyTimestamps[oldestKey]
def testNameAppendAndExtract(self):
    size = 10

    iblt = InvertibleBloomLookupTable(size)
    prefix = Name("/test/memphis").appendNumber(1).toUri()
    newHash = Common.murmurHash3Blob(11, prefix)
    iblt.insert(newHash)

    expectedEncoding = [
      0x78, 0xda, 0x63, 0x64, 0x60, 0x60, 0xd8, 0x55, 0xb5, 0xfc, 0x5b, 0xb2,
      0xef, 0xe2, 0x6c, 0x06, 0x0a, 0x00, 0x23, 0x1d, 0xcd, 0x01, 0x00, 0x65,
      0x29, 0x0d, 0xb1
    ]

    ibltName = Name("sync")
    encodedIblt = iblt.encode()
    self.assertTrue(encodedIblt.equals(Blob(expectedEncoding)))
    ibltName.append(encodedIblt)

    received = InvertibleBloomLookupTable(size)
    received.initialize(ibltName.get(-1).getValue())

    self.assertTrue(iblt.equals(received))

    receivedDifferentSize = InvertibleBloomLookupTable(20)
    try:
        receivedDifferentSize.initialize(ibltName.get(-1).getValue())
        self.fail("Did not throw the expected exception")
    except RuntimeError:
        pass
    else:
        self.fail("Did not throw the expected exception")
def _checkTimestamp(self, state, keyName, timestamp):
    """
    :param ValidationState state: On error, this calls state.fail and returns
      False.
    :param Name keyName: The key name.
    :param float timestamp: The timestamp as milliseconds since Jan 1, 1970
      UTC.
    :return: On success, return True. On error, call state.fail and return
      False.
    :rtype: bool
    """
    self._cleanUp()

    # _nowOffsetMilliseconds is only used for testing.
    now = Common.getNowMilliseconds() + self._nowOffsetMilliseconds
    if (timestamp < now - self._options._gracePeriod or
        timestamp > now + self._options._gracePeriod):
        state.fail(ValidationError(ValidationError.POLICY_ERROR,
          "Timestamp is outside the grace period for key " + keyName.toUri()))
        return False

    index = self._findByKeyName(keyName)
    if index >= 0:
        if timestamp <= self._container[index]._timestamp:
            state.fail(ValidationError(ValidationError.POLICY_ERROR,
              "Timestamp is reordered for key " + keyName.toUri()))
            return False

    def successCallback(interest):
        self._insertNewRecord(interest, keyName, timestamp)
    state.addSuccessCallback(successCallback)

    return True
def _onObjectNeeded(self, namespace, neededNamespace, callbackId):
    """
    This is called when an object is needed at the Handler's namespace. If
    neededNamespace is the Handler's Namespace (called by the application),
    then fetch the _latest packet. If neededNamespace is for the _latest
    packet (from an incoming Interest), produce the _latest packet for the
    current sequence number.
    """
    if neededNamespace == self.namespace:
        # Assume this is called by a consumer. Fetch the _latest packet.
        self._latestNamespace.objectNeeded(True)
        return True

    if (neededNamespace == self._latestNamespace and
        self._producedSequenceNumber >= 0):
        # Produce the _latest Data packet.
        sequenceName = Name(self.namespace.name).append(
          Name.Component.fromSequenceNumber(self._producedSequenceNumber))
        delegations = DelegationSet()
        delegations.add(1, sequenceName)

        versionedLatest = self._latestNamespace[Name.Component.fromVersion(
          Common.getNowMilliseconds())]
        metaInfo = MetaInfo()
        metaInfo.setFreshnessPeriod(self._latestPacketFreshnessPeriod)
        versionedLatest.setNewDataMetaInfo(metaInfo)
        # Make the Data packet and reply to outstanding Interests.
        versionedLatest.serializeObject(delegations.wireEncode())
        return True

    return False
def add(self, data):
    """
    Add the Data packet to the cache so that it is available to use to answer
    interests. If data.getMetaInfo().getFreshnessPeriod() is not None, set the
    staleness time to now plus data.getMetaInfo().getFreshnessPeriod(), which
    is checked during cleanup to remove stale content. This also checks if
    cleanupIntervalMilliseconds milliseconds have passed and removes stale
    content from the cache. After removing stale content, remove timed-out
    pending interests from storePendingInterest(), then if the added Data
    packet satisfies any interest, send it through the face and remove the
    interest from the pending interest table.

    :param Data data: The Data packet object to put in the cache. This copies
      the fields from the object.
    """
    self._doCleanup()

    if (data.getMetaInfo().getFreshnessPeriod() != None and
        data.getMetaInfo().getFreshnessPeriod() >= 0.0):
        # The content will go stale, so use staleTimeCache.
        content = MemoryContentCache._StaleTimeContent(data)
        # Insert into _staleTimeCache, sorted on
        # content._staleTimeMilliseconds.
        # Search from the back since we expect it to go there.
        i = len(self._staleTimeCache) - 1
        while i >= 0:
            if (self._staleTimeCache[i]._staleTimeMilliseconds <=
                content._staleTimeMilliseconds):
                break
            i -= 1

        # Element i is the greatest less than or equal to
        # content._staleTimeMilliseconds, so insert after it.
        self._staleTimeCache.insert(i + 1, content)
    else:
        # The data does not go stale, so use _noStaleTimeCache.
        self._noStaleTimeCache.append(MemoryContentCache._Content(data))

    # Remove timed-out interests and check if the data packet matches any
    # pending interest.
    # Go backwards through the list so we can erase entries.
    nowMilliseconds = Common.getNowMilliseconds()
    for i in range(len(self._pendingInterestTable) - 1, -1, -1):
        pendingInterest = self._pendingInterestTable[i]
        if pendingInterest.isTimedOut(nowMilliseconds):
            self._pendingInterestTable.pop(i)
            continue

        if pendingInterest.getInterest().matchesName(data.getName()):
            try:
                # Send to the same face from the original call to onInterest.
                # wireEncode returns the cached encoding if available.
                pendingInterest.getFace().send(data.wireEncode())
            except Exception as ex:
                logging.getLogger(__name__).error(
                  "Error in face.send: %s", str(ex))
                return

            # The pending interest is satisfied, so remove it.
            self._pendingInterestTable.pop(i)
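# Example (assumed usage): a minimal producer built on MemoryContentCache. The
# import paths, prefix and payload are assumptions; this also assumes a default
# identity exists for command signing.
from pyndn import Face, Name, Data
from pyndn.util import Blob
from pyndn.util.memory_content_cache import MemoryContentCache
from pyndn.security import KeyChain

face = Face()
keyChain = KeyChain()
face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

cache = MemoryContentCache(face)

def onRegisterFailed(prefix):
    print("Register failed for prefix " + prefix.toUri())

prefix = Name("/example/app")
# Store unmatched Interests so a later add() can answer them.
cache.registerPrefix(prefix, onRegisterFailed, cache.getStorePendingInterest())

data = Data(Name(prefix).append("greeting"))
data.setContent(Blob("hello"))
data.getMetaInfo().setFreshnessPeriod(10000.0)
# add() answers any stored pending Interest that the Data name matches.
cache.add(data)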
def _update(self, plusOrMinus, key):
    """
    Update the entries in _hashTable.

    :param int plusOrMinus: The amount to update the count.
    :param int key: The key for computing the entry.
    """
    bucketsPerHash = int(
      len(self._hashTable) / InvertibleBloomLookupTable.N_HASH)

    for i in range(InvertibleBloomLookupTable.N_HASH):
        startEntry = i * bucketsPerHash
        h = Common.murmurHash3Uint32(i, key)
        entry = self._hashTable[startEntry + (h % bucketsPerHash)]
        entry._count += plusOrMinus
        entry._keySum ^= key
        entry._keyCheck ^= Common.murmurHash3Uint32(
          InvertibleBloomLookupTable.N_HASHCHECK, key)
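# Example (assumed usage): the public insert/listEntries API that drives
# _update above, condensed from the tests. The import path for
# InvertibleBloomLookupTable and the name below are assumptions; the hash seed
# 11 matches the tests.
from pyndn import Name
from pyndn.util.common import Common
from pyndn.sync.detail.invertible_bloom_lookup_table import (
  InvertibleBloomLookupTable)

iblt = InvertibleBloomLookupTable(10)
prefix = Name("/example/app").appendNumber(1).toUri()
iblt.insert(Common.murmurHash3Blob(11, prefix))

positive = set()
negative = set()
# With few entries, listEntries succeeds and reports the inserted hash.
print(iblt.listEntries(positive, negative))  # True
print(positive)                              # The set with the inserted hash.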
def test_refresh_10s(self):
    with open('policy_config/testData', 'r') as dataFile:
        encodedData = dataFile.read()
    data = Data()
    dataBlob = Blob(b64decode(encodedData))
    data.wireDecode(dataBlob)

    # This test is needed, since the KeyChain will express interests in
    # unknown certificates.
    vr = doVerify(self.policyManager, data)

    self.assertTrue(vr.hasFurtherSteps,
      "ConfigPolicyManager did not create ValidationRequest for unknown certificate")
    self.assertEqual(vr.successCount, 0,
      "ConfigPolicyManager called success callback with pending ValidationRequest")
    self.assertEqual(vr.failureCount, 0,
      "ConfigPolicyManager called failure callback with pending ValidationRequest")

    # Now save the cert data to our anchor directory, and wait.
    # We have to sign it with the current identity or the policy manager will
    # create an interest for the signing certificate.
    cert = CertificateV2()
    certData = b64decode(CERT_DUMP)
    cert.wireDecode(Blob(certData, False))
    signingInfo = SigningInfo()
    signingInfo.setSigningIdentity(self.identityName)
    # Make sure the validity period is current for two years.
    now = Common.getNowMilliseconds()
    signingInfo.setValidityPeriod(ValidityPeriod(
      now, now + 2 * 365 * 24 * 3600 * 1000.0))

    self.keyChain.sign(cert, signingInfo)
    encodedCert = b64encode(cert.wireEncode().toBytes())
    with open(self.testCertFile, 'w') as certFile:
        certFile.write(Blob(encodedCert, False).toRawStr())

    # Still too early for refresh to pick it up.
    vr = doVerify(self.policyManager, data)
    self.assertTrue(vr.hasFurtherSteps,
      "ConfigPolicyManager refresh occurred sooner than specified")
    self.assertEqual(vr.successCount, 0,
      "ConfigPolicyManager called success callback with pending ValidationRequest")
    self.assertEqual(vr.failureCount, 0,
      "ConfigPolicyManager called failure callback with pending ValidationRequest")
    time.sleep(6)

    # Now we should find it.
    vr = doVerify(self.policyManager, data)
    self.assertFalse(vr.hasFurtherSteps,
      "ConfigPolicyManager did not refresh certificate store")
    self.assertEqual(vr.successCount, 1,
      "Verification success called {} times instead of 1".format(
        vr.successCount))
    self.assertEqual(vr.failureCount, 0,
      "ConfigPolicyManager did not verify valid signed data")
def setFreshnessPeriod(self, freshnessPeriod):
    """
    Set the freshness period.

    :param float freshnessPeriod: The freshness period in milliseconds, or
      None for not specified.
    """
    self._freshnessPeriod = Common.nonNegativeFloatOrNone(freshnessPeriod)
    self._changeCount += 1
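# Example: setting the freshness period on a Data packet's MetaInfo. The name
# and the 10-second value are arbitrary.
from pyndn import Data, Name

data = Data(Name("/example/data"))
# The freshness period is in milliseconds; None means not specified.
data.getMetaInfo().setFreshnessPeriod(10 * 1000.0)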